/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003-2016 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "internal-fn.h"

/* For lang_hooks.types.type_for_mode.  */
#include "langhooks.h"
/* Return the vectorized type for the given statement.  */

tree
stmt_vectype (struct _stmt_vec_info *stmt_info)
{
  return STMT_VINFO_VECTYPE (stmt_info);
}
/* Return TRUE iff the given statement is in an inner loop relative to
   the loop being vectorized.  */

bool
stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
{
  gimple *stmt = STMT_VINFO_STMT (stmt_info);
  basic_block bb = gimple_bb (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop;

  if (!loop_vinfo)
    return false;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  return (bb->loop_father == loop->inner);
}
/* Record the cost of a statement, either by directly informing the
   target model or by saving it in a vector for later processing.
   Return a preliminary estimate of the statement's cost.  */

unsigned
record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
		  enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
		  int misalign, enum vect_cost_model_location where)
{
  if (body_cost_vec)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      stmt_info_for_cost si = { count, kind,
				stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
				misalign };
      body_cost_vec->safe_push (si);
      return (unsigned)
	(builtin_vectorization_cost (kind, vectype, misalign) * count);
    }
  else
    return add_stmt_cost (stmt_info->vinfo->target_cost_data,
			  count, kind, stmt_info, misalign, where);
}
/* Return a variable of type ELEM_TYPE[NELEMS].  */

static tree
create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
			 "vect_array");
}
/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT and the vector is associated
   with scalar destination SCALAR_DEST.  */

static tree
read_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
		   tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;
  gimple *new_stmt;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return vect_name;
}
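
/* For example (illustrative, not from the original file): for N == 2 the
   assignment built above is essentially

     vectN = vect_array[2];

   with the LHS being a fresh SSA name derived from SCALAR_DEST, which is
   what is returned to the caller.  */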
/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT.  */

static void
write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect,
		    tree array, unsigned HOST_WIDE_INT n)
{
  tree array_ref;
  gimple *new_stmt;

  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
}
/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

static tree
create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
{
  tree mem_ref, alias_ptr_type;

  alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
  return mem_ref;
}
/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
		    enum vect_relevant relevant, bool live_p,
		    bool used_in_pattern)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
  gimple *pattern_stmt;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "mark relevant %d, live %d.\n", relevant, live_p);

  /* If this stmt is an original stmt in a pattern, we might need to mark its
     related pattern stmt instead of the original stmt.  However, such stmts
     may have their own uses that are not in any pattern, in such cases the
     stmt itself should be marked.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      bool found = false;
      if (!used_in_pattern)
        {
          imm_use_iterator imm_iter;
          use_operand_p use_p;
          gimple *use_stmt;
          tree lhs;
          loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
          struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

          if (is_gimple_assign (stmt))
            lhs = gimple_assign_lhs (stmt);
          else
            lhs = gimple_call_lhs (stmt);

          /* This use is out of pattern use, if LHS has other uses that are
             pattern uses, we should mark the stmt itself, and not the pattern
             stmt.  */
          if (lhs && TREE_CODE (lhs) == SSA_NAME)
            FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
              {
                if (is_gimple_debug (USE_STMT (use_p)))
                  continue;
                use_stmt = USE_STMT (use_p);

                if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
                  continue;

                if (vinfo_for_stmt (use_stmt)
                    && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
                  {
                    found = true;
                    break;
                  }
              }
        }

      if (!found)
        {
          /* This is the last stmt in a sequence that was detected as a
             pattern that can potentially be vectorized.  Don't mark the stmt
             as relevant/live because it's not going to be vectorized.
             Instead mark the pattern-stmt that replaces it.  */

          pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "last stmt in pattern. don't mark"
                             " relevant/live.\n");
          stmt_info = vinfo_for_stmt (pattern_stmt);
          gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
          save_relevant = STMT_VINFO_RELEVANT (stmt_info);
          save_live_p = STMT_VINFO_LIVE_P (stmt_info);
          stmt = pattern_stmt;
        }
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "already marked relevant/live.\n");
      return;
    }

  worklist->safe_push (stmt);
}
/* Function vect_stmt_relevant_p.

   Return true if STMT in loop that is represented by LOOP_VINFO is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - control stmts in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */

static bool
vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
		      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
	 != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt)
	&& !gimple_clobber_p (stmt))
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
                           "vec_stmt_relevant_p: stmt has vdefs.\n");
	*relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
	{
	  basic_block bb = gimple_bb (USE_STMT (use_p));
	  if (!flow_bb_inside_loop_p (loop, bb))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
                                 "vec_stmt_relevant_p: used out of loop.\n");

	      if (is_gimple_debug (USE_STMT (use_p)))
		continue;

	      /* We expect all such uses to be in the loop exit phis
		 (because of loop closed form)   */
	      gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
	      gcc_assert (bb == single_exit (loop)->dest);

              *live_p = true;
	    }
	}
    }

  return (*live_p || *relevant);
}
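
/* For example (illustrative, not from the original file): in

     for (i = 0; i < n; i++)
       s += a[i];
     use (s);

   the summation stmt is "live" because s is used after the loop, so its
   loop-exit value must be preserved by the vectorized code.  */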
/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref. FORNOW this means that its of one of
     the following forms:
     -1- ARRAY_REF = var
     -2- var = ARRAY_REF
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))
    {
      if (is_gimple_call (stmt)
	  && gimple_call_internal_p (stmt))
	switch (gimple_call_internal_fn (stmt))
	  {
	  case IFN_MASK_STORE:
	    operand = gimple_call_arg (stmt, 3);
	    if (operand == use)
	      return true;
	    /* FALLTHRU */
	  case IFN_MASK_LOAD:
	    operand = gimple_call_arg (stmt, 2);
	    if (operand == use)
	      return true;
	    break;
	  default:
	    break;
	  }
      return false;
    }

  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}
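
/* For example (illustrative, not from the original file): given

     a[i] = x;

   the use of 'i' only feeds the address computation, so it has no
   non-indexing operands (this returns false for 'i'), while 'x' is the
   actual stored value and does (this returns true for 'x').  */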
/* Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
   - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
     be performed.

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
       STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array indexing),
   which does not need to be directly vectorized, then the liveness/relevance
   of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
   skip DEF_STMT cause it had already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
   be modified accordingly.

   Return true if everything is as expected. Return false otherwise.  */

static bool
process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
	     enum vect_relevant relevant, vec<gimple *> *worklist,
	     bool force)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  gimple *def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
     return true;

  if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: unsupported use in stmt.\n");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
                         "reduc-stmt defining reduc-phi in the same nest.\n");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
	dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
		  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
	outer-loop-header-bb:
		d = def_stmt
	inner-loop:
		stmt # use (d)
	outer-loop-tail-bb:
		...		  */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
                         "outer-loop def-stmt defining inner-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
		      vect_used_in_scope : vect_unused_in_scope;
	  break;

	case vect_used_in_outer_by_reduction:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_by_reduction;
	  break;

	case vect_used_in_outer:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_in_scope;
	  break;

	case vect_used_in_scope:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
	outer-loop-header-bb:
		...
	inner-loop:
		d = def_stmt
	outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
		stmt # use (d)		*/
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
                         "inner-loop def-stmt defining outer-loop stmt.\n");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
            || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
                      vect_used_in_outer_by_reduction : vect_unused_in_scope;
          break;

        case vect_used_by_reduction:
          relevant = vect_used_in_outer_by_reduction;
          break;

        case vect_used_in_scope:
          relevant = vect_used_in_outer;
          break;

        default:
          gcc_unreachable ();
        }
    }

  vect_mark_relevant (worklist, def_stmt, relevant, live_p,
		      is_pattern_stmt_p (stmt_vinfo));
  return true;
}
/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized. For example:

     for i...
       for j...
   1.    T0 = i + j
   2.	 T1 = a[T0]

   3.    j = j + 1

   Stmt 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */

bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple *stmt;
  unsigned int i;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple *phi;
  bool live_p;
  enum vect_relevant relevant, tmp_relevant;
  enum vect_def_type def_type;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_mark_stmts_to_be_vectorized ===\n");

  auto_vec<gimple *, 64> worklist;

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  phi = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	    }

	  if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, phi, relevant, live_p, false);
	}
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  stmt = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	    }

	  if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, stmt, relevant, live_p, false);
	}
    }

  /* 2. Process_worklist */
  while (worklist.length () > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = worklist.pop ();
      if (dump_enabled_p ())
	{
          dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	}

      /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
	 (DEF_STMT) as relevant/irrelevant and live/dead according to the
	 liveness and relevance properties of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
      live_p = STMT_VINFO_LIVE_P (stmt_vinfo);

      /* Generally, the liveness and relevance properties of STMT are
	 propagated as is to the DEF_STMTs of its USEs:
	  live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
	  relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)

	 One exception is when STMT has been identified as defining a reduction
	 variable; in this case we set the liveness/relevance as follows:
	   live_p = false
	   relevant = vect_used_by_reduction
	 This is because we distinguish between two kinds of relevant stmts -
	 those that are used by a reduction computation, and those that are
	 (also) used by a regular computation.  This allows us later on to
	 identify stmts that are used solely by a reduction, and therefore the
	 order of the results that they produce does not have to be kept.  */

      def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
      tmp_relevant = relevant;
      switch (def_type)
        {
          case vect_reduction_def:
	    switch (tmp_relevant)
	      {
	        case vect_unused_in_scope:
	          relevant = vect_used_by_reduction;
	          break;

	        case vect_used_by_reduction:
	          if (gimple_code (stmt) == GIMPLE_PHI)
                    break;
  	          /* fall through */

	        default:
	          if (dump_enabled_p ())
	            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "unsupported use of reduction.\n");
	          return false;
	      }

	    live_p = false;
	    break;

          case vect_nested_cycle:
            if (tmp_relevant != vect_unused_in_scope
                && tmp_relevant != vect_used_in_outer_by_reduction
                && tmp_relevant != vect_used_in_outer)
              {
                if (dump_enabled_p ())
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "unsupported use of nested cycle.\n");

                return false;
              }

            live_p = false;
            break;

          case vect_double_reduction_def:
            if (tmp_relevant != vect_unused_in_scope
                && tmp_relevant != vect_used_by_reduction)
              {
                if (dump_enabled_p ())
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "unsupported use of double reduction.\n");

                return false;
              }

            live_p = false;
            break;

          default:
            break;
        }

      if (is_pattern_stmt_p (stmt_vinfo))
        {
          /* Pattern statements are not inserted into the code, so
             FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
             have to scan the RHS or function arguments instead.  */
          if (is_gimple_assign (stmt))
            {
	      enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
	      tree op = gimple_assign_rhs1 (stmt);

	      i = 1;
	      if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
		{
		  if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
				    live_p, relevant, &worklist, false)
		      || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
				       live_p, relevant, &worklist, false))
		    return false;
		  i = 2;
		}
	      for (; i < gimple_num_ops (stmt); i++)
                {
		  op = gimple_op (stmt, i);
                  if (TREE_CODE (op) == SSA_NAME
		      && !process_use (stmt, op, loop_vinfo, live_p, relevant,
				       &worklist, false))
                    return false;
                }
            }
          else if (is_gimple_call (stmt))
            {
              for (i = 0; i < gimple_call_num_args (stmt); i++)
                {
                  tree arg = gimple_call_arg (stmt, i);
		  if (!process_use (stmt, arg, loop_vinfo, live_p, relevant,
				    &worklist, false))
                    return false;
                }
            }
        }
      else
        FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
          {
            tree op = USE_FROM_PTR (use_p);
	    if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
			      &worklist, false))
              return false;
          }

      if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
	{
	  tree off;
	  tree decl = vect_check_gather_scatter (stmt, loop_vinfo, NULL, &off, NULL);
	  gcc_assert (decl);
	  if (!process_use (stmt, off, loop_vinfo, live_p, relevant,
			    &worklist, true))
	    return false;
	}
    } /* while worklist */

  return true;
}
/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
			enum vect_def_type *dt,
			stmt_vector_for_cost *prologue_cost_vec,
			stmt_vector_for_cost *body_cost_vec)
{
  int i;
  int inside_cost = 0, prologue_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += record_stmt_cost (prologue_cost_vec, 1, vector_stmt,
					 stmt_info, 0, vect_prologue);

  /* Pass the inside-of-loop statements to the target-specific cost model.  */
  inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
				  stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_simple_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Model cost for type demotion and promotion operations.  PWR is normally
   zero for single-step promotions and demotions.  It will be one if
   two-step promotion/demotion is required, and so on.  Each additional
   step doubles the number of instructions required.  */

static void
vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
				    enum vect_def_type *dt, int pwr)
{
  int i, tmp;
  int inside_cost = 0, prologue_cost = 0;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  void *target_cost_data;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (loop_vinfo)
    target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
  else
    target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);

  for (i = 0; i < pwr + 1; i++)
    {
      tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
	(i + 1) : i;
      inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
				    vec_promote_demote, stmt_info, 0,
				    vect_body);
    }

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
				      stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_promotion_demotion_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
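
/* Illustrative arithmetic (added commentary, not part of the original
   file): for a two-step promotion (PWR == 1) the loop above charges
   vect_pow2 (1) + vect_pow2 (2) = 2 + 4 = 6 vec_promote_demote ops,
   while the corresponding demotion charges vect_pow2 (0) + vect_pow2 (1)
   = 1 + 2 = 3, reflecting that each extra step doubles the number of
   instructions needed.  */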
/* Function vect_cost_group_size

   For grouped load or store, return the group_size only if it is the first
   load or store of a group, else return 1.  This ensures that group size is
   only returned once per group.  */

static int
vect_cost_group_size (stmt_vec_info stmt_info)
{
  gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);

  if (first_stmt == STMT_VINFO_STMT (stmt_info))
    return GROUP_SIZE (stmt_info);

  return 1;
}
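
/* For example (illustrative, not from the original file): for a group of
   four interleaved stores S1..S4 with GROUP_FIRST_ELEMENT == S1, this
   returns 4 when called on S1 and 1 when called on S2..S4, so the
   group's overhead is attributed to a single representative stmt.  */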
/* Function vect_model_store_cost

   Models cost for stores.  In the case of grouped accesses, one access
   has the overhead of the grouped access attributed to it.  */

void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
		       bool store_lanes_p, enum vect_def_type dt,
		       slp_tree slp_node,
		       stmt_vector_for_cost *prologue_cost_vec,
		       stmt_vector_for_cost *body_cost_vec)
{
  int group_size;
  unsigned int inside_cost = 0, prologue_cost = 0;
  struct data_reference *first_dr;
  gimple *first_stmt;

  if (dt == vect_constant_def || dt == vect_external_def)
    prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
				       stmt_info, 0, vect_prologue);

  /* Grouped access?  */
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (slp_node)
        {
          first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
          group_size = 1;
        }
      else
        {
          first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
          group_size = vect_cost_group_size (stmt_info);
        }

      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a grouped access.  */
  else
    {
      group_size = 1;
      first_dr = STMT_VINFO_DATA_REF (stmt_info);
    }

  /* We assume that the cost of a single store-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate stores.  If a grouped
     access is instead being provided by a permute-and-store operation,
     include the cost of the permutes.  */
  if (!store_lanes_p && group_size > 1
      && !STMT_VINFO_STRIDED_P (stmt_info))
    {
      /* Uses a high and low interleave or shuffle operations for each
	 needed permute.  */
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vect_model_store_cost: strided group_size = %d .\n",
                         group_size);
    }

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  /* Costs of the stores.  */
  if (STMT_VINFO_STRIDED_P (stmt_info)
      && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      /* N scalar stores plus extracting the elements.  */
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				       scalar_store, stmt_info, 0, vect_body);
    }
  else
    vect_get_store_cost (first_dr, ncopies, &inside_cost, body_cost_vec);

  if (STMT_VINFO_STRIDED_P (stmt_info))
    inside_cost += record_stmt_cost (body_cost_vec,
				     ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				     vec_to_scalar, stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_store_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
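
/* Illustrative arithmetic (added commentary, not part of the original
   file): for a permuted group store with group_size == 4 and
   ncopies == 2, the permute count computed above is

     nstmts = 2 * ceil_log2 (4) * 4 = 2 * 2 * 4 = 16

   vec_perm operations, since each of the log2(group_size) interleave
   stages touches every vector in the group.  */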
/* Calculate cost of DR's memory access.  */
void
vect_get_store_cost (struct data_reference *dr, int ncopies,
		     unsigned int *inside_cost,
		     stmt_vector_for_cost *body_cost_vec)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vector_store, stmt_info, 0,
					  vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_store_cost: aligned.\n");
        break;
      }

    case dr_unaligned_supported:
      {
        /* Here, we assign an additional cost for the unaligned store.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_store, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);
        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_store_cost: unaligned supported by "
                           "hardware.\n");
        break;
      }

    case dr_unaligned_unsupported:
      {
        *inside_cost = VECT_MAX_COST;

        if (dump_enabled_p ())
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "vect_model_store_cost: unsupported access.\n");
        break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_model_load_cost

   Models cost for loads.  In the case of grouped accesses, the last access
   has the overhead of the grouped access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
		      bool load_lanes_p, slp_tree slp_node,
		      stmt_vector_for_cost *prologue_cost_vec,
		      stmt_vector_for_cost *body_cost_vec)
{
  int group_size;
  gimple *first_stmt;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
  unsigned int inside_cost = 0, prologue_cost = 0;

  /* Grouped accesses?  */
  first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && first_stmt && !slp_node)
    {
      group_size = vect_cost_group_size (stmt_info);
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a grouped access.  */
  else
    {
      group_size = 1;
      first_dr = dr;
    }

  /* We assume that the cost of a single load-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate loads.  If a grouped
     access is instead being provided by a load-and-permute operation,
     include the cost of the permutes.  */
  if (!load_lanes_p && group_size > 1
      && !STMT_VINFO_STRIDED_P (stmt_info))
    {
      /* Uses an even and odd extract operations or shuffle operations
	 for each needed permute.  */
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vect_model_load_cost: strided group_size = %d .\n",
                         group_size);
    }

  /* The loads themselves.  */
  if (STMT_VINFO_STRIDED_P (stmt_info)
      && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      /* N scalar loads plus gathering them into a vector.  */
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				       scalar_load, stmt_info, 0, vect_body);
    }
  else
    vect_get_load_cost (first_dr, ncopies,
			((!STMT_VINFO_GROUPED_ACCESS (stmt_info))
			 || group_size > 1 || slp_node),
			&inside_cost, &prologue_cost,
			prologue_cost_vec, body_cost_vec, true);
  if (STMT_VINFO_STRIDED_P (stmt_info))
    inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
				     stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_load_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Calculate cost of DR's memory access.  */
void
vect_get_load_cost (struct data_reference *dr, int ncopies,
		    bool add_realign_cost, unsigned int *inside_cost,
		    unsigned int *prologue_cost,
		    stmt_vector_for_cost *prologue_cost_vec,
		    stmt_vector_for_cost *body_cost_vec,
		    bool record_prologue_costs)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: aligned.\n");

        break;
      }
    case dr_unaligned_supported:
      {
        /* Here, we assign an additional cost for the unaligned load.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_load, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: unaligned supported by "
                           "hardware.\n");

        break;
      }
    case dr_explicit_realign:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
					  vector_load, stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vec_perm, stmt_info, 0, vect_body);

        /* FIXME: If the misalignment remains fixed across the iterations of
           the containing loop, the following cost should be added to the
           prologue costs.  */
        if (targetm.vectorize.builtin_mask_for_load)
	  *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
					    stmt_info, 0, vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: explicit realign\n");

        break;
      }
    case dr_explicit_realign_optimized:
      {
        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: unaligned software "
                           "pipelined.\n");

        /* Unaligned software pipeline has a load of an address, an initial
           load, and possibly a mask operation to "prime" the loop.  However,
           if this is an access in a group of loads, which provide grouped
           access, then the above cost should only be considered for one
           access in the group.  Inside the loop, there is a load op
           and a realignment op.  */

        if (add_realign_cost && record_prologue_costs)
          {
	    *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
						vector_stmt, stmt_info,
						0, vect_prologue);
            if (targetm.vectorize.builtin_mask_for_load)
	      *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
						  vector_stmt, stmt_info,
						  0, vect_prologue);
          }

	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
					  stmt_info, 0, vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: explicit realign optimized"
                           "\n");

        break;
      }

    case dr_unaligned_unsupported:
      {
        *inside_cost = VECT_MAX_COST;

        if (dump_enabled_p ())
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "vect_model_load_cost: unsupported access.\n");
        break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
   the loop preheader for the vectorized stmt STMT.  */

static void
vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
{
  if (gsi)
    vect_finish_stmt_generation (stmt, new_stmt, gsi);
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
	{
	  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
	  basic_block new_bb;
	  edge pe;

	  if (nested_in_vect_loop_p (loop, stmt))
	    loop = loop->inner;

	  pe = loop_preheader_edge (loop);
          new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
          gcc_assert (!new_bb);
	}
      else
	{
	  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
	  basic_block bb;
	  gimple_stmt_iterator gsi_bb_start;

	  gcc_assert (bb_vinfo);
	  bb = BB_VINFO_BB (bb_vinfo);
	  gsi_bb_start = gsi_after_labels (bb);
	  gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
	}
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "created new init_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
    }
}
/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new variable of type
   TYPE with the value VAL.  If TYPE is a vector type and VAL does not have
   vector type a vector with all elements equal to VAL is created first.
   Place the initialization at BSI if it is not NULL.  Otherwise, place the
   initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */

tree
vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi)
{
  gimple *init_stmt;
  tree new_temp;

  if (TREE_CODE (type) == VECTOR_TYPE
      && TREE_CODE (TREE_TYPE (val)) != VECTOR_TYPE)
    {
      if (!types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
	{
	  /* Scalar boolean value should be transformed into
	     all zeros or all ones value before building a vector.  */
	  if (VECTOR_BOOLEAN_TYPE_P (type))
	    {
	      tree true_val = build_all_ones_cst (TREE_TYPE (type));
	      tree false_val = build_zero_cst (TREE_TYPE (type));

	      if (CONSTANT_CLASS_P (val))
		val = integer_zerop (val) ? false_val : true_val;
	      else
		{
		  new_temp = make_ssa_name (TREE_TYPE (type));
		  init_stmt = gimple_build_assign (new_temp, COND_EXPR,
						   val, true_val, false_val);
		  vect_init_vector_1 (stmt, init_stmt, gsi);
		  val = new_temp;
		}
	    }
	  else if (CONSTANT_CLASS_P (val))
	    val = fold_convert (TREE_TYPE (type), val);
	  else
	    {
	      new_temp = make_ssa_name (TREE_TYPE (type));
	      init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
	      vect_init_vector_1 (stmt, init_stmt, gsi);
	      val = new_temp;
	    }
	}
      val = build_vector_from_val (type, val);
    }

  new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
  init_stmt = gimple_build_assign (new_temp, val);
  vect_init_vector_1 (stmt, init_stmt, gsi);
  return new_temp;
}
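
/* For example (illustrative, not from the original file): with
   TYPE == V4SI and VAL == 5, this emits in the preheader (or at GSI)

     cst_1 = { 5, 5, 5, 5 };

   and returns the SSA name cst_1 for use in the vectorized stmt.  */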
/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT.  This function returns a (vector) def that will be
   used in the vectorized stmt for STMT.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector def
   needs to be introduced.  VECTYPE may be used to specify a required type for
   vector invariant.  */

tree
vect_get_vec_def_for_operand (tree op, gimple *stmt, tree vectype)
{
  tree vec_oprnd;
  gimple *vec_stmt;
  gimple *def_stmt;
  stmt_vec_info def_stmt_info = NULL;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  enum vect_def_type dt;
  bool is_simple_use;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "vect_get_vec_def_for_operand: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
      dump_printf (MSG_NOTE, "\n");
    }

  is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt);
  gcc_assert (is_simple_use);
  if (dump_enabled_p ())
    {
      int loc_printed = 0;
      if (def_stmt)
        {
          if (loc_printed)
            dump_printf (MSG_NOTE, "  def_stmt =  ");
          else
            dump_printf_loc (MSG_NOTE, vect_location, "  def_stmt =  ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
        }
    }

  switch (dt)
    {
    /* operand is a constant or a loop invariant.  */
    case vect_constant_def:
    case vect_external_def:
      {
	tree vector_type;

	if (vectype)
	  vector_type = vectype;
	else if (TREE_CODE (TREE_TYPE (op)) == BOOLEAN_TYPE
		 && VECTOR_BOOLEAN_TYPE_P (stmt_vectype))
	  vector_type = build_same_sized_truth_vector_type (stmt_vectype);
	else
	  vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));

	gcc_assert (vector_type);
	return vect_init_vector (stmt, op, vector_type, NULL);
      }

    /* operand is defined inside the loop.  */
    case vect_internal_def:
      {
        /* Get the def from the vectorized stmt.  */
        def_stmt_info = vinfo_for_stmt (def_stmt);

        vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
        /* Get vectorized pattern statement.  */
        if (!vec_stmt
            && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
            && !STMT_VINFO_RELEVANT (def_stmt_info))
          vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
                       STMT_VINFO_RELATED_STMT (def_stmt_info)));
        gcc_assert (vec_stmt);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else if (is_gimple_call (vec_stmt))
	  vec_oprnd = gimple_call_lhs (vec_stmt);
	else
	  vec_oprnd = gimple_assign_lhs (vec_stmt);
        return vec_oprnd;
      }

    /* operand is defined by a loop header phi - reduction  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
      /* Code should use get_initial_def_for_reduction.  */
      gcc_unreachable ();

    /* operand is defined by loop-header phi - induction.  */
    case vect_induction_def:
      {
	gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);

        /* Get the def from the vectorized stmt.  */
        def_stmt_info = vinfo_for_stmt (def_stmt);
        vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else
	  vec_oprnd = gimple_get_lhs (vec_stmt);
        return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_get_vec_def_for_stmt_copy

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
   of the stmt that defines VEC_OPRND.
   DT is the type of the vector def VEC_OPRND.

   Context:
        In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of the
   vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:         vectorized into:        STMT_VINFO_RELATED_STMT

   S1: x = load         VS1.0:  vx.0 = memref0      VS1.1
                        VS1.1:  vx.1 = memref1      VS1.2
                        VS1.2:  vx.2 = memref2      VS1.3
                        VS1.3:  vx.3 = memref3

   S2: z = x + ...      VSnew.0:  vz0 = vx.0 + ...  VSnew.1
                        VSnew.1:  vz1 = vx.1 + ...  VSnew.2
                        VSnew.2:  vz2 = vx.2 + ...  VSnew.3
                        VSnew.3:  vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
        To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

        To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

        For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
        vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
        vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
        vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2);  */

tree
vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
{
  gimple *vec_stmt_for_operand;
  stmt_vec_info def_stmt_info;

  /* Do nothing; can reuse same def.  */
  if (dt == vect_external_def || dt == vect_constant_def)
    return vec_oprnd;

  vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
  def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
  gcc_assert (def_stmt_info);
  vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (vec_stmt_for_operand);
  if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
    vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
  else
    vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  return vec_oprnd;
}
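
/* Illustrative usage sketch (added commentary, not part of the original
   file): a caller generating NCOPIES copies of a vector stmt typically
   does

     vec_oprnd = vect_get_vec_def_for_operand (op, stmt);
     for (j = 1; j < ncopies; j++)
       vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);

   walking the STMT_VINFO_RELATED_STMT chain one copy at a time.  */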
/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */

static void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
				 vec<tree> *vec_oprnds0,
				 vec<tree> *vec_oprnds1)
{
  tree vec_oprnd = vec_oprnds0->pop ();

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  vec_oprnds0->quick_push (vec_oprnd);

  if (vec_oprnds1 && vec_oprnds1->length ())
    {
      vec_oprnd = vec_oprnds1->pop ();
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      vec_oprnds1->quick_push (vec_oprnd);
    }
}
/* Get vectorized definitions for OP0 and OP1.
   REDUC_INDEX is the index of reduction operand in case of reduction,
   and -1 otherwise.  */

void
vect_get_vec_defs (tree op0, tree op1, gimple *stmt,
		   vec<tree> *vec_oprnds0,
		   vec<tree> *vec_oprnds1,
		   slp_tree slp_node, int reduc_index)
{
  if (slp_node)
    {
      int nops = (op1 == NULL_TREE) ? 1 : 2;
      auto_vec<tree> ops (nops);
      auto_vec<vec<tree> > vec_defs (nops);

      ops.quick_push (op0);
      if (op1)
        ops.quick_push (op1);

      vect_get_slp_defs (ops, slp_node, &vec_defs, reduc_index);

      *vec_oprnds0 = vec_defs[0];
      if (op1)
	*vec_oprnds1 = vec_defs[1];
    }
  else
    {
      tree vec_oprnd;

      vec_oprnds0->create (1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt);
      vec_oprnds0->quick_push (vec_oprnd);

      if (op1)
	{
	  vec_oprnds1->create (1);
	  vec_oprnd = vect_get_vec_def_for_operand (op1, stmt);
	  vec_oprnds1->quick_push (vec_oprnd);
	}
    }
}
/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

void
vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt,
			     gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  vec_info *vinfo = stmt_info->vinfo;

  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  if (!gsi_end_p (*gsi)
      && gimple_has_mem_ops (vec_stmt))
    {
      gimple *at_stmt = gsi_stmt (*gsi);
      tree vuse = gimple_vuse (at_stmt);
      if (vuse && TREE_CODE (vuse) == SSA_NAME)
	{
	  tree vdef = gimple_vdef (at_stmt);
	  gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
	  /* If we have an SSA vuse and insert a store, update virtual
	     SSA form to avoid triggering the renamer.  Do so only
	     if we can easily see all uses - which is what almost always
	     happens with the way vectorized stmts are inserted.  */
	  if ((vdef && TREE_CODE (vdef) == SSA_NAME)
	      && ((is_gimple_assign (vec_stmt)
		   && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
		  || (is_gimple_call (vec_stmt)
		      && !(gimple_call_flags (vec_stmt)
			   & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
	    {
	      tree new_vdef = copy_ssa_name (vuse, vec_stmt);
	      gimple_set_vdef (vec_stmt, new_vdef);
	      SET_USE (gimple_vuse_op (at_stmt), new_vdef);
	    }
	}
    }
  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
    }

  gimple_set_location (vec_stmt, gimple_location (stmt));

  /* While EH edges will generally prevent vectorization, stmt might
     e.g. be in a must-not-throw region.  Ensure newly created stmts
     that could throw are part of the same region.  */
  int lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
    add_stmt_to_eh_lp (vec_stmt, lp_nr);
}
/* We want to vectorize a call to combined function CFN with function
   decl FNDECL, using VECTYPE_OUT as the type of the output and VECTYPE_IN
   as the types of all inputs.  Check whether this is possible using
   an internal function, returning its code if so or IFN_LAST if not.  */

static internal_fn
vectorizable_internal_function (combined_fn cfn, tree fndecl,
				tree vectype_out, tree vectype_in)
{
  internal_fn ifn;
  if (internal_fn_p (cfn))
    ifn = as_internal_fn (cfn);
  else
    ifn = associated_internal_fn (fndecl);
  if (ifn != IFN_LAST && direct_internal_fn_p (ifn))
    {
      const direct_internal_fn_info &info = direct_internal_fn (ifn);
      if (info.vectorizable)
	{
	  tree type0 = (info.type0 < 0 ? vectype_out : vectype_in);
	  tree type1 = (info.type1 < 0 ? vectype_out : vectype_in);
	  if (direct_internal_fn_supported_p (ifn, tree_pair (type0, type1),
					      OPTIMIZE_FOR_SPEED))
	    return ifn;
	}
    }
  return IFN_LAST;
}


static tree permute_vec_elements (tree, tree, tree, gimple *,
				  gimple_stmt_iterator *);
1697 /* Function vectorizable_mask_load_store.
1699 Check if STMT performs a conditional load or store that can be vectorized.
1700 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1701 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
1702 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1705 vectorizable_mask_load_store (gimple
*stmt
, gimple_stmt_iterator
*gsi
,
1706 gimple
**vec_stmt
, slp_tree slp_node
)
1708 tree vec_dest
= NULL
;
1709 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
1710 stmt_vec_info prev_stmt_info
;
1711 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
1712 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
1713 bool nested_in_vect_loop
= nested_in_vect_loop_p (loop
, stmt
);
1714 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
);
1715 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
1716 tree rhs_vectype
= NULL_TREE
;
1721 tree dataref_ptr
= NULL_TREE
;
1723 int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
1727 tree gather_base
= NULL_TREE
, gather_off
= NULL_TREE
;
1728 tree gather_off_vectype
= NULL_TREE
, gather_decl
= NULL_TREE
;
1729 int gather_scale
= 1;
1730 enum vect_def_type gather_dt
= vect_unknown_def_type
;
1734 enum vect_def_type dt
;
1736 if (slp_node
!= NULL
)
1739 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
1740 gcc_assert (ncopies
>= 1);
1742 is_store
= gimple_call_internal_fn (stmt
) == IFN_MASK_STORE
;
1743 mask
= gimple_call_arg (stmt
, 2);
1745 if (TREE_CODE (TREE_TYPE (mask
)) != BOOLEAN_TYPE
)
1748 /* FORNOW. This restriction should be relaxed. */
1749 if (nested_in_vect_loop
&& ncopies
> 1)
1751 if (dump_enabled_p ())
1752 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1753 "multiple types in nested loop.");
1757 if (!STMT_VINFO_RELEVANT_P (stmt_info
))
1760 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
1763 if (!STMT_VINFO_DATA_REF (stmt_info
))
1766 elem_type
= TREE_TYPE (vectype
);
1768 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
))
1771 if (STMT_VINFO_STRIDED_P (stmt_info
))
1774 if (TREE_CODE (mask
) != SSA_NAME
)
1777 if (!vect_is_simple_use (mask
, loop_vinfo
, &def_stmt
, &dt
, &mask_vectype
))
1781 mask_vectype
= get_mask_type_for_scalar_type (TREE_TYPE (vectype
));
1783 if (!mask_vectype
|| !VECTOR_BOOLEAN_TYPE_P (mask_vectype
))
1788 tree rhs
= gimple_call_arg (stmt
, 3);
1789 if (!vect_is_simple_use (rhs
, loop_vinfo
, &def_stmt
, &dt
, &rhs_vectype
))
1793 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info
))
1796 gather_decl
= vect_check_gather_scatter (stmt
, loop_vinfo
, &gather_base
,
1797 &gather_off
, &gather_scale
);
1798 gcc_assert (gather_decl
);
1799 if (!vect_is_simple_use (gather_off
, loop_vinfo
, &def_stmt
, &gather_dt
,
1800 &gather_off_vectype
))
1802 if (dump_enabled_p ())
1803 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1804 "gather index use not simple.");
1808 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gather_decl
));
1810 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist
))));
1811 if (TREE_CODE (masktype
) == INTEGER_TYPE
)
1813 if (dump_enabled_p ())
1814 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1815 "masked gather with integer mask not supported.");
1819 else if (tree_int_cst_compare (nested_in_vect_loop
1820 ? STMT_VINFO_DR_STEP (stmt_info
)
1821 : DR_STEP (dr
), size_zero_node
) <= 0)
1823 else if (!VECTOR_MODE_P (TYPE_MODE (vectype
))
1824 || !can_vec_mask_load_store_p (TYPE_MODE (vectype
),
1825 TYPE_MODE (mask_vectype
),
1828 && !useless_type_conversion_p (vectype
, rhs_vectype
)))
1831 if (!vec_stmt
) /* transformation not required. */
1833 STMT_VINFO_TYPE (stmt_info
) = call_vec_info_type
;
1835 vect_model_store_cost (stmt_info
, ncopies
, false, dt
,
1838 vect_model_load_cost (stmt_info
, ncopies
, false, NULL
, NULL
, NULL
);
1844 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info
))
1846 tree vec_oprnd0
= NULL_TREE
, op
;
1847 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gather_decl
));
1848 tree rettype
, srctype
, ptrtype
, idxtype
, masktype
, scaletype
;
1849 tree ptr
, vec_mask
= NULL_TREE
, mask_op
= NULL_TREE
, var
, scale
;
1850 tree perm_mask
= NULL_TREE
, prev_res
= NULL_TREE
;
1851 tree mask_perm_mask
= NULL_TREE
;
1852 edge pe
= loop_preheader_edge (loop
);
1855 enum { NARROW
, NONE
, WIDEN
} modifier
;
1856 int gather_off_nunits
= TYPE_VECTOR_SUBPARTS (gather_off_vectype
);
1858 rettype
= TREE_TYPE (TREE_TYPE (gather_decl
));
1859 srctype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1860 ptrtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1861 idxtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1862 masktype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1863 scaletype
= TREE_VALUE (arglist
);
1864 gcc_checking_assert (types_compatible_p (srctype
, rettype
)
1865 && types_compatible_p (srctype
, masktype
));
1867 if (nunits
== gather_off_nunits
)
1869 else if (nunits
== gather_off_nunits
/ 2)
1871 unsigned char *sel
= XALLOCAVEC (unsigned char, gather_off_nunits
);
1874 for (i
= 0; i
< gather_off_nunits
; ++i
)
1875 sel
[i
] = i
| nunits
;
1877 perm_mask
= vect_gen_perm_mask_checked (gather_off_vectype
, sel
);
1879 else if (nunits
== gather_off_nunits
* 2)
1881 unsigned char *sel
= XALLOCAVEC (unsigned char, nunits
);
1884 for (i
= 0; i
< nunits
; ++i
)
1885 sel
[i
] = i
< gather_off_nunits
1886 ? i
: i
+ nunits
- gather_off_nunits
;
1888 perm_mask
= vect_gen_perm_mask_checked (vectype
, sel
);
1890 for (i
= 0; i
< nunits
; ++i
)
1891 sel
[i
] = i
| gather_off_nunits
;
1892 mask_perm_mask
= vect_gen_perm_mask_checked (masktype
, sel
);
1897 vec_dest
= vect_create_destination_var (gimple_call_lhs (stmt
), vectype
);
1899 ptr
= fold_convert (ptrtype
, gather_base
);
1900 if (!is_gimple_min_invariant (ptr
))
1902 ptr
= force_gimple_operand (ptr
, &seq
, true, NULL_TREE
);
1903 new_bb
= gsi_insert_seq_on_edge_immediate (pe
, seq
);
1904 gcc_assert (!new_bb
);
1907 scale
= build_int_cst (scaletype
, gather_scale
);
1909 prev_stmt_info
= NULL
;
1910 for (j
= 0; j
< ncopies
; ++j
)
1912 if (modifier
== WIDEN
&& (j
& 1))
1913 op
= permute_vec_elements (vec_oprnd0
, vec_oprnd0
,
1914 perm_mask
, stmt
, gsi
);
1917 = vect_get_vec_def_for_operand (gather_off
, stmt
);
1920 = vect_get_vec_def_for_stmt_copy (gather_dt
, vec_oprnd0
);
1922 if (!useless_type_conversion_p (idxtype
, TREE_TYPE (op
)))
1924 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op
))
1925 == TYPE_VECTOR_SUBPARTS (idxtype
));
1926 var
= vect_get_new_ssa_name (idxtype
, vect_simple_var
);
1927 op
= build1 (VIEW_CONVERT_EXPR
, idxtype
, op
);
1929 = gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
1930 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
1934 if (mask_perm_mask
&& (j
& 1))
1935 mask_op
= permute_vec_elements (mask_op
, mask_op
,
1936 mask_perm_mask
, stmt
, gsi
);
1940 vec_mask
= vect_get_vec_def_for_operand (mask
, stmt
);
1943 vect_is_simple_use (vec_mask
, loop_vinfo
, &def_stmt
, &dt
);
1944 vec_mask
= vect_get_vec_def_for_stmt_copy (dt
, vec_mask
);
1948 if (!useless_type_conversion_p (masktype
, TREE_TYPE (vec_mask
)))
1950 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op
))
1951 == TYPE_VECTOR_SUBPARTS (masktype
));
1952 var
= vect_get_new_ssa_name (masktype
, vect_simple_var
);
1953 mask_op
= build1 (VIEW_CONVERT_EXPR
, masktype
, mask_op
);
1955 = gimple_build_assign (var
, VIEW_CONVERT_EXPR
, mask_op
);
1956 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
1962 = gimple_build_call (gather_decl
, 5, mask_op
, ptr
, op
, mask_op
,
1965 if (!useless_type_conversion_p (vectype
, rettype
))
1967 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype
)
1968 == TYPE_VECTOR_SUBPARTS (rettype
));
1969 op
= vect_get_new_ssa_name (rettype
, vect_simple_var
);
1970 gimple_call_set_lhs (new_stmt
, op
);
1971 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
1972 var
= make_ssa_name (vec_dest
);
1973 op
= build1 (VIEW_CONVERT_EXPR
, vectype
, op
);
1974 new_stmt
= gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
1978 var
= make_ssa_name (vec_dest
, new_stmt
);
1979 gimple_call_set_lhs (new_stmt
, var
);
1982 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
1984 if (modifier
== NARROW
)
1991 var
= permute_vec_elements (prev_res
, var
,
1992 perm_mask
, stmt
, gsi
);
1993 new_stmt
= SSA_NAME_DEF_STMT (var
);
1996 if (prev_stmt_info
== NULL
)
1997 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
1999 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
2000 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
      /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
	 from the IL.  */
      if (STMT_VINFO_RELATED_STMT (stmt_info))
	{
	  stmt = STMT_VINFO_RELATED_STMT (stmt_info);
	  stmt_info = vinfo_for_stmt (stmt);
	}
      tree lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
      set_vinfo_for_stmt (new_stmt, stmt_info);
      set_vinfo_for_stmt (stmt, NULL);
      STMT_VINFO_STMT (stmt_info) = new_stmt;
      gsi_replace (gsi, new_stmt, true);

      return true;
    }
  else if (is_store)
    {
      tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
      prev_stmt_info = NULL;
      for (i = 0; i < ncopies; i++)
	{
	  unsigned align, misalign;

	  if (i == 0)
	    {
	      tree rhs = gimple_call_arg (stmt, 3);
	      vec_rhs = vect_get_vec_def_for_operand (rhs, stmt);
	      vec_mask = vect_get_vec_def_for_operand (mask, stmt);
	      /* We should have caught mismatched types earlier.  */
	      gcc_assert (useless_type_conversion_p (vectype,
						     TREE_TYPE (vec_rhs)));
	      dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
						      NULL_TREE, &dummy, gsi,
						      &ptr_incr, false, &inv_p);
	      gcc_assert (!inv_p);
	    }
	  else
	    {
	      vect_is_simple_use (vec_rhs, loop_vinfo, &def_stmt, &dt);
	      vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs);
	      vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
	      vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
	      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
					     TYPE_SIZE_UNIT (vectype));
	    }

	  align = TYPE_ALIGN_UNIT (vectype);
	  if (aligned_access_p (dr))
	    misalign = 0;
	  else if (DR_MISALIGNMENT (dr) == -1)
	    {
	      align = TYPE_ALIGN_UNIT (elem_type);
	      misalign = 0;
	    }
	  else
	    misalign = DR_MISALIGNMENT (dr);
	  set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
				  misalign);
	  tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)),
				    misalign ? misalign & -misalign : align);
	  new_stmt
	    = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
					  ptr, vec_mask, vec_rhs);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (i == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }
  else
    {
      tree vec_mask = NULL_TREE;
      prev_stmt_info = NULL;
      vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
      for (i = 0; i < ncopies; i++)
	{
	  unsigned align, misalign;

	  if (i == 0)
	    {
	      vec_mask = vect_get_vec_def_for_operand (mask, stmt);
	      dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
						      NULL_TREE, &dummy, gsi,
						      &ptr_incr, false, &inv_p);
	      gcc_assert (!inv_p);
	    }
	  else
	    {
	      vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
	      vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
	      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
					     TYPE_SIZE_UNIT (vectype));
	    }

	  align = TYPE_ALIGN_UNIT (vectype);
	  if (aligned_access_p (dr))
	    misalign = 0;
	  else if (DR_MISALIGNMENT (dr) == -1)
	    {
	      align = TYPE_ALIGN_UNIT (elem_type);
	      misalign = 0;
	    }
	  else
	    misalign = DR_MISALIGNMENT (dr);
	  set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
				  misalign);
	  tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)),
				    misalign ? misalign & -misalign : align);
	  new_stmt
	    = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
					  ptr, vec_mask);
	  gimple_call_set_lhs (new_stmt, make_ssa_name (vec_dest));
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (i == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }
  if (!is_store)
    {
      /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
	 from the IL.  */
      if (STMT_VINFO_RELATED_STMT (stmt_info))
	{
	  stmt = STMT_VINFO_RELATED_STMT (stmt_info);
	  stmt_info = vinfo_for_stmt (stmt);
	}
      tree lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
      set_vinfo_for_stmt (new_stmt, stmt_info);
      set_vinfo_for_stmt (stmt, NULL);
      STMT_VINFO_STMT (stmt_info) = new_stmt;
      gsi_replace (gsi, new_stmt, true);
    }

  return true;
}
/* Return true if vector types VECTYPE_IN and VECTYPE_OUT have
   integer elements and if we can narrow VECTYPE_IN to VECTYPE_OUT
   in a single step.  On success, store the binary pack code in
   *CONVERT_CODE.  */

static bool
simple_integer_narrowing (tree vectype_out, tree vectype_in,
			  tree_code *convert_code)
{
  if (!INTEGRAL_TYPE_P (TREE_TYPE (vectype_out))
      || !INTEGRAL_TYPE_P (TREE_TYPE (vectype_in)))
    return false;

  tree_code code;
  int multi_step_cvt = 0;
  auto_vec <tree, 8> interm_types;
  if (!supportable_narrowing_operation (NOP_EXPR, vectype_out, vectype_in,
					&code, &multi_step_cvt,
					&interm_types)
      || multi_step_cvt)
    return false;

  *convert_code = code;
  return true;
}
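/* For example, narrowing from int to short elements is a single
   VEC_PACK_TRUNC_EXPR step on targets that provide it, so the function
   succeeds and returns that pack code; narrowing from int to char needs
   an intermediate step (MULTI_STEP_CVT != 0) and is rejected.  */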
/* Function vectorizable_call.

   Check if GS performs a function call that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
		   slp_tree slp_node)
{
  gcall *stmt;
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info;
  tree vectype_out, vectype_in;
  int nunits_in;
  int nunits_out;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  tree fndecl, new_temp, rhs_type;
  gimple *def_stmt;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  int ncopies, j;
  vec<tree> vargs = vNULL;
  enum { NARROW, NONE, WIDEN } modifier;
  size_t i, nargs;
  tree lhs;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is GS a vectorizable call?   */
  stmt = dyn_cast <gcall *> (gs);
  if (!stmt)
    return false;

  if (gimple_call_internal_p (stmt)
      && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
	  || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
    return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
					 slp_node);

  if (gimple_call_lhs (stmt) == NULL_TREE
      || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  gcc_checking_assert (!stmt_can_throw_internal (stmt));
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Process function arguments.  */
  rhs_type = NULL_TREE;
  vectype_in = NULL_TREE;
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has more than three arguments, we do not have
     interesting builtin functions to vectorize with more than two arguments
     except for fma.  No arguments is also not good.  */
  if (nargs == 0 || nargs > 3)
    return false;

  /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic.  */
  if (gimple_call_internal_p (stmt)
      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
    {
      nargs = 0;
      rhs_type = unsigned_type_node;
    }

  for (i = 0; i < nargs; i++)
    {
      tree opvectype;

      op = gimple_call_arg (stmt, i);

      /* We can only handle calls with arguments of the same type.  */
      if (rhs_type
	  && !types_compatible_p (rhs_type, TREE_TYPE (op)))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "argument types differ.\n");
	  return false;
	}
      if (!rhs_type)
	rhs_type = TREE_TYPE (op);

      if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[i], &opvectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}

      if (!vectype_in)
	vectype_in = opvectype;
      else if (opvectype
	       && opvectype != vectype_in)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "argument vector types differ.\n");
	  return false;
	}
    }
  /* If all arguments are external or constant defs use a vector type with
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "no vectype for scalar type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}

      return false;
    }

  /* FORNOW */
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in == nunits_out / 2)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else if (nunits_out == nunits_in / 2)
    modifier = WIDEN;
  else
    return false;
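  /* E.g. a call producing V2DF results from V4SF arguments has nunits_out
     == nunits_in / 2, so each vectorized call covers half of an input
     vector (WIDEN); a V4SF result from V2DF arguments is the NARROW case,
     where two calls must be combined to fill one output vector.  */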
  /* We only handle functions that do not read or clobber memory.  */
  if (gimple_vuse (stmt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "function reads from or writes to memory.\n");
      return false;
    }

  /* For now, we only vectorize functions if a target specific builtin
     is available.  TODO -- in some cases, it might be profitable to
     insert the calls for pieces of the vector, in order to be able
     to vectorize other operations in the loop.  */
  fndecl = NULL_TREE;
  internal_fn ifn = IFN_LAST;
  combined_fn cfn = gimple_call_combined_fn (stmt);
  tree callee = gimple_call_fndecl (stmt);

  /* First try using an internal function.  */
  tree_code convert_code = ERROR_MARK;
  if (cfn != CFN_LAST
      && (modifier == NONE
	  || (modifier == NARROW
	      && simple_integer_narrowing (vectype_out, vectype_in,
					   &convert_code))))
    ifn = vectorizable_internal_function (cfn, callee, vectype_out,
					  vectype_in);

  /* If that fails, try asking for a target-specific built-in function.  */
  if (ifn == IFN_LAST)
    {
      if (cfn != CFN_LAST)
	fndecl = targetm.vectorize.builtin_vectorized_function
	  (cfn, vectype_out, vectype_in);
      else
	fndecl = targetm.vectorize.builtin_md_vectorized_function
	  (callee, vectype_out, vectype_in);
    }

  if (ifn == IFN_LAST && !fndecl)
    {
      if (cfn == CFN_GOMP_SIMD_LANE
	  && !slp_node
	  && loop_vinfo
	  && LOOP_VINFO_LOOP (loop_vinfo)->simduid
	  && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
	  && LOOP_VINFO_LOOP (loop_vinfo)->simduid
	     == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
	{
	  /* We can handle IFN_GOMP_SIMD_LANE by returning a
	     { 0, 1, 2, ... vf - 1 } vector.  */
	  gcc_assert (nargs == 0);
	}
      else
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "function is not vectorizable.\n");
	  return false;
	}
    }
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else if (modifier == NARROW && ifn == IFN_LAST)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
			 "\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      if (ifn != IFN_LAST && modifier == NARROW && !slp_node)
	add_stmt_cost (stmt_info->vinfo->target_cost_data, ncopies / 2,
		       vec_promote_demote, stmt_info, 0, vect_body);

      return true;
    }
  /* Transform.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");

  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  prev_stmt_info = NULL;
  if (modifier == NONE || ifn != IFN_LAST)
    {
      tree prev_res = NULL_TREE;
      for (j = 0; j < ncopies; ++j)
	{
	  /* Build argument list for the vectorized call.  */
	  if (j == 0)
	    vargs.create (nargs);
	  else
	    vargs.truncate (0);

	  if (slp_node)
	    {
	      auto_vec<vec<tree> > vec_defs (nargs);
	      vec<tree> vec_oprnds0;

	      for (i = 0; i < nargs; i++)
		vargs.quick_push (gimple_call_arg (stmt, i));
	      vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
	      vec_oprnds0 = vec_defs[0];

	      /* Arguments are ready.  Create the new vector stmt.  */
	      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
		{
		  size_t k;
		  for (k = 0; k < nargs; k++)
		    {
		      vec<tree> vec_oprndsk = vec_defs[k];
		      vargs[k] = vec_oprndsk[i];
		    }
		  if (modifier == NARROW)
		    {
		      tree half_res = make_ssa_name (vectype_in);
		      new_stmt = gimple_build_call_internal_vec (ifn, vargs);
		      gimple_call_set_lhs (new_stmt, half_res);
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		      if ((i & 1) == 0)
			{
			  prev_res = half_res;
			  continue;
			}
		      new_temp = make_ssa_name (vec_dest);
		      new_stmt = gimple_build_assign (new_temp, convert_code,
						      prev_res, half_res);
		    }
		  else
		    {
		      if (ifn != IFN_LAST)
			new_stmt = gimple_build_call_internal_vec (ifn, vargs);
		      else
			new_stmt = gimple_build_call_vec (fndecl, vargs);
		      new_temp = make_ssa_name (vec_dest, new_stmt);
		      gimple_call_set_lhs (new_stmt, new_temp);
		    }
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
		}

	      for (i = 0; i < nargs; i++)
		{
		  vec<tree> vec_oprndsi = vec_defs[i];
		  vec_oprndsi.release ();
		}
	      continue;
	    }
	  for (i = 0; i < nargs; i++)
	    {
	      op = gimple_call_arg (stmt, i);
	      if (j == 0)
		vec_oprnd0
		  = vect_get_vec_def_for_operand (op, stmt);
	      else
		{
		  vec_oprnd0 = gimple_call_arg (new_stmt, i);
		  vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}

	      vargs.quick_push (vec_oprnd0);
	    }

	  if (gimple_call_internal_p (stmt)
	      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
	    {
	      tree *v = XALLOCAVEC (tree, nunits_out);
	      int k;
	      for (k = 0; k < nunits_out; ++k)
		v[k] = build_int_cst (unsigned_type_node, j * nunits_out + k);
	      tree cst = build_vector (vectype_out, v);
	      tree new_var
		= vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_");
	      gimple *init_stmt = gimple_build_assign (new_var, cst);
	      vect_init_vector_1 (stmt, init_stmt, NULL);
	      new_temp = make_ssa_name (vec_dest);
	      new_stmt = gimple_build_assign (new_temp, new_var);
	    }
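	  /* For instance, with nunits_out == 4 and ncopies == 2 this
	     materializes { 0, 1, 2, 3 } for the first copy and
	     { 4, 5, 6, 7 } for the second, giving every lane its lane
	     number within the vectorized iteration.  */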
	  else if (modifier == NARROW)
	    {
	      tree half_res = make_ssa_name (vectype_in);
	      new_stmt = gimple_build_call_internal_vec (ifn, vargs);
	      gimple_call_set_lhs (new_stmt, half_res);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      if ((j & 1) == 0)
		{
		  prev_res = half_res;
		  continue;
		}
	      new_temp = make_ssa_name (vec_dest);
	      new_stmt = gimple_build_assign (new_temp, convert_code,
					      prev_res, half_res);
	    }
	  else
	    {
	      if (ifn != IFN_LAST)
		new_stmt = gimple_build_call_internal_vec (ifn, vargs);
	      else
		new_stmt = gimple_build_call_vec (fndecl, vargs);
	      new_temp = make_ssa_name (vec_dest, new_stmt);
	      gimple_call_set_lhs (new_stmt, new_temp);
	    }
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (j == (modifier == NARROW ? 1 : 0))
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }
  else if (modifier == NARROW)
    {
      for (j = 0; j < ncopies; ++j)
	{
	  /* Build argument list for the vectorized call.  */
	  if (j == 0)
	    vargs.create (nargs * 2);
	  else
	    vargs.truncate (0);

	  if (slp_node)
	    {
	      auto_vec<vec<tree> > vec_defs (nargs);
	      vec<tree> vec_oprnds0;

	      for (i = 0; i < nargs; i++)
		vargs.quick_push (gimple_call_arg (stmt, i));
	      vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
	      vec_oprnds0 = vec_defs[0];

	      /* Arguments are ready.  Create the new vector stmt.  */
	      for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
		{
		  size_t k;
		  vargs.truncate (0);
		  for (k = 0; k < nargs; k++)
		    {
		      vec<tree> vec_oprndsk = vec_defs[k];
		      vargs.quick_push (vec_oprndsk[i]);
		      vargs.quick_push (vec_oprndsk[i + 1]);
		    }
		  if (ifn != IFN_LAST)
		    new_stmt = gimple_build_call_internal_vec (ifn, vargs);
		  else
		    new_stmt = gimple_build_call_vec (fndecl, vargs);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_call_set_lhs (new_stmt, new_temp);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
		}

	      for (i = 0; i < nargs; i++)
		{
		  vec<tree> vec_oprndsi = vec_defs[i];
		  vec_oprndsi.release ();
		}
	      continue;
	    }

	  for (i = 0; i < nargs; i++)
	    {
	      op = gimple_call_arg (stmt, i);
	      if (j == 0)
		{
		  vec_oprnd0
		    = vect_get_vec_def_for_operand (op, stmt);
		  vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}
	      else
		{
		  vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
		  vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
		  vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}

	      vargs.quick_push (vec_oprnd0);
	      vargs.quick_push (vec_oprnd1);
	    }

	  new_stmt = gimple_build_call_vec (fndecl, vargs);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_call_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
    }
  else
    /* No current target implements this case.  */
    return false;

  vargs.release ();

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  if (slp_node)
    return true;

  type = TREE_TYPE (scalar_dest);
  if (is_pattern_stmt_p (stmt_info))
    lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
  else
    lhs = gimple_call_lhs (stmt);

  if (gimple_call_internal_p (stmt)
      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
    {
      /* Replace uses of the lhs of GOMP_SIMD_LANE call outside the loop
	 with vf - 1 rather than 0, that is the last iteration of the
	 vectorized loop.  */
      imm_use_iterator iter;
      use_operand_p use_p;
      gimple *use_stmt;
      FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
	{
	  basic_block use_bb = gimple_bb (use_stmt);
	  if (use_bb
	      && !flow_bb_inside_loop_p (LOOP_VINFO_LOOP (loop_vinfo), use_bb))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		SET_USE (use_p, build_int_cst (TREE_TYPE (lhs),
					       ncopies * nunits_out - 1));
	      update_stmt (use_stmt);
	    }
	}
    }

  new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, false);

  return true;
}
struct simd_call_arg_info
{
  tree vectype;
  tree op;
  enum vect_def_type dt;
  HOST_WIDE_INT linear_step;
  unsigned int align;
  bool simd_lane_linear;
};
/* Helper function of vectorizable_simd_clone_call.  If OP, an SSA_NAME,
   is linear within simd lane (but not within whole loop), note it in
   ARGINFO.  */

static void
vect_simd_lane_linear (tree op, struct loop *loop,
		       struct simd_call_arg_info *arginfo)
{
  gimple *def_stmt = SSA_NAME_DEF_STMT (op);

  if (!is_gimple_assign (def_stmt)
      || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
      || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
    return;

  tree base = gimple_assign_rhs1 (def_stmt);
  HOST_WIDE_INT linear_step = 0;
  tree v = gimple_assign_rhs2 (def_stmt);
  while (TREE_CODE (v) == SSA_NAME)
    {
      tree t;
      def_stmt = SSA_NAME_DEF_STMT (v);
      if (is_gimple_assign (def_stmt))
	switch (gimple_assign_rhs_code (def_stmt))
	  {
	  case PLUS_EXPR:
	    t = gimple_assign_rhs2 (def_stmt);
	    if (linear_step || TREE_CODE (t) != INTEGER_CST)
	      return;
	    base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
	    v = gimple_assign_rhs1 (def_stmt);
	    continue;
	  case MULT_EXPR:
	    t = gimple_assign_rhs2 (def_stmt);
	    if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
	      return;
	    linear_step = tree_to_shwi (t);
	    v = gimple_assign_rhs1 (def_stmt);
	    continue;
	  CASE_CONVERT:
	    t = gimple_assign_rhs1 (def_stmt);
	    if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
		|| (TYPE_PRECISION (TREE_TYPE (v))
		    < TYPE_PRECISION (TREE_TYPE (t))))
	      return;
	    v = t;
	    continue;
	  default:
	    return;
	  }
      else if (is_gimple_call (def_stmt)
	       && gimple_call_internal_p (def_stmt)
	       && gimple_call_internal_fn (def_stmt) == IFN_GOMP_SIMD_LANE
	       && loop->simduid
	       && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
	       && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
		   == loop->simduid))
	{
	  if (!linear_step)
	    linear_step = 1;
	  arginfo->linear_step = linear_step;
	  arginfo->op = base;
	  arginfo->simd_lane_linear = true;
	  return;
	}
      else
	return;
    }
}
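/* As an illustration (names hypothetical), an address computed from the
   simd lane, e.g.
     _1 = GOMP_SIMD_LANE (simduid.0);
     _2 = _1 * 4;
     p_3 = &array + _2;
   is recognized by the walk above, which records base &array and
   linear_step 4 in *ARGINFO.  */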
/* Function vectorizable_simd_clone_call.

   Check if STMT performs a function call that can be vectorized
   by calling a simd clone of the function.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
			      gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
  tree vectype;
  unsigned int nunits;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
  tree fndecl, new_temp;
  gimple *def_stmt;
  gimple *new_stmt = NULL;
  int ncopies, j;
  vec<simd_call_arg_info> arginfo = vNULL;
  vec<tree> vargs = vNULL;
  size_t i, nargs;
  tree lhs, rtype, ratype;
  vec<constructor_elt, va_gc> *ret_ctor_elts;

  /* Is STMT a vectorizable call?   */
  if (!is_gimple_call (stmt))
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl == NULL_TREE)
    return false;

  struct cgraph_node *node = cgraph_node::get (fndecl);
  if (node == NULL || node->simd_clones == NULL)
    return false;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  if (gimple_call_lhs (stmt)
      && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  gcc_checking_assert (!stmt_can_throw_internal (stmt));

  vectype = STMT_VINFO_VECTYPE (stmt_info);

  if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
    return false;

  /* FORNOW */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    return false;
  /* Process function arguments.  */
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has zero arguments.  */
  if (nargs == 0)
    return false;

  arginfo.create (nargs);

  for (i = 0; i < nargs; i++)
    {
      simd_call_arg_info thisarginfo;
      affine_iv iv;

      thisarginfo.linear_step = 0;
      thisarginfo.align = 0;
      thisarginfo.op = NULL_TREE;
      thisarginfo.simd_lane_linear = false;

      op = gimple_call_arg (stmt, i);
      if (!vect_is_simple_use (op, vinfo, &def_stmt, &thisarginfo.dt,
			       &thisarginfo.vectype)
	  || thisarginfo.dt == vect_uninitialized_def)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  arginfo.release ();
	  return false;
	}

      if (thisarginfo.dt == vect_constant_def
	  || thisarginfo.dt == vect_external_def)
	gcc_assert (thisarginfo.vectype == NULL_TREE);
      else
	gcc_assert (thisarginfo.vectype != NULL_TREE);

      /* For linear arguments, the analyze phase should have saved
	 the base and step in STMT_VINFO_SIMD_CLONE_INFO.  */
      if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
	  && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
	{
	  gcc_assert (vec_stmt);
	  thisarginfo.linear_step
	    = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
	  thisarginfo.op
	    = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
	  thisarginfo.simd_lane_linear
	    = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
	       == boolean_true_node);
	  /* If loop has been peeled for alignment, we need to adjust it.  */
	  tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
	  tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
	  if (n1 != n2 && !thisarginfo.simd_lane_linear)
	    {
	      tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
	      tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
	      tree opt = TREE_TYPE (thisarginfo.op);
	      bias = fold_convert (TREE_TYPE (step), bias);
	      bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
	      thisarginfo.op
		= fold_build2 (POINTER_TYPE_P (opt)
			       ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
			       thisarginfo.op, bias);
	    }
	}
      else if (!vec_stmt
	       && thisarginfo.dt != vect_constant_def
	       && thisarginfo.dt != vect_external_def
	       && loop_vinfo
	       && TREE_CODE (op) == SSA_NAME
	       && simple_iv (loop, loop_containing_stmt (stmt), op,
			     &iv, false)
	       && tree_fits_shwi_p (iv.step))
	{
	  thisarginfo.linear_step = tree_to_shwi (iv.step);
	  thisarginfo.op = iv.base;
	}
      else if ((thisarginfo.dt == vect_constant_def
		|| thisarginfo.dt == vect_external_def)
	       && POINTER_TYPE_P (TREE_TYPE (op)))
	thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
      /* Addresses of array elements indexed by GOMP_SIMD_LANE are
	 linear too.  */
      if (POINTER_TYPE_P (TREE_TYPE (op))
	  && !thisarginfo.linear_step
	  && !vec_stmt
	  && thisarginfo.dt != vect_constant_def
	  && thisarginfo.dt != vect_external_def
	  && loop_vinfo
	  && !slp_node
	  && TREE_CODE (op) == SSA_NAME)
	vect_simd_lane_linear (op, loop, &thisarginfo);

      arginfo.quick_push (thisarginfo);
    }
  unsigned int badness = 0;
  struct cgraph_node *bestn = NULL;
  if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
    bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
  else
    for (struct cgraph_node *n = node->simd_clones; n != NULL;
	 n = n->simdclone->next_clone)
      {
	unsigned int this_badness = 0;
	if (n->simdclone->simdlen
	    > (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
	    || n->simdclone->nargs != nargs)
	  continue;
	if (n->simdclone->simdlen
	    < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo))
	  this_badness += (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
			   - exact_log2 (n->simdclone->simdlen)) * 1024;
	if (n->simdclone->inbranch)
	  this_badness += 2048;
	int target_badness = targetm.simd_clone.usable (n);
	if (target_badness < 0)
	  continue;
	this_badness += target_badness * 512;
	/* FORNOW: Have to add code to add the mask argument.  */
	if (n->simdclone->inbranch)
	  continue;
	for (i = 0; i < nargs; i++)
	  {
	    switch (n->simdclone->args[i].arg_type)
	      {
	      case SIMD_CLONE_ARG_TYPE_VECTOR:
		if (!useless_type_conversion_p
			(n->simdclone->args[i].orig_type,
			 TREE_TYPE (gimple_call_arg (stmt, i))))
		  i = -1;
		else if (arginfo[i].dt == vect_constant_def
			 || arginfo[i].dt == vect_external_def
			 || arginfo[i].linear_step)
		  this_badness += 64;
		break;
	      case SIMD_CLONE_ARG_TYPE_UNIFORM:
		if (arginfo[i].dt != vect_constant_def
		    && arginfo[i].dt != vect_external_def)
		  i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
		if (arginfo[i].dt == vect_constant_def
		    || arginfo[i].dt == vect_external_def
		    || (arginfo[i].linear_step
			!= n->simdclone->args[i].linear_step))
		  i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
		/* FORNOW */
		i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_MASK:
		gcc_unreachable ();
	      }
	    if (i == (size_t) -1)
	      break;
	    if (n->simdclone->args[i].alignment > arginfo[i].align)
	      {
		i = -1;
		break;
	      }
	    if (arginfo[i].align)
	      this_badness += (exact_log2 (arginfo[i].align)
			       - exact_log2 (n->simdclone->args[i].alignment));
	  }
	if (i == (size_t) -1)
	  continue;
	if (bestn == NULL || this_badness < badness)
	  {
	    bestn = n;
	    badness = this_badness;
	  }
      }
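  /* Scoring example: with a vectorization factor of 8, a matching clone
     with simdlen 4 accumulates (log2 (8) - log2 (4)) * 1024 == 1024 of
     badness, so a usable simdlen-8 clone (badness 0) is preferred
     whenever one exists; smaller badness always wins.  */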
  if (bestn == NULL)
    {
      arginfo.release ();
      return false;
    }

  for (i = 0; i < nargs; i++)
    if ((arginfo[i].dt == vect_constant_def
	 || arginfo[i].dt == vect_external_def)
	&& bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
      {
	arginfo[i].vectype
	  = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
								     i)));
	if (arginfo[i].vectype == NULL
	    || (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
		> bestn->simdclone->simdlen))
	  {
	    arginfo.release ();
	    return false;
	  }
      }

  fndecl = bestn->decl;
  nunits = bestn->simdclone->simdlen;
  ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  /* If the function isn't const, only allow it in simd loops where user
     has asserted that at least nunits consecutive iterations can be
     performed using SIMD instructions.  */
  if ((loop == NULL || (unsigned) loop->safelen < nunits)
      && gimple_vuse (stmt))
    {
      arginfo.release ();
      return false;
    }

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);
  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
      for (i = 0; i < nargs; i++)
	if (bestn->simdclone->args[i].arg_type
	    == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
	  {
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
								      + 1);
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
	    tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
		       ? size_type_node : TREE_TYPE (arginfo[i].op);
	    tree ls = build_int_cst (lst, arginfo[i].linear_step);
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
	    tree sll = arginfo[i].simd_lane_linear
		       ? boolean_true_node : boolean_false_node;
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
	  }
      STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_simd_clone_call ===\n");
/*      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
      arginfo.release ();
      return true;
    }
  /* Transform.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");

  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = NULL_TREE;
  rtype = NULL_TREE;
  ratype = NULL_TREE;
  if (scalar_dest)
    {
      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      rtype = TREE_TYPE (TREE_TYPE (fndecl));
      if (TREE_CODE (rtype) == ARRAY_TYPE)
	{
	  ratype = rtype;
	  rtype = TREE_TYPE (ratype);
	}
    }

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; ++j)
    {
      /* Build argument list for the vectorized call.  */
      if (j == 0)
	vargs.create (nargs);
      else
	vargs.truncate (0);

      for (i = 0; i < nargs; i++)
	{
	  unsigned int k, l, m, o;
	  tree atype;
	  op = gimple_call_arg (stmt, i);
	  switch (bestn->simdclone->args[i].arg_type)
	    {
	    case SIMD_CLONE_ARG_TYPE_VECTOR:
	      atype = bestn->simdclone->args[i].vector_type;
	      o = nunits / TYPE_VECTOR_SUBPARTS (atype);
	      for (m = j * o; m < (j + 1) * o; m++)
		{
		  if (TYPE_VECTOR_SUBPARTS (atype)
		      < TYPE_VECTOR_SUBPARTS (arginfo[i].vectype))
		    {
		      unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
		      k = (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
			   / TYPE_VECTOR_SUBPARTS (atype));
		      gcc_assert ((k & (k - 1)) == 0);
		      if (m == 0)
			vec_oprnd0
			  = vect_get_vec_def_for_operand (op, stmt);
		      else
			{
			  vec_oprnd0 = arginfo[i].op;
			  if ((m & (k - 1)) == 0)
			    vec_oprnd0
			      = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
								vec_oprnd0);
			}
		      arginfo[i].op = vec_oprnd0;
		      vec_oprnd0
			= build3 (BIT_FIELD_REF, atype, vec_oprnd0,
				  size_int (prec),
				  bitsize_int ((m & (k - 1)) * prec));
		      new_stmt
			= gimple_build_assign (make_ssa_name (atype),
					       vec_oprnd0);
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		      vargs.safe_push (gimple_assign_lhs (new_stmt));
		    }
		  else
		    {
		      k = (TYPE_VECTOR_SUBPARTS (atype)
			   / TYPE_VECTOR_SUBPARTS (arginfo[i].vectype));
		      gcc_assert ((k & (k - 1)) == 0);
		      vec<constructor_elt, va_gc> *ctor_elts;
		      if (k != 1)
			vec_alloc (ctor_elts, k);
		      else
			ctor_elts = NULL;
		      for (l = 0; l < k; l++)
			{
			  if (m == 0 && l == 0)
			    vec_oprnd0
			      = vect_get_vec_def_for_operand (op, stmt);
			  else
			    vec_oprnd0
			      = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
								arginfo[i].op);
			  arginfo[i].op = vec_oprnd0;
			  if (k == 1)
			    break;
			  CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
						  vec_oprnd0);
			}
		      if (k == 1)
			vargs.safe_push (vec_oprnd0);
		      else
			{
			  vec_oprnd0 = build_constructor (atype, ctor_elts);
			  new_stmt
			    = gimple_build_assign (make_ssa_name (atype),
						   vec_oprnd0);
			  vect_finish_stmt_generation (stmt, new_stmt, gsi);
			  vargs.safe_push (gimple_assign_lhs (new_stmt));
			}
		    }
		}
	      break;
	    case SIMD_CLONE_ARG_TYPE_UNIFORM:
	      vargs.safe_push (op);
	      break;
	    case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
	      if (j == 0)
		{
		  gimple_seq stmts;
		  arginfo[i].op
		    = force_gimple_operand (arginfo[i].op, &stmts, true,
					    NULL_TREE);
		  if (stmts != NULL)
		    {
		      basic_block new_bb;
		      edge pe = loop_preheader_edge (loop);
		      new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
		      gcc_assert (!new_bb);
		    }
		  if (arginfo[i].simd_lane_linear)
		    {
		      vargs.safe_push (arginfo[i].op);
		      break;
		    }
		  tree phi_res = copy_ssa_name (op);
		  gphi *new_phi = create_phi_node (phi_res, loop->header);
		  set_vinfo_for_stmt (new_phi,
				      new_stmt_vec_info (new_phi, loop_vinfo));
		  add_phi_arg (new_phi, arginfo[i].op,
			       loop_preheader_edge (loop), UNKNOWN_LOCATION);
		  enum tree_code code
		    = POINTER_TYPE_P (TREE_TYPE (op))
		      ? POINTER_PLUS_EXPR : PLUS_EXPR;
		  tree type = POINTER_TYPE_P (TREE_TYPE (op))
			      ? sizetype : TREE_TYPE (op);
		  widest_int cst
		    = wi::mul (bestn->simdclone->args[i].linear_step,
			       ncopies * nunits);
		  tree tcst = wide_int_to_tree (type, cst);
		  tree phi_arg = copy_ssa_name (op);
		  new_stmt
		    = gimple_build_assign (phi_arg, code, phi_res, tcst);
		  gimple_stmt_iterator si = gsi_after_labels (loop->header);
		  gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
		  set_vinfo_for_stmt (new_stmt,
				      new_stmt_vec_info (new_stmt,
							 loop_vinfo));
		  add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
			       UNKNOWN_LOCATION);
		  arginfo[i].op = phi_res;
		  vargs.safe_push (phi_res);
		}
	      else
		{
		  enum tree_code code
		    = POINTER_TYPE_P (TREE_TYPE (op))
		      ? POINTER_PLUS_EXPR : PLUS_EXPR;
		  tree type = POINTER_TYPE_P (TREE_TYPE (op))
			      ? sizetype : TREE_TYPE (op);
		  widest_int cst
		    = wi::mul (bestn->simdclone->args[i].linear_step,
			       j * nunits);
		  tree tcst = wide_int_to_tree (type, cst);
		  new_temp = make_ssa_name (TREE_TYPE (op));
		  new_stmt = gimple_build_assign (new_temp, code,
						  arginfo[i].op, tcst);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  vargs.safe_push (new_temp);
		}
	      break;
	    case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
	    default:
	      gcc_unreachable ();
	    }
	}
      new_stmt = gimple_build_call_vec (fndecl, vargs);
      if (vec_dest)
	{
	  gcc_assert (ratype || TYPE_VECTOR_SUBPARTS (rtype) == nunits);
	  if (ratype)
	    new_temp = create_tmp_var (ratype);
	  else if (TYPE_VECTOR_SUBPARTS (vectype)
		   == TYPE_VECTOR_SUBPARTS (rtype))
	    new_temp = make_ssa_name (vec_dest, new_stmt);
	  else
	    new_temp = make_ssa_name (rtype, new_stmt);
	  gimple_call_set_lhs (new_stmt, new_temp);
	}
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (vec_dest)
	{
	  if (TYPE_VECTOR_SUBPARTS (vectype) < nunits)
	    {
	      unsigned int k, l;
	      unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
	      k = nunits / TYPE_VECTOR_SUBPARTS (vectype);
	      gcc_assert ((k & (k - 1)) == 0);
	      for (l = 0; l < k; l++)
		{
		  tree t;
		  if (ratype)
		    {
		      t = build_fold_addr_expr (new_temp);
		      t = build2 (MEM_REF, vectype, t,
				  build_int_cst (TREE_TYPE (t),
						 l * prec / BITS_PER_UNIT));
		    }
		  else
		    t = build3 (BIT_FIELD_REF, vectype, new_temp,
				size_int (prec), bitsize_int (l * prec));
		  new_stmt
		    = gimple_build_assign (make_ssa_name (vectype), t);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  if (j == 0 && l == 0)
		    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

		  prev_stmt_info = vinfo_for_stmt (new_stmt);
		}

	      if (ratype)
		{
		  tree clobber = build_constructor (ratype, NULL);
		  TREE_THIS_VOLATILE (clobber) = 1;
		  new_stmt = gimple_build_assign (new_temp, clobber);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      continue;
	    }
	  else if (TYPE_VECTOR_SUBPARTS (vectype) > nunits)
	    {
	      unsigned int k = (TYPE_VECTOR_SUBPARTS (vectype)
				/ TYPE_VECTOR_SUBPARTS (rtype));
	      gcc_assert ((k & (k - 1)) == 0);
	      if ((j & (k - 1)) == 0)
		vec_alloc (ret_ctor_elts, k);
	      if (ratype)
		{
		  unsigned int m, o = nunits / TYPE_VECTOR_SUBPARTS (rtype);
		  for (m = 0; m < o; m++)
		    {
		      tree tem = build4 (ARRAY_REF, rtype, new_temp,
					 size_int (m), NULL_TREE, NULL_TREE);
		      new_stmt
			= gimple_build_assign (make_ssa_name (rtype), tem);
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		      CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
					      gimple_assign_lhs (new_stmt));
		    }
		  tree clobber = build_constructor (ratype, NULL);
		  TREE_THIS_VOLATILE (clobber) = 1;
		  new_stmt = gimple_build_assign (new_temp, clobber);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      else
		CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
	      if ((j & (k - 1)) != k - 1)
		continue;
	      vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
	      new_stmt
		= gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);

	      if ((unsigned) j == k - 1)
		STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	      prev_stmt_info = vinfo_for_stmt (new_stmt);
	      continue;
	    }
	  else if (ratype)
	    {
	      tree t = build_fold_addr_expr (new_temp);
	      t = build2 (MEM_REF, vectype, t,
			  build_int_cst (TREE_TYPE (t), 0));
	      new_stmt
		= gimple_build_assign (make_ssa_name (vec_dest), t);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      tree clobber = build_constructor (ratype, NULL);
	      TREE_THIS_VOLATILE (clobber) = 1;
	      vect_finish_stmt_generation (stmt,
					   gimple_build_assign (new_temp,
								clobber), gsi);
	    }
	}

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vargs.release ();
  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  if (slp_node)
    return true;

  if (scalar_dest)
    {
      type = TREE_TYPE (scalar_dest);
      if (is_pattern_stmt_p (stmt_info))
	lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
      else
	lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
    }
  else
    new_stmt = gimple_build_nop ();
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, true);
  unlink_stmt_vdef (stmt);

  return true;
}
/* Function vect_gen_widened_results_half

   Create a vector stmt whose code, type, number of arguments, and result
   variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
   VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at BSI.
   In the case that CODE is a CALL_EXPR, this means that a call to DECL
   needs to be created (DECL is a function-decl of a target-builtin).
   STMT is the original scalar stmt that we are vectorizing.  */

static gimple *
vect_gen_widened_results_half (enum tree_code code,
			       tree decl,
			       tree vec_oprnd0, tree vec_oprnd1, int op_type,
			       tree vec_dest, gimple_stmt_iterator *gsi,
			       gimple *stmt)
{
  gimple *new_stmt;
  tree new_temp;

  /* Generate half of the widened result:  */
  if (code == CALL_EXPR)
    {
      /* Target specific support  */
      if (op_type == binary_op)
	new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
      else
	new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_call_set_lhs (new_stmt, new_temp);
    }
  else
    {
      /* Generic support */
      gcc_assert (op_type == TREE_CODE_LENGTH (code));
      if (op_type != binary_op)
	vec_oprnd1 = NULL;
      new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
    }
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return new_stmt;
}
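/* As a usage sketch: for a widening operation the caller invokes this
   helper twice, once with the LO and once with the HI variant of the
   widening code, so that e.g. one V8HI operand yields its widened result
   in two V4SI halves.  */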
/* Get vectorized definitions for loop-based vectorization.  For the first
   operand we call vect_get_vec_def_for_operand() (with OPRND containing
   scalar operand), and for the rest we get a copy with
   vect_get_vec_def_for_stmt_copy() using the previous vector definition
   (stored in OPRND).  See vect_get_vec_def_for_stmt_copy() for details.
   The vectors are collected into VEC_OPRNDS.  */

static void
vect_get_loop_based_defs (tree *oprnd, gimple *stmt, enum vect_def_type dt,
			  vec<tree> *vec_oprnds, int multi_step_cvt)
{
  tree vec_oprnd;

  /* Get first vector operand.  */
  /* All the vector operands except the very first one (that is scalar oprnd)
     are stmt copies.  */
  if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
    vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt);
  else
    vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);

  vec_oprnds->quick_push (vec_oprnd);

  /* Get second vector operand.  */
  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
  vec_oprnds->quick_push (vec_oprnd);

  *oprnd = vec_oprnd;

  /* For conversion in multiple steps, continue to get operands
     recursively.  */
  if (multi_step_cvt)
    vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
}
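/* E.g. with MULTI_STEP_CVT == 1 this collects four vector defs in
   VEC_OPRNDS: two pushed at the outer level and two more by the single
   recursive call -- the 2^(MULTI_STEP_CVT + 1) operands that a
   multi-step narrowing consumes.  */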
/* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
   For multi-step conversions store the resulting vectors and call the function
   recursively.  */

static void
vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
				       int multi_step_cvt, gimple *stmt,
				       vec<tree> vec_dsts,
				       gimple_stmt_iterator *gsi,
				       slp_tree slp_node, enum tree_code code,
				       stmt_vec_info *prev_stmt_info)
{
  unsigned int i;
  tree vop0, vop1, new_tmp, vec_dest;
  gimple *new_stmt;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  vec_dest = vec_dsts.pop ();

  for (i = 0; i < vec_oprnds->length (); i += 2)
    {
      /* Create demotion operation.  */
      vop0 = (*vec_oprnds)[i];
      vop1 = (*vec_oprnds)[i + 1];
      new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
      new_tmp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_tmp);
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (multi_step_cvt)
	/* Store the resulting vector for next recursive call.  */
	(*vec_oprnds)[i/2] = new_tmp;
      else
	{
	  /* This is the last step of the conversion sequence. Store the
	     vectors in SLP_NODE or in vector info of the scalar statement
	     (or in STMT_VINFO_RELATED_STMT chain).  */
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	  else
	    {
	      if (!*prev_stmt_info)
		STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;

	      *prev_stmt_info = vinfo_for_stmt (new_stmt);
	    }
	}
    }

  /* For multi-step demotion operations we first generate demotion operations
     from the source type to the intermediate types, and then combine the
     results (stored in VEC_OPRNDS) in demotion operation to the destination
     type.  */
  if (multi_step_cvt)
    {
      /* At each level of recursion we have half of the operands we had at the
	 previous level.  */
      vec_oprnds->truncate ((i+1)/2);
      vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
					     stmt, vec_dsts, gsi, slp_node,
					     VEC_PACK_TRUNC_EXPR,
					     prev_stmt_info);
    }

  vec_dsts.quick_push (vec_dest);
}
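/* Demotion example: converting four V4SI operands down to chars first
   packs them pairwise into two V8HI vectors, then the recursive call
   packs those with VEC_PACK_TRUNC_EXPR into the final V16QI vector.  */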
/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
   and VEC_OPRNDS1 (for binary operations).  For multi-step conversions store
   the resulting vectors and call the function recursively.  */

static void
vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
					vec<tree> *vec_oprnds1,
					gimple *stmt, tree vec_dest,
					gimple_stmt_iterator *gsi,
					enum tree_code code1,
					enum tree_code code2, tree decl1,
					tree decl2, int op_type)
{
  int i;
  tree vop0, vop1, new_tmp1, new_tmp2;
  gimple *new_stmt1, *new_stmt2;
  vec<tree> vec_tmp = vNULL;

  vec_tmp.create (vec_oprnds0->length () * 2);
  FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
    {
      if (op_type == binary_op)
	vop1 = (*vec_oprnds1)[i];
      else
	vop1 = NULL_TREE;

      /* Generate the two halves of promotion operation.  */
      new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
						 op_type, vec_dest, gsi, stmt);
      new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
						 op_type, vec_dest, gsi, stmt);
      if (is_gimple_call (new_stmt1))
	{
	  new_tmp1 = gimple_call_lhs (new_stmt1);
	  new_tmp2 = gimple_call_lhs (new_stmt2);
	}
      else
	{
	  new_tmp1 = gimple_assign_lhs (new_stmt1);
	  new_tmp2 = gimple_assign_lhs (new_stmt2);
	}

      /* Store the results for the next step.  */
      vec_tmp.quick_push (new_tmp1);
      vec_tmp.quick_push (new_tmp2);
    }

  vec_oprnds0->release ();
  *vec_oprnds0 = vec_tmp;
}
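/* Promotion example: widening one V8HI operand to ints produces two V4SI
   halves via the LO/HI variants of the widening operation; *VEC_OPRNDS0 is
   then replaced by these results, so a following step sees twice as many,
   twice as wide, operands.  */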
/* Check if STMT performs a conversion operation, that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
			 gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL_TREE;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
  enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
  tree decl1 = NULL_TREE, decl2 = NULL_TREE;
  tree new_temp;
  gimple *def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out, vectype_in;
  int ncopies, i, j;
  tree lhs_type, rhs_type;
  enum { NARROW, NONE, WIDEN } modifier;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  tree vop0;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  int multi_step_cvt = 0;
  vec<tree> vec_dsts = vNULL;
  vec<tree> interm_types = vNULL;
  tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
  int op_type;
  machine_mode rhs_mode;
  unsigned short fltsz;
  /* Is STMT a vectorizable conversion?   */

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (!CONVERT_EXPR_CODE_P (code)
      && code != FIX_TRUNC_EXPR
      && code != FLOAT_EXPR
      && code != WIDEN_MULT_EXPR
      && code != WIDEN_LSHIFT_EXPR)
    return false;

  op_type = TREE_CODE_LENGTH (code);

  /* Check types of lhs and rhs.  */
  scalar_dest = gimple_assign_lhs (stmt);
  lhs_type = TREE_TYPE (scalar_dest);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  op0 = gimple_assign_rhs1 (stmt);
  rhs_type = TREE_TYPE (op0);

  if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
      && !((INTEGRAL_TYPE_P (lhs_type)
	    && INTEGRAL_TYPE_P (rhs_type))
	   || (SCALAR_FLOAT_TYPE_P (lhs_type)
	       && SCALAR_FLOAT_TYPE_P (rhs_type))))
    return false;

  if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
      && ((INTEGRAL_TYPE_P (lhs_type)
	   && (TYPE_PRECISION (lhs_type)
	       != GET_MODE_PRECISION (TYPE_MODE (lhs_type))))
	  || (INTEGRAL_TYPE_P (rhs_type)
	      && (TYPE_PRECISION (rhs_type)
		  != GET_MODE_PRECISION (TYPE_MODE (rhs_type))))))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "type conversion to/from bit-precision unsupported."
			 "\n");
      return false;
    }
,
3716 "type conversion to/from bit-precision unsupported."
3721 /* Check the operands of the operation. */
3722 if (!vect_is_simple_use (op0
, vinfo
, &def_stmt
, &dt
[0], &vectype_in
))
3724 if (dump_enabled_p ())
3725 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3726 "use not simple.\n");
3729 if (op_type
== binary_op
)
3733 op1
= gimple_assign_rhs2 (stmt
);
3734 gcc_assert (code
== WIDEN_MULT_EXPR
|| code
== WIDEN_LSHIFT_EXPR
);
3735 /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
3737 if (CONSTANT_CLASS_P (op0
))
3738 ok
= vect_is_simple_use (op1
, vinfo
, &def_stmt
, &dt
[1], &vectype_in
);
3740 ok
= vect_is_simple_use (op1
, vinfo
, &def_stmt
, &dt
[1]);
3744 if (dump_enabled_p ())
3745 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3746 "use not simple.\n");
3751 /* If op0 is an external or constant defs use a vector type of
3752 the same size as the output vector type. */
3754 vectype_in
= get_same_sized_vectype (rhs_type
, vectype_out
);
3756 gcc_assert (vectype_in
);
3759 if (dump_enabled_p ())
3761 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3762 "no vectype for scalar type ");
3763 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, rhs_type
);
3764 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
3770 if (VECTOR_BOOLEAN_TYPE_P (vectype_out
)
3771 && !VECTOR_BOOLEAN_TYPE_P (vectype_in
))
3773 if (dump_enabled_p ())
3775 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3776 "can't convert between boolean and non "
3778 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, rhs_type
);
3779 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in < nunits_out)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else
    modifier = WIDEN;
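  /* E.g. float -> double has nunits_in == 4 and nunits_out == 2 with
     128-bit vectors, hence WIDEN; double -> float is the NARROW case
     (nunits_in == 2, nunits_out == 4); conversions between equally wide
     types such as int <-> float are NONE.  */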
  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);
  /* Supportable by target?  */
  switch (modifier)
    {
    case NONE:
      if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
	return false;
      if (supportable_convert_operation (code, vectype_out, vectype_in,
					 &decl1, &code1))
	break;
      /* FALLTHRU */
    unsupported:
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "conversion not supported by target.\n");
      return false;

    case WIDEN:
      if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
					  &code1, &code2, &multi_step_cvt,
					  &interm_types))
	{
	  /* Binary widening operation can only be supported directly by the
	     architecture.  */
	  gcc_assert (!(multi_step_cvt && op_type == binary_op));
	  break;
	}

      if (code != FLOAT_EXPR
	  || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
	      <= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
	goto unsupported;

      rhs_mode = TYPE_MODE (rhs_type);
      fltsz = GET_MODE_SIZE (TYPE_MODE (lhs_type));
      for (rhs_mode = GET_MODE_2XWIDER_MODE (TYPE_MODE (rhs_type));
	   rhs_mode != VOIDmode && GET_MODE_SIZE (rhs_mode) <= fltsz;
	   rhs_mode = GET_MODE_2XWIDER_MODE (rhs_mode))
	{
	  cvt_type
	    = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
	  cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
	  if (cvt_type == NULL_TREE)
	    goto unsupported;

	  if (GET_MODE_SIZE (rhs_mode) == fltsz)
	    {
	      if (!supportable_convert_operation (code, vectype_out,
						  cvt_type, &decl1, &codecvt1))
		goto unsupported;
	    }
	  else if (!supportable_widening_operation (code, stmt, vectype_out,
						    cvt_type, &codecvt1,
						    &codecvt2, &multi_step_cvt,
						    &interm_types))
	    continue;
	  else
	    gcc_assert (multi_step_cvt == 0);

	  if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
					      vectype_in, &code1, &code2,
					      &multi_step_cvt, &interm_types))
	    break;
	}

      if (rhs_mode == VOIDmode || GET_MODE_SIZE (rhs_mode) > fltsz)
	goto unsupported;

      if (GET_MODE_SIZE (rhs_mode) == fltsz)
	codecvt2 = ERROR_MARK;
      else
	{
	  multi_step_cvt++;
	  interm_types.safe_push (cvt_type);
	  cvt_type = NULL_TREE;
	}
      break;
    case NARROW:
      gcc_assert (op_type == unary_op);
      if (supportable_narrowing_operation (code, vectype_out, vectype_in,
					   &code1, &multi_step_cvt,
					   &interm_types))
	break;

      if (code != FIX_TRUNC_EXPR
	  || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
	      >= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
	goto unsupported;

      rhs_mode = TYPE_MODE (rhs_type);
      cvt_type
	= build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
      cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
      if (cvt_type == NULL_TREE)
	goto unsupported;
      if (!supportable_convert_operation (code, cvt_type, vectype_in,
					  &decl1, &codecvt1))
	goto unsupported;
      if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
					   &code1, &multi_step_cvt,
					   &interm_types))
	break;
      goto unsupported;

    default:
      gcc_unreachable ();
    }
  if (!vec_stmt)		/* transformation not required.  */
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_conversion ===\n");
      if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
	{
	  STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
	  vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
	}
      else if (modifier == NARROW)
	{
	  STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
	  vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
	}
      else
	{
	  STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
	  vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
	}
      interm_types.release ();
      return true;
    }
  /* Transform.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "transform conversion. ncopies = %d.\n", ncopies);

  if (op_type == binary_op)
    {
      if (CONSTANT_CLASS_P (op0))
	op0 = fold_convert (TREE_TYPE (op1), op0);
      else if (CONSTANT_CLASS_P (op1))
	op1 = fold_convert (TREE_TYPE (op0), op1);
    }

  /* In case of multi-step conversion, we first generate conversion operations
     to the intermediate types, and then from that types to the final one.
     We create vector destinations for the intermediate type (TYPES) received
     from supportable_*_operation, and store them in the correct order
     for future use in vect_create_vectorized_*_stmts ().  */
  vec_dsts.create (multi_step_cvt + 1);
  vec_dest = vect_create_destination_var (scalar_dest,
					  (cvt_type && modifier == WIDEN)
					  ? cvt_type : vectype_out);
  vec_dsts.quick_push (vec_dest);
  if (multi_step_cvt)
    {
      for (i = interm_types.length () - 1;
	   interm_types.iterate (i, &intermediate_type); i--)
	{
	  vec_dest = vect_create_destination_var (scalar_dest,
						  intermediate_type);
	  vec_dsts.quick_push (vec_dest);
	}
    }

  if (cvt_type)
    vec_dest = vect_create_destination_var (scalar_dest,
					    modifier == WIDEN
					    ? vectype_out : cvt_type);

  if (!slp_node)
    {
      if (modifier == WIDEN)
	{
	  vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
	  if (op_type == binary_op)
	    vec_oprnds1.create (1);
	}
      else if (modifier == NARROW)
	vec_oprnds0.create (
		   2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
    }
  else if (code == WIDEN_LSHIFT_EXPR)
    vec_oprnds1.create (slp_node->vec_stmts_size);

  last_oprnd = op0;
  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; j++)
	{
	  if (j == 0)
	    vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node,
			       -1);
	  else
	    vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);

	  FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	    {
	      /* Arguments are ready, create the new vector stmt.  */
	      if (code1 == CALL_EXPR)
		{
		  new_stmt = gimple_build_call (decl1, 1, vop0);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_call_set_lhs (new_stmt, new_temp);
		}
	      else
		{
		  gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
		  new_stmt = gimple_build_assign (vec_dest, code1, vop0);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_assign_set_lhs (new_stmt, new_temp);
		}

	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      if (slp_node)
		SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	      else
		{
		  if (!prev_stmt_info)
		    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
		  prev_stmt_info = vinfo_for_stmt (new_stmt);
		}
	    }
	}
      break;
    case WIDEN:
      /* In case the vectorization factor (VF) is bigger than the number
	 of elements that we can fit in a vectype (nunits), we have to
	 generate more than one vector stmt - i.e - we need to "unroll"
	 the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
	{
	  /* Handle uses.  */
	  if (j == 0)
	    {
	      if (slp_node)
		{
		  if (code == WIDEN_LSHIFT_EXPR)
		    {
		      unsigned int k;

		      vec_oprnd1 = op1;
		      /* Store vec_oprnd1 for every vector stmt to be created
			 for SLP_NODE.  We check during the analysis that all
			 the shift arguments are the same.  */
		      for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
			vec_oprnds1.quick_push (vec_oprnd1);

		      vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
					 slp_node, -1);
		    }
		  else
		    vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
				       &vec_oprnds1, slp_node, -1);
		}
	      else
		{
		  vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt);
		  vec_oprnds0.quick_push (vec_oprnd0);
		  if (op_type == binary_op)
		    {
		      if (code == WIDEN_LSHIFT_EXPR)
			vec_oprnd1 = op1;
		      else
			vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt);
		      vec_oprnds1.quick_push (vec_oprnd1);
		    }
		}
	    }
	  else
	    {
	      vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
	      vec_oprnds0.truncate (0);
	      vec_oprnds0.quick_push (vec_oprnd0);
	      if (op_type == binary_op)
		{
		  if (code == WIDEN_LSHIFT_EXPR)
		    vec_oprnd1 = op1;
		  else
		    vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
								 vec_oprnd1);
		  vec_oprnds1.truncate (0);
		  vec_oprnds1.quick_push (vec_oprnd1);
		}
	    }

	  /* Arguments are ready.  Create the new vector stmts.  */
	  for (i = multi_step_cvt; i >= 0; i--)
	    {
	      tree this_dest = vec_dsts[i];
	      enum tree_code c1 = code1, c2 = code2;
	      if (i == 0 && codecvt2 != ERROR_MARK)
		{
		  c1 = codecvt1;
		  c2 = codecvt2;
		}
	      vect_create_vectorized_promotion_stmts (&vec_oprnds0,
						      &vec_oprnds1,
						      stmt, this_dest, gsi,
						      c1, c2, decl1, decl2,
						      op_type);
	    }

	  FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	    {
	      if (cvt_type)
		{
		  if (codecvt1 == CALL_EXPR)
		    {
		      new_stmt = gimple_build_call (decl1, 1, vop0);
		      new_temp = make_ssa_name (vec_dest, new_stmt);
		      gimple_call_set_lhs (new_stmt, new_temp);
		    }
		  else
		    {
		      gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
		      new_temp = make_ssa_name (vec_dest);
		      new_stmt = gimple_build_assign (new_temp, codecvt1,
						      vop0);
		    }

		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      else
		new_stmt = SSA_NAME_DEF_STMT (vop0);

	      if (slp_node)
		SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	      else
		{
		  if (!prev_stmt_info)
		    STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
		  prev_stmt_info = vinfo_for_stmt (new_stmt);
		}
	    }
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      break;
4158 /* In case the vectorization factor (VF) is bigger than the number
4159 of elements that we can fit in a vectype (nunits), we have to
4160 generate more than one vector stmt - i.e - we need to "unroll"
4161 the vector stmt by a factor VF/nunits. */
4162 for (j
= 0; j
< ncopies
; j
++)
4166 vect_get_vec_defs (op0
, NULL_TREE
, stmt
, &vec_oprnds0
, NULL
,
4170 vec_oprnds0
.truncate (0);
4171 vect_get_loop_based_defs (&last_oprnd
, stmt
, dt
[0], &vec_oprnds0
,
4172 vect_pow2 (multi_step_cvt
) - 1);
4175 /* Arguments are ready. Create the new vector stmts. */
4177 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vop0
)
4179 if (codecvt1
== CALL_EXPR
)
4181 new_stmt
= gimple_build_call (decl1
, 1, vop0
);
4182 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
4183 gimple_call_set_lhs (new_stmt
, new_temp
);
4187 gcc_assert (TREE_CODE_LENGTH (codecvt1
) == unary_op
);
4188 new_temp
= make_ssa_name (vec_dest
);
4189 new_stmt
= gimple_build_assign (new_temp
, codecvt1
,
4193 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
4194 vec_oprnds0
[i
] = new_temp
;
4197 vect_create_vectorized_demotion_stmts (&vec_oprnds0
, multi_step_cvt
,
4198 stmt
, vec_dsts
, gsi
,
4203 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
4207 vec_oprnds0
.release ();
4208 vec_oprnds1
.release ();
4209 vec_dsts
.release ();
4210 interm_types
.release ();
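
/* Illustrative sketch, not part of the original source: the WIDEN/NARROW
   machinery above is what a multi-step conversion exercises.  Assuming a
   target with 128-bit vectors, a loop such as

     signed char a[N];
     int b[N];
     for (int i = 0; i < N; i++)
       b[i] = a[i];

   typically has no single instruction widening char directly to int, so
   the conversion is emitted as char -> short -> int with multi_step_cvt
   recording the extra step; each step doubles the number of vector stmts,
   which is why vect_pow2 (multi_step_cvt) operand slots are created
   above.  */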
/* Function vectorizable_assignment.

   Check if STMT performs an assignment (copy) that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
                         gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree new_temp;
  gimple *def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  int ncopies;
  int i, j;
  vec<tree> vec_oprnds = vNULL;
  tree vop;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info = NULL;
  enum tree_code code;
  tree vectype_in;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is vectorizable assignment?  */
  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (gimple_assign_single_p (stmt)
      || code == PAREN_EXPR
      || CONVERT_EXPR_CODE_P (code))
    op = gimple_assign_rhs1 (stmt);
  else
    return false;

  if (code == VIEW_CONVERT_EXPR)
    op = TREE_OPERAND (op, 0);

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[0], &vectype_in))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }

  /* We can handle NOP_EXPR conversions that do not change the number
     of elements or the vector size.  */
  if ((CONVERT_EXPR_CODE_P (code)
       || code == VIEW_CONVERT_EXPR)
      && (!vectype_in
          || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
          || (GET_MODE_SIZE (TYPE_MODE (vectype))
              != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
    return false;

  /* We do not handle bit-precision changes.  */
  if ((CONVERT_EXPR_CODE_P (code)
       || code == VIEW_CONVERT_EXPR)
      && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
      && ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
           != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
          || ((TYPE_PRECISION (TREE_TYPE (op))
               != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op))))))
      /* But a conversion that does not change the bit-pattern is ok.  */
      && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
            > TYPE_PRECISION (TREE_TYPE (op)))
           && TYPE_UNSIGNED (TREE_TYPE (op)))
      /* Conversion between boolean types of different sizes is
         a simple assignment in case their vectypes are same
         boolean vectors.  */
      && (!VECTOR_BOOLEAN_TYPE_P (vectype)
          || !VECTOR_BOOLEAN_TYPE_P (vectype_in)))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "type conversion to/from bit-precision "
                         "unsupported.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "=== vectorizable_assignment ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /* Transform.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle use.  */
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
        vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node, -1);
      else
        vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);

      /* Arguments are ready. create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
        {
          if (CONVERT_EXPR_CODE_P (code)
              || code == VIEW_CONVERT_EXPR)
            vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
          new_stmt = gimple_build_assign (vec_dest, vop);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds.release ();
  return true;
}
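
/* Illustrative sketch, assumptions labeled: vectorizable_assignment
   accepts conversions that change neither the element count nor the
   vector size, e.g.

     unsigned int a[N];
     int b[N];
     for (int i = 0; i < N; i++)
       b[i] = (int) a[i];

   Both element types have the same mode, so each copy is emitted as a
   plain vector assignment whose rhs is wrapped in a VIEW_CONVERT_EXPR
   to the destination vector type, exactly the build1 call above.  */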
/* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
   either as shift by a scalar or by a vector.  */

bool
vect_supportable_shift (enum tree_code code, tree scalar_type)
{
  machine_mode vec_mode;
  optab optab;
  int icode;
  tree vectype;

  vectype = get_vectype_for_scalar_type (scalar_type);
  if (!vectype)
    return false;

  optab = optab_for_tree_code (code, vectype, optab_scalar);
  if (!optab
      || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (!optab
          || (optab_handler (optab, TYPE_MODE (vectype))
              == CODE_FOR_nothing))
        return false;
    }

  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    return false;

  return true;
}
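
/* Illustrative note: the two queries above correspond to the two hardware
   forms of a shift.  optab_scalar matches insns where every lane is
   shifted by one shared scalar amount, optab_vector matches insns with a
   per-lane amount.  A loop like

     for (int i = 0; i < N; i++)
       out[i] = in[i] >> amount[i];

   can only use the optab_vector form, while a loop-invariant or constant
   amount can use either, so both are tried before reporting failure.  */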
/* Function vectorizable_shift.

   Check if STMT performs a shift operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
                    gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL;
  tree vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code;
  machine_mode vec_mode;
  tree new_temp;
  optab optab;
  int icode;
  machine_mode optab_op2_mode;
  gimple *def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out;
  tree op1_vectype;
  int ncopies;
  int j, i;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  tree vop0, vop1;
  unsigned int k;
  bool scalar_shift_arg = true;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  int vf;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is STMT a vectorizable binary/unary operation?   */
  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
        || code == RROTATE_EXPR))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);
  if (TYPE_PRECISION (TREE_TYPE (scalar_dest))
      != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bit-precision shifts not supported.\n");
      return false;
    }

  op0 = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }
  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  if (!vectype)
    vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
  if (vec_stmt)
    gcc_assert (vectype);
  if (!vectype)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "no vectype for scalar type\n");
      return false;
    }

  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
  if (nunits_out != nunits_in)
    return false;

  op1 = gimple_assign_rhs2 (stmt);
  if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &op1_vectype))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }

  if (loop_vinfo)
    vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  gcc_assert (ncopies >= 1);

  /* Determine whether the shift amount is a vector, or scalar.  If the
     shift/rotate amount is a vector, use the vector/vector shift optabs.  */

  if ((dt[1] == vect_internal_def
       || dt[1] == vect_induction_def)
      && !slp_node)
    scalar_shift_arg = false;
  else if (dt[1] == vect_constant_def
           || dt[1] == vect_external_def
           || dt[1] == vect_internal_def)
    {
      /* In SLP, need to check whether the shift count is the same,
         in loops if it is a constant or invariant, it is always
         a scalar shift.  */
      if (slp_node)
        {
          vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
          gimple *slpstmt;

          FOR_EACH_VEC_ELT (stmts, k, slpstmt)
            if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
              scalar_shift_arg = false;
        }
    }
  else
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "operand mode requires invariant argument.\n");
      return false;
    }

  /* Vector shifted by vector.  */
  if (!scalar_shift_arg)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vector/vector shift/rotate found.\n");

      if (!op1_vectype)
        op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
      if (op1_vectype == NULL_TREE
          || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "unusable type for last operand in"
                             " vector/vector shift/rotate.\n");
          return false;
        }
    }
  /* See if the machine has a vector shifted by scalar insn and if not
     then see if it has a vector shifted by vector insn.  */
  else
    {
      optab = optab_for_tree_code (code, vectype, optab_scalar);
      if (optab
          && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "vector/scalar shift/rotate found.\n");
        }
      else
        {
          optab = optab_for_tree_code (code, vectype, optab_vector);
          if (optab
              && (optab_handler (optab, TYPE_MODE (vectype))
                  != CODE_FOR_nothing))
            {
              scalar_shift_arg = false;

              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location,
                                 "vector/vector shift/rotate found.\n");

              /* Unlike the other binary operators, shifts/rotates have
                 the rhs being int, instead of the same type as the lhs,
                 so make sure the scalar is the right type if we are
                 dealing with vectors of long long/long/short/char.  */
              if (dt[1] == vect_constant_def)
                op1 = fold_convert (TREE_TYPE (vectype), op1);
              else if (!useless_type_conversion_p (TREE_TYPE (vectype),
                                                   TREE_TYPE (op1)))
                {
                  if (slp_node
                      && TYPE_MODE (TREE_TYPE (vectype))
                         != TYPE_MODE (TREE_TYPE (op1)))
                    {
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                         vect_location,
                                         "unusable type for last operand in"
                                         " vector/vector shift/rotate.\n");
                      return false;
                    }
                  if (vec_stmt && !slp_node)
                    {
                      op1 = fold_convert (TREE_TYPE (vectype), op1);
                      op1 = vect_init_vector (stmt, op1,
                                              TREE_TYPE (vectype), NULL);
                    }
                }
            }
        }
    }

  /* Supportable by target?  */
  if (!optab)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "no optab.\n");
      return false;
    }
  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "op not supported by target.\n");
      /* Check only during analysis.  */
      if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
          || (vf < vect_min_worthwhile_factor (code)
              && !vec_stmt))
        return false;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "proceeding using word mode.\n");
    }

  /* Worthwhile without SIMD support?  Check only during analysis.  */
  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && vf < vect_min_worthwhile_factor (code)
      && !vec_stmt)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not worthwhile without SIMD support.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "=== vectorizable_shift ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /* Transform.  */

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform binary/unary operation.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
        {
          if (scalar_shift_arg)
            {
              /* Vector shl and shr insn patterns can be defined with scalar
                 operand 2 (shift operand).  In this case, use constant or loop
                 invariant op1 directly, without extending it to vector mode
                 first.  */
              optab_op2_mode = insn_data[icode].operand[2].mode;
              if (!VECTOR_MODE_P (optab_op2_mode))
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location,
                                     "operand 1 using scalar mode.\n");
                  vec_oprnd1 = op1;
                  vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
                  vec_oprnds1.quick_push (vec_oprnd1);
                  if (slp_node)
                    {
                      /* Store vec_oprnd1 for every vector stmt to be created
                         for SLP_NODE.  We check during the analysis that all
                         the shift arguments are the same.
                         TODO: Allow different constants for different vector
                         stmts generated for an SLP instance.  */
                      for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
                        vec_oprnds1.quick_push (vec_oprnd1);
                    }
                }
            }

          /* vec_oprnd1 is available if operand 1 should be of a scalar-type
             (a special case for certain kind of vector shifts); otherwise,
             operand 1 should be of a vector type (the usual case).  */
          if (vec_oprnd1)
            vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
                               slp_node, -1);
          else
            vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
                               slp_node, -1);
        }
      else
        vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
        {
          vop1 = vec_oprnds1[i];
          new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();

  return true;
}
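
/* Illustrative sketch, assumptions labeled: with a loop-invariant amount

     for (int i = 0; i < N; i++)
       out[i] = in[i] << k;

   and an insn pattern whose operand 2 is scalar (the "operand 1 using
   scalar mode" path above), each copy is a single vector shift by the
   scalar K.  Only when the target provides just the vector/vector form
   is K first broadcast into a vector via vect_init_vector.  */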
/* Function vectorizable_operation.

   Check if STMT performs a binary, unary or ternary operation that can
   be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
                        gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code;
  machine_mode vec_mode;
  tree new_temp;
  int op_type;
  optab optab;
  bool target_support_p;
  gimple *def_stmt;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out;
  int ncopies;
  int j, i;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  vec<tree> vec_oprnds2 = vNULL;
  tree vop0, vop1, vop2;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  int vf;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is STMT a vectorizable binary/unary operation?   */
  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);

  /* For pointer addition, we should use the normal plus for
     the vector addition.  */
  if (code == POINTER_PLUS_EXPR)
    code = PLUS_EXPR;

  /* Support only unary or binary operations.  */
  op_type = TREE_CODE_LENGTH (code);
  if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "num. args = %d (not unary/binary/ternary op).\n",
                         op_type);
      return false;
    }

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Most operations cannot handle bit-precision types without extra
     truncations.  */
  if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
      && (TYPE_PRECISION (TREE_TYPE (scalar_dest))
          != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
      /* Exception are bitwise binary operations.  */
      && code != BIT_IOR_EXPR
      && code != BIT_XOR_EXPR
      && code != BIT_AND_EXPR)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bit-precision arithmetic not supported.\n");
      return false;
    }

  op0 = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }
  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  if (!vectype)
    {
      /* For boolean type we cannot determine vectype by
         invariant value (don't know whether it is a vector
         of booleans or vector of integers).  We use output
         vectype because operations on boolean don't change
         type.  */
      if (TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE)
        {
          if (TREE_CODE (TREE_TYPE (scalar_dest)) != BOOLEAN_TYPE)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "not supported operation on bool value.\n");
              return false;
            }
          vectype = vectype_out;
        }
      else
        vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
    }
  if (vec_stmt)
    gcc_assert (vectype);
  if (!vectype)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "no vectype for scalar type ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                             TREE_TYPE (op0));
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      return false;
    }

  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
  if (nunits_out != nunits_in)
    return false;

  if (op_type == binary_op || op_type == ternary_op)
    {
      op1 = gimple_assign_rhs2 (stmt);
      if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "use not simple.\n");
          return false;
        }
    }
  if (op_type == ternary_op)
    {
      op2 = gimple_assign_rhs3 (stmt);
      if (!vect_is_simple_use (op2, vinfo, &def_stmt, &dt[2]))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "use not simple.\n");
          return false;
        }
    }

  if (loop_vinfo)
    vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  gcc_assert (ncopies >= 1);

  /* Shifts are handled in vectorizable_shift ().  */
  if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
      || code == RROTATE_EXPR)
    return false;

  /* Supportable by target?  */

  vec_mode = TYPE_MODE (vectype);
  if (code == MULT_HIGHPART_EXPR)
    target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
  else
    {
      optab = optab_for_tree_code (code, vectype, optab_default);
      if (!optab)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "no optab.\n");
          return false;
        }
      target_support_p = (optab_handler (optab, vec_mode)
                          != CODE_FOR_nothing);
    }

  if (!target_support_p)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "op not supported by target.\n");
      /* Check only during analysis.  */
      if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
          || (!vec_stmt && vf < vect_min_worthwhile_factor (code)))
        return false;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "proceeding using word mode.\n");
    }

  /* Worthwhile without SIMD support?  Check only during analysis.  */
  if (!VECTOR_MODE_P (vec_mode)
      && !vec_stmt
      && vf < vect_min_worthwhile_factor (code))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not worthwhile without SIMD support.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "=== vectorizable_operation ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /* Transform.  */

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform binary/unary operation.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
     we need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1 (done in vectorizable_load. See more details
             there):
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     step2: vectorize stmt S2 (done here):
        To vectorize stmt S2 we first need to find the relevant vector
        def for the first operand 'x'.  This is, as usual, obtained from
        the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
        that defines 'x' (S1).  This way we find the stmt VS1_0, and the
        relevant vector def 'vx0'.  Having found 'vx0' we can generate
        the vector stmt VS2_0, and as usual, record it in the
        STMT_VINFO_VEC_STMT of stmt S2.
        When creating the second copy (VS2_1), we obtain the relevant vector
        def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
        stmt VS1_0.  This way we find the stmt VS1_1 and the relevant
        vector def 'vx1'.  Using 'vx1' we create stmt VS2_1 and record a
        pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
        Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
        chain of stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        VS2_0:  vz0 = vx0 + v1  VS2_1           -
        VS2_1:  vz1 = vx1 + v1  VS2_2           -
        VS2_2:  vz2 = vx2 + v1  VS2_3           -
        VS2_3:  vz3 = vx3 + v1  -               -
        S2:     z = x + 1       -               VS2_0  */

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
        {
          if (op_type == binary_op || op_type == ternary_op)
            vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
                               slp_node, -1);
          else
            vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
                               slp_node, -1);
          if (op_type == ternary_op)
            {
              vec_oprnds2.create (1);
              vec_oprnds2.quick_push (vect_get_vec_def_for_operand (op2,
                                                                    stmt));
            }
        }
      else
        {
          vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
          if (op_type == ternary_op)
            {
              tree vec_oprnd = vec_oprnds2.pop ();
              vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
                                                                      vec_oprnd));
            }
        }

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
        {
          vop1 = ((op_type == binary_op || op_type == ternary_op)
                  ? vec_oprnds1[i] : NULL_TREE);
          vop2 = ((op_type == ternary_op)
                  ? vec_oprnds2[i] : NULL_TREE);
          new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_oprnds2.release ();

  return true;
}
/* A helper function to ensure data reference DR's base alignment
   for STMT_INFO.  */

static void
ensure_base_align (stmt_vec_info stmt_info, struct data_reference *dr)
{
  if (!dr->aux)
    return;

  if (DR_VECT_AUX (dr)->base_misaligned)
    {
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      tree base_decl = DR_VECT_AUX (dr)->base_decl;

      if (decl_in_symtab_p (base_decl))
        symtab_node::get (base_decl)->increase_alignment (TYPE_ALIGN (vectype));
      else
        {
          DECL_ALIGN (base_decl) = TYPE_ALIGN (vectype);
          DECL_USER_ALIGN (base_decl) = 1;
        }
      DR_VECT_AUX (dr)->base_misaligned = false;
    }
}
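
/* Illustrative note: for a global such as "double a[256];" whose data
   reference was recorded as base-misaligned, the code above raises the
   alignment of the decl to TYPE_ALIGN (vectype), so the aligned vector
   accesses chosen during analysis stay valid; decls visible in the
   symbol table go through increase_alignment so that the new alignment
   is propagated consistently.  */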
/* Given a vector type VECTYPE returns the VECTOR_CST mask that implements
   reversal of the vector elements.  If that is impossible to do,
   returns NULL.  */

static tree
perm_mask_for_reverse (tree vectype)
{
  int i, nunits;
  unsigned char *sel;

  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  sel = XALLOCAVEC (unsigned char, nunits);

  for (i = 0; i < nunits; ++i)
    sel[i] = nunits - 1 - i;

  if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
    return NULL_TREE;
  return vect_gen_perm_mask_checked (vectype, sel);
}
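
/* Worked instance: for a four-element vector the loop above yields
   sel = {3, 2, 1, 0}, the VEC_PERM_EXPR mask that moves element i of the
   input to element nunits - 1 - i of the result; this is the mask the
   negative-step store and load paths use to reverse a vector.  */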
/* Function vectorizable_store.

   Check if STMT defines a non scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
                    slp_tree slp_node)
{
  tree scalar_dest;
  tree data_ref;
  tree op;
  tree vec_oprnd = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
  tree elem_type;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  machine_mode vec_mode;
  tree dummy;
  enum dr_alignment_support alignment_support_scheme;
  gimple *def_stmt;
  enum vect_def_type dt;
  stmt_vec_info prev_stmt_info = NULL;
  tree dataref_ptr = NULL_TREE;
  tree dataref_offset = NULL_TREE;
  gimple *ptr_incr = NULL;
  int ncopies;
  int j;
  gimple *next_stmt, *first_stmt = NULL;
  bool grouped_store = false;
  bool store_lanes_p = false;
  unsigned int group_size, i;
  vec<tree> dr_chain = vNULL;
  vec<tree> oprnds = vNULL;
  vec<tree> result_chain = vNULL;
  bool inv_p;
  bool negative = false;
  tree offset = NULL_TREE;
  vec<tree> vec_oprnds = vNULL;
  bool slp = (slp_node != NULL);
  unsigned int vec_num;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  tree aggr_type;
  gimple *new_stmt;
  tree scatter_base = NULL_TREE, scatter_off = NULL_TREE;
  tree scatter_off_vectype = NULL_TREE, scatter_decl = NULL_TREE;
  int scatter_scale = 1;
  enum vect_def_type scatter_idx_dt = vect_unknown_def_type;
  enum vect_def_type scatter_src_dt = vect_unknown_def_type;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is vectorizable store? */

  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
      && is_pattern_stmt_p (stmt_info))
    scalar_dest = TREE_OPERAND (scalar_dest, 0);
  if (TREE_CODE (scalar_dest) != ARRAY_REF
      && TREE_CODE (scalar_dest) != BIT_FIELD_REF
      && TREE_CODE (scalar_dest) != INDIRECT_REF
      && TREE_CODE (scalar_dest) != COMPONENT_REF
      && TREE_CODE (scalar_dest) != IMAGPART_EXPR
      && TREE_CODE (scalar_dest) != REALPART_EXPR
      && TREE_CODE (scalar_dest) != MEM_REF)
    return false;

  gcc_assert (gimple_assign_single_p (stmt));

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  /* FORNOW.  This restriction should be relaxed.  */
  if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "multiple types in nested loop.\n");
      return false;
    }

  op = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }

  elem_type = TREE_TYPE (vectype);
  vec_mode = TYPE_MODE (vectype);

  /* FORNOW. In some cases can vectorize even if data-type not supported
     (e.g. - array initialization with 0).  */
  if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
    return false;

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  if (!STMT_VINFO_STRIDED_P (stmt_info))
    {
      negative =
        tree_int_cst_compare (loop && nested_in_vect_loop_p (loop, stmt)
                              ? STMT_VINFO_DR_STEP (stmt_info) : DR_STEP (dr),
                              size_zero_node) < 0;
      if (negative && ncopies > 1)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "multiple types with negative step.\n");
          return false;
        }
      if (negative)
        {
          gcc_assert (!grouped_store);
          alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
          if (alignment_support_scheme != dr_aligned
              && alignment_support_scheme != dr_unaligned_supported)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "negative step but alignment required.\n");
              return false;
            }
          if (dt != vect_constant_def
              && dt != vect_external_def
              && !perm_mask_for_reverse (vectype))
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "negative step and reversing not"
                                 " supported.\n");
              return false;
            }
        }
    }

  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      grouped_store = true;
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      if (!slp
          && !PURE_SLP_STMT (stmt_info)
          && !STMT_VINFO_STRIDED_P (stmt_info))
        {
          if (vect_store_lanes_supported (vectype, group_size))
            store_lanes_p = true;
          else if (!vect_grouped_store_supported (vectype, group_size))
            return false;
        }

      if (STMT_VINFO_STRIDED_P (stmt_info)
          && (slp || PURE_SLP_STMT (stmt_info))
          && (group_size > nunits
              || nunits % group_size != 0))
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "unhandled strided group store\n");
          return false;
        }

      if (first_stmt == stmt)
        {
          /* STMT is the leader of the group. Check the operands of all the
             stmts of the group.  */
          next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
          while (next_stmt)
            {
              gcc_assert (gimple_assign_single_p (next_stmt));
              op = gimple_assign_rhs1 (next_stmt);
              if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "use not simple.\n");
                  return false;
                }
              next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
            }
        }
    }

  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      gimple *def_stmt;
      scatter_decl = vect_check_gather_scatter (stmt, loop_vinfo,
                                                &scatter_base,
                                                &scatter_off, &scatter_scale);
      gcc_assert (scatter_decl);
      if (!vect_is_simple_use (scatter_off, vinfo, &def_stmt, &scatter_idx_dt,
                               &scatter_off_vectype))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "scatter index use not simple.");
          return false;
        }
    }
  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
      /* The SLP costs are calculated during SLP analysis.  */
      if (!PURE_SLP_STMT (stmt_info))
        vect_model_store_cost (stmt_info, ncopies, store_lanes_p, dt,
                               NULL, NULL, NULL);
      return true;
    }

  /* Transform.  */

  ensure_base_align (stmt_info, dr);

  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, op, src;
      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (scatter_decl));
      tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
      tree ptr, mask, var, scale, perm_mask = NULL_TREE;
      edge pe = loop_preheader_edge (loop);
      gimple_seq seq;
      basic_block new_bb;
      enum { NARROW, NONE, WIDEN } modifier;
      int scatter_off_nunits = TYPE_VECTOR_SUBPARTS (scatter_off_vectype);

      if (nunits == (unsigned int) scatter_off_nunits)
        modifier = NONE;
      else if (nunits == (unsigned int) scatter_off_nunits / 2)
        {
          unsigned char *sel = XALLOCAVEC (unsigned char, scatter_off_nunits);
          modifier = WIDEN;

          for (i = 0; i < (unsigned int) scatter_off_nunits; ++i)
            sel[i] = i | nunits;

          perm_mask = vect_gen_perm_mask_checked (scatter_off_vectype, sel);
          gcc_assert (perm_mask != NULL_TREE);
        }
      else if (nunits == (unsigned int) scatter_off_nunits * 2)
        {
          unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
          modifier = NARROW;

          for (i = 0; i < (unsigned int) nunits; ++i)
            sel[i] = i | scatter_off_nunits;

          perm_mask = vect_gen_perm_mask_checked (vectype, sel);
          gcc_assert (perm_mask != NULL_TREE);
          ncopies *= 2;
        }
      else
        gcc_unreachable ();

      rettype = TREE_TYPE (TREE_TYPE (scatter_decl));
      ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      scaletype = TREE_VALUE (arglist);

      gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
                           && TREE_CODE (rettype) == VOID_TYPE);

      ptr = fold_convert (ptrtype, scatter_base);
      if (!is_gimple_min_invariant (ptr))
        {
          ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
          new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
          gcc_assert (!new_bb);
        }

      /* Currently we support only unconditional scatter stores,
         so mask should be all ones.  */
      mask = build_int_cst (masktype, -1);
      mask = vect_init_vector (stmt, mask, masktype, NULL);

      scale = build_int_cst (scaletype, scatter_scale);

      prev_stmt_info = NULL;
      for (j = 0; j < ncopies; ++j)
        {
          if (j == 0)
            {
              src = vec_oprnd1
                = vect_get_vec_def_for_operand (gimple_assign_rhs1 (stmt), stmt);
              op = vec_oprnd0
                = vect_get_vec_def_for_operand (scatter_off, stmt);
            }
          else if (modifier != NONE && (j & 1))
            {
              if (modifier == WIDEN)
                {
                  src = vec_oprnd1
                    = vect_get_vec_def_for_stmt_copy (scatter_src_dt,
                                                      vec_oprnd1);
                  op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
                                             stmt, gsi);
                }
              else if (modifier == NARROW)
                {
                  src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask,
                                              stmt, gsi);
                  op = vec_oprnd0
                    = vect_get_vec_def_for_stmt_copy (scatter_idx_dt,
                                                      vec_oprnd0);
                }
              else
                gcc_unreachable ();
            }
          else
            {
              src = vec_oprnd1
                = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
              op = vec_oprnd0
                = vect_get_vec_def_for_stmt_copy (scatter_idx_dt, vec_oprnd0);
            }

          if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
            {
              gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src))
                          == TYPE_VECTOR_SUBPARTS (srctype));
              var = vect_get_new_ssa_name (srctype, vect_simple_var);
              src = build1 (VIEW_CONVERT_EXPR, srctype, src);
              new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              src = var;
            }

          if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
            {
              gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
                          == TYPE_VECTOR_SUBPARTS (idxtype));
              var = vect_get_new_ssa_name (idxtype, vect_simple_var);
              op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
              new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              op = var;
            }

          new_stmt
            = gimple_build_call (scatter_decl, 5, ptr, mask, op, src, scale);

          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          if (prev_stmt_info == NULL)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
      return true;
    }
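
  /* Illustrative sketch, assumptions labeled: the scatter path above
     covers stores through a vector of indices, e.g.

       for (int i = 0; i < N; i++)
         a[idx[i]] = b[i];

     Each copy becomes one call to the target scatter builtin with five
     arguments: base pointer, all-ones mask, index vector, source vector
     and scale.  The WIDEN/NARROW modifiers handle an index vector with
     twice or half as many lanes as the data vector, pairing them up with
     the permutation masks built above.  */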
  if (grouped_store)
    {
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));

      GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;

      /* FORNOW */
      gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));

      /* We vectorize all the stmts of the interleaving group when we
         reach the last stmt in the group.  */
      if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
          < GROUP_SIZE (vinfo_for_stmt (first_stmt))
          && !slp)
        {
          *vec_stmt = NULL;
          return true;
        }

      if (slp)
        {
          grouped_store = false;
          /* VEC_NUM is the number of vect stmts to be created for this
             group.  */
          vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
          first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
          gcc_assert (GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt))
                      == first_stmt);
          first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
          op = gimple_assign_rhs1 (first_stmt);
        }
      else
        /* VEC_NUM is the number of vect stmts to be created for this
           group.  */
        vec_num = group_size;
    }
  else
    {
      first_stmt = stmt;
      first_dr = dr;
      group_size = vec_num = 1;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform store. ncopies = %d\n", ncopies);

  if (STMT_VINFO_STRIDED_P (stmt_info))
    {
      gimple_stmt_iterator incr_gsi;
      bool insert_after;
      gimple *incr;
      tree offvar;
      tree ivstep;
      tree running_off;
      gimple_seq stmts = NULL;
      tree stride_base, stride_step, alias_off;
      tree vec_oprnd;
      unsigned int g;

      gcc_assert (!nested_in_vect_loop_p (loop, stmt));

      stride_base
        = fold_build_pointer_plus
            (unshare_expr (DR_BASE_ADDRESS (first_dr)),
             size_binop (PLUS_EXPR,
                         convert_to_ptrofftype (unshare_expr (DR_OFFSET (first_dr))),
                         convert_to_ptrofftype (DR_INIT(first_dr))));
      stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (first_dr)));

      /* For a store with loop-invariant (but other than power-of-2)
         stride (i.e. not a grouped access) like so:

           for (i = 0; i < n; i += stride)
             array[i] = ...;

         we generate a new induction variable and new stores from
         the components of the (vectorized) rhs:

           for (j = 0; ; j += VF*stride)
             vectemp = ...;
             tmp1 = vectemp[0];
             array[j] = tmp1;
             tmp2 = vectemp[1];
             array[j + stride] = tmp2;
             ...
         */
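
      /* Worked instance (hypothetical values): with VF = 4 and stride = 3,
         the scalar loop "for (i = 0; i < n; i += 3) array[i] = x;" becomes
         one IV with step 4*3 = 12 and four component stores per iteration:

           for (j = 0; ; j += 12)
             vectemp = ...;
             array[j]     = vectemp[0];
             array[j + 3] = vectemp[1];
             array[j + 6] = vectemp[2];
             array[j + 9] = vectemp[3];

         which is what the BIT_FIELD_REF extraction and the running_off
         bumps below implement.  */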
      unsigned nstores = nunits;
      tree ltype = elem_type;
      if (slp)
        {
          nstores = nunits / group_size;
          if (group_size < nunits)
            ltype = build_vector_type (elem_type, group_size);
          else
            ltype = vectype;
          ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
          ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
          group_size = 1;
        }

      ivstep = stride_step;
      ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
                            build_int_cst (TREE_TYPE (ivstep),
                                           ncopies * nstores));

      standard_iv_increment_position (loop, &incr_gsi, &insert_after);

      create_iv (stride_base, ivstep, NULL,
                 loop, &incr_gsi, insert_after,
                 &offvar, NULL);
      incr = gsi_stmt (incr_gsi);
      set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));

      stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
      if (stmts)
        gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);

      prev_stmt_info = NULL;
      alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)), 0);
      next_stmt = first_stmt;
      for (g = 0; g < group_size; g++)
        {
          running_off = offvar;
          if (g)
            {
              tree size = TYPE_SIZE_UNIT (ltype);
              tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
                                      size);
              tree newoff = copy_ssa_name (running_off, NULL);
              incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
                                          running_off, pos);
              vect_finish_stmt_generation (stmt, incr, gsi);
              running_off = newoff;
            }
          for (j = 0; j < ncopies; j++)
            {
              /* We've set op and dt above, from gimple_assign_rhs1(stmt),
                 and first_stmt == stmt.  */
              if (j == 0)
                {
                  if (slp)
                    {
                      vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL,
                                         slp_node, -1);
                      vec_oprnd = vec_oprnds[0];
                    }
                  else
                    {
                      gcc_assert (gimple_assign_single_p (next_stmt));
                      op = gimple_assign_rhs1 (next_stmt);
                      vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
                    }
                }
              else
                {
                  if (slp)
                    vec_oprnd = vec_oprnds[j];
                  else
                    {
                      vect_is_simple_use (vec_oprnd, vinfo, &def_stmt, &dt);
                      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
                    }
                }

              for (i = 0; i < nstores; i++)
                {
                  tree newref, newoff;
                  gimple *incr, *assign;
                  tree size = TYPE_SIZE (ltype);
                  /* Extract the i'th component.  */
                  tree pos = fold_build2 (MULT_EXPR, bitsizetype,
                                          bitsize_int (i), size);
                  tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
                                           size, pos);

                  elem = force_gimple_operand_gsi (gsi, elem, true,
                                                   NULL_TREE, true,
                                                   GSI_SAME_STMT);

                  newref = build2 (MEM_REF, ltype,
                                   running_off, alias_off);

                  /* And store it to *running_off.  */
                  assign = gimple_build_assign (newref, elem);
                  vect_finish_stmt_generation (stmt, assign, gsi);

                  newoff = copy_ssa_name (running_off, NULL);
                  incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
                                              running_off, stride_step);
                  vect_finish_stmt_generation (stmt, incr, gsi);

                  running_off = newoff;
                  if (g == group_size - 1
                      && !slp)
                    {
                      if (j == 0 && i == 0)
                        STMT_VINFO_VEC_STMT (stmt_info)
                            = *vec_stmt = assign;
                      else
                        STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign;
                      prev_stmt_info = vinfo_for_stmt (assign);
                    }
                }
            }
          next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
        }
      return true;
    }
  dr_chain.create (group_size);
  oprnds.create (group_size);

  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
  gcc_assert (alignment_support_scheme);
  /* Targets with store-lane instructions must not require explicit
     realignment.  */
  gcc_assert (!store_lanes_p
              || alignment_support_scheme == dr_aligned
              || alignment_support_scheme == dr_unaligned_supported);

  if (negative)
    offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);

  if (store_lanes_p)
    aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
  else
    aggr_type = vectype;

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation in
     vect_get_vec_def_for_copy_stmt.  */

  /* In case of interleaving (non-unit grouped access):

        S1:  &base + 2 = x2
        S2:  &base = x0
        S3:  &base + 1 = x1
        S4:  &base + 3 = x3

     We create vectorized stores starting from base address (the access of the
     first stmt in the chain (S2 in the above example), when the last store stmt
     of the chain (S4) is reached:

        VS1: &base = vx2
        VS2: &base + vec_size*1 = vx0
        VS3: &base + vec_size*2 = vx1
        VS4: &base + vec_size*3 = vx3

     Then permutation statements are generated:

        VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
        VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
        ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_store_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_store_chain()).

     In case of both multiple types and interleaving, above vector stores and
     permutation stmts are created for every copy.  The result vector stmts are
     put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
     STMT_VINFO_RELATED_STMT for the next copies.
  */

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {

      if (j == 0)
        {
          if (slp)
            {
              /* Get vectorized arguments for SLP_NODE.  */
              vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
                                 NULL, slp_node, -1);

              vec_oprnd = vec_oprnds[0];
            }
          else
            {
              /* For interleaved stores we collect vectorized defs for all the
                 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
                 used as an input to vect_permute_store_chain(), and OPRNDS as
                 an input to vect_get_vec_def_for_stmt_copy() for the next copy.

                 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
                 OPRNDS are of size 1.  */
              next_stmt = first_stmt;
              for (i = 0; i < group_size; i++)
                {
                  /* Since gaps are not supported for interleaved stores,
                     GROUP_SIZE is the exact number of stmts in the chain.
                     Therefore, NEXT_STMT can't be NULL_TREE.  In case that
                     there is no interleaving, GROUP_SIZE is 1, and only one
                     iteration of the loop will be executed.  */
                  gcc_assert (next_stmt
                              && gimple_assign_single_p (next_stmt));
                  op = gimple_assign_rhs1 (next_stmt);

                  vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
                  dr_chain.quick_push (vec_oprnd);
                  oprnds.quick_push (vec_oprnd);
                  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
                }
            }

          /* We should have catched mismatched types earlier.  */
          gcc_assert (useless_type_conversion_p (vectype,
                                                 TREE_TYPE (vec_oprnd)));
          bool simd_lane_access_p
            = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
          if (simd_lane_access_p
              && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
              && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
              && integer_zerop (DR_OFFSET (first_dr))
              && integer_zerop (DR_INIT (first_dr))
              && alias_sets_conflict_p (get_alias_set (aggr_type),
                                        get_alias_set (DR_REF (first_dr))))
            {
              dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
              dataref_offset = build_int_cst (reference_alias_ptr_type
                                              (DR_REF (first_dr)), 0);
              inv_p = false;
            }
          else
            dataref_ptr
              = vect_create_data_ref_ptr (first_stmt, aggr_type,
                                          simd_lane_access_p ? loop : NULL,
                                          offset, &dummy, gsi, &ptr_incr,
                                          simd_lane_access_p, &inv_p);
          gcc_assert (bb_vinfo || !inv_p);
        }
      else
        {
          /* For interleaved stores we created vectorized defs for all the
             defs stored in OPRNDS in the previous iteration (previous copy).
             DR_CHAIN is then used as an input to vect_permute_store_chain(),
             and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
             next copy.
             If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
             OPRNDS are of size 1.  */
          for (i = 0; i < group_size; i++)
            {
              op = oprnds[i];
              vect_is_simple_use (op, vinfo, &def_stmt, &dt);
              vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
              dr_chain[i] = vec_oprnd;
              oprnds[i] = vec_oprnd;
            }
          if (dataref_offset)
            dataref_offset
              = int_const_binop (PLUS_EXPR, dataref_offset,
                                 TYPE_SIZE_UNIT (aggr_type));
          else
            dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
                                           TYPE_SIZE_UNIT (aggr_type));
        }

      if (store_lanes_p)
        {
          tree vec_array;

          /* Combine all the vectors into an array.  */
          vec_array = create_vector_array (vectype, vec_num);
          for (i = 0; i < vec_num; i++)
            {
              vec_oprnd = dr_chain[i];
              write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
            }

          /* Emit:
               MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY).  */
          data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
          new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array);
          gimple_call_set_lhs (new_stmt, data_ref);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
        }
      else
        {
          new_stmt = NULL;
          if (grouped_store)
            {
              if (j == 0)
                result_chain.create (group_size);
              /* Permute.  */
              vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
                                        &result_chain);
            }

          next_stmt = first_stmt;
          for (i = 0; i < vec_num; i++)
            {
              unsigned align, misalign;

              if (i > 0)
                /* Bump the vector pointer.  */
                dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                               stmt, NULL_TREE);

              if (slp)
                vec_oprnd = vec_oprnds[i];
              else if (grouped_store)
                /* For grouped stores vectorized defs are interleaved in
                   vect_permute_store_chain().  */
                vec_oprnd = result_chain[i];

              data_ref = fold_build2 (MEM_REF, TREE_TYPE (vec_oprnd),
                                      dataref_ptr,
                                      dataref_offset
                                      ? dataref_offset
                                      : build_int_cst (reference_alias_ptr_type
                                                       (DR_REF (first_dr)), 0));
              align = TYPE_ALIGN_UNIT (vectype);
              if (aligned_access_p (first_dr))
                misalign = 0;
              else if (DR_MISALIGNMENT (first_dr) == -1)
                {
                  if (DR_VECT_AUX (first_dr)->base_element_aligned)
                    align = TYPE_ALIGN_UNIT (elem_type);
                  else
                    align = get_object_alignment (DR_REF (first_dr))
                            / BITS_PER_UNIT;
                  misalign = 0;
                  TREE_TYPE (data_ref)
                    = build_aligned_type (TREE_TYPE (data_ref),
                                          align * BITS_PER_UNIT);
                }
              else
                {
                  TREE_TYPE (data_ref)
                    = build_aligned_type (TREE_TYPE (data_ref),
                                          TYPE_ALIGN (elem_type));
                  misalign = DR_MISALIGNMENT (first_dr);
                }
              if (dataref_offset == NULL_TREE
                  && TREE_CODE (dataref_ptr) == SSA_NAME)
                set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
                                        misalign);

              if (negative
                  && dt != vect_constant_def
                  && dt != vect_external_def)
                {
                  tree perm_mask = perm_mask_for_reverse (vectype);
                  tree perm_dest
                    = vect_create_destination_var (gimple_assign_rhs1 (stmt),
                                                   vectype);
                  tree new_temp = make_ssa_name (perm_dest);

                  /* Generate the permute statement.  */
                  gimple *perm_stmt
                    = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
                                           vec_oprnd, perm_mask);
                  vect_finish_stmt_generation (stmt, perm_stmt, gsi);

                  perm_stmt = SSA_NAME_DEF_STMT (new_temp);
                  vec_oprnd = new_temp;
                }

              /* Arguments are ready.  Create the new vector stmt.  */
              new_stmt = gimple_build_assign (data_ref, vec_oprnd);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);

              if (slp)
                continue;

              next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
              if (!next_stmt)
                break;
            }
        }
      if (!slp)
        {
          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
    }

  dr_chain.release ();
  oprnds.release ();
  result_chain.release ();
  vec_oprnds.release ();

  return true;
}
/* Given a vector type VECTYPE, turns permutation SEL into the equivalent
   VECTOR_CST mask.  No checks are made that the target platform supports the
   mask, so callers may wish to test can_vec_perm_p separately, or use
   vect_gen_perm_mask_checked.  */

tree
vect_gen_perm_mask_any (tree vectype, const unsigned char *sel)
{
  tree mask_elt_type, mask_type, mask_vec, *mask_elts;
  int i, nunits;

  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  mask_elt_type = lang_hooks.types.type_for_mode
    (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
  mask_type = get_vectype_for_scalar_type (mask_elt_type);

  mask_elts = XALLOCAVEC (tree, nunits);
  for (i = nunits - 1; i >= 0; i--)
    mask_elts[i] = build_int_cst (mask_elt_type, sel[i]);
  mask_vec = build_vector (mask_type, mask_elts);

  return mask_vec;
}
/* Checked version of vect_gen_perm_mask_any.  Asserts can_vec_perm_p,
   i.e. that the target supports the pattern _for arbitrary input vectors_.  */

tree
vect_gen_perm_mask_checked (tree vectype, const unsigned char *sel)
{
  gcc_assert (can_vec_perm_p (TYPE_MODE (vectype), false, sel));
  return vect_gen_perm_mask_any (vectype, sel);
}
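
/* Illustrative usage (hypothetical caller): to build the low-half
   interleave mask for a 4-lane vector one would write

     unsigned char sel[4] = { 0, 4, 1, 5 };
     tree mask = vect_gen_perm_mask_checked (vectype, sel);

   where indices 0..nunits-1 select from the first VEC_PERM_EXPR input
   and nunits..2*nunits-1 from the second; vect_gen_perm_mask_any is the
   variant for callers that have already checked can_vec_perm_p.  */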
/* Given a vector variable X and Y, that was generated for the scalar
   STMT, generate instructions to permute the vector elements of X and Y
   using permutation mask MASK_VEC, insert them at *GSI and return the
   permuted vector variable.  */

static tree
permute_vec_elements (tree x, tree y, tree mask_vec, gimple *stmt,
                      gimple_stmt_iterator *gsi)
{
  tree vectype = TREE_TYPE (x);
  tree perm_dest, data_ref;
  gimple *perm_stmt;

  perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
  data_ref = make_ssa_name (perm_dest);

  /* Generate the permute statement.  */
  perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
  vect_finish_stmt_generation (stmt, perm_stmt, gsi);

  return data_ref;
}
/* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
   inserting them on the loops preheader edge.  Returns true if we
   were successful in doing so (and thus STMT can be moved then),
   otherwise returns false.  */

static bool
hoist_defs_of_uses (gimple *stmt, struct loop *loop)
{
  ssa_op_iter i;
  tree op;
  bool any = false;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
          && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
        {
          /* Make sure we don't need to recurse.  While we could do
             so in simple cases when there are more complex use webs
             we don't have an easy way to preserve stmt order to fulfil
             dependencies within them.  */
          tree op2;
          ssa_op_iter i2;
          if (gimple_code (def_stmt) == GIMPLE_PHI)
            return false;
          FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
            {
              gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
              if (!gimple_nop_p (def_stmt2)
                  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
                return false;
            }
          any = true;
        }
    }

  if (!any)
    return true;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
          && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
        {
          gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
          gsi_remove (&gsi, false);
          gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
        }
    }

  return true;
}
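
/* Illustrative sketch, assumptions labeled: given an invariant load

     # in-loop defs
     p_1 = &a[4];
     x_2 = *p_1;     <-- STMT

   the only in-loop def feeding STMT (p_1) itself uses only loop-invariant
   operands, so it is moved onto the preheader edge and STMT can be hoisted
   after it; a PHI def or a deeper use web makes the function return false
   instead.  */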
6177 /* vectorizable_load.
6179 Check if STMT reads a non scalar data-ref (array/pointer/structure) that
6181 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
6182 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
6183 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
6186 vectorizable_load (gimple
*stmt
, gimple_stmt_iterator
*gsi
, gimple
**vec_stmt
,
6187 slp_tree slp_node
, slp_instance slp_node_instance
)
  tree scalar_dest;
  tree vec_dest = NULL;
  tree data_ref = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info prev_stmt_info;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
  bool nested_in_vect_loop = false;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
  tree elem_type;
  tree new_temp;
  machine_mode mode;
  gimple *new_stmt = NULL;
  tree dummy;
  enum dr_alignment_support alignment_support_scheme;
  tree dataref_ptr = NULL_TREE;
  tree dataref_offset = NULL_TREE;
  gimple *ptr_incr = NULL;
  int ncopies;
  int i, j, group_size = -1, group_gap_adj;
  tree msq = NULL_TREE, lsq;
  tree offset = NULL_TREE;
  tree byte_offset = NULL_TREE;
  tree realignment_token = NULL_TREE;
  gphi *phi = NULL;
  vec<tree> dr_chain = vNULL;
  bool grouped_load = false;
  bool load_lanes_p = false;
  gimple *first_stmt;
  gimple *first_stmt_for_drptr = NULL;
  bool inv_p;
  bool negative = false;
  bool compute_in_loop = false;
  struct loop *at_loop;
  int vec_num;
  bool slp = (slp_node != NULL);
  bool slp_perm = false;
  enum tree_code code;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  int vf;
  tree aggr_type;
  tree gather_base = NULL_TREE, gather_off = NULL_TREE;
  tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
  int gather_scale = 1;
  enum vect_def_type gather_dt = vect_unknown_def_type;
  vec_info *vinfo = stmt_info->vinfo;
  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is vectorizable load?  */
  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != ARRAY_REF
      && code != BIT_FIELD_REF
      && code != INDIRECT_REF
      && code != COMPONENT_REF
      && code != IMAGPART_EXPR
      && code != REALPART_EXPR
      && code != MEM_REF
      && TREE_CODE_CLASS (code) != tcc_declaration)
    return false;

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (loop_vinfo)
    {
      loop = LOOP_VINFO_LOOP (loop_vinfo);
      nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
      vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
    }
  else
    vf = 1;
  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  /* FORNOW.  This restriction should be relaxed.  */
  if (nested_in_vect_loop && ncopies > 1)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "multiple types in nested loop.\n");
      return false;
    }
  /* Invalidate assumptions made by dependence analysis when vectorization
     on the unrolled body effectively re-orders stmts.  */
  if (ncopies > 1
      && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
      && ((unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
          > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "cannot perform implicit CSE when unrolling "
                         "with negative dependence distance\n");
      return false;
    }
  elem_type = TREE_TYPE (vectype);
  mode = TYPE_MODE (vectype);

  /* FORNOW.  In some cases can vectorize even if data-type not supported
     (e.g. - data copies).  */
  if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Aligned load, but unsupported type.\n");
      return false;
    }
  /* Check if the load is a part of an interleaving chain.  */
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      grouped_load = true;
      /* FORNOW */
      gcc_assert (!nested_in_vect_loop
                  && !STMT_VINFO_GATHER_SCATTER_P (stmt_info));

      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);

      /* If this is single-element interleaving with an element distance
         that leaves unused vector loads around punt - we at least create
         very sub-optimal code in that case (and blow up memory).  */
      bool force_peeling = false;
      if (first_stmt == stmt
          && !GROUP_NEXT_ELEMENT (stmt_info))
        {
          if (GROUP_SIZE (stmt_info) > TYPE_VECTOR_SUBPARTS (vectype))
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "single-element interleaving not supported "
                                 "for not adjacent vector loads\n");
              return false;
            }

          /* Single-element interleaving requires peeling for gaps.  */
          force_peeling = true;
        }

      /* If there is a gap in the end of the group or the group size cannot
         be made a multiple of the vector element count then we access excess
         elements in the last iteration and thus need to peel that off.  */
      if (loop_vinfo
          && ! STMT_VINFO_STRIDED_P (stmt_info)
          && (force_peeling
              || GROUP_GAP (vinfo_for_stmt (first_stmt)) != 0
              || (!slp && vf % GROUP_SIZE (vinfo_for_stmt (first_stmt)) != 0)))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "Data access with gaps requires scalar "
                             "epilogue loop\n");
          if (loop->inner)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "Peeling for outer loop is not supported\n");
              return false;
            }

          LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
        }
      if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
        slp_perm = true;

      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      if (!slp
          && !PURE_SLP_STMT (stmt_info)
          && !STMT_VINFO_STRIDED_P (stmt_info))
        {
          if (vect_load_lanes_supported (vectype, group_size))
            load_lanes_p = true;
          else if (!vect_grouped_load_supported (vectype, group_size))
            return false;
        }

      /* Invalidate assumptions made by dependence analysis when vectorization
         on the unrolled body effectively re-orders stmts.  */
      if (!PURE_SLP_STMT (stmt_info)
          && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
          && ((unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
              > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "cannot perform implicit CSE when performing "
                             "group loads with negative dependence distance\n");
          return false;
        }

      /* Similarly when the stmt is a load that is both part of a SLP
         instance and a loop vectorized stmt via the same-dr mechanism
         we have to give up.  */
      if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)
          && (STMT_SLP_TYPE (stmt_info)
              != STMT_SLP_TYPE (vinfo_for_stmt
                                  (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)))))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "conflicting SLP types for CSEd load\n");
          return false;
        }
    }
  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      gimple *def_stmt;
      gather_decl = vect_check_gather_scatter (stmt, loop_vinfo, &gather_base,
                                               &gather_off, &gather_scale);
      gcc_assert (gather_decl);
      if (!vect_is_simple_use (gather_off, vinfo, &def_stmt, &gather_dt,
                               &gather_off_vectype))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "gather index use not simple.\n");
          return false;
        }
    }
  else if (STMT_VINFO_STRIDED_P (stmt_info))
    {
      if ((grouped_load
           && (slp || PURE_SLP_STMT (stmt_info)))
          && (group_size > nunits
              || nunits % group_size != 0))
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "unhandled strided group load\n");
          return false;
        }
    }
  else
    {
      negative = tree_int_cst_compare (nested_in_vect_loop
                                       ? STMT_VINFO_DR_STEP (stmt_info)
                                       : DR_STEP (dr),
                                       size_zero_node) < 0;
      if (negative && ncopies > 1)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "multiple types with negative step.\n");
          return false;
        }
      if (negative)
        {
          if (grouped_load)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "negative step for group load not supported"
                                 "\n");
              return false;
            }
          alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
          if (alignment_support_scheme != dr_aligned
              && alignment_support_scheme != dr_unaligned_supported)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "negative step but alignment required.\n");
              return false;
            }
          if (!perm_mask_for_reverse (vectype))
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "negative step and reversing not supported."
                                 "\n");
              return false;
            }
        }
    }
  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
      /* The SLP costs are calculated during SLP analysis.  */
      if (!PURE_SLP_STMT (stmt_info))
        vect_model_load_cost (stmt_info, ncopies, load_lanes_p,
                              NULL, NULL, NULL);
      return true;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform load. ncopies = %d\n", ncopies);

  ensure_base_align (stmt_info, dr);
  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      tree vec_oprnd0 = NULL_TREE, op;
      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
      tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
      tree ptr, mask, var, scale, merge, perm_mask = NULL_TREE;
      tree prev_res = NULL_TREE;
      edge pe = loop_preheader_edge (loop);
      gimple_seq seq;
      basic_block new_bb;
      enum { NARROW, NONE, WIDEN } modifier;
      int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);

      if (nunits == gather_off_nunits)
        modifier = NONE;
      else if (nunits == gather_off_nunits / 2)
        {
          unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
          modifier = WIDEN;

          for (i = 0; i < gather_off_nunits; ++i)
            sel[i] = i | nunits;

          perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
        }
      else if (nunits == gather_off_nunits * 2)
        {
          unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
          modifier = NARROW;

          for (i = 0; i < nunits; ++i)
            sel[i] = i < gather_off_nunits
                     ? i : i + nunits - gather_off_nunits;

          perm_mask = vect_gen_perm_mask_checked (vectype, sel);
          ncopies *= 2;
        }
      else
        gcc_unreachable ();
      rettype = TREE_TYPE (TREE_TYPE (gather_decl));
      srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      scaletype = TREE_VALUE (arglist);
      gcc_checking_assert (types_compatible_p (srctype, rettype));

      vec_dest = vect_create_destination_var (scalar_dest, vectype);

      ptr = fold_convert (ptrtype, gather_base);
      if (!is_gimple_min_invariant (ptr))
        {
          ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
          new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
          gcc_assert (!new_bb);
        }

      /* Currently we support only unconditional gather loads,
         so mask should be all ones.  */
      if (TREE_CODE (masktype) == INTEGER_TYPE)
        mask = build_int_cst (masktype, -1);
      else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
        {
          mask = build_int_cst (TREE_TYPE (masktype), -1);
          mask = build_vector_from_val (masktype, mask);
          mask = vect_init_vector (stmt, mask, masktype, NULL);
        }
      else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
        {
          REAL_VALUE_TYPE r;
          long tmp[6];
          for (j = 0; j < 6; ++j)
            tmp[j] = -1;
          real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
          mask = build_real (TREE_TYPE (masktype), r);
          mask = build_vector_from_val (masktype, mask);
          mask = vect_init_vector (stmt, mask, masktype, NULL);
        }
      else
        gcc_unreachable ();

      scale = build_int_cst (scaletype, gather_scale);

      if (TREE_CODE (TREE_TYPE (rettype)) == INTEGER_TYPE)
        merge = build_int_cst (TREE_TYPE (rettype), 0);
      else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype)))
        {
          REAL_VALUE_TYPE r;
          long tmp[6];
          for (j = 0; j < 6; ++j)
            tmp[j] = 0;
          real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (rettype)));
          merge = build_real (TREE_TYPE (rettype), r);
        }
      else
        gcc_unreachable ();
      merge = build_vector_from_val (rettype, merge);
      merge = vect_init_vector (stmt, merge, rettype, NULL);
      prev_stmt_info = NULL;
      for (j = 0; j < ncopies; ++j)
        {
          if (modifier == WIDEN && (j & 1))
            op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
                                       perm_mask, stmt, gsi);
          else if (j == 0)
            op = vec_oprnd0
              = vect_get_vec_def_for_operand (gather_off, stmt);
          else
            op = vec_oprnd0
              = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);

          if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
            {
              gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
                          == TYPE_VECTOR_SUBPARTS (idxtype));
              var = vect_get_new_ssa_name (idxtype, vect_simple_var);
              op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
              new_stmt
                = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              op = var;
            }

          new_stmt
            = gimple_build_call (gather_decl, 5, merge, ptr, op, mask, scale);

          if (!useless_type_conversion_p (vectype, rettype))
            {
              gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
                          == TYPE_VECTOR_SUBPARTS (rettype));
              op = vect_get_new_ssa_name (rettype, vect_simple_var);
              gimple_call_set_lhs (new_stmt, op);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              var = make_ssa_name (vec_dest);
              op = build1 (VIEW_CONVERT_EXPR, vectype, op);
              new_stmt
                = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
            }
          else
            {
              var = make_ssa_name (vec_dest, new_stmt);
              gimple_call_set_lhs (new_stmt, var);
            }

          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          if (modifier == NARROW)
            {
              if ((j & 1) == 0)
                {
                  prev_res = var;
                  continue;
                }
              var = permute_vec_elements (prev_res, var,
                                          perm_mask, stmt, gsi);
              new_stmt = SSA_NAME_DEF_STMT (var);
            }

          if (prev_stmt_info == NULL)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
      return true;
    }
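  /* Illustrative sketch, not from the original source: the built-in call
     emitted above implements, element-wise,

       for (k = 0; k < nunits; ++k)
         result[k] = mask[k] ? *(base + offset[k] * scale) : merge[k];

     and since only unconditional gathers are supported here the mask is
     all ones, so every element is loaded.  */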
  else if (STMT_VINFO_STRIDED_P (stmt_info))
    {
      gimple_stmt_iterator incr_gsi;
      bool insert_after;
      gimple *incr;
      tree offvar;
      tree ivstep;
      tree running_off;
      vec<constructor_elt, va_gc> *v = NULL;
      gimple_seq stmts = NULL;
      tree stride_base, stride_step, alias_off;

      gcc_assert (!nested_in_vect_loop);

      if (slp && grouped_load)
        first_dr = STMT_VINFO_DATA_REF
            (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
      else
        first_dr = dr;

      stride_base
        = fold_build_pointer_plus
            (DR_BASE_ADDRESS (first_dr),
             size_binop (PLUS_EXPR,
                         convert_to_ptrofftype (DR_OFFSET (first_dr)),
                         convert_to_ptrofftype (DR_INIT (first_dr))));
      stride_step = fold_convert (sizetype, DR_STEP (first_dr));
      /* For a load with loop-invariant (but other than power-of-2)
         stride (i.e. not a grouped access) like so:

           for (i = 0; i < n; i += stride)
             ... = array[i];

         we generate a new induction variable and new accesses to
         form a new vector (or vectors, depending on ncopies):

           for (j = 0; ; j += VF*stride)
             tmp1 = array[j];
             tmp2 = array[j + stride];
             ...
             vectemp = {tmp1, tmp2, ...}
             ...  */

      ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
                            build_int_cst (TREE_TYPE (stride_step), vf));

      standard_iv_increment_position (loop, &incr_gsi, &insert_after);

      create_iv (unshare_expr (stride_base), unshare_expr (ivstep), NULL,
                 loop, &incr_gsi, insert_after,
                 &offvar, NULL);
      incr = gsi_stmt (incr_gsi);
      set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));

      stride_step = force_gimple_operand (unshare_expr (stride_step),
                                          &stmts, true, NULL_TREE);
      if (stmts)
        gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
      prev_stmt_info = NULL;
      running_off = offvar;
      alias_off = build_int_cst (reference_alias_ptr_type
                                   (DR_REF (first_dr)), 0);
      int nloads = nunits;
      tree ltype = TREE_TYPE (vectype);
      auto_vec<tree> dr_chain;
      if (slp)
        {
          nloads = nunits / group_size;
          if (group_size < nunits)
            ltype = build_vector_type (TREE_TYPE (vectype), group_size);
          else
            ltype = vectype;
          ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
          ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
          if (slp_perm)
            dr_chain.create (ncopies);
        }
      for (j = 0; j < ncopies; j++)
        {
          if (nloads > 1)
            {
              vec_alloc (v, nloads);
              for (i = 0; i < nloads; i++)
                {
                  tree newref, newoff;

                  newref = build2 (MEM_REF, ltype, running_off, alias_off);
                  newref = force_gimple_operand_gsi (gsi, newref, true,
                                                     NULL_TREE, true,
                                                     GSI_SAME_STMT);
                  CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, newref);
                  newoff = copy_ssa_name (running_off);
                  incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
                                              running_off, stride_step);
                  vect_finish_stmt_generation (stmt, incr, gsi);

                  running_off = newoff;
                }

              tree vec_inv = build_constructor (vectype, v);
              new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
              new_stmt = SSA_NAME_DEF_STMT (new_temp);
            }
          else
            {
              new_stmt = gimple_build_assign (make_ssa_name (ltype),
                                              build2 (MEM_REF, ltype,
                                                      running_off, alias_off));
              vect_finish_stmt_generation (stmt, new_stmt, gsi);

              tree newoff = copy_ssa_name (running_off);
              gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
                                                  running_off, stride_step);
              vect_finish_stmt_generation (stmt, incr, gsi);

              running_off = newoff;
            }

          if (slp)
            {
              SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
              if (slp_perm)
                dr_chain.quick_push (gimple_assign_lhs (new_stmt));
            }
          else
            {
              if (j == 0)
                STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
              else
                STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
              prev_stmt_info = vinfo_for_stmt (new_stmt);
            }
        }
      if (slp_perm)
        vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
                                      slp_node_instance, false);
      return true;
    }
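  /* Worked example, not from the original source: for an SLP group with
     group_size == 2 and nunits == 4, nloads == 4 / 2 == 2 and ltype is a
     2-element vector; each vector statement is then assembled from two
     2-element loads placed one stride apart, gathered through the
     CONSTRUCTOR built above.  */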
  if (grouped_load)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      /* For SLP vectorization we directly vectorize a subchain
         without permutation.  */
      if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
        first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
      /* For BB vectorization always use the first stmt to base
         the data ref pointer on.  */
      if (bb_vinfo)
        first_stmt_for_drptr = SLP_TREE_SCALAR_STMTS (slp_node)[0];

      /* Check if the chain of loads is already vectorized.  */
      if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
          /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
             ??? But we can only do so if there is exactly one
             as we have no way to get at the rest.  Leave the CSE
             opportunity alone.
             ??? With the group load eventually participating
             in multiple different permutations (having multiple
             slp nodes which refer to the same group) the CSE
             is even wrong code.  See PR56270.  */
          && !slp)
        {
          *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
          return true;
        }
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      group_gap_adj = 0;

      /* VEC_NUM is the number of vect stmts to be created for this group.  */
      if (slp)
        {
          grouped_load = false;
          /* For SLP permutation support we need to load the whole group,
             not only the number of vector stmts the permutation result
             fits in.  */
          if (slp_perm)
            vec_num = (group_size * vf + nunits - 1) / nunits;
          else
            vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
          group_gap_adj = vf * group_size - nunits * vec_num;
        }
      else
        vec_num = group_size;
    }
  else
    {
      first_stmt = stmt;
      first_dr = dr;
      group_size = vec_num = 1;
      group_gap_adj = 0;
    }
  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
  gcc_assert (alignment_support_scheme);
  /* Targets with load-lane instructions must not require explicit
     realignment.  */
  gcc_assert (!load_lanes_p
              || alignment_support_scheme == dr_aligned
              || alignment_support_scheme == dr_unaligned_supported);
  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
     need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1:
        We first create the vector stmt VS1_0, and, as usual, record a
        pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
        Next, we create the vector stmt VS1_1, and record a pointer to
        it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
        Similarly, for VS1_2 and VS1_3.  This is the resulting chain of
        stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     See in documentation in vect_get_vec_def_for_stmt_copy for how the
     information we recorded in RELATED_STMT field is used to vectorize
     stmt S2.  */
  /* In case of interleaving (non-unit grouped access):

       S1:  x2 = &base + 2
       S2:  x0 = &base
       S3:  x1 = &base + 1
       S4:  x3 = &base + 3

     Vectorized loads are created in the order of memory accesses
     starting from the access of the first stmt of the chain:

       VS1: vx0 = &base
       VS2: vx1 = &base + vec_size*1
       VS3: vx3 = &base + vec_size*2
       VS4: vx4 = &base + vec_size*3

     Then permutation statements are generated:

       VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
       VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
       ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_load_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_load_chain()).
     The generation of permutation stmts and recording them in
     STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().

     In case of both multiple types and interleaving, the vector loads and
     permutation stmts above are created for every copy.  The result vector
     stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
     corresponding STMT_VINFO_RELATED_STMT for the next copies.  */
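  /* Worked example, not from the original source: with a group of two
     interleaved loads and 4-element vectors, two adjacent vector loads

       vx0 = { a0, b0, a1, b1 }    vx1 = { a2, b2, a3, b3 }

     are deinterleaved by the two VEC_PERM_EXPRs above into

       vx5 = { a0, a1, a2, a3 }    (mask { 0, 2, 4, 6 })
       vx6 = { b0, b1, b2, b3 }    (mask { 1, 3, 5, 7 })  */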
  /* If the data reference is aligned (dr_aligned) or potentially unaligned
     on a target that supports unaligned accesses (dr_unaligned_supported)
     we generate the following code:

       p = initial_addr;
       indx = 0;
       loop {
         p = p + indx * vectype_size;
         vec_dest = *(p);
         indx = indx + 1;
       }

     Otherwise, the data reference is potentially unaligned on a target that
     does not support unaligned accesses (dr_explicit_realign_optimized) -
     then generate the following code, in which the data in each iteration is
     obtained by two vector loads, one from the previous iteration, and one
     from the current iteration:

       msq_init = *(floor(p1))
       p2 = initial_addr + VS - 1;
       realignment_token = call target_builtin;
       indx = 0;
       loop {
         p2 = p2 + indx * vectype_size
         lsq = *(floor(p2))
         vec_dest = realign_load (msq, lsq, realignment_token)
         indx = indx + 1;
         msq = lsq;
       }   */
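  /* Worked example, not from the original source: for VS == 4 elements
     and an address misaligned by one element, floor(p1) loads
     { a-1, a0, a1, a2 } and floor(p2) loads { a3, a4, a5, a6 };
     realign_load extracts { a0, a1, a2, a3 } from the pair.  In the
     steady state each iteration reuses the previous iteration's lsq as
     msq, so only one new vector load is issued per iteration.  */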
  /* If the misalignment remains the same throughout the execution of the
     loop, we can create the init_addr and permutation mask at the loop
     preheader.  Otherwise, it needs to be created inside the loop.
     This can only occur when vectorizing memory accesses in the inner-loop
     nested within an outer-loop that is being vectorized.  */

  if (nested_in_vect_loop
      && (TREE_INT_CST_LOW (DR_STEP (dr))
          % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
    {
      gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
      compute_in_loop = true;
    }

  if ((alignment_support_scheme == dr_explicit_realign_optimized
       || alignment_support_scheme == dr_explicit_realign)
      && !compute_in_loop)
    {
      msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
                                    alignment_support_scheme, NULL_TREE,
                                    &at_loop);
      if (alignment_support_scheme == dr_explicit_realign_optimized)
        {
          phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
          byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
                                    size_one_node);
        }
    }
  else
    at_loop = loop;
  if (negative)
    offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);

  if (load_lanes_p)
    aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
  else
    aggr_type = vectype;

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* 1. Create the vector or array pointer update chain.  */
      if (j == 0)
        {
          bool simd_lane_access_p
            = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
          if (simd_lane_access_p
              && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
              && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
              && integer_zerop (DR_OFFSET (first_dr))
              && integer_zerop (DR_INIT (first_dr))
              && alias_sets_conflict_p (get_alias_set (aggr_type),
                                        get_alias_set (DR_REF (first_dr)))
              && (alignment_support_scheme == dr_aligned
                  || alignment_support_scheme == dr_unaligned_supported))
            {
              dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
              dataref_offset = build_int_cst (reference_alias_ptr_type
                                                (DR_REF (first_dr)), 0);
              inv_p = false;
            }
          else if (first_stmt_for_drptr
                   && first_stmt != first_stmt_for_drptr)
            {
              dataref_ptr
                = vect_create_data_ref_ptr (first_stmt_for_drptr, aggr_type,
                                            at_loop, offset, &dummy, gsi,
                                            &ptr_incr, simd_lane_access_p,
                                            &inv_p, byte_offset);
              /* Adjust the pointer by the difference to first_stmt.  */
              data_reference_p ptrdr
                = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt_for_drptr));
              tree diff = fold_convert (sizetype,
                                        size_binop (MINUS_EXPR,
                                                    DR_INIT (first_dr),
                                                    DR_INIT (ptrdr)));
              dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                             stmt, diff);
            }
          else
            dataref_ptr
              = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
                                          offset, &dummy, gsi, &ptr_incr,
                                          simd_lane_access_p, &inv_p,
                                          byte_offset);
        }
      else if (dataref_offset)
        dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
                                          TYPE_SIZE_UNIT (aggr_type));
      else
        dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
                                       TYPE_SIZE_UNIT (aggr_type));

      if (grouped_load || slp_perm)
        dr_chain.create (vec_num);
      if (load_lanes_p)
        {
          tree vec_array;

          vec_array = create_vector_array (vectype, vec_num);

          /* Emit:
               VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]).  */
          data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
          new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
          gimple_call_set_lhs (new_stmt, vec_array);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          /* Extract each vector into an SSA_NAME.  */
          for (i = 0; i < vec_num; i++)
            {
              new_temp = read_vector_array (stmt, gsi, scalar_dest,
                                            vec_array, i);
              dr_chain.quick_push (new_temp);
            }

          /* Record the mapping between SSA_NAMEs and statements.  */
          vect_record_grouped_load_vectors (stmt, dr_chain);
        }
      else
        {
          for (i = 0; i < vec_num; i++)
            {
              if (i > 0)
                dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                               stmt, NULL_TREE);

              /* 2. Create the vector-load in the loop.  */
              switch (alignment_support_scheme)
                {
                case dr_aligned:
                case dr_unaligned_supported:
                  {
                    unsigned int align, misalign;

                    data_ref
                      = fold_build2 (MEM_REF, vectype, dataref_ptr,
                                     dataref_offset
                                     ? dataref_offset
                                     : build_int_cst (reference_alias_ptr_type
                                                      (DR_REF (first_dr)), 0));
                    align = TYPE_ALIGN_UNIT (vectype);
                    if (alignment_support_scheme == dr_aligned)
                      {
                        gcc_assert (aligned_access_p (first_dr));
                        misalign = 0;
                      }
                    else if (DR_MISALIGNMENT (first_dr) == -1)
                      {
                        if (DR_VECT_AUX (first_dr)->base_element_aligned)
                          align = TYPE_ALIGN_UNIT (elem_type);
                        else
                          align = (get_object_alignment (DR_REF (first_dr))
                                   / BITS_PER_UNIT);
                        misalign = 0;
                        TREE_TYPE (data_ref)
                          = build_aligned_type (TREE_TYPE (data_ref),
                                                align * BITS_PER_UNIT);
                      }
                    else
                      {
                        TREE_TYPE (data_ref)
                          = build_aligned_type (TREE_TYPE (data_ref),
                                                TYPE_ALIGN (elem_type));
                        misalign = DR_MISALIGNMENT (first_dr);
                      }
                    if (dataref_offset == NULL_TREE
                        && TREE_CODE (dataref_ptr) == SSA_NAME)
                      set_ptr_info_alignment (get_ptr_info (dataref_ptr),
                                              align, misalign);
                    break;
                  }
                case dr_explicit_realign:
                  {
                    tree ptr, bump;

                    tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));

                    if (compute_in_loop)
                      msq = vect_setup_realignment (first_stmt, gsi,
                                                    &realignment_token,
                                                    dr_explicit_realign,
                                                    dataref_ptr, NULL);

                    if (TREE_CODE (dataref_ptr) == SSA_NAME)
                      ptr = copy_ssa_name (dataref_ptr);
                    else
                      ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
                    new_stmt = gimple_build_assign
                                 (ptr, BIT_AND_EXPR, dataref_ptr,
                                  build_int_cst
                                    (TREE_TYPE (dataref_ptr),
                                     -(HOST_WIDE_INT) TYPE_ALIGN_UNIT (vectype)));
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);
                    data_ref
                      = build2 (MEM_REF, vectype, ptr,
                                build_int_cst (reference_alias_ptr_type
                                                 (DR_REF (first_dr)), 0));
                    vec_dest = vect_create_destination_var (scalar_dest,
                                                            vectype);
                    new_stmt = gimple_build_assign (vec_dest, data_ref);
                    new_temp = make_ssa_name (vec_dest, new_stmt);
                    gimple_assign_set_lhs (new_stmt, new_temp);
                    gimple_set_vdef (new_stmt, gimple_vdef (stmt));
                    gimple_set_vuse (new_stmt, gimple_vuse (stmt));
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);
                    msq = new_temp;

                    bump = size_binop (MULT_EXPR, vs,
                                       TYPE_SIZE_UNIT (elem_type));
                    bump = size_binop (MINUS_EXPR, bump, size_one_node);
                    ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
                    new_stmt = gimple_build_assign
                                 (NULL_TREE, BIT_AND_EXPR, ptr,
                                  build_int_cst
                                    (TREE_TYPE (ptr),
                                     -(HOST_WIDE_INT) TYPE_ALIGN_UNIT (vectype)));
                    ptr = copy_ssa_name (ptr, new_stmt);
                    gimple_assign_set_lhs (new_stmt, ptr);
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);
                    data_ref
                      = build2 (MEM_REF, vectype, ptr,
                                build_int_cst (reference_alias_ptr_type
                                                 (DR_REF (first_dr)), 0));
                    break;
                  }
                case dr_explicit_realign_optimized:
                  if (TREE_CODE (dataref_ptr) == SSA_NAME)
                    new_temp = copy_ssa_name (dataref_ptr);
                  else
                    new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
                  new_stmt = gimple_build_assign
                               (new_temp, BIT_AND_EXPR, dataref_ptr,
                                build_int_cst
                                  (TREE_TYPE (dataref_ptr),
                                   -(HOST_WIDE_INT) TYPE_ALIGN_UNIT (vectype)));
                  vect_finish_stmt_generation (stmt, new_stmt, gsi);
                  data_ref
                    = build2 (MEM_REF, vectype, new_temp,
                              build_int_cst (reference_alias_ptr_type
                                               (DR_REF (first_dr)), 0));
                  break;
                default:
                  gcc_unreachable ();
                }
              vec_dest = vect_create_destination_var (scalar_dest, vectype);
              new_stmt = gimple_build_assign (vec_dest, data_ref);
              new_temp = make_ssa_name (vec_dest, new_stmt);
              gimple_assign_set_lhs (new_stmt, new_temp);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              /* 3. Handle explicit realignment if necessary/supported.
                 Create in loop:
                   vec_dest = realign_load (msq, lsq, realignment_token)  */
              if (alignment_support_scheme == dr_explicit_realign_optimized
                  || alignment_support_scheme == dr_explicit_realign)
                {
                  lsq = gimple_assign_lhs (new_stmt);
                  if (!realignment_token)
                    realignment_token = dataref_ptr;
                  vec_dest = vect_create_destination_var (scalar_dest, vectype);
                  new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
                                                  msq, lsq, realignment_token);
                  new_temp = make_ssa_name (vec_dest, new_stmt);
                  gimple_assign_set_lhs (new_stmt, new_temp);
                  vect_finish_stmt_generation (stmt, new_stmt, gsi);

                  if (alignment_support_scheme == dr_explicit_realign_optimized)
                    {
                      gcc_assert (phi);
                      if (i == vec_num - 1 && j == ncopies - 1)
                        add_phi_arg (phi, lsq,
                                     loop_latch_edge (containing_loop),
                                     UNKNOWN_LOCATION);
                      msq = lsq;
                    }
                }
              /* 4. Handle invariant-load.  */
              if (inv_p && !bb_vinfo)
                {
                  gcc_assert (!grouped_load);
                  /* If we have versioned for aliasing or the loop doesn't
                     have any data dependencies that would preclude this,
                     then we are sure this is a loop invariant load and
                     thus we can insert it on the preheader edge.  */
                  if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
                      && !nested_in_vect_loop
                      && hoist_defs_of_uses (stmt, loop))
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_NOTE, vect_location,
                                           "hoisting out of the vectorized "
                                           "loop: ");
                          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
                        }
                      tree tem = copy_ssa_name (scalar_dest);
                      gsi_insert_on_edge_immediate
                        (loop_preheader_edge (loop),
                         gimple_build_assign (tem,
                                              unshare_expr
                                                (gimple_assign_rhs1 (stmt))));
                      new_temp = vect_init_vector (stmt, tem, vectype, NULL);
                      new_stmt = SSA_NAME_DEF_STMT (new_temp);
                      set_vinfo_for_stmt (new_stmt,
                                          new_stmt_vec_info (new_stmt, vinfo));
                    }
                  else
                    {
                      gimple_stmt_iterator gsi2 = *gsi;
                      gsi_next (&gsi2);
                      new_temp = vect_init_vector (stmt, scalar_dest,
                                                   vectype, &gsi2);
                      new_stmt = SSA_NAME_DEF_STMT (new_temp);
                    }
                }

              if (negative)
                {
                  tree perm_mask = perm_mask_for_reverse (vectype);
                  new_temp = permute_vec_elements (new_temp, new_temp,
                                                   perm_mask, stmt, gsi);
                  new_stmt = SSA_NAME_DEF_STMT (new_temp);
                }
              /* Collect vector loads and later create their permutation in
                 vect_transform_grouped_load ().  */
              if (grouped_load || slp_perm)
                dr_chain.quick_push (new_temp);

              /* Store vector loads in the corresponding SLP_NODE.  */
              if (slp && !slp_perm)
                SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
            }
          /* Bump the vector pointer to account for a gap or for excess
             elements loaded for a permuted SLP load.  */
          if (group_gap_adj != 0)
            {
              bool ovf;
              tree bump
                = wide_int_to_tree (sizetype,
                                    wi::smul (TYPE_SIZE_UNIT (elem_type),
                                              group_gap_adj, &ovf));
              dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                             stmt, bump);
            }
        }
      if (slp && !slp_perm)
        continue;

      if (slp_perm)
        {
          if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
                                             slp_node_instance, false))
            {
              dr_chain.release ();
              return false;
            }
        }
      else
        {
          if (grouped_load)
            {
              if (!load_lanes_p)
                vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
              *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
            }
          else
            {
              if (j == 0)
                STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
              else
                STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
              prev_stmt_info = vinfo_for_stmt (new_stmt);
            }
        }
      dr_chain.release ();
    }

  return true;
}
/* Function vect_is_simple_cond.

   Input:
   LOOP - the loop that is being vectorized.
   COND - Condition that is checked for simple use.

   Output:
   *COMP_VECTYPE - the vector type for the comparison.

   Returns whether a COND can be vectorized.  Checks whether
   condition operands are supportable using vect_is_simple_use.  */

static bool
vect_is_simple_cond (tree cond, vec_info *vinfo, tree *comp_vectype)
{
  tree lhs, rhs;
  enum vect_def_type dt;
  tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;

  /* Mask case.  */
  if (TREE_CODE (cond) == SSA_NAME
      && TREE_CODE (TREE_TYPE (cond)) == BOOLEAN_TYPE)
    {
      gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (cond);
      if (!vect_is_simple_use (cond, vinfo, &lhs_def_stmt,
                               &dt, comp_vectype)
          || !*comp_vectype
          || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype))
        return false;
      return true;
    }

  if (!COMPARISON_CLASS_P (cond))
    return false;

  lhs = TREE_OPERAND (cond, 0);
  rhs = TREE_OPERAND (cond, 1);

  if (TREE_CODE (lhs) == SSA_NAME)
    {
      gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
      if (!vect_is_simple_use (lhs, vinfo, &lhs_def_stmt, &dt, &vectype1))
        return false;
    }
  else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
           && TREE_CODE (lhs) != FIXED_CST)
    return false;

  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple *rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
      if (!vect_is_simple_use (rhs, vinfo, &rhs_def_stmt, &dt, &vectype2))
        return false;
    }
  else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
           && TREE_CODE (rhs) != FIXED_CST)
    return false;

  *comp_vectype = vectype1 ? vectype1 : vectype2;
  return true;
}
/* vectorizable_condition.

   Check if STMT is conditional modify expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
   at GSI.

   When STMT is vectorized as nested cycle, REDUC_DEF is the vector variable
   to be used at REDUC_INDEX (in then clause if REDUC_INDEX is 1, and in
   else clause if it is 2).

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

bool
vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
                        gimple **vec_stmt, tree reduc_def, int reduc_index,
                        slp_tree slp_node)
{
  tree scalar_dest = NULL_TREE;
  tree vec_dest = NULL_TREE;
  tree cond_expr, then_clause, else_clause;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree comp_vectype = NULL_TREE;
  tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
  tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
  tree vec_compare, vec_cond_expr;
  tree new_temp;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum vect_def_type dt, dts[4];
  int ncopies;
  enum tree_code code;
  stmt_vec_info prev_stmt_info = NULL;
  int i, j;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  vec<tree> vec_oprnds2 = vNULL;
  vec<tree> vec_oprnds3 = vNULL;
  tree vec_cmp_type;
  bool masked = false;

  if (reduc_index && STMT_SLP_TYPE (stmt_info))
    return false;

  if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == TREE_CODE_REDUCTION)
    {
      if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
        return false;

      if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
          && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
               && reduc_def))
        return false;

      /* FORNOW: not yet supported.  */
      if (STMT_VINFO_LIVE_P (stmt_info))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "value used after loop.\n");
          return false;
        }
    }
  /* Is vectorizable conditional operation?  */
  if (!is_gimple_assign (stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (code != COND_EXPR)
    return false;

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);
  if (reduc_index && ncopies > 1)
    return false; /* FORNOW */

  cond_expr = gimple_assign_rhs1 (stmt);
  then_clause = gimple_assign_rhs2 (stmt);
  else_clause = gimple_assign_rhs3 (stmt);

  if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo, &comp_vectype)
      || !comp_vectype)
    return false;

  gimple *def_stmt;
  if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &def_stmt, &dt))
    return false;
  if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &def_stmt, &dt))
    return false;

  if (VECTOR_BOOLEAN_TYPE_P (comp_vectype))
    {
      vec_cmp_type = comp_vectype;
      masked = true;
    }
  else
    vec_cmp_type = build_same_sized_truth_vector_type (comp_vectype);

  if (vec_cmp_type == NULL_TREE)
    return false;

  if (!vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
      return expand_vec_cond_expr_p (vectype, comp_vectype);
    }
  /* Transform.  */

  if (!slp_node)
    {
      vec_oprnds0.create (1);
      vec_oprnds1.create (1);
      vec_oprnds2.create (1);
      vec_oprnds3.create (1);
    }

  /* Handle def.  */
  scalar_dest = gimple_assign_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle cond expr.  */
  for (j = 0; j < ncopies; j++)
    {
      gassign *new_stmt = NULL;
      if (j == 0)
        {
          if (slp_node)
            {
              auto_vec<tree, 4> ops;
              auto_vec<vec<tree>, 4> vec_defs;

              if (masked)
                ops.safe_push (cond_expr);
              else
                {
                  ops.safe_push (TREE_OPERAND (cond_expr, 0));
                  ops.safe_push (TREE_OPERAND (cond_expr, 1));
                }
              ops.safe_push (then_clause);
              ops.safe_push (else_clause);
              vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
              vec_oprnds3 = vec_defs.pop ();
              vec_oprnds2 = vec_defs.pop ();
              if (!masked)
                vec_oprnds1 = vec_defs.pop ();
              vec_oprnds0 = vec_defs.pop ();

              ops.release ();
              vec_defs.release ();
            }
          else
            {
              gimple *gtemp;
              if (masked)
                {
                  vec_cond_lhs
                    = vect_get_vec_def_for_operand (cond_expr, stmt,
                                                    comp_vectype);
                  vect_is_simple_use (cond_expr, stmt_info->vinfo,
                                      &gtemp, &dts[0]);
                }
              else
                {
                  vec_cond_lhs =
                    vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0),
                                                  stmt, comp_vectype);
                  vect_is_simple_use (TREE_OPERAND (cond_expr, 0),
                                      loop_vinfo, &gtemp, &dts[0]);

                  vec_cond_rhs =
                    vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
                                                  stmt, comp_vectype);
                  vect_is_simple_use (TREE_OPERAND (cond_expr, 1),
                                      loop_vinfo, &gtemp, &dts[1]);
                }
              if (reduc_index == 1)
                vec_then_clause = reduc_def;
              else
                {
                  vec_then_clause = vect_get_vec_def_for_operand (then_clause,
                                                                  stmt);
                  vect_is_simple_use (then_clause, loop_vinfo,
                                      &gtemp, &dts[2]);
                }
              if (reduc_index == 2)
                vec_else_clause = reduc_def;
              else
                {
                  vec_else_clause = vect_get_vec_def_for_operand (else_clause,
                                                                  stmt);
                  vect_is_simple_use (else_clause, loop_vinfo, &gtemp, &dts[3]);
                }
            }
        }
      else
        {
          vec_cond_lhs
            = vect_get_vec_def_for_stmt_copy (dts[0],
                                              vec_oprnds0.pop ());
          if (!masked)
            vec_cond_rhs
              = vect_get_vec_def_for_stmt_copy (dts[1],
                                                vec_oprnds1.pop ());

          vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
                                                            vec_oprnds2.pop ());
          vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
                                                            vec_oprnds3.pop ());
        }

      if (!slp_node)
        {
          vec_oprnds0.quick_push (vec_cond_lhs);
          if (!masked)
            vec_oprnds1.quick_push (vec_cond_rhs);
          vec_oprnds2.quick_push (vec_then_clause);
          vec_oprnds3.quick_push (vec_else_clause);
        }

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
        {
          vec_then_clause = vec_oprnds2[i];
          vec_else_clause = vec_oprnds3[i];

          if (masked)
            vec_compare = vec_cond_lhs;
          else
            {
              vec_cond_rhs = vec_oprnds1[i];
              vec_compare = build2 (TREE_CODE (cond_expr), vec_cmp_type,
                                    vec_cond_lhs, vec_cond_rhs);
            }
          vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
                                  vec_compare, vec_then_clause,
                                  vec_else_clause);

          new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_oprnds2.release ();
  vec_oprnds3.release ();

  return true;
}
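/* Illustrative sketch, not from the original source: the scalar
   statement

     x = a < b ? c : d;

   is vectorized above into

     mask_v = va < vb;                          (vec_compare)
     x_v    = VEC_COND_EXPR <mask_v, vc, vd>;

   selecting, per element, from VC where the comparison holds and from
   VD elsewhere.  */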
/* vectorizable_comparison.

   Check if STMT is comparison expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   comparison, put it in VEC_STMT, and insert it at GSI.

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
                         gimple **vec_stmt, tree reduc_def,
                         slp_tree slp_node)
{
  tree lhs, rhs1, rhs2;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE;
  tree new_temp;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type};
  unsigned nunits;
  int ncopies;
  enum tree_code code;
  stmt_vec_info prev_stmt_info = NULL;
  int i, j;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  gimple *def_stmt;
  tree mask_type;
  tree mask;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (!VECTOR_BOOLEAN_TYPE_P (vectype))
    return false;

  mask_type = vectype;
  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);
  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
           && reduc_def))
    return false;

  if (STMT_VINFO_LIVE_P (stmt_info))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "value used after loop.\n");
      return false;
    }

  if (!is_gimple_assign (stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (TREE_CODE_CLASS (code) != tcc_comparison)
    return false;

  rhs1 = gimple_assign_rhs1 (stmt);
  rhs2 = gimple_assign_rhs2 (stmt);

  if (!vect_is_simple_use (rhs1, stmt_info->vinfo, &def_stmt,
                           &dts[0], &vectype1))
    return false;

  if (!vect_is_simple_use (rhs2, stmt_info->vinfo, &def_stmt,
                           &dts[1], &vectype2))
    return false;

  if (vectype1 && vectype2
      && TYPE_VECTOR_SUBPARTS (vectype1) != TYPE_VECTOR_SUBPARTS (vectype2))
    return false;

  vectype = vectype1 ? vectype1 : vectype2;

  /* Invariant comparison.  */
  if (!vectype)
    {
      vectype = build_vector_type (TREE_TYPE (rhs1), nunits);
      if (tree_to_shwi (TYPE_SIZE_UNIT (vectype)) != current_vector_size)
        return false;
    }
  else if (nunits != TYPE_VECTOR_SUBPARTS (vectype))
    return false;

  if (!vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type;
      vect_model_simple_cost (stmt_info, ncopies, dts, NULL, NULL);
      return expand_vec_cmp_expr_p (vectype, mask_type);
    }

  /* Transform.  */
  if (!slp_node)
    {
      vec_oprnds0.create (1);
      vec_oprnds1.create (1);
    }

  /* Handle def.  */
  lhs = gimple_assign_lhs (stmt);
  mask = vect_create_destination_var (lhs, mask_type);

  /* Handle cmp expr.  */
  for (j = 0; j < ncopies; j++)
    {
      gassign *new_stmt = NULL;
      if (j == 0)
        {
          if (slp_node)
            {
              auto_vec<tree, 2> ops;
              auto_vec<vec<tree>, 2> vec_defs;

              ops.safe_push (rhs1);
              ops.safe_push (rhs2);
              vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
              vec_oprnds1 = vec_defs.pop ();
              vec_oprnds0 = vec_defs.pop ();
            }
          else
            {
              vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt, vectype);
              vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt, vectype);
            }
        }
      else
        {
          vec_rhs1 = vect_get_vec_def_for_stmt_copy (dts[0],
                                                     vec_oprnds0.pop ());
          vec_rhs2 = vect_get_vec_def_for_stmt_copy (dts[1],
                                                     vec_oprnds1.pop ());
        }

      if (!slp_node)
        {
          vec_oprnds0.quick_push (vec_rhs1);
          vec_oprnds1.quick_push (vec_rhs2);
        }

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_rhs1)
        {
          vec_rhs2 = vec_oprnds1[i];

          new_temp = make_ssa_name (mask);
          new_stmt = gimple_build_assign (new_temp, code, vec_rhs1, vec_rhs2);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();

  return true;
}
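/* Illustrative sketch, not from the original source: a scalar boolean
   definition such as

     b = x < y;

   becomes a single vector comparison

     mask_v = vx < vy;

   whose result has the vector boolean type MASK_TYPE, one all-ones or
   all-zeros element per compared lane, ready to feed a VEC_COND_EXPR
   or a masked operation.  */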
/* Make sure the statement is vectorizable.  */

bool
vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
  bool ok;
  tree scalar_type, vectype;
  gimple *pattern_stmt;
  gimple_seq pattern_def_seq;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
    }

  if (gimple_has_volatile_ops (stmt))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: stmt has volatile operands\n");

      return false;
    }

  /* Skip stmts that do not need to be vectorized.  In loops this is expected
     to include:
     - the COND_EXPR which is the loop exit condition
     - any LABEL_EXPRs in the loop
     - computations that are used only for array indexing or loop control.
     In basic blocks we only analyze statements that are a part of some SLP
     instance, therefore, all the statements are relevant.

     Pattern statement needs to be analyzed instead of the original statement
     if the original statement is not relevant.  Otherwise, we analyze both
     statements.  In basic blocks we are called from some SLP instance
     traversal, don't analyze pattern stmts instead, the pattern stmts
     already will be part of SLP instance.  */
  pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (!STMT_VINFO_RELEVANT_P (stmt_info)
      && !STMT_VINFO_LIVE_P (stmt_info))
    {
      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
          && pattern_stmt
          && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
              || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
        {
          /* Analyze PATTERN_STMT instead of the original stmt.  */
          stmt = pattern_stmt;
          stmt_info = vinfo_for_stmt (pattern_stmt);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "==> examining pattern statement: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
            }
        }
      else
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");

          return true;
        }
    }
  else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
           && node == NULL
           && pattern_stmt
           && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
               || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
    {
      /* Analyze PATTERN_STMT too.  */
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "==> examining pattern statement: ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
        }

      if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node))
        return false;
    }

  if (is_pattern_stmt_p (stmt_info)
      && node == NULL
      && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
    {
      gimple_stmt_iterator si;

      for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
        {
          gimple *pattern_def_stmt = gsi_stmt (si);
          if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
              || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
            {
              /* Analyze def stmt of STMT if it's a pattern stmt.  */
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location,
                                   "==> examining pattern def statement: ");
                  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
                }

              if (!vect_analyze_stmt (pattern_def_stmt,
                                      need_to_vectorize, node))
                return false;
            }
        }
    }
  switch (STMT_VINFO_DEF_TYPE (stmt_info))
    {
    case vect_internal_def:
      break;

    case vect_reduction_def:
    case vect_nested_cycle:
      gcc_assert (!bb_vinfo
                  && (relevance == vect_used_in_outer
                      || relevance == vect_used_in_outer_by_reduction
                      || relevance == vect_used_by_reduction
                      || relevance == vect_unused_in_scope));
      break;

    case vect_induction_def:
    case vect_constant_def:
    case vect_external_def:
    case vect_unknown_def_type:
    default:
      gcc_unreachable ();
    }

  if (bb_vinfo)
    {
      gcc_assert (PURE_SLP_STMT (stmt_info));

      scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "get vectype for scalar type:  ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
          dump_printf (MSG_NOTE, "\n");
        }

      vectype = get_vectype_for_scalar_type (scalar_type);
      if (!vectype)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "not SLPed: unsupported data-type ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 scalar_type);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          return false;
        }

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "vectype:  ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
          dump_printf (MSG_NOTE, "\n");
        }

      STMT_VINFO_VECTYPE (stmt_info) = vectype;
    }
  if (STMT_VINFO_RELEVANT_P (stmt_info))
    {
      gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
      gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
                  || (is_gimple_call (stmt)
                      && gimple_call_lhs (stmt) == NULL_TREE));
      *need_to_vectorize = true;
    }

  if (PURE_SLP_STMT (stmt_info) && !node)
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "handled only by SLP analysis\n");
      return true;
    }

  ok = true;
  if (!bb_vinfo
      && (STMT_VINFO_RELEVANT_P (stmt_info)
          || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
    ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
          || vectorizable_conversion (stmt, NULL, NULL, node)
          || vectorizable_shift (stmt, NULL, NULL, node)
          || vectorizable_operation (stmt, NULL, NULL, node)
          || vectorizable_assignment (stmt, NULL, NULL, node)
          || vectorizable_load (stmt, NULL, NULL, node, NULL)
          || vectorizable_call (stmt, NULL, NULL, node)
          || vectorizable_store (stmt, NULL, NULL, node)
          || vectorizable_reduction (stmt, NULL, NULL, node)
          || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
          || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
  else
    {
      if (bb_vinfo)
        ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
              || vectorizable_conversion (stmt, NULL, NULL, node)
              || vectorizable_shift (stmt, NULL, NULL, node)
              || vectorizable_operation (stmt, NULL, NULL, node)
              || vectorizable_assignment (stmt, NULL, NULL, node)
              || vectorizable_load (stmt, NULL, NULL, node, NULL)
              || vectorizable_call (stmt, NULL, NULL, node)
              || vectorizable_store (stmt, NULL, NULL, node)
              || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
              || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
    }

  if (!ok)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "not vectorized: relevant stmt not ");
          dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
        }

      return false;
    }

  if (bb_vinfo)
    return true;

  /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
     need extra handling, except for vectorizable reductions.  */
  if (STMT_VINFO_LIVE_P (stmt_info)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    ok = vectorizable_live_operation (stmt, NULL, NULL);

  if (!ok)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "not vectorized: live stmt not ");
          dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
        }

      return false;
    }

  return true;
}
/* Function vect_transform_stmt.

   Create a vectorized stmt to replace STMT, and insert it at BSI.  */

bool
vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
                     bool *grouped_store, slp_tree slp_node,
                     slp_instance slp_node_instance)
{
  bool is_store = false;
  gimple *vec_stmt = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bool done;

  gcc_assert (slp_node || !PURE_SLP_STMT (stmt_info));
  gimple *old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

  switch (STMT_VINFO_TYPE (stmt_info))
    {
    case type_demotion_vec_info_type:
    case type_promotion_vec_info_type:
    case type_conversion_vec_info_type:
      done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case induc_vec_info_type:
      gcc_assert (!slp_node);
      done = vectorizable_induction (stmt, gsi, &vec_stmt);
      gcc_assert (done);
      break;

    case shift_vec_info_type:
      done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case op_vec_info_type:
      done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case assignment_vec_info_type:
      done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case load_vec_info_type:
      done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
                                slp_node_instance);
      gcc_assert (done);
      break;

    case store_vec_info_type:
      done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
        {
          /* In case of interleaving, the whole chain is vectorized when the
             last store in the chain is reached.  Store stmts before the last
             one are skipped, and there vec_stmt_info shouldn't be freed
             meanwhile.  */
          *grouped_store = true;
          if (STMT_VINFO_VEC_STMT (stmt_info))
            is_store = true;
        }
      else
        is_store = true;
      break;

    case condition_vec_info_type:
      done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
      gcc_assert (done);
      break;

    case comparison_vec_info_type:
      done = vectorizable_comparison (stmt, gsi, &vec_stmt, NULL, slp_node);
      gcc_assert (done);
      break;

    case call_vec_info_type:
      done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
      stmt = gsi_stmt (*gsi);
      if (is_gimple_call (stmt)
          && gimple_call_internal_p (stmt)
          && gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
        is_store = true;
      break;

    case call_simd_clone_vec_info_type:
      done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
      stmt = gsi_stmt (*gsi);
      break;

    case reduc_vec_info_type:
      done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    default:
      if (!STMT_VINFO_LIVE_P (stmt_info))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "stmt not supported.\n");
          gcc_unreachable ();
        }
    }
  /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
     This would break hybrid SLP vectorization.  */
  if (slp_node)
    gcc_assert (!vec_stmt
                && STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt);

  /* Handle inner-loop stmts whose DEF is used in the loop-nest that
     is being vectorized, but outside the immediately enclosing loop.  */
  if (vec_stmt
      && STMT_VINFO_LOOP_VINFO (stmt_info)
      && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
                                  STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
      && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
          || STMT_VINFO_RELEVANT (stmt_info) ==
             vect_used_in_outer_by_reduction))
    {
      struct loop *innerloop = LOOP_VINFO_LOOP (
                                 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
      imm_use_iterator imm_iter;
      use_operand_p use_p;
      tree scalar_dest;
      gimple *exit_phi;

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "Record the vdef for outer-loop vectorization.\n");

      /* Find the relevant loop-exit phi-node, and record the vec_stmt there
         (to be used when vectorizing outer-loop stmts that use the DEF of
         STMT).  */
      if (gimple_code (stmt) == GIMPLE_PHI)
        scalar_dest = PHI_RESULT (stmt);
      else
        scalar_dest = gimple_assign_lhs (stmt);

      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
        {
          if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
            {
              exit_phi = USE_STMT (use_p);
              STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
            }
        }
    }

  /* Handle stmts whose DEF is used outside the loop-nest that is
     being vectorized.  */
  if (STMT_VINFO_LIVE_P (stmt_info)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    {
      done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
      gcc_assert (done);
    }

  if (vec_stmt)
    STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;

  return is_store;
}
/* Remove a group of stores (for SLP or interleaving), free their
   stmt_vec_info.  */

void
vect_remove_stores (gimple *first_stmt)
{
  gimple *next = first_stmt;
  gimple *tmp;
  gimple_stmt_iterator next_si;

  while (next)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (next);

      tmp = GROUP_NEXT_ELEMENT (stmt_info);
      if (is_pattern_stmt_p (stmt_info))
        next = STMT_VINFO_RELATED_STMT (stmt_info);
      /* Free the attached stmt_vec_info and remove the stmt.  */
      next_si = gsi_for_stmt (next);
      unlink_stmt_vdef (next);
      gsi_remove (&next_si, true);
      release_defs (next);
      free_stmt_vec_info (next);
      next = tmp;
    }
}
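
/* Generic sketch (illustrative only, not GCC code) of the
   walk-and-free pattern used by vect_remove_stores above: the next
   link must be fetched before the current element is destroyed.
   Assumes free () from <stdlib.h>; the struct and function names are
   hypothetical.  */

struct chain_elt_example { struct chain_elt_example *next; };

static void
free_chain_example (struct chain_elt_example *first)
{
  struct chain_elt_example *next = first;
  while (next)
    {
      struct chain_elt_example *tmp = next->next;	/* save link first */
      free (next);					/* then destroy */
      next = tmp;
    }
}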
/* Function new_stmt_vec_info.

   Create and initialize a new stmt_vec_info struct for STMT.  */

stmt_vec_info
new_stmt_vec_info (gimple *stmt, vec_info *vinfo)
{
  stmt_vec_info res;
  res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));

  STMT_VINFO_TYPE (res) = undef_vec_info_type;
  STMT_VINFO_STMT (res) = stmt;
  res->vinfo = vinfo;
  STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
  STMT_VINFO_LIVE_P (res) = false;
  STMT_VINFO_VECTYPE (res) = NULL;
  STMT_VINFO_VEC_STMT (res) = NULL;
  STMT_VINFO_VECTORIZABLE (res) = true;
  STMT_VINFO_IN_PATTERN_P (res) = false;
  STMT_VINFO_RELATED_STMT (res) = NULL;
  STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
  STMT_VINFO_DATA_REF (res) = NULL;
  STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION;

  STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
  STMT_VINFO_DR_OFFSET (res) = NULL;
  STMT_VINFO_DR_INIT (res) = NULL;
  STMT_VINFO_DR_STEP (res) = NULL;
  STMT_VINFO_DR_ALIGNED_TO (res) = NULL;

  if (gimple_code (stmt) == GIMPLE_PHI
      && is_loop_header_bb_p (gimple_bb (stmt)))
    STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
  else
    STMT_VINFO_DEF_TYPE (res) = vect_internal_def;

  STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
  STMT_SLP_TYPE (res) = loop_vect;
  GROUP_FIRST_ELEMENT (res) = NULL;
  GROUP_NEXT_ELEMENT (res) = NULL;
  GROUP_SIZE (res) = 0;
  GROUP_STORE_COUNT (res) = 0;
  GROUP_GAP (res) = 0;
  GROUP_SAME_DR_STMT (res) = NULL;

  return res;
}
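
/* Sketch (illustrative only) of the constructor pattern used by
   new_stmt_vec_info: zero-initialize the whole record, then override
   just the fields whose defaults are non-zero.  The toy struct and
   function are hypothetical.  */

struct toy_info_example
{
  int type;		/* 0 plays the role of undef_vec_info_type */
  int live_p;
  int vectorizable;
};

static struct toy_info_example
toy_info_defaults_example (void)
{
  struct toy_info_example res = { 0, 0, 0 };	/* like xcalloc */
  res.vectorizable = 1;		/* non-zero default set explicitly */
  return res;
}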
/* Create a hash table for stmt_vec_info.  */

void
init_stmt_vec_info_vec (void)
{
  gcc_assert (!stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.create (50);
}


/* Free hash table for stmt_vec_info.  */

void
free_stmt_vec_info_vec (void)
{
  unsigned int i;
  stmt_vec_info info;
  FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
    if (info != NULL)
      free_stmt_vec_info (STMT_VINFO_STMT (info));
  gcc_assert (stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.release ();
}
/* Free stmt vectorization related info.  */

void
free_stmt_vec_info (gimple *stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  if (!stmt_info)
    return;

  /* Check if this statement has a related "pattern stmt"
     (introduced by the vectorizer during the pattern recognition
     pass).  Free pattern's stmt_vec_info and def stmt's stmt_vec_info
     too.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      stmt_vec_info patt_info
        = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
      if (patt_info)
        {
          gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
          gimple *patt_stmt = STMT_VINFO_STMT (patt_info);
          gimple_set_bb (patt_stmt, NULL);
          tree lhs = gimple_get_lhs (patt_stmt);
          if (lhs && TREE_CODE (lhs) == SSA_NAME)
            release_ssa_name (lhs);
          if (seq)
            {
              gimple_stmt_iterator si;
              for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
                {
                  gimple *seq_stmt = gsi_stmt (si);
                  gimple_set_bb (seq_stmt, NULL);
                  lhs = gimple_get_lhs (seq_stmt);
                  if (lhs && TREE_CODE (lhs) == SSA_NAME)
                    release_ssa_name (lhs);
                  free_stmt_vec_info (seq_stmt);
                }
            }
          free_stmt_vec_info (patt_stmt);
        }
    }

  STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
  STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
  set_vinfo_for_stmt (stmt, NULL);
  free (stmt_info);
}
/* Function get_vectype_for_scalar_type_and_size.

   Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
   by the target.  */

static tree
get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
{
  machine_mode inner_mode = TYPE_MODE (scalar_type);
  machine_mode simd_mode;
  unsigned int nbytes = GET_MODE_SIZE (inner_mode);
  int nunits;
  tree vectype;

  if (nbytes == 0)
    return NULL_TREE;

  if (GET_MODE_CLASS (inner_mode) != MODE_INT
      && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
    return NULL_TREE;

  /* For vector types of elements whose mode precision doesn't
     match their type's precision we use an element type of mode
     precision.  The vectorization routines will have to make sure
     they support the proper result truncation/extension.
     We also make sure to build vector types with INTEGER_TYPE
     component type only.  */
  if (INTEGRAL_TYPE_P (scalar_type)
      && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
          || TREE_CODE (scalar_type) != INTEGER_TYPE))
    scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
                                                  TYPE_UNSIGNED (scalar_type));

  /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
     When the component mode passes the above test simply use a type
     corresponding to that mode.  The theory is that any use that
     would cause problems with this will disable vectorization anyway.  */
  else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
           && !INTEGRAL_TYPE_P (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);

  /* We can't build a vector type of elements with alignment bigger than
     their size.  */
  else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode,
                                                  TYPE_UNSIGNED (scalar_type));

  /* If we fell back to using the mode, fail if there was
     no scalar type for it.  */
  if (scalar_type == NULL_TREE)
    return NULL_TREE;

  /* If no size was supplied use the mode the target prefers.  Otherwise
     lookup a vector mode of the specified size.  */
  if (size == 0)
    simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
  else
    simd_mode = mode_for_vector (inner_mode, size / nbytes);
  nunits = GET_MODE_SIZE (simd_mode) / nbytes;
  if (nunits <= 1)
    return NULL_TREE;

  vectype = build_vector_type (scalar_type, nunits);

  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
    return NULL_TREE;

  return vectype;
}
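
/* Worked example (illustrative only, not GCC code) of the unit
   arithmetic above: for a 4-byte element and a 16-byte vector the
   routine asks mode_for_vector for 16 / 4 == 4 units (e.g. V4SImode),
   and a count of one or fewer units yields no vector type.  The
   function name is hypothetical.  */

static unsigned
vector_nunits_example (unsigned elem_bytes, unsigned vector_bytes)
{
  unsigned nunits = vector_bytes / elem_bytes;
  return nunits <= 1 ? 0 : nunits;	/* 0 means "no vector type" */
}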
unsigned int current_vector_size;

/* Function get_vectype_for_scalar_type.

   Returns the vector type corresponding to SCALAR_TYPE as supported
   by the target.  */

tree
get_vectype_for_scalar_type (tree scalar_type)
{
  tree vectype;
  vectype = get_vectype_for_scalar_type_and_size (scalar_type,
                                                  current_vector_size);
  if (vectype
      && current_vector_size == 0)
    current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
  return vectype;
}
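
/* Sketch (illustrative only) of the latching behavior above: the
   first successful query fixes the vector size used by subsequent
   queries until it is reset.  Names are hypothetical.  */

static unsigned latched_size_example;

static unsigned
latch_size_example (unsigned preferred_size)
{
  if (latched_size_example == 0)
    latched_size_example = preferred_size;
  return latched_size_example;
}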
/* Function get_mask_type_for_scalar_type.

   Returns the mask type corresponding to a result of comparison
   of vectors of specified SCALAR_TYPE as supported by target.  */

tree
get_mask_type_for_scalar_type (tree scalar_type)
{
  tree vectype = get_vectype_for_scalar_type (scalar_type);

  if (!vectype)
    return NULL;

  return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype),
                                  current_vector_size);
}
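
/* Illustrative sketch (not GCC code) of what a mask vector holds: a
   comparison of two N-unit vectors yields one mask lane per vector
   lane.  Whether a lane is a single bit or a full element is up to
   the target; this hypothetical sketch uses one byte per lane.  */

static void
compare_to_mask_example (const int a[4], const int b[4],
                         unsigned char mask[4])
{
  for (int i = 0; i < 4; i++)
    mask[i] = a[i] < b[i] ? 0xff : 0x00;
}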
/* Function get_same_sized_vectype

   Returns a vector type corresponding to SCALAR_TYPE of size
   VECTOR_TYPE if supported by the target.  */

tree
get_same_sized_vectype (tree scalar_type, tree vector_type)
{
  if (TREE_CODE (scalar_type) == BOOLEAN_TYPE)
    return build_same_sized_truth_vector_type (vector_type);

  return get_vectype_for_scalar_type_and_size
           (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
}
/* Function vect_is_simple_use.

   Input:
   VINFO - the vect info of the loop or basic block that is being vectorized.
   OPERAND - operand in the loop or bb.
   Output:
   DEF_STMT - the defining stmt in case OPERAND is an SSA_NAME.
   DT - the type of definition

   Returns whether a stmt with OPERAND can be vectorized.
   For loops, supportable operands are constants, loop invariants, and operands
   that are defined by the current iteration of the loop.  Unsupportable
   operands are those that are defined by a previous iteration of the loop (as
   is the case in reduction/induction computations).
   For basic blocks, supportable operands are constants and bb invariants.
   For now, operands defined outside the basic block are not supported.  */
bool
vect_is_simple_use (tree operand, vec_info *vinfo,
                    gimple **def_stmt, enum vect_def_type *dt)
{
  *def_stmt = NULL;
  *dt = vect_unknown_def_type;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "vect_is_simple_use: operand ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
      dump_printf (MSG_NOTE, "\n");
    }

  if (CONSTANT_CLASS_P (operand))
    {
      *dt = vect_constant_def;
      return true;
    }

  if (is_gimple_min_invariant (operand))
    {
      *dt = vect_external_def;
      return true;
    }

  if (TREE_CODE (operand) != SSA_NAME)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not ssa-name.\n");
      return false;
    }

  if (SSA_NAME_IS_DEFAULT_DEF (operand))
    {
      *dt = vect_external_def;
      return true;
    }

  *def_stmt = SSA_NAME_DEF_STMT (operand);
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
    }

  if (! vect_stmt_in_region_p (vinfo, *def_stmt))
    *dt = vect_external_def;
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (*def_stmt);
      *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
      switch (*dt)
        {
        case vect_uninitialized_def:
          dump_printf (MSG_NOTE, "uninitialized\n");
          break;
        case vect_constant_def:
          dump_printf (MSG_NOTE, "constant\n");
          break;
        case vect_external_def:
          dump_printf (MSG_NOTE, "external\n");
          break;
        case vect_internal_def:
          dump_printf (MSG_NOTE, "internal\n");
          break;
        case vect_induction_def:
          dump_printf (MSG_NOTE, "induction\n");
          break;
        case vect_reduction_def:
          dump_printf (MSG_NOTE, "reduction\n");
          break;
        case vect_double_reduction_def:
          dump_printf (MSG_NOTE, "double reduction\n");
          break;
        case vect_nested_cycle:
          dump_printf (MSG_NOTE, "nested cycle\n");
          break;
        case vect_unknown_def_type:
          dump_printf (MSG_NOTE, "unknown\n");
          break;
        }
    }

  if (*dt == vect_unknown_def_type)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Unsupported pattern.\n");
      return false;
    }

  switch (gimple_code (*def_stmt))
    {
    case GIMPLE_PHI:
    case GIMPLE_ASSIGN:
    case GIMPLE_CALL:
      break;
    default:
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "unsupported defining stmt:\n");
      return false;
    }

  return true;
}
/* Function vect_is_simple_use.

   Same as vect_is_simple_use but also determines the vector operand
   type of OPERAND and stores it to *VECTYPE.  If the definition of
   OPERAND is vect_uninitialized_def, vect_constant_def or
   vect_external_def *VECTYPE will be set to NULL_TREE and the caller
   is responsible for computing the best suited vector type for the
   scalar operand.  */

bool
vect_is_simple_use (tree operand, vec_info *vinfo,
                    gimple **def_stmt, enum vect_def_type *dt, tree *vectype)
{
  if (!vect_is_simple_use (operand, vinfo, def_stmt, dt))
    return false;

  /* Now get a vector type if the def is internal, otherwise supply
     NULL_TREE and leave it up to the caller to figure out a proper
     type for the use stmt.  */
  if (*dt == vect_internal_def
      || *dt == vect_induction_def
      || *dt == vect_reduction_def
      || *dt == vect_double_reduction_def
      || *dt == vect_nested_cycle)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);

      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
          && !STMT_VINFO_RELEVANT (stmt_info)
          && !STMT_VINFO_LIVE_P (stmt_info))
        stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));

      *vectype = STMT_VINFO_VECTYPE (stmt_info);
      gcc_assert (*vectype != NULL_TREE);
    }
  else if (*dt == vect_uninitialized_def
           || *dt == vect_constant_def
           || *dt == vect_external_def)
    *vectype = NULL_TREE;
  else
    gcc_unreachable ();

  return true;
}
/* Function supportable_widening_operation

   Check whether an operation represented by the code CODE is a
   widening operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   producing a result of type VECTYPE_OUT).

   Widening operations we currently support are NOP (CONVERT), FLOAT
   and WIDEN_MULT.  This function checks if these operations are supported
   by the target platform either directly (via vector tree-codes), or via
   target builtins.

   Output:
   - CODE1 and CODE2 are codes of vector operations to be used when
     vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
     case of multi-step conversion (like char->short->int - in that case
     MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
     widening operation (short in the above example).  */
bool
supportable_widening_operation (enum tree_code code, gimple *stmt,
                                tree vectype_out, tree vectype_in,
                                enum tree_code *code1, enum tree_code *code2,
                                int *multi_step_cvt,
                                vec<tree> *interm_types)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *vect_loop = NULL;
  machine_mode vec_mode;
  enum insn_code icode1, icode2;
  optab optab1, optab2;
  tree vectype = vectype_in;
  tree wide_vectype = vectype_out;
  enum tree_code c1, c2;
  int i;
  tree prev_type, intermediate_type;
  machine_mode intermediate_mode, prev_mode;
  optab optab3, optab4;

  *multi_step_cvt = 0;
  if (loop_info)
    vect_loop = LOOP_VINFO_LOOP (loop_info);

  switch (code)
    {
    case WIDEN_MULT_EXPR:
      /* The result of a vectorized widening operation usually requires
         two vectors (because the widened results do not fit into one vector).
         The generated vector results would normally be expected to be
         generated in the same order as in the original scalar computation,
         i.e. if 8 results are generated in each vector iteration, they are
         to be organized as follows:
                vect1: [res1,res2,res3,res4],
                vect2: [res5,res6,res7,res8].

         However, in the special case that the result of the widening
         operation is used in a reduction computation only, the order doesn't
         matter (because when vectorizing a reduction we change the order of
         the computation).  Some targets can take advantage of this and
         generate more efficient code.  For example, targets like Altivec,
         that support widen_mult using a sequence of {mult_even,mult_odd}
         generate the following vectors:
                vect1: [res1,res3,res5,res7],
                vect2: [res2,res4,res6,res8].

         When vectorizing outer-loops, we execute the inner-loop sequentially
         (each vectorized inner-loop iteration contributes to VF outer-loop
         iterations in parallel).  We therefore don't allow changing the
         order of the computation in the inner-loop during outer-loop
         vectorization.  */
      /* TODO: Another case in which order doesn't *really* matter is when we
         widen and then contract again, e.g. (short)((int)x * y >> 8).
         Normally, pack_trunc performs an even/odd permute, whereas the
         repack from an even/odd expansion would be an interleave, which
         would be significantly simpler for e.g. AVX2.  */
      /* In any case, in order to avoid duplicating the code below, recurse
         on VEC_WIDEN_MULT_EVEN_EXPR.  If it succeeds, all the return values
         are properly set up for the caller.  If we fail, we'll continue with
         a VEC_WIDEN_MULT_LO/HI_EXPR check.  */
      if (vect_loop
          && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
          && !nested_in_vect_loop_p (vect_loop, stmt)
          && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
                                             stmt, vectype_out, vectype_in,
                                             code1, code2, multi_step_cvt,
                                             interm_types))
        {
          /* Elements in a vector with vect_used_by_reduction property cannot
             be reordered if the use chain with this property does not have
             the same operation.  One such example is s += a * b, where
             elements in a and b cannot be reordered.  Here we check if the
             vector defined by STMT is only directly used in the reduction
             statement.  */
          tree lhs = gimple_assign_lhs (stmt);
          use_operand_p dummy;
          gimple *use_stmt;
          stmt_vec_info use_stmt_info = NULL;
          if (single_imm_use (lhs, &dummy, &use_stmt)
              && (use_stmt_info = vinfo_for_stmt (use_stmt))
              && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
            return true;
        }
      c1 = VEC_WIDEN_MULT_LO_EXPR;
      c2 = VEC_WIDEN_MULT_HI_EXPR;
      break;
    case VEC_WIDEN_MULT_EVEN_EXPR:
      /* Support the recursion induced just above.  */
      c1 = VEC_WIDEN_MULT_EVEN_EXPR;
      c2 = VEC_WIDEN_MULT_ODD_EXPR;
      break;

    case WIDEN_LSHIFT_EXPR:
      c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
      c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
      break;

    CASE_CONVERT:
      c1 = VEC_UNPACK_LO_EXPR;
      c2 = VEC_UNPACK_HI_EXPR;
      break;

    case FLOAT_EXPR:
      c1 = VEC_UNPACK_FLOAT_LO_EXPR;
      c2 = VEC_UNPACK_FLOAT_HI_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
         VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
         computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }
  if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
    std::swap (c1, c2);

  if (code == FIX_TRUNC_EXPR)
    {
      /* The signedness is determined from output operand.  */
      optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
      optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
    }
  else
    {
      optab1 = optab_for_tree_code (c1, vectype, optab_default);
      optab2 = optab_for_tree_code (c2, vectype, optab_default);
    }

  if (!optab1 || !optab2)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
      || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;
  *code2 = c2;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
      && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
    return true;
  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */

  prev_type = vectype;
  prev_mode = vec_mode;

  if (!CONVERT_EXPR_CODE_P (code))
    return false;

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in the promotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
     not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      intermediate_type
        = lang_hooks.types.type_for_mode (intermediate_mode,
                                          TYPE_UNSIGNED (prev_type));
      optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
      optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);

      if (!optab3 || !optab4
          || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
          || insn_data[icode1].operand[0].mode != intermediate_mode
          || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
          || insn_data[icode2].operand[0].mode != intermediate_mode
          || ((icode1 = optab_handler (optab3, intermediate_mode))
              == CODE_FOR_nothing)
          || ((icode2 = optab_handler (optab4, intermediate_mode))
              == CODE_FOR_nothing))
        break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
          && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
        return true;

      prev_type = intermediate_type;
      prev_mode = intermediate_mode;
    }

  interm_types->release ();
  return false;
}
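
/* Scalar sketch (illustrative only) of a two-step widening
   char -> short -> int, the case described in the comments above: the
   loop records one intermediate type (short), so *MULTI_STEP_CVT ends
   up as 1 and INTERM_TYPES holds { short }.  The function name is
   hypothetical.  */

static int
two_step_widen_example (signed char c)
{
  short mid = (short) c;	/* step 1: unpack char to short */
  return (int) mid;		/* step 2: unpack short to int  */
}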
/* Function supportable_narrowing_operation

   Check whether an operation represented by the code CODE is a
   narrowing operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   and producing a result of type VECTYPE_OUT).

   Narrowing operations we currently support are NOP (CONVERT) and
   FIX_TRUNC.  This function checks if these operations are supported by
   the target platform directly via vector tree-codes.

   Output:
   - CODE1 is the code of a vector operation to be used when
     vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
     case of multi-step conversion (like int->short->char - in that case
     MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
     narrowing operation (short in the above example).  */
bool
supportable_narrowing_operation (enum tree_code code,
                                 tree vectype_out, tree vectype_in,
                                 enum tree_code *code1, int *multi_step_cvt,
                                 vec<tree> *interm_types)
{
  machine_mode vec_mode;
  enum insn_code icode1;
  optab optab1, interm_optab;
  tree vectype = vectype_in;
  tree narrow_vectype = vectype_out;
  enum tree_code c1;
  tree intermediate_type;
  machine_mode intermediate_mode, prev_mode;
  int i;
  bool uns;

  *multi_step_cvt = 0;
  switch (code)
    {
    CASE_CONVERT:
      c1 = VEC_PACK_TRUNC_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      c1 = VEC_PACK_FIX_TRUNC_EXPR;
      break;

    case FLOAT_EXPR:
      /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
         tree code and optabs used for computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == FIX_TRUNC_EXPR)
    /* The signedness is determined from output operand.  */
    optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
  else
    optab1 = optab_for_tree_code (c1, vectype, optab_default);

  if (!optab1)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
    return true;

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */
  prev_mode = vec_mode;
  if (code == FIX_TRUNC_EXPR)
    uns = TYPE_UNSIGNED (vectype_out);
  else
    uns = TYPE_UNSIGNED (vectype);

  /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
     conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
     costly than signed.  */
  if (code == FIX_TRUNC_EXPR && uns)
    {
      enum insn_code icode2;

      intermediate_type
        = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
      interm_optab
        = optab_for_tree_code (c1, intermediate_type, optab_default);
      if (interm_optab != unknown_optab
          && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
          && insn_data[icode1].operand[0].mode
             == insn_data[icode2].operand[0].mode)
        {
          uns = false;
          optab1 = interm_optab;
          icode1 = icode2;
        }
    }

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in the narrowing sequence.  We try
     MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      intermediate_type
        = lang_hooks.types.type_for_mode (intermediate_mode, uns);
      interm_optab
        = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
                               optab_default);
      if (!interm_optab
          || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
          || insn_data[icode1].operand[0].mode != intermediate_mode
          || ((icode1 = optab_handler (interm_optab, intermediate_mode))
              == CODE_FOR_nothing))
        break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
        return true;

      prev_mode = intermediate_mode;
      optab1 = interm_optab;
    }

  interm_types->release ();