/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003-2014 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "stor-layout.h"
#include "hard-reg-set.h"
#include "dominance.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop.h"
#include "tree-scalar-evolution.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "insn-codes.h"
#include "diagnostic-core.h"
#include "tree-vectorizer.h"
#include "plugin-api.h"

/* For lang_hooks.types.type_for_mode.  */
#include "langhooks.h"

/* Return the vectorized type for the given statement.  */

tree
stmt_vectype (struct _stmt_vec_info *stmt_info)
{
  return STMT_VINFO_VECTYPE (stmt_info);
}

/* Return TRUE iff the given statement is in an inner loop relative to
   the loop being vectorized.  */

bool
stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
{
  gimple stmt = STMT_VINFO_STMT (stmt_info);
  basic_block bb = gimple_bb (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop;

  if (!loop_vinfo)
    return false;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  return (bb->loop_father == loop->inner);
}

/* Record the cost of a statement, either by directly informing the
   target model or by saving it in a vector for later processing.
   Return a preliminary estimate of the statement's cost.  */

unsigned
record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
		  enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
		  int misalign, enum vect_cost_model_location where)
{
  if (body_cost_vec)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      add_stmt_info_to_vec (body_cost_vec, count, kind,
			    stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
			    misalign);
      return (unsigned)
	(builtin_vectorization_cost (kind, vectype, misalign) * count);
    }
  else
    {
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
      bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
      void *target_cost_data;

      if (loop_vinfo)
	target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
      else
	target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);

      return add_stmt_cost (target_cost_data, count, kind, stmt_info,
			    misalign, where);
    }
}

/* Return a variable of type ELEM_TYPE[NELEMS].  */

static tree
create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
			 "vect_array");
}

/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT and the vector is associated
   with scalar destination SCALAR_DEST.  */

static tree
read_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
		   tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;
  gimple new_stmt;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return vect_name;
}

/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT.  */

static void
write_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree vect,
		    tree array, unsigned HOST_WIDE_INT n)
{
  tree array_ref;
  gimple new_stmt;

  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
}

/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

static tree
create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
{
  tree mem_ref, alias_ptr_type;

  alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
  return mem_ref;
}

/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (vec<gimple> *worklist, gimple stmt,
		    enum vect_relevant relevant, bool live_p,
		    bool used_in_pattern)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
  gimple pattern_stmt;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "mark relevant %d, live %d.\n", relevant, live_p);

  /* If this stmt is an original stmt in a pattern, we might need to mark its
     related pattern stmt instead of the original stmt.  However, such stmts
     may have their own uses that are not in any pattern, in such cases the
     stmt itself should be marked.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      bool found = false;

      if (!used_in_pattern)
        {
          imm_use_iterator imm_iter;
          use_operand_p use_p;
          gimple use_stmt;
          tree lhs;
          loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
          struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

          if (is_gimple_assign (stmt))
            lhs = gimple_assign_lhs (stmt);
          else
            lhs = gimple_call_lhs (stmt);

          /* This use is outside of any pattern; if LHS also has uses that
             are pattern uses, we should mark the stmt itself, and not the
             pattern stmt.  */
          if (lhs && TREE_CODE (lhs) == SSA_NAME)
            FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
              {
                if (is_gimple_debug (USE_STMT (use_p)))
                  continue;
                use_stmt = USE_STMT (use_p);

                if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
                  continue;

                if (vinfo_for_stmt (use_stmt)
                    && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
                  {
                    found = true;
                    break;
                  }
              }
        }

      if (!found)
        {
          /* This is the last stmt in a sequence that was detected as a
             pattern that can potentially be vectorized.  Don't mark the stmt
             as relevant/live because it's not going to be vectorized.
             Instead mark the pattern-stmt that replaces it.  */

          pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "last stmt in pattern. don't mark"
                             " relevant/live.\n");
          stmt_info = vinfo_for_stmt (pattern_stmt);
          gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
          save_relevant = STMT_VINFO_RELEVANT (stmt_info);
          save_live_p = STMT_VINFO_LIVE_P (stmt_info);
          stmt = pattern_stmt;
        }
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "already marked relevant/live.\n");
      return;
    }

  worklist->safe_push (stmt);
}

/* Function vect_stmt_relevant_p.

   Return true if STMT in loop that is represented by LOOP_VINFO is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - it is a control stmt in the loop (other than the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */

static bool
vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
		      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
	 != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt))
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
                           "vec_stmt_relevant_p: stmt has vdefs.\n");
	*relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
	{
	  basic_block bb = gimple_bb (USE_STMT (use_p));
	  if (!flow_bb_inside_loop_p (loop, bb))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
                                 "vec_stmt_relevant_p: used out of loop.\n");

	      if (is_gimple_debug (USE_STMT (use_p)))
		continue;

	      /* We expect all such uses to be in the loop exit phis
		 (because of loop closed form)   */
	      gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
	      gcc_assert (bb == single_exit (loop)->dest);

	      *live_p = true;
	    }
	}
    }

  return (*live_p || *relevant);
}

/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref.  FORNOW this means that it is of one of
     the following forms:
     -1- array_ref = var
     -2- var = array_ref
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))
    {
      if (is_gimple_call (stmt)
	  && gimple_call_internal_p (stmt))
	switch (gimple_call_internal_fn (stmt))
	  {
	  case IFN_MASK_STORE:
	    operand = gimple_call_arg (stmt, 3);
	    if (operand == use)
	      return true;
	    /* FALLTHRU */
	  case IFN_MASK_LOAD:
	    operand = gimple_call_arg (stmt, 2);
	    if (operand == use)
	      return true;
	    break;
	  default:
	    break;
	  }
      return false;
    }

  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}

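
/* For example (an illustration, not from the original comments): in

     a[i] = x;

   the use of 'i' only serves to index the array, so this function returns
   false for it and the definition of 'i' is not made relevant on its own,
   whereas the use of 'x' (the value actually being stored) makes it return
   true.  */
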
/* Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
   - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
     be performed.

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
       STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array indexing),
   which does not need to be directly vectorized, then the liveness/relevance
   of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
   skip DEF_STMT because it has already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
   be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */

static bool
process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
	     enum vect_relevant relevant, vec<gimple> *worklist,
	     bool force)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, stmt, loop_vinfo, NULL, &def_stmt, &def, &dt))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: unsupported use in stmt.\n");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
                         "reduc-stmt defining reduc-phi in the same nest.\n");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
	dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
		  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
	outer-loop-header-bb:
		d = def_stmt
	inner-loop:
		stmt # use (d)
	outer-loop-tail-bb:
		...		  */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
                         "outer-loop def-stmt defining inner-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
		      vect_used_in_scope : vect_unused_in_scope;
	  break;

	case vect_used_in_outer_by_reduction:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_by_reduction;
	  break;

	case vect_used_in_outer:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_in_scope;
	  break;

	case vect_used_in_scope:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
	outer-loop-header-bb:
		...
	inner-loop:
		d = def_stmt
	outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
		stmt # use (d)		*/
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
                         "inner-loop def-stmt defining outer-loop stmt.\n");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
            || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
                      vect_used_in_outer_by_reduction : vect_unused_in_scope;
          break;

        case vect_used_by_reduction:
          relevant = vect_used_in_outer_by_reduction;
          break;

        case vect_used_in_scope:
          relevant = vect_used_in_outer;
          break;

        default:
          gcc_unreachable ();
        }
    }

  vect_mark_relevant (worklist, def_stmt, relevant, live_p,
		      is_pattern_stmt_p (stmt_vinfo));
  return true;
}

/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.	 T1 = a[T0]

   3.    j = j + 1

   Stmt 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */

bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple stmt;
  unsigned int i;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple phi;
  bool live_p;
  enum vect_relevant relevant, tmp_relevant;
  enum vect_def_type def_type;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_mark_stmts_to_be_vectorized ===\n");

  auto_vec<gimple, 64> worklist;

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  phi = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	      dump_printf (MSG_NOTE, "\n");
	    }

	  if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, phi, relevant, live_p, false);
	}
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  stmt = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	      dump_printf (MSG_NOTE, "\n");
	    }

	  if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, stmt, relevant, live_p, false);
	}
    }

  /* 2. Process_worklist */
  while (worklist.length () > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = worklist.pop ();
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	  dump_printf (MSG_NOTE, "\n");
	}

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines it
	 (DEF_STMT) as relevant/irrelevant and live/dead according to the
	 liveness and relevance properties of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
      live_p = STMT_VINFO_LIVE_P (stmt_vinfo);

      /* Generally, the liveness and relevance properties of STMT are
	 propagated as is to the DEF_STMTs of its USEs:
	   live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
	   relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)

	 One exception is when STMT has been identified as defining a reduction
	 variable; in this case we set the liveness/relevance as follows:
	   live_p = false
	   relevant = vect_used_by_reduction
	 This is because we distinguish between two kinds of relevant stmts -
	 those that are used by a reduction computation, and those that are
	 (also) used by a regular computation.  This allows us later on to
	 identify stmts that are used solely by a reduction, and therefore the
	 order of the results that they produce does not have to be kept.  */

      def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
      tmp_relevant = relevant;
      switch (def_type)
        {
          case vect_reduction_def:
	    switch (tmp_relevant)
	      {
	        case vect_unused_in_scope:
	          relevant = vect_used_by_reduction;
	          break;

	        case vect_used_by_reduction:
	          if (gimple_code (stmt) == GIMPLE_PHI)
                    break;
	          /* fall through */

	        default:
	          if (dump_enabled_p ())
	            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "unsupported use of reduction.\n");
	          return false;
	      }

	    live_p = false;
	    break;

          case vect_nested_cycle:
            if (tmp_relevant != vect_unused_in_scope
                && tmp_relevant != vect_used_in_outer_by_reduction
                && tmp_relevant != vect_used_in_outer)
              {
                if (dump_enabled_p ())
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "unsupported use of nested cycle.\n");

                return false;
              }

            live_p = false;
            break;

          case vect_double_reduction_def:
            if (tmp_relevant != vect_unused_in_scope
                && tmp_relevant != vect_used_by_reduction)
              {
                if (dump_enabled_p ())
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "unsupported use of double reduction.\n");

                return false;
              }

            live_p = false;
            break;

          default:
            break;
        }

      if (is_pattern_stmt_p (stmt_vinfo))
        {
          /* Pattern statements are not inserted into the code, so
             FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
             have to scan the RHS or function arguments instead.  */
          if (is_gimple_assign (stmt))
            {
              enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
              tree op = gimple_assign_rhs1 (stmt);

              i = 1;
              if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
                {
                  if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
                                    live_p, relevant, &worklist, false)
                      || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
                                       live_p, relevant, &worklist, false))
                    return false;
                  i = 2;
                }
              for (; i < gimple_num_ops (stmt); i++)
                {
                  op = gimple_op (stmt, i);
                  if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
				    &worklist, false))
                    return false;
                }
            }
          else if (is_gimple_call (stmt))
            {
              for (i = 0; i < gimple_call_num_args (stmt); i++)
                {
                  tree arg = gimple_call_arg (stmt, i);
                  if (!process_use (stmt, arg, loop_vinfo, live_p, relevant,
				    &worklist, false))
                    return false;
                }
            }
        }
      else
        FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
          {
            tree op = USE_FROM_PTR (use_p);
            if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
			      &worklist, false))
              return false;
          }

      if (STMT_VINFO_GATHER_P (stmt_vinfo))
	{
	  tree off;
	  tree decl = vect_check_gather (stmt, loop_vinfo, NULL, &off, NULL);
	  gcc_assert (decl);
	  if (!process_use (stmt, off, loop_vinfo, live_p, relevant,
			    &worklist, true))
	    return false;
	}
    } /* while worklist */

  return true;
}

/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

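/* As an illustration (made-up numbers, not from the original comments):
   for a statement like

     x = y + 3;

   vectorized with ncopies == 2, the constant operand 3 contributes one
   vector_stmt to the prologue (the vector {3,3,...,3} is built once before
   the loop), and the addition itself contributes two vector_stmt entries to
   the loop body, one per copy.  */
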
static void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
			enum vect_def_type *dt,
			stmt_vector_for_cost *prologue_cost_vec,
			stmt_vector_for_cost *body_cost_vec)
{
  int i;
  int inside_cost = 0, prologue_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += record_stmt_cost (prologue_cost_vec, 1, vector_stmt,
					 stmt_info, 0, vect_prologue);

  /* Pass the inside-of-loop statements to the target-specific cost model.  */
  inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
				  stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_simple_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}

/* Model cost for type demotion and promotion operations.  PWR is normally
   zero for single-step promotions and demotions.  It will be one if
   two-step promotion/demotion is required, and so on.  Each additional
   step doubles the number of instructions required.  */

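/* For example (an illustration only, assuming vect_pow2 (N) returns 2**N):
   promoting a V16QI vector of chars to V4SI ints is a two-step promotion
   (char -> short -> int), so PWR is 1 and the loop below charges
   vect_pow2 (1) + vect_pow2 (2) = 2 + 4 = 6 vec_promote_demote operations
   for the statement.  */
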
static void
vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
				    enum vect_def_type *dt, int pwr)
{
  int i, tmp;
  int inside_cost = 0, prologue_cost = 0;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  void *target_cost_data;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (loop_vinfo)
    target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
  else
    target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);

  for (i = 0; i < pwr + 1; i++)
    {
      tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
	(i + 1) : i;
      inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
				    vec_promote_demote, stmt_info, 0,
				    vect_body);
    }

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
				      stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_promotion_demotion_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}

/* Function vect_cost_group_size

   For grouped load or store, return the group_size only if it is the first
   load or store of a group, else return 1.  This ensures that group size is
   only returned once per group.  */

static int
vect_cost_group_size (stmt_vec_info stmt_info)
{
  gimple first_stmt = GROUP_FIRST_ELEMENT (stmt_info);

  if (first_stmt == STMT_VINFO_STMT (stmt_info))
    return GROUP_SIZE (stmt_info);

  return 1;
}

/* Function vect_model_store_cost

   Models cost for stores.  In the case of grouped accesses, one access
   has the overhead of the grouped access attributed to it.  */

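/* For example (an illustration with made-up numbers): a group of 4
   interleaved stores vectorized without store-lanes and with ncopies == 1
   is charged ncopies * ceil_log2 (4) * 4 = 8 vec_perm operations for the
   interleaving, on top of the store cost computed by vect_get_store_cost.  */
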
void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
		       bool store_lanes_p, enum vect_def_type dt,
		       slp_tree slp_node,
		       stmt_vector_for_cost *prologue_cost_vec,
		       stmt_vector_for_cost *body_cost_vec)
{
  int group_size;
  unsigned int inside_cost = 0, prologue_cost = 0;
  struct data_reference *first_dr;
  gimple first_stmt;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (dt == vect_constant_def || dt == vect_external_def)
    prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
				       stmt_info, 0, vect_prologue);

  /* Grouped access?  */
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (slp_node)
        {
          first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
          group_size = 1;
        }
      else
        {
          first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
          group_size = vect_cost_group_size (stmt_info);
        }

      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a grouped access.  */
  else
    {
      group_size = 1;
      first_dr = STMT_VINFO_DATA_REF (stmt_info);
    }

  /* We assume that the cost of a single store-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate stores.  If a grouped
     access is instead being provided by a permute-and-store operation,
     include the cost of the permutes.  */
  if (!store_lanes_p && group_size > 1)
    {
      /* Uses a high and low interleave or shuffle operations for each
	 needed permute.  */
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vect_model_store_cost: strided group_size = %d .\n",
                         group_size);
    }

  /* Costs of the stores.  */
  vect_get_store_cost (first_dr, ncopies, &inside_cost, body_cost_vec);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_store_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}

/* Calculate cost of DR's memory access.  */
void
vect_get_store_cost (struct data_reference *dr, int ncopies,
		     unsigned int *inside_cost,
		     stmt_vector_for_cost *body_cost_vec)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vector_store, stmt_info, 0,
					  vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_store_cost: aligned.\n");
	break;
      }

    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned store.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_store, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_store_cost: unaligned supported by "
                           "hardware.\n");
	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "vect_model_store_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}

/* Function vect_model_load_cost

   Models cost for loads.  In the case of grouped accesses, the last access
   has the overhead of the grouped access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

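/* For example (an illustration with made-up numbers): a strided load of a
   V4SF vector with ncopies == 2 is modelled as 2 * 4 = 8 scalar_load
   operations plus 2 vec_construct operations to assemble the vectors,
   whereas a contiguous load goes through vect_get_load_cost and is charged
   according to the alignment scheme chosen.  */
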
void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
		      bool load_lanes_p, slp_tree slp_node,
		      stmt_vector_for_cost *prologue_cost_vec,
		      stmt_vector_for_cost *body_cost_vec)
{
  int group_size;
  gimple first_stmt;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
  unsigned int inside_cost = 0, prologue_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* Grouped accesses?  */
  first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && first_stmt && !slp_node)
    {
      group_size = vect_cost_group_size (stmt_info);
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a grouped access.  */
  else
    {
      group_size = 1;
      first_dr = dr;
    }

  /* We assume that the cost of a single load-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate loads.  If a grouped
     access is instead being provided by a load-and-permute operation,
     include the cost of the permutes.  */
  if (!load_lanes_p && group_size > 1)
    {
      /* Uses an even and odd extract operations or shuffle operations
	 for each needed permute.  */
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vect_model_load_cost: strided group_size = %d .\n",
                         group_size);
    }

  /* The loads themselves.  */
  if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
    {
      /* N scalar loads plus gathering them into a vector.  */
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				       scalar_load, stmt_info, 0, vect_body);
      inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
				       stmt_info, 0, vect_body);
    }
  else
    vect_get_load_cost (first_dr, ncopies,
			((!STMT_VINFO_GROUPED_ACCESS (stmt_info))
			 || group_size > 1 || slp_node),
			&inside_cost, &prologue_cost,
			prologue_cost_vec, body_cost_vec, true);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_load_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}

/* Calculate cost of DR's memory access.  */
void
vect_get_load_cost (struct data_reference *dr, int ncopies,
		    bool add_realign_cost, unsigned int *inside_cost,
		    unsigned int *prologue_cost,
		    stmt_vector_for_cost *prologue_cost_vec,
		    stmt_vector_for_cost *body_cost_vec,
		    bool record_prologue_costs)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: aligned.\n");

	break;
      }
    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned load.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_load, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: unaligned supported by "
                           "hardware.\n");

	break;
      }
    case dr_explicit_realign:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
					  vector_load, stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vec_perm, stmt_info, 0, vect_body);

	/* FIXME: If the misalignment remains fixed across the iterations of
	   the containing loop, the following cost should be added to the
	   prologue costs.  */
	if (targetm.vectorize.builtin_mask_for_load)
	  *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
					    stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: explicit realign\n");

	break;
      }
    case dr_explicit_realign_optimized:
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: unaligned software "
                           "pipelined.\n");

	/* Unaligned software pipeline has a load of an address, an initial
	   load, and possibly a mask operation to "prime" the loop.  However,
	   if this is an access in a group of loads, which provide grouped
	   access, then the above cost should only be considered for one
	   access in the group.  Inside the loop, there is a load op
	   and a realignment op.  */

	if (add_realign_cost && record_prologue_costs)
	  {
	    *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
						vector_stmt, stmt_info,
						0, vect_prologue);
	    if (targetm.vectorize.builtin_mask_for_load)
	      *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
						  vector_stmt, stmt_info,
						  0, vect_prologue);
	  }

	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: explicit realign optimized"
                           "\n");

	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "vect_model_load_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}

/* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
   the loop preheader for the vectorized stmt STMT.  */

static void
vect_init_vector_1 (gimple stmt, gimple new_stmt, gimple_stmt_iterator *gsi)
{
  if (gsi)
    vect_finish_stmt_generation (stmt, new_stmt, gsi);
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
        {
          struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
          basic_block new_bb;
          edge pe;

          if (nested_in_vect_loop_p (loop, stmt))
            loop = loop->inner;

          pe = loop_preheader_edge (loop);
          new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
          gcc_assert (!new_bb);
        }
      else
        {
          bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
          basic_block bb;
          gimple_stmt_iterator gsi_bb_start;

          gcc_assert (bb_vinfo);
          bb = BB_VINFO_BB (bb_vinfo);
          gsi_bb_start = gsi_after_labels (bb);
          gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
        }
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "created new init_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
      dump_printf (MSG_NOTE, "\n");
    }
}

/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new variable of type
   TYPE with the value VAL.  If TYPE is a vector type and VAL does not have
   vector type a vector with all elements equal to VAL is created first.
   Place the initialization at BSI if it is not NULL.  Otherwise, place the
   initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */

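/* For example (an illustration, not from the original comments): called
   with VAL = 5, TYPE = V4SI and GSI = NULL, this emits in the loop
   preheader an assignment of the vector constant { 5, 5, 5, 5 } to a new
   temporary and returns the SSA name of that temporary, which the caller
   then uses as the vectorized operand.  */
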
tree
vect_init_vector (gimple stmt, tree val, tree type, gimple_stmt_iterator *gsi)
{
  tree new_var;
  gimple init_stmt;
  tree vec_oprnd;
  tree new_temp;

  if (TREE_CODE (type) == VECTOR_TYPE
      && TREE_CODE (TREE_TYPE (val)) != VECTOR_TYPE)
    {
      if (!types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
	{
	  if (CONSTANT_CLASS_P (val))
	    val = fold_unary (VIEW_CONVERT_EXPR, TREE_TYPE (type), val);
	  else
	    {
	      new_temp = make_ssa_name (TREE_TYPE (type), NULL);
	      init_stmt = gimple_build_assign_with_ops (NOP_EXPR,
							new_temp, val,
							NULL_TREE);
	      vect_init_vector_1 (stmt, init_stmt, gsi);
	      val = new_temp;
	    }
	}
      val = build_vector_from_val (type, val);
    }

  new_var = vect_get_new_vect_var (type, vect_simple_var, "cst_");
  init_stmt = gimple_build_assign (new_var, val);
  new_temp = make_ssa_name (new_var, init_stmt);
  gimple_assign_set_lhs (init_stmt, new_temp);
  vect_init_vector_1 (stmt, init_stmt, gsi);
  vec_oprnd = gimple_assign_lhs (init_stmt);
  return vec_oprnd;
}

/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT.  This function returns a (vector) def that will be
   used in the vectorized stmt for STMT.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector def
   needs to be introduced.  */

tree
vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
{
  tree vec_oprnd;
  gimple vec_stmt;
  gimple def_stmt;
  stmt_vec_info def_stmt_info = NULL;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  unsigned int nunits;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  tree def;
  enum vect_def_type dt;
  bool is_simple_use;
  tree vector_type;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "vect_get_vec_def_for_operand: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
      dump_printf (MSG_NOTE, "\n");
    }

  is_simple_use = vect_is_simple_use (op, stmt, loop_vinfo, NULL,
				      &def_stmt, &def, &dt);
  gcc_assert (is_simple_use);
  if (dump_enabled_p ())
    {
      int loc_printed = 0;
      if (def)
        {
          dump_printf_loc (MSG_NOTE, vect_location, "def =  ");
          loc_printed = 1;
          dump_generic_expr (MSG_NOTE, TDF_SLIM, def);
          dump_printf (MSG_NOTE, "\n");
        }
      if (def_stmt)
        {
          if (loc_printed)
            dump_printf (MSG_NOTE, "  def_stmt =  ");
          else
            dump_printf_loc (MSG_NOTE, vect_location, "  def_stmt =  ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
          dump_printf (MSG_NOTE, "\n");
        }
    }

  switch (dt)
    {
    /* Case 1: operand is a constant.  */
    case vect_constant_def:
      {
	vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
	gcc_assert (vector_type);
	nunits = TYPE_VECTOR_SUBPARTS (vector_type);

	if (scalar_def)
	  *scalar_def = op;

        /* Create 'vect_cst_ = {cst,cst,...,cst}'  */
        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "Create vector_cst. nunits = %d\n", nunits);

        return vect_init_vector (stmt, op, vector_type, NULL);
      }

    /* Case 2: operand is defined outside the loop - loop invariant.  */
    case vect_external_def:
      {
	vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
	gcc_assert (vector_type);

	if (scalar_def)
	  *scalar_def = def;

        /* Create 'vec_inv = {inv,inv,..,inv}'  */
        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location, "Create vector_inv.\n");

        return vect_init_vector (stmt, def, vector_type, NULL);
      }

    /* Case 3: operand is defined inside the loop.  */
    case vect_internal_def:
      {
	if (scalar_def)
	  *scalar_def = NULL/* FIXME tuples: def_stmt*/;

        /* Get the def from the vectorized stmt.  */
        def_stmt_info = vinfo_for_stmt (def_stmt);

        vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
        /* Get vectorized pattern statement.  */
        if (!vec_stmt
            && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
            && !STMT_VINFO_RELEVANT (def_stmt_info))
          vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
                       STMT_VINFO_RELATED_STMT (def_stmt_info)));
        gcc_assert (vec_stmt);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else if (is_gimple_call (vec_stmt))
	  vec_oprnd = gimple_call_lhs (vec_stmt);
	else
	  vec_oprnd = gimple_assign_lhs (vec_stmt);
        return vec_oprnd;
      }

    /* Case 4: operand is defined by a loop header phi - reduction  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
      {
	struct loop *loop;

	gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
	loop = (gimple_bb (def_stmt))->loop_father;

        /* Get the def before the loop  */
        op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
        return get_initial_def_for_reduction (stmt, op, scalar_def);
      }

    /* Case 5: operand is defined by loop-header phi - induction.  */
    case vect_induction_def:
      {
	gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);

        /* Get the def from the vectorized stmt.  */
        def_stmt_info = vinfo_for_stmt (def_stmt);
        vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else
	  vec_oprnd = gimple_get_lhs (vec_stmt);
        return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}

/* Function vect_get_vec_def_for_stmt_copy

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
   of the stmt that defines VEC_OPRND.
   DT is the type of the vector def VEC_OPRND.

   Context:
        In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of the
   vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:         vectorized into:        STMT_VINFO_RELATED_STMT

   S1: x = load         VS1.0:  vx.0 = memref0      VS1.1
                        VS1.1:  vx.1 = memref1      VS1.2
                        VS1.2:  vx.2 = memref2      VS1.3
                        VS1.3:  vx.3 = memref3

   S2: z = x + ...      VSnew.0:  vz0 = vx.0 + ...  VSnew.1
                        VSnew.1:  vz1 = vx.1 + ...  VSnew.2
                        VSnew.2:  vz2 = vx.2 + ...  VSnew.3
                        VSnew.3:  vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
        To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

        To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

        For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx.0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
        vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
        vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
        vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2);  */

tree
vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
{
  gimple vec_stmt_for_operand;
  stmt_vec_info def_stmt_info;

  /* Do nothing; can reuse same def.  */
  if (dt == vect_external_def || dt == vect_constant_def)
    return vec_oprnd;

  vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
  def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
  gcc_assert (def_stmt_info);
  vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (vec_stmt_for_operand);
  vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
    vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
  else
    vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  return vec_oprnd;
}

/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */

static void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
				 vec<tree> *vec_oprnds0,
				 vec<tree> *vec_oprnds1)
{
  tree vec_oprnd = vec_oprnds0->pop ();

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  vec_oprnds0->quick_push (vec_oprnd);

  if (vec_oprnds1 && vec_oprnds1->length ())
    {
      vec_oprnd = vec_oprnds1->pop ();
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      vec_oprnds1->quick_push (vec_oprnd);
    }
}

/* Get vectorized definitions for OP0 and OP1.
   REDUC_INDEX is the index of reduction operand in case of reduction,
   and -1 otherwise.  */

void
vect_get_vec_defs (tree op0, tree op1, gimple stmt,
		   vec<tree> *vec_oprnds0,
		   vec<tree> *vec_oprnds1,
		   slp_tree slp_node, int reduc_index)
{
  if (slp_node)
    {
      int nops = (op1 == NULL_TREE) ? 1 : 2;
      auto_vec<tree> ops (nops);
      auto_vec<vec<tree> > vec_defs (nops);

      ops.quick_push (op0);
      if (op1)
        ops.quick_push (op1);

      vect_get_slp_defs (ops, slp_node, &vec_defs, reduc_index);

      *vec_oprnds0 = vec_defs[0];
      if (op1)
	*vec_oprnds1 = vec_defs[1];
    }
  else
    {
      tree vec_oprnd;

      vec_oprnds0->create (1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
      vec_oprnds0->quick_push (vec_oprnd);

      if (op1)
	{
	  vec_oprnds1->create (1);
	  vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
	  vec_oprnds1->quick_push (vec_oprnd);
	}
    }
}

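
/* Typical usage in the vectorizable_* routines (a sketch, not a quotation
   from this file): for the first vector copy (j == 0) the operands are
   obtained with

     vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
			slp_node, -1);

   and for each further copy (j > 0) with

     vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);

   which walks the STMT_VINFO_RELATED_STMT chain described above.  */
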
/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

void
vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
			     gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);

  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  if (!gsi_end_p (*gsi)
      && gimple_has_mem_ops (vec_stmt))
    {
      gimple at_stmt = gsi_stmt (*gsi);
      tree vuse = gimple_vuse (at_stmt);
      if (vuse && TREE_CODE (vuse) == SSA_NAME)
	{
	  tree vdef = gimple_vdef (at_stmt);
	  gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
	  /* If we have an SSA vuse and insert a store, update virtual
	     SSA form to avoid triggering the renamer.  Do so only
	     if we can easily see all uses - which is what almost always
	     happens with the way vectorized stmts are inserted.  */
	  if ((vdef && TREE_CODE (vdef) == SSA_NAME)
	      && ((is_gimple_assign (vec_stmt)
		   && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
		  || (is_gimple_call (vec_stmt)
		      && !(gimple_call_flags (vec_stmt)
			   & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
	    {
	      tree new_vdef = copy_ssa_name (vuse, vec_stmt);
	      gimple_set_vdef (vec_stmt, new_vdef);
	      SET_USE (gimple_vuse_op (at_stmt), new_vdef);
	    }
	}
    }
  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
                                                   bb_vinfo));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
      dump_printf (MSG_NOTE, "\n");
    }

  gimple_set_location (vec_stmt, gimple_location (stmt));

  /* While EH edges will generally prevent vectorization, stmt might
     e.g. be in a must-not-throw region.  Ensure newly created stmts
     that could throw are part of the same region.  */
  int lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
    add_stmt_to_eh_lp (vec_stmt, lp_nr);
}

/* Checks if CALL can be vectorized in type VECTYPE.  Returns
   a function declaration if the target has a vectorized version
   of the function, or NULL_TREE if the function cannot be vectorized.  */

tree
vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
{
  tree fndecl = gimple_call_fndecl (call);

  /* We only handle functions that do not read or clobber memory -- i.e.
     const or novops ones.  */
  if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
    return NULL_TREE;

  if (!fndecl
      || TREE_CODE (fndecl) != FUNCTION_DECL
      || !DECL_BUILT_IN (fndecl))
    return NULL_TREE;

  return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
							 vectype_in);
}


static tree permute_vec_elements (tree, tree, tree, gimple,
				  gimple_stmt_iterator *);

/* Function vectorizable_mask_load_store.

   Check if STMT performs a conditional load or store that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

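/* For example (an illustration, not from the original comments): the scalar
   loop

     for (i = 0; i < n; i++)
       if (c[i])
	 a[i] = b[i];

   is if-converted into MASK_LOAD/MASK_STORE internal calls guarded by the
   comparison result, and this function turns those calls into vector
   IFN_MASK_LOAD/IFN_MASK_STORE statements (or masked gathers) when the
   target supports them.  */
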
1755 vectorizable_mask_load_store (gimple stmt
, gimple_stmt_iterator
*gsi
,
1756 gimple
*vec_stmt
, slp_tree slp_node
)
1758 tree vec_dest
= NULL
;
1759 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
1760 stmt_vec_info prev_stmt_info
;
1761 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
1762 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
1763 bool nested_in_vect_loop
= nested_in_vect_loop_p (loop
, stmt
);
1764 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
);
1765 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
1769 tree dataref_ptr
= NULL_TREE
;
1771 int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
1775 tree gather_base
= NULL_TREE
, gather_off
= NULL_TREE
;
1776 tree gather_off_vectype
= NULL_TREE
, gather_decl
= NULL_TREE
;
1777 int gather_scale
= 1;
1778 enum vect_def_type gather_dt
= vect_unknown_def_type
;
1783 enum vect_def_type dt
;
1785 if (slp_node
!= NULL
)
1788 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
1789 gcc_assert (ncopies
>= 1);
1791 is_store
= gimple_call_internal_fn (stmt
) == IFN_MASK_STORE
;
1792 mask
= gimple_call_arg (stmt
, 2);
1793 if (TYPE_PRECISION (TREE_TYPE (mask
))
1794 != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype
))))
1797 /* FORNOW. This restriction should be relaxed. */
1798 if (nested_in_vect_loop
&& ncopies
> 1)
1800 if (dump_enabled_p ())
1801 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1802 "multiple types in nested loop.");
1806 if (!STMT_VINFO_RELEVANT_P (stmt_info
))
1809 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
1812 if (!STMT_VINFO_DATA_REF (stmt_info
))
1815 elem_type
= TREE_TYPE (vectype
);
1817 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
))
1820 if (STMT_VINFO_STRIDE_LOAD_P (stmt_info
))
1823 if (STMT_VINFO_GATHER_P (stmt_info
))
1827 gather_decl
= vect_check_gather (stmt
, loop_vinfo
, &gather_base
,
1828 &gather_off
, &gather_scale
);
1829 gcc_assert (gather_decl
);
1830 if (!vect_is_simple_use_1 (gather_off
, NULL
, loop_vinfo
, NULL
,
1831 &def_stmt
, &def
, &gather_dt
,
1832 &gather_off_vectype
))
1834 if (dump_enabled_p ())
1835 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1836 "gather index use not simple.");
1840 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gather_decl
));
1842 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist
))));
1843 if (TREE_CODE (masktype
) == INTEGER_TYPE
)
1845 if (dump_enabled_p ())
1846 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1847 "masked gather with integer mask not supported.");
1851 else if (tree_int_cst_compare (nested_in_vect_loop
1852 ? STMT_VINFO_DR_STEP (stmt_info
)
1853 : DR_STEP (dr
), size_zero_node
) <= 0)
1855 else if (!VECTOR_MODE_P (TYPE_MODE (vectype
))
1856 || !can_vec_mask_load_store_p (TYPE_MODE (vectype
), !is_store
))
1859 if (TREE_CODE (mask
) != SSA_NAME
)
1862 if (!vect_is_simple_use (mask
, stmt
, loop_vinfo
, NULL
,
1863 &def_stmt
, &def
, &dt
))
1868 tree rhs
= gimple_call_arg (stmt
, 3);
1869 if (!vect_is_simple_use (rhs
, stmt
, loop_vinfo
, NULL
,
1870 &def_stmt
, &def
, &dt
))
1874 if (!vec_stmt
) /* transformation not required. */
1876 STMT_VINFO_TYPE (stmt_info
) = call_vec_info_type
;
1878 vect_model_store_cost (stmt_info
, ncopies
, false, dt
,
1881 vect_model_load_cost (stmt_info
, ncopies
, false, NULL
, NULL
, NULL
);
1887 if (STMT_VINFO_GATHER_P (stmt_info
))
1889 tree vec_oprnd0
= NULL_TREE
, op
;
1890 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gather_decl
));
1891 tree rettype
, srctype
, ptrtype
, idxtype
, masktype
, scaletype
;
1892 tree ptr
, vec_mask
= NULL_TREE
, mask_op
= NULL_TREE
, var
, scale
;
1893 tree perm_mask
= NULL_TREE
, prev_res
= NULL_TREE
;
1894 tree mask_perm_mask
= NULL_TREE
;
1895 edge pe
= loop_preheader_edge (loop
);
1898 enum { NARROW
, NONE
, WIDEN
} modifier
;
1899 int gather_off_nunits
= TYPE_VECTOR_SUBPARTS (gather_off_vectype
);
1901 rettype
= TREE_TYPE (TREE_TYPE (gather_decl
));
1902 srctype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1903 ptrtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1904 idxtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1905 masktype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1906 scaletype
= TREE_VALUE (arglist
);
1907 gcc_checking_assert (types_compatible_p (srctype
, rettype
)
1908 && types_compatible_p (srctype
, masktype
));
1910 if (nunits
== gather_off_nunits
)
1912	  else if (nunits == gather_off_nunits / 2)
1914	      unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
1917	      for (i = 0; i < gather_off_nunits; ++i)
1918	        sel[i] = i | nunits;
1920	      perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
1922	  else if (nunits == gather_off_nunits * 2)
1924	      unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
1927	      for (i = 0; i < nunits; ++i)
1928	        sel[i] = i < gather_off_nunits
1929	                 ? i : i + nunits - gather_off_nunits;
1931	      perm_mask = vect_gen_perm_mask_checked (vectype, sel);
1933	      for (i = 0; i < nunits; ++i)
1934	        sel[i] = i | gather_off_nunits;
1935	      mask_perm_mask = vect_gen_perm_mask_checked (masktype, sel);
1940	  vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
1942	  ptr = fold_convert (ptrtype, gather_base);
1943	  if (!is_gimple_min_invariant (ptr))
1945	      ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
1946	      new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
1947	      gcc_assert (!new_bb);
1950	  scale = build_int_cst (scaletype, gather_scale);
1952	  prev_stmt_info = NULL;
1953	  for (j = 0; j < ncopies; ++j)
1955	      if (modifier == WIDEN && (j & 1))
1956	        op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
1957	                                   perm_mask, stmt, gsi);
1960	          = vect_get_vec_def_for_operand (gather_off, stmt, NULL);
1963	          = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);
1965	      if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
1967	          gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
1968	                      == TYPE_VECTOR_SUBPARTS (idxtype));
1969	          var = vect_get_new_vect_var (idxtype, vect_simple_var, NULL);
1970	          var = make_ssa_name (var, NULL);
1971	          op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
1973	            = gimple_build_assign_with_ops (VIEW_CONVERT_EXPR, var,
1975	          vect_finish_stmt_generation (stmt, new_stmt, gsi);
1979	      if (mask_perm_mask && (j & 1))
1980	        mask_op = permute_vec_elements (mask_op, mask_op,
1981	                                        mask_perm_mask, stmt, gsi);
1985	          vec_mask = vect_get_vec_def_for_operand (mask, stmt, NULL);
1988	          vect_is_simple_use (vec_mask, NULL, loop_vinfo, NULL,
1989	                              &def_stmt, &def, &dt);
1990	          vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
1994	      if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
1996	          gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op))
1997	                      == TYPE_VECTOR_SUBPARTS (masktype));
1998	          var = vect_get_new_vect_var (masktype, vect_simple_var,
2000	          var = make_ssa_name (var, NULL);
2001	          mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
2003	            = gimple_build_assign_with_ops (VIEW_CONVERT_EXPR, var,
2004	                                            mask_op, NULL_TREE);
2005	          vect_finish_stmt_generation (stmt, new_stmt, gsi);
2011	        = gimple_build_call (gather_decl, 5, mask_op, ptr, op, mask_op,
2014	      if (!useless_type_conversion_p (vectype, rettype))
2016	          gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
2017	                      == TYPE_VECTOR_SUBPARTS (rettype));
2018	          var = vect_get_new_vect_var (rettype, vect_simple_var, NULL);
2019	          op = make_ssa_name (var, new_stmt);
2020	          gimple_call_set_lhs (new_stmt, op);
2021	          vect_finish_stmt_generation (stmt, new_stmt, gsi);
2022	          var = make_ssa_name (vec_dest, NULL);
2023	          op = build1 (VIEW_CONVERT_EXPR, vectype, op);
2025	            = gimple_build_assign_with_ops (VIEW_CONVERT_EXPR, var, op,
2030	          var = make_ssa_name (vec_dest, new_stmt);
2031	          gimple_call_set_lhs (new_stmt, var);
2034	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
2036	      if (modifier == NARROW)
2043	          var = permute_vec_elements (prev_res, var,
2044	                                      perm_mask, stmt, gsi);
2045	          new_stmt = SSA_NAME_DEF_STMT (var);
2048	      if (prev_stmt_info == NULL)
2049	        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2051	        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2052	      prev_stmt_info = vinfo_for_stmt (new_stmt);
2055	  /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
	     from the IL.  */
2057	  tree lhs = gimple_call_lhs (stmt);
2058	  new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
2059	  set_vinfo_for_stmt (new_stmt, stmt_info);
2060	  set_vinfo_for_stmt (stmt, NULL);
2061	  STMT_VINFO_STMT (stmt_info) = new_stmt;
2062	  gsi_replace (gsi, new_stmt, true);
2067	  tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
2068	  prev_stmt_info = NULL;
2069	  for (i = 0; i < ncopies; i++)
2071	      unsigned align, misalign;
2075	      tree rhs = gimple_call_arg (stmt, 3);
2076	      vec_rhs = vect_get_vec_def_for_operand (rhs, stmt, NULL);
2077	      vec_mask = vect_get_vec_def_for_operand (mask, stmt, NULL);
2078	      /* We should have caught mismatched types earlier.  */
2079	      gcc_assert (useless_type_conversion_p (vectype,
2080	                                             TREE_TYPE (vec_rhs)));
2081	      dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
2082	                                              NULL_TREE, &dummy, gsi,
2083	                                              &ptr_incr, false, &inv_p);
2084	      gcc_assert (!inv_p);
2088	      vect_is_simple_use (vec_rhs, NULL, loop_vinfo, NULL, &def_stmt,
2090	      vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs);
2091	      vect_is_simple_use (vec_mask, NULL, loop_vinfo, NULL, &def_stmt,
2093	      vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2094	      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2095	                                     TYPE_SIZE_UNIT (vectype));
2098	      align = TYPE_ALIGN_UNIT (vectype);
2099	      if (aligned_access_p (dr))
2101	      else if (DR_MISALIGNMENT (dr) == -1)
2103	          align = TYPE_ALIGN_UNIT (elem_type);
2107	        misalign = DR_MISALIGNMENT (dr);
2108	      set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2111	        = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
2112	                                      gimple_call_arg (stmt, 1),
2114	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
2116	        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2118	        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2119	      prev_stmt_info = vinfo_for_stmt (new_stmt);
2124	  tree vec_mask = NULL_TREE;
2125	  prev_stmt_info = NULL;
2126	  vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
2127	  for (i = 0; i < ncopies; i++)
2129	      unsigned align, misalign;
2133	      vec_mask = vect_get_vec_def_for_operand (mask, stmt, NULL);
2134	      dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
2135	                                              NULL_TREE, &dummy, gsi,
2136	                                              &ptr_incr, false, &inv_p);
2137	      gcc_assert (!inv_p);
2141	      vect_is_simple_use (vec_mask, NULL, loop_vinfo, NULL, &def_stmt,
2143	      vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2144	      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2145	                                     TYPE_SIZE_UNIT (vectype));
2148	      align = TYPE_ALIGN_UNIT (vectype);
2149	      if (aligned_access_p (dr))
2151	      else if (DR_MISALIGNMENT (dr) == -1)
2153	          align = TYPE_ALIGN_UNIT (elem_type);
2157	        misalign = DR_MISALIGNMENT (dr);
2158	      set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2161	        = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
2162	                                      gimple_call_arg (stmt, 1),
2164	      gimple_call_set_lhs (new_stmt, make_ssa_name (vec_dest, NULL));
2165	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
2167	        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2169	        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2170	      prev_stmt_info = vinfo_for_stmt (new_stmt);
2176	      /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
	         from the IL.  */
2178	      tree lhs = gimple_call_lhs (stmt);
2179	      new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
2180	      set_vinfo_for_stmt (new_stmt, stmt_info);
2181	      set_vinfo_for_stmt (stmt, NULL);
2182	      STMT_VINFO_STMT (stmt_info) = new_stmt;
2183	      gsi_replace (gsi, new_stmt, true);
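
/* An illustrative sketch, not taken from the original file, of the rewrite
   handled by vectorizable_mask_load_store above.  A conditional access such as

     for (i = 0; i < n; i++)
       if (c[i])
         x[i] = p[i];

   is first if-converted into scalar MASK_LOAD / MASK_STORE internal calls; the
   code above then widens them into vector internal calls, roughly

     vect_1 = MASK_LOAD (vectp_p, align, vect_mask);
     MASK_STORE (vectp_x, align, vect_mask, vect_1);

   emitting one such call per copy (ncopies) of the vectorized statement.  */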
2190 /* Function vectorizable_call.
2192 Check if STMT performs a function call that can be vectorized.
2193 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2194 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2195 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
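
/* An illustrative example, under the assumption that the target provides a
   suitable vector builtin (the decl is looked up via vectorizable_function).
   A scalar call inside a vectorized loop such as

     a_5 = sqrt (b_3);

   is replaced by something like

     vect_a = <target vector sqrt builtin> (vect_b);

   with extra NARROW/WIDEN copies when the input and output vector types have
   different numbers of lanes.  The builtin name is target specific.  */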
2198	vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
2204	  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
2205	  stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
2206	  tree vectype_out, vectype_in;
2209	  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2210	  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2211	  tree fndecl, new_temp, def, rhs_type;
2213	  enum vect_def_type dt[3]
2214	    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
2215	  gimple new_stmt = NULL;
2217	  vec<tree> vargs = vNULL;
2218	  enum { NARROW, NONE, WIDEN } modifier;
2222	  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2225	  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2228	  /* Is STMT a vectorizable call?   */
2229	  if (!is_gimple_call (stmt))
2232	  if (gimple_call_internal_p (stmt)
2233	      && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
2234	          || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
2235	    return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
2238	  if (gimple_call_lhs (stmt) == NULL_TREE
2239	      || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
2242	  gcc_checking_assert (!stmt_can_throw_internal (stmt));
2244	  vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2246	  /* Process function arguments.  */
2247	  rhs_type = NULL_TREE;
2248	  vectype_in = NULL_TREE;
2249	  nargs = gimple_call_num_args (stmt);
2251	  /* Bail out if the function has more than three arguments, we do not have
2252	     interesting builtin functions to vectorize with more than two arguments
2253	     except for fma.  No arguments is also not good.  */
2254	  if (nargs == 0 || nargs > 3)
2257	  /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic.  */
2258	  if (gimple_call_internal_p (stmt)
2259	      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2262	      rhs_type = unsigned_type_node;
2265	  for (i = 0; i < nargs; i++)
2269	      op = gimple_call_arg (stmt, i);
2271	      /* We can only handle calls with arguments of the same type.  */
2273	          && !types_compatible_p (rhs_type, TREE_TYPE (op)))
2275	          if (dump_enabled_p ())
2276	            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2277	                             "argument types differ.\n");
2281	        rhs_type = TREE_TYPE (op);
2283	      if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
2284	                                 &def_stmt, &def, &dt[i], &opvectype))
2286	          if (dump_enabled_p ())
2287	            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2288	                             "use not simple.\n");
2293	        vectype_in = opvectype;
2295	               && opvectype != vectype_in)
2297	          if (dump_enabled_p ())
2298	            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2299	                             "argument vector types differ.\n");
2303	  /* If all arguments are external or constant defs use a vector type with
2304	     the same size as the output vector type.  */
2306	    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
2308	    gcc_assert (vectype_in);
2311	      if (dump_enabled_p ())
2313	          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2314	                           "no vectype for scalar type ");
2315	          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
2316	          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2323	  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2324	  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2325	  if (nunits_in == nunits_out / 2)
2327	  else if (nunits_out == nunits_in)
2329	  else if (nunits_out == nunits_in / 2)
2334	  /* For now, we only vectorize functions if a target specific builtin
2335	     is available.  TODO -- in some cases, it might be profitable to
2336	     insert the calls for pieces of the vector, in order to be able
2337	     to vectorize other operations in the loop.  */
2338	  fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
2339	  if (fndecl == NULL_TREE)
2341	      if (gimple_call_internal_p (stmt)
2342	          && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE
2345	          && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2346	          && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
2347	          && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2348	             == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
2350	          /* We can handle IFN_GOMP_SIMD_LANE by returning a
2351	             { 0, 1, 2, ... vf - 1 } vector.  */
2352	          gcc_assert (nargs == 0);
2356	          if (dump_enabled_p ())
2357	            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2358	                             "function is not vectorizable.\n");
2363	  gcc_assert (!gimple_vuse (stmt));
2365	  if (slp_node || PURE_SLP_STMT (stmt_info))
2367	  else if (modifier == NARROW)
2368	    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2370	    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2372	  /* Sanity check: make sure that at least one copy of the vectorized stmt
2373	     needs to be generated.  */
2374	  gcc_assert (ncopies >= 1);
2376	  if (!vec_stmt) /* transformation not required.  */
2378	      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2379	      if (dump_enabled_p ())
2380	        dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
2382	      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
2388	  if (dump_enabled_p ())
2389	    dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
2392	  scalar_dest = gimple_call_lhs (stmt);
2393	  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2395	  prev_stmt_info = NULL;
2399	      for (j = 0; j < ncopies; ++j)
2401	          /* Build argument list for the vectorized call.  */
2403	            vargs.create (nargs);
2409	              auto_vec<vec<tree> > vec_defs (nargs);
2410	              vec<tree> vec_oprnds0;
2412	              for (i = 0; i < nargs; i++)
2413	                vargs.quick_push (gimple_call_arg (stmt, i));
2414	              vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
2415	              vec_oprnds0 = vec_defs[0];
2417	              /* Arguments are ready.  Create the new vector stmt.  */
2418	              FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
2421	                  for (k = 0; k < nargs; k++)
2423	                      vec<tree> vec_oprndsk = vec_defs[k];
2424	                      vargs[k] = vec_oprndsk[i];
2426	                  new_stmt = gimple_build_call_vec (fndecl, vargs);
2427	                  new_temp = make_ssa_name (vec_dest, new_stmt);
2428	                  gimple_call_set_lhs (new_stmt, new_temp);
2429	                  vect_finish_stmt_generation (stmt, new_stmt, gsi);
2430	                  SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2433	              for (i = 0; i < nargs; i++)
2435	                  vec<tree> vec_oprndsi = vec_defs[i];
2436	                  vec_oprndsi.release ();
2441	          for (i = 0; i < nargs; i++)
2443	              op = gimple_call_arg (stmt, i);
2446	                  = vect_get_vec_def_for_operand (op, stmt, NULL);
2449	                  vec_oprnd0 = gimple_call_arg (new_stmt, i);
2451	                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2454	              vargs.quick_push (vec_oprnd0);
2457	          if (gimple_call_internal_p (stmt)
2458	              && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2460	              tree *v = XALLOCAVEC (tree, nunits_out);
2462	              for (k = 0; k < nunits_out; ++k)
2463	                v[k] = build_int_cst (unsigned_type_node, j * nunits_out + k);
2464	              tree cst = build_vector (vectype_out, v);
2466	                = vect_get_new_vect_var (vectype_out, vect_simple_var, "cst_");
2467	              gimple init_stmt = gimple_build_assign (new_var, cst);
2468	              new_temp = make_ssa_name (new_var, init_stmt);
2469	              gimple_assign_set_lhs (init_stmt, new_temp);
2470	              vect_init_vector_1 (stmt, init_stmt, NULL);
2471	              new_temp = make_ssa_name (vec_dest, NULL);
2472	              new_stmt = gimple_build_assign (new_temp,
2473	                                              gimple_assign_lhs (init_stmt));
2477	              new_stmt = gimple_build_call_vec (fndecl, vargs);
2478	              new_temp = make_ssa_name (vec_dest, new_stmt);
2479	              gimple_call_set_lhs (new_stmt, new_temp);
2481	          vect_finish_stmt_generation (stmt, new_stmt, gsi);
2484	            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2486	            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2488	          prev_stmt_info = vinfo_for_stmt (new_stmt);
2494	      for (j = 0; j < ncopies; ++j)
2496	          /* Build argument list for the vectorized call.  */
2498	            vargs.create (nargs * 2);
2504	              auto_vec<vec<tree> > vec_defs (nargs);
2505	              vec<tree> vec_oprnds0;
2507	              for (i = 0; i < nargs; i++)
2508	                vargs.quick_push (gimple_call_arg (stmt, i));
2509	              vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
2510	              vec_oprnds0 = vec_defs[0];
2512	              /* Arguments are ready.  Create the new vector stmt.  */
2513	              for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
2517	                  for (k = 0; k < nargs; k++)
2519	                      vec<tree> vec_oprndsk = vec_defs[k];
2520	                      vargs.quick_push (vec_oprndsk[i]);
2521	                      vargs.quick_push (vec_oprndsk[i + 1]);
2523	                  new_stmt = gimple_build_call_vec (fndecl, vargs);
2524	                  new_temp = make_ssa_name (vec_dest, new_stmt);
2525	                  gimple_call_set_lhs (new_stmt, new_temp);
2526	                  vect_finish_stmt_generation (stmt, new_stmt, gsi);
2527	                  SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2530	              for (i = 0; i < nargs; i++)
2532	                  vec<tree> vec_oprndsi = vec_defs[i];
2533	                  vec_oprndsi.release ();
2538	          for (i = 0; i < nargs; i++)
2540	              op = gimple_call_arg (stmt, i);
2544	                    = vect_get_vec_def_for_operand (op, stmt, NULL);
2546	                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2550	                  vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
2552	                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
2554	                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2557	              vargs.quick_push (vec_oprnd0);
2558	              vargs.quick_push (vec_oprnd1);
2561	          new_stmt = gimple_build_call_vec (fndecl, vargs);
2562	          new_temp = make_ssa_name (vec_dest, new_stmt);
2563	          gimple_call_set_lhs (new_stmt, new_temp);
2564	          vect_finish_stmt_generation (stmt, new_stmt, gsi);
2567	            STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2569	            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2571	          prev_stmt_info = vinfo_for_stmt (new_stmt);
2574	      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2579	      /* No current target implements this case.  */
2585	  /* The call in STMT might prevent it from being removed in dce.
2586	     We however cannot remove it here, due to the way the ssa name
2587	     it defines is mapped to the new definition.  So just replace
2588	     rhs of the statement with something harmless.  */
2593	  type = TREE_TYPE (scalar_dest);
2594	  if (is_pattern_stmt_p (stmt_info))
2595	    lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
2597	    lhs = gimple_call_lhs (stmt);
2598	  new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
2599	  set_vinfo_for_stmt (new_stmt, stmt_info);
2600	  set_vinfo_for_stmt (stmt, NULL);
2601	  STMT_VINFO_STMT (stmt_info) = new_stmt;
2602	  gsi_replace (gsi, new_stmt, false);
2608 struct simd_call_arg_info
2612	  enum vect_def_type dt;
2613	  HOST_WIDE_INT linear_step;
2617 /* Function vectorizable_simd_clone_call.
2619 Check if STMT performs a function call that can be vectorized
2620 by calling a simd clone of the function.
2621 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2622 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2623 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
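
/* An illustrative example only.  Given a declaration such as

     #pragma omp declare simd
     int foo (int x);

   the compiler creates simd clones of foo (for instance a clone that takes
   and returns vectors of four ints).  The code below chooses the cheapest
   usable clone from node->simd_clones and rewrites a scalar call

     c_4 = foo (a_2);

   into a call of that clone on vector operands, splitting or concatenating
   the operand vectors when the clone's simdlen differs from the loop's
   vectorization factor.  */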
2626	vectorizable_simd_clone_call (gimple stmt, gimple_stmt_iterator *gsi,
2627	                              gimple *vec_stmt, slp_tree slp_node)
2632	  tree vec_oprnd0 = NULL_TREE;
2633	  stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
2635	  unsigned int nunits;
2636	  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2637	  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2638	  struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
2639	  tree fndecl, new_temp, def;
2641	  gimple new_stmt = NULL;
2643	  vec<simd_call_arg_info> arginfo = vNULL;
2644	  vec<tree> vargs = vNULL;
2646	  tree lhs, rtype, ratype;
2647	  vec<constructor_elt, va_gc> *ret_ctor_elts;
2649	  /* Is STMT a vectorizable call?   */
2650	  if (!is_gimple_call (stmt))
2653	  fndecl = gimple_call_fndecl (stmt);
2654	  if (fndecl == NULL_TREE)
2657	  struct cgraph_node *node = cgraph_node::get (fndecl);
2658	  if (node == NULL || node->simd_clones == NULL)
2661	  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2664	  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2667	  if (gimple_call_lhs (stmt)
2668	      && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
2671	  gcc_checking_assert (!stmt_can_throw_internal (stmt));
2673	  vectype = STMT_VINFO_VECTYPE (stmt_info);
2675	  if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
2679	  if (slp_node || PURE_SLP_STMT (stmt_info))
2682	  /* Process function arguments.  */
2683	  nargs = gimple_call_num_args (stmt);
2685	  /* Bail out if the function has zero arguments.  */
2689	  arginfo.create (nargs);
2691	  for (i = 0; i < nargs; i++)
2693	      simd_call_arg_info thisarginfo;
2696	      thisarginfo.linear_step = 0;
2697	      thisarginfo.align = 0;
2698	      thisarginfo.op = NULL_TREE;
2700	      op = gimple_call_arg (stmt, i);
2701	      if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
2702	                                 &def_stmt, &def, &thisarginfo.dt,
2703	                                 &thisarginfo.vectype)
2704	          || thisarginfo.dt == vect_uninitialized_def)
2706	          if (dump_enabled_p ())
2707	            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2708	                             "use not simple.\n");
2713	      if (thisarginfo.dt == vect_constant_def
2714	          || thisarginfo.dt == vect_external_def)
2715	        gcc_assert (thisarginfo.vectype == NULL_TREE);
2717	        gcc_assert (thisarginfo.vectype != NULL_TREE);
2719	      if (thisarginfo.dt != vect_constant_def
2720	          && thisarginfo.dt != vect_external_def
2722	          && TREE_CODE (op) == SSA_NAME
2723	          && simple_iv (loop, loop_containing_stmt (stmt), op, &iv, false)
2724	          && tree_fits_shwi_p (iv.step))
2726	          thisarginfo.linear_step = tree_to_shwi (iv.step);
2727	          thisarginfo.op = iv.base;
2729	      else if ((thisarginfo.dt == vect_constant_def
2730	                || thisarginfo.dt == vect_external_def)
2731	               && POINTER_TYPE_P (TREE_TYPE (op)))
2732	        thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
2734	      arginfo.quick_push (thisarginfo);
2737	  unsigned int badness = 0;
2738	  struct cgraph_node *bestn = NULL;
2739	  if (STMT_VINFO_SIMD_CLONE_FNDECL (stmt_info))
2740	    bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_FNDECL (stmt_info));
2742	    for (struct cgraph_node *n = node->simd_clones; n != NULL;
2743	         n = n->simdclone->next_clone)
2745	        unsigned int this_badness = 0;
2746	        if (n->simdclone->simdlen
2747	            > (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
2748	            || n->simdclone->nargs != nargs)
2750	        if (n->simdclone->simdlen
2751	            < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo))
2752	          this_badness += (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
2753	                           - exact_log2 (n->simdclone->simdlen)) * 1024;
2754	        if (n->simdclone->inbranch)
2755	          this_badness += 2048;
2756	        int target_badness = targetm.simd_clone.usable (n);
2757	        if (target_badness < 0)
2759	        this_badness += target_badness * 512;
2760	        /* FORNOW: Have to add code to add the mask argument.  */
2761	        if (n->simdclone->inbranch)
2763	        for (i = 0; i < nargs; i++)
2765	            switch (n->simdclone->args[i].arg_type)
2767	              case SIMD_CLONE_ARG_TYPE_VECTOR:
2768	                if (!useless_type_conversion_p
2769	                       (n->simdclone->args[i].orig_type,
2770	                        TREE_TYPE (gimple_call_arg (stmt, i))))
2772	                else if (arginfo[i].dt == vect_constant_def
2773	                         || arginfo[i].dt == vect_external_def
2774	                         || arginfo[i].linear_step)
2777	              case SIMD_CLONE_ARG_TYPE_UNIFORM:
2778	                if (arginfo[i].dt != vect_constant_def
2779	                    && arginfo[i].dt != vect_external_def)
2782	              case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
2783	                if (arginfo[i].dt == vect_constant_def
2784	                    || arginfo[i].dt == vect_external_def
2785	                    || (arginfo[i].linear_step
2786	                        != n->simdclone->args[i].linear_step))
2789	              case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
2793	              case SIMD_CLONE_ARG_TYPE_MASK:
2796	        if (i == (size_t) -1)
2798	        if (n->simdclone->args[i].alignment > arginfo[i].align)
2803	        if (arginfo[i].align)
2804	          this_badness += (exact_log2 (arginfo[i].align)
2805	                           - exact_log2 (n->simdclone->args[i].alignment));
2807	        if (i == (size_t) -1)
2809	        if (bestn == NULL || this_badness < badness)
2812	            badness = this_badness;
2822	  for (i = 0; i < nargs; i++)
2823	    if ((arginfo[i].dt == vect_constant_def
2824	         || arginfo[i].dt == vect_external_def)
2825	        && bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
2828	          = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
2830	        if (arginfo[i].vectype == NULL
2831	            || (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
2832	                > bestn->simdclone->simdlen))
2839	  fndecl = bestn->decl;
2840	  nunits = bestn->simdclone->simdlen;
2841	  ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
2843	  /* If the function isn't const, only allow it in simd loops where user
2844	     has asserted that at least nunits consecutive iterations can be
2845	     performed using SIMD instructions.  */
2846	  if ((loop == NULL || (unsigned) loop->safelen < nunits)
2847	      && gimple_vuse (stmt))
2853	  /* Sanity check: make sure that at least one copy of the vectorized stmt
2854	     needs to be generated.  */
2855	  gcc_assert (ncopies >= 1);
2857	  if (!vec_stmt) /* transformation not required.  */
2859	      STMT_VINFO_SIMD_CLONE_FNDECL (stmt_info) = bestn->decl;
2860	      STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
2861	      if (dump_enabled_p ())
2862	        dump_printf_loc (MSG_NOTE, vect_location,
2863	                         "=== vectorizable_simd_clone_call ===\n");
2864	      /* vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
2871	  if (dump_enabled_p ())
2872	    dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
2875	  scalar_dest = gimple_call_lhs (stmt);
2876	  vec_dest = NULL_TREE;
2881	      vec_dest = vect_create_destination_var (scalar_dest, vectype);
2882	      rtype = TREE_TYPE (TREE_TYPE (fndecl));
2883	      if (TREE_CODE (rtype) == ARRAY_TYPE)
2886	          rtype = TREE_TYPE (ratype);
2890	  prev_stmt_info = NULL;
2891	  for (j = 0; j < ncopies; ++j)
2893	      /* Build argument list for the vectorized call.  */
2895	        vargs.create (nargs);
2899	      for (i = 0; i < nargs; i++)
2901	          unsigned int k, l, m, o;
2903	          op = gimple_call_arg (stmt, i);
2904	          switch (bestn->simdclone->args[i].arg_type)
2906	            case SIMD_CLONE_ARG_TYPE_VECTOR:
2907	              atype = bestn->simdclone->args[i].vector_type;
2908	              o = nunits / TYPE_VECTOR_SUBPARTS (atype);
2909	              for (m = j * o; m < (j + 1) * o; m++)
2911	                  if (TYPE_VECTOR_SUBPARTS (atype)
2912	                      < TYPE_VECTOR_SUBPARTS (arginfo[i].vectype))
2914	                      unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
2915	                      k = (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
2916	                           / TYPE_VECTOR_SUBPARTS (atype));
2917	                      gcc_assert ((k & (k - 1)) == 0);
2920	                          = vect_get_vec_def_for_operand (op, stmt, NULL);
2923	                          vec_oprnd0 = arginfo[i].op;
2924	                          if ((m & (k - 1)) == 0)
2926	                              = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
2929	                      arginfo[i].op = vec_oprnd0;
2931	                        = build3 (BIT_FIELD_REF, atype, vec_oprnd0,
2933	                                  bitsize_int ((m & (k - 1)) * prec));
2935	                        = gimple_build_assign (make_ssa_name (atype, NULL),
2937	                      vect_finish_stmt_generation (stmt, new_stmt, gsi);
2938	                      vargs.safe_push (gimple_assign_lhs (new_stmt));
2942	                      k = (TYPE_VECTOR_SUBPARTS (atype)
2943	                           / TYPE_VECTOR_SUBPARTS (arginfo[i].vectype));
2944	                      gcc_assert ((k & (k - 1)) == 0);
2945	                      vec<constructor_elt, va_gc> *ctor_elts;
2947	                        vec_alloc (ctor_elts, k);
2950	                      for (l = 0; l < k; l++)
2952	                          if (m == 0 && l == 0)
2954	                              = vect_get_vec_def_for_operand (op, stmt, NULL);
2957	                              = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
2959	                          arginfo[i].op = vec_oprnd0;
2962	                            CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
2966	                          vargs.safe_push (vec_oprnd0);
2969	                      vec_oprnd0 = build_constructor (atype, ctor_elts);
2971	                        = gimple_build_assign (make_ssa_name (atype, NULL),
2973	                      vect_finish_stmt_generation (stmt, new_stmt, gsi);
2974	                      vargs.safe_push (gimple_assign_lhs (new_stmt));
2979	            case SIMD_CLONE_ARG_TYPE_UNIFORM:
2980	              vargs.safe_push (op);
2982	            case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
2987	                    = force_gimple_operand (arginfo[i].op, &stmts, true,
2992	                      edge pe = loop_preheader_edge (loop);
2993	                      new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
2994	                      gcc_assert (!new_bb);
2996	                  tree phi_res = copy_ssa_name (op, NULL);
2997	                  gimple new_phi = create_phi_node (phi_res, loop->header);
2998	                  set_vinfo_for_stmt (new_phi,
2999	                                      new_stmt_vec_info (new_phi, loop_vinfo,
3001	                  add_phi_arg (new_phi, arginfo[i].op,
3002	                               loop_preheader_edge (loop), UNKNOWN_LOCATION);
3004	                    = POINTER_TYPE_P (TREE_TYPE (op))
3005	                      ? POINTER_PLUS_EXPR : PLUS_EXPR;
3006	                  tree type = POINTER_TYPE_P (TREE_TYPE (op))
3007	                              ? sizetype : TREE_TYPE (op);
3009	                    = wi::mul (bestn->simdclone->args[i].linear_step,
3011	                  tree tcst = wide_int_to_tree (type, cst);
3012	                  tree phi_arg = copy_ssa_name (op, NULL);
3013	                  new_stmt = gimple_build_assign_with_ops (code, phi_arg,
3015	                  gimple_stmt_iterator si = gsi_after_labels (loop->header);
3016	                  gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
3017	                  set_vinfo_for_stmt (new_stmt,
3018	                                      new_stmt_vec_info (new_stmt, loop_vinfo,
3020	                  add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
3022	                  arginfo[i].op = phi_res;
3023	                  vargs.safe_push (phi_res);
3028	                    = POINTER_TYPE_P (TREE_TYPE (op))
3029	                      ? POINTER_PLUS_EXPR : PLUS_EXPR;
3030	                  tree type = POINTER_TYPE_P (TREE_TYPE (op))
3031	                              ? sizetype : TREE_TYPE (op);
3033	                    = wi::mul (bestn->simdclone->args[i].linear_step,
3035	                  tree tcst = wide_int_to_tree (type, cst);
3036	                  new_temp = make_ssa_name (TREE_TYPE (op), NULL);
3038	                    = gimple_build_assign_with_ops (code, new_temp,
3039	                                                    arginfo[i].op, tcst);
3040	                  vect_finish_stmt_generation (stmt, new_stmt, gsi);
3041	                  vargs.safe_push (new_temp);
3044	            case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
3050	      new_stmt = gimple_build_call_vec (fndecl, vargs);
3053	          gcc_assert (ratype || TYPE_VECTOR_SUBPARTS (rtype) == nunits);
3055	            new_temp = create_tmp_var (ratype, NULL);
3056	          else if (TYPE_VECTOR_SUBPARTS (vectype)
3057	                   == TYPE_VECTOR_SUBPARTS (rtype))
3058	            new_temp = make_ssa_name (vec_dest, new_stmt);
3060	            new_temp = make_ssa_name (rtype, new_stmt);
3061	          gimple_call_set_lhs (new_stmt, new_temp);
3063	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
3067	          if (TYPE_VECTOR_SUBPARTS (vectype) < nunits)
3070	              unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
3071	              k = nunits / TYPE_VECTOR_SUBPARTS (vectype);
3072	              gcc_assert ((k & (k - 1)) == 0);
3073	              for (l = 0; l < k; l++)
3078	                      t = build_fold_addr_expr (new_temp);
3079	                      t = build2 (MEM_REF, vectype, t,
3080	                                  build_int_cst (TREE_TYPE (t),
3081	                                                 l * prec / BITS_PER_UNIT));
3084	                    t = build3 (BIT_FIELD_REF, vectype, new_temp,
3085	                                size_int (prec), bitsize_int (l * prec));
3087	                    = gimple_build_assign (make_ssa_name (vectype, NULL), t);
3088	                  vect_finish_stmt_generation (stmt, new_stmt, gsi);
3089	                  if (j == 0 && l == 0)
3090	                    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3092	                    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3094	                  prev_stmt_info = vinfo_for_stmt (new_stmt);
3099	                  tree clobber = build_constructor (ratype, NULL);
3100	                  TREE_THIS_VOLATILE (clobber) = 1;
3101	                  new_stmt = gimple_build_assign (new_temp, clobber);
3102	                  vect_finish_stmt_generation (stmt, new_stmt, gsi);
3106	          else if (TYPE_VECTOR_SUBPARTS (vectype) > nunits)
3108	              unsigned int k = (TYPE_VECTOR_SUBPARTS (vectype)
3109	                                / TYPE_VECTOR_SUBPARTS (rtype));
3110	              gcc_assert ((k & (k - 1)) == 0);
3111	              if ((j & (k - 1)) == 0)
3112	                vec_alloc (ret_ctor_elts, k);
3115	                  unsigned int m, o = nunits / TYPE_VECTOR_SUBPARTS (rtype);
3116	                  for (m = 0; m < o; m++)
3118	                      tree tem = build4 (ARRAY_REF, rtype, new_temp,
3119	                                         size_int (m), NULL_TREE, NULL_TREE);
3121	                        = gimple_build_assign (make_ssa_name (rtype, NULL),
3123	                      vect_finish_stmt_generation (stmt, new_stmt, gsi);
3124	                      CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
3125	                                              gimple_assign_lhs (new_stmt));
3127	                  tree clobber = build_constructor (ratype, NULL);
3128	                  TREE_THIS_VOLATILE (clobber) = 1;
3129	                  new_stmt = gimple_build_assign (new_temp, clobber);
3130	                  vect_finish_stmt_generation (stmt, new_stmt, gsi);
3133	                CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
3134	              if ((j & (k - 1)) != k - 1)
3136	              vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
3138	                = gimple_build_assign (make_ssa_name (vec_dest, NULL),
3140	              vect_finish_stmt_generation (stmt, new_stmt, gsi);
3142	              if ((unsigned) j == k - 1)
3143	                STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3145	                STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3147	              prev_stmt_info = vinfo_for_stmt (new_stmt);
3152	              tree t = build_fold_addr_expr (new_temp);
3153	              t = build2 (MEM_REF, vectype, t,
3154	                          build_int_cst (TREE_TYPE (t), 0));
3156	                = gimple_build_assign (make_ssa_name (vec_dest, NULL), t);
3157	              vect_finish_stmt_generation (stmt, new_stmt, gsi);
3158	              tree clobber = build_constructor (ratype, NULL);
3159	              TREE_THIS_VOLATILE (clobber) = 1;
3160	              vect_finish_stmt_generation (stmt,
3161	                                           gimple_build_assign (new_temp,
3167	        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3169	        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3171	      prev_stmt_info = vinfo_for_stmt (new_stmt);
3176	  /* The call in STMT might prevent it from being removed in dce.
3177	     We however cannot remove it here, due to the way the ssa name
3178	     it defines is mapped to the new definition.  So just replace
3179	     rhs of the statement with something harmless.  */
3186	      type = TREE_TYPE (scalar_dest);
3187	      if (is_pattern_stmt_p (stmt_info))
3188	        lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
3190	        lhs = gimple_call_lhs (stmt);
3191	      new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
3194	    new_stmt = gimple_build_nop ();
3195	  set_vinfo_for_stmt (new_stmt, stmt_info);
3196	  set_vinfo_for_stmt (stmt, NULL);
3197	  STMT_VINFO_STMT (stmt_info) = new_stmt;
3198	  gsi_replace (gsi, new_stmt, false);
3199	  unlink_stmt_vdef (stmt);
3205 /* Function vect_gen_widened_results_half
3207 Create a vector stmt whose code, type, number of arguments, and result
3208 variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
3209 VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at BSI.
3210 In the case that CODE is a CALL_EXPR, this means that a call to DECL
3211 needs to be created (DECL is a function-decl of a target-builtin).
3212 STMT is the original scalar stmt that we are vectorizing. */
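
/* An illustrative sketch of the "two halves" scheme used by the callers of
   this function.  Widening a V8HI operand into V4SI results needs two vector
   statements, for example (the exact codes are target dependent):

     vect_lo = VEC_WIDEN_MULT_LO_EXPR <vect_a, vect_b>;
     vect_hi = VEC_WIDEN_MULT_HI_EXPR <vect_a, vect_b>;

   This helper builds one such half, either as a tree-code assignment or as a
   call to the target builtin DECL when CODE is CALL_EXPR.  */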
3215	vect_gen_widened_results_half (enum tree_code code,
3217	                               tree vec_oprnd0, tree vec_oprnd1, int op_type,
3218	                               tree vec_dest, gimple_stmt_iterator *gsi,
3224	  /* Generate half of the widened result:  */
3225	  if (code == CALL_EXPR)
3227	      /* Target specific support  */
3228	      if (op_type == binary_op)
3229	        new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
3231	        new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
3232	      new_temp = make_ssa_name (vec_dest, new_stmt);
3233	      gimple_call_set_lhs (new_stmt, new_temp);
3237	      /* Generic support  */
3238	      gcc_assert (op_type == TREE_CODE_LENGTH (code));
3239	      if (op_type != binary_op)
3241	      new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
3243	      new_temp = make_ssa_name (vec_dest, new_stmt);
3244	      gimple_assign_set_lhs (new_stmt, new_temp);
3246	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
3252 /* Get vectorized definitions for loop-based vectorization. For the first
3253 operand we call vect_get_vec_def_for_operand() (with OPRND containing
3254 scalar operand), and for the rest we get a copy with
3255 vect_get_vec_def_for_stmt_copy() using the previous vector definition
3256 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
3257 The vectors are collected into VEC_OPRNDS. */
3260	vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
3261	                          vec<tree> *vec_oprnds, int multi_step_cvt)
3265	  /* Get first vector operand.  */
3266	  /* All the vector operands except the very first one (that is scalar oprnd)
	     are stmt copies.  */
3268	  if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
3269	    vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
3271	    vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
3273	  vec_oprnds->quick_push (vec_oprnd);
3275	  /* Get second vector operand.  */
3276	  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
3277	  vec_oprnds->quick_push (vec_oprnd);
3281	  /* For conversion in multiple steps, continue to get operands
	     recursively.  */
3284	    vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
3288 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
3289    For multi-step conversions store the resulting vectors and call the function
        recursively.  */
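
/* An illustrative example of a multi-step demotion, not taken from the
   original file.  Narrowing int to char with a vectorization factor of 16
   packs four V4SI vectors in two steps, roughly:

     tmp0 = VEC_PACK_TRUNC_EXPR <v0, v1>;       (V4SI, V4SI -> V8HI)
     tmp1 = VEC_PACK_TRUNC_EXPR <v2, v3>;
     res  = VEC_PACK_TRUNC_EXPR <tmp0, tmp1>;   (V8HI, V8HI -> V16QI)

   Each recursion level below halves the number of operands in VEC_OPRNDS.  */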
3293	vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
3294	                                       int multi_step_cvt, gimple stmt,
3296	                                       gimple_stmt_iterator *gsi,
3297	                                       slp_tree slp_node, enum tree_code code,
3298	                                       stmt_vec_info *prev_stmt_info)
3301	  tree vop0, vop1, new_tmp, vec_dest;
3303	  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3305	  vec_dest = vec_dsts.pop ();
3307	  for (i = 0; i < vec_oprnds->length (); i += 2)
3309	      /* Create demotion operation.  */
3310	      vop0 = (*vec_oprnds)[i];
3311	      vop1 = (*vec_oprnds)[i + 1];
3312	      new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
3313	      new_tmp = make_ssa_name (vec_dest, new_stmt);
3314	      gimple_assign_set_lhs (new_stmt, new_tmp);
3315	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
3318	        /* Store the resulting vector for next recursive call.  */
3319	        (*vec_oprnds)[i/2] = new_tmp;
3322	          /* This is the last step of the conversion sequence.  Store the
3323	             vectors in SLP_NODE or in vector info of the scalar statement
3324	             (or in STMT_VINFO_RELATED_STMT chain).  */
3326	            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3329	              if (!*prev_stmt_info)
3330	                STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3332	                STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
3334	              *prev_stmt_info = vinfo_for_stmt (new_stmt);
3339	  /* For multi-step demotion operations we first generate demotion operations
3340	     from the source type to the intermediate types, and then combine the
3341	     results (stored in VEC_OPRNDS) in demotion operation to the destination
	     type.  */
3345	      /* At each level of recursion we have half of the operands we had at the
	         previous level.  */
3347	      vec_oprnds->truncate ((i+1)/2);
3348	      vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
3349	                                             stmt, vec_dsts, gsi, slp_node,
3350	                                             VEC_PACK_TRUNC_EXPR,
3354	  vec_dsts.quick_push (vec_dest);
3358 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
3359 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
3360 the resulting vectors and call the function recursively. */
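
/* An illustrative example only.  Promoting a V8HI operand produces two V4SI
   results through the hi/lo pair built by vect_gen_widened_results_half,
   roughly

     vect_lo = VEC_UNPACK_LO_EXPR <vect_a>;
     vect_hi = VEC_UNPACK_HI_EXPR <vect_a>;

   so each input vector in VEC_OPRNDS0 is replaced by two output vectors.  */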
3363	vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
3364	                                        vec<tree> *vec_oprnds1,
3365	                                        gimple stmt, tree vec_dest,
3366	                                        gimple_stmt_iterator *gsi,
3367	                                        enum tree_code code1,
3368	                                        enum tree_code code2, tree decl1,
3369	                                        tree decl2, int op_type)
3372	  tree vop0, vop1, new_tmp1, new_tmp2;
3373	  gimple new_stmt1, new_stmt2;
3374	  vec<tree> vec_tmp = vNULL;
3376	  vec_tmp.create (vec_oprnds0->length () * 2);
3377	  FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
3379	      if (op_type == binary_op)
3380	        vop1 = (*vec_oprnds1)[i];
3384	      /* Generate the two halves of promotion operation.  */
3385	      new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
3386	                                                 op_type, vec_dest, gsi, stmt);
3387	      new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
3388	                                                 op_type, vec_dest, gsi, stmt);
3389	      if (is_gimple_call (new_stmt1))
3391	          new_tmp1 = gimple_call_lhs (new_stmt1);
3392	          new_tmp2 = gimple_call_lhs (new_stmt2);
3396	          new_tmp1 = gimple_assign_lhs (new_stmt1);
3397	          new_tmp2 = gimple_assign_lhs (new_stmt2);
3400	      /* Store the results for the next step.  */
3401	      vec_tmp.quick_push (new_tmp1);
3402	      vec_tmp.quick_push (new_tmp2);
3405	  vec_oprnds0->release ();
3406	  *vec_oprnds0 = vec_tmp;
3410 /* Check if STMT performs a conversion operation that can be vectorized.
3411 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3412 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3413 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
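
/* A hedged summary of the cases handled below, added for illustration.
   NONE covers same-width conversions (for example V4SI <-> V4SF).  WIDEN
   covers promotions such as short -> int, where one input vector yields two
   wider output vectors.  NARROW covers demotions such as int -> short, where
   two input vectors are packed into one output vector.  float <-> integer
   conversions whose sizes differ may additionally go through an intermediate
   integer type (CVT_TYPE), giving a multi-step conversion.  */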
3416	vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
3417	                         gimple *vec_stmt, slp_tree slp_node)
3421	  tree op0, op1 = NULL_TREE;
3422	  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
3423	  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3424	  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3425	  enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
3426	  enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
3427	  tree decl1 = NULL_TREE, decl2 = NULL_TREE;
3431	  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
3432	  gimple new_stmt = NULL;
3433	  stmt_vec_info prev_stmt_info;
3436	  tree vectype_out, vectype_in;
3438	  tree lhs_type, rhs_type;
3439	  enum { NARROW, NONE, WIDEN } modifier;
3440	  vec<tree> vec_oprnds0 = vNULL;
3441	  vec<tree> vec_oprnds1 = vNULL;
3443	  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3444	  int multi_step_cvt = 0;
3445	  vec<tree> vec_dsts = vNULL;
3446	  vec<tree> interm_types = vNULL;
3447	  tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
3449	  machine_mode rhs_mode;
3450	  unsigned short fltsz;
3452	  /* Is STMT a vectorizable conversion?   */
3454	  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3457	  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3460	  if (!is_gimple_assign (stmt))
3463	  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
3466	  code = gimple_assign_rhs_code (stmt);
3467	  if (!CONVERT_EXPR_CODE_P (code)
3468	      && code != FIX_TRUNC_EXPR
3469	      && code != FLOAT_EXPR
3470	      && code != WIDEN_MULT_EXPR
3471	      && code != WIDEN_LSHIFT_EXPR)
3474	  op_type = TREE_CODE_LENGTH (code);
3476	  /* Check types of lhs and rhs.  */
3477	  scalar_dest = gimple_assign_lhs (stmt);
3478	  lhs_type = TREE_TYPE (scalar_dest);
3479	  vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3481	  op0 = gimple_assign_rhs1 (stmt);
3482	  rhs_type = TREE_TYPE (op0);
3484	  if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
3485	      && !((INTEGRAL_TYPE_P (lhs_type)
3486	            && INTEGRAL_TYPE_P (rhs_type))
3487	           || (SCALAR_FLOAT_TYPE_P (lhs_type)
3488	               && SCALAR_FLOAT_TYPE_P (rhs_type))))
3491	  if ((INTEGRAL_TYPE_P (lhs_type)
3492	       && (TYPE_PRECISION (lhs_type)
3493	           != GET_MODE_PRECISION (TYPE_MODE (lhs_type))))
3494	      || (INTEGRAL_TYPE_P (rhs_type)
3495	          && (TYPE_PRECISION (rhs_type)
3496	              != GET_MODE_PRECISION (TYPE_MODE (rhs_type)))))
3498	      if (dump_enabled_p ())
3499	        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3500	                         "type conversion to/from bit-precision unsupported."
3505	  /* Check the operands of the operation.  */
3506	  if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
3507	                             &def_stmt, &def, &dt[0], &vectype_in))
3509	      if (dump_enabled_p ())
3510	        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3511	                         "use not simple.\n");
3514	  if (op_type == binary_op)
3518	      op1 = gimple_assign_rhs2 (stmt);
3519	      gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
3520	      /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
	         OP1.  */
3522	      if (CONSTANT_CLASS_P (op0))
3523	        ok = vect_is_simple_use_1 (op1, stmt, loop_vinfo, bb_vinfo,
3524	                                   &def_stmt, &def, &dt[1], &vectype_in);
3526	        ok = vect_is_simple_use (op1, stmt, loop_vinfo, bb_vinfo, &def_stmt,
3531	          if (dump_enabled_p ())
3532	            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3533	                             "use not simple.\n");
3538	  /* If op0 is an external or constant defs use a vector type of
3539	     the same size as the output vector type.  */
3541	    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
3543	    gcc_assert (vectype_in);
3546	      if (dump_enabled_p ())
3548	          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3549	                           "no vectype for scalar type ");
3550	          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
3551	          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3557	  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
3558	  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
3559	  if (nunits_in < nunits_out)
3561	  else if (nunits_out == nunits_in)
3566	  /* Multiple types in SLP are handled by creating the appropriate number of
3567	     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
	     case of SLP.  */
3569	  if (slp_node || PURE_SLP_STMT (stmt_info))
3571	  else if (modifier == NARROW)
3572	    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
3574	    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
3576	  /* Sanity check: make sure that at least one copy of the vectorized stmt
3577	     needs to be generated.  */
3578	  gcc_assert (ncopies >= 1);
3580	  /* Supportable by target?  */
3584	      if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
3586	      if (supportable_convert_operation (code, vectype_out, vectype_in,
3591	      if (dump_enabled_p ())
3592	        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3593	                         "conversion not supported by target.\n");
3597	      if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
3598	                                          &code1, &code2, &multi_step_cvt,
3601	          /* Binary widening operation can only be supported directly by the
	             architecture.  */
3603	          gcc_assert (!(multi_step_cvt && op_type == binary_op));
3607	      if (code != FLOAT_EXPR
3608	          || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
3609	              <= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
3612	      rhs_mode = TYPE_MODE (rhs_type);
3613	      fltsz = GET_MODE_SIZE (TYPE_MODE (lhs_type));
3614	      for (rhs_mode = GET_MODE_2XWIDER_MODE (TYPE_MODE (rhs_type));
3615	           rhs_mode != VOIDmode && GET_MODE_SIZE (rhs_mode) <= fltsz;
3616	           rhs_mode = GET_MODE_2XWIDER_MODE (rhs_mode))
3619	            = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
3620	          cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
3621	          if (cvt_type == NULL_TREE)
3624	              if (GET_MODE_SIZE (rhs_mode) == fltsz)
3626	          if (!supportable_convert_operation (code, vectype_out,
3627	                                              cvt_type, &decl1, &codecvt1))
3630	          else if (!supportable_widening_operation (code, stmt, vectype_out,
3631	                                                    cvt_type, &codecvt1,
3632	                                                    &codecvt2, &multi_step_cvt,
3636	            gcc_assert (multi_step_cvt == 0);
3638	          if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
3639	                                              vectype_in, &code1, &code2,
3640	                                              &multi_step_cvt, &interm_types))
3644	      if (rhs_mode == VOIDmode || GET_MODE_SIZE (rhs_mode) > fltsz)
3647	      if (GET_MODE_SIZE (rhs_mode) == fltsz)
3648	        codecvt2 = ERROR_MARK;
3652	          interm_types.safe_push (cvt_type);
3653	          cvt_type = NULL_TREE;
3658	      gcc_assert (op_type == unary_op);
3659	      if (supportable_narrowing_operation (code, vectype_out, vectype_in,
3660	                                           &code1, &multi_step_cvt,
3664	      if (code != FIX_TRUNC_EXPR
3665	          || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
3666	              >= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
3669	      rhs_mode = TYPE_MODE (rhs_type);
3671	        = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
3672	      cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
3673	      if (cvt_type == NULL_TREE)
3675	      if (!supportable_convert_operation (code, cvt_type, vectype_in,
3678	      if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
3679	                                           &code1, &multi_step_cvt,
3688	  if (!vec_stmt) /* transformation not required.  */
3690	      if (dump_enabled_p ())
3691	        dump_printf_loc (MSG_NOTE, vect_location,
3692	                         "=== vectorizable_conversion ===\n");
3693	      if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
3695	          STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
3696	          vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
3698	      else if (modifier == NARROW)
3700	          STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
3701	          vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
3705	          STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
3706	          vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
3708	      interm_types.release ();
3713	  if (dump_enabled_p ())
3714	    dump_printf_loc (MSG_NOTE, vect_location,
3715	                     "transform conversion. ncopies = %d.\n", ncopies);
3717	  if (op_type == binary_op)
3719	      if (CONSTANT_CLASS_P (op0))
3720	        op0 = fold_convert (TREE_TYPE (op1), op0);
3721	      else if (CONSTANT_CLASS_P (op1))
3722	        op1 = fold_convert (TREE_TYPE (op0), op1);
3725	  /* In case of multi-step conversion, we first generate conversion operations
3726	     to the intermediate types, and then from those types to the final one.
3727	     We create vector destinations for the intermediate type (TYPES) received
3728	     from supportable_*_operation, and store them in the correct order
3729	     for future use in vect_create_vectorized_*_stmts ().  */
3730	  vec_dsts.create (multi_step_cvt + 1);
3731	  vec_dest = vect_create_destination_var (scalar_dest,
3732	                                          (cvt_type && modifier == WIDEN)
3733	                                          ? cvt_type : vectype_out);
3734	  vec_dsts.quick_push (vec_dest);
3738	      for (i = interm_types.length () - 1;
3739	           interm_types.iterate (i, &intermediate_type); i--)
3741	          vec_dest = vect_create_destination_var (scalar_dest,
3743	          vec_dsts.quick_push (vec_dest);
3748	    vec_dest = vect_create_destination_var (scalar_dest,
3750	                                            ? vectype_out : cvt_type);
3754	      if (modifier == WIDEN)
3756	          vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
3757	          if (op_type == binary_op)
3758	            vec_oprnds1.create (1);
3760	      else if (modifier == NARROW)
3761	        vec_oprnds0.create (
3762	                   2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
3764	  else if (code == WIDEN_LSHIFT_EXPR)
3765	    vec_oprnds1.create (slp_node->vec_stmts_size);
3768	  prev_stmt_info = NULL;
3772	      for (j = 0; j < ncopies; j++)
3775	            vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node,
3778	            vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
3780	          FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
3782	              /* Arguments are ready, create the new vector stmt.  */
3783	              if (code1 == CALL_EXPR)
3785	                  new_stmt = gimple_build_call (decl1, 1, vop0);
3786	                  new_temp = make_ssa_name (vec_dest, new_stmt);
3787	                  gimple_call_set_lhs (new_stmt, new_temp);
3791	                  gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
3792	                  new_stmt = gimple_build_assign_with_ops (code1, vec_dest,
3794	                  new_temp = make_ssa_name (vec_dest, new_stmt);
3795	                  gimple_assign_set_lhs (new_stmt, new_temp);
3798	              vect_finish_stmt_generation (stmt, new_stmt, gsi);
3800	                SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3804	                STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3806	                STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3807	              prev_stmt_info = vinfo_for_stmt (new_stmt);
3812	      /* In case the vectorization factor (VF) is bigger than the number
3813	         of elements that we can fit in a vectype (nunits), we have to
3814	         generate more than one vector stmt - i.e - we need to "unroll"
3815	         the vector stmt by a factor VF/nunits.  */
3816	      for (j = 0; j < ncopies; j++)
3823	              if (code == WIDEN_LSHIFT_EXPR)
3828	                  /* Store vec_oprnd1 for every vector stmt to be created
3829	                     for SLP_NODE.  We check during the analysis that all
3830	                     the shift arguments are the same.  */
3831	                  for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
3832	                    vec_oprnds1.quick_push (vec_oprnd1);
3834	                  vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
3838	                vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
3839	                                   &vec_oprnds1, slp_node, -1);
3843	              vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
3844	              vec_oprnds0.quick_push (vec_oprnd0);
3845	              if (op_type == binary_op)
3847	                  if (code == WIDEN_LSHIFT_EXPR)
3850	                    vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt,
3852	                  vec_oprnds1.quick_push (vec_oprnd1);
3858	              vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
3859	              vec_oprnds0.truncate (0);
3860	              vec_oprnds0.quick_push (vec_oprnd0);
3861	              if (op_type == binary_op)
3863	                  if (code == WIDEN_LSHIFT_EXPR)
3866	                    vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
3868	                  vec_oprnds1.truncate (0);
3869	                  vec_oprnds1.quick_push (vec_oprnd1);
3873	          /* Arguments are ready.  Create the new vector stmts.  */
3874	          for (i = multi_step_cvt; i >= 0; i--)
3876	              tree this_dest = vec_dsts[i];
3877	              enum tree_code c1 = code1, c2 = code2;
3878	              if (i == 0 && codecvt2 != ERROR_MARK)
3883	              vect_create_vectorized_promotion_stmts (&vec_oprnds0,
3885	                                                      stmt, this_dest, gsi,
3886	                                                      c1, c2, decl1, decl2,
3890	          FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
3894	                  if (codecvt1 == CALL_EXPR)
3896	                      new_stmt = gimple_build_call (decl1, 1, vop0);
3897	                      new_temp = make_ssa_name (vec_dest, new_stmt);
3898	                      gimple_call_set_lhs (new_stmt, new_temp);
3902	                      gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
3903	                      new_temp = make_ssa_name (vec_dest, NULL);
3904	                      new_stmt = gimple_build_assign_with_ops (codecvt1,
3909	                  vect_finish_stmt_generation (stmt, new_stmt, gsi);
3912	                new_stmt = SSA_NAME_DEF_STMT (vop0);
3915	                SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3918	                  if (!prev_stmt_info)
3919	                    STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3921	                    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3922	                  prev_stmt_info = vinfo_for_stmt (new_stmt);
3927	        *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3931	      /* In case the vectorization factor (VF) is bigger than the number
3932	         of elements that we can fit in a vectype (nunits), we have to
3933	         generate more than one vector stmt - i.e - we need to "unroll"
3934	         the vector stmt by a factor VF/nunits.  */
3935	      for (j = 0; j < ncopies; j++)
3939	            vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
3943	              vec_oprnds0.truncate (0);
3944	              vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
3945	                                        vect_pow2 (multi_step_cvt) - 1);
3948	          /* Arguments are ready.  Create the new vector stmts.  */
3950	            FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
3952	                if (codecvt1 == CALL_EXPR)
3954	                    new_stmt = gimple_build_call (decl1, 1, vop0);
3955	                    new_temp = make_ssa_name (vec_dest, new_stmt);
3956	                    gimple_call_set_lhs (new_stmt, new_temp);
3960	                    gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
3961	                    new_temp = make_ssa_name (vec_dest, NULL);
3962	                    new_stmt = gimple_build_assign_with_ops (codecvt1, new_temp,
3966	                vect_finish_stmt_generation (stmt, new_stmt, gsi);
3967	                vec_oprnds0[i] = new_temp;
3970	          vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
3971	                                                 stmt, vec_dsts, gsi,
3976	    *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3980	  vec_oprnds0.release ();
3981	  vec_oprnds1.release ();
3982	  vec_dsts.release ();
3983	  interm_types.release ();
3989 /* Function vectorizable_assignment.
3991 Check if STMT performs an assignment (copy) that can be vectorized.
3992 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3993 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3994 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
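
/* An illustrative example only.  A plain copy or a no-op conversion such as

     i_2 = (int) u_1;     (same precision, same number of lanes)

   becomes a single vector copy per copy of the statement, with a
   VIEW_CONVERT_EXPR wrapped around the operand when the source and
   destination vector types differ only in sign or representation.  */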
3997	vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
3998	                         gimple *vec_stmt, slp_tree slp_node)
4003	  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4004	  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4005	  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4009	  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4010	  unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4013	  vec<tree> vec_oprnds = vNULL;
4015	  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4016	  gimple new_stmt = NULL;
4017	  stmt_vec_info prev_stmt_info = NULL;
4018	  enum tree_code code;
4021	  /* Multiple types in SLP are handled by creating the appropriate number of
4022	     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
	     case of SLP.  */
4024	  if (slp_node || PURE_SLP_STMT (stmt_info))
4027	    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4029	  gcc_assert (ncopies >= 1);
4031	  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4034	  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4037	  /* Is vectorizable assignment?  */
4038	  if (!is_gimple_assign (stmt))
4041	  scalar_dest = gimple_assign_lhs (stmt);
4042	  if (TREE_CODE (scalar_dest) != SSA_NAME)
4045	  code = gimple_assign_rhs_code (stmt);
4046	  if (gimple_assign_single_p (stmt)
4047	      || code == PAREN_EXPR
4048	      || CONVERT_EXPR_CODE_P (code))
4049	    op = gimple_assign_rhs1 (stmt);
4053	  if (code == VIEW_CONVERT_EXPR)
4054	    op = TREE_OPERAND (op, 0);
4056	  if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
4057	                             &def_stmt, &def, &dt[0], &vectype_in))
4059	      if (dump_enabled_p ())
4060	        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4061	                         "use not simple.\n");
4065	  /* We can handle NOP_EXPR conversions that do not change the number
4066	     of elements or the vector size.  */
4067	  if ((CONVERT_EXPR_CODE_P (code)
4068	       || code == VIEW_CONVERT_EXPR)
4070	          || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
4071	          || (GET_MODE_SIZE (TYPE_MODE (vectype))
4072	              != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
4075	  /* We do not handle bit-precision changes.  */
4076	  if ((CONVERT_EXPR_CODE_P (code)
4077	       || code == VIEW_CONVERT_EXPR)
4078	      && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
4079	      && ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4080	           != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4081	          || ((TYPE_PRECISION (TREE_TYPE (op))
4082	               != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op))))))
4083	      /* But a conversion that does not change the bit-pattern is ok.  */
4084	      && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4085	            > TYPE_PRECISION (TREE_TYPE (op)))
4086	           && TYPE_UNSIGNED (TREE_TYPE (op))))
4088	      if (dump_enabled_p ())
4089	        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4090	                         "type conversion to/from bit-precision "
4095	  if (!vec_stmt) /* transformation not required.  */
4097	      STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
4098	      if (dump_enabled_p ())
4099	        dump_printf_loc (MSG_NOTE, vect_location,
4100	                         "=== vectorizable_assignment ===\n");
4101	      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4106	  if (dump_enabled_p ())
4107	    dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");
4110	  vec_dest = vect_create_destination_var (scalar_dest, vectype);
4113	  for (j = 0; j < ncopies; j++)
4117	        vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node, -1);
4119	        vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
4121	      /* Arguments are ready.  Create the new vector stmt.  */
4122	      FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
4124	          if (CONVERT_EXPR_CODE_P (code)
4125	              || code == VIEW_CONVERT_EXPR)
4126	            vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
4127	          new_stmt = gimple_build_assign (vec_dest, vop);
4128	          new_temp = make_ssa_name (vec_dest, new_stmt);
4129	          gimple_assign_set_lhs (new_stmt, new_temp);
4130	          vect_finish_stmt_generation (stmt, new_stmt, gsi);
4132	            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4139	        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4141	        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4143	      prev_stmt_info = vinfo_for_stmt (new_stmt);
4146	  vec_oprnds.release ();
4151 /* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
4152 either as shift by a scalar or by a vector. */
4155	vect_supportable_shift (enum tree_code code, tree scalar_type)
4158	  machine_mode vec_mode;
4163	  vectype = get_vectype_for_scalar_type (scalar_type);
4167	  optab = optab_for_tree_code (code, vectype, optab_scalar);
4169	      || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
4171	      optab = optab_for_tree_code (code, vectype, optab_vector);
4173	          || (optab_handler (optab, TYPE_MODE (vectype))
4174	              == CODE_FOR_nothing))
4178	  vec_mode = TYPE_MODE (vectype);
4179	  icode = (int) optab_handler (optab, vec_mode);
4180	  if (icode == CODE_FOR_nothing)
4187 /* Function vectorizable_shift.
4189 Check if STMT performs a shift operation that can be vectorized.
4190 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4191 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4192 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
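
/* An illustrative example only.  For a shift such as

     x_3 = y_2 << s_1;

   the code below uses the vector/scalar shift optab when the shift amount is
   invariant (every lane is shifted by the same s_1), and the vector/vector
   shift optab when the amount varies per iteration, in which case s_1 is
   itself vectorized and each lane is shifted by the corresponding lane of the
   shift-amount vector.  */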
4195 vectorizable_shift (gimple stmt
, gimple_stmt_iterator
*gsi
,
4196 gimple
*vec_stmt
, slp_tree slp_node
)
4200 tree op0
, op1
= NULL
;
4201 tree vec_oprnd1
= NULL_TREE
;
4202 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
4204 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
4205 enum tree_code code
;
4206 machine_mode vec_mode
;
4210 machine_mode optab_op2_mode
;
4213 enum vect_def_type dt
[2] = {vect_unknown_def_type
, vect_unknown_def_type
};
4214 gimple new_stmt
= NULL
;
4215 stmt_vec_info prev_stmt_info
;
4222 vec
<tree
> vec_oprnds0
= vNULL
;
4223 vec
<tree
> vec_oprnds1
= vNULL
;
4226 bool scalar_shift_arg
= true;
4227 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
4230 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
4233 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
4236 /* Is STMT a vectorizable binary/unary operation? */
4237 if (!is_gimple_assign (stmt
))
4240 if (TREE_CODE (gimple_assign_lhs (stmt
)) != SSA_NAME
)
4243 code
= gimple_assign_rhs_code (stmt
);
4245 if (!(code
== LSHIFT_EXPR
|| code
== RSHIFT_EXPR
|| code
== LROTATE_EXPR
4246 || code
== RROTATE_EXPR
))
4249 scalar_dest
= gimple_assign_lhs (stmt
);
4250 vectype_out
= STMT_VINFO_VECTYPE (stmt_info
);
4251 if (TYPE_PRECISION (TREE_TYPE (scalar_dest
))
4252 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest
))))
4254 if (dump_enabled_p ())
4255 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4256 "bit-precision shifts not supported.\n");
4260 op0
= gimple_assign_rhs1 (stmt
);
4261 if (!vect_is_simple_use_1 (op0
, stmt
, loop_vinfo
, bb_vinfo
,
4262 &def_stmt
, &def
, &dt
[0], &vectype
))
4264 if (dump_enabled_p ())
4265 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4266 "use not simple.\n");
4269 /* If op0 is an external or constant def use a vector type with
4270 the same size as the output vector type. */
4272 vectype
= get_same_sized_vectype (TREE_TYPE (op0
), vectype_out
);
4274 gcc_assert (vectype
);
4277 if (dump_enabled_p ())
4278 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4279 "no vectype for scalar type\n");
4283 nunits_out
= TYPE_VECTOR_SUBPARTS (vectype_out
);
4284 nunits_in
= TYPE_VECTOR_SUBPARTS (vectype
);
4285 if (nunits_out
!= nunits_in
)
4288 op1
= gimple_assign_rhs2 (stmt
);
4289 if (!vect_is_simple_use_1 (op1
, stmt
, loop_vinfo
, bb_vinfo
, &def_stmt
,
4290 &def
, &dt
[1], &op1_vectype
))
4292 if (dump_enabled_p ())
4293 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4294 "use not simple.\n");
4299 vf
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
4303 /* Multiple types in SLP are handled by creating the appropriate number of
4304 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4306 if (slp_node
|| PURE_SLP_STMT (stmt_info
))
4309 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits_in
;
4311 gcc_assert (ncopies
>= 1);
4313 /* Determine whether the shift amount is a vector, or scalar. If the
4314 shift/rotate amount is a vector, use the vector/vector shift optabs. */
4316 if (dt
[1] == vect_internal_def
&& !slp_node
)
4317 scalar_shift_arg
= false;
4318 else if (dt
[1] == vect_constant_def
4319 || dt
[1] == vect_external_def
4320 || dt
[1] == vect_internal_def
)
4322 /* In SLP, need to check whether the shift count is the same,
4323 in loops if it is a constant or invariant, it is always a scalar shift.  */
4327 vec
<gimple
> stmts
= SLP_TREE_SCALAR_STMTS (slp_node
);
4330 FOR_EACH_VEC_ELT (stmts
, k
, slpstmt
)
4331 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt
), op1
, 0))
4332 scalar_shift_arg
= false;
4337 if (dump_enabled_p ())
4338 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4339 "operand mode requires invariant argument.\n");
4343 /* Vector shifted by vector. */
4344 if (!scalar_shift_arg
)
4346 optab
= optab_for_tree_code (code
, vectype
, optab_vector
);
4347 if (dump_enabled_p ())
4348 dump_printf_loc (MSG_NOTE
, vect_location
,
4349 "vector/vector shift/rotate found.\n");
4352 op1_vectype
= get_same_sized_vectype (TREE_TYPE (op1
), vectype_out
);
4353 if (op1_vectype
== NULL_TREE
4354 || TYPE_MODE (op1_vectype
) != TYPE_MODE (vectype
))
4356 if (dump_enabled_p ())
4357 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4358 "unusable type for last operand in"
4359 " vector/vector shift/rotate.\n");
4363 /* See if the machine has a vector shifted by scalar insn and if not
4364 then see if it has a vector shifted by vector insn. */
4367 optab
= optab_for_tree_code (code
, vectype
, optab_scalar
);
4369 && optab_handler (optab
, TYPE_MODE (vectype
)) != CODE_FOR_nothing
)
4371 if (dump_enabled_p ())
4372 dump_printf_loc (MSG_NOTE
, vect_location
,
4373 "vector/scalar shift/rotate found.\n");
4377 optab
= optab_for_tree_code (code
, vectype
, optab_vector
);
4379 && (optab_handler (optab
, TYPE_MODE (vectype
))
4380 != CODE_FOR_nothing
))
4382 scalar_shift_arg
= false;
4384 if (dump_enabled_p ())
4385 dump_printf_loc (MSG_NOTE
, vect_location
,
4386 "vector/vector shift/rotate found.\n");
4388 /* Unlike the other binary operators, shifts/rotates have
4389 the rhs being int, instead of the same type as the lhs,
4390 so make sure the scalar is the right type if we are
4391 dealing with vectors of long long/long/short/char. */
4392 if (dt
[1] == vect_constant_def
)
4393 op1
= fold_convert (TREE_TYPE (vectype
), op1
);
4394 else if (!useless_type_conversion_p (TREE_TYPE (vectype
),
4398 && TYPE_MODE (TREE_TYPE (vectype
))
4399 != TYPE_MODE (TREE_TYPE (op1
)))
4401 if (dump_enabled_p ())
4402 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4403 "unusable type for last operand in"
4404 " vector/vector shift/rotate.\n");
4407 if (vec_stmt
&& !slp_node
)
4409 op1
= fold_convert (TREE_TYPE (vectype
), op1
);
4410 op1
= vect_init_vector (stmt
, op1
,
4411 TREE_TYPE (vectype
), NULL
);
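/* Illustrative sketch, not part of the vectorizer: when only the
   vector/vector form is available but the shift amount is invariant, the
   fold_convert plus vect_init_vector calls above amount to duplicating the
   scalar count into every lane.  Hypothetical names, GCC generic vectors.  */

typedef int illustrative_splat_v4si __attribute__ ((vector_size (16)));

static inline illustrative_splat_v4si
illustrative_splat_shift_count (int s)
{
  illustrative_splat_v4si amounts = { s, s, s, s };
  return amounts;
}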
4418 /* Supportable by target? */
4421 if (dump_enabled_p ())
4422 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4426 vec_mode
= TYPE_MODE (vectype
);
4427 icode
= (int) optab_handler (optab
, vec_mode
);
4428 if (icode
== CODE_FOR_nothing
)
4430 if (dump_enabled_p ())
4431 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4432 "op not supported by target.\n");
4433 /* Check only during analysis. */
4434 if (GET_MODE_SIZE (vec_mode
) != UNITS_PER_WORD
4435 || (vf
< vect_min_worthwhile_factor (code
)
4438 if (dump_enabled_p ())
4439 dump_printf_loc (MSG_NOTE
, vect_location
,
4440 "proceeding using word mode.\n");
4443 /* Worthwhile without SIMD support? Check only during analysis. */
4444 if (!VECTOR_MODE_P (TYPE_MODE (vectype
))
4445 && vf
< vect_min_worthwhile_factor (code
)
4448 if (dump_enabled_p ())
4449 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4450 "not worthwhile without SIMD support.\n");
4454 if (!vec_stmt
) /* transformation not required. */
4456 STMT_VINFO_TYPE (stmt_info
) = shift_vec_info_type
;
4457 if (dump_enabled_p ())
4458 dump_printf_loc (MSG_NOTE
, vect_location
,
4459 "=== vectorizable_shift ===\n");
4460 vect_model_simple_cost (stmt_info
, ncopies
, dt
, NULL
, NULL
);
4466 if (dump_enabled_p ())
4467 dump_printf_loc (MSG_NOTE
, vect_location
,
4468 "transform binary/unary operation.\n");
4471 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
4473 prev_stmt_info
= NULL
;
4474 for (j
= 0; j
< ncopies
; j
++)
4479 if (scalar_shift_arg
)
4481 /* Vector shl and shr insn patterns can be defined with scalar
4482 operand 2 (shift operand). In this case, use constant or loop
4483 invariant op1 directly, without extending it to vector mode
4485 optab_op2_mode
= insn_data
[icode
].operand
[2].mode
;
4486 if (!VECTOR_MODE_P (optab_op2_mode
))
4488 if (dump_enabled_p ())
4489 dump_printf_loc (MSG_NOTE
, vect_location
,
4490 "operand 1 using scalar mode.\n");
4492 vec_oprnds1
.create (slp_node
? slp_node
->vec_stmts_size
: 1);
4493 vec_oprnds1
.quick_push (vec_oprnd1
);
4496 /* Store vec_oprnd1 for every vector stmt to be created
4497 for SLP_NODE. We check during the analysis that all
4498 the shift arguments are the same.
4499 TODO: Allow different constants for different vector
4500 stmts generated for an SLP instance. */
4501 for (k
= 0; k
< slp_node
->vec_stmts_size
- 1; k
++)
4502 vec_oprnds1
.quick_push (vec_oprnd1
);
4507 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
4508 (a special case for certain kind of vector shifts); otherwise,
4509 operand 1 should be of a vector type (the usual case). */
4511 vect_get_vec_defs (op0
, NULL_TREE
, stmt
, &vec_oprnds0
, NULL
,
4514 vect_get_vec_defs (op0
, op1
, stmt
, &vec_oprnds0
, &vec_oprnds1
,
4518 vect_get_vec_defs_for_stmt_copy (dt
, &vec_oprnds0
, &vec_oprnds1
);
4520 /* Arguments are ready. Create the new vector stmt. */
4521 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vop0
)
4523 vop1
= vec_oprnds1
[i
];
4524 new_stmt
= gimple_build_assign_with_ops (code
, vec_dest
, vop0
, vop1
);
4525 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
4526 gimple_assign_set_lhs (new_stmt
, new_temp
);
4527 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
4529 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
4536 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
4538 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
4539 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
4542 vec_oprnds0
.release ();
4543 vec_oprnds1
.release ();
4549 /* Function vectorizable_operation.
4551    Check if STMT performs a binary, unary or ternary operation that can be vectorized.
4553    If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4554    stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4555    Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
4558 vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
4559                         gimple *vec_stmt, slp_tree slp_node)
4563 tree op0
, op1
= NULL_TREE
, op2
= NULL_TREE
;
4564 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
4566 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
4567 enum tree_code code
;
4568 machine_mode vec_mode
;
4575 enum vect_def_type dt
[3]
4576 = {vect_unknown_def_type
, vect_unknown_def_type
, vect_unknown_def_type
};
4577 gimple new_stmt
= NULL
;
4578 stmt_vec_info prev_stmt_info
;
4584 vec
<tree
> vec_oprnds0
= vNULL
;
4585 vec
<tree
> vec_oprnds1
= vNULL
;
4586 vec
<tree
> vec_oprnds2
= vNULL
;
4587 tree vop0
, vop1
, vop2
;
4588 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
4591 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
4594 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
4597 /* Is STMT a vectorizable binary/unary operation? */
4598 if (!is_gimple_assign (stmt
))
4601 if (TREE_CODE (gimple_assign_lhs (stmt
)) != SSA_NAME
)
4604 code
= gimple_assign_rhs_code (stmt
);
4606 /* For pointer addition, we should use the normal plus for
4607 the vector addition. */
4608 if (code
== POINTER_PLUS_EXPR
)
4611 /* Support only unary or binary operations. */
4612 op_type
= TREE_CODE_LENGTH (code
);
4613 if (op_type
!= unary_op
&& op_type
!= binary_op
&& op_type
!= ternary_op
)
4615 if (dump_enabled_p ())
4616 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4617 "num. args = %d (not unary/binary/ternary op).\n",
4622 scalar_dest
= gimple_assign_lhs (stmt
);
4623 vectype_out
= STMT_VINFO_VECTYPE (stmt_info
);
4625 /* Most operations cannot handle bit-precision types without extra
4627 if ((TYPE_PRECISION (TREE_TYPE (scalar_dest
))
4628 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest
))))
4629 /* Exception are bitwise binary operations. */
4630 && code
!= BIT_IOR_EXPR
4631 && code
!= BIT_XOR_EXPR
4632 && code
!= BIT_AND_EXPR
)
4634 if (dump_enabled_p ())
4635 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4636 "bit-precision arithmetic not supported.\n");
4640 op0
= gimple_assign_rhs1 (stmt
);
4641 if (!vect_is_simple_use_1 (op0
, stmt
, loop_vinfo
, bb_vinfo
,
4642 &def_stmt
, &def
, &dt
[0], &vectype
))
4644 if (dump_enabled_p ())
4645 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4646 "use not simple.\n");
4649 /* If op0 is an external or constant def use a vector type with
4650 the same size as the output vector type. */
4652 vectype
= get_same_sized_vectype (TREE_TYPE (op0
), vectype_out
);
4654 gcc_assert (vectype
);
4657 if (dump_enabled_p ())
4659 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4660 "no vectype for scalar type ");
4661 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
,
4663 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
4669 nunits_out
= TYPE_VECTOR_SUBPARTS (vectype_out
);
4670 nunits_in
= TYPE_VECTOR_SUBPARTS (vectype
);
4671 if (nunits_out
!= nunits_in
)
4674 if (op_type
== binary_op
|| op_type
== ternary_op
)
4676 op1
= gimple_assign_rhs2 (stmt
);
4677 if (!vect_is_simple_use (op1
, stmt
, loop_vinfo
, bb_vinfo
, &def_stmt
,
4680 if (dump_enabled_p ())
4681 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4682 "use not simple.\n");
4686 if (op_type
== ternary_op
)
4688 op2
= gimple_assign_rhs3 (stmt
);
4689 if (!vect_is_simple_use (op2
, stmt
, loop_vinfo
, bb_vinfo
, &def_stmt
,
4692 if (dump_enabled_p ())
4693 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4694 "use not simple.\n");
4700 vf
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
4704 /* Multiple types in SLP are handled by creating the appropriate number of
4705 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4707 if (slp_node
|| PURE_SLP_STMT (stmt_info
))
4710 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits_in
;
4712 gcc_assert (ncopies
>= 1);
4714 /* Shifts are handled in vectorizable_shift (). */
4715 if (code
== LSHIFT_EXPR
|| code
== RSHIFT_EXPR
|| code
== LROTATE_EXPR
4716 || code
== RROTATE_EXPR
)
4719 /* Supportable by target? */
4721 vec_mode
= TYPE_MODE (vectype
);
4722 if (code
== MULT_HIGHPART_EXPR
)
4724 if (can_mult_highpart_p (vec_mode
, TYPE_UNSIGNED (vectype
)))
4725 icode
= LAST_INSN_CODE
;
4727 icode
= CODE_FOR_nothing
;
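/* Illustrative sketch, not part of the vectorizer: the per-element
   semantics of MULT_HIGHPART_EXPR that can_mult_highpart_p probes for
   above -- the upper half of the double-width product.  Hypothetical name;
   assumes 32-bit int and 64-bit long long.  */

static inline int
illustrative_mult_highpart (int a, int b)
{
  return (int) (((long long) a * (long long) b) >> 32);
}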
4731 optab
= optab_for_tree_code (code
, vectype
, optab_default
);
4734 if (dump_enabled_p ())
4735 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4739 icode
= (int) optab_handler (optab
, vec_mode
);
4742 if (icode
== CODE_FOR_nothing
)
4744 if (dump_enabled_p ())
4745 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4746 "op not supported by target.\n");
4747 /* Check only during analysis. */
4748 if (GET_MODE_SIZE (vec_mode
) != UNITS_PER_WORD
4749 || (!vec_stmt
&& vf
< vect_min_worthwhile_factor (code
)))
4751 if (dump_enabled_p ())
4752 dump_printf_loc (MSG_NOTE
, vect_location
,
4753 "proceeding using word mode.\n");
4756 /* Worthwhile without SIMD support? Check only during analysis. */
4757 if (!VECTOR_MODE_P (vec_mode
)
4759 && vf
< vect_min_worthwhile_factor (code
))
4761 if (dump_enabled_p ())
4762 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4763 "not worthwhile without SIMD support.\n");
4767 if (!vec_stmt
) /* transformation not required. */
4769 STMT_VINFO_TYPE (stmt_info
) = op_vec_info_type
;
4770 if (dump_enabled_p ())
4771 dump_printf_loc (MSG_NOTE
, vect_location
,
4772 "=== vectorizable_operation ===\n");
4773 vect_model_simple_cost (stmt_info
, ncopies
, dt
, NULL
, NULL
);
4779 if (dump_enabled_p ())
4780 dump_printf_loc (MSG_NOTE
, vect_location
,
4781 "transform binary/unary operation.\n");
4784 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
4786 /* In case the vectorization factor (VF) is bigger than the number
4787 of elements that we can fit in a vectype (nunits), we have to generate
4788 more than one vector stmt - i.e - we need to "unroll" the
4789 vector stmt by a factor VF/nunits. In doing so, we record a pointer
4790 from one copy of the vector stmt to the next, in the field
4791 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
4792 stages to find the correct vector defs to be used when vectorizing
4793 stmts that use the defs of the current stmt. The example below
4794 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
4795 we need to create 4 vectorized stmts):
4797 before vectorization:
4798 RELATED_STMT VEC_STMT
4802 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
4804 RELATED_STMT VEC_STMT
4805 VS1_0: vx0 = memref0 VS1_1 -
4806 VS1_1: vx1 = memref1 VS1_2 -
4807 VS1_2: vx2 = memref2 VS1_3 -
4808 VS1_3: vx3 = memref3 - -
4809 S1: x = load - VS1_0
4812 step2: vectorize stmt S2 (done here):
4813 To vectorize stmt S2 we first need to find the relevant vector
4814 def for the first operand 'x'. This is, as usual, obtained from
4815 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
4816 that defines 'x' (S1). This way we find the stmt VS1_0, and the
4817 relevant vector def 'vx0'. Having found 'vx0' we can generate
4818 the vector stmt VS2_0, and as usual, record it in the
4819 STMT_VINFO_VEC_STMT of stmt S2.
4820 When creating the second copy (VS2_1), we obtain the relevant vector
4821 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
4822 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
4823 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
4824 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
4825 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
4826 chain of stmts and pointers:
4827 RELATED_STMT VEC_STMT
4828 VS1_0: vx0 = memref0 VS1_1 -
4829 VS1_1: vx1 = memref1 VS1_2 -
4830 VS1_2: vx2 = memref2 VS1_3 -
4831 VS1_3: vx3 = memref3 - -
4832 S1: x = load - VS1_0
4833 VS2_0: vz0 = vx0 + v1 VS2_1 -
4834 VS2_1: vz1 = vx1 + v1 VS2_2 -
4835 VS2_2: vz2 = vx2 + v1 VS2_3 -
4836 VS2_3: vz3 = vx3 + v1 - -
4837 S2: z = x + 1 - VS2_0 */
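/* Illustrative sketch, not part of the vectorizer: what the four chained
   copies VS2_0..VS2_3 described above compute for VF=16 and nunits=4,
   written with GCC's generic vector extension.  Hypothetical names; the
   real copies are gimple statements linked through STMT_VINFO_RELATED_STMT
   rather than array indices.  */

typedef int illustrative_copy_v4si __attribute__ ((vector_size (16)));

static void
illustrative_unrolled_copies (illustrative_copy_v4si dst[4],
                              const illustrative_copy_v4si src[4])
{
  dst[0] = src[0] + 1;  /* VS2_0 uses VS1_0's def.  */
  dst[1] = src[1] + 1;  /* VS2_1 uses VS1_1, reached via RELATED_STMT.  */
  dst[2] = src[2] + 1;  /* VS2_2 uses VS1_2.  */
  dst[3] = src[3] + 1;  /* VS2_3 uses VS1_3.  */
}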
4839 prev_stmt_info
= NULL
;
4840 for (j
= 0; j
< ncopies
; j
++)
4845 if (op_type
== binary_op
|| op_type
== ternary_op
)
4846 vect_get_vec_defs (op0
, op1
, stmt
, &vec_oprnds0
, &vec_oprnds1
,
4849 vect_get_vec_defs (op0
, NULL_TREE
, stmt
, &vec_oprnds0
, NULL
,
4851 if (op_type
== ternary_op
)
4853 vec_oprnds2
.create (1);
4854 vec_oprnds2
.quick_push (vect_get_vec_def_for_operand (op2
,
4861 vect_get_vec_defs_for_stmt_copy (dt
, &vec_oprnds0
, &vec_oprnds1
);
4862 if (op_type
== ternary_op
)
4864 tree vec_oprnd
= vec_oprnds2
.pop ();
4865 vec_oprnds2
.quick_push (vect_get_vec_def_for_stmt_copy (dt
[2],
4870 /* Arguments are ready. Create the new vector stmt. */
4871 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vop0
)
4873 vop1
= ((op_type
== binary_op
|| op_type
== ternary_op
)
4874 ? vec_oprnds1
[i
] : NULL_TREE
);
4875 vop2
= ((op_type
== ternary_op
)
4876 ? vec_oprnds2
[i
] : NULL_TREE
);
4877 new_stmt
= gimple_build_assign_with_ops (code
, vec_dest
,
4879 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
4880 gimple_assign_set_lhs (new_stmt
, new_temp
);
4881 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
4883 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
4890 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
4892 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
4893 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
4896 vec_oprnds0
.release ();
4897 vec_oprnds1
.release ();
4898 vec_oprnds2
.release ();
4903 /* A helper function to ensure data reference DR's base alignment for STMT_INFO.  */
4907 ensure_base_align (stmt_vec_info stmt_info, struct data_reference *dr)
4912   if (((dataref_aux *)dr->aux)->base_misaligned)
4914       tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4915       tree base_decl = ((dataref_aux *)dr->aux)->base_decl;
4917       DECL_ALIGN (base_decl) = TYPE_ALIGN (vectype);
4918       DECL_USER_ALIGN (base_decl) = 1;
4919       ((dataref_aux *)dr->aux)->base_misaligned = false;
4924 /* Given a vector type VECTYPE returns the VECTOR_CST mask that implements
4925    reversal of the vector elements.  If that is impossible to do, returns NULL.  */
4929 perm_mask_for_reverse (tree vectype)
4934   nunits = TYPE_VECTOR_SUBPARTS (vectype);
4935   sel = XALLOCAVEC (unsigned char, nunits);
4937   for (i = 0; i < nunits; ++i)
4938     sel[i] = nunits - 1 - i;
4940   if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
4942   return vect_gen_perm_mask_checked (vectype, sel);
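/* Illustrative sketch, not part of the vectorizer: the element reversal
   performed by a VEC_PERM_EXPR built from the mask above, shown with
   __builtin_shuffle for nunits == 4 (sel[i] = nunits - 1 - i).
   Hypothetical names.  */

typedef int illustrative_rev_v4si __attribute__ ((vector_size (16)));

static inline illustrative_rev_v4si
illustrative_reverse (illustrative_rev_v4si v)
{
  const illustrative_rev_v4si sel = { 3, 2, 1, 0 };
  return __builtin_shuffle (v, sel);    /* { v[3], v[2], v[1], v[0] }  */
}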
4945 /* Function vectorizable_store.
4947    Check if STMT defines a non scalar data-ref (array/pointer/structure) that can be vectorized.
4949    If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4950    stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4951    Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
4954 vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, slp_tree slp_node)
4960 tree vec_oprnd
= NULL_TREE
;
4961 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
4962 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
), *first_dr
= NULL
;
4963 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
4965 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
4966 struct loop
*loop
= NULL
;
4967 machine_mode vec_mode
;
4969 enum dr_alignment_support alignment_support_scheme
;
4972 enum vect_def_type dt
;
4973 stmt_vec_info prev_stmt_info
= NULL
;
4974 tree dataref_ptr
= NULL_TREE
;
4975 tree dataref_offset
= NULL_TREE
;
4976 gimple ptr_incr
= NULL
;
4977 int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
4980 gimple next_stmt
, first_stmt
= NULL
;
4981 bool grouped_store
= false;
4982 bool store_lanes_p
= false;
4983 unsigned int group_size
, i
;
4984 vec
<tree
> dr_chain
= vNULL
;
4985 vec
<tree
> oprnds
= vNULL
;
4986 vec
<tree
> result_chain
= vNULL
;
4988 bool negative
= false;
4989 tree offset
= NULL_TREE
;
4990 vec
<tree
> vec_oprnds
= vNULL
;
4991 bool slp
= (slp_node
!= NULL
);
4992 unsigned int vec_num
;
4993 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
4997 loop
= LOOP_VINFO_LOOP (loop_vinfo
);
4999 /* Multiple types in SLP are handled by creating the appropriate number of
5000 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5002 if (slp
|| PURE_SLP_STMT (stmt_info
))
5005 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
5007 gcc_assert (ncopies
>= 1);
5009 /* FORNOW. This restriction should be relaxed. */
5010 if (loop
&& nested_in_vect_loop_p (loop
, stmt
) && ncopies
> 1)
5012 if (dump_enabled_p ())
5013 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5014 "multiple types in nested loop.\n");
5018 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
5021 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
5024 /* Is vectorizable store? */
5026 if (!is_gimple_assign (stmt
))
5029 scalar_dest
= gimple_assign_lhs (stmt
);
5030 if (TREE_CODE (scalar_dest
) == VIEW_CONVERT_EXPR
5031 && is_pattern_stmt_p (stmt_info
))
5032 scalar_dest
= TREE_OPERAND (scalar_dest
, 0);
5033 if (TREE_CODE (scalar_dest
) != ARRAY_REF
5034 && TREE_CODE (scalar_dest
) != BIT_FIELD_REF
5035 && TREE_CODE (scalar_dest
) != INDIRECT_REF
5036 && TREE_CODE (scalar_dest
) != COMPONENT_REF
5037 && TREE_CODE (scalar_dest
) != IMAGPART_EXPR
5038 && TREE_CODE (scalar_dest
) != REALPART_EXPR
5039 && TREE_CODE (scalar_dest
) != MEM_REF
)
5042 gcc_assert (gimple_assign_single_p (stmt
));
5043 op
= gimple_assign_rhs1 (stmt
);
5044 if (!vect_is_simple_use (op
, stmt
, loop_vinfo
, bb_vinfo
, &def_stmt
,
5047 if (dump_enabled_p ())
5048 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5049 "use not simple.\n");
5053 elem_type
= TREE_TYPE (vectype
);
5054 vec_mode
= TYPE_MODE (vectype
);
5056 /* FORNOW. In some cases can vectorize even if data-type not supported
5057 (e.g. - array initialization with 0). */
5058 if (optab_handler (mov_optab
, vec_mode
) == CODE_FOR_nothing
)
5061 if (!STMT_VINFO_DATA_REF (stmt_info
))
5065 tree_int_cst_compare (loop
&& nested_in_vect_loop_p (loop
, stmt
)
5066 ? STMT_VINFO_DR_STEP (stmt_info
) : DR_STEP (dr
),
5067 size_zero_node
) < 0;
5068 if (negative
&& ncopies
> 1)
5070 if (dump_enabled_p ())
5071 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5072 "multiple types with negative step.\n");
5078 gcc_assert (!grouped_store
);
5079 alignment_support_scheme
= vect_supportable_dr_alignment (dr
, false);
5080 if (alignment_support_scheme
!= dr_aligned
5081 && alignment_support_scheme
!= dr_unaligned_supported
)
5083 if (dump_enabled_p ())
5084 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5085 "negative step but alignment required.\n");
5088 if (dt
!= vect_constant_def
5089 && dt
!= vect_external_def
5090 && !perm_mask_for_reverse (vectype
))
5092 if (dump_enabled_p ())
5093 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5094 "negative step and reversing not supported.\n");
5099 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
))
5101 grouped_store
= true;
5102 first_stmt
= GROUP_FIRST_ELEMENT (stmt_info
);
5103 if (!slp
&& !PURE_SLP_STMT (stmt_info
))
5105 group_size
= GROUP_SIZE (vinfo_for_stmt (first_stmt
));
5106 if (vect_store_lanes_supported (vectype
, group_size
))
5107 store_lanes_p
= true;
5108 else if (!vect_grouped_store_supported (vectype
, group_size
))
5112 if (first_stmt
== stmt
)
5114 /* STMT is the leader of the group. Check the operands of all the
5115 stmts of the group. */
5116 next_stmt
= GROUP_NEXT_ELEMENT (stmt_info
);
5119 gcc_assert (gimple_assign_single_p (next_stmt
));
5120 op
= gimple_assign_rhs1 (next_stmt
);
5121 if (!vect_is_simple_use (op
, next_stmt
, loop_vinfo
, bb_vinfo
,
5122 &def_stmt
, &def
, &dt
))
5124 if (dump_enabled_p ())
5125 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5126 "use not simple.\n");
5129 next_stmt
= GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt
));
5134 if (!vec_stmt
) /* transformation not required. */
5136 STMT_VINFO_TYPE (stmt_info
) = store_vec_info_type
;
5137 vect_model_store_cost (stmt_info
, ncopies
, store_lanes_p
, dt
,
5144 ensure_base_align (stmt_info
, dr
);
5148 first_dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt
));
5149 group_size
= GROUP_SIZE (vinfo_for_stmt (first_stmt
));
5151 GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt
))++;
5154 gcc_assert (!loop
|| !nested_in_vect_loop_p (loop
, stmt
));
5156 /* We vectorize all the stmts of the interleaving group when we
5157 reach the last stmt in the group. */
5158 if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt
))
5159 < GROUP_SIZE (vinfo_for_stmt (first_stmt
))
5168 grouped_store
= false;
5169 /* VEC_NUM is the number of vect stmts to be created for this
5171 vec_num
= SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node
);
5172 first_stmt
= SLP_TREE_SCALAR_STMTS (slp_node
)[0];
5173 first_dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt
));
5174 op
= gimple_assign_rhs1 (first_stmt
);
5177 /* VEC_NUM is the number of vect stmts to be created for this
5179 vec_num
= group_size
;
5185 group_size
= vec_num
= 1;
5188 if (dump_enabled_p ())
5189 dump_printf_loc (MSG_NOTE
, vect_location
,
5190 "transform store. ncopies = %d\n", ncopies
);
5192 dr_chain
.create (group_size
);
5193 oprnds
.create (group_size
);
5195 alignment_support_scheme
= vect_supportable_dr_alignment (first_dr
, false);
5196 gcc_assert (alignment_support_scheme
);
5197 /* Targets with store-lane instructions must not require explicit
5199 gcc_assert (!store_lanes_p
5200 || alignment_support_scheme
== dr_aligned
5201 || alignment_support_scheme
== dr_unaligned_supported
);
5204 offset
= size_int (-TYPE_VECTOR_SUBPARTS (vectype
) + 1);
5207 aggr_type
= build_array_type_nelts (elem_type
, vec_num
* nunits
);
5209 aggr_type
= vectype
;
5211 /* In case the vectorization factor (VF) is bigger than the number
5212 of elements that we can fit in a vectype (nunits), we have to generate
5213 more than one vector stmt - i.e - we need to "unroll" the
5214 vector stmt by a factor VF/nunits. For more details see documentation in
5215 vect_get_vec_def_for_copy_stmt. */
5217 /* In case of interleaving (non-unit grouped access):
5224 We create vectorized stores starting from base address (the access of the
5225 first stmt in the chain (S2 in the above example), when the last store stmt
5226 of the chain (S4) is reached:
5229 VS2: &base + vec_size*1 = vx0
5230 VS3: &base + vec_size*2 = vx1
5231 VS4: &base + vec_size*3 = vx3
5233 Then permutation statements are generated:
5235 VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
5236 VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
5239 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
5240 (the order of the data-refs in the output of vect_permute_store_chain
5241 corresponds to the order of scalar stmts in the interleaving chain - see
5242 the documentation of vect_permute_store_chain()).
5244 In case of both multiple types and interleaving, above vector stores and
5245 permutation stmts are created for every copy. The result vector stmts are
5246 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
5247 STMT_VINFO_RELATED_STMT for the next copies.  */
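/* Illustrative sketch, not part of the vectorizer: the interleaving
   permutations VS5/VS6 quoted above, shown with __builtin_shuffle on
   8-element vectors.  Selector values 0..7 pick lanes of VX0 and values
   8..15 pick lanes of VX3, exactly as in the masks in the comment.
   Hypothetical names.  */

typedef short illustrative_ileave_v8hi __attribute__ ((vector_size (16)));

static void
illustrative_interleave (illustrative_ileave_v8hi vx0,
                         illustrative_ileave_v8hi vx3,
                         illustrative_ileave_v8hi *vx5,
                         illustrative_ileave_v8hi *vx6)
{
  const illustrative_ileave_v8hi m_lo = { 0, 8, 1, 9, 2, 10, 3, 11 };
  const illustrative_ileave_v8hi m_hi = { 4, 12, 5, 13, 6, 14, 7, 15 };
  *vx5 = __builtin_shuffle (vx0, vx3, m_lo);    /* VS5  */
  *vx6 = __builtin_shuffle (vx0, vx3, m_hi);    /* VS6  */
}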
5250 prev_stmt_info
= NULL
;
5251 for (j
= 0; j
< ncopies
; j
++)
5259 /* Get vectorized arguments for SLP_NODE. */
5260 vect_get_vec_defs (op
, NULL_TREE
, stmt
, &vec_oprnds
,
5261 NULL
, slp_node
, -1);
5263 vec_oprnd
= vec_oprnds
[0];
5267 /* For interleaved stores we collect vectorized defs for all the
5268 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
5269 used as an input to vect_permute_store_chain(), and OPRNDS as
5270 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
5272 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
5273 OPRNDS are of size 1. */
5274 next_stmt
= first_stmt
;
5275 for (i
= 0; i
< group_size
; i
++)
5277 /* Since gaps are not supported for interleaved stores,
5278 GROUP_SIZE is the exact number of stmts in the chain.
5279 Therefore, NEXT_STMT can't be NULL_TREE. In case that
5280 there is no interleaving, GROUP_SIZE is 1, and only one
5281 iteration of the loop will be executed. */
5282 gcc_assert (next_stmt
5283 && gimple_assign_single_p (next_stmt
));
5284 op
= gimple_assign_rhs1 (next_stmt
);
5286 vec_oprnd
= vect_get_vec_def_for_operand (op
, next_stmt
,
5288 dr_chain
.quick_push (vec_oprnd
);
5289 oprnds
.quick_push (vec_oprnd
);
5290 next_stmt
= GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt
));
5294 /* We should have caught mismatched types earlier.  */
5295 gcc_assert (useless_type_conversion_p (vectype
,
5296 TREE_TYPE (vec_oprnd
)));
5297 bool simd_lane_access_p
5298 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info
);
5299 if (simd_lane_access_p
5300 && TREE_CODE (DR_BASE_ADDRESS (first_dr
)) == ADDR_EXPR
5301 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr
), 0))
5302 && integer_zerop (DR_OFFSET (first_dr
))
5303 && integer_zerop (DR_INIT (first_dr
))
5304 && alias_sets_conflict_p (get_alias_set (aggr_type
),
5305 get_alias_set (DR_REF (first_dr
))))
5307 dataref_ptr
= unshare_expr (DR_BASE_ADDRESS (first_dr
));
5308 dataref_offset
= build_int_cst (reference_alias_ptr_type
5309 (DR_REF (first_dr
)), 0);
5314 = vect_create_data_ref_ptr (first_stmt
, aggr_type
,
5315 simd_lane_access_p
? loop
: NULL
,
5316 offset
, &dummy
, gsi
, &ptr_incr
,
5317 simd_lane_access_p
, &inv_p
);
5318 gcc_assert (bb_vinfo
|| !inv_p
);
5322 /* For interleaved stores we created vectorized defs for all the
5323 defs stored in OPRNDS in the previous iteration (previous copy).
5324 DR_CHAIN is then used as an input to vect_permute_store_chain(),
5325 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
5327 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
5328 OPRNDS are of size 1. */
5329 for (i
= 0; i
< group_size
; i
++)
5332 vect_is_simple_use (op
, NULL
, loop_vinfo
, bb_vinfo
, &def_stmt
,
5334 vec_oprnd
= vect_get_vec_def_for_stmt_copy (dt
, op
);
5335 dr_chain
[i
] = vec_oprnd
;
5336 oprnds
[i
] = vec_oprnd
;
5340 = int_const_binop (PLUS_EXPR
, dataref_offset
,
5341 TYPE_SIZE_UNIT (aggr_type
));
5343 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
, stmt
,
5344 TYPE_SIZE_UNIT (aggr_type
));
5351 /* Combine all the vectors into an array. */
5352 vec_array
= create_vector_array (vectype
, vec_num
);
5353 for (i
= 0; i
< vec_num
; i
++)
5355 vec_oprnd
= dr_chain
[i
];
5356 write_vector_array (stmt
, gsi
, vec_oprnd
, vec_array
, i
);
5360 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
5361 data_ref
= create_array_ref (aggr_type
, dataref_ptr
, first_dr
);
5362 new_stmt
= gimple_build_call_internal (IFN_STORE_LANES
, 1, vec_array
);
5363 gimple_call_set_lhs (new_stmt
, data_ref
);
5364 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
5372 result_chain
.create (group_size
);
5374 vect_permute_store_chain (dr_chain
, group_size
, stmt
, gsi
,
5378 next_stmt
= first_stmt
;
5379 for (i
= 0; i
< vec_num
; i
++)
5381 unsigned align
, misalign
;
5384 /* Bump the vector pointer. */
5385 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
,
5389 vec_oprnd
= vec_oprnds
[i
];
5390 else if (grouped_store
)
5391 /* For grouped stores vectorized defs are interleaved in
5392 vect_permute_store_chain(). */
5393 vec_oprnd
= result_chain
[i
];
5395 data_ref
= build2 (MEM_REF
, TREE_TYPE (vec_oprnd
), dataref_ptr
,
5398 : build_int_cst (reference_alias_ptr_type
5399 (DR_REF (first_dr
)), 0));
5400 align
= TYPE_ALIGN_UNIT (vectype
);
5401 if (aligned_access_p (first_dr
))
5403 else if (DR_MISALIGNMENT (first_dr
) == -1)
5405 TREE_TYPE (data_ref
)
5406 = build_aligned_type (TREE_TYPE (data_ref
),
5407 TYPE_ALIGN (elem_type
));
5408 align
= TYPE_ALIGN_UNIT (elem_type
);
5413 TREE_TYPE (data_ref
)
5414 = build_aligned_type (TREE_TYPE (data_ref
),
5415 TYPE_ALIGN (elem_type
));
5416 misalign
= DR_MISALIGNMENT (first_dr
);
5418 if (dataref_offset
== NULL_TREE
)
5419 set_ptr_info_alignment (get_ptr_info (dataref_ptr
), align
,
5423 && dt
!= vect_constant_def
5424 && dt
!= vect_external_def
)
5426 tree perm_mask
= perm_mask_for_reverse (vectype
);
5428 = vect_create_destination_var (gimple_assign_rhs1 (stmt
),
5430 tree new_temp
= make_ssa_name (perm_dest
, NULL
);
5432 /* Generate the permute statement. */
5434 = gimple_build_assign_with_ops (VEC_PERM_EXPR
, new_temp
,
5435 vec_oprnd
, vec_oprnd
,
5437 vect_finish_stmt_generation (stmt
, perm_stmt
, gsi
);
5439 perm_stmt
= SSA_NAME_DEF_STMT (new_temp
);
5440 vec_oprnd
= new_temp
;
5443 /* Arguments are ready. Create the new vector stmt. */
5444 new_stmt
= gimple_build_assign (data_ref
, vec_oprnd
);
5445 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
5450 next_stmt
= GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt
));
5458 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
5460 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
5461 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
5465 dr_chain
.release ();
5467 result_chain
.release ();
5468 vec_oprnds
.release ();
5473 /* Given a vector type VECTYPE, turns permutation SEL into the equivalent
5474    VECTOR_CST mask.  No checks are made that the target platform supports the
5475    mask, so callers may wish to test can_vec_perm_p separately, or use
5476    vect_gen_perm_mask_checked.  */
5479 vect_gen_perm_mask_any (tree vectype, const unsigned char *sel)
5481   tree mask_elt_type, mask_type, mask_vec, *mask_elts;
5484   nunits = TYPE_VECTOR_SUBPARTS (vectype);
5486   mask_elt_type = lang_hooks.types.type_for_mode
5487     (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
5488   mask_type = get_vectype_for_scalar_type (mask_elt_type);
5490   mask_elts = XALLOCAVEC (tree, nunits);
5491   for (i = nunits - 1; i >= 0; i--)
5492     mask_elts[i] = build_int_cst (mask_elt_type, sel[i]);
5493   mask_vec = build_vector (mask_type, mask_elts);
5498 /* Checked version of vect_gen_perm_mask_any.  Asserts can_vec_perm_p,
5499    i.e. that the target supports the pattern _for arbitrary input vectors_.  */
5502 vect_gen_perm_mask_checked (tree vectype, const unsigned char *sel)
5504   gcc_assert (can_vec_perm_p (TYPE_MODE (vectype), false, sel));
5505   return vect_gen_perm_mask_any (vectype, sel);
5508 /* Given vector variables X and Y that were generated for the scalar
5509    STMT, generate instructions to permute the vector elements of X and Y
5510    using permutation mask MASK_VEC, insert them at *GSI and return the
5511    permuted vector variable.  */
5514 permute_vec_elements (tree x, tree y, tree mask_vec, gimple stmt,
5515                       gimple_stmt_iterator *gsi)
5517   tree vectype = TREE_TYPE (x);
5518   tree perm_dest, data_ref;
5521   perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
5522   data_ref = make_ssa_name (perm_dest, NULL);
5524   /* Generate the permute statement.  */
5525   perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref, x, y, mask_vec);
5527   vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5532 /* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
5533 inserting them on the loop's preheader edge.  Returns true if we
5534 were successful in doing so (and thus STMT can be moved then),
5535 otherwise returns false. */
5538 hoist_defs_of_uses (gimple stmt, struct loop *loop)
5544   FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
5546       gimple def_stmt = SSA_NAME_DEF_STMT (op);
5547       if (!gimple_nop_p (def_stmt)
5548           && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
5550           /* Make sure we don't need to recurse.  While we could do
5551              so in simple cases when there are more complex use webs
5552              we don't have an easy way to preserve stmt order to fulfil
5553              dependencies within them.  */
5556           if (gimple_code (def_stmt) == GIMPLE_PHI)
5558             FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
5560                 gimple def_stmt2 = SSA_NAME_DEF_STMT (op2);
5561                 if (!gimple_nop_p (def_stmt2)
5562                     && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
5572   FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
5574       gimple def_stmt = SSA_NAME_DEF_STMT (op);
5575       if (!gimple_nop_p (def_stmt)
5576           && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
5578           gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
5579           gsi_remove (&gsi, false);
5580           gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
5587 /* vectorizable_load.
5589    Check if STMT reads a non scalar data-ref (array/pointer/structure) that can be vectorized.
5591    If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5592    stmt to replace it, put it in VEC_STMT, and insert it at BSI.
5593    Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
5596 vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
5597                    slp_tree slp_node, slp_instance slp_node_instance)
= NULL
;
5601 tree data_ref
= NULL
;
5602 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
5603 stmt_vec_info prev_stmt_info
;
5604 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
5605 struct loop
*loop
= NULL
;
5606 struct loop
*containing_loop
= (gimple_bb (stmt
))->loop_father
;
5607 bool nested_in_vect_loop
= false;
5608 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
), *first_dr
= NULL
;
5609 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
5613 gimple new_stmt
= NULL
;
5615 enum dr_alignment_support alignment_support_scheme
;
5616 tree dataref_ptr
= NULL_TREE
;
5617 tree dataref_offset
= NULL_TREE
;
5618 gimple ptr_incr
= NULL
;
5619 int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
5621 int i
, j
, group_size
, group_gap
;
5622 tree msq
= NULL_TREE
, lsq
;
5623 tree offset
= NULL_TREE
;
5624 tree byte_offset
= NULL_TREE
;
5625 tree realignment_token
= NULL_TREE
;
5627 vec
<tree
> dr_chain
= vNULL
;
5628 bool grouped_load
= false;
5629 bool load_lanes_p
= false;
5632 bool negative
= false;
5633 bool compute_in_loop
= false;
5634 struct loop
*at_loop
;
5636 bool slp
= (slp_node
!= NULL
);
5637 bool slp_perm
= false;
5638 enum tree_code code
;
5639 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
5642 tree gather_base
= NULL_TREE
, gather_off
= NULL_TREE
;
5643 tree gather_off_vectype
= NULL_TREE
, gather_decl
= NULL_TREE
;
5644 int gather_scale
= 1;
5645 enum vect_def_type gather_dt
= vect_unknown_def_type
;
5649 loop
= LOOP_VINFO_LOOP (loop_vinfo
);
5650 nested_in_vect_loop
= nested_in_vect_loop_p (loop
, stmt
);
5651 vf
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
5656 /* Multiple types in SLP are handled by creating the appropriate number of
5657 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5659 if (slp
|| PURE_SLP_STMT (stmt_info
))
5662 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
5664 gcc_assert (ncopies
>= 1);
5666 /* FORNOW. This restriction should be relaxed. */
5667 if (nested_in_vect_loop
&& ncopies
> 1)
5669 if (dump_enabled_p ())
5670 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5671 "multiple types in nested loop.\n");
5675 /* Invalidate assumptions made by dependence analysis when vectorization
5676 on the unrolled body effectively re-orders stmts. */
5678 && STMT_VINFO_MIN_NEG_DIST (stmt_info
) != 0
5679 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo
)
5680 > STMT_VINFO_MIN_NEG_DIST (stmt_info
)))
5682 if (dump_enabled_p ())
5683 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5684 "cannot perform implicit CSE when unrolling "
5685 "with negative dependence distance\n");
5689 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
5692 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
5695 /* Is vectorizable load? */
5696 if (!is_gimple_assign (stmt
))
5699 scalar_dest
= gimple_assign_lhs (stmt
);
5700 if (TREE_CODE (scalar_dest
) != SSA_NAME
)
5703 code
= gimple_assign_rhs_code (stmt
);
5704 if (code
!= ARRAY_REF
5705 && code
!= BIT_FIELD_REF
5706 && code
!= INDIRECT_REF
5707 && code
!= COMPONENT_REF
5708 && code
!= IMAGPART_EXPR
5709 && code
!= REALPART_EXPR
5711 && TREE_CODE_CLASS (code
) != tcc_declaration
)
5714 if (!STMT_VINFO_DATA_REF (stmt_info
))
5717 elem_type
= TREE_TYPE (vectype
);
5718 mode
= TYPE_MODE (vectype
);
5720 /* FORNOW. In some cases can vectorize even if data-type not supported
5721 (e.g. - data copies). */
5722 if (optab_handler (mov_optab
, mode
) == CODE_FOR_nothing
)
5724 if (dump_enabled_p ())
5725 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5726 "Aligned load, but unsupported type.\n");
5730 /* Check if the load is a part of an interleaving chain. */
5731 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
))
5733 grouped_load
= true;
5735 gcc_assert (! nested_in_vect_loop
&& !STMT_VINFO_GATHER_P (stmt_info
));
5737 first_stmt
= GROUP_FIRST_ELEMENT (stmt_info
);
5738 if (!slp
&& !PURE_SLP_STMT (stmt_info
))
5740 group_size
= GROUP_SIZE (vinfo_for_stmt (first_stmt
));
5741 if (vect_load_lanes_supported (vectype
, group_size
))
5742 load_lanes_p
= true;
5743 else if (!vect_grouped_load_supported (vectype
, group_size
))
5747 /* Invalidate assumptions made by dependence analysis when vectorization
5748 on the unrolled body effectively re-orders stmts. */
5749 if (!PURE_SLP_STMT (stmt_info
)
5750 && STMT_VINFO_MIN_NEG_DIST (stmt_info
) != 0
5751 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo
)
5752 > STMT_VINFO_MIN_NEG_DIST (stmt_info
)))
5754 if (dump_enabled_p ())
5755 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5756 "cannot perform implicit CSE when performing "
5757 "group loads with negative dependence distance\n");
5763 if (STMT_VINFO_GATHER_P (stmt_info
))
5767 gather_decl
= vect_check_gather (stmt
, loop_vinfo
, &gather_base
,
5768 &gather_off
, &gather_scale
);
5769 gcc_assert (gather_decl
);
5770 if (!vect_is_simple_use_1 (gather_off
, NULL
, loop_vinfo
, bb_vinfo
,
5771 &def_stmt
, &def
, &gather_dt
,
5772 &gather_off_vectype
))
5774 if (dump_enabled_p ())
5775 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5776 "gather index use not simple.\n");
5780 else if (STMT_VINFO_STRIDE_LOAD_P (stmt_info
))
5784 negative
= tree_int_cst_compare (nested_in_vect_loop
5785 ? STMT_VINFO_DR_STEP (stmt_info
)
5787 size_zero_node
) < 0;
5788 if (negative
&& ncopies
> 1)
5790 if (dump_enabled_p ())
5791 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5792 "multiple types with negative step.\n");
5800 if (dump_enabled_p ())
5801 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5802 "negative step for group load not supported"
5806 alignment_support_scheme
= vect_supportable_dr_alignment (dr
, false);
5807 if (alignment_support_scheme
!= dr_aligned
5808 && alignment_support_scheme
!= dr_unaligned_supported
)
5810 if (dump_enabled_p ())
5811 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5812 "negative step but alignment required.\n");
5815 if (!perm_mask_for_reverse (vectype
))
5817 if (dump_enabled_p ())
5818 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5819 "negative step and reversing not supported."
5826 if (!vec_stmt
) /* transformation not required. */
5828 STMT_VINFO_TYPE (stmt_info
) = load_vec_info_type
;
5829 vect_model_load_cost (stmt_info
, ncopies
, load_lanes_p
, NULL
, NULL
, NULL
);
5833 if (dump_enabled_p ())
5834 dump_printf_loc (MSG_NOTE
, vect_location
,
5835 "transform load. ncopies = %d\n", ncopies
);
5839 ensure_base_align (stmt_info
, dr
);
5841 if (STMT_VINFO_GATHER_P (stmt_info
))
5843 tree vec_oprnd0
= NULL_TREE
, op
;
5844 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gather_decl
));
5845 tree rettype
, srctype
, ptrtype
, idxtype
, masktype
, scaletype
;
5846 tree ptr
, mask
, var
, scale
, merge
, perm_mask
= NULL_TREE
, prev_res
= NULL_TREE
;
5847 edge pe
= loop_preheader_edge (loop
);
5850 enum { NARROW
, NONE
, WIDEN
} modifier
;
5851 int gather_off_nunits
= TYPE_VECTOR_SUBPARTS (gather_off_vectype
);
5853 if (nunits
== gather_off_nunits
)
5855 else if (nunits
== gather_off_nunits
/ 2)
5857 unsigned char *sel
= XALLOCAVEC (unsigned char, gather_off_nunits
);
5860 for (i
= 0; i
< gather_off_nunits
; ++i
)
5861 sel
[i
] = i
| nunits
;
5863 perm_mask
= vect_gen_perm_mask_checked (gather_off_vectype
, sel
);
5865 else if (nunits
== gather_off_nunits
* 2)
5867 unsigned char *sel
= XALLOCAVEC (unsigned char, nunits
);
5870 for (i
= 0; i
< nunits
; ++i
)
5871 sel
[i
] = i
< gather_off_nunits
5872 ? i
: i
+ nunits
- gather_off_nunits
;
5874 perm_mask
= vect_gen_perm_mask_checked (vectype
, sel
);
5880 rettype
= TREE_TYPE (TREE_TYPE (gather_decl
));
5881 srctype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
5882 ptrtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
5883 idxtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
5884 masktype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
5885 scaletype
= TREE_VALUE (arglist
);
5886 gcc_checking_assert (types_compatible_p (srctype
, rettype
));
5888 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
5890 ptr
= fold_convert (ptrtype
, gather_base
);
5891 if (!is_gimple_min_invariant (ptr
))
5893 ptr
= force_gimple_operand (ptr
, &seq
, true, NULL_TREE
);
5894 new_bb
= gsi_insert_seq_on_edge_immediate (pe
, seq
);
5895 gcc_assert (!new_bb
);
5898 /* Currently we support only unconditional gather loads,
5899 so mask should be all ones. */
5900 if (TREE_CODE (masktype
) == INTEGER_TYPE
)
5901 mask
= build_int_cst (masktype
, -1);
5902 else if (TREE_CODE (TREE_TYPE (masktype
)) == INTEGER_TYPE
)
5904 mask
= build_int_cst (TREE_TYPE (masktype
), -1);
5905 mask
= build_vector_from_val (masktype
, mask
);
5906 mask
= vect_init_vector (stmt
, mask
, masktype
, NULL
);
5908 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype
)))
5912 for (j
= 0; j
< 6; ++j
)
5914 real_from_target (&r
, tmp
, TYPE_MODE (TREE_TYPE (masktype
)));
5915 mask
= build_real (TREE_TYPE (masktype
), r
);
5916 mask
= build_vector_from_val (masktype
, mask
);
5917 mask
= vect_init_vector (stmt
, mask
, masktype
, NULL
);
5922 scale
= build_int_cst (scaletype
, gather_scale
);
5924 if (TREE_CODE (TREE_TYPE (rettype
)) == INTEGER_TYPE
)
5925 merge
= build_int_cst (TREE_TYPE (rettype
), 0);
5926 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype
)))
5930 for (j
= 0; j
< 6; ++j
)
5932 real_from_target (&r
, tmp
, TYPE_MODE (TREE_TYPE (rettype
)));
5933 merge
= build_real (TREE_TYPE (rettype
), r
);
5937 merge
= build_vector_from_val (rettype
, merge
);
5938 merge
= vect_init_vector (stmt
, merge
, rettype
, NULL
);
5940 prev_stmt_info
= NULL
;
5941 for (j
= 0; j
< ncopies
; ++j
)
5943 if (modifier
== WIDEN
&& (j
& 1))
5944 op
= permute_vec_elements (vec_oprnd0
, vec_oprnd0
,
5945 perm_mask
, stmt
, gsi
);
5948 = vect_get_vec_def_for_operand (gather_off
, stmt
, NULL
);
5951 = vect_get_vec_def_for_stmt_copy (gather_dt
, vec_oprnd0
);
5953 if (!useless_type_conversion_p (idxtype
, TREE_TYPE (op
)))
5955 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op
))
5956 == TYPE_VECTOR_SUBPARTS (idxtype
));
5957 var
= vect_get_new_vect_var (idxtype
, vect_simple_var
, NULL
);
5958 var
= make_ssa_name (var
, NULL
);
5959 op
= build1 (VIEW_CONVERT_EXPR
, idxtype
, op
);
5961 = gimple_build_assign_with_ops (VIEW_CONVERT_EXPR
, var
,
5963 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
5968 = gimple_build_call (gather_decl
, 5, merge
, ptr
, op
, mask
, scale
);
5970 if (!useless_type_conversion_p (vectype
, rettype
))
5972 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype
)
5973 == TYPE_VECTOR_SUBPARTS (rettype
));
5974 var
= vect_get_new_vect_var (rettype
, vect_simple_var
, NULL
);
5975 op
= make_ssa_name (var
, new_stmt
);
5976 gimple_call_set_lhs (new_stmt
, op
);
5977 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
5978 var
= make_ssa_name (vec_dest
, NULL
);
5979 op
= build1 (VIEW_CONVERT_EXPR
, vectype
, op
);
5981 = gimple_build_assign_with_ops (VIEW_CONVERT_EXPR
, var
, op
,
5986 var
= make_ssa_name (vec_dest
, new_stmt
);
5987 gimple_call_set_lhs (new_stmt
, var
);
5990 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
5992 if (modifier
== NARROW
)
5999 var
= permute_vec_elements (prev_res
, var
,
6000 perm_mask
, stmt
, gsi
);
6001 new_stmt
= SSA_NAME_DEF_STMT (var
);
6004 if (prev_stmt_info
== NULL
)
6005 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
6007 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
6008 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
6012 else if (STMT_VINFO_STRIDE_LOAD_P (stmt_info
))
6014 gimple_stmt_iterator incr_gsi
;
6020 vec
<constructor_elt
, va_gc
> *v
= NULL
;
6021 gimple_seq stmts
= NULL
;
6022 tree stride_base
, stride_step
, alias_off
;
6024 gcc_assert (!nested_in_vect_loop
);
6027 = fold_build_pointer_plus
6028 (unshare_expr (DR_BASE_ADDRESS (dr
)),
6029 size_binop (PLUS_EXPR
,
6030 convert_to_ptrofftype (unshare_expr (DR_OFFSET (dr
))),
6031 convert_to_ptrofftype (DR_INIT (dr
))));
6032 stride_step
= fold_convert (sizetype
, unshare_expr (DR_STEP (dr
)));
6034 /* For a load with loop-invariant (but other than power-of-2)
6035 stride (i.e. not a grouped access) like so:
6037 for (i = 0; i < n; i += stride)
6040 we generate a new induction variable and new accesses to
6041 form a new vector (or vectors, depending on ncopies):
6043 for (j = 0; ; j += VF*stride)
6045 tmp2 = array[j + stride];
6047 vectemp = {tmp1, tmp2, ...}  */
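/* Illustrative sketch, not part of the vectorizer: the shape of the code
   generated for the strided load above, for nunits == 4 -- one running
   pointer stepped by the (byte) stride for every scalar element, with each
   group of four loads assembled into one vector (the CONSTRUCTOR built
   below).  Hypothetical names.  */

typedef int illustrative_stride_v4si __attribute__ ((vector_size (16)));

static void
illustrative_strided_load (illustrative_stride_v4si *out, const char *base,
                           long stride_bytes, long nvectors)
{
  const char *running_off = base;
  for (long j = 0; j < nvectors; j++)
    {
      illustrative_stride_v4si vectemp;
      for (int i = 0; i < 4; i++)
        {
          vectemp[i] = *(const int *) running_off;      /* tmp_i  */
          running_off += stride_bytes;                  /* newoff  */
        }
      out[j] = vectemp;         /* vectemp = {tmp1, tmp2, ...}  */
    }
}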
6050 ivstep
= stride_step
;
6051 ivstep
= fold_build2 (MULT_EXPR
, TREE_TYPE (ivstep
), ivstep
,
6052 build_int_cst (TREE_TYPE (ivstep
), vf
));
6054 standard_iv_increment_position (loop
, &incr_gsi
, &insert_after
);
6056 create_iv (stride_base
, ivstep
, NULL
,
6057 loop
, &incr_gsi
, insert_after
,
6059 incr
= gsi_stmt (incr_gsi
);
6060 set_vinfo_for_stmt (incr
, new_stmt_vec_info (incr
, loop_vinfo
, NULL
));
6062 stride_step
= force_gimple_operand (stride_step
, &stmts
, true, NULL_TREE
);
6064 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop
), stmts
);
6066 prev_stmt_info
= NULL
;
6067 running_off
= offvar
;
6068 alias_off
= build_int_cst (reference_alias_ptr_type (DR_REF (dr
)), 0);
6069 for (j
= 0; j
< ncopies
; j
++)
6073 vec_alloc (v
, nunits
);
6074 for (i
= 0; i
< nunits
; i
++)
6076 tree newref
, newoff
;
6078 newref
= build2 (MEM_REF
, TREE_TYPE (vectype
),
6079 running_off
, alias_off
);
6081 newref
= force_gimple_operand_gsi (gsi
, newref
, true,
6084 CONSTRUCTOR_APPEND_ELT (v
, NULL_TREE
, newref
);
6085 newoff
= copy_ssa_name (running_off
, NULL
);
6086 incr
= gimple_build_assign_with_ops (POINTER_PLUS_EXPR
, newoff
,
6087 running_off
, stride_step
);
6088 vect_finish_stmt_generation (stmt
, incr
, gsi
);
6090 running_off
= newoff
;
6093 vec_inv
= build_constructor (vectype
, v
);
6094 new_temp
= vect_init_vector (stmt
, vec_inv
, vectype
, gsi
);
6095 new_stmt
= SSA_NAME_DEF_STMT (new_temp
);
6098 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
6100 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
6101 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
6108 first_stmt
= GROUP_FIRST_ELEMENT (stmt_info
);
6110 && !SLP_TREE_LOAD_PERMUTATION (slp_node
).exists ()
6111 && first_stmt
!= SLP_TREE_SCALAR_STMTS (slp_node
)[0])
6112 first_stmt
= SLP_TREE_SCALAR_STMTS (slp_node
)[0];
6114 /* Check if the chain of loads is already vectorized. */
6115 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt
))
6116 /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
6117 ??? But we can only do so if there is exactly one
6118 as we have no way to get at the rest. Leave the CSE
6120 ??? With the group load eventually participating
6121 in multiple different permutations (having multiple
6122 slp nodes which refer to the same group) the CSE
6123 is even wrong code. See PR56270. */
6126 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
6129 first_dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt
));
6130 group_size
= GROUP_SIZE (vinfo_for_stmt (first_stmt
));
6132 /* VEC_NUM is the number of vect stmts to be created for this group. */
6135 grouped_load
= false;
6136 vec_num
= SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node
);
6137 if (SLP_TREE_LOAD_PERMUTATION (slp_node
).exists ())
6139 group_gap
= GROUP_GAP (vinfo_for_stmt (first_stmt
));
6143 vec_num
= group_size
;
6151 group_size
= vec_num
= 1;
6155 alignment_support_scheme
= vect_supportable_dr_alignment (first_dr
, false);
6156 gcc_assert (alignment_support_scheme
);
6157 /* Targets with load-lane instructions must not require explicit
6159 gcc_assert (!load_lanes_p
6160 || alignment_support_scheme
== dr_aligned
6161 || alignment_support_scheme
== dr_unaligned_supported
);
6163 /* In case the vectorization factor (VF) is bigger than the number
6164 of elements that we can fit in a vectype (nunits), we have to generate
6165 more than one vector stmt - i.e - we need to "unroll" the
6166 vector stmt by a factor VF/nunits. In doing so, we record a pointer
6167 from one copy of the vector stmt to the next, in the field
6168 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
6169 stages to find the correct vector defs to be used when vectorizing
6170 stmts that use the defs of the current stmt. The example below
6171 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
6172 need to create 4 vectorized stmts):
6174 before vectorization:
6175 RELATED_STMT VEC_STMT
6179 step 1: vectorize stmt S1:
6180 We first create the vector stmt VS1_0, and, as usual, record a
6181 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
6182 Next, we create the vector stmt VS1_1, and record a pointer to
6183 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
6184 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
6186 RELATED_STMT VEC_STMT
6187 VS1_0: vx0 = memref0 VS1_1 -
6188 VS1_1: vx1 = memref1 VS1_2 -
6189 VS1_2: vx2 = memref2 VS1_3 -
6190 VS1_3: vx3 = memref3 - -
6191 S1: x = load - VS1_0
6194 See in documentation in vect_get_vec_def_for_stmt_copy for how the
6195 information we recorded in RELATED_STMT field is used to vectorize
6198 /* In case of interleaving (non-unit grouped access):
6205 Vectorized loads are created in the order of memory accesses
6206 starting from the access of the first stmt of the chain:
6209 VS2: vx1 = &base + vec_size*1
6210 VS3: vx3 = &base + vec_size*2
6211 VS4: vx4 = &base + vec_size*3
6213 Then permutation statements are generated:
6215 VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
6216 VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
6219 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
6220 (the order of the data-refs in the output of vect_permute_load_chain
6221 corresponds to the order of scalar stmts in the interleaving chain - see
6222 the documentation of vect_permute_load_chain()).
6223 The generation of permutation stmts and recording them in
6224 STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().
6226 In case of both multiple types and interleaving, the vector loads and
6227 permutation stmts above are created for every copy. The result vector
6228 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
6229 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
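/* Illustrative sketch, not part of the vectorizer: the even/odd extraction
   permutations VS5/VS6 quoted above, shown with __builtin_shuffle for two
   4-element vectors loaded from an interleaved pair of accesses.
   Hypothetical names.  */

typedef int illustrative_deint_v4si __attribute__ ((vector_size (16)));

static void
illustrative_deinterleave (illustrative_deint_v4si vx0,
                           illustrative_deint_v4si vx1,
                           illustrative_deint_v4si *even,
                           illustrative_deint_v4si *odd)
{
  const illustrative_deint_v4si m_even = { 0, 2, 4, 6 };
  const illustrative_deint_v4si m_odd  = { 1, 3, 5, 7 };
  *even = __builtin_shuffle (vx0, vx1, m_even);  /* VS5  */
  *odd  = __builtin_shuffle (vx0, vx1, m_odd);   /* VS6  */
}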
6231 /* If the data reference is aligned (dr_aligned) or potentially unaligned
6232 on a target that supports unaligned accesses (dr_unaligned_supported)
6233 we generate the following code:
6237 p = p + indx * vectype_size;
6242 Otherwise, the data reference is potentially unaligned on a target that
6243 does not support unaligned accesses (dr_explicit_realign_optimized) -
6244 then generate the following code, in which the data in each iteration is
6245 obtained by two vector loads, one from the previous iteration, and one
6246 from the current iteration:
6248 msq_init = *(floor(p1))
6249 p2 = initial_addr + VS - 1;
6250 realignment_token = call target_builtin;
6253 p2 = p2 + indx * vectype_size
6255 vec_dest = realign_load (msq, lsq, realignment_token)  */
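/* Illustrative sketch, not part of the vectorizer: the effect of
   vec_dest = realign_load (msq, lsq, realignment_token) for 4-element int
   vectors when the access is misaligned by OFS elements -- a window of four
   consecutive elements is selected from the concatenation of the two
   aligned loads.  Real targets do this in one instruction driven by the
   realignment token; this version is element-granular, with hypothetical
   names.  */

typedef int illustrative_realign_v4si __attribute__ ((vector_size (16)));

static inline illustrative_realign_v4si
illustrative_realign_load (illustrative_realign_v4si msq,
                           illustrative_realign_v4si lsq, int ofs)
{
  illustrative_realign_v4si sel;
  for (int i = 0; i < 4; i++)
    sel[i] = ofs + i;           /* lanes ofs .. ofs+3 of {msq, lsq}  */
  return __builtin_shuffle (msq, lsq, sel);
}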
6260 /* If the misalignment remains the same throughout the execution of the
6261 loop, we can create the init_addr and permutation mask at the loop
6262 preheader. Otherwise, it needs to be created inside the loop.
6263 This can only occur when vectorizing memory accesses in the inner-loop
6264 nested within an outer-loop that is being vectorized. */
6266   if (nested_in_vect_loop
6267       && (TREE_INT_CST_LOW (DR_STEP (dr))
6268           % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
6270       gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
6271       compute_in_loop = true;
6274   if ((alignment_support_scheme == dr_explicit_realign_optimized
6275        || alignment_support_scheme == dr_explicit_realign)
6276       && !compute_in_loop)
6278       msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
6279                                     alignment_support_scheme, NULL_TREE,
6281       if (alignment_support_scheme == dr_explicit_realign_optimized)
6283           phi = SSA_NAME_DEF_STMT (msq);
6284           byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
6292     offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
6295     aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
6297     aggr_type = vectype;
6299   prev_stmt_info = NULL;
6300   for (j = 0; j < ncopies; j++)
6302 /* 1. Create the vector or array pointer update chain. */
6305           bool simd_lane_access_p
6306             = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
6307           if (simd_lane_access_p
6308               && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
6309               && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
6310               && integer_zerop (DR_OFFSET (first_dr))
6311               && integer_zerop (DR_INIT (first_dr))
6312               && alias_sets_conflict_p (get_alias_set (aggr_type),
6313                                         get_alias_set (DR_REF (first_dr)))
6314               && (alignment_support_scheme == dr_aligned
6315                   || alignment_support_scheme == dr_unaligned_supported))
6317               dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
6318               dataref_offset = build_int_cst (reference_alias_ptr_type
6319                                               (DR_REF (first_dr)), 0);
6324             = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
6325                                         offset, &dummy, gsi, &ptr_incr,
6326                                         simd_lane_access_p, &inv_p,
6329       else if (dataref_offset)
6330         dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
6331                                           TYPE_SIZE_UNIT (aggr_type));
6333         dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
6334                                        TYPE_SIZE_UNIT (aggr_type));
6336       if (grouped_load || slp_perm)
6337         dr_chain.create (vec_num);
6343           vec_array = create_vector_array (vectype, vec_num);
6346              VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]).  */
6347           data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
6348           new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
6349           gimple_call_set_lhs (new_stmt, vec_array);
6350           vect_finish_stmt_generation (stmt, new_stmt, gsi);
6352           /* Extract each vector into an SSA_NAME.  */
6353           for (i = 0; i < vec_num; i++)
6355               new_temp = read_vector_array (stmt, gsi, scalar_dest,
6357               dr_chain.quick_push (new_temp);
6360           /* Record the mapping between SSA_NAMEs and statements.  */
6361           vect_record_grouped_load_vectors (stmt, dr_chain);
6365           for (i = 0; i < vec_num; i++)
6368                 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
6371               /* 2. Create the vector-load in the loop.  */
6372               switch (alignment_support_scheme)
6375                 case dr_unaligned_supported:
6377                     unsigned int align, misalign;
6380                       = build2 (MEM_REF, vectype, dataref_ptr,
6383                          : build_int_cst (reference_alias_ptr_type
6384                                           (DR_REF (first_dr)), 0));
6385                     align = TYPE_ALIGN_UNIT (vectype);
6386                     if (alignment_support_scheme == dr_aligned)
6388                         gcc_assert (aligned_access_p (first_dr));
6391                     else if (DR_MISALIGNMENT (first_dr) == -1)
6393                         TREE_TYPE (data_ref)
6394                           = build_aligned_type (TREE_TYPE (data_ref),
6395                                                 TYPE_ALIGN (elem_type));
6396                         align = TYPE_ALIGN_UNIT (elem_type);
6401                         TREE_TYPE (data_ref)
6402                           = build_aligned_type (TREE_TYPE (data_ref),
6403                                                 TYPE_ALIGN (elem_type));
6404                         misalign = DR_MISALIGNMENT (first_dr);
6406                     if (dataref_offset == NULL_TREE)
6407                       set_ptr_info_alignment (get_ptr_info (dataref_ptr),
6411                 case dr_explicit_realign:
6416                     vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
6418                     if (compute_in_loop)
6419                       msq = vect_setup_realignment (first_stmt, gsi,
6421                                                     dr_explicit_realign,
6424                     ptr = copy_ssa_name (dataref_ptr, NULL);
6425                     new_stmt = gimple_build_assign_with_ops
6426                                  (BIT_AND_EXPR, ptr, dataref_ptr,
6428                                     (TREE_TYPE (dataref_ptr),
6429                                      -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
6430                     vect_finish_stmt_generation (stmt, new_stmt, gsi);
6432                       = build2 (MEM_REF, vectype, ptr,
6433                                 build_int_cst (reference_alias_ptr_type
6434                                                (DR_REF (first_dr)), 0));
6435                     vec_dest = vect_create_destination_var (scalar_dest,
6437                     new_stmt = gimple_build_assign (vec_dest, data_ref);
6438                     new_temp = make_ssa_name (vec_dest, new_stmt);
6439                     gimple_assign_set_lhs (new_stmt, new_temp);
6440                     gimple_set_vdef (new_stmt, gimple_vdef (stmt));
6441                     gimple_set_vuse (new_stmt, gimple_vuse (stmt));
6442                     vect_finish_stmt_generation (stmt, new_stmt, gsi);
6445                     bump = size_binop (MULT_EXPR, vs_minus_1,
6446                                        TYPE_SIZE_UNIT (elem_type));
6447                     ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
6448                     new_stmt = gimple_build_assign_with_ops
6449                                  (BIT_AND_EXPR, NULL_TREE, ptr,
6452                                     -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
6453                     ptr = copy_ssa_name (dataref_ptr, new_stmt);
6454                     gimple_assign_set_lhs (new_stmt, ptr);
6455                     vect_finish_stmt_generation (stmt, new_stmt, gsi);
6457                       = build2 (MEM_REF, vectype, ptr,
6458                                 build_int_cst (reference_alias_ptr_type
6459                                                (DR_REF (first_dr)), 0));
6462                 case dr_explicit_realign_optimized:
6463                   new_temp = copy_ssa_name (dataref_ptr, NULL);
6464                   new_stmt = gimple_build_assign_with_ops
6465                                (BIT_AND_EXPR, new_temp, dataref_ptr,
6467                                   (TREE_TYPE (dataref_ptr),
6468                                    -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
6469                   vect_finish_stmt_generation (stmt, new_stmt, gsi);
6471                     = build2 (MEM_REF, vectype, new_temp,
6472                               build_int_cst (reference_alias_ptr_type
6473                                              (DR_REF (first_dr)), 0));
6478               vec_dest = vect_create_destination_var (scalar_dest, vectype);
6479               new_stmt = gimple_build_assign (vec_dest, data_ref);
6480               new_temp = make_ssa_name (vec_dest, new_stmt);
6481               gimple_assign_set_lhs (new_stmt, new_temp);
6482               vect_finish_stmt_generation (stmt, new_stmt, gsi);
6484 /* 3. Handle explicit realignment if necessary/supported.
6486 vec_dest = realign_load (msq, lsq, realignment_token) */
6487               if (alignment_support_scheme == dr_explicit_realign_optimized
6488                   || alignment_support_scheme == dr_explicit_realign)
6490                   lsq = gimple_assign_lhs (new_stmt);
6491                   if (!realignment_token)
6492                     realignment_token = dataref_ptr;
6493                   vec_dest = vect_create_destination_var (scalar_dest, vectype);
6495                     = gimple_build_assign_with_ops (REALIGN_LOAD_EXPR,
6498                   new_temp = make_ssa_name (vec_dest, new_stmt);
6499                   gimple_assign_set_lhs (new_stmt, new_temp);
6500                   vect_finish_stmt_generation (stmt, new_stmt, gsi);
6502                   if (alignment_support_scheme == dr_explicit_realign_optimized)
6505                       if (i == vec_num - 1 && j == ncopies - 1)
6506                         add_phi_arg (phi, lsq,
6507                                      loop_latch_edge (containing_loop),
6513 /* 4. Handle invariant-load. */
6514           if (inv_p && !bb_vinfo)
6516               gcc_assert (!grouped_load);
6517               /* If we have versioned for aliasing or the loop doesn't
6518                  have any data dependencies that would preclude this,
6519                  then we are sure this is a loop invariant load and
6520                  thus we can insert it on the preheader edge.  */
6521               if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
6522                   && !nested_in_vect_loop
6523                   && hoist_defs_of_uses (stmt, loop))
6525                   if (dump_enabled_p ())
6527                       dump_printf_loc (MSG_NOTE, vect_location,
6528                                        "hoisting out of the vectorized "
6530                       dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
6531                       dump_printf (MSG_NOTE, "\n");
6533                   tree tem = copy_ssa_name (scalar_dest, NULL);
6534                   gsi_insert_on_edge_immediate
6535                     (loop_preheader_edge (loop),
6536                      gimple_build_assign (tem,
6538                                           (gimple_assign_rhs1 (stmt))));
6539                   new_temp = vect_init_vector (stmt, tem, vectype, NULL);
6543                   gimple_stmt_iterator gsi2 = *gsi;
6545                   new_temp = vect_init_vector (stmt, scalar_dest,
6548                   new_stmt = SSA_NAME_DEF_STMT (new_temp);
6549                   set_vinfo_for_stmt (new_stmt,
6550                                       new_stmt_vec_info (new_stmt, loop_vinfo,
6556               tree perm_mask = perm_mask_for_reverse (vectype);
6557               new_temp = permute_vec_elements (new_temp, new_temp,
6558                                                perm_mask, stmt, gsi);
6559               new_stmt = SSA_NAME_DEF_STMT (new_temp);
6562 /* Collect vector loads and later create their permutation in
6563 vect_transform_grouped_load (). */
6564           if (grouped_load || slp_perm)
6565             dr_chain.quick_push (new_temp);
6567           /* Store vector loads in the corresponding SLP_NODE.  */
6568           if (slp && !slp_perm)
6569             SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
6571       /* Bump the vector pointer to account for a gap.  */
6572       if (slp && group_gap != 0)
6574           tree bump = size_binop (MULT_EXPR,
6575                                   TYPE_SIZE_UNIT (elem_type),
6576                                   size_int (group_gap));
6577           dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
6582       if (slp && !slp_perm)
6587           if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
6588                                              slp_node_instance, false))
6590               dr_chain.release ();
6599             vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
6600             *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
6605             STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6607             STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6608           prev_stmt_info = vinfo_for_stmt (new_stmt);
6611   dr_chain.release ();
6617 /* Function vect_is_simple_cond.
6620 LOOP - the loop that is being vectorized.
6621 COND - Condition that is checked for simple use.
6624 *COMP_VECTYPE - the vector type for the comparison.
6626    Returns whether a COND can be vectorized.  Checks whether the
6627    condition operands are supportable using vect_is_simple_use.  */
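/* Illustration only, not from the original sources: for a scalar statement
   like x_7 = a_5 < b_6 ? c_3 : d_4, COND is the tree "a_5 < b_6"; both
   operands are SSA_NAMEs whose definitions must pass vect_is_simple_use_1,
   and *COMP_VECTYPE becomes the vector type of the comparison operands
   a_5/b_6, which may differ from the vector type of the result.  The SSA
   names here are hypothetical and used for illustration.  */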
6630 vect_is_simple_cond (tree cond, gimple stmt, loop_vec_info loop_vinfo,
6631                      bb_vec_info bb_vinfo, tree *comp_vectype)
6635   enum vect_def_type dt;
6636   tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
6638   if (!COMPARISON_CLASS_P (cond))
6641   lhs = TREE_OPERAND (cond, 0);
6642   rhs = TREE_OPERAND (cond, 1);
6644   if (TREE_CODE (lhs) == SSA_NAME)
6646       gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
6647       if (!vect_is_simple_use_1 (lhs, stmt, loop_vinfo, bb_vinfo,
6648                                  &lhs_def_stmt, &def, &dt, &vectype1))
6651   else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
6652            && TREE_CODE (lhs) != FIXED_CST)
6655   if (TREE_CODE (rhs) == SSA_NAME)
6657       gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
6658       if (!vect_is_simple_use_1 (rhs, stmt, loop_vinfo, bb_vinfo,
6659                                  &rhs_def_stmt, &def, &dt, &vectype2))
6662   else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
6663            && TREE_CODE (rhs) != FIXED_CST)
6666   *comp_vectype = vectype1 ? vectype1 : vectype2;
6670 /* vectorizable_condition.
6672    Check if STMT is a conditional modify expression that can be vectorized.
6673 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
6674 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
6677    When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
6678    to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
6679    the else clause if it is 2).
6681 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
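/* Illustration only, not from the original sources: a scalar COND_EXPR
   statement such as

     x_1 = a_2 < b_3 ? c_4 : d_5;

   is replaced, per vector copy, by a statement of the form

     vx_1 = VEC_COND_EXPR <va_2 < vb_3, vc_4, vd_5>;

   where the comparison is built with the comparison vector type and the
   select with the statement's VECTYPE, as done in the transformation code
   below.  The SSA names are hypothetical.  */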
6684 vectorizable_condition (gimple stmt
, gimple_stmt_iterator
*gsi
,
6685 gimple
*vec_stmt
, tree reduc_def
, int reduc_index
,
6688 tree scalar_dest
= NULL_TREE
;
6689 tree vec_dest
= NULL_TREE
;
6690 tree cond_expr
, then_clause
, else_clause
;
6691 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
6692 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
6693 tree comp_vectype
= NULL_TREE
;
6694 tree vec_cond_lhs
= NULL_TREE
, vec_cond_rhs
= NULL_TREE
;
6695 tree vec_then_clause
= NULL_TREE
, vec_else_clause
= NULL_TREE
;
6696 tree vec_compare
, vec_cond_expr
;
6698 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
6700 enum vect_def_type dt
, dts
[4];
6701 int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
6703 enum tree_code code
;
6704 stmt_vec_info prev_stmt_info
= NULL
;
6706 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
6707 vec
<tree
> vec_oprnds0
= vNULL
;
6708 vec
<tree
> vec_oprnds1
= vNULL
;
6709 vec
<tree
> vec_oprnds2
= vNULL
;
6710 vec
<tree
> vec_oprnds3
= vNULL
;
6713 if (slp_node
|| PURE_SLP_STMT (stmt_info
))
6716 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
6718 gcc_assert (ncopies
>= 1);
6719 if (reduc_index
&& ncopies
> 1)
6720 return false; /* FORNOW */
6722 if (reduc_index
&& STMT_SLP_TYPE (stmt_info
))
6725 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
6728 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
6729 && !(STMT_VINFO_DEF_TYPE (stmt_info
) == vect_nested_cycle
6733 /* FORNOW: not yet supported. */
6734 if (STMT_VINFO_LIVE_P (stmt_info
))
6736 if (dump_enabled_p ())
6737 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6738 "value used after loop.\n");
6742 /* Is vectorizable conditional operation? */
6743 if (!is_gimple_assign (stmt
))
6746 code
= gimple_assign_rhs_code (stmt
);
6748 if (code
!= COND_EXPR
)
6751 cond_expr
= gimple_assign_rhs1 (stmt
);
6752 then_clause
= gimple_assign_rhs2 (stmt
);
6753 else_clause
= gimple_assign_rhs3 (stmt
);
6755 if (!vect_is_simple_cond (cond_expr
, stmt
, loop_vinfo
, bb_vinfo
,
6760 if (TREE_CODE (then_clause
) == SSA_NAME
)
6762 gimple then_def_stmt
= SSA_NAME_DEF_STMT (then_clause
);
6763 if (!vect_is_simple_use (then_clause
, stmt
, loop_vinfo
, bb_vinfo
,
6764 &then_def_stmt
, &def
, &dt
))
6767 else if (TREE_CODE (then_clause
) != INTEGER_CST
6768 && TREE_CODE (then_clause
) != REAL_CST
6769 && TREE_CODE (then_clause
) != FIXED_CST
)
6772 if (TREE_CODE (else_clause
) == SSA_NAME
)
6774 gimple else_def_stmt
= SSA_NAME_DEF_STMT (else_clause
);
6775 if (!vect_is_simple_use (else_clause
, stmt
, loop_vinfo
, bb_vinfo
,
6776 &else_def_stmt
, &def
, &dt
))
6779 else if (TREE_CODE (else_clause
) != INTEGER_CST
6780 && TREE_CODE (else_clause
) != REAL_CST
6781 && TREE_CODE (else_clause
) != FIXED_CST
)
6784   unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype)));
6785   /* The result of a vector comparison should be a signed integer type.  */
6786   tree cmp_type = build_nonstandard_integer_type (prec, 0);
6787   vec_cmp_type = get_same_sized_vectype (cmp_type, vectype);
6788   if (vec_cmp_type == NULL_TREE)
6793       STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
6794       return expand_vec_cond_expr_p (vectype, comp_vectype);
6801 vec_oprnds0
.create (1);
6802 vec_oprnds1
.create (1);
6803 vec_oprnds2
.create (1);
6804 vec_oprnds3
.create (1);
6808 scalar_dest
= gimple_assign_lhs (stmt
);
6809 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
6811 /* Handle cond expr. */
6812 for (j
= 0; j
< ncopies
; j
++)
6814 gimple new_stmt
= NULL
;
6819 auto_vec
<tree
, 4> ops
;
6820 auto_vec
<vec
<tree
>, 4> vec_defs
;
6822 ops
.safe_push (TREE_OPERAND (cond_expr
, 0));
6823 ops
.safe_push (TREE_OPERAND (cond_expr
, 1));
6824 ops
.safe_push (then_clause
);
6825 ops
.safe_push (else_clause
);
6826 vect_get_slp_defs (ops
, slp_node
, &vec_defs
, -1);
6827 vec_oprnds3
= vec_defs
.pop ();
6828 vec_oprnds2
= vec_defs
.pop ();
6829 vec_oprnds1
= vec_defs
.pop ();
6830 vec_oprnds0
= vec_defs
.pop ();
6833 vec_defs
.release ();
6839 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr
, 0),
6841 vect_is_simple_use (TREE_OPERAND (cond_expr
, 0), stmt
,
6842 loop_vinfo
, NULL
, >emp
, &def
, &dts
[0]);
6845 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr
, 1),
6847 vect_is_simple_use (TREE_OPERAND (cond_expr
, 1), stmt
,
6848 loop_vinfo
, NULL
, >emp
, &def
, &dts
[1]);
6849 if (reduc_index
== 1)
6850 vec_then_clause
= reduc_def
;
6853 vec_then_clause
= vect_get_vec_def_for_operand (then_clause
,
6855 vect_is_simple_use (then_clause
, stmt
, loop_vinfo
,
6856 NULL
, >emp
, &def
, &dts
[2]);
6858 if (reduc_index
== 2)
6859 vec_else_clause
= reduc_def
;
6862 vec_else_clause
= vect_get_vec_def_for_operand (else_clause
,
6864 vect_is_simple_use (else_clause
, stmt
, loop_vinfo
,
6865 NULL
, >emp
, &def
, &dts
[3]);
6871 vec_cond_lhs
= vect_get_vec_def_for_stmt_copy (dts
[0],
6872 vec_oprnds0
.pop ());
6873 vec_cond_rhs
= vect_get_vec_def_for_stmt_copy (dts
[1],
6874 vec_oprnds1
.pop ());
6875 vec_then_clause
= vect_get_vec_def_for_stmt_copy (dts
[2],
6876 vec_oprnds2
.pop ());
6877 vec_else_clause
= vect_get_vec_def_for_stmt_copy (dts
[3],
6878 vec_oprnds3
.pop ());
6883 vec_oprnds0
.quick_push (vec_cond_lhs
);
6884 vec_oprnds1
.quick_push (vec_cond_rhs
);
6885 vec_oprnds2
.quick_push (vec_then_clause
);
6886 vec_oprnds3
.quick_push (vec_else_clause
);
6889 /* Arguments are ready. Create the new vector stmt. */
6890       FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
6892           vec_cond_rhs = vec_oprnds1[i];
6893           vec_then_clause = vec_oprnds2[i];
6894           vec_else_clause = vec_oprnds3[i];
6896           vec_compare = build2 (TREE_CODE (cond_expr), vec_cmp_type,
6897                                 vec_cond_lhs, vec_cond_rhs);
6898           vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
6899                                   vec_compare, vec_then_clause, vec_else_clause);
6901           new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
6902           new_temp = make_ssa_name (vec_dest, new_stmt);
6903           gimple_assign_set_lhs (new_stmt, new_temp);
6904           vect_finish_stmt_generation (stmt, new_stmt, gsi);
6906             SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
6913         STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6915         STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6917       prev_stmt_info = vinfo_for_stmt (new_stmt);
6920   vec_oprnds0.release ();
6921   vec_oprnds1.release ();
6922   vec_oprnds2.release ();
6923   vec_oprnds3.release ();
6929 /* Make sure the statement is vectorizable. */
6932 vect_analyze_stmt (gimple stmt
, bool *need_to_vectorize
, slp_tree node
)
6934 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
6935 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
6936 enum vect_relevant relevance
= STMT_VINFO_RELEVANT (stmt_info
);
6938 tree scalar_type
, vectype
;
6939 gimple pattern_stmt
;
6940 gimple_seq pattern_def_seq
;
6942 if (dump_enabled_p ())
6944 dump_printf_loc (MSG_NOTE
, vect_location
, "==> examining statement: ");
6945 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, stmt
, 0);
6946 dump_printf (MSG_NOTE
, "\n");
6949 if (gimple_has_volatile_ops (stmt
))
6951 if (dump_enabled_p ())
6952 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6953 "not vectorized: stmt has volatile operands\n");
6958   /* Skip stmts that do not need to be vectorized.  In loops this is expected
       to include:
6960      - the COND_EXPR which is the loop exit condition
6961      - any LABEL_EXPRs in the loop
6962      - computations that are used only for array indexing or loop control.
6963      In basic blocks we only analyze statements that are part of some SLP
6964      instance, therefore, all the statements are relevant.
6966      A pattern statement needs to be analyzed instead of the original statement
6967      if the original statement is not relevant.  Otherwise, we analyze both
6968      statements.  In basic blocks we are called from some SLP instance
6969      traversal; in that case we do not analyze pattern stmts here, since the
6970      pattern stmts will already be part of an SLP instance.  */
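  /* Illustration only, not from the original sources: if pattern recognition
     rewrote S: x_1 = (int) a_2 * (int) b_3 into a pattern statement
     x_1' = WIDEN_MULT_EXPR <a_2, b_3>, and S itself is not relevant, the
     checks below switch STMT and STMT_INFO to the pattern statement and
     analyze that instead; if S is relevant, both statements are analyzed.
     The SSA names are hypothetical.  */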
6972 pattern_stmt
= STMT_VINFO_RELATED_STMT (stmt_info
);
6973 if (!STMT_VINFO_RELEVANT_P (stmt_info
)
6974 && !STMT_VINFO_LIVE_P (stmt_info
))
6976 if (STMT_VINFO_IN_PATTERN_P (stmt_info
)
6978 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt
))
6979 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt
))))
6981 /* Analyze PATTERN_STMT instead of the original stmt. */
6982 stmt
= pattern_stmt
;
6983 stmt_info
= vinfo_for_stmt (pattern_stmt
);
6984 if (dump_enabled_p ())
6986 dump_printf_loc (MSG_NOTE
, vect_location
,
6987 "==> examining pattern statement: ");
6988 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, stmt
, 0);
6989 dump_printf (MSG_NOTE
, "\n");
6994 if (dump_enabled_p ())
6995 dump_printf_loc (MSG_NOTE
, vect_location
, "irrelevant.\n");
7000 else if (STMT_VINFO_IN_PATTERN_P (stmt_info
)
7003 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt
))
7004 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt
))))
7006 /* Analyze PATTERN_STMT too. */
7007 if (dump_enabled_p ())
7009 dump_printf_loc (MSG_NOTE
, vect_location
,
7010 "==> examining pattern statement: ");
7011 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, stmt
, 0);
7012 dump_printf (MSG_NOTE
, "\n");
7015 if (!vect_analyze_stmt (pattern_stmt
, need_to_vectorize
, node
))
7019 if (is_pattern_stmt_p (stmt_info
)
7021 && (pattern_def_seq
= STMT_VINFO_PATTERN_DEF_SEQ (stmt_info
)))
7023 gimple_stmt_iterator si
;
7025 for (si
= gsi_start (pattern_def_seq
); !gsi_end_p (si
); gsi_next (&si
))
7027 gimple pattern_def_stmt
= gsi_stmt (si
);
7028 if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt
))
7029 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt
)))
7031 /* Analyze def stmt of STMT if it's a pattern stmt. */
7032 if (dump_enabled_p ())
7034 dump_printf_loc (MSG_NOTE
, vect_location
,
7035 "==> examining pattern def statement: ");
7036 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, pattern_def_stmt
, 0);
7037 dump_printf (MSG_NOTE
, "\n");
7040 if (!vect_analyze_stmt (pattern_def_stmt
,
7041 need_to_vectorize
, node
))
7047 switch (STMT_VINFO_DEF_TYPE (stmt_info
))
7049 case vect_internal_def
:
7052 case vect_reduction_def
:
7053 case vect_nested_cycle
:
7054 gcc_assert (!bb_vinfo
&& (relevance
== vect_used_in_outer
7055 || relevance
== vect_used_in_outer_by_reduction
7056 || relevance
== vect_unused_in_scope
));
7059 case vect_induction_def
:
7060 case vect_constant_def
:
7061 case vect_external_def
:
7062 case vect_unknown_def_type
:
7069 gcc_assert (PURE_SLP_STMT (stmt_info
));
7071 scalar_type
= TREE_TYPE (gimple_get_lhs (stmt
));
7072 if (dump_enabled_p ())
7074 dump_printf_loc (MSG_NOTE
, vect_location
,
7075 "get vectype for scalar type: ");
7076 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, scalar_type
);
7077 dump_printf (MSG_NOTE
, "\n");
7080 vectype
= get_vectype_for_scalar_type (scalar_type
);
7083 if (dump_enabled_p ())
7085 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7086 "not SLPed: unsupported data-type ");
7087 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
,
7089 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
7094 if (dump_enabled_p ())
7096 dump_printf_loc (MSG_NOTE
, vect_location
, "vectype: ");
7097 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, vectype
);
7098 dump_printf (MSG_NOTE
, "\n");
7101 STMT_VINFO_VECTYPE (stmt_info
) = vectype
;
7104 if (STMT_VINFO_RELEVANT_P (stmt_info
))
7106 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt
))));
7107 gcc_assert (STMT_VINFO_VECTYPE (stmt_info
)
7108 || (is_gimple_call (stmt
)
7109 && gimple_call_lhs (stmt
) == NULL_TREE
));
7110 *need_to_vectorize
= true;
7115 && (STMT_VINFO_RELEVANT_P (stmt_info
)
7116 || STMT_VINFO_DEF_TYPE (stmt_info
) == vect_reduction_def
))
7117 ok
= (vectorizable_simd_clone_call (stmt
, NULL
, NULL
, NULL
)
7118 || vectorizable_conversion (stmt
, NULL
, NULL
, NULL
)
7119 || vectorizable_shift (stmt
, NULL
, NULL
, NULL
)
7120 || vectorizable_operation (stmt
, NULL
, NULL
, NULL
)
7121 || vectorizable_assignment (stmt
, NULL
, NULL
, NULL
)
7122 || vectorizable_load (stmt
, NULL
, NULL
, NULL
, NULL
)
7123 || vectorizable_call (stmt
, NULL
, NULL
, NULL
)
7124 || vectorizable_store (stmt
, NULL
, NULL
, NULL
)
7125 || vectorizable_reduction (stmt
, NULL
, NULL
, NULL
)
7126 || vectorizable_condition (stmt
, NULL
, NULL
, NULL
, 0, NULL
));
7130 ok
= (vectorizable_simd_clone_call (stmt
, NULL
, NULL
, node
)
7131 || vectorizable_conversion (stmt
, NULL
, NULL
, node
)
7132 || vectorizable_shift (stmt
, NULL
, NULL
, node
)
7133 || vectorizable_operation (stmt
, NULL
, NULL
, node
)
7134 || vectorizable_assignment (stmt
, NULL
, NULL
, node
)
7135 || vectorizable_load (stmt
, NULL
, NULL
, node
, NULL
)
7136 || vectorizable_call (stmt
, NULL
, NULL
, node
)
7137 || vectorizable_store (stmt
, NULL
, NULL
, node
)
7138 || vectorizable_condition (stmt
, NULL
, NULL
, NULL
, 0, node
));
7143 if (dump_enabled_p ())
7145 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7146 "not vectorized: relevant stmt not ");
7147 dump_printf (MSG_MISSED_OPTIMIZATION
, "supported: ");
7148 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
7149 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
7158   /* Stmts that are (also) "live" (i.e., used outside the loop)
7159      need extra handling, except for vectorizable reductions.  */
7160 if (STMT_VINFO_LIVE_P (stmt_info
)
7161 && STMT_VINFO_TYPE (stmt_info
) != reduc_vec_info_type
)
7162 ok
= vectorizable_live_operation (stmt
, NULL
, NULL
);
7166 if (dump_enabled_p ())
7168 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7169 "not vectorized: live stmt not ");
7170 dump_printf (MSG_MISSED_OPTIMIZATION
, "supported: ");
7171 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
7172 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
7182 /* Function vect_transform_stmt.
7184    Create a vectorized stmt to replace STMT, and insert it at GSI.  */
7187 vect_transform_stmt (gimple stmt
, gimple_stmt_iterator
*gsi
,
7188 bool *grouped_store
, slp_tree slp_node
,
7189 slp_instance slp_node_instance
)
7191 bool is_store
= false;
7192 gimple vec_stmt
= NULL
;
7193 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
7196 switch (STMT_VINFO_TYPE (stmt_info
))
7198 case type_demotion_vec_info_type
:
7199 case type_promotion_vec_info_type
:
7200 case type_conversion_vec_info_type
:
7201 done
= vectorizable_conversion (stmt
, gsi
, &vec_stmt
, slp_node
);
7205 case induc_vec_info_type
:
7206 gcc_assert (!slp_node
);
7207 done
= vectorizable_induction (stmt
, gsi
, &vec_stmt
);
7211 case shift_vec_info_type
:
7212 done
= vectorizable_shift (stmt
, gsi
, &vec_stmt
, slp_node
);
7216 case op_vec_info_type
:
7217 done
= vectorizable_operation (stmt
, gsi
, &vec_stmt
, slp_node
);
7221 case assignment_vec_info_type
:
7222 done
= vectorizable_assignment (stmt
, gsi
, &vec_stmt
, slp_node
);
7226 case load_vec_info_type
:
7227 done
= vectorizable_load (stmt
, gsi
, &vec_stmt
, slp_node
,
7232 case store_vec_info_type
:
7233 done
= vectorizable_store (stmt
, gsi
, &vec_stmt
, slp_node
);
7235 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
) && !slp_node
)
7237 /* In case of interleaving, the whole chain is vectorized when the
7238 last store in the chain is reached. Store stmts before the last
7239          one are skipped, and their vec_stmt_info shouldn't be freed
7241 *grouped_store
= true;
7242 if (STMT_VINFO_VEC_STMT (stmt_info
))
7249 case condition_vec_info_type
:
7250 done
= vectorizable_condition (stmt
, gsi
, &vec_stmt
, NULL
, 0, slp_node
);
7254 case call_vec_info_type
:
7255 done
= vectorizable_call (stmt
, gsi
, &vec_stmt
, slp_node
);
7256 stmt
= gsi_stmt (*gsi
);
7257 if (is_gimple_call (stmt
)
7258 && gimple_call_internal_p (stmt
)
7259 && gimple_call_internal_fn (stmt
) == IFN_MASK_STORE
)
7263 case call_simd_clone_vec_info_type
:
7264 done
= vectorizable_simd_clone_call (stmt
, gsi
, &vec_stmt
, slp_node
);
7265 stmt
= gsi_stmt (*gsi
);
7268 case reduc_vec_info_type
:
7269 done
= vectorizable_reduction (stmt
, gsi
, &vec_stmt
, slp_node
);
7274 if (!STMT_VINFO_LIVE_P (stmt_info
))
7276 if (dump_enabled_p ())
7277 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7278 "stmt not supported.\n");
7283 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
7284 is being vectorized, but outside the immediately enclosing loop. */
7286 && STMT_VINFO_LOOP_VINFO (stmt_info
)
7287 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
7288 STMT_VINFO_LOOP_VINFO (stmt_info
)), stmt
)
7289 && STMT_VINFO_TYPE (stmt_info
) != reduc_vec_info_type
7290 && (STMT_VINFO_RELEVANT (stmt_info
) == vect_used_in_outer
7291 || STMT_VINFO_RELEVANT (stmt_info
) ==
7292 vect_used_in_outer_by_reduction
))
7294 struct loop
*innerloop
= LOOP_VINFO_LOOP (
7295 STMT_VINFO_LOOP_VINFO (stmt_info
))->inner
;
7296 imm_use_iterator imm_iter
;
7297 use_operand_p use_p
;
7301 if (dump_enabled_p ())
7302 dump_printf_loc (MSG_NOTE
, vect_location
,
7303 "Record the vdef for outer-loop vectorization.\n");
7305       /* Find the relevant loop-exit phi-node, and record the vec_stmt there
7306 (to be used when vectorizing outer-loop stmts that use the DEF of
7308 if (gimple_code (stmt
) == GIMPLE_PHI
)
7309 scalar_dest
= PHI_RESULT (stmt
);
7311 scalar_dest
= gimple_assign_lhs (stmt
);
7313 FOR_EACH_IMM_USE_FAST (use_p
, imm_iter
, scalar_dest
)
7315 if (!flow_bb_inside_loop_p (innerloop
, gimple_bb (USE_STMT (use_p
))))
7317 exit_phi
= USE_STMT (use_p
);
7318 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi
)) = vec_stmt
;
7323 /* Handle stmts whose DEF is used outside the loop-nest that is
7324 being vectorized. */
7325 if (STMT_VINFO_LIVE_P (stmt_info
)
7326 && STMT_VINFO_TYPE (stmt_info
) != reduc_vec_info_type
)
7328 done
= vectorizable_live_operation (stmt
, gsi
, &vec_stmt
);
7333 STMT_VINFO_VEC_STMT (stmt_info
) = vec_stmt
;
7339 /* Remove a group of stores (for SLP or interleaving), free their
7343 vect_remove_stores (gimple first_stmt)
7345   gimple next = first_stmt;
7347   gimple_stmt_iterator next_si;
7351       stmt_vec_info stmt_info = vinfo_for_stmt (next);
7353       tmp = GROUP_NEXT_ELEMENT (stmt_info);
7354       if (is_pattern_stmt_p (stmt_info))
7355         next = STMT_VINFO_RELATED_STMT (stmt_info);
7356       /* Free the attached stmt_vec_info and remove the stmt.  */
7357       next_si = gsi_for_stmt (next);
7358       unlink_stmt_vdef (next);
7359       gsi_remove (&next_si, true);
7360       release_defs (next);
7361       free_stmt_vec_info (next);
7367 /* Function new_stmt_vec_info.
7369 Create and initialize a new stmt_vec_info struct for STMT. */
7372 new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
7373                    bb_vec_info bb_vinfo)
7376   res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
7378   STMT_VINFO_TYPE (res) = undef_vec_info_type;
7379   STMT_VINFO_STMT (res) = stmt;
7380   STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
7381   STMT_VINFO_BB_VINFO (res) = bb_vinfo;
7382   STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
7383   STMT_VINFO_LIVE_P (res) = false;
7384   STMT_VINFO_VECTYPE (res) = NULL;
7385   STMT_VINFO_VEC_STMT (res) = NULL;
7386   STMT_VINFO_VECTORIZABLE (res) = true;
7387   STMT_VINFO_IN_PATTERN_P (res) = false;
7388   STMT_VINFO_RELATED_STMT (res) = NULL;
7389   STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
7390   STMT_VINFO_DATA_REF (res) = NULL;
7392   STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
7393   STMT_VINFO_DR_OFFSET (res) = NULL;
7394   STMT_VINFO_DR_INIT (res) = NULL;
7395   STMT_VINFO_DR_STEP (res) = NULL;
7396   STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
7398   if (gimple_code (stmt) == GIMPLE_PHI
7399       && is_loop_header_bb_p (gimple_bb (stmt)))
7400     STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
7402     STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
7404   STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
7405   STMT_SLP_TYPE (res) = loop_vect;
7406   GROUP_FIRST_ELEMENT (res) = NULL;
7407   GROUP_NEXT_ELEMENT (res) = NULL;
7408   GROUP_SIZE (res) = 0;
7409   GROUP_STORE_COUNT (res) = 0;
7410   GROUP_GAP (res) = 0;
7411   GROUP_SAME_DR_STMT (res) = NULL;
7417 /* Create a hash table for stmt_vec_info. */
7420 init_stmt_vec_info_vec (void)
7422   gcc_assert (!stmt_vec_info_vec.exists ());
7423   stmt_vec_info_vec.create (50);
7427 /* Free hash table for stmt_vec_info.  */
7430 free_stmt_vec_info_vec (void)
7434   FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
7436     free_stmt_vec_info (STMT_VINFO_STMT ((stmt_vec_info) info));
7437   gcc_assert (stmt_vec_info_vec.exists ());
7438   stmt_vec_info_vec.release ();
7442 /* Free stmt vectorization related info. */
7445 free_stmt_vec_info (gimple stmt)
7447   stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7452   /* Check if this statement has a related "pattern stmt"
7453      (introduced by the vectorizer during the pattern recognition
7454      pass).  Free pattern's stmt_vec_info and def stmt's stmt_vec_info
7456   if (STMT_VINFO_IN_PATTERN_P (stmt_info))
7458       stmt_vec_info patt_info
7459         = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
7462           gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
7463           gimple patt_stmt = STMT_VINFO_STMT (patt_info);
7464           gimple_set_bb (patt_stmt, NULL);
7465           tree lhs = gimple_get_lhs (patt_stmt);
7466           if (TREE_CODE (lhs) == SSA_NAME)
7467             release_ssa_name (lhs);
7470               gimple_stmt_iterator si;
7471               for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
7473                   gimple seq_stmt = gsi_stmt (si);
7474                   gimple_set_bb (seq_stmt, NULL);
7475                   lhs = gimple_get_lhs (patt_stmt);
7476                   if (TREE_CODE (lhs) == SSA_NAME)
7477                     release_ssa_name (lhs);
7478                   free_stmt_vec_info (seq_stmt);
7481           free_stmt_vec_info (patt_stmt);
7485   STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
7486   set_vinfo_for_stmt (stmt, NULL);
7491 /* Function get_vectype_for_scalar_type_and_size.
7493 Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
7497 get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
7499   machine_mode inner_mode = TYPE_MODE (scalar_type);
7500   machine_mode simd_mode;
7501   unsigned int nbytes = GET_MODE_SIZE (inner_mode);
7508   if (GET_MODE_CLASS (inner_mode) != MODE_INT
7509       && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
7512   /* For vector types of elements whose mode precision doesn't
7513      match their type's precision, we use an element type of mode
7514      precision.  The vectorization routines will have to make sure
7515      they support the proper result truncation/extension.
7516      We also make sure to build vector types with INTEGER_TYPE
7517      component type only.  */
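  /* Illustration only, not from the original sources: a C "bool" typically
     has TYPE_PRECISION 1 but an 8-bit QImode, so the code below replaces it
     with an 8-bit nonstandard integer type before building e.g. a 16-element
     QImode vector; the vectorized code must then perform any needed
     truncation/extension explicitly.  */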
7518   if (INTEGRAL_TYPE_P (scalar_type)
7519       && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
7520           || TREE_CODE (scalar_type) != INTEGER_TYPE))
7521     scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
7522                                                   TYPE_UNSIGNED (scalar_type));
7524   /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
7525      When the component mode passes the above test, simply use a type
7526      corresponding to that mode.  The theory is that any use that
7527      would cause problems with this will disable vectorization anyway.  */
7528   else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
7529            && !INTEGRAL_TYPE_P (scalar_type))
7530     scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);
7532   /* We can't build a vector type of elements with alignment bigger than
     their size.  */
7534   else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
7535     scalar_type = lang_hooks.types.type_for_mode (inner_mode,
7536                                                   TYPE_UNSIGNED (scalar_type));
7538   /* If we fell back to using the mode, fail if there was
7539      no scalar type for it.  */
7540   if (scalar_type == NULL_TREE)
7543   /* If no size was supplied, use the mode the target prefers.  Otherwise
7544      look up a vector mode of the specified size.  */
7546     simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
7548     simd_mode = mode_for_vector (inner_mode, size / nbytes);
7549   nunits = GET_MODE_SIZE (simd_mode) / nbytes;
7553   vectype = build_vector_type (scalar_type, nunits);
7555   if (!VECTOR_MODE_P (TYPE_MODE (vectype))
7556       && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
7562 unsigned int current_vector_size;
7564 /* Function get_vectype_for_scalar_type.
7566    Returns the vector type corresponding to SCALAR_TYPE as supported
7570 get_vectype_for_scalar_type (tree scalar_type)
7573   vectype = get_vectype_for_scalar_type_and_size (scalar_type,
7574                                                   current_vector_size);
7576       && current_vector_size == 0)
7577     current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
7581 /* Function get_same_sized_vectype
7583 Returns a vector type corresponding to SCALAR_TYPE of size
7584 VECTOR_TYPE if supported by the target. */
7587 get_same_sized_vectype (tree scalar_type, tree vector_type)
7589   return get_vectype_for_scalar_type_and_size
7590            (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
7593 /* Function vect_is_simple_use.
7596 LOOP_VINFO - the vect info of the loop that is being vectorized.
7597 BB_VINFO - the vect info of the basic block that is being vectorized.
7598 OPERAND - operand of STMT in the loop or bb.
7599 DEF - the defining stmt in case OPERAND is an SSA_NAME.
7601 Returns whether a stmt with OPERAND can be vectorized.
7602 For loops, supportable operands are constants, loop invariants, and operands
7603 that are defined by the current iteration of the loop. Unsupportable
7604 operands are those that are defined by a previous iteration of the loop (as
7605 is the case in reduction/induction computations).
7606 For basic blocks, supportable operands are constants and bb invariants.
7607 For now, operands defined outside the basic block are not supported. */
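/* Illustration only, not from the original sources: for a loop statement
   a_5 = b_6 + c_7 (hypothetical SSA names), asking about operand b_6 yields
     - vect_constant_def   if the operand is a literal constant,
     - vect_external_def   if its def is outside the loop (a loop invariant),
     - vect_internal_def   if it is computed by a statement inside the loop,
     - vect_induction_def / vect_reduction_def for defs produced by the
       corresponding loop phis,
   and *DEF_STMT/*DEF are set from the defining statement as shown below.  */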
7610 vect_is_simple_use (tree operand
, gimple stmt
, loop_vec_info loop_vinfo
,
7611 bb_vec_info bb_vinfo
, gimple
*def_stmt
,
7612 tree
*def
, enum vect_def_type
*dt
)
7615 stmt_vec_info stmt_vinfo
;
7616 struct loop
*loop
= NULL
;
7619 loop
= LOOP_VINFO_LOOP (loop_vinfo
);
7624 if (dump_enabled_p ())
7626 dump_printf_loc (MSG_NOTE
, vect_location
,
7627 "vect_is_simple_use: operand ");
7628 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, operand
);
7629 dump_printf (MSG_NOTE
, "\n");
7632 if (CONSTANT_CLASS_P (operand
))
7634 *dt
= vect_constant_def
;
7638 if (is_gimple_min_invariant (operand
))
7641 *dt
= vect_external_def
;
7645 if (TREE_CODE (operand
) == PAREN_EXPR
)
7647 if (dump_enabled_p ())
7648 dump_printf_loc (MSG_NOTE
, vect_location
, "non-associatable copy.\n");
7649 operand
= TREE_OPERAND (operand
, 0);
7652 if (TREE_CODE (operand
) != SSA_NAME
)
7654 if (dump_enabled_p ())
7655 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7660 *def_stmt
= SSA_NAME_DEF_STMT (operand
);
7661 if (*def_stmt
== NULL
)
7663 if (dump_enabled_p ())
7664 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7669 if (dump_enabled_p ())
7671 dump_printf_loc (MSG_NOTE
, vect_location
, "def_stmt: ");
7672 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, *def_stmt
, 0);
7673 dump_printf (MSG_NOTE
, "\n");
7676 /* Empty stmt is expected only in case of a function argument.
7677 (Otherwise - we expect a phi_node or a GIMPLE_ASSIGN). */
7678 if (gimple_nop_p (*def_stmt
))
7681 *dt
= vect_external_def
;
7685 bb
= gimple_bb (*def_stmt
);
7687 if ((loop
&& !flow_bb_inside_loop_p (loop
, bb
))
7688 || (!loop
&& bb
!= BB_VINFO_BB (bb_vinfo
))
7689 || (!loop
&& gimple_code (*def_stmt
) == GIMPLE_PHI
))
7690 *dt
= vect_external_def
;
7693 stmt_vinfo
= vinfo_for_stmt (*def_stmt
);
7694 *dt
= STMT_VINFO_DEF_TYPE (stmt_vinfo
);
7697 if (*dt
== vect_unknown_def_type
7699 && *dt
== vect_double_reduction_def
7700 && gimple_code (stmt
) != GIMPLE_PHI
))
7702 if (dump_enabled_p ())
7703 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7704 "Unsupported pattern.\n");
7708 if (dump_enabled_p ())
7709 dump_printf_loc (MSG_NOTE
, vect_location
, "type of def: %d.\n", *dt
);
7711 switch (gimple_code (*def_stmt
))
7714 *def
= gimple_phi_result (*def_stmt
);
7718 *def
= gimple_assign_lhs (*def_stmt
);
7722 *def
= gimple_call_lhs (*def_stmt
);
7727 if (dump_enabled_p ())
7728 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7729 "unsupported defining stmt:\n");
7736 /* Function vect_is_simple_use_1.
7738    Same as vect_is_simple_use but also determines the vector operand
7739 type of OPERAND and stores it to *VECTYPE. If the definition of
7740 OPERAND is vect_uninitialized_def, vect_constant_def or
7741    vect_external_def, *VECTYPE will be set to NULL_TREE and the caller
7742    is responsible for computing the best suited vector type for the
7746 vect_is_simple_use_1 (tree operand
, gimple stmt
, loop_vec_info loop_vinfo
,
7747 bb_vec_info bb_vinfo
, gimple
*def_stmt
,
7748 tree
*def
, enum vect_def_type
*dt
, tree
*vectype
)
7750 if (!vect_is_simple_use (operand
, stmt
, loop_vinfo
, bb_vinfo
, def_stmt
,
7754 /* Now get a vector type if the def is internal, otherwise supply
7755 NULL_TREE and leave it up to the caller to figure out a proper
7756 type for the use stmt. */
7757 if (*dt
== vect_internal_def
7758 || *dt
== vect_induction_def
7759 || *dt
== vect_reduction_def
7760 || *dt
== vect_double_reduction_def
7761 || *dt
== vect_nested_cycle
)
7763 stmt_vec_info stmt_info
= vinfo_for_stmt (*def_stmt
);
7765 if (STMT_VINFO_IN_PATTERN_P (stmt_info
)
7766 && !STMT_VINFO_RELEVANT (stmt_info
)
7767 && !STMT_VINFO_LIVE_P (stmt_info
))
7768 stmt_info
= vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info
));
7770 *vectype
= STMT_VINFO_VECTYPE (stmt_info
);
7771 gcc_assert (*vectype
!= NULL_TREE
);
7773 else if (*dt
== vect_uninitialized_def
7774 || *dt
== vect_constant_def
7775 || *dt
== vect_external_def
)
7776 *vectype
= NULL_TREE
;
7784 /* Function supportable_widening_operation
7786 Check whether an operation represented by the code CODE is a
7787 widening operation that is supported by the target platform in
7788 vector form (i.e., when operating on arguments of type VECTYPE_IN
7789 producing a result of type VECTYPE_OUT).
7791 Widening operations we currently support are NOP (CONVERT), FLOAT
7792 and WIDEN_MULT. This function checks if these operations are supported
7793 by the target platform either directly (via vector tree-codes), or via
7797 - CODE1 and CODE2 are codes of vector operations to be used when
7798 vectorizing the operation, if available.
7799 - MULTI_STEP_CVT determines the number of required intermediate steps in
7800 case of multi-step conversion (like char->short->int - in that case
7801 MULTI_STEP_CVT will be 1).
7802 - INTERM_TYPES contains the intermediate type required to perform the
7803 widening operation (short in the above example). */
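/* Illustration only, not from the original sources: widening a V16QI vector
   of chars to ints takes two steps.  The first step unpacks the chars into
   two V8HI vectors of shorts (VEC_UNPACK_LO_EXPR / VEC_UNPACK_HI_EXPR on
   V16QI); the second unpacks each V8HI into two V4SI vectors.  In that case
   *MULTI_STEP_CVT is 1, *INTERM_TYPES holds the V8HI intermediate vector
   type, and CODE1/CODE2 are the unpack codes applied at each step.  */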
7806 supportable_widening_operation (enum tree_code code
, gimple stmt
,
7807 tree vectype_out
, tree vectype_in
,
7808 enum tree_code
*code1
, enum tree_code
*code2
,
7809 int *multi_step_cvt
,
7810 vec
<tree
> *interm_types
)
7812 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
7813 loop_vec_info loop_info
= STMT_VINFO_LOOP_VINFO (stmt_info
);
7814 struct loop
*vect_loop
= NULL
;
7815 machine_mode vec_mode
;
7816 enum insn_code icode1
, icode2
;
7817 optab optab1
, optab2
;
7818 tree vectype
= vectype_in
;
7819 tree wide_vectype
= vectype_out
;
7820 enum tree_code c1
, c2
;
7822 tree prev_type
, intermediate_type
;
7823 machine_mode intermediate_mode
, prev_mode
;
7824 optab optab3
, optab4
;
7826 *multi_step_cvt
= 0;
7828 vect_loop
= LOOP_VINFO_LOOP (loop_info
);
7832 case WIDEN_MULT_EXPR
:
7833 /* The result of a vectorized widening operation usually requires
7834 two vectors (because the widened results do not fit into one vector).
7835 The generated vector results would normally be expected to be
7836 generated in the same order as in the original scalar computation,
7837 i.e. if 8 results are generated in each vector iteration, they are
7838 to be organized as follows:
7839 vect1: [res1,res2,res3,res4],
7840 vect2: [res5,res6,res7,res8].
7842 However, in the special case that the result of the widening
7843 operation is used in a reduction computation only, the order doesn't
7844 matter (because when vectorizing a reduction we change the order of
7845 the computation). Some targets can take advantage of this and
7846 generate more efficient code. For example, targets like Altivec,
7847 that support widen_mult using a sequence of {mult_even,mult_odd}
7848 generate the following vectors:
7849 vect1: [res1,res3,res5,res7],
7850 vect2: [res2,res4,res6,res8].
7852 When vectorizing outer-loops, we execute the inner-loop sequentially
7853 (each vectorized inner-loop iteration contributes to VF outer-loop
7854       iterations in parallel).  We therefore don't allow changing the
7855 order of the computation in the inner-loop during outer-loop
7857 /* TODO: Another case in which order doesn't *really* matter is when we
7858 widen and then contract again, e.g. (short)((int)x * y >> 8).
7859 Normally, pack_trunc performs an even/odd permute, whereas the
7860 repack from an even/odd expansion would be an interleave, which
7861 would be significantly simpler for e.g. AVX2. */
7862 /* In any case, in order to avoid duplicating the code below, recurse
7863 on VEC_WIDEN_MULT_EVEN_EXPR. If it succeeds, all the return values
7864 are properly set up for the caller. If we fail, we'll continue with
7865 a VEC_WIDEN_MULT_LO/HI_EXPR check. */
7867 && STMT_VINFO_RELEVANT (stmt_info
) == vect_used_by_reduction
7868 && !nested_in_vect_loop_p (vect_loop
, stmt
)
7869 && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR
,
7870 stmt
, vectype_out
, vectype_in
,
7871 code1
, code2
, multi_step_cvt
,
7874 /* Elements in a vector with vect_used_by_reduction property cannot
7875 be reordered if the use chain with this property does not have the
7876          same operation.  One such example is s += a * b, where elements
7877 in a and b cannot be reordered. Here we check if the vector defined
7878 by STMT is only directly used in the reduction statement. */
7879 tree lhs
= gimple_assign_lhs (stmt
);
7880 use_operand_p dummy
;
7882 stmt_vec_info use_stmt_info
= NULL
;
7883 if (single_imm_use (lhs
, &dummy
, &use_stmt
)
7884 && (use_stmt_info
= vinfo_for_stmt (use_stmt
))
7885 && STMT_VINFO_DEF_TYPE (use_stmt_info
) == vect_reduction_def
)
7888 c1
= VEC_WIDEN_MULT_LO_EXPR
;
7889 c2
= VEC_WIDEN_MULT_HI_EXPR
;
7892 case VEC_WIDEN_MULT_EVEN_EXPR
:
7893 /* Support the recursion induced just above. */
7894 c1
= VEC_WIDEN_MULT_EVEN_EXPR
;
7895 c2
= VEC_WIDEN_MULT_ODD_EXPR
;
7898 case WIDEN_LSHIFT_EXPR
:
7899 c1
= VEC_WIDEN_LSHIFT_LO_EXPR
;
7900 c2
= VEC_WIDEN_LSHIFT_HI_EXPR
;
7904 c1
= VEC_UNPACK_LO_EXPR
;
7905 c2
= VEC_UNPACK_HI_EXPR
;
7909 c1
= VEC_UNPACK_FLOAT_LO_EXPR
;
7910 c2
= VEC_UNPACK_FLOAT_HI_EXPR
;
7913 case FIX_TRUNC_EXPR
:
7914 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
7915 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
7916 computing the operation. */
7923 if (BYTES_BIG_ENDIAN
&& c1
!= VEC_WIDEN_MULT_EVEN_EXPR
)
7925 enum tree_code ctmp
= c1
;
7930 if (code
== FIX_TRUNC_EXPR
)
7932 /* The signedness is determined from output operand. */
7933 optab1
= optab_for_tree_code (c1
, vectype_out
, optab_default
);
7934 optab2
= optab_for_tree_code (c2
, vectype_out
, optab_default
);
7938 optab1
= optab_for_tree_code (c1
, vectype
, optab_default
);
7939 optab2
= optab_for_tree_code (c2
, vectype
, optab_default
);
7942 if (!optab1
|| !optab2
)
7945 vec_mode
= TYPE_MODE (vectype
);
7946 if ((icode1
= optab_handler (optab1
, vec_mode
)) == CODE_FOR_nothing
7947 || (icode2
= optab_handler (optab2
, vec_mode
)) == CODE_FOR_nothing
)
7953 if (insn_data
[icode1
].operand
[0].mode
== TYPE_MODE (wide_vectype
)
7954 && insn_data
[icode2
].operand
[0].mode
== TYPE_MODE (wide_vectype
))
7957 /* Check if it's a multi-step conversion that can be done using intermediate
7960 prev_type
= vectype
;
7961 prev_mode
= vec_mode
;
7963 if (!CONVERT_EXPR_CODE_P (code
))
7966 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
7967      intermediate steps in the promotion sequence.  We try
7968      MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not.  */
7970 interm_types
->create (MAX_INTERM_CVT_STEPS
);
7971 for (i
= 0; i
< MAX_INTERM_CVT_STEPS
; i
++)
7973 intermediate_mode
= insn_data
[icode1
].operand
[0].mode
;
7975 = lang_hooks
.types
.type_for_mode (intermediate_mode
,
7976 TYPE_UNSIGNED (prev_type
));
7977 optab3
= optab_for_tree_code (c1
, intermediate_type
, optab_default
);
7978 optab4
= optab_for_tree_code (c2
, intermediate_type
, optab_default
);
7980 if (!optab3
|| !optab4
7981 || (icode1
= optab_handler (optab1
, prev_mode
)) == CODE_FOR_nothing
7982 || insn_data
[icode1
].operand
[0].mode
!= intermediate_mode
7983 || (icode2
= optab_handler (optab2
, prev_mode
)) == CODE_FOR_nothing
7984 || insn_data
[icode2
].operand
[0].mode
!= intermediate_mode
7985 || ((icode1
= optab_handler (optab3
, intermediate_mode
))
7986 == CODE_FOR_nothing
)
7987 || ((icode2
= optab_handler (optab4
, intermediate_mode
))
7988 == CODE_FOR_nothing
))
7991 interm_types
->quick_push (intermediate_type
);
7992 (*multi_step_cvt
)++;
7994 if (insn_data
[icode1
].operand
[0].mode
== TYPE_MODE (wide_vectype
)
7995 && insn_data
[icode2
].operand
[0].mode
== TYPE_MODE (wide_vectype
))
7998 prev_type
= intermediate_type
;
7999 prev_mode
= intermediate_mode
;
8002 interm_types
->release ();
8007 /* Function supportable_narrowing_operation
8009 Check whether an operation represented by the code CODE is a
8010 narrowing operation that is supported by the target platform in
8011 vector form (i.e., when operating on arguments of type VECTYPE_IN
8012 and producing a result of type VECTYPE_OUT).
8014 Narrowing operations we currently support are NOP (CONVERT) and
8015 FIX_TRUNC. This function checks if these operations are supported by
8016 the target platform directly via vector tree-codes.
8019 - CODE1 is the code of a vector operation to be used when
8020 vectorizing the operation, if available.
8021 - MULTI_STEP_CVT determines the number of required intermediate steps in
8022 case of multi-step conversion (like int->short->char - in that case
8023 MULTI_STEP_CVT will be 1).
8024 - INTERM_TYPES contains the intermediate type required to perform the
8025 narrowing operation (short in the above example). */
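/* Illustration only, not from the original sources: narrowing V4SI ints to
   chars is the mirror image of the widening example above.  Two V4SI vectors
   are first packed into one V8HI vector (VEC_PACK_TRUNC_EXPR), and two V8HI
   vectors are then packed into one V16QI vector; *MULTI_STEP_CVT is 1 and
   *INTERM_TYPES holds the V8HI intermediate vector type.  */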
8028 supportable_narrowing_operation (enum tree_code code
,
8029 tree vectype_out
, tree vectype_in
,
8030 enum tree_code
*code1
, int *multi_step_cvt
,
8031 vec
<tree
> *interm_types
)
8033 machine_mode vec_mode
;
8034 enum insn_code icode1
;
8035 optab optab1
, interm_optab
;
8036 tree vectype
= vectype_in
;
8037 tree narrow_vectype
= vectype_out
;
8039 tree intermediate_type
;
8040 machine_mode intermediate_mode
, prev_mode
;
8044 *multi_step_cvt
= 0;
8048 c1
= VEC_PACK_TRUNC_EXPR
;
8051 case FIX_TRUNC_EXPR
:
8052 c1
= VEC_PACK_FIX_TRUNC_EXPR
;
8056 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
8057 tree code and optabs used for computing the operation. */
8064 if (code
== FIX_TRUNC_EXPR
)
8065 /* The signedness is determined from output operand. */
8066 optab1
= optab_for_tree_code (c1
, vectype_out
, optab_default
);
8068 optab1
= optab_for_tree_code (c1
, vectype
, optab_default
);
8073 vec_mode
= TYPE_MODE (vectype
);
8074 if ((icode1
= optab_handler (optab1
, vec_mode
)) == CODE_FOR_nothing
)
8079 if (insn_data
[icode1
].operand
[0].mode
== TYPE_MODE (narrow_vectype
))
8082 /* Check if it's a multi-step conversion that can be done using intermediate
8084 prev_mode
= vec_mode
;
8085 if (code
== FIX_TRUNC_EXPR
)
8086 uns
= TYPE_UNSIGNED (vectype_out
);
8088 uns
= TYPE_UNSIGNED (vectype
);
8090 /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
8091 conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
8092 costly than signed. */
8093 if (code
== FIX_TRUNC_EXPR
&& uns
)
8095 enum insn_code icode2
;
8098 = lang_hooks
.types
.type_for_mode (TYPE_MODE (vectype_out
), 0);
8100 = optab_for_tree_code (c1
, intermediate_type
, optab_default
);
8101 if (interm_optab
!= unknown_optab
8102 && (icode2
= optab_handler (optab1
, vec_mode
)) != CODE_FOR_nothing
8103 && insn_data
[icode1
].operand
[0].mode
8104 == insn_data
[icode2
].operand
[0].mode
)
8107 optab1
= interm_optab
;
8112 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
8113      intermediate steps in the narrowing sequence.  We try
8114 MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not. */
8115 interm_types
->create (MAX_INTERM_CVT_STEPS
);
8116 for (i
= 0; i
< MAX_INTERM_CVT_STEPS
; i
++)
8118 intermediate_mode
= insn_data
[icode1
].operand
[0].mode
;
8120 = lang_hooks
.types
.type_for_mode (intermediate_mode
, uns
);
8122 = optab_for_tree_code (VEC_PACK_TRUNC_EXPR
, intermediate_type
,
8125 || ((icode1
= optab_handler (optab1
, prev_mode
)) == CODE_FOR_nothing
)
8126 || insn_data
[icode1
].operand
[0].mode
!= intermediate_mode
8127 || ((icode1
= optab_handler (interm_optab
, intermediate_mode
))
8128 == CODE_FOR_nothing
))
8131 interm_types
->quick_push (intermediate_type
);
8132 (*multi_step_cvt
)++;
8134 if (insn_data
[icode1
].operand
[0].mode
== TYPE_MODE (narrow_vectype
))
8137 prev_mode
= intermediate_mode
;
8138 optab1
= interm_optab
;
8141 interm_types
->release ();