/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003-2015 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-pretty-print.h"
#include "internal-fn.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop.h"
#include "tree-scalar-evolution.h"
#include "insn-config.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "insn-codes.h"
#include "optabs-tree.h"
#include "diagnostic-core.h"
#include "tree-vectorizer.h"

/* For lang_hooks.types.type_for_mode.  */
#include "langhooks.h"
/* Return the vectorized type for the given statement.  */

tree
stmt_vectype (struct _stmt_vec_info *stmt_info)
{
  return STMT_VINFO_VECTYPE (stmt_info);
}
/* Return TRUE iff the given statement is in an inner loop relative to
   the loop being vectorized.  */

bool
stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
{
  gimple *stmt = STMT_VINFO_STMT (stmt_info);
  basic_block bb = gimple_bb (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop;

  if (!loop_vinfo)
    return false;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  return (bb->loop_father == loop->inner);
}
/* Record the cost of a statement, either by directly informing the
   target model or by saving it in a vector for later processing.
   Return a preliminary estimate of the statement's cost.  */

unsigned
record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
		  enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
		  int misalign, enum vect_cost_model_location where)
{
  if (body_cost_vec)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      stmt_info_for_cost si = { count, kind,
				stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
				misalign };
      body_cost_vec->safe_push (si);
      return (unsigned)
	(builtin_vectorization_cost (kind, vectype, misalign) * count);
    }
  else
    return add_stmt_cost (stmt_info->vinfo->target_cost_data,
			  count, kind, stmt_info, misalign, where);
}
/* Return a variable of type ELEM_TYPE[NELEMS].  */

static tree
create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
			 "vect_array");
}
/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT and the vector is associated
   with scalar destination SCALAR_DEST.  */

static tree
read_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
		   tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;
  gimple *new_stmt;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return vect_name;
}
/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT.  */

static void
write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect,
		    tree array, unsigned HOST_WIDE_INT n)
{
  tree array_ref;
  gimple *new_stmt;

  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
}
/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

static tree
create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
{
  tree mem_ref, alias_ptr_type;

  alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
  return mem_ref;
}
180 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
182 /* Function vect_mark_relevant.
184 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
187 vect_mark_relevant (vec
<gimple
*> *worklist
, gimple
*stmt
,
188 enum vect_relevant relevant
, bool live_p
,
189 bool used_in_pattern
)
191 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
192 enum vect_relevant save_relevant
= STMT_VINFO_RELEVANT (stmt_info
);
193 bool save_live_p
= STMT_VINFO_LIVE_P (stmt_info
);
194 gimple
*pattern_stmt
;
196 if (dump_enabled_p ())
197 dump_printf_loc (MSG_NOTE
, vect_location
,
198 "mark relevant %d, live %d.\n", relevant
, live_p
);
200 /* If this stmt is an original stmt in a pattern, we might need to mark its
201 related pattern stmt instead of the original stmt. However, such stmts
202 may have their own uses that are not in any pattern, in such cases the
203 stmt itself should be marked. */
204 if (STMT_VINFO_IN_PATTERN_P (stmt_info
))
207 if (!used_in_pattern
)
209 imm_use_iterator imm_iter
;
213 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
214 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
216 if (is_gimple_assign (stmt
))
217 lhs
= gimple_assign_lhs (stmt
);
219 lhs
= gimple_call_lhs (stmt
);
/* This use is outside of any pattern; if LHS has other uses that are
   pattern uses, we should mark the stmt itself, and not the pattern
   stmt.  */
224 if (lhs
&& TREE_CODE (lhs
) == SSA_NAME
)
225 FOR_EACH_IMM_USE_FAST (use_p
, imm_iter
, lhs
)
227 if (is_gimple_debug (USE_STMT (use_p
)))
229 use_stmt
= USE_STMT (use_p
);
231 if (!flow_bb_inside_loop_p (loop
, gimple_bb (use_stmt
)))
234 if (vinfo_for_stmt (use_stmt
)
235 && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt
)))
245 /* This is the last stmt in a sequence that was detected as a
246 pattern that can potentially be vectorized. Don't mark the stmt
247 as relevant/live because it's not going to be vectorized.
248 Instead mark the pattern-stmt that replaces it. */
250 pattern_stmt
= STMT_VINFO_RELATED_STMT (stmt_info
);
252 if (dump_enabled_p ())
253 dump_printf_loc (MSG_NOTE
, vect_location
,
254 "last stmt in pattern. don't mark"
255 " relevant/live.\n");
256 stmt_info
= vinfo_for_stmt (pattern_stmt
);
257 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info
) == stmt
);
258 save_relevant
= STMT_VINFO_RELEVANT (stmt_info
);
259 save_live_p
= STMT_VINFO_LIVE_P (stmt_info
);
264 STMT_VINFO_LIVE_P (stmt_info
) |= live_p
;
265 if (relevant
> STMT_VINFO_RELEVANT (stmt_info
))
266 STMT_VINFO_RELEVANT (stmt_info
) = relevant
;
268 if (STMT_VINFO_RELEVANT (stmt_info
) == save_relevant
269 && STMT_VINFO_LIVE_P (stmt_info
) == save_live_p
)
271 if (dump_enabled_p ())
272 dump_printf_loc (MSG_NOTE
, vect_location
,
273 "already marked relevant/live.\n");
277 worklist
->safe_push (stmt
);
/* Function vect_stmt_relevant_p.

   Return true if STMT in the loop that is represented by LOOP_VINFO is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - it is a control stmt in the loop (other than the loop exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */
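/* Illustrative note (not part of the original sources): in a loop like
     for (i = 0; i < n; i++)
       a[i] = b[i] + x;
   the store to a[i] is relevant because it has a vdef (it alters memory),
   and a statement whose result is used after the loop (e.g. a reduction
   result) is relevant because it has uses outside the loop; the loop exit
   condition itself is excluded.  */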
294 vect_stmt_relevant_p (gimple
*stmt
, loop_vec_info loop_vinfo
,
295 enum vect_relevant
*relevant
, bool *live_p
)
297 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
299 imm_use_iterator imm_iter
;
303 *relevant
= vect_unused_in_scope
;
306 /* cond stmt other than loop exit cond. */
307 if (is_ctrl_stmt (stmt
)
308 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt
))
309 != loop_exit_ctrl_vec_info_type
)
310 *relevant
= vect_used_in_scope
;
312 /* changing memory. */
313 if (gimple_code (stmt
) != GIMPLE_PHI
)
314 if (gimple_vdef (stmt
)
315 && !gimple_clobber_p (stmt
))
317 if (dump_enabled_p ())
318 dump_printf_loc (MSG_NOTE
, vect_location
,
319 "vec_stmt_relevant_p: stmt has vdefs.\n");
320 *relevant
= vect_used_in_scope
;
323 /* uses outside the loop. */
324 FOR_EACH_PHI_OR_STMT_DEF (def_p
, stmt
, op_iter
, SSA_OP_DEF
)
326 FOR_EACH_IMM_USE_FAST (use_p
, imm_iter
, DEF_FROM_PTR (def_p
))
328 basic_block bb
= gimple_bb (USE_STMT (use_p
));
329 if (!flow_bb_inside_loop_p (loop
, bb
))
331 if (dump_enabled_p ())
332 dump_printf_loc (MSG_NOTE
, vect_location
,
333 "vec_stmt_relevant_p: used out of loop.\n");
335 if (is_gimple_debug (USE_STMT (use_p
)))
338 /* We expect all such uses to be in the loop exit phis
339 (because of loop closed form) */
340 gcc_assert (gimple_code (USE_STMT (use_p
)) == GIMPLE_PHI
);
341 gcc_assert (bb
== single_exit (loop
)->dest
);
348 return (*live_p
|| *relevant
);
352 /* Function exist_non_indexing_operands_for_use_p
354 USE is one of the uses attached to STMT. Check if USE is
355 used in STMT for anything other than indexing an array. */
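/* Illustrative note (not part of the original sources): for a statement
   like  a[i_1] = x_2  the use of i_1 only feeds the array index (address
   computation), while the use of x_2 is a "non-indexing" operand; this
   predicate returns true only for uses of the latter kind.  */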
358 exist_non_indexing_operands_for_use_p (tree use
, gimple
*stmt
)
361 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
363 /* USE corresponds to some operand in STMT. If there is no data
364 reference in STMT, then any operand that corresponds to USE
365 is not indexing an array. */
366 if (!STMT_VINFO_DATA_REF (stmt_info
))
/* STMT has a data_ref.  FORNOW this means that it is of one of
     the following forms:
     -1- ARRAY_REF = var
     -2- var = ARRAY_REF
     (This should have been verified in analyze_data_refs.)

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */
382 if (!gimple_assign_copy_p (stmt
))
384 if (is_gimple_call (stmt
)
385 && gimple_call_internal_p (stmt
))
386 switch (gimple_call_internal_fn (stmt
))
389 operand
= gimple_call_arg (stmt
, 3);
394 operand
= gimple_call_arg (stmt
, 2);
404 if (TREE_CODE (gimple_assign_lhs (stmt
)) == SSA_NAME
)
406 operand
= gimple_assign_rhs1 (stmt
);
407 if (TREE_CODE (operand
) != SSA_NAME
)
/* Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
   - FORCE is true if the exist_non_indexing_operands_for_use_p check
     shouldn't be performed.

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
     STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
     STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array
     indexing), which does not need to be directly vectorized, then the
     liveness/relevance of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
     skip DEF_STMT because it has already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
     be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */
445 process_use (gimple
*stmt
, tree use
, loop_vec_info loop_vinfo
, bool live_p
,
446 enum vect_relevant relevant
, vec
<gimple
*> *worklist
,
449 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
450 stmt_vec_info stmt_vinfo
= vinfo_for_stmt (stmt
);
451 stmt_vec_info dstmt_vinfo
;
452 basic_block bb
, def_bb
;
454 enum vect_def_type dt
;
456 /* case 1: we are only interested in uses that need to be vectorized. Uses
457 that are used for address computation are not considered relevant. */
458 if (!force
&& !exist_non_indexing_operands_for_use_p (use
, stmt
))
461 if (!vect_is_simple_use (use
, loop_vinfo
, &def_stmt
, &dt
))
463 if (dump_enabled_p ())
464 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
465 "not vectorized: unsupported use in stmt.\n");
469 if (!def_stmt
|| gimple_nop_p (def_stmt
))
472 def_bb
= gimple_bb (def_stmt
);
473 if (!flow_bb_inside_loop_p (loop
, def_bb
))
475 if (dump_enabled_p ())
476 dump_printf_loc (MSG_NOTE
, vect_location
, "def_stmt is out of loop.\n");
480 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
481 DEF_STMT must have already been processed, because this should be the
482 only way that STMT, which is a reduction-phi, was put in the worklist,
483 as there should be no other uses for DEF_STMT in the loop. So we just
484 check that everything is as expected, and we are done. */
485 dstmt_vinfo
= vinfo_for_stmt (def_stmt
);
486 bb
= gimple_bb (stmt
);
487 if (gimple_code (stmt
) == GIMPLE_PHI
488 && STMT_VINFO_DEF_TYPE (stmt_vinfo
) == vect_reduction_def
489 && gimple_code (def_stmt
) != GIMPLE_PHI
490 && STMT_VINFO_DEF_TYPE (dstmt_vinfo
) == vect_reduction_def
491 && bb
->loop_father
== def_bb
->loop_father
)
493 if (dump_enabled_p ())
494 dump_printf_loc (MSG_NOTE
, vect_location
,
495 "reduc-stmt defining reduc-phi in the same nest.\n");
496 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo
))
497 dstmt_vinfo
= vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo
));
498 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo
) < vect_used_by_reduction
);
499 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo
)
500 || STMT_VINFO_RELEVANT (dstmt_vinfo
) > vect_unused_in_scope
);
504 /* case 3a: outer-loop stmt defining an inner-loop stmt:
505 outer-loop-header-bb:
511 if (flow_loop_nested_p (def_bb
->loop_father
, bb
->loop_father
))
513 if (dump_enabled_p ())
514 dump_printf_loc (MSG_NOTE
, vect_location
,
515 "outer-loop def-stmt defining inner-loop stmt.\n");
519 case vect_unused_in_scope
:
520 relevant
= (STMT_VINFO_DEF_TYPE (stmt_vinfo
) == vect_nested_cycle
) ?
521 vect_used_in_scope
: vect_unused_in_scope
;
524 case vect_used_in_outer_by_reduction
:
525 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo
) != vect_reduction_def
);
526 relevant
= vect_used_by_reduction
;
529 case vect_used_in_outer
:
530 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo
) != vect_reduction_def
);
531 relevant
= vect_used_in_scope
;
534 case vect_used_in_scope
:
542 /* case 3b: inner-loop stmt defining an outer-loop stmt:
543 outer-loop-header-bb:
547 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
549 else if (flow_loop_nested_p (bb
->loop_father
, def_bb
->loop_father
))
551 if (dump_enabled_p ())
552 dump_printf_loc (MSG_NOTE
, vect_location
,
553 "inner-loop def-stmt defining outer-loop stmt.\n");
557 case vect_unused_in_scope
:
558 relevant
= (STMT_VINFO_DEF_TYPE (stmt_vinfo
) == vect_reduction_def
559 || STMT_VINFO_DEF_TYPE (stmt_vinfo
) == vect_double_reduction_def
) ?
560 vect_used_in_outer_by_reduction
: vect_unused_in_scope
;
563 case vect_used_by_reduction
:
564 relevant
= vect_used_in_outer_by_reduction
;
567 case vect_used_in_scope
:
568 relevant
= vect_used_in_outer
;
576 vect_mark_relevant (worklist
, def_stmt
, relevant
, live_p
,
577 is_pattern_stmt_p (stmt_vinfo
));
582 /* Function vect_mark_stmts_to_be_vectorized.
584 Not all stmts in the loop need to be vectorized. For example:
593 Stmt 1 and 3 do not need to be vectorized, because loop control and
594 addressing of vectorized data-refs are handled differently.
596 This pass detects such stmts. */
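/* Illustrative reconstruction (not the original listing, which did not
   survive extraction) of the kind of loop meant above:

       for i
         for j
   1.      T0 = i + j
   2.      T1 = a[T0]
   3.      j = j + 1

   Stmt 1 computes an address/index and stmt 3 is loop control, so neither
   needs to be vectorized.  */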
599 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo
)
601 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
602 basic_block
*bbs
= LOOP_VINFO_BBS (loop_vinfo
);
603 unsigned int nbbs
= loop
->num_nodes
;
604 gimple_stmt_iterator si
;
607 stmt_vec_info stmt_vinfo
;
611 enum vect_relevant relevant
, tmp_relevant
;
612 enum vect_def_type def_type
;
614 if (dump_enabled_p ())
615 dump_printf_loc (MSG_NOTE
, vect_location
,
616 "=== vect_mark_stmts_to_be_vectorized ===\n");
618 auto_vec
<gimple
*, 64> worklist
;
620 /* 1. Init worklist. */
621 for (i
= 0; i
< nbbs
; i
++)
624 for (si
= gsi_start_phis (bb
); !gsi_end_p (si
); gsi_next (&si
))
627 if (dump_enabled_p ())
629 dump_printf_loc (MSG_NOTE
, vect_location
, "init: phi relevant? ");
630 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, phi
, 0);
633 if (vect_stmt_relevant_p (phi
, loop_vinfo
, &relevant
, &live_p
))
634 vect_mark_relevant (&worklist
, phi
, relevant
, live_p
, false);
636 for (si
= gsi_start_bb (bb
); !gsi_end_p (si
); gsi_next (&si
))
638 stmt
= gsi_stmt (si
);
639 if (dump_enabled_p ())
641 dump_printf_loc (MSG_NOTE
, vect_location
, "init: stmt relevant? ");
642 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, stmt
, 0);
645 if (vect_stmt_relevant_p (stmt
, loop_vinfo
, &relevant
, &live_p
))
646 vect_mark_relevant (&worklist
, stmt
, relevant
, live_p
, false);
650 /* 2. Process_worklist */
651 while (worklist
.length () > 0)
656 stmt
= worklist
.pop ();
657 if (dump_enabled_p ())
659 dump_printf_loc (MSG_NOTE
, vect_location
, "worklist: examine stmt: ");
660 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, stmt
, 0);
663 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
664 (DEF_STMT) as relevant/irrelevant and live/dead according to the
665 liveness and relevance properties of STMT. */
666 stmt_vinfo
= vinfo_for_stmt (stmt
);
667 relevant
= STMT_VINFO_RELEVANT (stmt_vinfo
);
668 live_p
= STMT_VINFO_LIVE_P (stmt_vinfo
);
670 /* Generally, the liveness and relevance properties of STMT are
671 propagated as is to the DEF_STMTs of its USEs:
672 live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
673 relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)
675 One exception is when STMT has been identified as defining a reduction
676 variable; in this case we set the liveness/relevance as follows:
678 relevant = vect_used_by_reduction
679 This is because we distinguish between two kinds of relevant stmts -
680 those that are used by a reduction computation, and those that are
681 (also) used by a regular computation. This allows us later on to
682 identify stmts that are used solely by a reduction, and therefore the
683 order of the results that they produce does not have to be kept. */
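/* Illustrative note (not part of the original sources): for a reduction
   such as  sum_1 = sum_0 + a[i]  the statements feeding the addition are
   marked vect_used_by_reduction rather than vect_used_in_scope, so that
   later stages know their results only reach the reduction and their
   lane order need not be preserved.  */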
685 def_type
= STMT_VINFO_DEF_TYPE (stmt_vinfo
);
686 tmp_relevant
= relevant
;
689 case vect_reduction_def
:
690 switch (tmp_relevant
)
692 case vect_unused_in_scope
:
693 relevant
= vect_used_by_reduction
;
696 case vect_used_by_reduction
:
697 if (gimple_code (stmt
) == GIMPLE_PHI
)
702 if (dump_enabled_p ())
703 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
704 "unsupported use of reduction.\n");
711 case vect_nested_cycle
:
712 if (tmp_relevant
!= vect_unused_in_scope
713 && tmp_relevant
!= vect_used_in_outer_by_reduction
714 && tmp_relevant
!= vect_used_in_outer
)
716 if (dump_enabled_p ())
717 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
718 "unsupported use of nested cycle.\n");
726 case vect_double_reduction_def
:
727 if (tmp_relevant
!= vect_unused_in_scope
728 && tmp_relevant
!= vect_used_by_reduction
)
730 if (dump_enabled_p ())
731 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
732 "unsupported use of double reduction.\n");
744 if (is_pattern_stmt_p (stmt_vinfo
))
746 /* Pattern statements are not inserted into the code, so
747 FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
748 have to scan the RHS or function arguments instead. */
749 if (is_gimple_assign (stmt
))
751 enum tree_code rhs_code
= gimple_assign_rhs_code (stmt
);
752 tree op
= gimple_assign_rhs1 (stmt
);
755 if (rhs_code
== COND_EXPR
&& COMPARISON_CLASS_P (op
))
757 if (!process_use (stmt
, TREE_OPERAND (op
, 0), loop_vinfo
,
758 live_p
, relevant
, &worklist
, false)
759 || !process_use (stmt
, TREE_OPERAND (op
, 1), loop_vinfo
,
760 live_p
, relevant
, &worklist
, false))
764 for (; i
< gimple_num_ops (stmt
); i
++)
766 op
= gimple_op (stmt
, i
);
767 if (TREE_CODE (op
) == SSA_NAME
768 && !process_use (stmt
, op
, loop_vinfo
, live_p
, relevant
,
773 else if (is_gimple_call (stmt
))
775 for (i
= 0; i
< gimple_call_num_args (stmt
); i
++)
777 tree arg
= gimple_call_arg (stmt
, i
);
778 if (!process_use (stmt
, arg
, loop_vinfo
, live_p
, relevant
,
785 FOR_EACH_PHI_OR_STMT_USE (use_p
, stmt
, iter
, SSA_OP_USE
)
787 tree op
= USE_FROM_PTR (use_p
);
788 if (!process_use (stmt
, op
, loop_vinfo
, live_p
, relevant
,
793 if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo
))
796 tree decl
= vect_check_gather_scatter (stmt
, loop_vinfo
, NULL
, &off
, NULL
);
798 if (!process_use (stmt
, off
, loop_vinfo
, live_p
, relevant
,
802 } /* while worklist */
808 /* Function vect_model_simple_cost.
810 Models cost for simple operations, i.e. those that only emit ncopies of a
811 single op. Right now, this does not account for multiple insns that could
812 be generated for the single vector op. We will handle that shortly. */
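/* Illustrative note (not part of the original sources): for a simple
   vector operation with NCOPIES = 2 and one constant/invariant operand,
   the loops below record 2 * vector_stmt in the loop body plus
   1 * vector_stmt in the prologue for setting up the invariant operand.  */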
815 vect_model_simple_cost (stmt_vec_info stmt_info
, int ncopies
,
816 enum vect_def_type
*dt
,
817 stmt_vector_for_cost
*prologue_cost_vec
,
818 stmt_vector_for_cost
*body_cost_vec
)
821 int inside_cost
= 0, prologue_cost
= 0;
823 /* The SLP costs were already calculated during SLP tree build. */
824 if (PURE_SLP_STMT (stmt_info
))
827 /* FORNOW: Assuming maximum 2 args per stmts. */
828 for (i
= 0; i
< 2; i
++)
829 if (dt
[i
] == vect_constant_def
|| dt
[i
] == vect_external_def
)
830 prologue_cost
+= record_stmt_cost (prologue_cost_vec
, 1, vector_stmt
,
831 stmt_info
, 0, vect_prologue
);
833 /* Pass the inside-of-loop statements to the target-specific cost model. */
834 inside_cost
= record_stmt_cost (body_cost_vec
, ncopies
, vector_stmt
,
835 stmt_info
, 0, vect_body
);
837 if (dump_enabled_p ())
838 dump_printf_loc (MSG_NOTE
, vect_location
,
839 "vect_model_simple_cost: inside_cost = %d, "
840 "prologue_cost = %d .\n", inside_cost
, prologue_cost
);
844 /* Model cost for type demotion and promotion operations. PWR is normally
845 zero for single-step promotions and demotions. It will be one if
846 two-step promotion/demotion is required, and so on. Each additional
847 step doubles the number of instructions required. */
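/* Illustrative note (not part of the original sources): converting a
   vector of ints to a vector of chars goes through shorts, i.e. two
   steps, so PWR = 1; successive steps differ by a factor of two in the
   number of vector statements needed, which is what the vect_pow2
   scaling below models.  */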
850 vect_model_promotion_demotion_cost (stmt_vec_info stmt_info
,
851 enum vect_def_type
*dt
, int pwr
)
854 int inside_cost
= 0, prologue_cost
= 0;
855 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
856 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
857 void *target_cost_data
;
859 /* The SLP costs were already calculated during SLP tree build. */
860 if (PURE_SLP_STMT (stmt_info
))
864 target_cost_data
= LOOP_VINFO_TARGET_COST_DATA (loop_vinfo
);
866 target_cost_data
= BB_VINFO_TARGET_COST_DATA (bb_vinfo
);
868 for (i
= 0; i
< pwr
+ 1; i
++)
870 tmp
= (STMT_VINFO_TYPE (stmt_info
) == type_promotion_vec_info_type
) ?
872 inside_cost
+= add_stmt_cost (target_cost_data
, vect_pow2 (tmp
),
873 vec_promote_demote
, stmt_info
, 0,
877 /* FORNOW: Assuming maximum 2 args per stmts. */
878 for (i
= 0; i
< 2; i
++)
879 if (dt
[i
] == vect_constant_def
|| dt
[i
] == vect_external_def
)
880 prologue_cost
+= add_stmt_cost (target_cost_data
, 1, vector_stmt
,
881 stmt_info
, 0, vect_prologue
);
883 if (dump_enabled_p ())
884 dump_printf_loc (MSG_NOTE
, vect_location
,
885 "vect_model_promotion_demotion_cost: inside_cost = %d, "
886 "prologue_cost = %d .\n", inside_cost
, prologue_cost
);
/* Function vect_cost_group_size

   For grouped load or store, return the group_size only if it is the first
   load or store of a group, else return 1.  This ensures that group size is
   only returned once per group.  */

static int
vect_cost_group_size (stmt_vec_info stmt_info)
{
  gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);

  if (first_stmt == STMT_VINFO_STMT (stmt_info))
    return GROUP_SIZE (stmt_info);

  return 1;
}
907 /* Function vect_model_store_cost
909 Models cost for stores. In the case of grouped accesses, one access
910 has the overhead of the grouped access attributed to it. */
913 vect_model_store_cost (stmt_vec_info stmt_info
, int ncopies
,
914 bool store_lanes_p
, enum vect_def_type dt
,
916 stmt_vector_for_cost
*prologue_cost_vec
,
917 stmt_vector_for_cost
*body_cost_vec
)
920 unsigned int inside_cost
= 0, prologue_cost
= 0;
921 struct data_reference
*first_dr
;
924 if (dt
== vect_constant_def
|| dt
== vect_external_def
)
925 prologue_cost
+= record_stmt_cost (prologue_cost_vec
, 1, scalar_to_vec
,
926 stmt_info
, 0, vect_prologue
);
928 /* Grouped access? */
929 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
))
933 first_stmt
= SLP_TREE_SCALAR_STMTS (slp_node
)[0];
938 first_stmt
= GROUP_FIRST_ELEMENT (stmt_info
);
939 group_size
= vect_cost_group_size (stmt_info
);
942 first_dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt
));
944 /* Not a grouped access. */
948 first_dr
= STMT_VINFO_DATA_REF (stmt_info
);
951 /* We assume that the cost of a single store-lanes instruction is
952 equivalent to the cost of GROUP_SIZE separate stores. If a grouped
953 access is instead being provided by a permute-and-store operation,
954 include the cost of the permutes. */
955 if (!store_lanes_p
&& group_size
> 1
956 && !STMT_VINFO_STRIDED_P (stmt_info
))
/* Uses high and low interleave, or shuffle, operations for each
   needed permute.  */
960 int nstmts
= ncopies
* ceil_log2 (group_size
) * group_size
;
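/* Illustrative note (not part of the original sources): with
   GROUP_SIZE = 4 and NCOPIES = 1 this charges 1 * ceil_log2 (4) * 4 = 8
   vec_perm operations for the interleaving.  */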
961 inside_cost
= record_stmt_cost (body_cost_vec
, nstmts
, vec_perm
,
962 stmt_info
, 0, vect_body
);
964 if (dump_enabled_p ())
965 dump_printf_loc (MSG_NOTE
, vect_location
,
966 "vect_model_store_cost: strided group_size = %d .\n",
970 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
971 /* Costs of the stores. */
972 if (STMT_VINFO_STRIDED_P (stmt_info
)
973 && !STMT_VINFO_GROUPED_ACCESS (stmt_info
))
975 /* N scalar stores plus extracting the elements. */
976 inside_cost
+= record_stmt_cost (body_cost_vec
,
977 ncopies
* TYPE_VECTOR_SUBPARTS (vectype
),
978 scalar_store
, stmt_info
, 0, vect_body
);
981 vect_get_store_cost (first_dr
, ncopies
, &inside_cost
, body_cost_vec
);
983 if (STMT_VINFO_STRIDED_P (stmt_info
))
984 inside_cost
+= record_stmt_cost (body_cost_vec
,
985 ncopies
* TYPE_VECTOR_SUBPARTS (vectype
),
986 vec_to_scalar
, stmt_info
, 0, vect_body
);
988 if (dump_enabled_p ())
989 dump_printf_loc (MSG_NOTE
, vect_location
,
990 "vect_model_store_cost: inside_cost = %d, "
991 "prologue_cost = %d .\n", inside_cost
, prologue_cost
);
995 /* Calculate cost of DR's memory access. */
997 vect_get_store_cost (struct data_reference
*dr
, int ncopies
,
998 unsigned int *inside_cost
,
999 stmt_vector_for_cost
*body_cost_vec
)
1001 int alignment_support_scheme
= vect_supportable_dr_alignment (dr
, false);
1002 gimple
*stmt
= DR_STMT (dr
);
1003 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
1005 switch (alignment_support_scheme
)
1009 *inside_cost
+= record_stmt_cost (body_cost_vec
, ncopies
,
1010 vector_store
, stmt_info
, 0,
1013 if (dump_enabled_p ())
1014 dump_printf_loc (MSG_NOTE
, vect_location
,
1015 "vect_model_store_cost: aligned.\n");
1019 case dr_unaligned_supported
:
1021 /* Here, we assign an additional cost for the unaligned store. */
1022 *inside_cost
+= record_stmt_cost (body_cost_vec
, ncopies
,
1023 unaligned_store
, stmt_info
,
1024 DR_MISALIGNMENT (dr
), vect_body
);
1025 if (dump_enabled_p ())
1026 dump_printf_loc (MSG_NOTE
, vect_location
,
1027 "vect_model_store_cost: unaligned supported by "
1032 case dr_unaligned_unsupported
:
1034 *inside_cost
= VECT_MAX_COST
;
1036 if (dump_enabled_p ())
1037 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1038 "vect_model_store_cost: unsupported access.\n");
1048 /* Function vect_model_load_cost
1050 Models cost for loads. In the case of grouped accesses, the last access
1051 has the overhead of the grouped access attributed to it. Since unaligned
1052 accesses are supported for loads, we also account for the costs of the
1053 access scheme chosen. */
1056 vect_model_load_cost (stmt_vec_info stmt_info
, int ncopies
,
1057 bool load_lanes_p
, slp_tree slp_node
,
1058 stmt_vector_for_cost
*prologue_cost_vec
,
1059 stmt_vector_for_cost
*body_cost_vec
)
1063 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
), *first_dr
;
1064 unsigned int inside_cost
= 0, prologue_cost
= 0;
1066 /* Grouped accesses? */
1067 first_stmt
= GROUP_FIRST_ELEMENT (stmt_info
);
1068 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
) && first_stmt
&& !slp_node
)
1070 group_size
= vect_cost_group_size (stmt_info
);
1071 first_dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt
));
1073 /* Not a grouped access. */
1080 /* We assume that the cost of a single load-lanes instruction is
1081 equivalent to the cost of GROUP_SIZE separate loads. If a grouped
1082 access is instead being provided by a load-and-permute operation,
1083 include the cost of the permutes. */
1084 if (!load_lanes_p
&& group_size
> 1
1085 && !STMT_VINFO_STRIDED_P (stmt_info
))
/* Uses even and odd extract operations, or shuffle operations,
   for each needed permute.  */
1089 int nstmts
= ncopies
* ceil_log2 (group_size
) * group_size
;
1090 inside_cost
= record_stmt_cost (body_cost_vec
, nstmts
, vec_perm
,
1091 stmt_info
, 0, vect_body
);
1093 if (dump_enabled_p ())
1094 dump_printf_loc (MSG_NOTE
, vect_location
,
1095 "vect_model_load_cost: strided group_size = %d .\n",
1099 /* The loads themselves. */
1100 if (STMT_VINFO_STRIDED_P (stmt_info
)
1101 && !STMT_VINFO_GROUPED_ACCESS (stmt_info
))
1103 /* N scalar loads plus gathering them into a vector. */
1104 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
1105 inside_cost
+= record_stmt_cost (body_cost_vec
,
1106 ncopies
* TYPE_VECTOR_SUBPARTS (vectype
),
1107 scalar_load
, stmt_info
, 0, vect_body
);
1110 vect_get_load_cost (first_dr
, ncopies
,
1111 ((!STMT_VINFO_GROUPED_ACCESS (stmt_info
))
1112 || group_size
> 1 || slp_node
),
1113 &inside_cost
, &prologue_cost
,
1114 prologue_cost_vec
, body_cost_vec
, true);
1115 if (STMT_VINFO_STRIDED_P (stmt_info
))
1116 inside_cost
+= record_stmt_cost (body_cost_vec
, ncopies
, vec_construct
,
1117 stmt_info
, 0, vect_body
);
1119 if (dump_enabled_p ())
1120 dump_printf_loc (MSG_NOTE
, vect_location
,
1121 "vect_model_load_cost: inside_cost = %d, "
1122 "prologue_cost = %d .\n", inside_cost
, prologue_cost
);
1126 /* Calculate cost of DR's memory access. */
1128 vect_get_load_cost (struct data_reference
*dr
, int ncopies
,
1129 bool add_realign_cost
, unsigned int *inside_cost
,
1130 unsigned int *prologue_cost
,
1131 stmt_vector_for_cost
*prologue_cost_vec
,
1132 stmt_vector_for_cost
*body_cost_vec
,
1133 bool record_prologue_costs
)
1135 int alignment_support_scheme
= vect_supportable_dr_alignment (dr
, false);
1136 gimple
*stmt
= DR_STMT (dr
);
1137 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
1139 switch (alignment_support_scheme
)
1143 *inside_cost
+= record_stmt_cost (body_cost_vec
, ncopies
, vector_load
,
1144 stmt_info
, 0, vect_body
);
1146 if (dump_enabled_p ())
1147 dump_printf_loc (MSG_NOTE
, vect_location
,
1148 "vect_model_load_cost: aligned.\n");
1152 case dr_unaligned_supported
:
1154 /* Here, we assign an additional cost for the unaligned load. */
1155 *inside_cost
+= record_stmt_cost (body_cost_vec
, ncopies
,
1156 unaligned_load
, stmt_info
,
1157 DR_MISALIGNMENT (dr
), vect_body
);
1159 if (dump_enabled_p ())
1160 dump_printf_loc (MSG_NOTE
, vect_location
,
1161 "vect_model_load_cost: unaligned supported by "
1166 case dr_explicit_realign
:
1168 *inside_cost
+= record_stmt_cost (body_cost_vec
, ncopies
* 2,
1169 vector_load
, stmt_info
, 0, vect_body
);
1170 *inside_cost
+= record_stmt_cost (body_cost_vec
, ncopies
,
1171 vec_perm
, stmt_info
, 0, vect_body
);
/* FIXME: If the misalignment remains fixed across the iterations of
   the containing loop, the following cost should be added to the
   prologue costs.  */
1176 if (targetm
.vectorize
.builtin_mask_for_load
)
1177 *inside_cost
+= record_stmt_cost (body_cost_vec
, 1, vector_stmt
,
1178 stmt_info
, 0, vect_body
);
1180 if (dump_enabled_p ())
1181 dump_printf_loc (MSG_NOTE
, vect_location
,
1182 "vect_model_load_cost: explicit realign\n");
1186 case dr_explicit_realign_optimized
:
1188 if (dump_enabled_p ())
1189 dump_printf_loc (MSG_NOTE
, vect_location
,
1190 "vect_model_load_cost: unaligned software "
1193 /* Unaligned software pipeline has a load of an address, an initial
1194 load, and possibly a mask operation to "prime" the loop. However,
1195 if this is an access in a group of loads, which provide grouped
1196 access, then the above cost should only be considered for one
1197 access in the group. Inside the loop, there is a load op
1198 and a realignment op. */
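/* Illustrative note (not part of the original sources): the prologue
   part amounts to two vector_stmt costs (the address load and the
   initial load), plus one more when the target provides
   builtin_mask_for_load, as recorded just below.  */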
1200 if (add_realign_cost
&& record_prologue_costs
)
1202 *prologue_cost
+= record_stmt_cost (prologue_cost_vec
, 2,
1203 vector_stmt
, stmt_info
,
1205 if (targetm
.vectorize
.builtin_mask_for_load
)
1206 *prologue_cost
+= record_stmt_cost (prologue_cost_vec
, 1,
1207 vector_stmt
, stmt_info
,
1211 *inside_cost
+= record_stmt_cost (body_cost_vec
, ncopies
, vector_load
,
1212 stmt_info
, 0, vect_body
);
1213 *inside_cost
+= record_stmt_cost (body_cost_vec
, ncopies
, vec_perm
,
1214 stmt_info
, 0, vect_body
);
1216 if (dump_enabled_p ())
1217 dump_printf_loc (MSG_NOTE
, vect_location
,
1218 "vect_model_load_cost: explicit realign optimized"
1224 case dr_unaligned_unsupported
:
1226 *inside_cost
= VECT_MAX_COST
;
1228 if (dump_enabled_p ())
1229 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1230 "vect_model_load_cost: unsupported access.\n");
1239 /* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
1240 the loop preheader for the vectorized stmt STMT. */
1243 vect_init_vector_1 (gimple
*stmt
, gimple
*new_stmt
, gimple_stmt_iterator
*gsi
)
1246 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
1249 stmt_vec_info stmt_vinfo
= vinfo_for_stmt (stmt
);
1250 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_vinfo
);
1254 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
1258 if (nested_in_vect_loop_p (loop
, stmt
))
1261 pe
= loop_preheader_edge (loop
);
1262 new_bb
= gsi_insert_on_edge_immediate (pe
, new_stmt
);
1263 gcc_assert (!new_bb
);
1267 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_vinfo
);
1269 gimple_stmt_iterator gsi_bb_start
;
1271 gcc_assert (bb_vinfo
);
1272 bb
= BB_VINFO_BB (bb_vinfo
);
1273 gsi_bb_start
= gsi_after_labels (bb
);
1274 gsi_insert_before (&gsi_bb_start
, new_stmt
, GSI_SAME_STMT
);
1278 if (dump_enabled_p ())
1280 dump_printf_loc (MSG_NOTE
, vect_location
,
1281 "created new init_stmt: ");
1282 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, new_stmt
, 0);
/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new variable of type
   TYPE with the value VAL.  If TYPE is a vector type and VAL does not have
   vector type, a vector with all elements equal to VAL is created first.
   Place the initialization at GSI if it is not NULL.  Otherwise, place the
   initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */
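/* Illustrative note (not part of the original sources): called with the
   scalar constant 5 and a four-element integer vector type, this emits a
   preheader statement materializing the vector { 5, 5, 5, 5 } and returns
   its SSA name.  */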
1297 vect_init_vector (gimple
*stmt
, tree val
, tree type
, gimple_stmt_iterator
*gsi
)
1304 if (TREE_CODE (type
) == VECTOR_TYPE
1305 && TREE_CODE (TREE_TYPE (val
)) != VECTOR_TYPE
)
1307 if (!types_compatible_p (TREE_TYPE (type
), TREE_TYPE (val
)))
1309 if (CONSTANT_CLASS_P (val
))
1310 val
= fold_unary (VIEW_CONVERT_EXPR
, TREE_TYPE (type
), val
);
1313 new_temp
= make_ssa_name (TREE_TYPE (type
));
1314 init_stmt
= gimple_build_assign (new_temp
, NOP_EXPR
, val
);
1315 vect_init_vector_1 (stmt
, init_stmt
, gsi
);
1319 val
= build_vector_from_val (type
, val
);
1322 new_var
= vect_get_new_vect_var (type
, vect_simple_var
, "cst_");
1323 init_stmt
= gimple_build_assign (new_var
, val
);
1324 new_temp
= make_ssa_name (new_var
, init_stmt
);
1325 gimple_assign_set_lhs (init_stmt
, new_temp
);
1326 vect_init_vector_1 (stmt
, init_stmt
, gsi
);
1327 vec_oprnd
= gimple_assign_lhs (init_stmt
);
1332 /* Function vect_get_vec_def_for_operand.
1334 OP is an operand in STMT. This function returns a (vector) def that will be
1335 used in the vectorized stmt for STMT.
1337 In the case that OP is an SSA_NAME which is defined in the loop, then
1338 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
1340 In case OP is an invariant or constant, a new stmt that creates a vector def
1341 needs to be introduced. */
1344 vect_get_vec_def_for_operand (tree op
, gimple
*stmt
)
1349 stmt_vec_info def_stmt_info
= NULL
;
1350 stmt_vec_info stmt_vinfo
= vinfo_for_stmt (stmt
);
1351 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_vinfo
);
1352 enum vect_def_type dt
;
1356 if (dump_enabled_p ())
1358 dump_printf_loc (MSG_NOTE
, vect_location
,
1359 "vect_get_vec_def_for_operand: ");
1360 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, op
);
1361 dump_printf (MSG_NOTE
, "\n");
1364 is_simple_use
= vect_is_simple_use (op
, loop_vinfo
, &def_stmt
, &dt
);
1365 gcc_assert (is_simple_use
);
1366 if (dump_enabled_p ())
1368 int loc_printed
= 0;
1372 dump_printf (MSG_NOTE
, " def_stmt = ");
1374 dump_printf_loc (MSG_NOTE
, vect_location
, " def_stmt = ");
1375 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, def_stmt
, 0);
1381 /* operand is a constant or a loop invariant. */
1382 case vect_constant_def
:
1383 case vect_external_def
:
1385 vector_type
= get_vectype_for_scalar_type (TREE_TYPE (op
));
1386 gcc_assert (vector_type
);
1387 return vect_init_vector (stmt
, op
, vector_type
, NULL
);
1390 /* operand is defined inside the loop. */
1391 case vect_internal_def
:
1393 /* Get the def from the vectorized stmt. */
1394 def_stmt_info
= vinfo_for_stmt (def_stmt
);
1396 vec_stmt
= STMT_VINFO_VEC_STMT (def_stmt_info
);
1397 /* Get vectorized pattern statement. */
1399 && STMT_VINFO_IN_PATTERN_P (def_stmt_info
)
1400 && !STMT_VINFO_RELEVANT (def_stmt_info
))
1401 vec_stmt
= STMT_VINFO_VEC_STMT (vinfo_for_stmt (
1402 STMT_VINFO_RELATED_STMT (def_stmt_info
)));
1403 gcc_assert (vec_stmt
);
1404 if (gimple_code (vec_stmt
) == GIMPLE_PHI
)
1405 vec_oprnd
= PHI_RESULT (vec_stmt
);
1406 else if (is_gimple_call (vec_stmt
))
1407 vec_oprnd
= gimple_call_lhs (vec_stmt
);
1409 vec_oprnd
= gimple_assign_lhs (vec_stmt
);
1413 /* operand is defined by a loop header phi - reduction */
1414 case vect_reduction_def
:
1415 case vect_double_reduction_def
:
1416 case vect_nested_cycle
:
1417 /* Code should use get_initial_def_for_reduction. */
1420 /* operand is defined by loop-header phi - induction. */
1421 case vect_induction_def
:
1423 gcc_assert (gimple_code (def_stmt
) == GIMPLE_PHI
);
1425 /* Get the def from the vectorized stmt. */
1426 def_stmt_info
= vinfo_for_stmt (def_stmt
);
1427 vec_stmt
= STMT_VINFO_VEC_STMT (def_stmt_info
);
1428 if (gimple_code (vec_stmt
) == GIMPLE_PHI
)
1429 vec_oprnd
= PHI_RESULT (vec_stmt
);
1431 vec_oprnd
= gimple_get_lhs (vec_stmt
);
1441 /* Function vect_get_vec_def_for_stmt_copy
1443 Return a vector-def for an operand. This function is used when the
1444 vectorized stmt to be created (by the caller to this function) is a "copy"
1445 created in case the vectorized result cannot fit in one vector, and several
1446 copies of the vector-stmt are required. In this case the vector-def is
1447 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1448 of the stmt that defines VEC_OPRND.
1449 DT is the type of the vector def VEC_OPRND.
1452 In case the vectorization factor (VF) is bigger than the number
1453 of elements that can fit in a vectype (nunits), we have to generate
1454 more than one vector stmt to vectorize the scalar stmt. This situation
1455 arises when there are multiple data-types operated upon in the loop; the
1456 smallest data-type determines the VF, and as a result, when vectorizing
1457 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1458 vector stmt (each computing a vector of 'nunits' results, and together
1459 computing 'VF' results in each iteration). This function is called when
1460 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1461 which VF=16 and nunits=4, so the number of copies required is 4):
1463 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1465 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1466 VS1.1: vx.1 = memref1 VS1.2
1467 VS1.2: vx.2 = memref2 VS1.3
1468 VS1.3: vx.3 = memref3
1470 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1471 VSnew.1: vz1 = vx.1 + ... VSnew.2
1472 VSnew.2: vz2 = vx.2 + ... VSnew.3
1473 VSnew.3: vz3 = vx.3 + ...
1475 The vectorization of S1 is explained in vectorizable_load.
1476 The vectorization of S2:
1477 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1478 the function 'vect_get_vec_def_for_operand' is called to
1479 get the relevant vector-def for each operand of S2. For operand x it
1480 returns the vector-def 'vx.0'.
1482 To create the remaining copies of the vector-stmt (VSnew.j), this
1483 function is called to get the relevant vector-def for each operand. It is
1484 obtained from the respective VS1.j stmt, which is recorded in the
1485 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1487 For example, to obtain the vector-def 'vx.1' in order to create the
1488 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx.0' we obtain the stmt that defines it ('VS1.0'); from the
1490 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1491 and return its def ('vx.1').
1492 Overall, to create the above sequence this function will be called 3 times:
1493 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1494 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1495 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1498 vect_get_vec_def_for_stmt_copy (enum vect_def_type dt
, tree vec_oprnd
)
1500 gimple
*vec_stmt_for_operand
;
1501 stmt_vec_info def_stmt_info
;
1503 /* Do nothing; can reuse same def. */
1504 if (dt
== vect_external_def
|| dt
== vect_constant_def
)
1507 vec_stmt_for_operand
= SSA_NAME_DEF_STMT (vec_oprnd
);
1508 def_stmt_info
= vinfo_for_stmt (vec_stmt_for_operand
);
1509 gcc_assert (def_stmt_info
);
1510 vec_stmt_for_operand
= STMT_VINFO_RELATED_STMT (def_stmt_info
);
1511 gcc_assert (vec_stmt_for_operand
);
1512 vec_oprnd
= gimple_get_lhs (vec_stmt_for_operand
);
1513 if (gimple_code (vec_stmt_for_operand
) == GIMPLE_PHI
)
1514 vec_oprnd
= PHI_RESULT (vec_stmt_for_operand
);
1516 vec_oprnd
= gimple_get_lhs (vec_stmt_for_operand
);
/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */

void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
				 vec<tree> *vec_oprnds0,
				 vec<tree> *vec_oprnds1)
{
  tree vec_oprnd = vec_oprnds0->pop ();

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  vec_oprnds0->quick_push (vec_oprnd);

  if (vec_oprnds1 && vec_oprnds1->length ())
    {
      vec_oprnd = vec_oprnds1->pop ();
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      vec_oprnds1->quick_push (vec_oprnd);
    }
}
1543 /* Get vectorized definitions for OP0 and OP1.
1544 REDUC_INDEX is the index of reduction operand in case of reduction,
1545 and -1 otherwise. */
1548 vect_get_vec_defs (tree op0
, tree op1
, gimple
*stmt
,
1549 vec
<tree
> *vec_oprnds0
,
1550 vec
<tree
> *vec_oprnds1
,
1551 slp_tree slp_node
, int reduc_index
)
1555 int nops
= (op1
== NULL_TREE
) ? 1 : 2;
1556 auto_vec
<tree
> ops (nops
);
1557 auto_vec
<vec
<tree
> > vec_defs (nops
);
1559 ops
.quick_push (op0
);
1561 ops
.quick_push (op1
);
1563 vect_get_slp_defs (ops
, slp_node
, &vec_defs
, reduc_index
);
1565 *vec_oprnds0
= vec_defs
[0];
1567 *vec_oprnds1
= vec_defs
[1];
1573 vec_oprnds0
->create (1);
1574 vec_oprnd
= vect_get_vec_def_for_operand (op0
, stmt
);
1575 vec_oprnds0
->quick_push (vec_oprnd
);
1579 vec_oprnds1
->create (1);
1580 vec_oprnd
= vect_get_vec_def_for_operand (op1
, stmt
);
1581 vec_oprnds1
->quick_push (vec_oprnd
);
1587 /* Function vect_finish_stmt_generation.
1589 Insert a new stmt. */
1592 vect_finish_stmt_generation (gimple
*stmt
, gimple
*vec_stmt
,
1593 gimple_stmt_iterator
*gsi
)
1595 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
1596 vec_info
*vinfo
= stmt_info
->vinfo
;
1598 gcc_assert (gimple_code (stmt
) != GIMPLE_LABEL
);
1600 if (!gsi_end_p (*gsi
)
1601 && gimple_has_mem_ops (vec_stmt
))
1603 gimple
*at_stmt
= gsi_stmt (*gsi
);
1604 tree vuse
= gimple_vuse (at_stmt
);
1605 if (vuse
&& TREE_CODE (vuse
) == SSA_NAME
)
1607 tree vdef
= gimple_vdef (at_stmt
);
1608 gimple_set_vuse (vec_stmt
, gimple_vuse (at_stmt
));
1609 /* If we have an SSA vuse and insert a store, update virtual
1610 SSA form to avoid triggering the renamer. Do so only
1611 if we can easily see all uses - which is what almost always
1612 happens with the way vectorized stmts are inserted. */
1613 if ((vdef
&& TREE_CODE (vdef
) == SSA_NAME
)
1614 && ((is_gimple_assign (vec_stmt
)
1615 && !is_gimple_reg (gimple_assign_lhs (vec_stmt
)))
1616 || (is_gimple_call (vec_stmt
)
1617 && !(gimple_call_flags (vec_stmt
)
1618 & (ECF_CONST
|ECF_PURE
|ECF_NOVOPS
)))))
1620 tree new_vdef
= copy_ssa_name (vuse
, vec_stmt
);
1621 gimple_set_vdef (vec_stmt
, new_vdef
);
1622 SET_USE (gimple_vuse_op (at_stmt
), new_vdef
);
1626 gsi_insert_before (gsi
, vec_stmt
, GSI_SAME_STMT
);
1628 set_vinfo_for_stmt (vec_stmt
, new_stmt_vec_info (vec_stmt
, vinfo
));
1630 if (dump_enabled_p ())
1632 dump_printf_loc (MSG_NOTE
, vect_location
, "add new stmt: ");
1633 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, vec_stmt
, 0);
1636 gimple_set_location (vec_stmt
, gimple_location (stmt
));
1638 /* While EH edges will generally prevent vectorization, stmt might
1639 e.g. be in a must-not-throw region. Ensure newly created stmts
1640 that could throw are part of the same region. */
1641 int lp_nr
= lookup_stmt_eh_lp (stmt
);
1642 if (lp_nr
!= 0 && stmt_could_throw_p (vec_stmt
))
1643 add_stmt_to_eh_lp (vec_stmt
, lp_nr
);
/* Checks if CALL can be vectorized in type VECTYPE.  Returns
   a function declaration if the target has a vectorized version
   of the function, or NULL_TREE if the function cannot be vectorized.  */

tree
vectorizable_function (gcall *call, tree vectype_out, tree vectype_in)
{
  tree fndecl = gimple_call_fndecl (call);

  /* We only handle functions that do not read or clobber memory -- i.e.
     const or novops ones.  */
  if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
    return NULL_TREE;

  if (!fndecl
      || TREE_CODE (fndecl) != FUNCTION_DECL
      || !DECL_BUILT_IN (fndecl))
    return NULL_TREE;

  return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
							vectype_in);
}

static tree permute_vec_elements (tree, tree, tree, gimple *,
				  gimple_stmt_iterator *);
1674 /* Function vectorizable_mask_load_store.
1676 Check if STMT performs a conditional load or store that can be vectorized.
1677 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1678 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
1679 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
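/* Illustrative note (not part of the original sources): the scalar forms
   handled here are roughly
     lhs = IFN_MASK_LOAD (ptr, align, mask);
     IFN_MASK_STORE (ptr, align, mask, rhs);
   which is why the code below reads the mask from call argument 2 and the
   stored value from call argument 3.  */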
1682 vectorizable_mask_load_store (gimple
*stmt
, gimple_stmt_iterator
*gsi
,
1683 gimple
**vec_stmt
, slp_tree slp_node
)
1685 tree vec_dest
= NULL
;
1686 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
1687 stmt_vec_info prev_stmt_info
;
1688 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
1689 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
1690 bool nested_in_vect_loop
= nested_in_vect_loop_p (loop
, stmt
);
1691 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
);
1692 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
1696 tree dataref_ptr
= NULL_TREE
;
1698 int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
1702 tree gather_base
= NULL_TREE
, gather_off
= NULL_TREE
;
1703 tree gather_off_vectype
= NULL_TREE
, gather_decl
= NULL_TREE
;
1704 int gather_scale
= 1;
1705 enum vect_def_type gather_dt
= vect_unknown_def_type
;
1709 enum vect_def_type dt
;
1711 if (slp_node
!= NULL
)
1714 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
1715 gcc_assert (ncopies
>= 1);
1717 is_store
= gimple_call_internal_fn (stmt
) == IFN_MASK_STORE
;
1718 mask
= gimple_call_arg (stmt
, 2);
1719 if (TYPE_PRECISION (TREE_TYPE (mask
))
1720 != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype
))))
1723 /* FORNOW. This restriction should be relaxed. */
1724 if (nested_in_vect_loop
&& ncopies
> 1)
1726 if (dump_enabled_p ())
1727 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1728 "multiple types in nested loop.");
1732 if (!STMT_VINFO_RELEVANT_P (stmt_info
))
1735 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
1738 if (!STMT_VINFO_DATA_REF (stmt_info
))
1741 elem_type
= TREE_TYPE (vectype
);
1743 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
))
1746 if (STMT_VINFO_STRIDED_P (stmt_info
))
1749 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info
))
1752 gather_decl
= vect_check_gather_scatter (stmt
, loop_vinfo
, &gather_base
,
1753 &gather_off
, &gather_scale
);
1754 gcc_assert (gather_decl
);
1755 if (!vect_is_simple_use (gather_off
, loop_vinfo
, &def_stmt
, &gather_dt
,
1756 &gather_off_vectype
))
1758 if (dump_enabled_p ())
1759 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1760 "gather index use not simple.");
1764 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gather_decl
));
1766 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist
))));
1767 if (TREE_CODE (masktype
) == INTEGER_TYPE
)
1769 if (dump_enabled_p ())
1770 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1771 "masked gather with integer mask not supported.");
1775 else if (tree_int_cst_compare (nested_in_vect_loop
1776 ? STMT_VINFO_DR_STEP (stmt_info
)
1777 : DR_STEP (dr
), size_zero_node
) <= 0)
1779 else if (!VECTOR_MODE_P (TYPE_MODE (vectype
))
1780 || !can_vec_mask_load_store_p (TYPE_MODE (vectype
), !is_store
))
1783 if (TREE_CODE (mask
) != SSA_NAME
)
1786 if (!vect_is_simple_use (mask
, loop_vinfo
, &def_stmt
, &dt
))
1791 tree rhs
= gimple_call_arg (stmt
, 3);
1792 if (!vect_is_simple_use (rhs
, loop_vinfo
, &def_stmt
, &dt
))
1796 if (!vec_stmt
) /* transformation not required. */
1798 STMT_VINFO_TYPE (stmt_info
) = call_vec_info_type
;
1800 vect_model_store_cost (stmt_info
, ncopies
, false, dt
,
1803 vect_model_load_cost (stmt_info
, ncopies
, false, NULL
, NULL
, NULL
);
1809 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info
))
1811 tree vec_oprnd0
= NULL_TREE
, op
;
1812 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gather_decl
));
1813 tree rettype
, srctype
, ptrtype
, idxtype
, masktype
, scaletype
;
1814 tree ptr
, vec_mask
= NULL_TREE
, mask_op
= NULL_TREE
, var
, scale
;
1815 tree perm_mask
= NULL_TREE
, prev_res
= NULL_TREE
;
1816 tree mask_perm_mask
= NULL_TREE
;
1817 edge pe
= loop_preheader_edge (loop
);
1820 enum { NARROW
, NONE
, WIDEN
} modifier
;
1821 int gather_off_nunits
= TYPE_VECTOR_SUBPARTS (gather_off_vectype
);
1823 rettype
= TREE_TYPE (TREE_TYPE (gather_decl
));
1824 srctype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1825 ptrtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1826 idxtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1827 masktype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1828 scaletype
= TREE_VALUE (arglist
);
1829 gcc_checking_assert (types_compatible_p (srctype
, rettype
)
1830 && types_compatible_p (srctype
, masktype
));
1832 if (nunits
== gather_off_nunits
)
1834 else if (nunits
== gather_off_nunits
/ 2)
1836 unsigned char *sel
= XALLOCAVEC (unsigned char, gather_off_nunits
);
1839 for (i
= 0; i
< gather_off_nunits
; ++i
)
1840 sel
[i
] = i
| nunits
;
1842 perm_mask
= vect_gen_perm_mask_checked (gather_off_vectype
, sel
);
1844 else if (nunits
== gather_off_nunits
* 2)
1846 unsigned char *sel
= XALLOCAVEC (unsigned char, nunits
);
1849 for (i
= 0; i
< nunits
; ++i
)
1850 sel
[i
] = i
< gather_off_nunits
1851 ? i
: i
+ nunits
- gather_off_nunits
;
1853 perm_mask
= vect_gen_perm_mask_checked (vectype
, sel
);
1855 for (i
= 0; i
< nunits
; ++i
)
1856 sel
[i
] = i
| gather_off_nunits
;
1857 mask_perm_mask
= vect_gen_perm_mask_checked (masktype
, sel
);
1862 vec_dest
= vect_create_destination_var (gimple_call_lhs (stmt
), vectype
);
1864 ptr
= fold_convert (ptrtype
, gather_base
);
1865 if (!is_gimple_min_invariant (ptr
))
1867 ptr
= force_gimple_operand (ptr
, &seq
, true, NULL_TREE
);
1868 new_bb
= gsi_insert_seq_on_edge_immediate (pe
, seq
);
1869 gcc_assert (!new_bb
);
1872 scale
= build_int_cst (scaletype
, gather_scale
);
1874 prev_stmt_info
= NULL
;
1875 for (j
= 0; j
< ncopies
; ++j
)
1877 if (modifier
== WIDEN
&& (j
& 1))
1878 op
= permute_vec_elements (vec_oprnd0
, vec_oprnd0
,
1879 perm_mask
, stmt
, gsi
);
1882 = vect_get_vec_def_for_operand (gather_off
, stmt
);
1885 = vect_get_vec_def_for_stmt_copy (gather_dt
, vec_oprnd0
);
1887 if (!useless_type_conversion_p (idxtype
, TREE_TYPE (op
)))
1889 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op
))
1890 == TYPE_VECTOR_SUBPARTS (idxtype
));
1891 var
= vect_get_new_vect_var (idxtype
, vect_simple_var
, NULL
);
1892 var
= make_ssa_name (var
);
1893 op
= build1 (VIEW_CONVERT_EXPR
, idxtype
, op
);
1895 = gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
1896 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
1900 if (mask_perm_mask
&& (j
& 1))
1901 mask_op
= permute_vec_elements (mask_op
, mask_op
,
1902 mask_perm_mask
, stmt
, gsi
);
1906 vec_mask
= vect_get_vec_def_for_operand (mask
, stmt
);
1909 vect_is_simple_use (vec_mask
, loop_vinfo
, &def_stmt
, &dt
);
1910 vec_mask
= vect_get_vec_def_for_stmt_copy (dt
, vec_mask
);
1914 if (!useless_type_conversion_p (masktype
, TREE_TYPE (vec_mask
)))
1916 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op
))
1917 == TYPE_VECTOR_SUBPARTS (masktype
));
1918 var
= vect_get_new_vect_var (masktype
, vect_simple_var
,
1920 var
= make_ssa_name (var
);
1921 mask_op
= build1 (VIEW_CONVERT_EXPR
, masktype
, mask_op
);
1923 = gimple_build_assign (var
, VIEW_CONVERT_EXPR
, mask_op
);
1924 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
1930 = gimple_build_call (gather_decl
, 5, mask_op
, ptr
, op
, mask_op
,
1933 if (!useless_type_conversion_p (vectype
, rettype
))
1935 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype
)
1936 == TYPE_VECTOR_SUBPARTS (rettype
));
1937 var
= vect_get_new_vect_var (rettype
, vect_simple_var
, NULL
);
1938 op
= make_ssa_name (var
, new_stmt
);
1939 gimple_call_set_lhs (new_stmt
, op
);
1940 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
1941 var
= make_ssa_name (vec_dest
);
1942 op
= build1 (VIEW_CONVERT_EXPR
, vectype
, op
);
1943 new_stmt
= gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
1947 var
= make_ssa_name (vec_dest
, new_stmt
);
1948 gimple_call_set_lhs (new_stmt
, var
);
1951 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
1953 if (modifier
== NARROW
)
1960 var
= permute_vec_elements (prev_res
, var
,
1961 perm_mask
, stmt
, gsi
);
1962 new_stmt
= SSA_NAME_DEF_STMT (var
);
1965 if (prev_stmt_info
== NULL
)
1966 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
1968 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
1969 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
/* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
   from the IL.  */
1974 tree lhs
= gimple_call_lhs (stmt
);
1975 new_stmt
= gimple_build_assign (lhs
, build_zero_cst (TREE_TYPE (lhs
)));
1976 set_vinfo_for_stmt (new_stmt
, stmt_info
);
1977 set_vinfo_for_stmt (stmt
, NULL
);
1978 STMT_VINFO_STMT (stmt_info
) = new_stmt
;
1979 gsi_replace (gsi
, new_stmt
, true);
1984 tree vec_rhs
= NULL_TREE
, vec_mask
= NULL_TREE
;
1985 prev_stmt_info
= NULL
;
1986 for (i
= 0; i
< ncopies
; i
++)
1988 unsigned align
, misalign
;
1992 tree rhs
= gimple_call_arg (stmt
, 3);
1993 vec_rhs
= vect_get_vec_def_for_operand (rhs
, stmt
);
1994 vec_mask
= vect_get_vec_def_for_operand (mask
, stmt
);
/* We should have caught mismatched types earlier.  */
1996 gcc_assert (useless_type_conversion_p (vectype
,
1997 TREE_TYPE (vec_rhs
)));
1998 dataref_ptr
= vect_create_data_ref_ptr (stmt
, vectype
, NULL
,
1999 NULL_TREE
, &dummy
, gsi
,
2000 &ptr_incr
, false, &inv_p
);
2001 gcc_assert (!inv_p
);
2005 vect_is_simple_use (vec_rhs
, loop_vinfo
, &def_stmt
, &dt
);
2006 vec_rhs
= vect_get_vec_def_for_stmt_copy (dt
, vec_rhs
);
2007 vect_is_simple_use (vec_mask
, loop_vinfo
, &def_stmt
, &dt
);
2008 vec_mask
= vect_get_vec_def_for_stmt_copy (dt
, vec_mask
);
2009 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
, stmt
,
2010 TYPE_SIZE_UNIT (vectype
));
2013 align
= TYPE_ALIGN_UNIT (vectype
);
2014 if (aligned_access_p (dr
))
2016 else if (DR_MISALIGNMENT (dr
) == -1)
2018 align
= TYPE_ALIGN_UNIT (elem_type
);
2022 misalign
= DR_MISALIGNMENT (dr
);
2023 set_ptr_info_alignment (get_ptr_info (dataref_ptr
), align
,
2026 = gimple_build_call_internal (IFN_MASK_STORE
, 4, dataref_ptr
,
2027 gimple_call_arg (stmt
, 1),
2029 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2031 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
2033 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
2034 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
2039 tree vec_mask
= NULL_TREE
;
2040 prev_stmt_info
= NULL
;
2041 vec_dest
= vect_create_destination_var (gimple_call_lhs (stmt
), vectype
);
2042 for (i
= 0; i
< ncopies
; i
++)
2044 unsigned align
, misalign
;
2048 vec_mask
= vect_get_vec_def_for_operand (mask
, stmt
);
2049 dataref_ptr
= vect_create_data_ref_ptr (stmt
, vectype
, NULL
,
2050 NULL_TREE
, &dummy
, gsi
,
2051 &ptr_incr
, false, &inv_p
);
2052 gcc_assert (!inv_p
);
2056 vect_is_simple_use (vec_mask
, loop_vinfo
, &def_stmt
, &dt
);
2057 vec_mask
= vect_get_vec_def_for_stmt_copy (dt
, vec_mask
);
2058 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
, stmt
,
2059 TYPE_SIZE_UNIT (vectype
));
2062 align
= TYPE_ALIGN_UNIT (vectype
);
2063 if (aligned_access_p (dr
))
2065 else if (DR_MISALIGNMENT (dr
) == -1)
2067 align
= TYPE_ALIGN_UNIT (elem_type
);
2071 misalign
= DR_MISALIGNMENT (dr
);
2072 set_ptr_info_alignment (get_ptr_info (dataref_ptr
), align
,
2075 = gimple_build_call_internal (IFN_MASK_LOAD
, 3, dataref_ptr
,
2076 gimple_call_arg (stmt
, 1),
2078 gimple_call_set_lhs (new_stmt
, make_ssa_name (vec_dest
));
2079 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2081 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
2083 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
2084 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
/* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
   from the IL.  */
2092 tree lhs
= gimple_call_lhs (stmt
);
2093 new_stmt
= gimple_build_assign (lhs
, build_zero_cst (TREE_TYPE (lhs
)));
2094 set_vinfo_for_stmt (new_stmt
, stmt_info
);
2095 set_vinfo_for_stmt (stmt
, NULL
);
2096 STMT_VINFO_STMT (stmt_info
) = new_stmt
;
2097 gsi_replace (gsi
, new_stmt
, true);
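
/* Illustrative sketch, not part of the vectorizer: the per-element
   semantics that the IFN_MASK_LOAD / IFN_MASK_STORE calls handled above
   stand for, written as plain C loops over a hypothetical N-element
   vector.  All names below are made up for the example.  */
#if 0
static void
sketch_mask_load (int *dest, const int *src, const unsigned char *mask, int n)
{
  for (int i = 0; i < n; i++)
    if (mask[i])
      dest[i] = src[i];	/* masked-off lanes are not loaded */
}

static void
sketch_mask_store (int *dest, const int *src, const unsigned char *mask, int n)
{
  for (int i = 0; i < n; i++)
    if (mask[i])
      dest[i] = src[i];	/* masked-off lanes of DEST are left untouched */
}
#endif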
/* Function vectorizable_call.

   Check if GS performs a function call that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
2112 vectorizable_call (gimple
*gs
, gimple_stmt_iterator
*gsi
, gimple
**vec_stmt
,
2119 tree vec_oprnd0
= NULL_TREE
, vec_oprnd1
= NULL_TREE
;
2120 stmt_vec_info stmt_info
= vinfo_for_stmt (gs
), prev_stmt_info
;
2121 tree vectype_out
, vectype_in
;
2124 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
2125 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
2126 vec_info
*vinfo
= stmt_info
->vinfo
;
2127 tree fndecl
, new_temp
, rhs_type
;
2129 enum vect_def_type dt
[3]
2130 = {vect_unknown_def_type
, vect_unknown_def_type
, vect_unknown_def_type
};
2131 gimple
*new_stmt
= NULL
;
2133 vec
<tree
> vargs
= vNULL
;
2134 enum { NARROW
, NONE
, WIDEN
} modifier
;
2138 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
2141 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
2144 /* Is GS a vectorizable call? */
2145 stmt
= dyn_cast
<gcall
*> (gs
);
2149 if (gimple_call_internal_p (stmt
)
2150 && (gimple_call_internal_fn (stmt
) == IFN_MASK_LOAD
2151 || gimple_call_internal_fn (stmt
) == IFN_MASK_STORE
))
2152 return vectorizable_mask_load_store (stmt
, gsi
, vec_stmt
,
2155 if (gimple_call_lhs (stmt
) == NULL_TREE
2156 || TREE_CODE (gimple_call_lhs (stmt
)) != SSA_NAME
)
2159 gcc_checking_assert (!stmt_can_throw_internal (stmt
));
2161 vectype_out
= STMT_VINFO_VECTYPE (stmt_info
);
2163 /* Process function arguments. */
2164 rhs_type
= NULL_TREE
;
2165 vectype_in
= NULL_TREE
;
2166 nargs
= gimple_call_num_args (stmt
);
/* Bail out if the function has more than three arguments; we do not have
   interesting builtin functions to vectorize with more than two arguments
   except for fma.  A call with no arguments is not supported either.  */
2171 if (nargs
== 0 || nargs
> 3)
/* Ignore the argument of IFN_GOMP_SIMD_LANE; it is magic.  */
2175 if (gimple_call_internal_p (stmt
)
2176 && gimple_call_internal_fn (stmt
) == IFN_GOMP_SIMD_LANE
)
2179 rhs_type
= unsigned_type_node
;
2182 for (i
= 0; i
< nargs
; i
++)
2186 op
= gimple_call_arg (stmt
, i
);
2188 /* We can only handle calls with arguments of the same type. */
2190 && !types_compatible_p (rhs_type
, TREE_TYPE (op
)))
2192 if (dump_enabled_p ())
2193 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2194 "argument types differ.\n");
2198 rhs_type
= TREE_TYPE (op
);
2200 if (!vect_is_simple_use (op
, vinfo
, &def_stmt
, &dt
[i
], &opvectype
))
2202 if (dump_enabled_p ())
2203 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2204 "use not simple.\n");
2209 vectype_in
= opvectype
;
2211 && opvectype
!= vectype_in
)
2213 if (dump_enabled_p ())
2214 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2215 "argument vector types differ.\n");
/* If all arguments are external or constant defs, use a vector type with
   the same size as the output vector type.  */
2222 vectype_in
= get_same_sized_vectype (rhs_type
, vectype_out
);
2224 gcc_assert (vectype_in
);
2227 if (dump_enabled_p ())
2229 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2230 "no vectype for scalar type ");
2231 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, rhs_type
);
2232 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
2239 nunits_in
= TYPE_VECTOR_SUBPARTS (vectype_in
);
2240 nunits_out
= TYPE_VECTOR_SUBPARTS (vectype_out
);
2241 if (nunits_in
== nunits_out
/ 2)
2243 else if (nunits_out
== nunits_in
)
2245 else if (nunits_out
== nunits_in
/ 2)
/* For now, we only vectorize functions if a target-specific builtin
   is available.  TODO -- in some cases, it might be profitable to
   insert the calls for pieces of the vector, in order to be able
   to vectorize other operations in the loop.  */
2254 fndecl
= vectorizable_function (stmt
, vectype_out
, vectype_in
);
2255 if (fndecl
== NULL_TREE
)
2257 if (gimple_call_internal_p (stmt
)
2258 && gimple_call_internal_fn (stmt
) == IFN_GOMP_SIMD_LANE
2261 && LOOP_VINFO_LOOP (loop_vinfo
)->simduid
2262 && TREE_CODE (gimple_call_arg (stmt
, 0)) == SSA_NAME
2263 && LOOP_VINFO_LOOP (loop_vinfo
)->simduid
2264 == SSA_NAME_VAR (gimple_call_arg (stmt
, 0)))
/* We can handle IFN_GOMP_SIMD_LANE by returning a
   { 0, 1, 2, ... vf - 1 } vector.  */
2268 gcc_assert (nargs
== 0);
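	  /* Worked example: with nunits_out == 8, copy j == 0 yields
	     { 0, 1, ..., 7 } and copy j == 1 yields { 8, 9, ..., 15 };
	     element k of copy j is j * nunits_out + k, matching the
	     constants built in the transform code further below.  */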
2272 if (dump_enabled_p ())
2273 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2274 "function is not vectorizable.\n");
2279 gcc_assert (!gimple_vuse (stmt
));
2281 if (slp_node
|| PURE_SLP_STMT (stmt_info
))
2283 else if (modifier
== NARROW
)
2284 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits_out
;
2286 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits_in
;
2288 /* Sanity check: make sure that at least one copy of the vectorized stmt
2289 needs to be generated. */
2290 gcc_assert (ncopies
>= 1);
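  /* Worked example: with a vectorization factor of 8 and 4 elements per
     vector, ncopies == 8 / 4 == 2, i.e. two vector calls are generated
     for each scalar call.  */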
2292 if (!vec_stmt
) /* transformation not required. */
2294 STMT_VINFO_TYPE (stmt_info
) = call_vec_info_type
;
2295 if (dump_enabled_p ())
2296 dump_printf_loc (MSG_NOTE
, vect_location
, "=== vectorizable_call ==="
2298 vect_model_simple_cost (stmt_info
, ncopies
, dt
, NULL
, NULL
);
2304 if (dump_enabled_p ())
2305 dump_printf_loc (MSG_NOTE
, vect_location
, "transform call.\n");
2308 scalar_dest
= gimple_call_lhs (stmt
);
2309 vec_dest
= vect_create_destination_var (scalar_dest
, vectype_out
);
2311 prev_stmt_info
= NULL
;
2315 for (j
= 0; j
< ncopies
; ++j
)
2317 /* Build argument list for the vectorized call. */
2319 vargs
.create (nargs
);
2325 auto_vec
<vec
<tree
> > vec_defs (nargs
);
2326 vec
<tree
> vec_oprnds0
;
2328 for (i
= 0; i
< nargs
; i
++)
2329 vargs
.quick_push (gimple_call_arg (stmt
, i
));
2330 vect_get_slp_defs (vargs
, slp_node
, &vec_defs
, -1);
2331 vec_oprnds0
= vec_defs
[0];
2333 /* Arguments are ready. Create the new vector stmt. */
2334 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vec_oprnd0
)
2337 for (k
= 0; k
< nargs
; k
++)
2339 vec
<tree
> vec_oprndsk
= vec_defs
[k
];
2340 vargs
[k
] = vec_oprndsk
[i
];
2342 new_stmt
= gimple_build_call_vec (fndecl
, vargs
);
2343 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
2344 gimple_call_set_lhs (new_stmt
, new_temp
);
2345 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2346 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
2349 for (i
= 0; i
< nargs
; i
++)
2351 vec
<tree
> vec_oprndsi
= vec_defs
[i
];
2352 vec_oprndsi
.release ();
2357 for (i
= 0; i
< nargs
; i
++)
2359 op
= gimple_call_arg (stmt
, i
);
2362 = vect_get_vec_def_for_operand (op
, stmt
);
2365 vec_oprnd0
= gimple_call_arg (new_stmt
, i
);
2367 = vect_get_vec_def_for_stmt_copy (dt
[i
], vec_oprnd0
);
2370 vargs
.quick_push (vec_oprnd0
);
2373 if (gimple_call_internal_p (stmt
)
2374 && gimple_call_internal_fn (stmt
) == IFN_GOMP_SIMD_LANE
)
2376 tree
*v
= XALLOCAVEC (tree
, nunits_out
);
2378 for (k
= 0; k
< nunits_out
; ++k
)
2379 v
[k
] = build_int_cst (unsigned_type_node
, j
* nunits_out
+ k
);
2380 tree cst
= build_vector (vectype_out
, v
);
2382 = vect_get_new_vect_var (vectype_out
, vect_simple_var
, "cst_");
2383 gimple
*init_stmt
= gimple_build_assign (new_var
, cst
);
2384 new_temp
= make_ssa_name (new_var
, init_stmt
);
2385 gimple_assign_set_lhs (init_stmt
, new_temp
);
2386 vect_init_vector_1 (stmt
, init_stmt
, NULL
);
2387 new_temp
= make_ssa_name (vec_dest
);
2388 new_stmt
= gimple_build_assign (new_temp
,
2389 gimple_assign_lhs (init_stmt
));
2393 new_stmt
= gimple_build_call_vec (fndecl
, vargs
);
2394 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
2395 gimple_call_set_lhs (new_stmt
, new_temp
);
2397 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2400 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
2402 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
2404 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
2410 for (j
= 0; j
< ncopies
; ++j
)
2412 /* Build argument list for the vectorized call. */
2414 vargs
.create (nargs
* 2);
2420 auto_vec
<vec
<tree
> > vec_defs (nargs
);
2421 vec
<tree
> vec_oprnds0
;
2423 for (i
= 0; i
< nargs
; i
++)
2424 vargs
.quick_push (gimple_call_arg (stmt
, i
));
2425 vect_get_slp_defs (vargs
, slp_node
, &vec_defs
, -1);
2426 vec_oprnds0
= vec_defs
[0];
2428 /* Arguments are ready. Create the new vector stmt. */
2429 for (i
= 0; vec_oprnds0
.iterate (i
, &vec_oprnd0
); i
+= 2)
2433 for (k
= 0; k
< nargs
; k
++)
2435 vec
<tree
> vec_oprndsk
= vec_defs
[k
];
2436 vargs
.quick_push (vec_oprndsk
[i
]);
2437 vargs
.quick_push (vec_oprndsk
[i
+ 1]);
2439 new_stmt
= gimple_build_call_vec (fndecl
, vargs
);
2440 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
2441 gimple_call_set_lhs (new_stmt
, new_temp
);
2442 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2443 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
2446 for (i
= 0; i
< nargs
; i
++)
2448 vec
<tree
> vec_oprndsi
= vec_defs
[i
];
2449 vec_oprndsi
.release ();
2454 for (i
= 0; i
< nargs
; i
++)
2456 op
= gimple_call_arg (stmt
, i
);
2460 = vect_get_vec_def_for_operand (op
, stmt
);
2462 = vect_get_vec_def_for_stmt_copy (dt
[i
], vec_oprnd0
);
2466 vec_oprnd1
= gimple_call_arg (new_stmt
, 2*i
+ 1);
2468 = vect_get_vec_def_for_stmt_copy (dt
[i
], vec_oprnd1
);
2470 = vect_get_vec_def_for_stmt_copy (dt
[i
], vec_oprnd0
);
2473 vargs
.quick_push (vec_oprnd0
);
2474 vargs
.quick_push (vec_oprnd1
);
2477 new_stmt
= gimple_build_call_vec (fndecl
, vargs
);
2478 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
2479 gimple_call_set_lhs (new_stmt
, new_temp
);
2480 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2483 STMT_VINFO_VEC_STMT (stmt_info
) = new_stmt
;
2485 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
2487 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
2490 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
2495 /* No current target implements this case. */
/* The call in STMT might prevent it from being removed by DCE.  We,
   however, cannot remove it here, due to the way the SSA name it defines
   is mapped to the new definition.  So just replace the RHS of the
   statement with something harmless.  */
2509 type
= TREE_TYPE (scalar_dest
);
2510 if (is_pattern_stmt_p (stmt_info
))
2511 lhs
= gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info
));
2513 lhs
= gimple_call_lhs (stmt
);
2515 if (gimple_call_internal_p (stmt
)
2516 && gimple_call_internal_fn (stmt
) == IFN_GOMP_SIMD_LANE
)
/* Replace uses of the lhs of the GOMP_SIMD_LANE call outside the loop
   with vf - 1 rather than 0, that is, the last iteration of the
   vectorized loop.  */
2521 imm_use_iterator iter
;
2522 use_operand_p use_p
;
2524 FOR_EACH_IMM_USE_STMT (use_stmt
, iter
, lhs
)
2526 basic_block use_bb
= gimple_bb (use_stmt
);
2528 && !flow_bb_inside_loop_p (LOOP_VINFO_LOOP (loop_vinfo
), use_bb
))
2530 FOR_EACH_IMM_USE_ON_STMT (use_p
, iter
)
2531 SET_USE (use_p
, build_int_cst (TREE_TYPE (lhs
),
2532 ncopies
* nunits_out
- 1));
2533 update_stmt (use_stmt
);
2538 new_stmt
= gimple_build_assign (lhs
, build_zero_cst (type
));
2539 set_vinfo_for_stmt (new_stmt
, stmt_info
);
2540 set_vinfo_for_stmt (stmt
, NULL
);
2541 STMT_VINFO_STMT (stmt_info
) = new_stmt
;
2542 gsi_replace (gsi
, new_stmt
, false);
2548 struct simd_call_arg_info
2552 enum vect_def_type dt
;
2553 HOST_WIDE_INT linear_step
;
2555 bool simd_lane_linear
;
/* Helper function of vectorizable_simd_clone_call.  If OP, an SSA_NAME,
   is linear within a simd lane (but not within the whole loop), note it
   in ARGINFO.  */
2563 vect_simd_lane_linear (tree op
, struct loop
*loop
,
2564 struct simd_call_arg_info
*arginfo
)
2566 gimple
*def_stmt
= SSA_NAME_DEF_STMT (op
);
2568 if (!is_gimple_assign (def_stmt
)
2569 || gimple_assign_rhs_code (def_stmt
) != POINTER_PLUS_EXPR
2570 || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt
)))
2573 tree base
= gimple_assign_rhs1 (def_stmt
);
2574 HOST_WIDE_INT linear_step
= 0;
2575 tree v
= gimple_assign_rhs2 (def_stmt
);
2576 while (TREE_CODE (v
) == SSA_NAME
)
2579 def_stmt
= SSA_NAME_DEF_STMT (v
);
2580 if (is_gimple_assign (def_stmt
))
2581 switch (gimple_assign_rhs_code (def_stmt
))
2584 t
= gimple_assign_rhs2 (def_stmt
);
2585 if (linear_step
|| TREE_CODE (t
) != INTEGER_CST
)
2587 base
= fold_build2 (POINTER_PLUS_EXPR
, TREE_TYPE (base
), base
, t
);
2588 v
= gimple_assign_rhs1 (def_stmt
);
2591 t
= gimple_assign_rhs2 (def_stmt
);
2592 if (linear_step
|| !tree_fits_shwi_p (t
) || integer_zerop (t
))
2594 linear_step
= tree_to_shwi (t
);
2595 v
= gimple_assign_rhs1 (def_stmt
);
2598 t
= gimple_assign_rhs1 (def_stmt
);
2599 if (TREE_CODE (TREE_TYPE (t
)) != INTEGER_TYPE
2600 || (TYPE_PRECISION (TREE_TYPE (v
))
2601 < TYPE_PRECISION (TREE_TYPE (t
))))
2610 else if (is_gimple_call (def_stmt
)
2611 && gimple_call_internal_p (def_stmt
)
2612 && gimple_call_internal_fn (def_stmt
) == IFN_GOMP_SIMD_LANE
2614 && TREE_CODE (gimple_call_arg (def_stmt
, 0)) == SSA_NAME
2615 && (SSA_NAME_VAR (gimple_call_arg (def_stmt
, 0))
2620 arginfo
->linear_step
= linear_step
;
2622 arginfo
->simd_lane_linear
= true;
/* Function vectorizable_simd_clone_call.

   Check if STMT performs a function call that can be vectorized
   by calling a simd clone of the function.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
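
/* Illustrative sketch, not part of the vectorizer: the relationship
   between a scalar function and one of its simd clones.  The names and
   the simdlen of 4 are made up for the example.  */
#if 0
/* Scalar routine as written by the user.  */
static int user_fn (int x) { return x * x + 1; }

/* Shape of a simd clone with simdlen == 4: one call processes four
   consecutive lanes, which is what lets the vectorizer replace four
   scalar calls by a single call to the clone.  */
static void
user_fn_simd4 (const int x[4], int res[4])
{
  for (int lane = 0; lane < 4; lane++)
    res[lane] = user_fn (x[lane]);
}
#endif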
2637 vectorizable_simd_clone_call (gimple
*stmt
, gimple_stmt_iterator
*gsi
,
2638 gimple
**vec_stmt
, slp_tree slp_node
)
2643 tree vec_oprnd0
= NULL_TREE
;
2644 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
), prev_stmt_info
;
2646 unsigned int nunits
;
2647 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
2648 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
2649 vec_info
*vinfo
= stmt_info
->vinfo
;
2650 struct loop
*loop
= loop_vinfo
? LOOP_VINFO_LOOP (loop_vinfo
) : NULL
;
2651 tree fndecl
, new_temp
;
2653 gimple
*new_stmt
= NULL
;
2655 vec
<simd_call_arg_info
> arginfo
= vNULL
;
2656 vec
<tree
> vargs
= vNULL
;
2658 tree lhs
, rtype
, ratype
;
2659 vec
<constructor_elt
, va_gc
> *ret_ctor_elts
;
2661 /* Is STMT a vectorizable call? */
2662 if (!is_gimple_call (stmt
))
2665 fndecl
= gimple_call_fndecl (stmt
);
2666 if (fndecl
== NULL_TREE
)
2669 struct cgraph_node
*node
= cgraph_node::get (fndecl
);
2670 if (node
== NULL
|| node
->simd_clones
== NULL
)
2673 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
2676 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
2679 if (gimple_call_lhs (stmt
)
2680 && TREE_CODE (gimple_call_lhs (stmt
)) != SSA_NAME
)
2683 gcc_checking_assert (!stmt_can_throw_internal (stmt
));
2685 vectype
= STMT_VINFO_VECTYPE (stmt_info
);
2687 if (loop_vinfo
&& nested_in_vect_loop_p (loop
, stmt
))
2691 if (slp_node
|| PURE_SLP_STMT (stmt_info
))
2694 /* Process function arguments. */
2695 nargs
= gimple_call_num_args (stmt
);
2697 /* Bail out if the function has zero arguments. */
2701 arginfo
.create (nargs
);
2703 for (i
= 0; i
< nargs
; i
++)
2705 simd_call_arg_info thisarginfo
;
2708 thisarginfo
.linear_step
= 0;
2709 thisarginfo
.align
= 0;
2710 thisarginfo
.op
= NULL_TREE
;
2711 thisarginfo
.simd_lane_linear
= false;
2713 op
= gimple_call_arg (stmt
, i
);
2714 if (!vect_is_simple_use (op
, vinfo
, &def_stmt
, &thisarginfo
.dt
,
2715 &thisarginfo
.vectype
)
2716 || thisarginfo
.dt
== vect_uninitialized_def
)
2718 if (dump_enabled_p ())
2719 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2720 "use not simple.\n");
2725 if (thisarginfo
.dt
== vect_constant_def
2726 || thisarginfo
.dt
== vect_external_def
)
2727 gcc_assert (thisarginfo
.vectype
== NULL_TREE
);
2729 gcc_assert (thisarginfo
.vectype
!= NULL_TREE
);
/* For linear arguments, the analysis phase should have saved
   the base and step in STMT_VINFO_SIMD_CLONE_INFO.  */
2733 if (i
* 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info
).length ()
2734 && STMT_VINFO_SIMD_CLONE_INFO (stmt_info
)[i
* 3 + 2])
2736 gcc_assert (vec_stmt
);
2737 thisarginfo
.linear_step
2738 = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info
)[i
* 3 + 2]);
2740 = STMT_VINFO_SIMD_CLONE_INFO (stmt_info
)[i
* 3 + 1];
2741 thisarginfo
.simd_lane_linear
2742 = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info
)[i
* 3 + 3]
2743 == boolean_true_node
);
/* If the loop has been peeled for alignment, we need to adjust it.  */
2745 tree n1
= LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo
);
2746 tree n2
= LOOP_VINFO_NITERS (loop_vinfo
);
2747 if (n1
!= n2
&& !thisarginfo
.simd_lane_linear
)
2749 tree bias
= fold_build2 (MINUS_EXPR
, TREE_TYPE (n1
), n1
, n2
);
2750 tree step
= STMT_VINFO_SIMD_CLONE_INFO (stmt_info
)[i
* 3 + 2];
2751 tree opt
= TREE_TYPE (thisarginfo
.op
);
2752 bias
= fold_convert (TREE_TYPE (step
), bias
);
2753 bias
= fold_build2 (MULT_EXPR
, TREE_TYPE (step
), bias
, step
);
2755 = fold_build2 (POINTER_TYPE_P (opt
)
2756 ? POINTER_PLUS_EXPR
: PLUS_EXPR
, opt
,
2757 thisarginfo
.op
, bias
);
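	      /* Worked example: if peeling for alignment reduced the niters
		 from N1 to N2, the peeled N1 - N2 iterations run before the
		 vectorized loop, so the saved base of a linear argument must
		 be advanced by (N1 - N2) * STEP, which is the BIAS added
		 above.  */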
2761 && thisarginfo
.dt
!= vect_constant_def
2762 && thisarginfo
.dt
!= vect_external_def
2764 && TREE_CODE (op
) == SSA_NAME
2765 && simple_iv (loop
, loop_containing_stmt (stmt
), op
,
2767 && tree_fits_shwi_p (iv
.step
))
2769 thisarginfo
.linear_step
= tree_to_shwi (iv
.step
);
2770 thisarginfo
.op
= iv
.base
;
2772 else if ((thisarginfo
.dt
== vect_constant_def
2773 || thisarginfo
.dt
== vect_external_def
)
2774 && POINTER_TYPE_P (TREE_TYPE (op
)))
2775 thisarginfo
.align
= get_pointer_alignment (op
) / BITS_PER_UNIT
;
/* Addresses of array elements indexed by GOMP_SIMD_LANE are
   linear too.  */
2778 if (POINTER_TYPE_P (TREE_TYPE (op
))
2779 && !thisarginfo
.linear_step
2781 && thisarginfo
.dt
!= vect_constant_def
2782 && thisarginfo
.dt
!= vect_external_def
2785 && TREE_CODE (op
) == SSA_NAME
)
2786 vect_simd_lane_linear (op
, loop
, &thisarginfo
);
2788 arginfo
.quick_push (thisarginfo
);
2791 unsigned int badness
= 0;
2792 struct cgraph_node
*bestn
= NULL
;
2793 if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info
).exists ())
2794 bestn
= cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info
)[0]);
2796 for (struct cgraph_node
*n
= node
->simd_clones
; n
!= NULL
;
2797 n
= n
->simdclone
->next_clone
)
2799 unsigned int this_badness
= 0;
2800 if (n
->simdclone
->simdlen
2801 > (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo
)
2802 || n
->simdclone
->nargs
!= nargs
)
2804 if (n
->simdclone
->simdlen
2805 < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo
))
2806 this_badness
+= (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo
))
2807 - exact_log2 (n
->simdclone
->simdlen
)) * 1024;
2808 if (n
->simdclone
->inbranch
)
2809 this_badness
+= 2048;
2810 int target_badness
= targetm
.simd_clone
.usable (n
);
2811 if (target_badness
< 0)
2813 this_badness
+= target_badness
* 512;
2814 /* FORNOW: Have to add code to add the mask argument. */
2815 if (n
->simdclone
->inbranch
)
2817 for (i
= 0; i
< nargs
; i
++)
2819 switch (n
->simdclone
->args
[i
].arg_type
)
2821 case SIMD_CLONE_ARG_TYPE_VECTOR
:
2822 if (!useless_type_conversion_p
2823 (n
->simdclone
->args
[i
].orig_type
,
2824 TREE_TYPE (gimple_call_arg (stmt
, i
))))
2826 else if (arginfo
[i
].dt
== vect_constant_def
2827 || arginfo
[i
].dt
== vect_external_def
2828 || arginfo
[i
].linear_step
)
2831 case SIMD_CLONE_ARG_TYPE_UNIFORM
:
2832 if (arginfo
[i
].dt
!= vect_constant_def
2833 && arginfo
[i
].dt
!= vect_external_def
)
2836 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP
:
2837 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP
:
2838 if (arginfo
[i
].dt
== vect_constant_def
2839 || arginfo
[i
].dt
== vect_external_def
2840 || (arginfo
[i
].linear_step
2841 != n
->simdclone
->args
[i
].linear_step
))
2844 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP
:
2845 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP
:
2846 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP
:
2850 case SIMD_CLONE_ARG_TYPE_MASK
:
2853 if (i
== (size_t) -1)
2855 if (n
->simdclone
->args
[i
].alignment
> arginfo
[i
].align
)
2860 if (arginfo
[i
].align
)
2861 this_badness
+= (exact_log2 (arginfo
[i
].align
)
2862 - exact_log2 (n
->simdclone
->args
[i
].alignment
));
2864 if (i
== (size_t) -1)
2866 if (bestn
== NULL
|| this_badness
< badness
)
2869 badness
= this_badness
;
2879 for (i
= 0; i
< nargs
; i
++)
2880 if ((arginfo
[i
].dt
== vect_constant_def
2881 || arginfo
[i
].dt
== vect_external_def
)
2882 && bestn
->simdclone
->args
[i
].arg_type
== SIMD_CLONE_ARG_TYPE_VECTOR
)
2885 = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt
,
2887 if (arginfo
[i
].vectype
== NULL
2888 || (TYPE_VECTOR_SUBPARTS (arginfo
[i
].vectype
)
2889 > bestn
->simdclone
->simdlen
))
2896 fndecl
= bestn
->decl
;
2897 nunits
= bestn
->simdclone
->simdlen
;
2898 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
/* If the function isn't const, only allow it in simd loops where the
   user has asserted that at least nunits consecutive iterations can be
   performed using SIMD instructions.  */
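  /* For example, a loop annotated with "#pragma omp simd safelen(8)"
     records safelen == 8, asserting that eight consecutive iterations may
     be executed concurrently; a non-const clone with simdlen <= 8 is then
     still acceptable in that loop.  */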
2903 if ((loop
== NULL
|| (unsigned) loop
->safelen
< nunits
)
2904 && gimple_vuse (stmt
))
2910 /* Sanity check: make sure that at least one copy of the vectorized stmt
2911 needs to be generated. */
2912 gcc_assert (ncopies
>= 1);
2914 if (!vec_stmt
) /* transformation not required. */
2916 STMT_VINFO_SIMD_CLONE_INFO (stmt_info
).safe_push (bestn
->decl
);
2917 for (i
= 0; i
< nargs
; i
++)
2918 if (bestn
->simdclone
->args
[i
].arg_type
2919 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP
)
2921 STMT_VINFO_SIMD_CLONE_INFO (stmt_info
).safe_grow_cleared (i
* 3
2923 STMT_VINFO_SIMD_CLONE_INFO (stmt_info
).safe_push (arginfo
[i
].op
);
2924 tree lst
= POINTER_TYPE_P (TREE_TYPE (arginfo
[i
].op
))
2925 ? size_type_node
: TREE_TYPE (arginfo
[i
].op
);
2926 tree ls
= build_int_cst (lst
, arginfo
[i
].linear_step
);
2927 STMT_VINFO_SIMD_CLONE_INFO (stmt_info
).safe_push (ls
);
2928 tree sll
= arginfo
[i
].simd_lane_linear
2929 ? boolean_true_node
: boolean_false_node
;
2930 STMT_VINFO_SIMD_CLONE_INFO (stmt_info
).safe_push (sll
);
2932 STMT_VINFO_TYPE (stmt_info
) = call_simd_clone_vec_info_type
;
2933 if (dump_enabled_p ())
2934 dump_printf_loc (MSG_NOTE
, vect_location
,
2935 "=== vectorizable_simd_clone_call ===\n");
2936 /* vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
2943 if (dump_enabled_p ())
2944 dump_printf_loc (MSG_NOTE
, vect_location
, "transform call.\n");
2947 scalar_dest
= gimple_call_lhs (stmt
);
2948 vec_dest
= NULL_TREE
;
2953 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
2954 rtype
= TREE_TYPE (TREE_TYPE (fndecl
));
2955 if (TREE_CODE (rtype
) == ARRAY_TYPE
)
2958 rtype
= TREE_TYPE (ratype
);
2962 prev_stmt_info
= NULL
;
2963 for (j
= 0; j
< ncopies
; ++j
)
2965 /* Build argument list for the vectorized call. */
2967 vargs
.create (nargs
);
2971 for (i
= 0; i
< nargs
; i
++)
2973 unsigned int k
, l
, m
, o
;
2975 op
= gimple_call_arg (stmt
, i
);
2976 switch (bestn
->simdclone
->args
[i
].arg_type
)
2978 case SIMD_CLONE_ARG_TYPE_VECTOR
:
2979 atype
= bestn
->simdclone
->args
[i
].vector_type
;
2980 o
= nunits
/ TYPE_VECTOR_SUBPARTS (atype
);
2981 for (m
= j
* o
; m
< (j
+ 1) * o
; m
++)
2983 if (TYPE_VECTOR_SUBPARTS (atype
)
2984 < TYPE_VECTOR_SUBPARTS (arginfo
[i
].vectype
))
2986 unsigned int prec
= GET_MODE_BITSIZE (TYPE_MODE (atype
));
2987 k
= (TYPE_VECTOR_SUBPARTS (arginfo
[i
].vectype
)
2988 / TYPE_VECTOR_SUBPARTS (atype
));
2989 gcc_assert ((k
& (k
- 1)) == 0);
2992 = vect_get_vec_def_for_operand (op
, stmt
);
2995 vec_oprnd0
= arginfo
[i
].op
;
2996 if ((m
& (k
- 1)) == 0)
2998 = vect_get_vec_def_for_stmt_copy (arginfo
[i
].dt
,
3001 arginfo
[i
].op
= vec_oprnd0
;
3003 = build3 (BIT_FIELD_REF
, atype
, vec_oprnd0
,
3005 bitsize_int ((m
& (k
- 1)) * prec
));
3007 = gimple_build_assign (make_ssa_name (atype
),
3009 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3010 vargs
.safe_push (gimple_assign_lhs (new_stmt
));
3014 k
= (TYPE_VECTOR_SUBPARTS (atype
)
3015 / TYPE_VECTOR_SUBPARTS (arginfo
[i
].vectype
));
3016 gcc_assert ((k
& (k
- 1)) == 0);
3017 vec
<constructor_elt
, va_gc
> *ctor_elts
;
3019 vec_alloc (ctor_elts
, k
);
3022 for (l
= 0; l
< k
; l
++)
3024 if (m
== 0 && l
== 0)
3026 = vect_get_vec_def_for_operand (op
, stmt
);
3029 = vect_get_vec_def_for_stmt_copy (arginfo
[i
].dt
,
3031 arginfo
[i
].op
= vec_oprnd0
;
3034 CONSTRUCTOR_APPEND_ELT (ctor_elts
, NULL_TREE
,
3038 vargs
.safe_push (vec_oprnd0
);
3041 vec_oprnd0
= build_constructor (atype
, ctor_elts
);
3043 = gimple_build_assign (make_ssa_name (atype
),
3045 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3046 vargs
.safe_push (gimple_assign_lhs (new_stmt
));
3051 case SIMD_CLONE_ARG_TYPE_UNIFORM
:
3052 vargs
.safe_push (op
);
3054 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP
:
3059 = force_gimple_operand (arginfo
[i
].op
, &stmts
, true,
3064 edge pe
= loop_preheader_edge (loop
);
3065 new_bb
= gsi_insert_seq_on_edge_immediate (pe
, stmts
);
3066 gcc_assert (!new_bb
);
3068 if (arginfo
[i
].simd_lane_linear
)
3070 vargs
.safe_push (arginfo
[i
].op
);
3073 tree phi_res
= copy_ssa_name (op
);
3074 gphi
*new_phi
= create_phi_node (phi_res
, loop
->header
);
3075 set_vinfo_for_stmt (new_phi
,
3076 new_stmt_vec_info (new_phi
, loop_vinfo
));
3077 add_phi_arg (new_phi
, arginfo
[i
].op
,
3078 loop_preheader_edge (loop
), UNKNOWN_LOCATION
);
3080 = POINTER_TYPE_P (TREE_TYPE (op
))
3081 ? POINTER_PLUS_EXPR
: PLUS_EXPR
;
3082 tree type
= POINTER_TYPE_P (TREE_TYPE (op
))
3083 ? sizetype
: TREE_TYPE (op
);
3085 = wi::mul (bestn
->simdclone
->args
[i
].linear_step
,
3087 tree tcst
= wide_int_to_tree (type
, cst
);
3088 tree phi_arg
= copy_ssa_name (op
);
3090 = gimple_build_assign (phi_arg
, code
, phi_res
, tcst
);
3091 gimple_stmt_iterator si
= gsi_after_labels (loop
->header
);
3092 gsi_insert_after (&si
, new_stmt
, GSI_NEW_STMT
);
3093 set_vinfo_for_stmt (new_stmt
,
3094 new_stmt_vec_info (new_stmt
, loop_vinfo
));
3095 add_phi_arg (new_phi
, phi_arg
, loop_latch_edge (loop
),
3097 arginfo
[i
].op
= phi_res
;
3098 vargs
.safe_push (phi_res
);
3103 = POINTER_TYPE_P (TREE_TYPE (op
))
3104 ? POINTER_PLUS_EXPR
: PLUS_EXPR
;
3105 tree type
= POINTER_TYPE_P (TREE_TYPE (op
))
3106 ? sizetype
: TREE_TYPE (op
);
3108 = wi::mul (bestn
->simdclone
->args
[i
].linear_step
,
3110 tree tcst
= wide_int_to_tree (type
, cst
);
3111 new_temp
= make_ssa_name (TREE_TYPE (op
));
3112 new_stmt
= gimple_build_assign (new_temp
, code
,
3113 arginfo
[i
].op
, tcst
);
3114 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3115 vargs
.safe_push (new_temp
);
3118 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP
:
3124 new_stmt
= gimple_build_call_vec (fndecl
, vargs
);
3127 gcc_assert (ratype
|| TYPE_VECTOR_SUBPARTS (rtype
) == nunits
);
3129 new_temp
= create_tmp_var (ratype
);
3130 else if (TYPE_VECTOR_SUBPARTS (vectype
)
3131 == TYPE_VECTOR_SUBPARTS (rtype
))
3132 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
3134 new_temp
= make_ssa_name (rtype
, new_stmt
);
3135 gimple_call_set_lhs (new_stmt
, new_temp
);
3137 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3141 if (TYPE_VECTOR_SUBPARTS (vectype
) < nunits
)
3144 unsigned int prec
= GET_MODE_BITSIZE (TYPE_MODE (vectype
));
3145 k
= nunits
/ TYPE_VECTOR_SUBPARTS (vectype
);
3146 gcc_assert ((k
& (k
- 1)) == 0);
3147 for (l
= 0; l
< k
; l
++)
3152 t
= build_fold_addr_expr (new_temp
);
3153 t
= build2 (MEM_REF
, vectype
, t
,
3154 build_int_cst (TREE_TYPE (t
),
3155 l
* prec
/ BITS_PER_UNIT
));
3158 t
= build3 (BIT_FIELD_REF
, vectype
, new_temp
,
3159 size_int (prec
), bitsize_int (l
* prec
));
3161 = gimple_build_assign (make_ssa_name (vectype
), t
);
3162 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3163 if (j
== 0 && l
== 0)
3164 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
3166 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
3168 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
3173 tree clobber
= build_constructor (ratype
, NULL
);
3174 TREE_THIS_VOLATILE (clobber
) = 1;
3175 new_stmt
= gimple_build_assign (new_temp
, clobber
);
3176 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3180 else if (TYPE_VECTOR_SUBPARTS (vectype
) > nunits
)
3182 unsigned int k
= (TYPE_VECTOR_SUBPARTS (vectype
)
3183 / TYPE_VECTOR_SUBPARTS (rtype
));
3184 gcc_assert ((k
& (k
- 1)) == 0);
3185 if ((j
& (k
- 1)) == 0)
3186 vec_alloc (ret_ctor_elts
, k
);
3189 unsigned int m
, o
= nunits
/ TYPE_VECTOR_SUBPARTS (rtype
);
3190 for (m
= 0; m
< o
; m
++)
3192 tree tem
= build4 (ARRAY_REF
, rtype
, new_temp
,
3193 size_int (m
), NULL_TREE
, NULL_TREE
);
3195 = gimple_build_assign (make_ssa_name (rtype
), tem
);
3196 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3197 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts
, NULL_TREE
,
3198 gimple_assign_lhs (new_stmt
));
3200 tree clobber
= build_constructor (ratype
, NULL
);
3201 TREE_THIS_VOLATILE (clobber
) = 1;
3202 new_stmt
= gimple_build_assign (new_temp
, clobber
);
3203 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3206 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts
, NULL_TREE
, new_temp
);
3207 if ((j
& (k
- 1)) != k
- 1)
3209 vec_oprnd0
= build_constructor (vectype
, ret_ctor_elts
);
3211 = gimple_build_assign (make_ssa_name (vec_dest
), vec_oprnd0
);
3212 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3214 if ((unsigned) j
== k
- 1)
3215 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
3217 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
3219 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
3224 tree t
= build_fold_addr_expr (new_temp
);
3225 t
= build2 (MEM_REF
, vectype
, t
,
3226 build_int_cst (TREE_TYPE (t
), 0));
3228 = gimple_build_assign (make_ssa_name (vec_dest
), t
);
3229 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3230 tree clobber
= build_constructor (ratype
, NULL
);
3231 TREE_THIS_VOLATILE (clobber
) = 1;
3232 vect_finish_stmt_generation (stmt
,
3233 gimple_build_assign (new_temp
,
3239 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
3241 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
3243 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
/* The call in STMT might prevent it from being removed by DCE.  We,
   however, cannot remove it here, due to the way the SSA name it defines
   is mapped to the new definition.  So just replace the RHS of the
   statement with something harmless.  */
3258 type
= TREE_TYPE (scalar_dest
);
3259 if (is_pattern_stmt_p (stmt_info
))
3260 lhs
= gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info
));
3262 lhs
= gimple_call_lhs (stmt
);
3263 new_stmt
= gimple_build_assign (lhs
, build_zero_cst (type
));
3266 new_stmt
= gimple_build_nop ();
3267 set_vinfo_for_stmt (new_stmt
, stmt_info
);
3268 set_vinfo_for_stmt (stmt
, NULL
);
3269 STMT_VINFO_STMT (stmt_info
) = new_stmt
;
3270 gsi_replace (gsi
, new_stmt
, true);
3271 unlink_stmt_vdef (stmt
);
/* Function vect_gen_widened_results_half

   Create a vector stmt whose code is CODE, whose number of operands is
   OP_TYPE, and whose result variable is VEC_DEST, and whose arguments are
   VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at BSI.
   In the case that CODE is a CALL_EXPR, this means that a call to DECL
   needs to be created (DECL is a function-decl of a target-builtin).
   STMT is the original scalar stmt that we are vectorizing.  */
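
/* Illustrative sketch, not part of the vectorizer: what "one half of a
   widened result" means.  Widening eight shorts to ints cannot fit in a
   single same-sized vector, so two result vectors of four ints each are
   produced.  Element ordering between the halves is simplified here; in
   the vectorizer it depends on the target's hi/lo operations and on
   endianness.  All names are made up for the example.  */
#if 0
static void
sketch_widen_halves (const short src[8], int half0[4], int half1[4])
{
  for (int i = 0; i < 4; i++)
    half0[i] = (int) src[i];		/* first half of the widened result */
  for (int i = 0; i < 4; i++)
    half1[i] = (int) src[i + 4];	/* second half of the widened result */
}
#endif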
3287 vect_gen_widened_results_half (enum tree_code code
,
3289 tree vec_oprnd0
, tree vec_oprnd1
, int op_type
,
3290 tree vec_dest
, gimple_stmt_iterator
*gsi
,
3296 /* Generate half of the widened result: */
3297 if (code
== CALL_EXPR
)
3299 /* Target specific support */
3300 if (op_type
== binary_op
)
3301 new_stmt
= gimple_build_call (decl
, 2, vec_oprnd0
, vec_oprnd1
);
3303 new_stmt
= gimple_build_call (decl
, 1, vec_oprnd0
);
3304 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
3305 gimple_call_set_lhs (new_stmt
, new_temp
);
3309 /* Generic support */
3310 gcc_assert (op_type
== TREE_CODE_LENGTH (code
));
3311 if (op_type
!= binary_op
)
3313 new_stmt
= gimple_build_assign (vec_dest
, code
, vec_oprnd0
, vec_oprnd1
);
3314 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
3315 gimple_assign_set_lhs (new_stmt
, new_temp
);
3317 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
/* Get vectorized definitions for loop-based vectorization.  For the first
   operand we call vect_get_vec_def_for_operand () (with OPRND containing
   scalar operand), and for the rest we get a copy with
   vect_get_vec_def_for_stmt_copy () using the previous vector definition
   (stored in OPRND).  See vect_get_vec_def_for_stmt_copy () for details.
   The vectors are collected into VEC_OPRNDS.  */
3331 vect_get_loop_based_defs (tree
*oprnd
, gimple
*stmt
, enum vect_def_type dt
,
3332 vec
<tree
> *vec_oprnds
, int multi_step_cvt
)
/* Get the first vector operand.  */
/* All the vector operands except the very first one (that is the scalar
   oprnd) are stmt copies.  */
3339 if (TREE_CODE (TREE_TYPE (*oprnd
)) != VECTOR_TYPE
)
3340 vec_oprnd
= vect_get_vec_def_for_operand (*oprnd
, stmt
);
3342 vec_oprnd
= vect_get_vec_def_for_stmt_copy (dt
, *oprnd
);
3344 vec_oprnds
->quick_push (vec_oprnd
);
3346 /* Get second vector operand. */
3347 vec_oprnd
= vect_get_vec_def_for_stmt_copy (dt
, vec_oprnd
);
3348 vec_oprnds
->quick_push (vec_oprnd
);
/* For conversion in multiple steps, continue to get operands
   recursively.  */
3355 vect_get_loop_based_defs (oprnd
, stmt
, dt
, vec_oprnds
, multi_step_cvt
- 1);
/* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
   For multi-step conversions store the resulting vectors and call the
   function recursively.  */
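
/* Illustrative sketch, not part of the vectorizer: a single demotion step
   such as VEC_PACK_TRUNC_EXPR combines two source vectors into one vector
   of elements half as wide, which is why the loop below consumes the
   operands in pairs.  Element ordering is simplified; names are made up
   for the example.  */
#if 0
static void
sketch_pack_trunc (const int a[4], const int b[4], short packed[8])
{
  for (int i = 0; i < 4; i++)
    packed[i] = (short) a[i];		/* elements from the first operand */
  for (int i = 0; i < 4; i++)
    packed[i + 4] = (short) b[i];	/* elements from the second operand */
}
#endif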
3364 vect_create_vectorized_demotion_stmts (vec
<tree
> *vec_oprnds
,
3365 int multi_step_cvt
, gimple
*stmt
,
3367 gimple_stmt_iterator
*gsi
,
3368 slp_tree slp_node
, enum tree_code code
,
3369 stmt_vec_info
*prev_stmt_info
)
3372 tree vop0
, vop1
, new_tmp
, vec_dest
;
3374 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
3376 vec_dest
= vec_dsts
.pop ();
3378 for (i
= 0; i
< vec_oprnds
->length (); i
+= 2)
3380 /* Create demotion operation. */
3381 vop0
= (*vec_oprnds
)[i
];
3382 vop1
= (*vec_oprnds
)[i
+ 1];
3383 new_stmt
= gimple_build_assign (vec_dest
, code
, vop0
, vop1
);
3384 new_tmp
= make_ssa_name (vec_dest
, new_stmt
);
3385 gimple_assign_set_lhs (new_stmt
, new_tmp
);
3386 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3389 /* Store the resulting vector for next recursive call. */
3390 (*vec_oprnds
)[i
/2] = new_tmp
;
3393 /* This is the last step of the conversion sequence. Store the
3394 vectors in SLP_NODE or in vector info of the scalar statement
3395 (or in STMT_VINFO_RELATED_STMT chain). */
3397 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
3400 if (!*prev_stmt_info
)
3401 STMT_VINFO_VEC_STMT (stmt_info
) = new_stmt
;
3403 STMT_VINFO_RELATED_STMT (*prev_stmt_info
) = new_stmt
;
3405 *prev_stmt_info
= vinfo_for_stmt (new_stmt
);
/* For multi-step demotion operations we first generate demotion operations
   from the source type to the intermediate types, and then combine the
   results (stored in VEC_OPRNDS) in a demotion operation to the destination
   type.  */
/* At each level of recursion we have half of the operands we had at the
   previous level.  */
3418 vec_oprnds
->truncate ((i
+1)/2);
3419 vect_create_vectorized_demotion_stmts (vec_oprnds
, multi_step_cvt
- 1,
3420 stmt
, vec_dsts
, gsi
, slp_node
,
3421 VEC_PACK_TRUNC_EXPR
,
3425 vec_dsts
.quick_push (vec_dest
);
/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
   and VEC_OPRNDS1 (for binary operations).  For multi-step conversions store
   the resulting vectors and call the function recursively.  */
3434 vect_create_vectorized_promotion_stmts (vec
<tree
> *vec_oprnds0
,
3435 vec
<tree
> *vec_oprnds1
,
3436 gimple
*stmt
, tree vec_dest
,
3437 gimple_stmt_iterator
*gsi
,
3438 enum tree_code code1
,
3439 enum tree_code code2
, tree decl1
,
3440 tree decl2
, int op_type
)
3443 tree vop0
, vop1
, new_tmp1
, new_tmp2
;
3444 gimple
*new_stmt1
, *new_stmt2
;
3445 vec
<tree
> vec_tmp
= vNULL
;
3447 vec_tmp
.create (vec_oprnds0
->length () * 2);
3448 FOR_EACH_VEC_ELT (*vec_oprnds0
, i
, vop0
)
3450 if (op_type
== binary_op
)
3451 vop1
= (*vec_oprnds1
)[i
];
3455 /* Generate the two halves of promotion operation. */
3456 new_stmt1
= vect_gen_widened_results_half (code1
, decl1
, vop0
, vop1
,
3457 op_type
, vec_dest
, gsi
, stmt
);
3458 new_stmt2
= vect_gen_widened_results_half (code2
, decl2
, vop0
, vop1
,
3459 op_type
, vec_dest
, gsi
, stmt
);
3460 if (is_gimple_call (new_stmt1
))
3462 new_tmp1
= gimple_call_lhs (new_stmt1
);
3463 new_tmp2
= gimple_call_lhs (new_stmt2
);
3467 new_tmp1
= gimple_assign_lhs (new_stmt1
);
3468 new_tmp2
= gimple_assign_lhs (new_stmt2
);
3471 /* Store the results for the next step. */
3472 vec_tmp
.quick_push (new_tmp1
);
3473 vec_tmp
.quick_push (new_tmp2
);
3476 vec_oprnds0
->release ();
3477 *vec_oprnds0
= vec_tmp
;
/* Check if STMT performs a conversion operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
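
/* Illustrative sketch, not part of the vectorizer: why a conversion may
   need multiple steps.  A scalar cast from unsigned char to float is a
   single operation, but on vectors each step can only roughly double the
   element width, so one possible sequence goes through intermediate
   integer types.  The concrete types chosen by the code below depend on
   what the target supports; the names here are made up for the example.  */
#if 0
static void
sketch_multi_step_convert (const unsigned char src[16], float dst[16])
{
  short tmp1[16];
  int tmp2[16];
  for (int i = 0; i < 16; i++)
    tmp1[i] = (short) src[i];	/* step 1: widen char to short */
  for (int i = 0; i < 16; i++)
    tmp2[i] = (int) tmp1[i];	/* step 2: widen short to int */
  for (int i = 0; i < 16; i++)
    dst[i] = (float) tmp2[i];	/* step 3: convert int to float */
}
#endif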
3487 vectorizable_conversion (gimple
*stmt
, gimple_stmt_iterator
*gsi
,
3488 gimple
**vec_stmt
, slp_tree slp_node
)
3492 tree op0
, op1
= NULL_TREE
;
3493 tree vec_oprnd0
= NULL_TREE
, vec_oprnd1
= NULL_TREE
;
3494 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
3495 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
3496 enum tree_code code
, code1
= ERROR_MARK
, code2
= ERROR_MARK
;
3497 enum tree_code codecvt1
= ERROR_MARK
, codecvt2
= ERROR_MARK
;
3498 tree decl1
= NULL_TREE
, decl2
= NULL_TREE
;
3501 enum vect_def_type dt
[2] = {vect_unknown_def_type
, vect_unknown_def_type
};
3502 gimple
*new_stmt
= NULL
;
3503 stmt_vec_info prev_stmt_info
;
3506 tree vectype_out
, vectype_in
;
3508 tree lhs_type
, rhs_type
;
3509 enum { NARROW
, NONE
, WIDEN
} modifier
;
3510 vec
<tree
> vec_oprnds0
= vNULL
;
3511 vec
<tree
> vec_oprnds1
= vNULL
;
3513 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
3514 vec_info
*vinfo
= stmt_info
->vinfo
;
3515 int multi_step_cvt
= 0;
3516 vec
<tree
> vec_dsts
= vNULL
;
3517 vec
<tree
> interm_types
= vNULL
;
3518 tree last_oprnd
, intermediate_type
, cvt_type
= NULL_TREE
;
3520 machine_mode rhs_mode
;
3521 unsigned short fltsz
;
3523 /* Is STMT a vectorizable conversion? */
3525 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
3528 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
3531 if (!is_gimple_assign (stmt
))
3534 if (TREE_CODE (gimple_assign_lhs (stmt
)) != SSA_NAME
)
3537 code
= gimple_assign_rhs_code (stmt
);
3538 if (!CONVERT_EXPR_CODE_P (code
)
3539 && code
!= FIX_TRUNC_EXPR
3540 && code
!= FLOAT_EXPR
3541 && code
!= WIDEN_MULT_EXPR
3542 && code
!= WIDEN_LSHIFT_EXPR
)
3545 op_type
= TREE_CODE_LENGTH (code
);
3547 /* Check types of lhs and rhs. */
3548 scalar_dest
= gimple_assign_lhs (stmt
);
3549 lhs_type
= TREE_TYPE (scalar_dest
);
3550 vectype_out
= STMT_VINFO_VECTYPE (stmt_info
);
3552 op0
= gimple_assign_rhs1 (stmt
);
3553 rhs_type
= TREE_TYPE (op0
);
3555 if ((code
!= FIX_TRUNC_EXPR
&& code
!= FLOAT_EXPR
)
3556 && !((INTEGRAL_TYPE_P (lhs_type
)
3557 && INTEGRAL_TYPE_P (rhs_type
))
3558 || (SCALAR_FLOAT_TYPE_P (lhs_type
)
3559 && SCALAR_FLOAT_TYPE_P (rhs_type
))))
3562 if ((INTEGRAL_TYPE_P (lhs_type
)
3563 && (TYPE_PRECISION (lhs_type
)
3564 != GET_MODE_PRECISION (TYPE_MODE (lhs_type
))))
3565 || (INTEGRAL_TYPE_P (rhs_type
)
3566 && (TYPE_PRECISION (rhs_type
)
3567 != GET_MODE_PRECISION (TYPE_MODE (rhs_type
)))))
3569 if (dump_enabled_p ())
3570 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3571 "type conversion to/from bit-precision unsupported."
3576 /* Check the operands of the operation. */
3577 if (!vect_is_simple_use (op0
, vinfo
, &def_stmt
, &dt
[0], &vectype_in
))
3579 if (dump_enabled_p ())
3580 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3581 "use not simple.\n");
3584 if (op_type
== binary_op
)
3588 op1
= gimple_assign_rhs2 (stmt
);
3589 gcc_assert (code
== WIDEN_MULT_EXPR
|| code
== WIDEN_LSHIFT_EXPR
);
/* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
   OP1.  */
3592 if (CONSTANT_CLASS_P (op0
))
3593 ok
= vect_is_simple_use (op1
, vinfo
, &def_stmt
, &dt
[1], &vectype_in
);
3595 ok
= vect_is_simple_use (op1
, vinfo
, &def_stmt
, &dt
[1]);
3599 if (dump_enabled_p ())
3600 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3601 "use not simple.\n");
/* If op0 is an external or constant def, use a vector type of
   the same size as the output vector type.  */
3609 vectype_in
= get_same_sized_vectype (rhs_type
, vectype_out
);
3611 gcc_assert (vectype_in
);
3614 if (dump_enabled_p ())
3616 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3617 "no vectype for scalar type ");
3618 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, rhs_type
);
3619 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
3625 nunits_in
= TYPE_VECTOR_SUBPARTS (vectype_in
);
3626 nunits_out
= TYPE_VECTOR_SUBPARTS (vectype_out
);
3627 if (nunits_in
< nunits_out
)
3629 else if (nunits_out
== nunits_in
)
/* Multiple types in SLP are handled by creating the appropriate number of
   vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
   case of SLP.  */
3637 if (slp_node
|| PURE_SLP_STMT (stmt_info
))
3639 else if (modifier
== NARROW
)
3640 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits_out
;
3642 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits_in
;
3644 /* Sanity check: make sure that at least one copy of the vectorized stmt
3645 needs to be generated. */
3646 gcc_assert (ncopies
>= 1);
3648 /* Supportable by target? */
3652 if (code
!= FIX_TRUNC_EXPR
&& code
!= FLOAT_EXPR
)
3654 if (supportable_convert_operation (code
, vectype_out
, vectype_in
,
3659 if (dump_enabled_p ())
3660 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3661 "conversion not supported by target.\n");
3665 if (supportable_widening_operation (code
, stmt
, vectype_out
, vectype_in
,
3666 &code1
, &code2
, &multi_step_cvt
,
/* A binary widening operation can only be supported directly by the
   architecture.  */
3671 gcc_assert (!(multi_step_cvt
&& op_type
== binary_op
));
3675 if (code
!= FLOAT_EXPR
3676 || (GET_MODE_SIZE (TYPE_MODE (lhs_type
))
3677 <= GET_MODE_SIZE (TYPE_MODE (rhs_type
))))
3680 rhs_mode
= TYPE_MODE (rhs_type
);
3681 fltsz
= GET_MODE_SIZE (TYPE_MODE (lhs_type
));
3682 for (rhs_mode
= GET_MODE_2XWIDER_MODE (TYPE_MODE (rhs_type
));
3683 rhs_mode
!= VOIDmode
&& GET_MODE_SIZE (rhs_mode
) <= fltsz
;
3684 rhs_mode
= GET_MODE_2XWIDER_MODE (rhs_mode
))
3687 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode
), 0);
3688 cvt_type
= get_same_sized_vectype (cvt_type
, vectype_in
);
3689 if (cvt_type
== NULL_TREE
)
3692 if (GET_MODE_SIZE (rhs_mode
) == fltsz
)
3694 if (!supportable_convert_operation (code
, vectype_out
,
3695 cvt_type
, &decl1
, &codecvt1
))
3698 else if (!supportable_widening_operation (code
, stmt
, vectype_out
,
3699 cvt_type
, &codecvt1
,
3700 &codecvt2
, &multi_step_cvt
,
3704 gcc_assert (multi_step_cvt
== 0);
3706 if (supportable_widening_operation (NOP_EXPR
, stmt
, cvt_type
,
3707 vectype_in
, &code1
, &code2
,
3708 &multi_step_cvt
, &interm_types
))
3712 if (rhs_mode
== VOIDmode
|| GET_MODE_SIZE (rhs_mode
) > fltsz
)
3715 if (GET_MODE_SIZE (rhs_mode
) == fltsz
)
3716 codecvt2
= ERROR_MARK
;
3720 interm_types
.safe_push (cvt_type
);
3721 cvt_type
= NULL_TREE
;
3726 gcc_assert (op_type
== unary_op
);
3727 if (supportable_narrowing_operation (code
, vectype_out
, vectype_in
,
3728 &code1
, &multi_step_cvt
,
3732 if (code
!= FIX_TRUNC_EXPR
3733 || (GET_MODE_SIZE (TYPE_MODE (lhs_type
))
3734 >= GET_MODE_SIZE (TYPE_MODE (rhs_type
))))
3737 rhs_mode
= TYPE_MODE (rhs_type
);
3739 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode
), 0);
3740 cvt_type
= get_same_sized_vectype (cvt_type
, vectype_in
);
3741 if (cvt_type
== NULL_TREE
)
3743 if (!supportable_convert_operation (code
, cvt_type
, vectype_in
,
3746 if (supportable_narrowing_operation (NOP_EXPR
, vectype_out
, cvt_type
,
3747 &code1
, &multi_step_cvt
,
3756 if (!vec_stmt
) /* transformation not required. */
3758 if (dump_enabled_p ())
3759 dump_printf_loc (MSG_NOTE
, vect_location
,
3760 "=== vectorizable_conversion ===\n");
3761 if (code
== FIX_TRUNC_EXPR
|| code
== FLOAT_EXPR
)
3763 STMT_VINFO_TYPE (stmt_info
) = type_conversion_vec_info_type
;
3764 vect_model_simple_cost (stmt_info
, ncopies
, dt
, NULL
, NULL
);
3766 else if (modifier
== NARROW
)
3768 STMT_VINFO_TYPE (stmt_info
) = type_demotion_vec_info_type
;
3769 vect_model_promotion_demotion_cost (stmt_info
, dt
, multi_step_cvt
);
3773 STMT_VINFO_TYPE (stmt_info
) = type_promotion_vec_info_type
;
3774 vect_model_promotion_demotion_cost (stmt_info
, dt
, multi_step_cvt
);
3776 interm_types
.release ();
3781 if (dump_enabled_p ())
3782 dump_printf_loc (MSG_NOTE
, vect_location
,
3783 "transform conversion. ncopies = %d.\n", ncopies
);
3785 if (op_type
== binary_op
)
3787 if (CONSTANT_CLASS_P (op0
))
3788 op0
= fold_convert (TREE_TYPE (op1
), op0
);
3789 else if (CONSTANT_CLASS_P (op1
))
3790 op1
= fold_convert (TREE_TYPE (op0
), op1
);
/* In case of multi-step conversion, we first generate conversion operations
   to the intermediate types, and then from those types to the final one.
   We create vector destinations for the intermediate types (TYPES) received
   from supportable_*_operation, and store them in the correct order
   for future use in vect_create_vectorized_*_stmts ().  */
3798 vec_dsts
.create (multi_step_cvt
+ 1);
3799 vec_dest
= vect_create_destination_var (scalar_dest
,
3800 (cvt_type
&& modifier
== WIDEN
)
3801 ? cvt_type
: vectype_out
);
3802 vec_dsts
.quick_push (vec_dest
);
3806 for (i
= interm_types
.length () - 1;
3807 interm_types
.iterate (i
, &intermediate_type
); i
--)
3809 vec_dest
= vect_create_destination_var (scalar_dest
,
3811 vec_dsts
.quick_push (vec_dest
);
3816 vec_dest
= vect_create_destination_var (scalar_dest
,
3818 ? vectype_out
: cvt_type
);
3822 if (modifier
== WIDEN
)
3824 vec_oprnds0
.create (multi_step_cvt
? vect_pow2 (multi_step_cvt
) : 1);
3825 if (op_type
== binary_op
)
3826 vec_oprnds1
.create (1);
3828 else if (modifier
== NARROW
)
3829 vec_oprnds0
.create (
3830 2 * (multi_step_cvt
? vect_pow2 (multi_step_cvt
) : 1));
3832 else if (code
== WIDEN_LSHIFT_EXPR
)
3833 vec_oprnds1
.create (slp_node
->vec_stmts_size
);
3836 prev_stmt_info
= NULL
;
3840 for (j
= 0; j
< ncopies
; j
++)
3843 vect_get_vec_defs (op0
, NULL
, stmt
, &vec_oprnds0
, NULL
, slp_node
,
3846 vect_get_vec_defs_for_stmt_copy (dt
, &vec_oprnds0
, NULL
);
3848 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vop0
)
3850 /* Arguments are ready, create the new vector stmt. */
3851 if (code1
== CALL_EXPR
)
3853 new_stmt
= gimple_build_call (decl1
, 1, vop0
);
3854 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
3855 gimple_call_set_lhs (new_stmt
, new_temp
);
3859 gcc_assert (TREE_CODE_LENGTH (code1
) == unary_op
);
3860 new_stmt
= gimple_build_assign (vec_dest
, code1
, vop0
);
3861 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
3862 gimple_assign_set_lhs (new_stmt
, new_temp
);
3865 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3867 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
3870 if (!prev_stmt_info
)
3871 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
3873 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
3874 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
/* In case the vectorization factor (VF) is bigger than the number
   of elements that we can fit in a vectype (nunits), we have to
   generate more than one vector stmt, i.e., we need to "unroll"
   the vector stmt by a factor of VF/nunits.  */
3885 for (j
= 0; j
< ncopies
; j
++)
3892 if (code
== WIDEN_LSHIFT_EXPR
)
3897 /* Store vec_oprnd1 for every vector stmt to be created
3898 for SLP_NODE. We check during the analysis that all
3899 the shift arguments are the same. */
3900 for (k
= 0; k
< slp_node
->vec_stmts_size
- 1; k
++)
3901 vec_oprnds1
.quick_push (vec_oprnd1
);
3903 vect_get_vec_defs (op0
, NULL_TREE
, stmt
, &vec_oprnds0
, NULL
,
3907 vect_get_vec_defs (op0
, op1
, stmt
, &vec_oprnds0
,
3908 &vec_oprnds1
, slp_node
, -1);
3912 vec_oprnd0
= vect_get_vec_def_for_operand (op0
, stmt
);
3913 vec_oprnds0
.quick_push (vec_oprnd0
);
3914 if (op_type
== binary_op
)
3916 if (code
== WIDEN_LSHIFT_EXPR
)
3919 vec_oprnd1
= vect_get_vec_def_for_operand (op1
, stmt
);
3920 vec_oprnds1
.quick_push (vec_oprnd1
);
3926 vec_oprnd0
= vect_get_vec_def_for_stmt_copy (dt
[0], vec_oprnd0
);
3927 vec_oprnds0
.truncate (0);
3928 vec_oprnds0
.quick_push (vec_oprnd0
);
3929 if (op_type
== binary_op
)
3931 if (code
== WIDEN_LSHIFT_EXPR
)
3934 vec_oprnd1
= vect_get_vec_def_for_stmt_copy (dt
[1],
3936 vec_oprnds1
.truncate (0);
3937 vec_oprnds1
.quick_push (vec_oprnd1
);
3941 /* Arguments are ready. Create the new vector stmts. */
3942 for (i
= multi_step_cvt
; i
>= 0; i
--)
3944 tree this_dest
= vec_dsts
[i
];
3945 enum tree_code c1
= code1
, c2
= code2
;
3946 if (i
== 0 && codecvt2
!= ERROR_MARK
)
3951 vect_create_vectorized_promotion_stmts (&vec_oprnds0
,
3953 stmt
, this_dest
, gsi
,
3954 c1
, c2
, decl1
, decl2
,
3958 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vop0
)
3962 if (codecvt1
== CALL_EXPR
)
3964 new_stmt
= gimple_build_call (decl1
, 1, vop0
);
3965 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
3966 gimple_call_set_lhs (new_stmt
, new_temp
);
3970 gcc_assert (TREE_CODE_LENGTH (codecvt1
) == unary_op
);
3971 new_temp
= make_ssa_name (vec_dest
);
3972 new_stmt
= gimple_build_assign (new_temp
, codecvt1
,
3976 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3979 new_stmt
= SSA_NAME_DEF_STMT (vop0
);
3982 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
3985 if (!prev_stmt_info
)
3986 STMT_VINFO_VEC_STMT (stmt_info
) = new_stmt
;
3988 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
3989 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
3994 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
/* In case the vectorization factor (VF) is bigger than the number
   of elements that we can fit in a vectype (nunits), we have to
   generate more than one vector stmt, i.e., we need to "unroll"
   the vector stmt by a factor of VF/nunits.  */
4002 for (j
= 0; j
< ncopies
; j
++)
4006 vect_get_vec_defs (op0
, NULL_TREE
, stmt
, &vec_oprnds0
, NULL
,
4010 vec_oprnds0
.truncate (0);
4011 vect_get_loop_based_defs (&last_oprnd
, stmt
, dt
[0], &vec_oprnds0
,
4012 vect_pow2 (multi_step_cvt
) - 1);
4015 /* Arguments are ready. Create the new vector stmts. */
4017 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vop0
)
4019 if (codecvt1
== CALL_EXPR
)
4021 new_stmt
= gimple_build_call (decl1
, 1, vop0
);
4022 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
4023 gimple_call_set_lhs (new_stmt
, new_temp
);
4027 gcc_assert (TREE_CODE_LENGTH (codecvt1
) == unary_op
);
4028 new_temp
= make_ssa_name (vec_dest
);
4029 new_stmt
= gimple_build_assign (new_temp
, codecvt1
,
4033 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
4034 vec_oprnds0
[i
] = new_temp
;
4037 vect_create_vectorized_demotion_stmts (&vec_oprnds0
, multi_step_cvt
,
4038 stmt
, vec_dsts
, gsi
,
4043 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
4047 vec_oprnds0
.release ();
4048 vec_oprnds1
.release ();
4049 vec_dsts
.release ();
4050 interm_types
.release ();
/* Function vectorizable_assignment.

   Check if STMT performs an assignment (copy) that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
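
/* Illustrative sketch, not part of the vectorizer: the kind of copy this
   function handles.  A cast that keeps both the element count and the
   total size, such as signed to unsigned below, does not change the bit
   pattern and therefore vectorizes as a plain vector move.  Names are
   made up for the example.  */
#if 0
#include <string.h>

static void
sketch_nop_convert (const int src[4], unsigned int dst[4])
{
  /* Same bits, same size: a block copy is all that is needed.  */
  memcpy (dst, src, 4 * sizeof (int));
}
#endif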
4064 vectorizable_assignment (gimple
*stmt
, gimple_stmt_iterator
*gsi
,
4065 gimple
**vec_stmt
, slp_tree slp_node
)
4070 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
4071 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
4074 enum vect_def_type dt
[2] = {vect_unknown_def_type
, vect_unknown_def_type
};
4077 vec
<tree
> vec_oprnds
= vNULL
;
4079 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
4080 vec_info
*vinfo
= stmt_info
->vinfo
;
4081 gimple
*new_stmt
= NULL
;
4082 stmt_vec_info prev_stmt_info
= NULL
;
4083 enum tree_code code
;
4086 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
4089 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
/* Is STMT a vectorizable assignment?  */
4093 if (!is_gimple_assign (stmt
))
4096 scalar_dest
= gimple_assign_lhs (stmt
);
4097 if (TREE_CODE (scalar_dest
) != SSA_NAME
)
4100 code
= gimple_assign_rhs_code (stmt
);
4101 if (gimple_assign_single_p (stmt
)
4102 || code
== PAREN_EXPR
4103 || CONVERT_EXPR_CODE_P (code
))
4104 op
= gimple_assign_rhs1 (stmt
);
4108 if (code
== VIEW_CONVERT_EXPR
)
4109 op
= TREE_OPERAND (op
, 0);
4111 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
4112 unsigned int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
/* Multiple types in SLP are handled by creating the appropriate number of
   vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
   case of SLP.  */
4117 if (slp_node
|| PURE_SLP_STMT (stmt_info
))
4120 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
4122 gcc_assert (ncopies
>= 1);
4124 if (!vect_is_simple_use (op
, vinfo
, &def_stmt
, &dt
[0], &vectype_in
))
4126 if (dump_enabled_p ())
4127 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4128 "use not simple.\n");
/* We can handle NOP_EXPR conversions that do not change the number
   of elements or the vector size.  */
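  /* For example, a conversion from a vector of four ints to a vector of
     four unsigned ints keeps both the element count and the total size,
     so it is accepted here and later emitted as a simple vector copy.  */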
4134 if ((CONVERT_EXPR_CODE_P (code
)
4135 || code
== VIEW_CONVERT_EXPR
)
4137 || TYPE_VECTOR_SUBPARTS (vectype_in
) != nunits
4138 || (GET_MODE_SIZE (TYPE_MODE (vectype
))
4139 != GET_MODE_SIZE (TYPE_MODE (vectype_in
)))))
4142 /* We do not handle bit-precision changes. */
4143 if ((CONVERT_EXPR_CODE_P (code
)
4144 || code
== VIEW_CONVERT_EXPR
)
4145 && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest
))
4146 && ((TYPE_PRECISION (TREE_TYPE (scalar_dest
))
4147 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest
))))
4148 || ((TYPE_PRECISION (TREE_TYPE (op
))
4149 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op
))))))
4150 /* But a conversion that does not change the bit-pattern is ok. */
4151 && !((TYPE_PRECISION (TREE_TYPE (scalar_dest
))
4152 > TYPE_PRECISION (TREE_TYPE (op
)))
4153 && TYPE_UNSIGNED (TREE_TYPE (op
))))
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "type conversion to/from bit-precision "
			 "unsupported.\n");
      return false;
4162 if (!vec_stmt
) /* transformation not required. */
4164 STMT_VINFO_TYPE (stmt_info
) = assignment_vec_info_type
;
4165 if (dump_enabled_p ())
4166 dump_printf_loc (MSG_NOTE
, vect_location
,
4167 "=== vectorizable_assignment ===\n");
4168 vect_model_simple_cost (stmt_info
, ncopies
, dt
, NULL
, NULL
);
4173 if (dump_enabled_p ())
4174 dump_printf_loc (MSG_NOTE
, vect_location
, "transform assignment.\n");
4177 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
4180 for (j
= 0; j
< ncopies
; j
++)
4184 vect_get_vec_defs (op
, NULL
, stmt
, &vec_oprnds
, NULL
, slp_node
, -1);
4186 vect_get_vec_defs_for_stmt_copy (dt
, &vec_oprnds
, NULL
);
4188 /* Arguments are ready. create the new vector stmt. */
4189 FOR_EACH_VEC_ELT (vec_oprnds
, i
, vop
)
4191 if (CONVERT_EXPR_CODE_P (code
)
4192 || code
== VIEW_CONVERT_EXPR
)
4193 vop
= build1 (VIEW_CONVERT_EXPR
, vectype
, vop
);
4194 new_stmt
= gimple_build_assign (vec_dest
, vop
);
4195 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
4196 gimple_assign_set_lhs (new_stmt
, new_temp
);
4197 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
4199 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
4206 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
4208 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
4210 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
  vec_oprnds.release ();

  return true;
}
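/* Illustrative sketch, not part of GCC (the function name is made up):
   the kind of scalar statement vectorizable_assignment above handles is a
   plain copy, possibly through a conversion that keeps the bit-pattern and
   element size.  */

static void
example_copy_loop (unsigned int *dst, const int *src, int n)
{
  for (int i = 0; i < n; i++)
    /* Same width on both sides, so the vectorized form is just a
       VIEW_CONVERT_EXPR copy of each vector.  */
    dst[i] = (unsigned int) src[i];
}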
/* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
   either as shift by a scalar or by a vector.  */

bool
vect_supportable_shift (enum tree_code code, tree scalar_type)
{
  machine_mode vec_mode;
  optab optab;
  int icode;
  tree vectype;

  vectype = get_vectype_for_scalar_type (scalar_type);
  if (!vectype)
    return false;

  optab = optab_for_tree_code (code, vectype, optab_scalar);
  if (!optab
      || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (!optab
	  || (optab_handler (optab, TYPE_MODE (vectype))
	      == CODE_FOR_nothing))
	return false;
    }

  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    return false;

  return true;
}
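/* Illustrative sketch, not part of GCC (names are made up): the two shift
   shapes probed by vect_supportable_shift above, written as scalar loops.
   A loop-invariant count maps to the vector/scalar form (optab_scalar),
   a per-element count needs the vector/vector form (optab_vector).  */

static void
example_shift_by_scalar (int *a, int n, int amount)
{
  /* Every element shifted by the same invariant AMOUNT.  */
  for (int i = 0; i < n; i++)
    a[i] <<= amount;
}

static void
example_shift_by_vector (int *a, const int *amount, int n)
{
  /* Each element has its own count taken from another vectorized value.  */
  for (int i = 0; i < n; i++)
    a[i] <<= amount[i];
}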
/* Function vectorizable_shift.

   Check if STMT performs a shift operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
4262 vectorizable_shift (gimple
*stmt
, gimple_stmt_iterator
*gsi
,
4263 gimple
**vec_stmt
, slp_tree slp_node
)
4267 tree op0
, op1
= NULL
;
4268 tree vec_oprnd1
= NULL_TREE
;
4269 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
4271 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
4272 enum tree_code code
;
4273 machine_mode vec_mode
;
4277 machine_mode optab_op2_mode
;
4279 enum vect_def_type dt
[2] = {vect_unknown_def_type
, vect_unknown_def_type
};
4280 gimple
*new_stmt
= NULL
;
4281 stmt_vec_info prev_stmt_info
;
4288 vec
<tree
> vec_oprnds0
= vNULL
;
4289 vec
<tree
> vec_oprnds1
= vNULL
;
4292 bool scalar_shift_arg
= true;
4293 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
4294 vec_info
*vinfo
= stmt_info
->vinfo
;
4297 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
4300 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
4303 /* Is STMT a vectorizable binary/unary operation? */
4304 if (!is_gimple_assign (stmt
))
4307 if (TREE_CODE (gimple_assign_lhs (stmt
)) != SSA_NAME
)
4310 code
= gimple_assign_rhs_code (stmt
);
4312 if (!(code
== LSHIFT_EXPR
|| code
== RSHIFT_EXPR
|| code
== LROTATE_EXPR
4313 || code
== RROTATE_EXPR
))
4316 scalar_dest
= gimple_assign_lhs (stmt
);
4317 vectype_out
= STMT_VINFO_VECTYPE (stmt_info
);
4318 if (TYPE_PRECISION (TREE_TYPE (scalar_dest
))
4319 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest
))))
4321 if (dump_enabled_p ())
4322 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4323 "bit-precision shifts not supported.\n");
4327 op0
= gimple_assign_rhs1 (stmt
);
4328 if (!vect_is_simple_use (op0
, vinfo
, &def_stmt
, &dt
[0], &vectype
))
4330 if (dump_enabled_p ())
4331 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4332 "use not simple.\n");
4335 /* If op0 is an external or constant def use a vector type with
4336 the same size as the output vector type. */
4338 vectype
= get_same_sized_vectype (TREE_TYPE (op0
), vectype_out
);
4340 gcc_assert (vectype
);
4343 if (dump_enabled_p ())
4344 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4345 "no vectype for scalar type\n");
4349 nunits_out
= TYPE_VECTOR_SUBPARTS (vectype_out
);
4350 nunits_in
= TYPE_VECTOR_SUBPARTS (vectype
);
4351 if (nunits_out
!= nunits_in
)
4354 op1
= gimple_assign_rhs2 (stmt
);
4355 if (!vect_is_simple_use (op1
, vinfo
, &def_stmt
, &dt
[1], &op1_vectype
))
4357 if (dump_enabled_p ())
4358 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4359 "use not simple.\n");
4364 vf
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
4368 /* Multiple types in SLP are handled by creating the appropriate number of
4369 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4371 if (slp_node
|| PURE_SLP_STMT (stmt_info
))
4374 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits_in
;
4376 gcc_assert (ncopies
>= 1);
4378 /* Determine whether the shift amount is a vector, or scalar. If the
4379 shift/rotate amount is a vector, use the vector/vector shift optabs. */
4381 if ((dt
[1] == vect_internal_def
4382 || dt
[1] == vect_induction_def
)
4384 scalar_shift_arg
= false;
4385 else if (dt
[1] == vect_constant_def
4386 || dt
[1] == vect_external_def
4387 || dt
[1] == vect_internal_def
)
      /* In SLP, need to check whether the shift count is the same,
	 in loops if it is a constant or invariant, it is always
	 a scalar shift.  */
4394 vec
<gimple
*> stmts
= SLP_TREE_SCALAR_STMTS (slp_node
);
4397 FOR_EACH_VEC_ELT (stmts
, k
, slpstmt
)
4398 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt
), op1
, 0))
4399 scalar_shift_arg
= false;
4404 if (dump_enabled_p ())
4405 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4406 "operand mode requires invariant argument.\n");
4410 /* Vector shifted by vector. */
4411 if (!scalar_shift_arg
)
4413 optab
= optab_for_tree_code (code
, vectype
, optab_vector
);
4414 if (dump_enabled_p ())
4415 dump_printf_loc (MSG_NOTE
, vect_location
,
4416 "vector/vector shift/rotate found.\n");
4419 op1_vectype
= get_same_sized_vectype (TREE_TYPE (op1
), vectype_out
);
4420 if (op1_vectype
== NULL_TREE
4421 || TYPE_MODE (op1_vectype
) != TYPE_MODE (vectype
))
4423 if (dump_enabled_p ())
4424 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4425 "unusable type for last operand in"
4426 " vector/vector shift/rotate.\n");
4430 /* See if the machine has a vector shifted by scalar insn and if not
4431 then see if it has a vector shifted by vector insn. */
4434 optab
= optab_for_tree_code (code
, vectype
, optab_scalar
);
4436 && optab_handler (optab
, TYPE_MODE (vectype
)) != CODE_FOR_nothing
)
4438 if (dump_enabled_p ())
4439 dump_printf_loc (MSG_NOTE
, vect_location
,
4440 "vector/scalar shift/rotate found.\n");
4444 optab
= optab_for_tree_code (code
, vectype
, optab_vector
);
4446 && (optab_handler (optab
, TYPE_MODE (vectype
))
4447 != CODE_FOR_nothing
))
4449 scalar_shift_arg
= false;
4451 if (dump_enabled_p ())
4452 dump_printf_loc (MSG_NOTE
, vect_location
,
4453 "vector/vector shift/rotate found.\n");
	  /* Unlike the other binary operators, shifts/rotates have
	     the rhs being int, instead of the same type as the lhs,
	     so make sure the scalar is the right type if we are
	     dealing with vectors of long long/long/short/char.  */
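	  /* Hedged example: for a vector of 'short' shifted by an 'int'
	     count, a constant count is folded to 'short' just below so that
	     the scalar operand matches the vector element mode.  */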
4459 if (dt
[1] == vect_constant_def
)
4460 op1
= fold_convert (TREE_TYPE (vectype
), op1
);
4461 else if (!useless_type_conversion_p (TREE_TYPE (vectype
),
4465 && TYPE_MODE (TREE_TYPE (vectype
))
4466 != TYPE_MODE (TREE_TYPE (op1
)))
4468 if (dump_enabled_p ())
4469 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4470 "unusable type for last operand in"
4471 " vector/vector shift/rotate.\n");
4474 if (vec_stmt
&& !slp_node
)
4476 op1
= fold_convert (TREE_TYPE (vectype
), op1
);
4477 op1
= vect_init_vector (stmt
, op1
,
4478 TREE_TYPE (vectype
), NULL
);
4485 /* Supportable by target? */
4488 if (dump_enabled_p ())
4489 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4493 vec_mode
= TYPE_MODE (vectype
);
4494 icode
= (int) optab_handler (optab
, vec_mode
);
4495 if (icode
== CODE_FOR_nothing
)
4497 if (dump_enabled_p ())
4498 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4499 "op not supported by target.\n");
4500 /* Check only during analysis. */
4501 if (GET_MODE_SIZE (vec_mode
) != UNITS_PER_WORD
4502 || (vf
< vect_min_worthwhile_factor (code
)
4505 if (dump_enabled_p ())
4506 dump_printf_loc (MSG_NOTE
, vect_location
,
4507 "proceeding using word mode.\n");
4510 /* Worthwhile without SIMD support? Check only during analysis. */
4511 if (!VECTOR_MODE_P (TYPE_MODE (vectype
))
4512 && vf
< vect_min_worthwhile_factor (code
)
4515 if (dump_enabled_p ())
4516 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4517 "not worthwhile without SIMD support.\n");
4521 if (!vec_stmt
) /* transformation not required. */
4523 STMT_VINFO_TYPE (stmt_info
) = shift_vec_info_type
;
4524 if (dump_enabled_p ())
4525 dump_printf_loc (MSG_NOTE
, vect_location
,
4526 "=== vectorizable_shift ===\n");
4527 vect_model_simple_cost (stmt_info
, ncopies
, dt
, NULL
, NULL
);
4533 if (dump_enabled_p ())
4534 dump_printf_loc (MSG_NOTE
, vect_location
,
4535 "transform binary/unary operation.\n");
4538 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
4540 prev_stmt_info
= NULL
;
4541 for (j
= 0; j
< ncopies
; j
++)
4546 if (scalar_shift_arg
)
	  /* Vector shl and shr insn patterns can be defined with scalar
	     operand 2 (shift operand).  In this case, use constant or loop
	     invariant op1 directly, without extending it to vector mode
	     first.  */
4552 optab_op2_mode
= insn_data
[icode
].operand
[2].mode
;
4553 if (!VECTOR_MODE_P (optab_op2_mode
))
4555 if (dump_enabled_p ())
4556 dump_printf_loc (MSG_NOTE
, vect_location
,
4557 "operand 1 using scalar mode.\n");
4559 vec_oprnds1
.create (slp_node
? slp_node
->vec_stmts_size
: 1);
4560 vec_oprnds1
.quick_push (vec_oprnd1
);
4563 /* Store vec_oprnd1 for every vector stmt to be created
4564 for SLP_NODE. We check during the analysis that all
4565 the shift arguments are the same.
4566 TODO: Allow different constants for different vector
4567 stmts generated for an SLP instance. */
4568 for (k
= 0; k
< slp_node
->vec_stmts_size
- 1; k
++)
4569 vec_oprnds1
.quick_push (vec_oprnd1
);
4574 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
4575 (a special case for certain kind of vector shifts); otherwise,
4576 operand 1 should be of a vector type (the usual case). */
4578 vect_get_vec_defs (op0
, NULL_TREE
, stmt
, &vec_oprnds0
, NULL
,
4581 vect_get_vec_defs (op0
, op1
, stmt
, &vec_oprnds0
, &vec_oprnds1
,
4585 vect_get_vec_defs_for_stmt_copy (dt
, &vec_oprnds0
, &vec_oprnds1
);
4587 /* Arguments are ready. Create the new vector stmt. */
4588 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vop0
)
4590 vop1
= vec_oprnds1
[i
];
4591 new_stmt
= gimple_build_assign (vec_dest
, code
, vop0
, vop1
);
4592 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
4593 gimple_assign_set_lhs (new_stmt
, new_temp
);
4594 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
4596 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
4603 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
4605 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
4606 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
4609 vec_oprnds0
.release ();
4610 vec_oprnds1
.release ();
/* Function vectorizable_operation.

   Check if STMT performs a binary, unary or ternary operation that can
   be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
4625 vectorizable_operation (gimple
*stmt
, gimple_stmt_iterator
*gsi
,
4626 gimple
**vec_stmt
, slp_tree slp_node
)
4630 tree op0
, op1
= NULL_TREE
, op2
= NULL_TREE
;
4631 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
4633 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
4634 enum tree_code code
;
4635 machine_mode vec_mode
;
4639 bool target_support_p
;
4641 enum vect_def_type dt
[3]
4642 = {vect_unknown_def_type
, vect_unknown_def_type
, vect_unknown_def_type
};
4643 gimple
*new_stmt
= NULL
;
4644 stmt_vec_info prev_stmt_info
;
4650 vec
<tree
> vec_oprnds0
= vNULL
;
4651 vec
<tree
> vec_oprnds1
= vNULL
;
4652 vec
<tree
> vec_oprnds2
= vNULL
;
4653 tree vop0
, vop1
, vop2
;
4654 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
4655 vec_info
*vinfo
= stmt_info
->vinfo
;
4658 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
4661 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
4664 /* Is STMT a vectorizable binary/unary operation? */
4665 if (!is_gimple_assign (stmt
))
4668 if (TREE_CODE (gimple_assign_lhs (stmt
)) != SSA_NAME
)
4671 code
= gimple_assign_rhs_code (stmt
);
4673 /* For pointer addition, we should use the normal plus for
4674 the vector addition. */
4675 if (code
== POINTER_PLUS_EXPR
)
4678 /* Support only unary or binary operations. */
4679 op_type
= TREE_CODE_LENGTH (code
);
4680 if (op_type
!= unary_op
&& op_type
!= binary_op
&& op_type
!= ternary_op
)
4682 if (dump_enabled_p ())
4683 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4684 "num. args = %d (not unary/binary/ternary op).\n",
4689 scalar_dest
= gimple_assign_lhs (stmt
);
4690 vectype_out
= STMT_VINFO_VECTYPE (stmt_info
);
  /* Most operations cannot handle bit-precision types without extra
     truncations.  */
4694 if ((TYPE_PRECISION (TREE_TYPE (scalar_dest
))
4695 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest
))))
4696 /* Exception are bitwise binary operations. */
4697 && code
!= BIT_IOR_EXPR
4698 && code
!= BIT_XOR_EXPR
4699 && code
!= BIT_AND_EXPR
)
4701 if (dump_enabled_p ())
4702 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4703 "bit-precision arithmetic not supported.\n");
4707 op0
= gimple_assign_rhs1 (stmt
);
4708 if (!vect_is_simple_use (op0
, vinfo
, &def_stmt
, &dt
[0], &vectype
))
4710 if (dump_enabled_p ())
4711 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4712 "use not simple.\n");
4715 /* If op0 is an external or constant def use a vector type with
4716 the same size as the output vector type. */
4718 vectype
= get_same_sized_vectype (TREE_TYPE (op0
), vectype_out
);
4720 gcc_assert (vectype
);
4723 if (dump_enabled_p ())
4725 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4726 "no vectype for scalar type ");
4727 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
,
4729 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
4735 nunits_out
= TYPE_VECTOR_SUBPARTS (vectype_out
);
4736 nunits_in
= TYPE_VECTOR_SUBPARTS (vectype
);
4737 if (nunits_out
!= nunits_in
)
4740 if (op_type
== binary_op
|| op_type
== ternary_op
)
4742 op1
= gimple_assign_rhs2 (stmt
);
4743 if (!vect_is_simple_use (op1
, vinfo
, &def_stmt
, &dt
[1]))
4745 if (dump_enabled_p ())
4746 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4747 "use not simple.\n");
4751 if (op_type
== ternary_op
)
4753 op2
= gimple_assign_rhs3 (stmt
);
4754 if (!vect_is_simple_use (op2
, vinfo
, &def_stmt
, &dt
[2]))
4756 if (dump_enabled_p ())
4757 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4758 "use not simple.\n");
4764 vf
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
4768 /* Multiple types in SLP are handled by creating the appropriate number of
4769 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4771 if (slp_node
|| PURE_SLP_STMT (stmt_info
))
4774 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits_in
;
4776 gcc_assert (ncopies
>= 1);
4778 /* Shifts are handled in vectorizable_shift (). */
4779 if (code
== LSHIFT_EXPR
|| code
== RSHIFT_EXPR
|| code
== LROTATE_EXPR
4780 || code
== RROTATE_EXPR
)
4783 /* Supportable by target? */
4785 vec_mode
= TYPE_MODE (vectype
);
4786 if (code
== MULT_HIGHPART_EXPR
)
4787 target_support_p
= can_mult_highpart_p (vec_mode
, TYPE_UNSIGNED (vectype
));
4790 optab
= optab_for_tree_code (code
, vectype
, optab_default
);
4793 if (dump_enabled_p ())
4794 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4798 target_support_p
= (optab_handler (optab
, vec_mode
)
4799 != CODE_FOR_nothing
);
4802 if (!target_support_p
)
4804 if (dump_enabled_p ())
4805 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4806 "op not supported by target.\n");
4807 /* Check only during analysis. */
4808 if (GET_MODE_SIZE (vec_mode
) != UNITS_PER_WORD
4809 || (!vec_stmt
&& vf
< vect_min_worthwhile_factor (code
)))
4811 if (dump_enabled_p ())
4812 dump_printf_loc (MSG_NOTE
, vect_location
,
4813 "proceeding using word mode.\n");
4816 /* Worthwhile without SIMD support? Check only during analysis. */
4817 if (!VECTOR_MODE_P (vec_mode
)
4819 && vf
< vect_min_worthwhile_factor (code
))
4821 if (dump_enabled_p ())
4822 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4823 "not worthwhile without SIMD support.\n");
4827 if (!vec_stmt
) /* transformation not required. */
4829 STMT_VINFO_TYPE (stmt_info
) = op_vec_info_type
;
4830 if (dump_enabled_p ())
4831 dump_printf_loc (MSG_NOTE
, vect_location
,
4832 "=== vectorizable_operation ===\n");
4833 vect_model_simple_cost (stmt_info
, ncopies
, dt
, NULL
, NULL
);
4839 if (dump_enabled_p ())
4840 dump_printf_loc (MSG_NOTE
, vect_location
,
4841 "transform binary/unary operation.\n");
4844 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
     we need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1 (done in vectorizable_load. See more details
             there):
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     step2: vectorize stmt S2 (done here):
        To vectorize stmt S2 we first need to find the relevant vector
        def for the first operand 'x'.  This is, as usual, obtained from
        the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
        that defines 'x' (S1).  This way we find the stmt VS1_0, and the
        relevant vector def 'vx0'.  Having found 'vx0' we can generate
        the vector stmt VS2_0, and as usual, record it in the
        STMT_VINFO_VEC_STMT of stmt S2.
        When creating the second copy (VS2_1), we obtain the relevant vector
        def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
        stmt VS1_0.  This way we find the stmt VS1_1 and the relevant
        vector def 'vx1'.  Using 'vx1' we create stmt VS2_1 and record a
        pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
        Similarly when creating stmts VS2_2 and VS2_3.  This is the resulting
        chain of stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        VS2_0:  vz0 = vx0 + v1  VS2_1           -
        VS2_1:  vz1 = vx1 + v1  VS2_2           -
        VS2_2:  vz2 = vx2 + v1  VS2_3           -
        VS2_3:  vz3 = vx3 + v1  -               -
        S2:     z = x + 1       -               VS2_0  */
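  /* Hedged sketch (the accessors are the real ones, the loop itself is
     illustrative only): later stages can walk the chain built below
     roughly like

       for (gimple *vs = STMT_VINFO_VEC_STMT (stmt_info);
	    vs != NULL;
	    vs = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (vs)))
	 ... process one of the NCOPIES vector copies ...
  */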
4899 prev_stmt_info
= NULL
;
4900 for (j
= 0; j
< ncopies
; j
++)
4905 if (op_type
== binary_op
|| op_type
== ternary_op
)
4906 vect_get_vec_defs (op0
, op1
, stmt
, &vec_oprnds0
, &vec_oprnds1
,
4909 vect_get_vec_defs (op0
, NULL_TREE
, stmt
, &vec_oprnds0
, NULL
,
4911 if (op_type
== ternary_op
)
4913 vec_oprnds2
.create (1);
4914 vec_oprnds2
.quick_push (vect_get_vec_def_for_operand (op2
,
4920 vect_get_vec_defs_for_stmt_copy (dt
, &vec_oprnds0
, &vec_oprnds1
);
4921 if (op_type
== ternary_op
)
4923 tree vec_oprnd
= vec_oprnds2
.pop ();
4924 vec_oprnds2
.quick_push (vect_get_vec_def_for_stmt_copy (dt
[2],
4929 /* Arguments are ready. Create the new vector stmt. */
4930 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vop0
)
4932 vop1
= ((op_type
== binary_op
|| op_type
== ternary_op
)
4933 ? vec_oprnds1
[i
] : NULL_TREE
);
4934 vop2
= ((op_type
== ternary_op
)
4935 ? vec_oprnds2
[i
] : NULL_TREE
);
4936 new_stmt
= gimple_build_assign (vec_dest
, code
, vop0
, vop1
, vop2
);
4937 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
4938 gimple_assign_set_lhs (new_stmt
, new_temp
);
4939 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
4941 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
4948 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
4950 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
4951 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
4954 vec_oprnds0
.release ();
4955 vec_oprnds1
.release ();
4956 vec_oprnds2
.release ();
/* A helper function to ensure data reference DR's base alignment
   for STMT_INFO.  */

static void
ensure_base_align (stmt_vec_info stmt_info, struct data_reference *dr)
{
  if (!dr->aux)
    return;

  if (DR_VECT_AUX (dr)->base_misaligned)
    {
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      tree base_decl = DR_VECT_AUX (dr)->base_decl;

      if (decl_in_symtab_p (base_decl))
	symtab_node::get (base_decl)->increase_alignment (TYPE_ALIGN (vectype));
      else
	{
	  DECL_ALIGN (base_decl) = TYPE_ALIGN (vectype);
	  DECL_USER_ALIGN (base_decl) = 1;
	}
      DR_VECT_AUX (dr)->base_misaligned = false;
    }
}
/* Given a vector type VECTYPE returns the VECTOR_CST mask that implements
   reversal of the vector elements.  If that is impossible to do,
   returns NULL.  */

static tree
perm_mask_for_reverse (tree vectype)
{
  int i, nunits;
  unsigned char *sel;

  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  sel = XALLOCAVEC (unsigned char, nunits);

  for (i = 0; i < nunits; ++i)
    sel[i] = nunits - 1 - i;

  if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
    return NULL_TREE;
  return vect_gen_perm_mask_checked (vectype, sel);
}
/* Function vectorizable_store.

   Check if STMT defines a non scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
5017 vectorizable_store (gimple
*stmt
, gimple_stmt_iterator
*gsi
, gimple
**vec_stmt
,
5023 tree vec_oprnd
= NULL_TREE
;
5024 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
5025 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
), *first_dr
= NULL
;
5027 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
5028 struct loop
*loop
= NULL
;
5029 machine_mode vec_mode
;
5031 enum dr_alignment_support alignment_support_scheme
;
5033 enum vect_def_type dt
;
5034 stmt_vec_info prev_stmt_info
= NULL
;
5035 tree dataref_ptr
= NULL_TREE
;
5036 tree dataref_offset
= NULL_TREE
;
5037 gimple
*ptr_incr
= NULL
;
5040 gimple
*next_stmt
, *first_stmt
= NULL
;
5041 bool grouped_store
= false;
5042 bool store_lanes_p
= false;
5043 unsigned int group_size
, i
;
5044 vec
<tree
> dr_chain
= vNULL
;
5045 vec
<tree
> oprnds
= vNULL
;
5046 vec
<tree
> result_chain
= vNULL
;
5048 bool negative
= false;
5049 tree offset
= NULL_TREE
;
5050 vec
<tree
> vec_oprnds
= vNULL
;
5051 bool slp
= (slp_node
!= NULL
);
5052 unsigned int vec_num
;
5053 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
5054 vec_info
*vinfo
= stmt_info
->vinfo
;
5056 tree scatter_base
= NULL_TREE
, scatter_off
= NULL_TREE
;
5057 tree scatter_off_vectype
= NULL_TREE
, scatter_decl
= NULL_TREE
;
5058 int scatter_scale
= 1;
5059 enum vect_def_type scatter_idx_dt
= vect_unknown_def_type
;
5060 enum vect_def_type scatter_src_dt
= vect_unknown_def_type
;
5063 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
5066 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
5069 /* Is vectorizable store? */
5071 if (!is_gimple_assign (stmt
))
5074 scalar_dest
= gimple_assign_lhs (stmt
);
5075 if (TREE_CODE (scalar_dest
) == VIEW_CONVERT_EXPR
5076 && is_pattern_stmt_p (stmt_info
))
5077 scalar_dest
= TREE_OPERAND (scalar_dest
, 0);
5078 if (TREE_CODE (scalar_dest
) != ARRAY_REF
5079 && TREE_CODE (scalar_dest
) != BIT_FIELD_REF
5080 && TREE_CODE (scalar_dest
) != INDIRECT_REF
5081 && TREE_CODE (scalar_dest
) != COMPONENT_REF
5082 && TREE_CODE (scalar_dest
) != IMAGPART_EXPR
5083 && TREE_CODE (scalar_dest
) != REALPART_EXPR
5084 && TREE_CODE (scalar_dest
) != MEM_REF
)
5087 gcc_assert (gimple_assign_single_p (stmt
));
5089 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
5090 unsigned int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
5093 loop
= LOOP_VINFO_LOOP (loop_vinfo
);
5095 /* Multiple types in SLP are handled by creating the appropriate number of
5096 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5098 if (slp
|| PURE_SLP_STMT (stmt_info
))
5101 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
5103 gcc_assert (ncopies
>= 1);
5105 /* FORNOW. This restriction should be relaxed. */
5106 if (loop
&& nested_in_vect_loop_p (loop
, stmt
) && ncopies
> 1)
5108 if (dump_enabled_p ())
5109 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5110 "multiple types in nested loop.\n");
5114 op
= gimple_assign_rhs1 (stmt
);
5115 if (!vect_is_simple_use (op
, vinfo
, &def_stmt
, &dt
))
5117 if (dump_enabled_p ())
5118 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5119 "use not simple.\n");
5123 elem_type
= TREE_TYPE (vectype
);
5124 vec_mode
= TYPE_MODE (vectype
);
5126 /* FORNOW. In some cases can vectorize even if data-type not supported
5127 (e.g. - array initialization with 0). */
5128 if (optab_handler (mov_optab
, vec_mode
) == CODE_FOR_nothing
)
5131 if (!STMT_VINFO_DATA_REF (stmt_info
))
5134 if (!STMT_VINFO_STRIDED_P (stmt_info
))
5137 tree_int_cst_compare (loop
&& nested_in_vect_loop_p (loop
, stmt
)
5138 ? STMT_VINFO_DR_STEP (stmt_info
) : DR_STEP (dr
),
5139 size_zero_node
) < 0;
5140 if (negative
&& ncopies
> 1)
5142 if (dump_enabled_p ())
5143 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5144 "multiple types with negative step.\n");
5149 gcc_assert (!grouped_store
);
5150 alignment_support_scheme
= vect_supportable_dr_alignment (dr
, false);
5151 if (alignment_support_scheme
!= dr_aligned
5152 && alignment_support_scheme
!= dr_unaligned_supported
)
5154 if (dump_enabled_p ())
5155 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5156 "negative step but alignment required.\n");
5159 if (dt
!= vect_constant_def
5160 && dt
!= vect_external_def
5161 && !perm_mask_for_reverse (vectype
))
5163 if (dump_enabled_p ())
5164 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5165 "negative step and reversing not supported.\n");
5171 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
))
5173 grouped_store
= true;
5174 first_stmt
= GROUP_FIRST_ELEMENT (stmt_info
);
5175 group_size
= GROUP_SIZE (vinfo_for_stmt (first_stmt
));
5177 && !PURE_SLP_STMT (stmt_info
)
5178 && !STMT_VINFO_STRIDED_P (stmt_info
))
5180 if (vect_store_lanes_supported (vectype
, group_size
))
5181 store_lanes_p
= true;
5182 else if (!vect_grouped_store_supported (vectype
, group_size
))
5186 if (STMT_VINFO_STRIDED_P (stmt_info
)
5187 && (slp
|| PURE_SLP_STMT (stmt_info
))
5188 && (group_size
> nunits
5189 || nunits
% group_size
!= 0))
5191 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5192 "unhandled strided group store\n");
5196 if (first_stmt
== stmt
)
5198 /* STMT is the leader of the group. Check the operands of all the
5199 stmts of the group. */
5200 next_stmt
= GROUP_NEXT_ELEMENT (stmt_info
);
5203 gcc_assert (gimple_assign_single_p (next_stmt
));
5204 op
= gimple_assign_rhs1 (next_stmt
);
5205 if (!vect_is_simple_use (op
, vinfo
, &def_stmt
, &dt
))
5207 if (dump_enabled_p ())
5208 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5209 "use not simple.\n");
5212 next_stmt
= GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt
));
5217 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info
))
5220 scatter_decl
= vect_check_gather_scatter (stmt
, loop_vinfo
, &scatter_base
,
5221 &scatter_off
, &scatter_scale
);
5222 gcc_assert (scatter_decl
);
5223 if (!vect_is_simple_use (scatter_off
, vinfo
, &def_stmt
, &scatter_idx_dt
,
5224 &scatter_off_vectype
))
5226 if (dump_enabled_p ())
5227 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5228 "scatter index use not simple.");
5233 if (!vec_stmt
) /* transformation not required. */
5235 STMT_VINFO_TYPE (stmt_info
) = store_vec_info_type
;
5236 /* The SLP costs are calculated during SLP analysis. */
5237 if (!PURE_SLP_STMT (stmt_info
))
5238 vect_model_store_cost (stmt_info
, ncopies
, store_lanes_p
, dt
,
5245 ensure_base_align (stmt_info
, dr
);
5247 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info
))
5249 tree vec_oprnd0
= NULL_TREE
, vec_oprnd1
= NULL_TREE
, op
, src
;
5250 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (scatter_decl
));
5251 tree rettype
, srctype
, ptrtype
, idxtype
, masktype
, scaletype
;
5252 tree ptr
, mask
, var
, scale
, perm_mask
= NULL_TREE
;
5253 edge pe
= loop_preheader_edge (loop
);
5256 enum { NARROW
, NONE
, WIDEN
} modifier
;
5257 int scatter_off_nunits
= TYPE_VECTOR_SUBPARTS (scatter_off_vectype
);
5259 if (nunits
== (unsigned int) scatter_off_nunits
)
5261 else if (nunits
== (unsigned int) scatter_off_nunits
/ 2)
5263 unsigned char *sel
= XALLOCAVEC (unsigned char, scatter_off_nunits
);
5266 for (i
= 0; i
< (unsigned int) scatter_off_nunits
; ++i
)
5267 sel
[i
] = i
| nunits
;
5269 perm_mask
= vect_gen_perm_mask_checked (scatter_off_vectype
, sel
);
5270 gcc_assert (perm_mask
!= NULL_TREE
);
5272 else if (nunits
== (unsigned int) scatter_off_nunits
* 2)
5274 unsigned char *sel
= XALLOCAVEC (unsigned char, nunits
);
5277 for (i
= 0; i
< (unsigned int) nunits
; ++i
)
5278 sel
[i
] = i
| scatter_off_nunits
;
5280 perm_mask
= vect_gen_perm_mask_checked (vectype
, sel
);
5281 gcc_assert (perm_mask
!= NULL_TREE
);
5287 rettype
= TREE_TYPE (TREE_TYPE (scatter_decl
));
5288 ptrtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
5289 masktype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
5290 idxtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
5291 srctype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
5292 scaletype
= TREE_VALUE (arglist
);
5294 gcc_checking_assert (TREE_CODE (masktype
) == INTEGER_TYPE
5295 && TREE_CODE (rettype
) == VOID_TYPE
);
5297 ptr
= fold_convert (ptrtype
, scatter_base
);
5298 if (!is_gimple_min_invariant (ptr
))
5300 ptr
= force_gimple_operand (ptr
, &seq
, true, NULL_TREE
);
5301 new_bb
= gsi_insert_seq_on_edge_immediate (pe
, seq
);
5302 gcc_assert (!new_bb
);
5305 /* Currently we support only unconditional scatter stores,
5306 so mask should be all ones. */
5307 mask
= build_int_cst (masktype
, -1);
5308 mask
= vect_init_vector (stmt
, mask
, masktype
, NULL
);
5310 scale
= build_int_cst (scaletype
, scatter_scale
);
5312 prev_stmt_info
= NULL
;
5313 for (j
= 0; j
< ncopies
; ++j
)
5318 = vect_get_vec_def_for_operand (gimple_assign_rhs1 (stmt
), stmt
);
5320 = vect_get_vec_def_for_operand (scatter_off
, stmt
);
5322 else if (modifier
!= NONE
&& (j
& 1))
5324 if (modifier
== WIDEN
)
5327 = vect_get_vec_def_for_stmt_copy (scatter_src_dt
, vec_oprnd1
);
5328 op
= permute_vec_elements (vec_oprnd0
, vec_oprnd0
, perm_mask
,
5331 else if (modifier
== NARROW
)
5333 src
= permute_vec_elements (vec_oprnd1
, vec_oprnd1
, perm_mask
,
5336 = vect_get_vec_def_for_stmt_copy (scatter_idx_dt
, vec_oprnd0
);
5344 = vect_get_vec_def_for_stmt_copy (scatter_src_dt
, vec_oprnd1
);
5346 = vect_get_vec_def_for_stmt_copy (scatter_idx_dt
, vec_oprnd0
);
5349 if (!useless_type_conversion_p (srctype
, TREE_TYPE (src
)))
5351 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src
))
5352 == TYPE_VECTOR_SUBPARTS (srctype
));
5353 var
= vect_get_new_vect_var (srctype
, vect_simple_var
, NULL
);
5354 var
= make_ssa_name (var
);
5355 src
= build1 (VIEW_CONVERT_EXPR
, srctype
, src
);
5356 new_stmt
= gimple_build_assign (var
, VIEW_CONVERT_EXPR
, src
);
5357 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
5361 if (!useless_type_conversion_p (idxtype
, TREE_TYPE (op
)))
5363 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op
))
5364 == TYPE_VECTOR_SUBPARTS (idxtype
));
5365 var
= vect_get_new_vect_var (idxtype
, vect_simple_var
, NULL
);
5366 var
= make_ssa_name (var
);
5367 op
= build1 (VIEW_CONVERT_EXPR
, idxtype
, op
);
5368 new_stmt
= gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
5369 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
5374 = gimple_build_call (scatter_decl
, 5, ptr
, mask
, op
, src
, scale
);
5376 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
5378 if (prev_stmt_info
== NULL
)
5379 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
5381 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
5382 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
5389 first_dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt
));
5390 group_size
= GROUP_SIZE (vinfo_for_stmt (first_stmt
));
5392 GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt
))++;
5395 gcc_assert (!loop
|| !nested_in_vect_loop_p (loop
, stmt
));
5397 /* We vectorize all the stmts of the interleaving group when we
5398 reach the last stmt in the group. */
5399 if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt
))
5400 < GROUP_SIZE (vinfo_for_stmt (first_stmt
))
5409 grouped_store
= false;
5410 /* VEC_NUM is the number of vect stmts to be created for this
5412 vec_num
= SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node
);
5413 first_stmt
= SLP_TREE_SCALAR_STMTS (slp_node
)[0];
5414 first_dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt
));
5415 op
= gimple_assign_rhs1 (first_stmt
);
5418 /* VEC_NUM is the number of vect stmts to be created for this
5420 vec_num
= group_size
;
5426 group_size
= vec_num
= 1;
5429 if (dump_enabled_p ())
5430 dump_printf_loc (MSG_NOTE
, vect_location
,
5431 "transform store. ncopies = %d\n", ncopies
);
5433 if (STMT_VINFO_STRIDED_P (stmt_info
))
5435 gimple_stmt_iterator incr_gsi
;
5441 gimple_seq stmts
= NULL
;
5442 tree stride_base
, stride_step
, alias_off
;
5446 gcc_assert (!nested_in_vect_loop_p (loop
, stmt
));
5449 = fold_build_pointer_plus
5450 (unshare_expr (DR_BASE_ADDRESS (first_dr
)),
5451 size_binop (PLUS_EXPR
,
5452 convert_to_ptrofftype (unshare_expr (DR_OFFSET (first_dr
))),
5453 convert_to_ptrofftype (DR_INIT(first_dr
))));
5454 stride_step
= fold_convert (sizetype
, unshare_expr (DR_STEP (first_dr
)));
      /* For a store with loop-invariant (but other than power-of-2)
	 stride (i.e. not a grouped access) like so:

	   for (i = 0; i < n; i += stride)
	     array[i] = ...;

	 we generate a new induction variable and new stores from
	 the components of the (vectorized) rhs:

	   for (j = 0; ; j += VF*stride)
	     tmp = ...;
	     tmp2 = ...;
	     ...
	     array[j] = tmp;
	     array[j + stride] = tmp2;
	     ...
	 */
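      /* Hedged concrete instance: with VF == 4 the vectorized rhs is split
	 into four pieces with BIT_FIELD_REF and stored at running_off,
	 running_off + stride, running_off + 2*stride, ...; that is what the
	 NSTORES loop below generates.  */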
5474 unsigned nstores
= nunits
;
5475 tree ltype
= elem_type
;
5478 nstores
= nunits
/ group_size
;
5479 if (group_size
< nunits
)
5480 ltype
= build_vector_type (elem_type
, group_size
);
5483 ltype
= build_aligned_type (ltype
, TYPE_ALIGN (elem_type
));
5484 ncopies
= SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node
);
5488 ivstep
= stride_step
;
5489 ivstep
= fold_build2 (MULT_EXPR
, TREE_TYPE (ivstep
), ivstep
,
5490 build_int_cst (TREE_TYPE (ivstep
),
5491 ncopies
* nstores
));
5493 standard_iv_increment_position (loop
, &incr_gsi
, &insert_after
);
5495 create_iv (stride_base
, ivstep
, NULL
,
5496 loop
, &incr_gsi
, insert_after
,
5498 incr
= gsi_stmt (incr_gsi
);
5499 set_vinfo_for_stmt (incr
, new_stmt_vec_info (incr
, loop_vinfo
));
5501 stride_step
= force_gimple_operand (stride_step
, &stmts
, true, NULL_TREE
);
5503 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop
), stmts
);
5505 prev_stmt_info
= NULL
;
5506 alias_off
= build_int_cst (reference_alias_ptr_type (DR_REF (first_dr
)), 0);
5507 next_stmt
= first_stmt
;
5508 for (g
= 0; g
< group_size
; g
++)
5510 running_off
= offvar
;
5513 tree size
= TYPE_SIZE_UNIT (ltype
);
5514 tree pos
= fold_build2 (MULT_EXPR
, sizetype
, size_int (g
),
5516 tree newoff
= copy_ssa_name (running_off
, NULL
);
5517 incr
= gimple_build_assign (newoff
, POINTER_PLUS_EXPR
,
5519 vect_finish_stmt_generation (stmt
, incr
, gsi
);
5520 running_off
= newoff
;
5522 for (j
= 0; j
< ncopies
; j
++)
5524 /* We've set op and dt above, from gimple_assign_rhs1(stmt),
5525 and first_stmt == stmt. */
5530 vect_get_vec_defs (op
, NULL_TREE
, stmt
, &vec_oprnds
, NULL
,
5532 vec_oprnd
= vec_oprnds
[0];
5536 gcc_assert (gimple_assign_single_p (next_stmt
));
5537 op
= gimple_assign_rhs1 (next_stmt
);
5538 vec_oprnd
= vect_get_vec_def_for_operand (op
, next_stmt
);
5544 vec_oprnd
= vec_oprnds
[j
];
5547 vect_is_simple_use (vec_oprnd
, vinfo
, &def_stmt
, &dt
);
5548 vec_oprnd
= vect_get_vec_def_for_stmt_copy (dt
, vec_oprnd
);
5552 for (i
= 0; i
< nstores
; i
++)
5554 tree newref
, newoff
;
5555 gimple
*incr
, *assign
;
5556 tree size
= TYPE_SIZE (ltype
);
5557 /* Extract the i'th component. */
5558 tree pos
= fold_build2 (MULT_EXPR
, bitsizetype
,
5559 bitsize_int (i
), size
);
5560 tree elem
= fold_build3 (BIT_FIELD_REF
, ltype
, vec_oprnd
,
5563 elem
= force_gimple_operand_gsi (gsi
, elem
, true,
5567 newref
= build2 (MEM_REF
, ltype
,
5568 running_off
, alias_off
);
5570 /* And store it to *running_off. */
5571 assign
= gimple_build_assign (newref
, elem
);
5572 vect_finish_stmt_generation (stmt
, assign
, gsi
);
5574 newoff
= copy_ssa_name (running_off
, NULL
);
5575 incr
= gimple_build_assign (newoff
, POINTER_PLUS_EXPR
,
5576 running_off
, stride_step
);
5577 vect_finish_stmt_generation (stmt
, incr
, gsi
);
5579 running_off
= newoff
;
5580 if (g
== group_size
- 1
5583 if (j
== 0 && i
== 0)
5584 STMT_VINFO_VEC_STMT (stmt_info
)
5585 = *vec_stmt
= assign
;
5587 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = assign
;
5588 prev_stmt_info
= vinfo_for_stmt (assign
);
5592 next_stmt
= GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt
));
5597 dr_chain
.create (group_size
);
5598 oprnds
.create (group_size
);
5600 alignment_support_scheme
= vect_supportable_dr_alignment (first_dr
, false);
5601 gcc_assert (alignment_support_scheme
);
5602 /* Targets with store-lane instructions must not require explicit
5604 gcc_assert (!store_lanes_p
5605 || alignment_support_scheme
== dr_aligned
5606 || alignment_support_scheme
== dr_unaligned_supported
);
5609 offset
= size_int (-TYPE_VECTOR_SUBPARTS (vectype
) + 1);
5612 aggr_type
= build_array_type_nelts (elem_type
, vec_num
* nunits
);
5614 aggr_type
= vectype
;
  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation in
     vect_get_vec_def_for_copy_stmt.  */

  /* In case of interleaving (non-unit grouped access):

        S1:  &base + 2 = x2
        S2:  &base = x0
        S3:  &base + 1 = x1
        S4:  &base + 3 = x3

     We create vectorized stores starting from base address (the access of the
     first stmt in the chain (S2 in the above example), when the last store stmt
     of the chain (S4) is reached:

        VS1: &base = vx2
        VS2: &base + vec_size*1 = vx0
        VS3: &base + vec_size*2 = vx1
        VS4: &base + vec_size*3 = vx3

     Then permutation statements are generated:

        VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
        VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
        ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_store_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_store_chain()).

     In case of both multiple types and interleaving, above vector stores and
     permutation stmts are created for every copy.  The result vector stmts are
     put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
     STMT_VINFO_RELATED_STMT for the next copies.
  */
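  /* Hedged source-level example of such an interleaved (grouped) store:

       for (i = 0; i < n; i++)
	 {
	   s[2*i]     = a[i];
	   s[2*i + 1] = b[i];
	 }

     both element stores belong to one chain of GROUP_SIZE 2; the vectorized
     defs of a[] and b[] are interleaved by vect_permute_store_chain and then
     written out with unit-stride vector stores.  */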
5655 prev_stmt_info
= NULL
;
5656 for (j
= 0; j
< ncopies
; j
++)
5663 /* Get vectorized arguments for SLP_NODE. */
5664 vect_get_vec_defs (op
, NULL_TREE
, stmt
, &vec_oprnds
,
5665 NULL
, slp_node
, -1);
5667 vec_oprnd
= vec_oprnds
[0];
5671 /* For interleaved stores we collect vectorized defs for all the
5672 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
5673 used as an input to vect_permute_store_chain(), and OPRNDS as
5674 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
5676 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
5677 OPRNDS are of size 1. */
5678 next_stmt
= first_stmt
;
5679 for (i
= 0; i
< group_size
; i
++)
5681 /* Since gaps are not supported for interleaved stores,
5682 GROUP_SIZE is the exact number of stmts in the chain.
5683 Therefore, NEXT_STMT can't be NULL_TREE. In case that
5684 there is no interleaving, GROUP_SIZE is 1, and only one
5685 iteration of the loop will be executed. */
5686 gcc_assert (next_stmt
5687 && gimple_assign_single_p (next_stmt
));
5688 op
= gimple_assign_rhs1 (next_stmt
);
5690 vec_oprnd
= vect_get_vec_def_for_operand (op
, next_stmt
);
5691 dr_chain
.quick_push (vec_oprnd
);
5692 oprnds
.quick_push (vec_oprnd
);
5693 next_stmt
= GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt
));
	  /* We should have caught mismatched types earlier.  */
5698 gcc_assert (useless_type_conversion_p (vectype
,
5699 TREE_TYPE (vec_oprnd
)));
5700 bool simd_lane_access_p
5701 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info
);
5702 if (simd_lane_access_p
5703 && TREE_CODE (DR_BASE_ADDRESS (first_dr
)) == ADDR_EXPR
5704 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr
), 0))
5705 && integer_zerop (DR_OFFSET (first_dr
))
5706 && integer_zerop (DR_INIT (first_dr
))
5707 && alias_sets_conflict_p (get_alias_set (aggr_type
),
5708 get_alias_set (DR_REF (first_dr
))))
5710 dataref_ptr
= unshare_expr (DR_BASE_ADDRESS (first_dr
));
5711 dataref_offset
= build_int_cst (reference_alias_ptr_type
5712 (DR_REF (first_dr
)), 0);
5717 = vect_create_data_ref_ptr (first_stmt
, aggr_type
,
5718 simd_lane_access_p
? loop
: NULL
,
5719 offset
, &dummy
, gsi
, &ptr_incr
,
5720 simd_lane_access_p
, &inv_p
);
5721 gcc_assert (bb_vinfo
|| !inv_p
);
5725 /* For interleaved stores we created vectorized defs for all the
5726 defs stored in OPRNDS in the previous iteration (previous copy).
5727 DR_CHAIN is then used as an input to vect_permute_store_chain(),
5728 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
5730 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
5731 OPRNDS are of size 1. */
5732 for (i
= 0; i
< group_size
; i
++)
5735 vect_is_simple_use (op
, vinfo
, &def_stmt
, &dt
);
5736 vec_oprnd
= vect_get_vec_def_for_stmt_copy (dt
, op
);
5737 dr_chain
[i
] = vec_oprnd
;
5738 oprnds
[i
] = vec_oprnd
;
5742 = int_const_binop (PLUS_EXPR
, dataref_offset
,
5743 TYPE_SIZE_UNIT (aggr_type
));
5745 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
, stmt
,
5746 TYPE_SIZE_UNIT (aggr_type
));
5753 /* Combine all the vectors into an array. */
5754 vec_array
= create_vector_array (vectype
, vec_num
);
5755 for (i
= 0; i
< vec_num
; i
++)
5757 vec_oprnd
= dr_chain
[i
];
5758 write_vector_array (stmt
, gsi
, vec_oprnd
, vec_array
, i
);
5762 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
5763 data_ref
= create_array_ref (aggr_type
, dataref_ptr
, first_dr
);
5764 new_stmt
= gimple_build_call_internal (IFN_STORE_LANES
, 1, vec_array
);
5765 gimple_call_set_lhs (new_stmt
, data_ref
);
5766 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
5774 result_chain
.create (group_size
);
5776 vect_permute_store_chain (dr_chain
, group_size
, stmt
, gsi
,
5780 next_stmt
= first_stmt
;
5781 for (i
= 0; i
< vec_num
; i
++)
5783 unsigned align
, misalign
;
5786 /* Bump the vector pointer. */
5787 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
,
5791 vec_oprnd
= vec_oprnds
[i
];
5792 else if (grouped_store
)
5793 /* For grouped stores vectorized defs are interleaved in
5794 vect_permute_store_chain(). */
5795 vec_oprnd
= result_chain
[i
];
5797 data_ref
= fold_build2 (MEM_REF
, TREE_TYPE (vec_oprnd
),
5801 : build_int_cst (reference_alias_ptr_type
5802 (DR_REF (first_dr
)), 0));
5803 align
= TYPE_ALIGN_UNIT (vectype
);
5804 if (aligned_access_p (first_dr
))
5806 else if (DR_MISALIGNMENT (first_dr
) == -1)
5808 if (DR_VECT_AUX (first_dr
)->base_element_aligned
)
5809 align
= TYPE_ALIGN_UNIT (elem_type
);
5811 align
= get_object_alignment (DR_REF (first_dr
))
5814 TREE_TYPE (data_ref
)
5815 = build_aligned_type (TREE_TYPE (data_ref
),
5816 align
* BITS_PER_UNIT
);
5820 TREE_TYPE (data_ref
)
5821 = build_aligned_type (TREE_TYPE (data_ref
),
5822 TYPE_ALIGN (elem_type
));
5823 misalign
= DR_MISALIGNMENT (first_dr
);
5825 if (dataref_offset
== NULL_TREE
5826 && TREE_CODE (dataref_ptr
) == SSA_NAME
)
5827 set_ptr_info_alignment (get_ptr_info (dataref_ptr
), align
,
5831 && dt
!= vect_constant_def
5832 && dt
!= vect_external_def
)
5834 tree perm_mask
= perm_mask_for_reverse (vectype
);
5836 = vect_create_destination_var (gimple_assign_rhs1 (stmt
),
5838 tree new_temp
= make_ssa_name (perm_dest
);
5840 /* Generate the permute statement. */
5842 = gimple_build_assign (new_temp
, VEC_PERM_EXPR
, vec_oprnd
,
5843 vec_oprnd
, perm_mask
);
5844 vect_finish_stmt_generation (stmt
, perm_stmt
, gsi
);
5846 perm_stmt
= SSA_NAME_DEF_STMT (new_temp
);
5847 vec_oprnd
= new_temp
;
5850 /* Arguments are ready. Create the new vector stmt. */
5851 new_stmt
= gimple_build_assign (data_ref
, vec_oprnd
);
5852 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
5857 next_stmt
= GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt
));
5865 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
5867 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
5868 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
5872 dr_chain
.release ();
5874 result_chain
.release ();
5875 vec_oprnds
.release ();
/* Given a vector type VECTYPE, turns permutation SEL into the equivalent
   VECTOR_CST mask.  No checks are made that the target platform supports the
   mask, so callers may wish to test can_vec_perm_p separately, or use
   vect_gen_perm_mask_checked.  */

tree
vect_gen_perm_mask_any (tree vectype, const unsigned char *sel)
{
  tree mask_elt_type, mask_type, mask_vec, *mask_elts;
  int i, nunits;

  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  mask_elt_type = lang_hooks.types.type_for_mode
		    (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
  mask_type = get_vectype_for_scalar_type (mask_elt_type);

  mask_elts = XALLOCAVEC (tree, nunits);
  for (i = nunits - 1; i >= 0; i--)
    mask_elts[i] = build_int_cst (mask_elt_type, sel[i]);
  mask_vec = build_vector (mask_type, mask_elts);

  return mask_vec;
}
/* Checked version of vect_gen_perm_mask_any.  Asserts can_vec_perm_p,
   i.e. that the target supports the pattern _for arbitrary input vectors_.  */

tree
vect_gen_perm_mask_checked (tree vectype, const unsigned char *sel)
{
  gcc_assert (can_vec_perm_p (TYPE_MODE (vectype), false, sel));
  return vect_gen_perm_mask_any (vectype, sel);
}
/* Given a vector variable X and Y, that was generated for the scalar
   STMT, generate instructions to permute the vector elements of X and Y
   using permutation mask MASK_VEC, insert them at *GSI and return the
   permuted vector variable.  */

static tree
permute_vec_elements (tree x, tree y, tree mask_vec, gimple *stmt,
		      gimple_stmt_iterator *gsi)
{
  tree vectype = TREE_TYPE (x);
  tree perm_dest, data_ref;
  gimple *perm_stmt;

  perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
  data_ref = make_ssa_name (perm_dest);

  /* Generate the permute statement.  */
  perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
  vect_finish_stmt_generation (stmt, perm_stmt, gsi);

  return data_ref;
}
/* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
   inserting them on the loops preheader edge.  Returns true if we
   were successful in doing so (and thus STMT can be moved then),
   otherwise returns false.  */

static bool
hoist_defs_of_uses (gimple *stmt, struct loop *loop)
{
  ssa_op_iter i;
  tree op;
  bool any = false;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
	  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
	{
	  /* Make sure we don't need to recurse.  While we could do
	     so in simple cases when there are more complex use webs
	     we don't have an easy way to preserve stmt order to fulfil
	     dependencies within them.  */
	  tree op2;
	  ssa_op_iter i2;
	  if (gimple_code (def_stmt) == GIMPLE_PHI)
	    return false;
	  FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
	    {
	      gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
	      if (!gimple_nop_p (def_stmt2)
		  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
		return false;
	    }
	  any = true;
	}
    }

  if (!any)
    return true;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
	  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
	{
	  gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
	  gsi_remove (&gsi, false);
	  gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
	}
    }

  return true;
}
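/* Hedged example of what hoist_defs_of_uses enables: for an invariant load

     loop:
       addr_1 = &a[off_2];    <-- the only definition feeding STMT
       x_3 = *addr_1;         <-- STMT considered for hoisting

   the definition of addr_1 is moved to the loop preheader, so that the load
   itself can subsequently be moved out of the loop as well.  */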
/* vectorizable_load.

   Check if STMT reads a non scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
6002 vectorizable_load (gimple
*stmt
, gimple_stmt_iterator
*gsi
, gimple
**vec_stmt
,
6003 slp_tree slp_node
, slp_instance slp_node_instance
)
6006 tree vec_dest
= NULL
;
6007 tree data_ref
= NULL
;
6008 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
6009 stmt_vec_info prev_stmt_info
;
6010 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
6011 struct loop
*loop
= NULL
;
6012 struct loop
*containing_loop
= (gimple_bb (stmt
))->loop_father
;
6013 bool nested_in_vect_loop
= false;
6014 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
), *first_dr
= NULL
;
6018 gimple
*new_stmt
= NULL
;
6020 enum dr_alignment_support alignment_support_scheme
;
6021 tree dataref_ptr
= NULL_TREE
;
6022 tree dataref_offset
= NULL_TREE
;
6023 gimple
*ptr_incr
= NULL
;
6025 int i
, j
, group_size
= -1, group_gap_adj
;
6026 tree msq
= NULL_TREE
, lsq
;
6027 tree offset
= NULL_TREE
;
6028 tree byte_offset
= NULL_TREE
;
6029 tree realignment_token
= NULL_TREE
;
6031 vec
<tree
> dr_chain
= vNULL
;
6032 bool grouped_load
= false;
6033 bool load_lanes_p
= false;
6036 bool negative
= false;
6037 bool compute_in_loop
= false;
6038 struct loop
*at_loop
;
6040 bool slp
= (slp_node
!= NULL
);
6041 bool slp_perm
= false;
6042 enum tree_code code
;
6043 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
6046 tree gather_base
= NULL_TREE
, gather_off
= NULL_TREE
;
6047 tree gather_off_vectype
= NULL_TREE
, gather_decl
= NULL_TREE
;
6048 int gather_scale
= 1;
6049 enum vect_def_type gather_dt
= vect_unknown_def_type
;
6050 vec_info
*vinfo
= stmt_info
->vinfo
;
6052 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
6055 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
6058 /* Is vectorizable load? */
6059 if (!is_gimple_assign (stmt
))
6062 scalar_dest
= gimple_assign_lhs (stmt
);
6063 if (TREE_CODE (scalar_dest
) != SSA_NAME
)
6066 code
= gimple_assign_rhs_code (stmt
);
6067 if (code
!= ARRAY_REF
6068 && code
!= BIT_FIELD_REF
6069 && code
!= INDIRECT_REF
6070 && code
!= COMPONENT_REF
6071 && code
!= IMAGPART_EXPR
6072 && code
!= REALPART_EXPR
6074 && TREE_CODE_CLASS (code
) != tcc_declaration
)
6077 if (!STMT_VINFO_DATA_REF (stmt_info
))
6080 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
6081 int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
6085 loop
= LOOP_VINFO_LOOP (loop_vinfo
);
6086 nested_in_vect_loop
= nested_in_vect_loop_p (loop
, stmt
);
6087 vf
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
6092 /* Multiple types in SLP are handled by creating the appropriate number of
6093 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
6095 if (slp
|| PURE_SLP_STMT (stmt_info
))
6098 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
6100 gcc_assert (ncopies
>= 1);
6102 /* FORNOW. This restriction should be relaxed. */
6103 if (nested_in_vect_loop
&& ncopies
> 1)
6105 if (dump_enabled_p ())
6106 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6107 "multiple types in nested loop.\n");
6111 /* Invalidate assumptions made by dependence analysis when vectorization
6112 on the unrolled body effectively re-orders stmts. */
6114 && STMT_VINFO_MIN_NEG_DIST (stmt_info
) != 0
6115 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo
)
6116 > STMT_VINFO_MIN_NEG_DIST (stmt_info
)))
6118 if (dump_enabled_p ())
6119 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6120 "cannot perform implicit CSE when unrolling "
6121 "with negative dependence distance\n");
6125 elem_type
= TREE_TYPE (vectype
);
6126 mode
= TYPE_MODE (vectype
);
6128 /* FORNOW. In some cases can vectorize even if data-type not supported
6129 (e.g. - data copies). */
6130 if (optab_handler (mov_optab
, mode
) == CODE_FOR_nothing
)
6132 if (dump_enabled_p ())
6133 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6134 "Aligned load, but unsupported type.\n");
6138 /* Check if the load is a part of an interleaving chain. */
6139 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
))
6141 grouped_load
= true;
6143 gcc_assert (!nested_in_vect_loop
&& !STMT_VINFO_GATHER_SCATTER_P (stmt_info
));
6145 first_stmt
= GROUP_FIRST_ELEMENT (stmt_info
);
      /* If this is single-element interleaving with an element distance
	 that leaves unused vector loads around punt - we at least create
	 very sub-optimal code in that case (and blow up memory,
	 too).  */
6151 if (first_stmt
== stmt
6152 && !GROUP_NEXT_ELEMENT (stmt_info
)
6153 && GROUP_SIZE (stmt_info
) > TYPE_VECTOR_SUBPARTS (vectype
))
6155 if (dump_enabled_p ())
6156 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6157 "single-element interleaving not supported "
6158 "for not adjacent vector loads\n");
6162 if (slp
&& SLP_TREE_LOAD_PERMUTATION (slp_node
).exists ())
6165 group_size
= GROUP_SIZE (vinfo_for_stmt (first_stmt
));
6167 && !PURE_SLP_STMT (stmt_info
)
6168 && !STMT_VINFO_STRIDED_P (stmt_info
))
6170 if (vect_load_lanes_supported (vectype
, group_size
))
6171 load_lanes_p
= true;
6172 else if (!vect_grouped_load_supported (vectype
, group_size
))
6176 /* Invalidate assumptions made by dependence analysis when vectorization
6177 on the unrolled body effectively re-orders stmts. */
6178 if (!PURE_SLP_STMT (stmt_info
)
6179 && STMT_VINFO_MIN_NEG_DIST (stmt_info
) != 0
6180 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo
)
6181 > STMT_VINFO_MIN_NEG_DIST (stmt_info
)))
6183 if (dump_enabled_p ())
6184 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6185 "cannot perform implicit CSE when performing "
6186 "group loads with negative dependence distance\n");
6190 /* Similarly when the stmt is a load that is both part of a SLP
6191 instance and a loop vectorized stmt via the same-dr mechanism
6192 we have to give up. */
6193 if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info
)
6194 && (STMT_SLP_TYPE (stmt_info
)
6195 != STMT_SLP_TYPE (vinfo_for_stmt
6196 (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info
)))))
6198 if (dump_enabled_p ())
6199 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6200 "conflicting SLP types for CSEd load\n");
6206 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info
))
6209 gather_decl
= vect_check_gather_scatter (stmt
, loop_vinfo
, &gather_base
,
6210 &gather_off
, &gather_scale
);
6211 gcc_assert (gather_decl
);
6212 if (!vect_is_simple_use (gather_off
, vinfo
, &def_stmt
, &gather_dt
,
6213 &gather_off_vectype
))
6215 if (dump_enabled_p ())
6216 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6217 "gather index use not simple.\n");
6221 else if (STMT_VINFO_STRIDED_P (stmt_info
))
6224 && (slp
|| PURE_SLP_STMT (stmt_info
)))
6225 && (group_size
> nunits
6226 || nunits
% group_size
!= 0))
6228 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6229 "unhandled strided group load\n");
  else
    {
      negative = tree_int_cst_compare (nested_in_vect_loop
                                       ? STMT_VINFO_DR_STEP (stmt_info)
                                       : DR_STEP (dr),
                                       size_zero_node) < 0;
      if (negative && ncopies > 1)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "multiple types with negative step.\n");
          return false;
        }

      if (negative)
        {
          if (grouped_load)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "negative step for group load not supported"
                                 "\n");
              return false;
            }

          alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
          if (alignment_support_scheme != dr_aligned
              && alignment_support_scheme != dr_unaligned_supported)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "negative step but alignment required.\n");
              return false;
            }

          if (!perm_mask_for_reverse (vectype))
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "negative step and reversing not supported."
                                 "\n");
              return false;
            }
        }
    }
  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
      /* The SLP costs are calculated during SLP analysis.  */
      if (!PURE_SLP_STMT (stmt_info))
        vect_model_load_cost (stmt_info, ncopies, load_lanes_p,
                              NULL, NULL, NULL);
      return true;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform load. ncopies = %d\n", ncopies);

  ensure_base_align (stmt_info, dr);
6295 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info
))
6297 tree vec_oprnd0
= NULL_TREE
, op
;
6298 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gather_decl
));
6299 tree rettype
, srctype
, ptrtype
, idxtype
, masktype
, scaletype
;
6300 tree ptr
, mask
, var
, scale
, merge
, perm_mask
= NULL_TREE
, prev_res
= NULL_TREE
;
6301 edge pe
= loop_preheader_edge (loop
);
6304 enum { NARROW
, NONE
, WIDEN
} modifier
;
6305 int gather_off_nunits
= TYPE_VECTOR_SUBPARTS (gather_off_vectype
);
6307 if (nunits
== gather_off_nunits
)
6309 else if (nunits
== gather_off_nunits
/ 2)
6311 unsigned char *sel
= XALLOCAVEC (unsigned char, gather_off_nunits
);
6314 for (i
= 0; i
< gather_off_nunits
; ++i
)
6315 sel
[i
] = i
| nunits
;
6317 perm_mask
= vect_gen_perm_mask_checked (gather_off_vectype
, sel
);
6319 else if (nunits
== gather_off_nunits
* 2)
6321 unsigned char *sel
= XALLOCAVEC (unsigned char, nunits
);
6324 for (i
= 0; i
< nunits
; ++i
)
6325 sel
[i
] = i
< gather_off_nunits
6326 ? i
: i
+ nunits
- gather_off_nunits
;
6328 perm_mask
= vect_gen_perm_mask_checked (vectype
, sel
);
6334 rettype
= TREE_TYPE (TREE_TYPE (gather_decl
));
6335 srctype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
6336 ptrtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
6337 idxtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
6338 masktype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
6339 scaletype
= TREE_VALUE (arglist
);
6340 gcc_checking_assert (types_compatible_p (srctype
, rettype
));
6342 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
6344 ptr
= fold_convert (ptrtype
, gather_base
);
6345 if (!is_gimple_min_invariant (ptr
))
6347 ptr
= force_gimple_operand (ptr
, &seq
, true, NULL_TREE
);
6348 new_bb
= gsi_insert_seq_on_edge_immediate (pe
, seq
);
6349 gcc_assert (!new_bb
);
6352 /* Currently we support only unconditional gather loads,
6353 so mask should be all ones. */
6354 if (TREE_CODE (masktype
) == INTEGER_TYPE
)
6355 mask
= build_int_cst (masktype
, -1);
6356 else if (TREE_CODE (TREE_TYPE (masktype
)) == INTEGER_TYPE
)
6358 mask
= build_int_cst (TREE_TYPE (masktype
), -1);
6359 mask
= build_vector_from_val (masktype
, mask
);
6360 mask
= vect_init_vector (stmt
, mask
, masktype
, NULL
);
6362 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype
)))
6366 for (j
= 0; j
< 6; ++j
)
6368 real_from_target (&r
, tmp
, TYPE_MODE (TREE_TYPE (masktype
)));
6369 mask
= build_real (TREE_TYPE (masktype
), r
);
6370 mask
= build_vector_from_val (masktype
, mask
);
6371 mask
= vect_init_vector (stmt
, mask
, masktype
, NULL
);
6376 scale
= build_int_cst (scaletype
, gather_scale
);
6378 if (TREE_CODE (TREE_TYPE (rettype
)) == INTEGER_TYPE
)
6379 merge
= build_int_cst (TREE_TYPE (rettype
), 0);
6380 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype
)))
6384 for (j
= 0; j
< 6; ++j
)
6386 real_from_target (&r
, tmp
, TYPE_MODE (TREE_TYPE (rettype
)));
6387 merge
= build_real (TREE_TYPE (rettype
), r
);
6391 merge
= build_vector_from_val (rettype
, merge
);
6392 merge
= vect_init_vector (stmt
, merge
, rettype
, NULL
);
6394 prev_stmt_info
= NULL
;
6395 for (j
= 0; j
< ncopies
; ++j
)
6397 if (modifier
== WIDEN
&& (j
& 1))
6398 op
= permute_vec_elements (vec_oprnd0
, vec_oprnd0
,
6399 perm_mask
, stmt
, gsi
);
6402 = vect_get_vec_def_for_operand (gather_off
, stmt
);
6405 = vect_get_vec_def_for_stmt_copy (gather_dt
, vec_oprnd0
);
6407 if (!useless_type_conversion_p (idxtype
, TREE_TYPE (op
)))
6409 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op
))
6410 == TYPE_VECTOR_SUBPARTS (idxtype
));
6411 var
= vect_get_new_vect_var (idxtype
, vect_simple_var
, NULL
);
6412 var
= make_ssa_name (var
);
6413 op
= build1 (VIEW_CONVERT_EXPR
, idxtype
, op
);
6415 = gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
6416 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
6421 = gimple_build_call (gather_decl
, 5, merge
, ptr
, op
, mask
, scale
);
6423 if (!useless_type_conversion_p (vectype
, rettype
))
6425 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype
)
6426 == TYPE_VECTOR_SUBPARTS (rettype
));
6427 var
= vect_get_new_vect_var (rettype
, vect_simple_var
, NULL
);
6428 op
= make_ssa_name (var
, new_stmt
);
6429 gimple_call_set_lhs (new_stmt
, op
);
6430 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
6431 var
= make_ssa_name (vec_dest
);
6432 op
= build1 (VIEW_CONVERT_EXPR
, vectype
, op
);
6434 = gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
6438 var
= make_ssa_name (vec_dest
, new_stmt
);
6439 gimple_call_set_lhs (new_stmt
, var
);
6442 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
6444 if (modifier
== NARROW
)
6451 var
= permute_vec_elements (prev_res
, var
,
6452 perm_mask
, stmt
, gsi
);
6453 new_stmt
= SSA_NAME_DEF_STMT (var
);
6456 if (prev_stmt_info
== NULL
)
6457 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
6459 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
6460 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
6464 else if (STMT_VINFO_STRIDED_P (stmt_info
))
6466 gimple_stmt_iterator incr_gsi
;
6472 vec
<constructor_elt
, va_gc
> *v
= NULL
;
6473 gimple_seq stmts
= NULL
;
6474 tree stride_base
, stride_step
, alias_off
;
6476 gcc_assert (!nested_in_vect_loop
);
6478 if (slp
&& grouped_load
)
6479 first_dr
= STMT_VINFO_DATA_REF
6480 (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info
)));
6485 = fold_build_pointer_plus
6486 (DR_BASE_ADDRESS (first_dr
),
6487 size_binop (PLUS_EXPR
,
6488 convert_to_ptrofftype (DR_OFFSET (first_dr
)),
6489 convert_to_ptrofftype (DR_INIT (first_dr
))));
6490 stride_step
= fold_convert (sizetype
, DR_STEP (first_dr
));
      /* For a load with loop-invariant (but other than power-of-2)
         stride (i.e. not a grouped access) like so:

           for (i = 0; i < n; i += stride)
             ... = array[i];

         we generate a new induction variable and new accesses to
         form a new vector (or vectors, depending on ncopies):

           for (j = 0; ; j += VF*stride)
             tmp1 = array[j];
             tmp2 = array[j + stride];
             ...
             vectemp = {tmp1, tmp2, ...}
             ...  */
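      /* An illustrative sketch (not GCC code): with VF = 4 and a runtime
         stride, the scalar loop

           for (i = 0; i < n; i += stride)
             sum += array[i];

         is conceptually rewritten so that each vector iteration gathers
         four consecutive strided elements through the pointer IV built
         below:

           for (j = 0; j < n; j += 4 * stride)
             {
               tmp0 = array[j];
               tmp1 = array[j + stride];
               tmp2 = array[j + 2 * stride];
               tmp3 = array[j + 3 * stride];
               vectemp = {tmp0, tmp1, tmp2, tmp3};
               ...
             }  */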
6508 ivstep
= fold_build2 (MULT_EXPR
, TREE_TYPE (stride_step
), stride_step
,
6509 build_int_cst (TREE_TYPE (stride_step
), vf
));
6511 standard_iv_increment_position (loop
, &incr_gsi
, &insert_after
);
6513 create_iv (unshare_expr (stride_base
), unshare_expr (ivstep
), NULL
,
6514 loop
, &incr_gsi
, insert_after
,
6516 incr
= gsi_stmt (incr_gsi
);
6517 set_vinfo_for_stmt (incr
, new_stmt_vec_info (incr
, loop_vinfo
));
6519 stride_step
= force_gimple_operand (unshare_expr (stride_step
),
6520 &stmts
, true, NULL_TREE
);
6522 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop
), stmts
);
6524 prev_stmt_info
= NULL
;
6525 running_off
= offvar
;
6526 alias_off
= build_int_cst (reference_alias_ptr_type (DR_REF (first_dr
)), 0);
6527 int nloads
= nunits
;
6528 tree ltype
= TREE_TYPE (vectype
);
6529 auto_vec
<tree
> dr_chain
;
6532 nloads
= nunits
/ group_size
;
6533 if (group_size
< nunits
)
6534 ltype
= build_vector_type (TREE_TYPE (vectype
), group_size
);
6537 ltype
= build_aligned_type (ltype
, TYPE_ALIGN (TREE_TYPE (vectype
)));
6538 ncopies
= SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node
);
6540 dr_chain
.create (ncopies
);
6542 for (j
= 0; j
< ncopies
; j
++)
6548 vec_alloc (v
, nloads
);
6549 for (i
= 0; i
< nloads
; i
++)
6551 tree newref
, newoff
;
6553 newref
= build2 (MEM_REF
, ltype
, running_off
, alias_off
);
6555 newref
= force_gimple_operand_gsi (gsi
, newref
, true,
6558 CONSTRUCTOR_APPEND_ELT (v
, NULL_TREE
, newref
);
6559 newoff
= copy_ssa_name (running_off
);
6560 incr
= gimple_build_assign (newoff
, POINTER_PLUS_EXPR
,
6561 running_off
, stride_step
);
6562 vect_finish_stmt_generation (stmt
, incr
, gsi
);
6564 running_off
= newoff
;
6567 vec_inv
= build_constructor (vectype
, v
);
6568 new_temp
= vect_init_vector (stmt
, vec_inv
, vectype
, gsi
);
6569 new_stmt
= SSA_NAME_DEF_STMT (new_temp
);
6573 new_stmt
= gimple_build_assign (make_ssa_name (ltype
),
6574 build2 (MEM_REF
, ltype
,
6575 running_off
, alias_off
));
6576 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
6578 tree newoff
= copy_ssa_name (running_off
);
6579 gimple
*incr
= gimple_build_assign (newoff
, POINTER_PLUS_EXPR
,
6580 running_off
, stride_step
);
6581 vect_finish_stmt_generation (stmt
, incr
, gsi
);
6583 running_off
= newoff
;
6588 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
6590 dr_chain
.quick_push (gimple_assign_lhs (new_stmt
));
6595 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
6597 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
6598 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
6602 vect_transform_slp_perm_load (slp_node
, dr_chain
, gsi
, vf
,
6603 slp_node_instance
, false);
6609 first_stmt
= GROUP_FIRST_ELEMENT (stmt_info
);
6611 && !SLP_TREE_LOAD_PERMUTATION (slp_node
).exists ()
6612 && first_stmt
!= SLP_TREE_SCALAR_STMTS (slp_node
)[0])
6613 first_stmt
= SLP_TREE_SCALAR_STMTS (slp_node
)[0];
      /* Check if the chain of loads is already vectorized.  */
      if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
          /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
             ??? But we can only do so if there is exactly one
             as we have no way to get at the rest.  Leave the CSE
             opportunity alone.
             ??? With the group load eventually participating
             in multiple different permutations (having multiple
             slp nodes which refer to the same group) the CSE
             is even wrong code.  See PR56270.  */
          && !slp)
        {
          *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
          return true;
        }
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      /* VEC_NUM is the number of vect stmts to be created for this group.  */
      if (slp)
        {
          grouped_load = false;
          /* For SLP permutation support we need to load the whole group,
             not only the number of vector stmts the permutation result
             fits in.  */
          if (slp_perm)
            vec_num = (group_size * vf + nunits - 1) / nunits;
          else
            vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
          group_gap_adj = vf * group_size - nunits * vec_num;
        }
      else
        vec_num = group_size;
    }
  else
    {
      first_stmt = stmt;
      first_dr = dr;
      group_size = vec_num = 1;
      group_gap_adj = 0;
    }
  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
  gcc_assert (alignment_support_scheme);
  /* Targets with load-lane instructions must not require explicit
     realignment.  */
  gcc_assert (!load_lanes_p
              || alignment_support_scheme == dr_aligned
              || alignment_support_scheme == dr_unaligned_supported);
  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
     need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1:
        We first create the vector stmt VS1_0, and, as usual, record a
        pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
        Next, we create the vector stmt VS1_1, and record a pointer to
        it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
        Similarly, for VS1_2 and VS1_3.  This is the resulting chain of
        stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     See in documentation in vect_get_vec_def_for_stmt_copy for how the
     information we recorded in RELATED_STMT field is used to vectorize
     stmt S2.  */
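  /* An illustrative sketch (not GCC code): a consumer that needs all
     VF/nunits copies of a vectorized def can follow the chain recorded
     above, starting from STMT_VINFO_VEC_STMT and stepping through
     STMT_VINFO_RELATED_STMT until it reaches a NULL link:

       gimple *copy = STMT_VINFO_VEC_STMT (vinfo_for_stmt (scalar_stmt));
       while (copy)
         {
           ... use the vector def of COPY ...
           copy = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (copy));
         }

     (In practice the copies are obtained via
     vect_get_vec_def_for_stmt_copy; SCALAR_STMT here is hypothetical.)  */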
  /* In case of interleaving (non-unit grouped access):

     Vectorized loads are created in the order of memory accesses
     starting from the access of the first stmt of the chain:

        VS1: vx0 = &base
        VS2: vx1 = &base + vec_size*1
        VS3: vx3 = &base + vec_size*2
        VS4: vx4 = &base + vec_size*3

     Then permutation statements are generated:

        VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
        VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
        ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_load_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_load_chain()).
     The generation of permutation stmts and recording them in
     STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().

     In case of both multiple types and interleaving, the vector loads and
     permutation stmts above are created for every copy.  The result vector
     stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
     corresponding STMT_VINFO_RELATED_STMT for the next copies.  */
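  /* An illustrative sketch (not GCC code): for a group of two interleaved
     loads (group_size == 2) and four elements per vector, the scalar
     accesses

       a = base[2*i];
       b = base[2*i + 1];

     become two contiguous vector loads vx0 = base[0..3] and
     vx1 = base[4..7], followed by the permutations

       va = VEC_PERM_EXPR <vx0, vx1, { 0, 2, 4, 6 }>;   (even elements)
       vb = VEC_PERM_EXPR <vx0, vx1, { 1, 3, 5, 7 }>;   (odd elements)

     matching the { 0, 2, ..., i*2 } / { 1, 3, ..., i*2+1 } masks above.  */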
  /* If the data reference is aligned (dr_aligned) or potentially unaligned
     on a target that supports unaligned accesses (dr_unaligned_supported)
     we generate the following code:

         p = initial_addr;
         indx = 0;
         loop {
           p = p + indx * vectype_size;
           vec_dest = *(p);
           indx = indx + 1;
         }

     Otherwise, the data reference is potentially unaligned on a target that
     does not support unaligned accesses (dr_explicit_realign_optimized) -
     then generate the following code, in which the data in each iteration is
     obtained by two vector loads, one from the previous iteration, and one
     from the current iteration:

         p1 = initial_addr;
         msq_init = *(floor(p1))
         p2 = initial_addr + VS - 1;
         realignment_token = call target_builtin;
         indx = 0;
         loop {
           p2 = p2 + indx * vectype_size
           lsq = *(floor(p2))
           vec_dest = realign_load (msq, lsq, realignment_token)
           indx = indx + 1;
           msq = lsq;
         }   */

  /* If the misalignment remains the same throughout the execution of the
     loop, we can create the init_addr and permutation mask at the loop
     preheader.  Otherwise, it needs to be created inside the loop.
     This can only occur when vectorizing memory accesses in the inner-loop
     nested within an outer-loop that is being vectorized.  */
6769 if (nested_in_vect_loop
6770 && (TREE_INT_CST_LOW (DR_STEP (dr
))
6771 % GET_MODE_SIZE (TYPE_MODE (vectype
)) != 0))
6773 gcc_assert (alignment_support_scheme
!= dr_explicit_realign_optimized
);
6774 compute_in_loop
= true;
6777 if ((alignment_support_scheme
== dr_explicit_realign_optimized
6778 || alignment_support_scheme
== dr_explicit_realign
)
6779 && !compute_in_loop
)
6781 msq
= vect_setup_realignment (first_stmt
, gsi
, &realignment_token
,
6782 alignment_support_scheme
, NULL_TREE
,
6784 if (alignment_support_scheme
== dr_explicit_realign_optimized
)
6786 phi
= as_a
<gphi
*> (SSA_NAME_DEF_STMT (msq
));
6787 byte_offset
= size_binop (MINUS_EXPR
, TYPE_SIZE_UNIT (vectype
),
6795 offset
= size_int (-TYPE_VECTOR_SUBPARTS (vectype
) + 1);
6798 aggr_type
= build_array_type_nelts (elem_type
, vec_num
* nunits
);
6800 aggr_type
= vectype
;
6802 prev_stmt_info
= NULL
;
6803 for (j
= 0; j
< ncopies
; j
++)
6805 /* 1. Create the vector or array pointer update chain. */
6808 bool simd_lane_access_p
6809 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info
);
6810 if (simd_lane_access_p
6811 && TREE_CODE (DR_BASE_ADDRESS (first_dr
)) == ADDR_EXPR
6812 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr
), 0))
6813 && integer_zerop (DR_OFFSET (first_dr
))
6814 && integer_zerop (DR_INIT (first_dr
))
6815 && alias_sets_conflict_p (get_alias_set (aggr_type
),
6816 get_alias_set (DR_REF (first_dr
)))
6817 && (alignment_support_scheme
== dr_aligned
6818 || alignment_support_scheme
== dr_unaligned_supported
))
6820 dataref_ptr
= unshare_expr (DR_BASE_ADDRESS (first_dr
));
6821 dataref_offset
= build_int_cst (reference_alias_ptr_type
6822 (DR_REF (first_dr
)), 0);
6827 = vect_create_data_ref_ptr (first_stmt
, aggr_type
, at_loop
,
6828 offset
, &dummy
, gsi
, &ptr_incr
,
6829 simd_lane_access_p
, &inv_p
,
6832 else if (dataref_offset
)
6833 dataref_offset
= int_const_binop (PLUS_EXPR
, dataref_offset
,
6834 TYPE_SIZE_UNIT (aggr_type
));
6836 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
, stmt
,
6837 TYPE_SIZE_UNIT (aggr_type
));
6839 if (grouped_load
|| slp_perm
)
6840 dr_chain
.create (vec_num
);
6846 vec_array
= create_vector_array (vectype
, vec_num
);
6849 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
6850 data_ref
= create_array_ref (aggr_type
, dataref_ptr
, first_dr
);
6851 new_stmt
= gimple_build_call_internal (IFN_LOAD_LANES
, 1, data_ref
);
6852 gimple_call_set_lhs (new_stmt
, vec_array
);
6853 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
6855 /* Extract each vector into an SSA_NAME. */
6856 for (i
= 0; i
< vec_num
; i
++)
6858 new_temp
= read_vector_array (stmt
, gsi
, scalar_dest
,
6860 dr_chain
.quick_push (new_temp
);
6863 /* Record the mapping between SSA_NAMEs and statements. */
6864 vect_record_grouped_load_vectors (stmt
, dr_chain
);
6868 for (i
= 0; i
< vec_num
; i
++)
6871 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
,
6874 /* 2. Create the vector-load in the loop. */
6875 switch (alignment_support_scheme
)
6878 case dr_unaligned_supported
:
6880 unsigned int align
, misalign
;
6883 = fold_build2 (MEM_REF
, vectype
, dataref_ptr
,
6886 : build_int_cst (reference_alias_ptr_type
6887 (DR_REF (first_dr
)), 0));
6888 align
= TYPE_ALIGN_UNIT (vectype
);
6889 if (alignment_support_scheme
== dr_aligned
)
6891 gcc_assert (aligned_access_p (first_dr
));
6894 else if (DR_MISALIGNMENT (first_dr
) == -1)
6896 if (DR_VECT_AUX (first_dr
)->base_element_aligned
)
6897 align
= TYPE_ALIGN_UNIT (elem_type
);
6899 align
= (get_object_alignment (DR_REF (first_dr
))
6902 TREE_TYPE (data_ref
)
6903 = build_aligned_type (TREE_TYPE (data_ref
),
6904 align
* BITS_PER_UNIT
);
6908 TREE_TYPE (data_ref
)
6909 = build_aligned_type (TREE_TYPE (data_ref
),
6910 TYPE_ALIGN (elem_type
));
6911 misalign
= DR_MISALIGNMENT (first_dr
);
6913 if (dataref_offset
== NULL_TREE
6914 && TREE_CODE (dataref_ptr
) == SSA_NAME
)
6915 set_ptr_info_alignment (get_ptr_info (dataref_ptr
),
6919 case dr_explicit_realign
:
6923 tree vs
= size_int (TYPE_VECTOR_SUBPARTS (vectype
));
6925 if (compute_in_loop
)
6926 msq
= vect_setup_realignment (first_stmt
, gsi
,
6928 dr_explicit_realign
,
6931 if (TREE_CODE (dataref_ptr
) == SSA_NAME
)
6932 ptr
= copy_ssa_name (dataref_ptr
);
6934 ptr
= make_ssa_name (TREE_TYPE (dataref_ptr
));
6935 new_stmt
= gimple_build_assign
6936 (ptr
, BIT_AND_EXPR
, dataref_ptr
,
6938 (TREE_TYPE (dataref_ptr
),
6939 -(HOST_WIDE_INT
)TYPE_ALIGN_UNIT (vectype
)));
6940 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
6942 = build2 (MEM_REF
, vectype
, ptr
,
6943 build_int_cst (reference_alias_ptr_type
6944 (DR_REF (first_dr
)), 0));
6945 vec_dest
= vect_create_destination_var (scalar_dest
,
6947 new_stmt
= gimple_build_assign (vec_dest
, data_ref
);
6948 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
6949 gimple_assign_set_lhs (new_stmt
, new_temp
);
6950 gimple_set_vdef (new_stmt
, gimple_vdef (stmt
));
6951 gimple_set_vuse (new_stmt
, gimple_vuse (stmt
));
6952 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
6955 bump
= size_binop (MULT_EXPR
, vs
,
6956 TYPE_SIZE_UNIT (elem_type
));
6957 bump
= size_binop (MINUS_EXPR
, bump
, size_one_node
);
6958 ptr
= bump_vector_ptr (dataref_ptr
, NULL
, gsi
, stmt
, bump
);
6959 new_stmt
= gimple_build_assign
6960 (NULL_TREE
, BIT_AND_EXPR
, ptr
,
6963 -(HOST_WIDE_INT
)TYPE_ALIGN_UNIT (vectype
)));
6964 ptr
= copy_ssa_name (ptr
, new_stmt
);
6965 gimple_assign_set_lhs (new_stmt
, ptr
);
6966 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
6968 = build2 (MEM_REF
, vectype
, ptr
,
6969 build_int_cst (reference_alias_ptr_type
6970 (DR_REF (first_dr
)), 0));
6973 case dr_explicit_realign_optimized
:
6974 if (TREE_CODE (dataref_ptr
) == SSA_NAME
)
6975 new_temp
= copy_ssa_name (dataref_ptr
);
6977 new_temp
= make_ssa_name (TREE_TYPE (dataref_ptr
));
6978 new_stmt
= gimple_build_assign
6979 (new_temp
, BIT_AND_EXPR
, dataref_ptr
,
6981 (TREE_TYPE (dataref_ptr
),
6982 -(HOST_WIDE_INT
)TYPE_ALIGN_UNIT (vectype
)));
6983 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
6985 = build2 (MEM_REF
, vectype
, new_temp
,
6986 build_int_cst (reference_alias_ptr_type
6987 (DR_REF (first_dr
)), 0));
6992 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
6993 new_stmt
= gimple_build_assign (vec_dest
, data_ref
);
6994 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
6995 gimple_assign_set_lhs (new_stmt
, new_temp
);
6996 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
6998 /* 3. Handle explicit realignment if necessary/supported.
7000 vec_dest = realign_load (msq, lsq, realignment_token) */
7001 if (alignment_support_scheme
== dr_explicit_realign_optimized
7002 || alignment_support_scheme
== dr_explicit_realign
)
7004 lsq
= gimple_assign_lhs (new_stmt
);
7005 if (!realignment_token
)
7006 realignment_token
= dataref_ptr
;
7007 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
7008 new_stmt
= gimple_build_assign (vec_dest
, REALIGN_LOAD_EXPR
,
7009 msq
, lsq
, realignment_token
);
7010 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
7011 gimple_assign_set_lhs (new_stmt
, new_temp
);
7012 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7014 if (alignment_support_scheme
== dr_explicit_realign_optimized
)
7017 if (i
== vec_num
- 1 && j
== ncopies
- 1)
7018 add_phi_arg (phi
, lsq
,
7019 loop_latch_edge (containing_loop
),
7025 /* 4. Handle invariant-load. */
7026 if (inv_p
&& !bb_vinfo
)
7028 gcc_assert (!grouped_load
);
7029 /* If we have versioned for aliasing or the loop doesn't
7030 have any data dependencies that would preclude this,
7031 then we are sure this is a loop invariant load and
7032 thus we can insert it on the preheader edge. */
7033 if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo
)
7034 && !nested_in_vect_loop
7035 && hoist_defs_of_uses (stmt
, loop
))
7037 if (dump_enabled_p ())
7039 dump_printf_loc (MSG_NOTE
, vect_location
,
7040 "hoisting out of the vectorized "
7042 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, stmt
, 0);
7044 tree tem
= copy_ssa_name (scalar_dest
);
7045 gsi_insert_on_edge_immediate
7046 (loop_preheader_edge (loop
),
7047 gimple_build_assign (tem
,
7049 (gimple_assign_rhs1 (stmt
))));
7050 new_temp
= vect_init_vector (stmt
, tem
, vectype
, NULL
);
7054 gimple_stmt_iterator gsi2
= *gsi
;
7056 new_temp
= vect_init_vector (stmt
, scalar_dest
,
7059 new_stmt
= SSA_NAME_DEF_STMT (new_temp
);
7060 set_vinfo_for_stmt (new_stmt
,
7061 new_stmt_vec_info (new_stmt
, vinfo
));
7066 tree perm_mask
= perm_mask_for_reverse (vectype
);
7067 new_temp
= permute_vec_elements (new_temp
, new_temp
,
7068 perm_mask
, stmt
, gsi
);
7069 new_stmt
= SSA_NAME_DEF_STMT (new_temp
);
7072 /* Collect vector loads and later create their permutation in
7073 vect_transform_grouped_load (). */
7074 if (grouped_load
|| slp_perm
)
7075 dr_chain
.quick_push (new_temp
);
7077 /* Store vector loads in the corresponding SLP_NODE. */
7078 if (slp
&& !slp_perm
)
7079 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
7081 /* Bump the vector pointer to account for a gap or for excess
7082 elements loaded for a permuted SLP load. */
7083 if (group_gap_adj
!= 0)
7087 = wide_int_to_tree (sizetype
,
7088 wi::smul (TYPE_SIZE_UNIT (elem_type
),
7089 group_gap_adj
, &ovf
));
7090 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
,
7095 if (slp
&& !slp_perm
)
7100 if (!vect_transform_slp_perm_load (slp_node
, dr_chain
, gsi
, vf
,
7101 slp_node_instance
, false))
7103 dr_chain
.release ();
7112 vect_transform_grouped_load (stmt
, dr_chain
, group_size
, gsi
);
7113 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
7118 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
7120 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
7121 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
7124 dr_chain
.release ();
/* Function vect_is_simple_cond.

   Input:
   LOOP - the loop that is being vectorized.
   COND - Condition that is checked for simple use.

   Output:
   *COMP_VECTYPE - the vector type for the comparison.

   Returns whether a COND can be vectorized.  Checks whether
   condition operands are supportable using vec_is_simple_use.  */
static bool
vect_is_simple_cond (tree cond, vec_info *vinfo, tree *comp_vectype)
{
  tree lhs, rhs;
  enum vect_def_type dt;
  tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;

  if (!COMPARISON_CLASS_P (cond))
    return false;

  lhs = TREE_OPERAND (cond, 0);
  rhs = TREE_OPERAND (cond, 1);

  if (TREE_CODE (lhs) == SSA_NAME)
    {
      gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
      if (!vect_is_simple_use (lhs, vinfo, &lhs_def_stmt, &dt, &vectype1))
        return false;
    }
  else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
           && TREE_CODE (lhs) != FIXED_CST)
    return false;

  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple *rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
      if (!vect_is_simple_use (rhs, vinfo, &rhs_def_stmt, &dt, &vectype2))
        return false;
    }
  else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
           && TREE_CODE (rhs) != FIXED_CST)
    return false;

  *comp_vectype = vectype1 ? vectype1 : vectype2;
  return true;
}
/* vectorizable_condition.

   Check if STMT is conditional modify expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
   at GSI.

   When STMT is vectorized as nested cycle, REDUC_DEF is the vector variable
   to be used at REDUC_INDEX (in then clause if REDUC_INDEX is 1, and in
   else clause if it is 2).

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
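
/* An illustrative sketch (not GCC code): a scalar conditional assignment
   such as

     for (i = 0; i < n; i++)
       x[i] = a[i] < b[i] ? c[i] : d[i];

   is vectorized by building the comparison and selecting with a
   VEC_COND_EXPR, roughly

     vcmp = va < vb;
     vx = VEC_COND_EXPR <vcmp, vc, vd>;

   where va/vb/vc/vd stand for the vectorized operands.  */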
7193 vectorizable_condition (gimple
*stmt
, gimple_stmt_iterator
*gsi
,
7194 gimple
**vec_stmt
, tree reduc_def
, int reduc_index
,
7197 tree scalar_dest
= NULL_TREE
;
7198 tree vec_dest
= NULL_TREE
;
7199 tree cond_expr
, then_clause
, else_clause
;
7200 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
7201 tree comp_vectype
= NULL_TREE
;
7202 tree vec_cond_lhs
= NULL_TREE
, vec_cond_rhs
= NULL_TREE
;
7203 tree vec_then_clause
= NULL_TREE
, vec_else_clause
= NULL_TREE
;
7204 tree vec_compare
, vec_cond_expr
;
7206 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
7207 enum vect_def_type dt
, dts
[4];
7209 enum tree_code code
;
7210 stmt_vec_info prev_stmt_info
= NULL
;
7212 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
7213 vec
<tree
> vec_oprnds0
= vNULL
;
7214 vec
<tree
> vec_oprnds1
= vNULL
;
7215 vec
<tree
> vec_oprnds2
= vNULL
;
7216 vec
<tree
> vec_oprnds3
= vNULL
;
7219 if (reduc_index
&& STMT_SLP_TYPE (stmt_info
))
7222 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
7225 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
7226 && !(STMT_VINFO_DEF_TYPE (stmt_info
) == vect_nested_cycle
7230 /* FORNOW: not yet supported. */
7231 if (STMT_VINFO_LIVE_P (stmt_info
))
7233 if (dump_enabled_p ())
7234 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7235 "value used after loop.\n");
7239 /* Is vectorizable conditional operation? */
7240 if (!is_gimple_assign (stmt
))
7243 code
= gimple_assign_rhs_code (stmt
);
7245 if (code
!= COND_EXPR
)
7248 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
7249 int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
7251 if (slp_node
|| PURE_SLP_STMT (stmt_info
))
7254 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
7256 gcc_assert (ncopies
>= 1);
7257 if (reduc_index
&& ncopies
> 1)
7258 return false; /* FORNOW */
7260 cond_expr
= gimple_assign_rhs1 (stmt
);
7261 then_clause
= gimple_assign_rhs2 (stmt
);
7262 else_clause
= gimple_assign_rhs3 (stmt
);
7264 if (!vect_is_simple_cond (cond_expr
, stmt_info
->vinfo
, &comp_vectype
)
7269 if (!vect_is_simple_use (then_clause
, stmt_info
->vinfo
, &def_stmt
, &dt
))
7271 if (!vect_is_simple_use (else_clause
, stmt_info
->vinfo
, &def_stmt
, &dt
))
7274 unsigned int prec
= GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype
)));
7275 /* The result of a vector comparison should be signed type. */
7276 tree cmp_type
= build_nonstandard_integer_type (prec
, 0);
7277 vec_cmp_type
= get_same_sized_vectype (cmp_type
, vectype
);
7278 if (vec_cmp_type
== NULL_TREE
)
7283 STMT_VINFO_TYPE (stmt_info
) = condition_vec_info_type
;
7284 return expand_vec_cond_expr_p (vectype
, comp_vectype
);
7291 vec_oprnds0
.create (1);
7292 vec_oprnds1
.create (1);
7293 vec_oprnds2
.create (1);
7294 vec_oprnds3
.create (1);
7298 scalar_dest
= gimple_assign_lhs (stmt
);
7299 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
7301 /* Handle cond expr. */
7302 for (j
= 0; j
< ncopies
; j
++)
7304 gassign
*new_stmt
= NULL
;
7309 auto_vec
<tree
, 4> ops
;
7310 auto_vec
<vec
<tree
>, 4> vec_defs
;
7312 ops
.safe_push (TREE_OPERAND (cond_expr
, 0));
7313 ops
.safe_push (TREE_OPERAND (cond_expr
, 1));
7314 ops
.safe_push (then_clause
);
7315 ops
.safe_push (else_clause
);
7316 vect_get_slp_defs (ops
, slp_node
, &vec_defs
, -1);
7317 vec_oprnds3
= vec_defs
.pop ();
7318 vec_oprnds2
= vec_defs
.pop ();
7319 vec_oprnds1
= vec_defs
.pop ();
7320 vec_oprnds0
= vec_defs
.pop ();
7323 vec_defs
.release ();
7329 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr
, 0), stmt
);
7330 vect_is_simple_use (TREE_OPERAND (cond_expr
, 0),
7331 loop_vinfo
, >emp
, &dts
[0]);
7334 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr
, 1),
7336 vect_is_simple_use (TREE_OPERAND (cond_expr
, 1),
7337 loop_vinfo
, >emp
, &dts
[1]);
7338 if (reduc_index
== 1)
7339 vec_then_clause
= reduc_def
;
7342 vec_then_clause
= vect_get_vec_def_for_operand (then_clause
,
7344 vect_is_simple_use (then_clause
, loop_vinfo
,
7347 if (reduc_index
== 2)
7348 vec_else_clause
= reduc_def
;
7351 vec_else_clause
= vect_get_vec_def_for_operand (else_clause
,
7353 vect_is_simple_use (else_clause
, loop_vinfo
, >emp
, &dts
[3]);
7359 vec_cond_lhs
= vect_get_vec_def_for_stmt_copy (dts
[0],
7360 vec_oprnds0
.pop ());
7361 vec_cond_rhs
= vect_get_vec_def_for_stmt_copy (dts
[1],
7362 vec_oprnds1
.pop ());
7363 vec_then_clause
= vect_get_vec_def_for_stmt_copy (dts
[2],
7364 vec_oprnds2
.pop ());
7365 vec_else_clause
= vect_get_vec_def_for_stmt_copy (dts
[3],
7366 vec_oprnds3
.pop ());
7371 vec_oprnds0
.quick_push (vec_cond_lhs
);
7372 vec_oprnds1
.quick_push (vec_cond_rhs
);
7373 vec_oprnds2
.quick_push (vec_then_clause
);
7374 vec_oprnds3
.quick_push (vec_else_clause
);
7377 /* Arguments are ready. Create the new vector stmt. */
7378 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vec_cond_lhs
)
7380 vec_cond_rhs
= vec_oprnds1
[i
];
7381 vec_then_clause
= vec_oprnds2
[i
];
7382 vec_else_clause
= vec_oprnds3
[i
];
7384 vec_compare
= build2 (TREE_CODE (cond_expr
), vec_cmp_type
,
7385 vec_cond_lhs
, vec_cond_rhs
);
7386 vec_cond_expr
= build3 (VEC_COND_EXPR
, vectype
,
7387 vec_compare
, vec_then_clause
, vec_else_clause
);
7389 new_stmt
= gimple_build_assign (vec_dest
, vec_cond_expr
);
7390 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
7391 gimple_assign_set_lhs (new_stmt
, new_temp
);
7392 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7394 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
7401 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
7403 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
7405 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
7408 vec_oprnds0
.release ();
7409 vec_oprnds1
.release ();
7410 vec_oprnds2
.release ();
7411 vec_oprnds3
.release ();
7417 /* Make sure the statement is vectorizable. */
7420 vect_analyze_stmt (gimple
*stmt
, bool *need_to_vectorize
, slp_tree node
)
7422 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
7423 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
7424 enum vect_relevant relevance
= STMT_VINFO_RELEVANT (stmt_info
);
7426 tree scalar_type
, vectype
;
7427 gimple
*pattern_stmt
;
7428 gimple_seq pattern_def_seq
;
7430 if (dump_enabled_p ())
7432 dump_printf_loc (MSG_NOTE
, vect_location
, "==> examining statement: ");
7433 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, stmt
, 0);
7436 if (gimple_has_volatile_ops (stmt
))
7438 if (dump_enabled_p ())
7439 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7440 "not vectorized: stmt has volatile operands\n");
  /* Skip stmts that do not need to be vectorized.  In loops this is expected
     to be the case:
     - the COND_EXPR which is the loop exit condition
     - any LABEL_EXPRs in the loop
     - computations that are used only for array indexing or loop control.
     In basic blocks we only analyze statements that are a part of some SLP
     instance, therefore, all the statements are relevant.

     Pattern statement needs to be analyzed instead of the original statement
     if the original statement is not relevant.  Otherwise, we analyze both
     statements.  In basic blocks we are called from some SLP instance
     traversal, don't analyze pattern stmts instead, the pattern stmts
     already will be part of SLP instance.  */
7459 pattern_stmt
= STMT_VINFO_RELATED_STMT (stmt_info
);
7460 if (!STMT_VINFO_RELEVANT_P (stmt_info
)
7461 && !STMT_VINFO_LIVE_P (stmt_info
))
7463 if (STMT_VINFO_IN_PATTERN_P (stmt_info
)
7465 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt
))
7466 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt
))))
7468 /* Analyze PATTERN_STMT instead of the original stmt. */
7469 stmt
= pattern_stmt
;
7470 stmt_info
= vinfo_for_stmt (pattern_stmt
);
7471 if (dump_enabled_p ())
7473 dump_printf_loc (MSG_NOTE
, vect_location
,
7474 "==> examining pattern statement: ");
7475 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, stmt
, 0);
7480 if (dump_enabled_p ())
7481 dump_printf_loc (MSG_NOTE
, vect_location
, "irrelevant.\n");
7486 else if (STMT_VINFO_IN_PATTERN_P (stmt_info
)
7489 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt
))
7490 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt
))))
7492 /* Analyze PATTERN_STMT too. */
7493 if (dump_enabled_p ())
7495 dump_printf_loc (MSG_NOTE
, vect_location
,
7496 "==> examining pattern statement: ");
7497 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, stmt
, 0);
7500 if (!vect_analyze_stmt (pattern_stmt
, need_to_vectorize
, node
))
7504 if (is_pattern_stmt_p (stmt_info
)
7506 && (pattern_def_seq
= STMT_VINFO_PATTERN_DEF_SEQ (stmt_info
)))
7508 gimple_stmt_iterator si
;
7510 for (si
= gsi_start (pattern_def_seq
); !gsi_end_p (si
); gsi_next (&si
))
7512 gimple
*pattern_def_stmt
= gsi_stmt (si
);
7513 if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt
))
7514 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt
)))
7516 /* Analyze def stmt of STMT if it's a pattern stmt. */
7517 if (dump_enabled_p ())
7519 dump_printf_loc (MSG_NOTE
, vect_location
,
7520 "==> examining pattern def statement: ");
7521 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, pattern_def_stmt
, 0);
7524 if (!vect_analyze_stmt (pattern_def_stmt
,
7525 need_to_vectorize
, node
))
7531 switch (STMT_VINFO_DEF_TYPE (stmt_info
))
7533 case vect_internal_def
:
7536 case vect_reduction_def
:
7537 case vect_nested_cycle
:
7538 gcc_assert (!bb_vinfo
7539 && (relevance
== vect_used_in_outer
7540 || relevance
== vect_used_in_outer_by_reduction
7541 || relevance
== vect_used_by_reduction
7542 || relevance
== vect_unused_in_scope
));
7545 case vect_induction_def
:
7546 case vect_constant_def
:
7547 case vect_external_def
:
7548 case vect_unknown_def_type
:
7555 gcc_assert (PURE_SLP_STMT (stmt_info
));
7557 scalar_type
= TREE_TYPE (gimple_get_lhs (stmt
));
7558 if (dump_enabled_p ())
7560 dump_printf_loc (MSG_NOTE
, vect_location
,
7561 "get vectype for scalar type: ");
7562 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, scalar_type
);
7563 dump_printf (MSG_NOTE
, "\n");
7566 vectype
= get_vectype_for_scalar_type (scalar_type
);
7569 if (dump_enabled_p ())
7571 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7572 "not SLPed: unsupported data-type ");
7573 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
,
7575 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
7580 if (dump_enabled_p ())
7582 dump_printf_loc (MSG_NOTE
, vect_location
, "vectype: ");
7583 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, vectype
);
7584 dump_printf (MSG_NOTE
, "\n");
7587 STMT_VINFO_VECTYPE (stmt_info
) = vectype
;
7590 if (STMT_VINFO_RELEVANT_P (stmt_info
))
7592 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt
))));
7593 gcc_assert (STMT_VINFO_VECTYPE (stmt_info
)
7594 || (is_gimple_call (stmt
)
7595 && gimple_call_lhs (stmt
) == NULL_TREE
));
7596 *need_to_vectorize
= true;
7599 if (PURE_SLP_STMT (stmt_info
) && !node
)
7601 dump_printf_loc (MSG_NOTE
, vect_location
,
7602 "handled only by SLP analysis\n");
7608 && (STMT_VINFO_RELEVANT_P (stmt_info
)
7609 || STMT_VINFO_DEF_TYPE (stmt_info
) == vect_reduction_def
))
7610 ok
= (vectorizable_simd_clone_call (stmt
, NULL
, NULL
, node
)
7611 || vectorizable_conversion (stmt
, NULL
, NULL
, node
)
7612 || vectorizable_shift (stmt
, NULL
, NULL
, node
)
7613 || vectorizable_operation (stmt
, NULL
, NULL
, node
)
7614 || vectorizable_assignment (stmt
, NULL
, NULL
, node
)
7615 || vectorizable_load (stmt
, NULL
, NULL
, node
, NULL
)
7616 || vectorizable_call (stmt
, NULL
, NULL
, node
)
7617 || vectorizable_store (stmt
, NULL
, NULL
, node
)
7618 || vectorizable_reduction (stmt
, NULL
, NULL
, node
)
7619 || vectorizable_condition (stmt
, NULL
, NULL
, NULL
, 0, node
));
7623 ok
= (vectorizable_simd_clone_call (stmt
, NULL
, NULL
, node
)
7624 || vectorizable_conversion (stmt
, NULL
, NULL
, node
)
7625 || vectorizable_shift (stmt
, NULL
, NULL
, node
)
7626 || vectorizable_operation (stmt
, NULL
, NULL
, node
)
7627 || vectorizable_assignment (stmt
, NULL
, NULL
, node
)
7628 || vectorizable_load (stmt
, NULL
, NULL
, node
, NULL
)
7629 || vectorizable_call (stmt
, NULL
, NULL
, node
)
7630 || vectorizable_store (stmt
, NULL
, NULL
, node
)
7631 || vectorizable_condition (stmt
, NULL
, NULL
, NULL
, 0, node
));
7636 if (dump_enabled_p ())
7638 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7639 "not vectorized: relevant stmt not ");
7640 dump_printf (MSG_MISSED_OPTIMIZATION
, "supported: ");
7641 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
7650 /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
7651 need extra handling, except for vectorizable reductions. */
7652 if (STMT_VINFO_LIVE_P (stmt_info
)
7653 && STMT_VINFO_TYPE (stmt_info
) != reduc_vec_info_type
)
7654 ok
= vectorizable_live_operation (stmt
, NULL
, NULL
);
7658 if (dump_enabled_p ())
7660 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7661 "not vectorized: live stmt not ");
7662 dump_printf (MSG_MISSED_OPTIMIZATION
, "supported: ");
7663 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
7673 /* Function vect_transform_stmt.
7675 Create a vectorized stmt to replace STMT, and insert it at BSI. */
7678 vect_transform_stmt (gimple
*stmt
, gimple_stmt_iterator
*gsi
,
7679 bool *grouped_store
, slp_tree slp_node
,
7680 slp_instance slp_node_instance
)
7682 bool is_store
= false;
7683 gimple
*vec_stmt
= NULL
;
7684 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
7687 gimple
*old_vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
7689 switch (STMT_VINFO_TYPE (stmt_info
))
7691 case type_demotion_vec_info_type
:
7692 case type_promotion_vec_info_type
:
7693 case type_conversion_vec_info_type
:
7694 done
= vectorizable_conversion (stmt
, gsi
, &vec_stmt
, slp_node
);
7698 case induc_vec_info_type
:
7699 gcc_assert (!slp_node
);
7700 done
= vectorizable_induction (stmt
, gsi
, &vec_stmt
);
7704 case shift_vec_info_type
:
7705 done
= vectorizable_shift (stmt
, gsi
, &vec_stmt
, slp_node
);
7709 case op_vec_info_type
:
7710 done
= vectorizable_operation (stmt
, gsi
, &vec_stmt
, slp_node
);
7714 case assignment_vec_info_type
:
7715 done
= vectorizable_assignment (stmt
, gsi
, &vec_stmt
, slp_node
);
7719 case load_vec_info_type
:
7720 done
= vectorizable_load (stmt
, gsi
, &vec_stmt
, slp_node
,
7725 case store_vec_info_type
:
7726 done
= vectorizable_store (stmt
, gsi
, &vec_stmt
, slp_node
);
7728 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
) && !slp_node
)
7730 /* In case of interleaving, the whole chain is vectorized when the
7731 last store in the chain is reached. Store stmts before the last
7732 one are skipped, and there vec_stmt_info shouldn't be freed
7734 *grouped_store
= true;
7735 if (STMT_VINFO_VEC_STMT (stmt_info
))
7742 case condition_vec_info_type
:
7743 done
= vectorizable_condition (stmt
, gsi
, &vec_stmt
, NULL
, 0, slp_node
);
7747 case call_vec_info_type
:
7748 done
= vectorizable_call (stmt
, gsi
, &vec_stmt
, slp_node
);
7749 stmt
= gsi_stmt (*gsi
);
7750 if (is_gimple_call (stmt
)
7751 && gimple_call_internal_p (stmt
)
7752 && gimple_call_internal_fn (stmt
) == IFN_MASK_STORE
)
7756 case call_simd_clone_vec_info_type
:
7757 done
= vectorizable_simd_clone_call (stmt
, gsi
, &vec_stmt
, slp_node
);
7758 stmt
= gsi_stmt (*gsi
);
7761 case reduc_vec_info_type
:
7762 done
= vectorizable_reduction (stmt
, gsi
, &vec_stmt
, slp_node
);
7767 if (!STMT_VINFO_LIVE_P (stmt_info
))
7769 if (dump_enabled_p ())
7770 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7771 "stmt not supported.\n");
7776 /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
7777 This would break hybrid SLP vectorization. */
7779 gcc_assert (!vec_stmt
7780 && STMT_VINFO_VEC_STMT (stmt_info
) == old_vec_stmt
);
7782 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
7783 is being vectorized, but outside the immediately enclosing loop. */
7785 && STMT_VINFO_LOOP_VINFO (stmt_info
)
7786 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
7787 STMT_VINFO_LOOP_VINFO (stmt_info
)), stmt
)
7788 && STMT_VINFO_TYPE (stmt_info
) != reduc_vec_info_type
7789 && (STMT_VINFO_RELEVANT (stmt_info
) == vect_used_in_outer
7790 || STMT_VINFO_RELEVANT (stmt_info
) ==
7791 vect_used_in_outer_by_reduction
))
7793 struct loop
*innerloop
= LOOP_VINFO_LOOP (
7794 STMT_VINFO_LOOP_VINFO (stmt_info
))->inner
;
7795 imm_use_iterator imm_iter
;
7796 use_operand_p use_p
;
7800 if (dump_enabled_p ())
7801 dump_printf_loc (MSG_NOTE
, vect_location
,
7802 "Record the vdef for outer-loop vectorization.\n");
7804 /* Find the relevant loop-exit phi-node, and reord the vec_stmt there
7805 (to be used when vectorizing outer-loop stmts that use the DEF of
7807 if (gimple_code (stmt
) == GIMPLE_PHI
)
7808 scalar_dest
= PHI_RESULT (stmt
);
7810 scalar_dest
= gimple_assign_lhs (stmt
);
7812 FOR_EACH_IMM_USE_FAST (use_p
, imm_iter
, scalar_dest
)
7814 if (!flow_bb_inside_loop_p (innerloop
, gimple_bb (USE_STMT (use_p
))))
7816 exit_phi
= USE_STMT (use_p
);
7817 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi
)) = vec_stmt
;
7822 /* Handle stmts whose DEF is used outside the loop-nest that is
7823 being vectorized. */
7824 if (STMT_VINFO_LIVE_P (stmt_info
)
7825 && STMT_VINFO_TYPE (stmt_info
) != reduc_vec_info_type
)
7827 done
= vectorizable_live_operation (stmt
, gsi
, &vec_stmt
);
7832 STMT_VINFO_VEC_STMT (stmt_info
) = vec_stmt
;
/* Remove a group of stores (for SLP or interleaving), free their
   stmt_vec_info.  */

void
vect_remove_stores (gimple *first_stmt)
{
  gimple *next = first_stmt;
  gimple *tmp;
  gimple_stmt_iterator next_si;

  while (next)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (next);

      tmp = GROUP_NEXT_ELEMENT (stmt_info);
      if (is_pattern_stmt_p (stmt_info))
        next = STMT_VINFO_RELATED_STMT (stmt_info);
      /* Free the attached stmt_vec_info and remove the stmt.  */
      next_si = gsi_for_stmt (next);
      unlink_stmt_vdef (next);
      gsi_remove (&next_si, true);
      release_defs (next);
      free_stmt_vec_info (next);
      next = tmp;
    }
}
/* Function new_stmt_vec_info.

   Create and initialize a new stmt_vec_info struct for STMT.  */

stmt_vec_info
new_stmt_vec_info (gimple *stmt, vec_info *vinfo)
{
  stmt_vec_info res;
  res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));

  STMT_VINFO_TYPE (res) = undef_vec_info_type;
  STMT_VINFO_STMT (res) = stmt;
  res->vinfo = vinfo;
  STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
  STMT_VINFO_LIVE_P (res) = false;
  STMT_VINFO_VECTYPE (res) = NULL;
  STMT_VINFO_VEC_STMT (res) = NULL;
  STMT_VINFO_VECTORIZABLE (res) = true;
  STMT_VINFO_IN_PATTERN_P (res) = false;
  STMT_VINFO_RELATED_STMT (res) = NULL;
  STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
  STMT_VINFO_DATA_REF (res) = NULL;

  STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
  STMT_VINFO_DR_OFFSET (res) = NULL;
  STMT_VINFO_DR_INIT (res) = NULL;
  STMT_VINFO_DR_STEP (res) = NULL;
  STMT_VINFO_DR_ALIGNED_TO (res) = NULL;

  if (gimple_code (stmt) == GIMPLE_PHI
      && is_loop_header_bb_p (gimple_bb (stmt)))
    STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
  else
    STMT_VINFO_DEF_TYPE (res) = vect_internal_def;

  STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
  STMT_SLP_TYPE (res) = loop_vect;
  GROUP_FIRST_ELEMENT (res) = NULL;
  GROUP_NEXT_ELEMENT (res) = NULL;
  GROUP_SIZE (res) = 0;
  GROUP_STORE_COUNT (res) = 0;
  GROUP_GAP (res) = 0;
  GROUP_SAME_DR_STMT (res) = NULL;

  return res;
}
/* Create a hash table for stmt_vec_info.  */

void
init_stmt_vec_info_vec (void)
{
  gcc_assert (!stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.create (50);
}


/* Free hash table for stmt_vec_info.  */

void
free_stmt_vec_info_vec (void)
{
  unsigned int i;
  stmt_vec_info info;
  FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
    if (info != NULL)
      free_stmt_vec_info (STMT_VINFO_STMT (info));
  gcc_assert (stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.release ();
}
/* Free stmt vectorization related info.  */

void
free_stmt_vec_info (gimple *stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  if (!stmt_info)
    return;

  /* Check if this statement has a related "pattern stmt"
     (introduced by the vectorizer during the pattern recognition
     pass).  Free pattern's stmt_vec_info and def stmt's stmt_vec_info
     too.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      stmt_vec_info patt_info
        = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
      if (patt_info)
        {
          gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
          gimple *patt_stmt = STMT_VINFO_STMT (patt_info);
          gimple_set_bb (patt_stmt, NULL);
          tree lhs = gimple_get_lhs (patt_stmt);
          if (TREE_CODE (lhs) == SSA_NAME)
            release_ssa_name (lhs);
          if (seq)
            {
              gimple_stmt_iterator si;
              for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
                {
                  gimple *seq_stmt = gsi_stmt (si);
                  gimple_set_bb (seq_stmt, NULL);
                  lhs = gimple_get_lhs (seq_stmt);
                  if (TREE_CODE (lhs) == SSA_NAME)
                    release_ssa_name (lhs);
                  free_stmt_vec_info (seq_stmt);
                }
            }
          free_stmt_vec_info (patt_stmt);
        }
    }

  STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
  STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
  set_vinfo_for_stmt (stmt, NULL);
  free (stmt_info);
}
/* Function get_vectype_for_scalar_type_and_size.

   Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
   by the target.  */

tree
get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
{
  machine_mode inner_mode = TYPE_MODE (scalar_type);
  machine_mode simd_mode;
  unsigned int nbytes = GET_MODE_SIZE (inner_mode);
  int nunits;
  tree vectype;

  if (nbytes == 0)
    return NULL_TREE;

  if (GET_MODE_CLASS (inner_mode) != MODE_INT
      && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
    return NULL_TREE;

  /* For vector types of elements whose mode precision doesn't
     match their types precision we use an element type of mode
     precision.  The vectorization routines will have to make sure
     they support the proper result truncation/extension.
     We also make sure to build vector types with INTEGER_TYPE
     component type only.  */
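  /* An illustrative sketch (not GCC code): a bit-field or boolean-like
     scalar whose TYPE_PRECISION is smaller than its mode, e.g. a 1-bit
     value carried in QImode, is replaced here by the 8-bit integer type

       build_nonstandard_integer_type (8, TYPE_UNSIGNED (scalar_type))

     so that the vector elements have full mode precision.  */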
  if (INTEGRAL_TYPE_P (scalar_type)
      && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
          || TREE_CODE (scalar_type) != INTEGER_TYPE))
    scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
                                                  TYPE_UNSIGNED (scalar_type));

  /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
     When the component mode passes the above test simply use a type
     corresponding to that mode.  The theory is that any use that
     would cause problems with this will disable vectorization anyway.  */
  else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
           && !INTEGRAL_TYPE_P (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);

  /* We can't build a vector type of elements with alignment bigger than
     their size.  */
  else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode,
                                                  TYPE_UNSIGNED (scalar_type));

  /* If we fell back to using the mode fail if there was
     no scalar type for it.  */
  if (scalar_type == NULL_TREE)
    return NULL_TREE;

  /* If no size was supplied use the mode the target prefers.  Otherwise
     lookup a vector mode of the specified size.  */
  if (size == 0)
    simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
  else
    simd_mode = mode_for_vector (inner_mode, size / nbytes);
  nunits = GET_MODE_SIZE (simd_mode) / nbytes;
  if (nunits <= 1)
    return NULL_TREE;

  vectype = build_vector_type (scalar_type, nunits);

  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
    return NULL_TREE;

  return vectype;
}
unsigned int current_vector_size;

/* Function get_vectype_for_scalar_type.

   Returns the vector type corresponding to SCALAR_TYPE as supported
   by the target.  */

tree
get_vectype_for_scalar_type (tree scalar_type)
{
  tree vectype;
  vectype = get_vectype_for_scalar_type_and_size (scalar_type,
                                                  current_vector_size);
  if (vectype
      && current_vector_size == 0)
    current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
  return vectype;
}
/* Function get_same_sized_vectype

   Returns a vector type corresponding to SCALAR_TYPE of size
   VECTOR_TYPE if supported by the target.  */

tree
get_same_sized_vectype (tree scalar_type, tree vector_type)
{
  return get_vectype_for_scalar_type_and_size
           (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
}
/* Function vect_is_simple_use.

   Input:
   VINFO - the vect info of the loop or basic block that is being vectorized.
   OPERAND - operand in the loop or bb.
   Output:
   DEF_STMT - the defining stmt in case OPERAND is an SSA_NAME.
   DT - the type of definition

   Returns whether a stmt with OPERAND can be vectorized.
   For loops, supportable operands are constants, loop invariants, and operands
   that are defined by the current iteration of the loop.  Unsupportable
   operands are those that are defined by a previous iteration of the loop (as
   is the case in reduction/induction computations).
   For basic blocks, supportable operands are constants and bb invariants.
   For now, operands defined outside the basic block are not supported.  */
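
/* An illustrative sketch (not GCC code) of the typical calling pattern,
   as used throughout this file: before vectorizing a use, query its
   definition kind and bail out if it is not simple:

     gimple *def_stmt;
     enum vect_def_type dt;
     if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
       return false;

   OP and VINFO here stand for the operand and vec_info of the caller.  */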
8109 vect_is_simple_use (tree operand
, vec_info
*vinfo
,
8110 gimple
**def_stmt
, enum vect_def_type
*dt
)
8113 *dt
= vect_unknown_def_type
;
8115 if (dump_enabled_p ())
8117 dump_printf_loc (MSG_NOTE
, vect_location
,
8118 "vect_is_simple_use: operand ");
8119 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, operand
);
8120 dump_printf (MSG_NOTE
, "\n");
8123 if (CONSTANT_CLASS_P (operand
))
8125 *dt
= vect_constant_def
;
8129 if (is_gimple_min_invariant (operand
))
8131 *dt
= vect_external_def
;
8135 if (TREE_CODE (operand
) != SSA_NAME
)
8137 if (dump_enabled_p ())
8138 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
8143 if (SSA_NAME_IS_DEFAULT_DEF (operand
))
8145 *dt
= vect_external_def
;
8149 *def_stmt
= SSA_NAME_DEF_STMT (operand
);
8150 if (dump_enabled_p ())
8152 dump_printf_loc (MSG_NOTE
, vect_location
, "def_stmt: ");
8153 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, *def_stmt
, 0);
8156 basic_block bb
= gimple_bb (*def_stmt
);
8157 if ((is_a
<loop_vec_info
> (vinfo
)
8158 && !flow_bb_inside_loop_p (as_a
<loop_vec_info
> (vinfo
)->loop
, bb
))
8159 || (is_a
<bb_vec_info
> (vinfo
)
8160 && (bb
!= as_a
<bb_vec_info
> (vinfo
)->bb
8161 || gimple_code (*def_stmt
) == GIMPLE_PHI
)))
8162 *dt
= vect_external_def
;
8165 stmt_vec_info stmt_vinfo
= vinfo_for_stmt (*def_stmt
);
8166 if (is_a
<bb_vec_info
> (vinfo
) && !STMT_VINFO_VECTORIZABLE (stmt_vinfo
))
8167 *dt
= vect_external_def
;
8169 *dt
= STMT_VINFO_DEF_TYPE (stmt_vinfo
);
8172 if (dump_enabled_p ())
8174 dump_printf_loc (MSG_NOTE
, vect_location
, "type of def: ");
8177 case vect_uninitialized_def
:
8178 dump_printf (MSG_NOTE
, "uninitialized\n");
8180 case vect_constant_def
:
8181 dump_printf (MSG_NOTE
, "constant\n");
8183 case vect_external_def
:
8184 dump_printf (MSG_NOTE
, "external\n");
8186 case vect_internal_def
:
8187 dump_printf (MSG_NOTE
, "internal\n");
8189 case vect_induction_def
:
8190 dump_printf (MSG_NOTE
, "induction\n");
8192 case vect_reduction_def
:
8193 dump_printf (MSG_NOTE
, "reduction\n");
8195 case vect_double_reduction_def
:
8196 dump_printf (MSG_NOTE
, "double reduction\n");
8198 case vect_nested_cycle
:
8199 dump_printf (MSG_NOTE
, "nested cycle\n");
8201 case vect_unknown_def_type
:
8202 dump_printf (MSG_NOTE
, "unknown\n");
8207 if (*dt
== vect_unknown_def_type
)
8209 if (dump_enabled_p ())
8210 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
8211 "Unsupported pattern.\n");
8215 switch (gimple_code (*def_stmt
))
8222 if (dump_enabled_p ())
8223 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
8224 "unsupported defining stmt:\n");
/* Function vect_is_simple_use.

   Same as vect_is_simple_use but also determines the vector operand
   type of OPERAND and stores it to *VECTYPE.  If the definition of
   OPERAND is vect_uninitialized_def, vect_constant_def or
   vect_external_def *VECTYPE will be set to NULL_TREE and the caller
   is responsible to compute the best suited vector type for the
   use in the stmt.  */
, vec_info
*vinfo
,
8242 gimple
**def_stmt
, enum vect_def_type
*dt
, tree
*vectype
)
8244 if (!vect_is_simple_use (operand
, vinfo
, def_stmt
, dt
))
8247 /* Now get a vector type if the def is internal, otherwise supply
8248 NULL_TREE and leave it up to the caller to figure out a proper
8249 type for the use stmt. */
8250 if (*dt
== vect_internal_def
8251 || *dt
== vect_induction_def
8252 || *dt
== vect_reduction_def
8253 || *dt
== vect_double_reduction_def
8254 || *dt
== vect_nested_cycle
)
8256 stmt_vec_info stmt_info
= vinfo_for_stmt (*def_stmt
);
8258 if (STMT_VINFO_IN_PATTERN_P (stmt_info
)
8259 && !STMT_VINFO_RELEVANT (stmt_info
)
8260 && !STMT_VINFO_LIVE_P (stmt_info
))
8261 stmt_info
= vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info
));
8263 *vectype
= STMT_VINFO_VECTYPE (stmt_info
);
8264 gcc_assert (*vectype
!= NULL_TREE
);
8266 else if (*dt
== vect_uninitialized_def
8267 || *dt
== vect_constant_def
8268 || *dt
== vect_external_def
)
8269 *vectype
= NULL_TREE
;
/* Function supportable_widening_operation

   Check whether an operation represented by the code CODE is a
   widening operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   producing a result of type VECTYPE_OUT).

   Widening operations we currently support are NOP (CONVERT), FLOAT
   and WIDEN_MULT.  This function checks if these operations are supported
   by the target platform either directly (via vector tree-codes), or via
   target builtins.

   Output:
   - CODE1 and CODE2 are codes of vector operations to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like char->short->int - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   widening operation (short in the above example).  */
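
/* An illustrative sketch (not GCC code): for a char -> int conversion the
   caller would typically do

     enum tree_code code1, code2;
     int multi_step_cvt = 0;
     vec<tree> interm_types = vNULL;
     if (supportable_widening_operation (NOP_EXPR, stmt, vectype_out,
                                         vectype_in, &code1, &code2,
                                         &multi_step_cvt, &interm_types))
       ...

   and, on a target that only widens one step at a time, would get back
   MULTI_STEP_CVT == 1 with the intermediate short vector type recorded
   in INTERM_TYPES.  */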
bool
supportable_widening_operation (enum tree_code code, gimple *stmt,
                                tree vectype_out, tree vectype_in,
                                enum tree_code *code1, enum tree_code *code2,
                                int *multi_step_cvt,
                                vec<tree> *interm_types)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *vect_loop = NULL;
  machine_mode vec_mode;
  enum insn_code icode1, icode2;
  optab optab1, optab2;
  tree vectype = vectype_in;
  tree wide_vectype = vectype_out;
  enum tree_code c1, c2;
  int i;
  tree prev_type, intermediate_type;
  machine_mode intermediate_mode, prev_mode;
  optab optab3, optab4;

  *multi_step_cvt = 0;
  if (loop_info)
    vect_loop = LOOP_VINFO_LOOP (loop_info);

  switch (code)
    {
    case WIDEN_MULT_EXPR:
      /* The result of a vectorized widening operation usually requires
         two vectors (because the widened results do not fit into one vector).
         The generated vector results would normally be expected to be
         generated in the same order as in the original scalar computation,
         i.e. if 8 results are generated in each vector iteration, they are
         to be organized as follows:
                vect1: [res1,res2,res3,res4],
                vect2: [res5,res6,res7,res8].

         However, in the special case that the result of the widening
         operation is used in a reduction computation only, the order doesn't
         matter (because when vectorizing a reduction we change the order of
         the computation).  Some targets can take advantage of this and
         generate more efficient code.  For example, targets like Altivec,
         that support widen_mult using a sequence of {mult_even,mult_odd}
         generate the following vectors:
                vect1: [res1,res3,res5,res7],
                vect2: [res2,res4,res6,res8].

         When vectorizing outer-loops, we execute the inner-loop sequentially
         (each vectorized inner-loop iteration contributes to VF outer-loop
         iterations in parallel).  We therefore don't allow changing the order
         of the computation in the inner-loop during outer-loop
         vectorization.  */

      /* TODO: Another case in which order doesn't *really* matter is when we
         widen and then contract again, e.g. (short)((int)x * y >> 8).
         Normally, pack_trunc performs an even/odd permute, whereas the
         repack from an even/odd expansion would be an interleave, which
         would be significantly simpler for e.g. AVX2.  */

      /* In any case, in order to avoid duplicating the code below, recurse
         on VEC_WIDEN_MULT_EVEN_EXPR.  If it succeeds, all the return values
         are properly set up for the caller.  If we fail, we'll continue with
         a VEC_WIDEN_MULT_LO/HI_EXPR check.  */
      if (vect_loop
          && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
          && !nested_in_vect_loop_p (vect_loop, stmt)
          && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
                                             stmt, vectype_out, vectype_in,
                                             code1, code2, multi_step_cvt,
                                             interm_types))
        {
          /* Elements in a vector with vect_used_by_reduction property cannot
             be reordered if the use chain with this property does not have
             the same operation.  One such example is s += a * b, where
             elements in a and b cannot be reordered.  Here we check if the
             vector defined by STMT is only directly used in the reduction
             statement.  */
          tree lhs = gimple_assign_lhs (stmt);
          use_operand_p dummy;
          gimple *use_stmt;
          stmt_vec_info use_stmt_info = NULL;
          if (single_imm_use (lhs, &dummy, &use_stmt)
              && (use_stmt_info = vinfo_for_stmt (use_stmt))
              && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
            return true;
        }
      c1 = VEC_WIDEN_MULT_LO_EXPR;
      c2 = VEC_WIDEN_MULT_HI_EXPR;
      break;
    case VEC_WIDEN_MULT_EVEN_EXPR:
      /* Support the recursion induced just above.  */
      c1 = VEC_WIDEN_MULT_EVEN_EXPR;
      c2 = VEC_WIDEN_MULT_ODD_EXPR;
      break;

    case WIDEN_LSHIFT_EXPR:
      c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
      c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
      break;

    CASE_CONVERT:
      c1 = VEC_UNPACK_LO_EXPR;
      c2 = VEC_UNPACK_HI_EXPR;
      break;

    case FLOAT_EXPR:
      c1 = VEC_UNPACK_FLOAT_LO_EXPR;
      c2 = VEC_UNPACK_FLOAT_HI_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
         VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
         computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }
  if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
    std::swap (c1, c2);

  if (code == FIX_TRUNC_EXPR)
    {
      /* The signedness is determined from output operand.  */
      optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
      optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
    }
  else
    {
      optab1 = optab_for_tree_code (c1, vectype, optab_default);
      optab2 = optab_for_tree_code (c2, vectype, optab_default);
    }

  if (!optab1 || !optab2)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
      || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;
  *code2 = c2;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
      && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
    return true;
  /* Check if it's a multi-step conversion that can be done using
     intermediate types.  */

  prev_type = vectype;
  prev_mode = vec_mode;

  if (!CONVERT_EXPR_CODE_P (code))
    return false;

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in the promotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
     not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      intermediate_type
        = lang_hooks.types.type_for_mode (intermediate_mode,
                                          TYPE_UNSIGNED (prev_type));
      optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
      optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);

      if (!optab3 || !optab4
          || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
          || insn_data[icode1].operand[0].mode != intermediate_mode
          || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
          || insn_data[icode2].operand[0].mode != intermediate_mode
          || ((icode1 = optab_handler (optab3, intermediate_mode))
              == CODE_FOR_nothing)
          || ((icode2 = optab_handler (optab4, intermediate_mode))
              == CODE_FOR_nothing))
        break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
          && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
        return true;

      prev_type = intermediate_type;
      prev_mode = intermediate_mode;
    }

  interm_types->release ();
  return false;
}
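
/* A hedged usage sketch, not from the original sources, of how a caller
   that has already computed STMT, VECTYPE_OUT and VECTYPE_IN might query
   widening support before committing to a vectorization strategy:

     enum tree_code code1, code2;
     int multi_step_cvt = 0;
     vec<tree> interm_types = vNULL;
     if (!supportable_widening_operation (NOP_EXPR, stmt, vectype_out,
                                          vectype_in, &code1, &code2,
                                          &multi_step_cvt, &interm_types))
       return false;

   On success, MULTI_STEP_CVT intermediate vector types have been pushed
   onto INTERM_TYPES and CODE1/CODE2 name the hi/lo (or even/odd) tree
   codes to emit.  */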
/* Function supportable_narrowing_operation

   Check whether an operation represented by the code CODE is a
   narrowing operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   and producing a result of type VECTYPE_OUT).

   Narrowing operations we currently support are NOP (CONVERT) and
   FIX_TRUNC.  This function checks if these operations are supported by
   the target platform directly via vector tree-codes.

   Output:
   - CODE1 is the code of a vector operation to be used when
     vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
     case of multi-step conversion (like int->short->char - in that case
     MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
     narrowing operation (short in the above example).  */
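
/* Illustrative sketch only; the concrete codes below are target dependent
   and are assumptions here, not taken from a particular port.  Narrowing a
   V4SI input to a V16QI result with CODE == NOP_EXPR would typically come
   back as

       *code1 = VEC_PACK_TRUNC_EXPR,
       *multi_step_cvt = 1, interm_types = { V8HI },

   i.e. the int->char demotion is performed as int->short->char.  */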
bool
supportable_narrowing_operation (enum tree_code code,
                                 tree vectype_out, tree vectype_in,
                                 enum tree_code *code1, int *multi_step_cvt,
                                 vec<tree> *interm_types)
{
  machine_mode vec_mode;
  enum insn_code icode1;
  optab optab1, interm_optab;
  tree vectype = vectype_in;
  tree narrow_vectype = vectype_out;
  enum tree_code c1;
  tree intermediate_type;
  machine_mode intermediate_mode, prev_mode;
  int i;
  bool uns;

  *multi_step_cvt = 0;
  switch (code)
    {
    CASE_CONVERT:
      c1 = VEC_PACK_TRUNC_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      c1 = VEC_PACK_FIX_TRUNC_EXPR;
      break;

    case FLOAT_EXPR:
      /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
         tree code and optabs used for computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }
  if (code == FIX_TRUNC_EXPR)
    /* The signedness is determined from output operand.  */
    optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
  else
    optab1 = optab_for_tree_code (c1, vectype, optab_default);

  if (!optab1)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
    return true;
  /* Check if it's a multi-step conversion that can be done using
     intermediate types.  */
  prev_mode = vec_mode;
  if (code == FIX_TRUNC_EXPR)
    uns = TYPE_UNSIGNED (vectype_out);
  else
    uns = TYPE_UNSIGNED (vectype);

  /* For multi-step FIX_TRUNC_EXPR prefer a signed float-to-integer
     conversion over an unsigned one, as unsigned FIX_TRUNC_EXPR is often
     more costly than signed.  */
  if (code == FIX_TRUNC_EXPR && uns)
    {
      enum insn_code icode2;

      intermediate_type
        = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
      interm_optab
        = optab_for_tree_code (c1, intermediate_type, optab_default);
      if (interm_optab != unknown_optab
          && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
          && insn_data[icode1].operand[0].mode
             == insn_data[icode2].operand[0].mode)
        {
          uns = false;
          optab1 = interm_optab;
          icode1 = icode2;
        }
    }
  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in the narrowing sequence.  We try
     MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      intermediate_type
        = lang_hooks.types.type_for_mode (intermediate_mode, uns);
      interm_optab
        = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
                               optab_default);
      if (!interm_optab
          || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
          || insn_data[icode1].operand[0].mode != intermediate_mode
          || ((icode1 = optab_handler (interm_optab, intermediate_mode))
              == CODE_FOR_nothing))
        break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
        return true;

      prev_mode = intermediate_mode;
      optab1 = interm_optab;
    }

  interm_types->release ();
  return false;
}
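
/* A hedged usage sketch, not from the original sources, mirroring the one
   after supportable_widening_operation: how a caller with VECTYPE_OUT and
   VECTYPE_IN already computed might query narrowing support:

     enum tree_code code1;
     int multi_step_cvt = 0;
     vec<tree> interm_types = vNULL;
     if (!supportable_narrowing_operation (NOP_EXPR, vectype_out, vectype_in,
                                           &code1, &multi_step_cvt,
                                           &interm_types))
       return false;

   On success, CODE1 is the vector pack code to emit and INTERM_TYPES holds
   the MULTI_STEP_CVT intermediate vector types.  */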