/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003-2014 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "stor-layout.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop.h"
#include "tree-scalar-evolution.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "diagnostic-core.h"
#include "tree-vectorizer.h"

/* For lang_hooks.types.type_for_mode.  */
#include "langhooks.h"
/* Return the vectorized type for the given statement.  */

tree
stmt_vectype (struct _stmt_vec_info *stmt_info)
{
  return STMT_VINFO_VECTYPE (stmt_info);
}
/* Return TRUE iff the given statement is in an inner loop relative to
   the loop being vectorized.  */

bool
stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
{
  gimple stmt = STMT_VINFO_STMT (stmt_info);
  basic_block bb = gimple_bb (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop;

  if (!loop_vinfo)
    return false;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  return (bb->loop_father == loop->inner);
}
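
/* Example (illustrative): when vectorizing an outer loop, LOOP_VINFO
   describes the outer loop, and a statement of the nested loop satisfies
   this predicate because its block's loop_father is loop->inner.  */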
/* Record the cost of a statement, either by directly informing the
   target model or by saving it in a vector for later processing.
   Return a preliminary estimate of the statement's cost.  */

unsigned
record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
		  enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
		  int misalign, enum vect_cost_model_location where)
{
  if (body_cost_vec)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      add_stmt_info_to_vec (body_cost_vec, count, kind,
			    stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
			    misalign);
      return (unsigned)
	(builtin_vectorization_cost (kind, vectype, misalign) * count);
    }
  else
    {
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
      bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
      void *target_cost_data;

      if (loop_vinfo)
	target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
      else
	target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);

      return add_stmt_cost (target_cost_data, count, kind, stmt_info,
			    misalign, where);
    }
}
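
/* Usage sketch (illustrative): a body cost is typically recorded as, e.g.,

     inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
				     stmt_info, 0, vect_body);

   whereas passing a NULL BODY_COST_VEC hands the cost straight to the
   target's add_stmt_cost hook.  */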
/* Return a variable of type ELEM_TYPE[NELEMS].  */

static tree
create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
			 "vect_array");
}
/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT and the vector is associated
   with scalar destination SCALAR_DEST.  */

static tree
read_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
		   tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;
  gimple new_stmt;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return vect_name;
}
/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT.  */

static void
write_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree vect,
		    tree array, unsigned HOST_WIDE_INT n)
{
  tree array_ref;
  gimple new_stmt;

  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
}
/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

static tree
create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
{
  tree mem_ref, alias_ptr_type;

  alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
  return mem_ref;
}
/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (vec<gimple> *worklist, gimple stmt,
		    enum vect_relevant relevant, bool live_p,
		    bool used_in_pattern)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
  gimple pattern_stmt;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "mark relevant %d, live %d.\n", relevant, live_p);

  /* If this stmt is an original stmt in a pattern, we might need to mark its
     related pattern stmt instead of the original stmt.  However, such stmts
     may have their own uses that are not in any pattern, in such cases the
     stmt itself should be marked.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      bool found = false;
      if (!used_in_pattern)
	{
	  imm_use_iterator imm_iter;
	  use_operand_p use_p;
	  gimple use_stmt;
	  tree lhs;
	  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
	  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

	  if (is_gimple_assign (stmt))
	    lhs = gimple_assign_lhs (stmt);
	  else
	    lhs = gimple_call_lhs (stmt);

	  /* This use is out of pattern use; if LHS has other uses that are
	     pattern uses, we should mark the stmt itself, and not the pattern
	     stmt.  */
	  if (lhs && TREE_CODE (lhs) == SSA_NAME)
	    FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
	      {
		if (is_gimple_debug (USE_STMT (use_p)))
		  continue;
		use_stmt = USE_STMT (use_p);

		if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
		  continue;

		if (vinfo_for_stmt (use_stmt)
		    && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
		  {
		    found = true;
		    break;
		  }
	      }
	}

      if (!found)
	{
	  /* This is the last stmt in a sequence that was detected as a
	     pattern that can potentially be vectorized.  Don't mark the stmt
	     as relevant/live because it's not going to be vectorized.
	     Instead mark the pattern-stmt that replaces it.  */

	  pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "last stmt in pattern. don't mark"
			     " relevant/live.\n");
	  stmt_info = vinfo_for_stmt (pattern_stmt);
	  gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
	  save_relevant = STMT_VINFO_RELEVANT (stmt_info);
	  save_live_p = STMT_VINFO_LIVE_P (stmt_info);
	  stmt = pattern_stmt;
	}
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "already marked relevant/live.\n");
      return;
    }

  worklist->safe_push (stmt);
}
/* Function vect_stmt_relevant_p.

   Return true if STMT in loop that is represented by LOOP_VINFO is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - control stmts in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */

static bool
vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
		      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
	 != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt))
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vec_stmt_relevant_p: stmt has vdefs.\n");
	*relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
	{
	  basic_block bb = gimple_bb (USE_STMT (use_p));
	  if (!flow_bb_inside_loop_p (loop, bb))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "vec_stmt_relevant_p: used out of loop.\n");

	      if (is_gimple_debug (USE_STMT (use_p)))
		continue;

	      /* We expect all such uses to be in the loop exit phis
		 (because of loop closed form)   */
	      gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
	      gcc_assert (bb == single_exit (loop)->dest);

	      *live_p = true;
	    }
	}
    }

  return (*live_p || *relevant);
}
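
/* Example (illustrative): in

     for (i = 0; i < n; i++)
       sum += a[i];

   the statement computing SUM is marked live because SUM is used after the
   loop, while a store such as "a[i] = x" is marked relevant because it has
   a vdef.  */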
/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref.  FORNOW this means that it's of one of
     the following forms:
     -1- ARRAY_REF = var
     -2- var = ARRAY_REF
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))
    {
      if (is_gimple_call (stmt)
	  && gimple_call_internal_p (stmt))
	switch (gimple_call_internal_fn (stmt))
	  {
	  case IFN_MASK_STORE:
	    operand = gimple_call_arg (stmt, 3);
	    if (operand == use)
	      return true;
	    /* FALLTHRU */
	  case IFN_MASK_LOAD:
	    operand = gimple_call_arg (stmt, 2);
	    if (operand == use)
	      return true;
	    break;
	  default:
	    break;
	  }
      return false;
    }

  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}
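
/* Example (illustrative): for "a[i] = x" the use of I occurs only in the
   array index, so this returns false for I, whereas X is the stored
   operand and the function returns true for it.  */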
/*
   Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
   - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
     be performed.

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
       STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array indexing),
   which does not need to be directly vectorized, then the liveness/relevance
   of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
   skip DEF_STMT because it has already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
   be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */

static bool
process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
	     enum vect_relevant relevant, vec<gimple> *worklist,
	     bool force)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, stmt, loop_vinfo, NULL, &def_stmt, &def, &dt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: unsupported use in stmt.\n");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "reduc-stmt defining reduc-phi in the same nest.\n");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
	dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
		  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
	outer-loop-header-bb:
		d = def_stmt
	inner-loop:
		stmt # use (d)
	outer-loop-tail-bb:
		...		  */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "outer-loop def-stmt defining inner-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
	    vect_used_in_scope : vect_unused_in_scope;
	  break;

	case vect_used_in_outer_by_reduction:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_by_reduction;
	  break;

	case vect_used_in_outer:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_in_scope;
	  break;

	case vect_used_in_scope:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
	outer-loop-header-bb:
		...
	inner-loop:
		d = def_stmt
	outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
		stmt # use (d)		*/
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "inner-loop def-stmt defining outer-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
	    || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
		      vect_used_in_outer_by_reduction : vect_unused_in_scope;
	  break;

	case vect_used_by_reduction:
	  relevant = vect_used_in_outer_by_reduction;
	  break;

	case vect_used_in_scope:
	  relevant = vect_used_in_outer;
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  vect_mark_relevant (worklist, def_stmt, relevant, live_p,
		      is_pattern_stmt_p (stmt_vinfo));
  return true;
}
/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.	 T1 = a[T0]

   3.    j = j + 1

   Stmts 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */

bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple stmt;
  unsigned int i;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple phi;
  bool live_p;
  enum vect_relevant relevant, tmp_relevant;
  enum vect_def_type def_type;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "=== vect_mark_stmts_to_be_vectorized ===\n");

  auto_vec<gimple, 64> worklist;

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  phi = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	      dump_printf (MSG_NOTE, "\n");
	    }

	  if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, phi, relevant, live_p, false);
	}
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  stmt = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	      dump_printf (MSG_NOTE, "\n");
	    }

	  if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, stmt, relevant, live_p, false);
	}
    }

  /* 2. Process_worklist */
  while (worklist.length () > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = worklist.pop ();
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	  dump_printf (MSG_NOTE, "\n");
	}

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines it
	 (DEF_STMT) as relevant/irrelevant and live/dead according to the
	 liveness and relevance properties of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
      live_p = STMT_VINFO_LIVE_P (stmt_vinfo);

      /* Generally, the liveness and relevance properties of STMT are
	 propagated as is to the DEF_STMTs of its USEs:
	  live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
	  relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)

	 One exception is when STMT has been identified as defining a reduction
	 variable; in this case we set the liveness/relevance as follows:
	   live_p = false
	   relevant = vect_used_by_reduction
	 This is because we distinguish between two kinds of relevant stmts -
	 those that are used by a reduction computation, and those that are
	 (also) used by a regular computation.  This allows us later on to
	 identify stmts that are used solely by a reduction, and therefore the
	 order of the results that they produce does not have to be kept.  */

      def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
      tmp_relevant = relevant;
      switch (def_type)
	{
	  case vect_reduction_def:
	    switch (tmp_relevant)
	      {
		case vect_unused_in_scope:
		  relevant = vect_used_by_reduction;
		  break;

		case vect_used_by_reduction:
		  if (gimple_code (stmt) == GIMPLE_PHI)
		    break;
		  /* fall through */

		default:
		  if (dump_enabled_p ())
		    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				     "unsupported use of reduction.\n");
		  return false;
	      }

	    live_p = false;
	    break;

	  case vect_nested_cycle:
	    if (tmp_relevant != vect_unused_in_scope
		&& tmp_relevant != vect_used_in_outer_by_reduction
		&& tmp_relevant != vect_used_in_outer)
	      {
		if (dump_enabled_p ())
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "unsupported use of nested cycle.\n");

		return false;
	      }

	    live_p = false;
	    break;

	  case vect_double_reduction_def:
	    if (tmp_relevant != vect_unused_in_scope
		&& tmp_relevant != vect_used_by_reduction)
	      {
		if (dump_enabled_p ())
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "unsupported use of double reduction.\n");

		return false;
	      }

	    live_p = false;
	    break;

	  default:
	    break;
	}

      if (is_pattern_stmt_p (stmt_vinfo))
	{
	  /* Pattern statements are not inserted into the code, so
	     FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
	     have to scan the RHS or function arguments instead.  */
	  if (is_gimple_assign (stmt))
	    {
	      enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
	      tree op = gimple_assign_rhs1 (stmt);

	      i = 1;
	      if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
		{
		  if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
				    live_p, relevant, &worklist, false)
		      || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
				       live_p, relevant, &worklist, false))
		    return false;
		  i = 2;
		}
	      for (; i < gimple_num_ops (stmt); i++)
		{
		  op = gimple_op (stmt, i);
		  if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
				    &worklist, false))
		    return false;
		}
	    }
	  else if (is_gimple_call (stmt))
	    {
	      for (i = 0; i < gimple_call_num_args (stmt); i++)
		{
		  tree arg = gimple_call_arg (stmt, i);
		  if (!process_use (stmt, arg, loop_vinfo, live_p, relevant,
				    &worklist, false))
		    return false;
		}
	    }
	}
      else
	FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
	  {
	    tree op = USE_FROM_PTR (use_p);
	    if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
			      &worklist, false))
	      return false;
	  }

      if (STMT_VINFO_GATHER_P (stmt_vinfo))
	{
	  tree off;
	  tree decl = vect_check_gather (stmt, loop_vinfo, NULL, &off, NULL);
	  gcc_assert (decl);
	  if (!process_use (stmt, off, loop_vinfo, live_p, relevant,
			    &worklist, true))
	    return false;
	}
    } /* while worklist */

  return true;
}
/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

static void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
			enum vect_def_type *dt,
			stmt_vector_for_cost *prologue_cost_vec,
			stmt_vector_for_cost *body_cost_vec)
{
  int i;
  int inside_cost = 0, prologue_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += record_stmt_cost (prologue_cost_vec, 1, vector_stmt,
					 stmt_info, 0, vect_prologue);

  /* Pass the inside-of-loop statements to the target-specific cost model.  */
  inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
				  stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_simple_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Model cost for type demotion and promotion operations.  PWR is normally
   zero for single-step promotions and demotions.  It will be one if
   two-step promotion/demotion is required, and so on.  Each additional
   step doubles the number of instructions required.  */

static void
vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
				    enum vect_def_type *dt, int pwr)
{
  int i, tmp;
  int inside_cost = 0, prologue_cost = 0;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  void *target_cost_data;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (loop_vinfo)
    target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
  else
    target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);

  for (i = 0; i < pwr + 1; i++)
    {
      tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
	(i + 1) : i;
      inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
				    vec_promote_demote, stmt_info, 0,
				    vect_body);
    }

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
				      stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_promotion_demotion_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Function vect_cost_group_size

   For grouped load or store, return the group_size only if it is the first
   load or store of a group, else return 1.  This ensures that group size is
   only returned once per group.  */

static int
vect_cost_group_size (stmt_vec_info stmt_info)
{
  gimple first_stmt = GROUP_FIRST_ELEMENT (stmt_info);

  if (first_stmt == STMT_VINFO_STMT (stmt_info))
    return GROUP_SIZE (stmt_info);

  return 1;
}
/* Function vect_model_store_cost

   Models cost for stores.  In the case of grouped accesses, one access
   has the overhead of the grouped access attributed to it.  */

void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
		       bool store_lanes_p, enum vect_def_type dt,
		       slp_tree slp_node,
		       stmt_vector_for_cost *prologue_cost_vec,
		       stmt_vector_for_cost *body_cost_vec)
{
  int group_size;
  unsigned int inside_cost = 0, prologue_cost = 0;
  struct data_reference *first_dr;
  gimple first_stmt;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (dt == vect_constant_def || dt == vect_external_def)
    prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
				       stmt_info, 0, vect_prologue);

  /* Grouped access?  */
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (slp_node)
	{
	  first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
	  group_size = 1;
	}
      else
	{
	  first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
	  group_size = vect_cost_group_size (stmt_info);
	}

      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a grouped access.  */
  else
    {
      group_size = 1;
      first_dr = STMT_VINFO_DATA_REF (stmt_info);
    }

  /* We assume that the cost of a single store-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate stores.  If a grouped
     access is instead being provided by a permute-and-store operation,
     include the cost of the permutes.  */
  if (!store_lanes_p && group_size > 1)
    {
      /* Uses a high and low interleave operation for each needed permute.  */
      int nstmts = ncopies * exact_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_store_cost: strided group_size = %d .\n",
			 group_size);
    }

  /* Costs of the stores.  */
  vect_get_store_cost (first_dr, ncopies, &inside_cost, body_cost_vec);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_store_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Calculate cost of DR's memory access.  */
void
vect_get_store_cost (struct data_reference *dr, int ncopies,
		     unsigned int *inside_cost,
		     stmt_vector_for_cost *body_cost_vec)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vector_store, stmt_info, 0,
					  vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: aligned.\n");
	break;
      }

    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned store.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_store, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: unaligned supported by "
			   "hardware.\n");
	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_store_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_model_load_cost

   Models cost for loads.  In the case of grouped accesses, the last access
   has the overhead of the grouped access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
		      bool load_lanes_p, slp_tree slp_node,
		      stmt_vector_for_cost *prologue_cost_vec,
		      stmt_vector_for_cost *body_cost_vec)
{
  int group_size;
  gimple first_stmt;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
  unsigned int inside_cost = 0, prologue_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* Grouped accesses?  */
  first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && first_stmt && !slp_node)
    {
      group_size = vect_cost_group_size (stmt_info);
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a grouped access.  */
  else
    {
      group_size = 1;
      first_dr = dr;
    }

  /* We assume that the cost of a single load-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate loads.  If a grouped
     access is instead being provided by a load-and-permute operation,
     include the cost of the permutes.  */
  if (!load_lanes_p && group_size > 1)
    {
      /* Uses even and odd extract operations or shuffle operations
	 for each needed permute.  */
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_load_cost: strided group_size = %d .\n",
			 group_size);
    }

  /* The loads themselves.  */
  if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
    {
      /* N scalar loads plus gathering them into a vector.  */
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				       scalar_load, stmt_info, 0, vect_body);
      inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
				       stmt_info, 0, vect_body);
    }
  else
    vect_get_load_cost (first_dr, ncopies,
			((!STMT_VINFO_GROUPED_ACCESS (stmt_info))
			 || group_size > 1 || slp_node),
			&inside_cost, &prologue_cost,
			prologue_cost_vec, body_cost_vec, true);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_load_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Calculate cost of DR's memory access.  */
void
vect_get_load_cost (struct data_reference *dr, int ncopies,
		    bool add_realign_cost, unsigned int *inside_cost,
		    unsigned int *prologue_cost,
		    stmt_vector_for_cost *prologue_cost_vec,
		    stmt_vector_for_cost *body_cost_vec,
		    bool record_prologue_costs)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: aligned.\n");

	break;
      }
    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned load.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_load, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned supported by "
			   "hardware.\n");

	break;
      }
    case dr_explicit_realign:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
					  vector_load, stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vec_perm, stmt_info, 0, vect_body);

	/* FIXME: If the misalignment remains fixed across the iterations of
	   the containing loop, the following cost should be added to the
	   prologue costs.  */
	if (targetm.vectorize.builtin_mask_for_load)
	  *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
					    stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign\n");

	break;
      }
    case dr_explicit_realign_optimized:
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned software "
			   "pipelined.\n");

	/* Unaligned software pipeline has a load of an address, an initial
	   load, and possibly a mask operation to "prime" the loop.  However,
	   if this is an access in a group of loads, which provide grouped
	   access, then the above cost should only be considered for one
	   access in the group.  Inside the loop, there is a load op
	   and a realignment op.  */

	if (add_realign_cost && record_prologue_costs)
	  {
	    *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
						vector_stmt, stmt_info,
						0, vect_prologue);
	    if (targetm.vectorize.builtin_mask_for_load)
	      *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
						  vector_stmt, stmt_info,
						  0, vect_prologue);
	  }

	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign optimized"
			   "\n");

	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_load_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
   the loop preheader for the vectorized stmt STMT.  */

static void
vect_init_vector_1 (gimple stmt, gimple new_stmt, gimple_stmt_iterator *gsi)
{
  if (gsi)
    vect_finish_stmt_generation (stmt, new_stmt, gsi);
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
	{
	  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
	  basic_block new_bb;
	  edge pe;

	  if (nested_in_vect_loop_p (loop, stmt))
	    loop = loop->inner;

	  pe = loop_preheader_edge (loop);
	  new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
	  gcc_assert (!new_bb);
	}
      else
	{
	  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
	  basic_block bb;
	  gimple_stmt_iterator gsi_bb_start;

	  gcc_assert (bb_vinfo);
	  bb = BB_VINFO_BB (bb_vinfo);
	  gsi_bb_start = gsi_after_labels (bb);
	  gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
	}
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "created new init_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
      dump_printf (MSG_NOTE, "\n");
    }
}
/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new variable of type
   TYPE with the value VAL.  If TYPE is a vector type and VAL does not have
   vector type a vector with all elements equal to VAL is created first.
   Place the initialization at BSI if it is not NULL.  Otherwise, place the
   initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */

tree
vect_init_vector (gimple stmt, tree val, tree type, gimple_stmt_iterator *gsi)
{
  tree new_var;
  gimple init_stmt;
  tree vec_oprnd;
  tree new_temp;

  if (TREE_CODE (type) == VECTOR_TYPE
      && TREE_CODE (TREE_TYPE (val)) != VECTOR_TYPE)
    {
      if (!types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
	{
	  if (CONSTANT_CLASS_P (val))
	    val = fold_unary (VIEW_CONVERT_EXPR, TREE_TYPE (type), val);
	  else
	    {
	      new_temp = make_ssa_name (TREE_TYPE (type), NULL);
	      init_stmt = gimple_build_assign_with_ops (NOP_EXPR,
							new_temp, val,
							NULL_TREE);
	      vect_init_vector_1 (stmt, init_stmt, gsi);
	      val = new_temp;
	    }
	}
      val = build_vector_from_val (type, val);
    }

  new_var = vect_get_new_vect_var (type, vect_simple_var, "cst_");
  init_stmt = gimple_build_assign (new_var, val);
  new_temp = make_ssa_name (new_var, init_stmt);
  gimple_assign_set_lhs (init_stmt, new_temp);
  vect_init_vector_1 (stmt, init_stmt, gsi);
  vec_oprnd = gimple_assign_lhs (init_stmt);
  return vec_oprnd;
}
/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT.  This function returns a (vector) def that will be
   used in the vectorized stmt for STMT.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector def
   needs to be introduced.  */

tree
vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
{
  tree vec_oprnd;
  gimple vec_stmt;
  gimple def_stmt;
  stmt_vec_info def_stmt_info = NULL;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  unsigned int nunits;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  tree def;
  enum vect_def_type dt;
  bool is_simple_use;
  tree vector_type;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_get_vec_def_for_operand: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
      dump_printf (MSG_NOTE, "\n");
    }

  is_simple_use = vect_is_simple_use (op, stmt, loop_vinfo, NULL,
				      &def_stmt, &def, &dt);
  gcc_assert (is_simple_use);
  if (dump_enabled_p ())
    {
      int loc_printed = 0;
      if (def)
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "def = ");
	  loc_printed = 1;
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, def);
	  dump_printf (MSG_NOTE, "\n");
	}
      if (def_stmt)
	{
	  if (loc_printed)
	    dump_printf (MSG_NOTE, "  def_stmt = ");
	  else
	    dump_printf_loc (MSG_NOTE, vect_location, "  def_stmt = ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
	  dump_printf (MSG_NOTE, "\n");
	}
    }

  switch (dt)
    {
    /* Case 1: operand is a constant.  */
    case vect_constant_def:
      {
	vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
	gcc_assert (vector_type);
	nunits = TYPE_VECTOR_SUBPARTS (vector_type);

	if (scalar_def)
	  *scalar_def = op;

	/* Create 'vect_cst_ = {cst,cst,...,cst}'  */
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "Create vector_cst. nunits = %d\n", nunits);

	return vect_init_vector (stmt, op, vector_type, NULL);
      }

    /* Case 2: operand is defined outside the loop - loop invariant.  */
    case vect_external_def:
      {
	vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
	gcc_assert (vector_type);

	if (scalar_def)
	  *scalar_def = def;

	/* Create 'vec_inv = {inv,inv,..,inv}'  */
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location, "Create vector_inv.\n");

	return vect_init_vector (stmt, def, vector_type, NULL);
      }

    /* Case 3: operand is defined inside the loop.  */
    case vect_internal_def:
      {
	if (scalar_def)
	  *scalar_def = NULL/* FIXME tuples: def_stmt*/;

	/* Get the def from the vectorized stmt.  */
	def_stmt_info = vinfo_for_stmt (def_stmt);

	vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	/* Get vectorized pattern statement.  */
	if (!vec_stmt
	    && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
	    && !STMT_VINFO_RELEVANT (def_stmt_info))
	  vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
		       STMT_VINFO_RELATED_STMT (def_stmt_info)));
	gcc_assert (vec_stmt);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else if (is_gimple_call (vec_stmt))
	  vec_oprnd = gimple_call_lhs (vec_stmt);
	else
	  vec_oprnd = gimple_assign_lhs (vec_stmt);
	return vec_oprnd;
      }

    /* Case 4: operand is defined by a loop header phi - reduction  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
      {
	struct loop *loop;

	gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
	loop = (gimple_bb (def_stmt))->loop_father;

	/* Get the def before the loop  */
	op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
	return get_initial_def_for_reduction (stmt, op, scalar_def);
      }

    /* Case 5: operand is defined by loop-header phi - induction.  */
    case vect_induction_def:
      {
	gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);

	/* Get the def from the vectorized stmt.  */
	def_stmt_info = vinfo_for_stmt (def_stmt);
	vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else
	  vec_oprnd = gimple_get_lhs (vec_stmt);
	return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_get_vec_def_for_stmt_copy

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
   of the stmt that defines VEC_OPRND.
   DT is the type of the vector def VEC_OPRND.

   Context:
	In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of the
   vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:         vectorized into:        STMT_VINFO_RELATED_STMT

   S1: x = load         VS1.0:  vx.0 = memref0      VS1.1
                        VS1.1:  vx.1 = memref1      VS1.2
                        VS1.2:  vx.2 = memref2      VS1.3
                        VS1.3:  vx.3 = memref3

   S2: z = x + ...      VSnew.0:  vz0 = vx.0 + ...  VSnew.1
                        VSnew.1:  vz1 = vx.1 + ...  VSnew.2
                        VSnew.2:  vz2 = vx.2 + ...  VSnew.3
                        VSnew.3:  vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
	To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

	To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

	For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
	vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
	vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
	vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2);  */

tree
vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
{
  gimple vec_stmt_for_operand;
  stmt_vec_info def_stmt_info;

  /* Do nothing; can reuse same def.  */
  if (dt == vect_external_def || dt == vect_constant_def)
    return vec_oprnd;

  vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
  def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
  gcc_assert (def_stmt_info);
  vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (vec_stmt_for_operand);
  vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
    vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
  else
    vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  return vec_oprnd;
}
/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */

static void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
				 vec<tree> *vec_oprnds0,
				 vec<tree> *vec_oprnds1)
{
  tree vec_oprnd = vec_oprnds0->pop ();

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  vec_oprnds0->quick_push (vec_oprnd);

  if (vec_oprnds1 && vec_oprnds1->length ())
    {
      vec_oprnd = vec_oprnds1->pop ();
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      vec_oprnds1->quick_push (vec_oprnd);
    }
}
/* Get vectorized definitions for OP0 and OP1.
   REDUC_INDEX is the index of reduction operand in case of reduction,
   and -1 otherwise.  */

void
vect_get_vec_defs (tree op0, tree op1, gimple stmt,
		   vec<tree> *vec_oprnds0,
		   vec<tree> *vec_oprnds1,
		   slp_tree slp_node, int reduc_index)
{
  if (slp_node)
    {
      int nops = (op1 == NULL_TREE) ? 1 : 2;
      auto_vec<tree> ops (nops);
      auto_vec<vec<tree> > vec_defs (nops);

      ops.quick_push (op0);
      if (op1)
	ops.quick_push (op1);

      vect_get_slp_defs (ops, slp_node, &vec_defs, reduc_index);

      *vec_oprnds0 = vec_defs[0];
      if (op1)
	*vec_oprnds1 = vec_defs[1];
    }
  else
    {
      tree vec_oprnd;

      vec_oprnds0->create (1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
      vec_oprnds0->quick_push (vec_oprnd);

      if (op1)
	{
	  vec_oprnds1->create (1);
	  vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
	  vec_oprnds1->quick_push (vec_oprnd);
	}
    }
}
/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

void
vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
			     gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);

  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  if (!gsi_end_p (*gsi)
      && gimple_has_mem_ops (vec_stmt))
    {
      gimple at_stmt = gsi_stmt (*gsi);
      tree vuse = gimple_vuse (at_stmt);
      if (vuse && TREE_CODE (vuse) == SSA_NAME)
	{
	  tree vdef = gimple_vdef (at_stmt);
	  gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
	  /* If we have an SSA vuse and insert a store, update virtual
	     SSA form to avoid triggering the renamer.  Do so only
	     if we can easily see all uses - which is what almost always
	     happens with the way vectorized stmts are inserted.  */
	  if ((vdef && TREE_CODE (vdef) == SSA_NAME)
	      && ((is_gimple_assign (vec_stmt)
		   && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
		  || (is_gimple_call (vec_stmt)
		      && !(gimple_call_flags (vec_stmt)
			   & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
	    {
	      tree new_vdef = copy_ssa_name (vuse, vec_stmt);
	      gimple_set_vdef (vec_stmt, new_vdef);
	      SET_USE (gimple_vuse_op (at_stmt), new_vdef);
	    }
	}
    }
  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
						   bb_vinfo));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
      dump_printf (MSG_NOTE, "\n");
    }

  gimple_set_location (vec_stmt, gimple_location (stmt));

  /* While EH edges will generally prevent vectorization, stmt might
     e.g. be in a must-not-throw region.  Ensure newly created stmts
     that could throw are part of the same region.  */
  int lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
    add_stmt_to_eh_lp (vec_stmt, lp_nr);
}
/* Checks if CALL can be vectorized in type VECTYPE.  Returns
   a function declaration if the target has a vectorized version
   of the function, or NULL_TREE if the function cannot be vectorized.  */

tree
vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
{
  tree fndecl = gimple_call_fndecl (call);

  /* We only handle functions that do not read or clobber memory -- i.e.
     const or novops ones.  */
  if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
    return NULL_TREE;

  if (!fndecl
      || TREE_CODE (fndecl) != FUNCTION_DECL
      || !DECL_BUILT_IN (fndecl))
    return NULL_TREE;

  return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
							vectype_in);
}
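
/* For instance (illustrative), the target hook may map a const built-in
   such as sqrt to a vector sqrt of the appropriate mode; a NULL_TREE
   result means the call cannot be vectorized.  */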

static tree permute_vec_elements (tree, tree, tree, gimple,
				  gimple_stmt_iterator *);
1732 /* Function vectorizable_mask_load_store.
1734 Check if STMT performs a conditional load or store that can be vectorized.
1735 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1736 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
1737 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1740 vectorizable_mask_load_store (gimple stmt
, gimple_stmt_iterator
*gsi
,
1741 gimple
*vec_stmt
, slp_tree slp_node
)
1743 tree vec_dest
= NULL
;
1744 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
1745 stmt_vec_info prev_stmt_info
;
1746 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
1747 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
1748 bool nested_in_vect_loop
= nested_in_vect_loop_p (loop
, stmt
);
1749 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
);
1750 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
1754 tree dataref_ptr
= NULL_TREE
;
1756 int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
1760 tree gather_base
= NULL_TREE
, gather_off
= NULL_TREE
;
1761 tree gather_off_vectype
= NULL_TREE
, gather_decl
= NULL_TREE
;
1762 int gather_scale
= 1;
1763 enum vect_def_type gather_dt
= vect_unknown_def_type
;
1768 enum vect_def_type dt
;
1770 if (slp_node
!= NULL
)
1773 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
1774 gcc_assert (ncopies
>= 1);
1776 is_store
= gimple_call_internal_fn (stmt
) == IFN_MASK_STORE
;
1777 mask
= gimple_call_arg (stmt
, 2);
1778 if (TYPE_PRECISION (TREE_TYPE (mask
))
1779 != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype
))))
1782 /* FORNOW. This restriction should be relaxed. */
1783 if (nested_in_vect_loop
&& ncopies
> 1)
1785 if (dump_enabled_p ())
1786 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1787 "multiple types in nested loop.");
1791 if (!STMT_VINFO_RELEVANT_P (stmt_info
))
1794 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
1797 if (!STMT_VINFO_DATA_REF (stmt_info
))
1800 elem_type
= TREE_TYPE (vectype
);
1802 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
))
1805 if (STMT_VINFO_STRIDE_LOAD_P (stmt_info
))
1808 if (STMT_VINFO_GATHER_P (stmt_info
))
1812 gather_decl
= vect_check_gather (stmt
, loop_vinfo
, &gather_base
,
1813 &gather_off
, &gather_scale
);
1814 gcc_assert (gather_decl
);
1815 if (!vect_is_simple_use_1 (gather_off
, NULL
, loop_vinfo
, NULL
,
1816 &def_stmt
, &def
, &gather_dt
,
1817 &gather_off_vectype
))
1819 if (dump_enabled_p ())
1820 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1821 "gather index use not simple.");
1825 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gather_decl
));
1827 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist
))));
1828 if (TREE_CODE (masktype
) == INTEGER_TYPE
)
1830 if (dump_enabled_p ())
1831 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1832 "masked gather with integer mask not supported.");
1836 else if (tree_int_cst_compare (nested_in_vect_loop
1837 ? STMT_VINFO_DR_STEP (stmt_info
)
1838 : DR_STEP (dr
), size_zero_node
) <= 0)
1840 else if (!VECTOR_MODE_P (TYPE_MODE (vectype
))
1841 || !can_vec_mask_load_store_p (TYPE_MODE (vectype
), !is_store
))
1844 if (TREE_CODE (mask
) != SSA_NAME
)
1847 if (!vect_is_simple_use (mask
, stmt
, loop_vinfo
, NULL
,
1848 &def_stmt
, &def
, &dt
))
1853 tree rhs
= gimple_call_arg (stmt
, 3);
1854 if (!vect_is_simple_use (rhs
, stmt
, loop_vinfo
, NULL
,
1855 &def_stmt
, &def
, &dt
))
1859 if (!vec_stmt
) /* transformation not required. */
1861 STMT_VINFO_TYPE (stmt_info
) = call_vec_info_type
;
1863 vect_model_store_cost (stmt_info
, ncopies
, false, dt
,
1866 vect_model_load_cost (stmt_info
, ncopies
, false, NULL
, NULL
, NULL
);
1872 if (STMT_VINFO_GATHER_P (stmt_info
))
1874 tree vec_oprnd0
= NULL_TREE
, op
;
1875 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gather_decl
));
1876 tree rettype
, srctype
, ptrtype
, idxtype
, masktype
, scaletype
;
1877 tree ptr
, vec_mask
= NULL_TREE
, mask_op
= NULL_TREE
, var
, scale
;
1878 tree perm_mask
= NULL_TREE
, prev_res
= NULL_TREE
;
1879 tree mask_perm_mask
= NULL_TREE
;
1880 edge pe
= loop_preheader_edge (loop
);
1883 enum { NARROW
, NONE
, WIDEN
} modifier
;
1884 int gather_off_nunits
= TYPE_VECTOR_SUBPARTS (gather_off_vectype
);
1886 rettype
= TREE_TYPE (TREE_TYPE (gather_decl
));
1887 srctype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1888 ptrtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1889 idxtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1890 masktype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1891 scaletype
= TREE_VALUE (arglist
);
1892 gcc_checking_assert (types_compatible_p (srctype
, rettype
)
1893 && types_compatible_p (srctype
, masktype
));
1895 if (nunits
== gather_off_nunits
)
1897 else if (nunits
== gather_off_nunits
/ 2)
1899 unsigned char *sel
= XALLOCAVEC (unsigned char, gather_off_nunits);
      modifier = WIDEN;

      for (i = 0; i < gather_off_nunits; ++i)
	sel[i] = i | nunits;

      perm_mask = vect_gen_perm_mask (gather_off_vectype, sel);
      gcc_assert (perm_mask != NULL_TREE);
    }
  else if (nunits == gather_off_nunits * 2)
    {
      unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
      modifier = NARROW;

      for (i = 0; i < nunits; ++i)
	sel[i] = i < gather_off_nunits
		 ? i : i + nunits - gather_off_nunits;

      perm_mask = vect_gen_perm_mask (vectype, sel);
      gcc_assert (perm_mask != NULL_TREE);
      ncopies *= 2;
      for (i = 0; i < nunits; ++i)
	sel[i] = i | gather_off_nunits;
      mask_perm_mask = vect_gen_perm_mask (masktype, sel);
      gcc_assert (mask_perm_mask != NULL_TREE);
    }
  else
    gcc_unreachable ();

  vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);

  ptr = fold_convert (ptrtype, gather_base);
  if (!is_gimple_min_invariant (ptr))
    {
      ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
      new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
      gcc_assert (!new_bb);
    }

  scale = build_int_cst (scaletype, gather_scale);

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; ++j)
    {
      if (modifier == WIDEN && (j & 1))
	op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
				   perm_mask, stmt, gsi);
      else if (j == 0)
	op = vec_oprnd0
	  = vect_get_vec_def_for_operand (gather_off, stmt, NULL);
      else
	op = vec_oprnd0
	  = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);

      if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
	{
	  gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
		      == TYPE_VECTOR_SUBPARTS (idxtype));
	  var = vect_get_new_vect_var (idxtype, vect_simple_var, NULL);
	  var = make_ssa_name (var, NULL);
	  op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
	  new_stmt
	    = gimple_build_assign_with_ops (VIEW_CONVERT_EXPR, var,
					    op, NULL_TREE);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  op = var;
	}

      if (mask_perm_mask && (j & 1))
	mask_op = permute_vec_elements (mask_op, mask_op,
					mask_perm_mask, stmt, gsi);
      else
	{
	  if (j == 0)
	    vec_mask = vect_get_vec_def_for_operand (mask, stmt, NULL);
	  else
	    {
	      vect_is_simple_use (vec_mask, NULL, loop_vinfo, NULL,
				  &def_stmt, &def, &dt);
	      vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
	    }

	  mask_op = vec_mask;
	  if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
	    {
	      gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op))
			  == TYPE_VECTOR_SUBPARTS (masktype));
	      var = vect_get_new_vect_var (masktype, vect_simple_var,
					   NULL);
	      var = make_ssa_name (var, NULL);
	      mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
	      new_stmt
		= gimple_build_assign_with_ops (VIEW_CONVERT_EXPR, var,
						mask_op, NULL_TREE);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      mask_op = var;
	    }
	}

      new_stmt
	= gimple_build_call (gather_decl, 5, mask_op, ptr, op, mask_op,
			     scale);

      if (!useless_type_conversion_p (vectype, rettype))
	{
	  gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
		      == TYPE_VECTOR_SUBPARTS (rettype));
	  var = vect_get_new_vect_var (rettype, vect_simple_var, NULL);
	  op = make_ssa_name (var, new_stmt);
	  gimple_call_set_lhs (new_stmt, op);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  var = make_ssa_name (vec_dest, NULL);
	  op = build1 (VIEW_CONVERT_EXPR, vectype, op);
	  new_stmt
	    = gimple_build_assign_with_ops (VIEW_CONVERT_EXPR, var, op,
					    NULL_TREE);
	}
      else
	{
	  var = make_ssa_name (vec_dest, new_stmt);
	  gimple_call_set_lhs (new_stmt, var);
	}

      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (modifier == NARROW)
	{
	  if ((j & 1) == 0)
	    {
	      prev_res = var;
	      continue;
	    }
	  var = permute_vec_elements (prev_res, var,
				      perm_mask, stmt, gsi);
	  new_stmt = SSA_NAME_DEF_STMT (var);
	}

      if (prev_stmt_info == NULL)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
     from the IL.  */
  tree lhs = gimple_call_lhs (stmt);
  new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, true);
  return true;
    }
  else if (is_store)
    {
      tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
      prev_stmt_info = NULL;
      for (i = 0; i < ncopies; i++)
	{
	  unsigned align, misalign;

	  if (i == 0)
	    {
	      tree rhs = gimple_call_arg (stmt, 3);
	      vec_rhs = vect_get_vec_def_for_operand (rhs, stmt, NULL);
	      vec_mask = vect_get_vec_def_for_operand (mask, stmt, NULL);
	      /* We should have caught mismatched types earlier.  */
	      gcc_assert (useless_type_conversion_p (vectype,
						     TREE_TYPE (vec_rhs)));
	      dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
						      NULL_TREE, &dummy, gsi,
						      &ptr_incr, false,
						      &inv_p);
	      gcc_assert (!inv_p);
	    }
	  else
	    {
	      vect_is_simple_use (vec_rhs, NULL, loop_vinfo, NULL, &def_stmt,
				  &def, &dt);
	      vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs);
	      vect_is_simple_use (vec_mask, NULL, loop_vinfo, NULL, &def_stmt,
				  &def, &dt);
	      vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
	      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
					     TYPE_SIZE_UNIT (vectype));
	    }

	  align = TYPE_ALIGN_UNIT (vectype);
	  if (aligned_access_p (dr))
	    misalign = 0;
	  else if (DR_MISALIGNMENT (dr) == -1)
	    {
	      align = TYPE_ALIGN_UNIT (elem_type);
	      misalign = 0;
	    }
	  else
	    misalign = DR_MISALIGNMENT (dr);
	  set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
				  misalign);
	  new_stmt
	    = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
					  gimple_call_arg (stmt, 1),
					  vec_mask, vec_rhs);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (i == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }
  else
    {
      tree vec_mask = NULL_TREE;
      prev_stmt_info = NULL;
      vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
      for (i = 0; i < ncopies; i++)
	{
	  unsigned align, misalign;

	  if (i == 0)
	    {
	      vec_mask = vect_get_vec_def_for_operand (mask, stmt, NULL);
	      dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
						      NULL_TREE, &dummy, gsi,
						      &ptr_incr, false,
						      &inv_p);
	      gcc_assert (!inv_p);
	    }
	  else
	    {
	      vect_is_simple_use (vec_mask, NULL, loop_vinfo, NULL, &def_stmt,
				  &def, &dt);
	      vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
	      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
					     TYPE_SIZE_UNIT (vectype));
	    }

	  align = TYPE_ALIGN_UNIT (vectype);
	  if (aligned_access_p (dr))
	    misalign = 0;
	  else if (DR_MISALIGNMENT (dr) == -1)
	    {
	      align = TYPE_ALIGN_UNIT (elem_type);
	      misalign = 0;
	    }
	  else
	    misalign = DR_MISALIGNMENT (dr);
	  set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
				  misalign);
	  new_stmt
	    = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
					  gimple_call_arg (stmt, 1),
					  vec_mask);
	  gimple_call_set_lhs (new_stmt, make_ssa_name (vec_dest, NULL));
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (i == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }

  if (!is_store)
    {
      /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
	 from the IL.  */
      tree lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
      set_vinfo_for_stmt (new_stmt, stmt_info);
      set_vinfo_for_stmt (stmt, NULL);
      STMT_VINFO_STMT (stmt_info) = new_stmt;
      gsi_replace (gsi, new_stmt, true);
    }

  return true;
}
/* Function vectorizable_call.

   Check if STMT performs a function call that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
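/* For example (an illustrative sketch only; the vectorized decl comes
   from the target via vectorizable_function, and the builtin name used
   below is hypothetical), a loop such as

       for (i = 0; i < n; i++)
	 b[i] = sqrtf (a[i]);

   can be transformed so that each group of four scalar calls becomes a
   single call on a V4SF operand:

       vect_b = __builtin_hypothetical_sqrtv4sf (vect_a);  */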
static bool
vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
		   slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
  tree vectype_out, vectype_in;
  int nunits_in;
  int nunits_out;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  tree fndecl, new_temp, def, rhs_type;
  gimple def_stmt;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  gimple new_stmt = NULL;
  int ncopies, j;
  vec<tree> vargs = vNULL;
  enum { NARROW, NONE, WIDEN } modifier;
  size_t i, nargs;
  tree lhs;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is STMT a vectorizable call?  */
  if (!is_gimple_call (stmt))
    return false;

  if (gimple_call_internal_p (stmt)
      && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
	  || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
    return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
					 slp_node);

  if (gimple_call_lhs (stmt) == NULL_TREE
      || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  gcc_checking_assert (!stmt_can_throw_internal (stmt));

  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Process function arguments.  */
  rhs_type = NULL_TREE;
  vectype_in = NULL_TREE;
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has more than three arguments; we do not have
     interesting builtin functions to vectorize with more than two arguments
     except for fma.  No arguments is also not good.  */
  if (nargs == 0 || nargs > 3)
    return false;

  /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic.  */
  if (gimple_call_internal_p (stmt)
      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
    {
      nargs = 0;
      rhs_type = unsigned_type_node;
    }

  for (i = 0; i < nargs; i++)
    {
      tree opvectype;

      op = gimple_call_arg (stmt, i);

      /* We can only handle calls with arguments of the same type.  */
      if (rhs_type
	  && !types_compatible_p (rhs_type, TREE_TYPE (op)))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "argument types differ.\n");
	  return false;
	}
      if (!rhs_type)
	rhs_type = TREE_TYPE (op);

      if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
				 &def_stmt, &def, &dt[i], &opvectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}

      if (!vectype_in)
	vectype_in = opvectype;
      else if (opvectype
	       && opvectype != vectype_in)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "argument vector types differ.\n");
	  return false;
	}
    }
  /* If all arguments are external or constant defs use a vector type with
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "no vectype for scalar type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}

      return false;
    }

  /* FORNOW */
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in == nunits_out / 2)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else if (nunits_out == nunits_in / 2)
    modifier = WIDEN;
  else
    return false;
  /* For now, we only vectorize functions if a target specific builtin
     is available.  TODO -- in some cases, it might be profitable to
     insert the calls for pieces of the vector, in order to be able
     to vectorize other operations in the loop.  */
  fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
  if (fndecl == NULL_TREE)
    {
      if (gimple_call_internal_p (stmt)
	  && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE
	  && !slp_node
	  && loop_vinfo
	  && LOOP_VINFO_LOOP (loop_vinfo)->simduid
	  && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
	  && LOOP_VINFO_LOOP (loop_vinfo)->simduid
	     == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
	{
	  /* We can handle IFN_GOMP_SIMD_LANE by returning a
	     { 0, 1, 2, ... vf - 1 } vector.  */
	  gcc_assert (nargs == 0);
	}
      else
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "function is not vectorizable.\n");
	  return false;
	}
    }

  gcc_assert (!gimple_vuse (stmt));

  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
			 "\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /** Transform.  **/

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; ++j)
	{
	  /* Build argument list for the vectorized call.  */
	  if (j == 0)
	    vargs.create (nargs);
	  else
	    vargs.truncate (0);

	  if (slp_node)
	    {
	      auto_vec<vec<tree> > vec_defs (nargs);
	      vec<tree> vec_oprnds0;

	      for (i = 0; i < nargs; i++)
		vargs.quick_push (gimple_call_arg (stmt, i));
	      vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
	      vec_oprnds0 = vec_defs[0];

	      /* Arguments are ready.  Create the new vector stmt.  */
	      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
		{
		  size_t k;
		  for (k = 0; k < nargs; k++)
		    {
		      vec<tree> vec_oprndsk = vec_defs[k];
		      vargs[k] = vec_oprndsk[i];
		    }
		  new_stmt = gimple_build_call_vec (fndecl, vargs);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_call_set_lhs (new_stmt, new_temp);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
		}

	      for (i = 0; i < nargs; i++)
		{
		  vec<tree> vec_oprndsi = vec_defs[i];
		  vec_oprndsi.release ();
		}
	      continue;
	    }

	  for (i = 0; i < nargs; i++)
	    {
	      op = gimple_call_arg (stmt, i);
	      if (j == 0)
		vec_oprnd0
		  = vect_get_vec_def_for_operand (op, stmt, NULL);
	      else
		{
		  vec_oprnd0 = gimple_call_arg (new_stmt, i);
		  vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}

	      vargs.quick_push (vec_oprnd0);
	    }

	  if (gimple_call_internal_p (stmt)
	      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
	    {
	      tree *v = XALLOCAVEC (tree, nunits_out);
	      int k;
	      for (k = 0; k < nunits_out; ++k)
		v[k] = build_int_cst (unsigned_type_node, j * nunits_out + k);
	      tree cst = build_vector (vectype_out, v);
	      tree new_var
		= vect_get_new_vect_var (vectype_out, vect_simple_var, "cst_");
	      gimple init_stmt = gimple_build_assign (new_var, cst);
	      new_temp = make_ssa_name (new_var, init_stmt);
	      gimple_assign_set_lhs (init_stmt, new_temp);
	      vect_init_vector_1 (stmt, init_stmt, NULL);
	      new_temp = make_ssa_name (vec_dest, NULL);
	      new_stmt = gimple_build_assign (new_temp,
					      gimple_assign_lhs (init_stmt));
	    }
	  else
	    {
	      new_stmt = gimple_build_call_vec (fndecl, vargs);
	      new_temp = make_ssa_name (vec_dest, new_stmt);
	      gimple_call_set_lhs (new_stmt, new_temp);
	    }
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}

      break;
    case NARROW:
      for (j = 0; j < ncopies; ++j)
	{
	  /* Build argument list for the vectorized call.  */
	  if (j == 0)
	    vargs.create (nargs * 2);
	  else
	    vargs.truncate (0);

	  if (slp_node)
	    {
	      auto_vec<vec<tree> > vec_defs (nargs);
	      vec<tree> vec_oprnds0;

	      for (i = 0; i < nargs; i++)
		vargs.quick_push (gimple_call_arg (stmt, i));
	      vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
	      vec_oprnds0 = vec_defs[0];

	      /* Arguments are ready.  Create the new vector stmt.  */
	      for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
		{
		  size_t k;
		  vargs.truncate (0);
		  for (k = 0; k < nargs; k++)
		    {
		      vec<tree> vec_oprndsk = vec_defs[k];
		      vargs.quick_push (vec_oprndsk[i]);
		      vargs.quick_push (vec_oprndsk[i + 1]);
		    }
		  new_stmt = gimple_build_call_vec (fndecl, vargs);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_call_set_lhs (new_stmt, new_temp);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
		}

	      for (i = 0; i < nargs; i++)
		{
		  vec<tree> vec_oprndsi = vec_defs[i];
		  vec_oprndsi.release ();
		}
	      continue;
	    }

	  for (i = 0; i < nargs; i++)
	    {
	      op = gimple_call_arg (stmt, i);
	      if (j == 0)
		{
		  vec_oprnd0
		    = vect_get_vec_def_for_operand (op, stmt, NULL);
		  vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}
	      else
		{
		  vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
		  vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
		  vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}

	      vargs.quick_push (vec_oprnd0);
	      vargs.quick_push (vec_oprnd1);
	    }

	  new_stmt = gimple_build_call_vec (fndecl, vargs);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_call_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

      break;
    case WIDEN:
      /* No current target implements this case.  */
      return false;
    }

  vargs.release ();

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  if (slp_node)
    return true;

  type = TREE_TYPE (scalar_dest);
  if (is_pattern_stmt_p (stmt_info))
    lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
  else
    lhs = gimple_call_lhs (stmt);
  new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, false);

  return true;
}
struct simd_call_arg_info
{
  tree vectype;
  tree op;
  enum vect_def_type dt;
  HOST_WIDE_INT linear_step;
  unsigned int align;
};

/* Function vectorizable_simd_clone_call.

   Check if STMT performs a function call that can be vectorized
   by calling a simd clone of the function.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
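/* For example, a function declared as

       #pragma omp declare simd notinbranch
       float foo (float x);

   gets SIMD clones that take a whole vector of X values and return a
   vector of results, so a call to foo in a vectorizable loop can be
   replaced by a single call to the best matching clone.  */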
static bool
vectorizable_simd_clone_call (gimple stmt, gimple_stmt_iterator *gsi,
			      gimple *vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
  tree vectype;
  unsigned int nunits;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
  tree fndecl, new_temp, def;
  gimple def_stmt;
  gimple new_stmt = NULL;
  int ncopies, j;
  vec<simd_call_arg_info> arginfo = vNULL;
  vec<tree> vargs = vNULL;
  size_t i, nargs;
  tree lhs, rtype, ratype;
  vec<constructor_elt, va_gc> *ret_ctor_elts;

  /* Is STMT a vectorizable call?  */
  if (!is_gimple_call (stmt))
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl == NULL_TREE)
    return false;

  struct cgraph_node *node = cgraph_get_node (fndecl);
  if (node == NULL || node->simd_clones == NULL)
    return false;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  if (gimple_call_lhs (stmt)
      && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  gcc_checking_assert (!stmt_can_throw_internal (stmt));

  vectype = STMT_VINFO_VECTYPE (stmt_info);

  if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
    return false;

  /* FORNOW */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    return false;

  /* Process function arguments.  */
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has zero arguments.  */
  if (nargs == 0)
    return false;

  arginfo.create (nargs);

  for (i = 0; i < nargs; i++)
    {
      simd_call_arg_info thisarginfo;
      affine_iv iv;

      thisarginfo.linear_step = 0;
      thisarginfo.align = 0;
      thisarginfo.op = NULL_TREE;

      op = gimple_call_arg (stmt, i);
      if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
				 &def_stmt, &def, &thisarginfo.dt,
				 &thisarginfo.vectype)
	  || thisarginfo.dt == vect_uninitialized_def)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  arginfo.release ();
	  return false;
	}

      if (thisarginfo.dt == vect_constant_def
	  || thisarginfo.dt == vect_external_def)
	gcc_assert (thisarginfo.vectype == NULL_TREE);
      else
	gcc_assert (thisarginfo.vectype != NULL_TREE);

      if (thisarginfo.dt != vect_constant_def
	  && thisarginfo.dt != vect_external_def
	  && loop_vinfo
	  && TREE_CODE (op) == SSA_NAME
	  && simple_iv (loop, loop_containing_stmt (stmt), op, &iv, false)
	  && tree_fits_shwi_p (iv.step))
	{
	  thisarginfo.linear_step = tree_to_shwi (iv.step);
	  thisarginfo.op = iv.base;
	}
      else if ((thisarginfo.dt == vect_constant_def
		|| thisarginfo.dt == vect_external_def)
	       && POINTER_TYPE_P (TREE_TYPE (op)))
	thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;

      arginfo.quick_push (thisarginfo);
    }
  unsigned int badness = 0;
  struct cgraph_node *bestn = NULL;
  if (STMT_VINFO_SIMD_CLONE_FNDECL (stmt_info))
    bestn = cgraph_get_node (STMT_VINFO_SIMD_CLONE_FNDECL (stmt_info));
  else
    for (struct cgraph_node *n = node->simd_clones; n != NULL;
	 n = n->simdclone->next_clone)
      {
	unsigned int this_badness = 0;
	if (n->simdclone->simdlen
	    > (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
	    || n->simdclone->nargs != nargs)
	  continue;
	if (n->simdclone->simdlen
	    < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo))
	  this_badness += (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
			   - exact_log2 (n->simdclone->simdlen)) * 1024;
	if (n->simdclone->inbranch)
	  this_badness += 2048;
	int target_badness = targetm.simd_clone.usable (n);
	if (target_badness < 0)
	  continue;
	this_badness += target_badness * 512;
	/* FORNOW: Have to add code to add the mask argument.  */
	if (n->simdclone->inbranch)
	  continue;
	for (i = 0; i < nargs; i++)
	  {
	    switch (n->simdclone->args[i].arg_type)
	      {
	      case SIMD_CLONE_ARG_TYPE_VECTOR:
		if (!useless_type_conversion_p
			(n->simdclone->args[i].orig_type,
			 TREE_TYPE (gimple_call_arg (stmt, i))))
		  i = -1;
		else if (arginfo[i].dt == vect_constant_def
			 || arginfo[i].dt == vect_external_def
			 || arginfo[i].linear_step)
		  this_badness += 64;
		break;
	      case SIMD_CLONE_ARG_TYPE_UNIFORM:
		if (arginfo[i].dt != vect_constant_def
		    && arginfo[i].dt != vect_external_def)
		  i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
		if (arginfo[i].dt == vect_constant_def
		    || arginfo[i].dt == vect_external_def
		    || (arginfo[i].linear_step
			!= n->simdclone->args[i].linear_step))
		  i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
		/* FORNOW */
		i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_MASK:
		gcc_unreachable ();
	      }
	    if (i == (size_t) -1)
	      break;
	    if (n->simdclone->args[i].alignment > arginfo[i].align)
	      {
		i = -1;
		break;
	      }
	    if (arginfo[i].align)
	      this_badness += (exact_log2 (arginfo[i].align)
			       - exact_log2 (n->simdclone->args[i].alignment));
	  }
	if (i == (size_t) -1)
	  continue;
	if (bestn == NULL || this_badness < badness)
	  {
	    bestn = n;
	    badness = this_badness;
	  }
      }
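  /* A worked example of the badness computation above: with a
     vectorization factor of 8, a usable simdlen-4 notinbranch clone
     scores (exact_log2 (8) - exact_log2 (4)) * 1024 == 1024, while a
     simdlen-8 notinbranch clone scores 0 and is therefore preferred.  */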
  if (bestn == NULL)
    {
      arginfo.release ();
      return false;
    }

  for (i = 0; i < nargs; i++)
    if ((arginfo[i].dt == vect_constant_def
	 || arginfo[i].dt == vect_external_def)
	&& bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
      {
	arginfo[i].vectype
	  = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
								     i)));
	if (arginfo[i].vectype == NULL
	    || (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
		> bestn->simdclone->simdlen))
	  {
	    arginfo.release ();
	    return false;
	  }
      }

  fndecl = bestn->decl;
  nunits = bestn->simdclone->simdlen;
  ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  /* If the function isn't const, only allow it in simd loops where user
     has asserted that at least nunits consecutive iterations can be
     performed using SIMD instructions.  */
  if ((loop == NULL || (unsigned) loop->safelen < nunits)
      && gimple_vuse (stmt))
    {
      arginfo.release ();
      return false;
    }

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_SIMD_CLONE_FNDECL (stmt_info) = bestn->decl;
      STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_simd_clone_call ===\n");
/*      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
      arginfo.release ();
      return true;
    }

  /** Transform.  **/

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = NULL_TREE;
  rtype = NULL_TREE;
  ratype = NULL_TREE;
  if (scalar_dest)
    {
      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      rtype = TREE_TYPE (TREE_TYPE (fndecl));
      if (TREE_CODE (rtype) == ARRAY_TYPE)
	{
	  ratype = rtype;
	  rtype = TREE_TYPE (ratype);
	}
    }

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; ++j)
    {
      /* Build argument list for the vectorized call.  */
      if (j == 0)
	vargs.create (nargs);
      else
	vargs.truncate (0);
      for (i = 0; i < nargs; i++)
	{
	  unsigned int k, l, m, o;
	  tree atype;
	  op = gimple_call_arg (stmt, i);
	  switch (bestn->simdclone->args[i].arg_type)
	    {
	    case SIMD_CLONE_ARG_TYPE_VECTOR:
	      atype = bestn->simdclone->args[i].vector_type;
	      o = nunits / TYPE_VECTOR_SUBPARTS (atype);
	      for (m = j * o; m < (j + 1) * o; m++)
		{
		  if (TYPE_VECTOR_SUBPARTS (atype)
		      < TYPE_VECTOR_SUBPARTS (arginfo[i].vectype))
		    {
		      unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
		      k = (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
			   / TYPE_VECTOR_SUBPARTS (atype));
		      gcc_assert ((k & (k - 1)) == 0);
		      if (m == 0)
			vec_oprnd0
			  = vect_get_vec_def_for_operand (op, stmt, NULL);
		      else
			{
			  vec_oprnd0 = arginfo[i].op;
			  if ((m & (k - 1)) == 0)
			    vec_oprnd0
			      = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
								vec_oprnd0);
			}
		      arginfo[i].op = vec_oprnd0;
		      vec_oprnd0
			= build3 (BIT_FIELD_REF, atype, vec_oprnd0,
				  size_int (prec),
				  bitsize_int ((m & (k - 1)) * prec));
		      new_stmt
			= gimple_build_assign (make_ssa_name (atype, NULL),
					       vec_oprnd0);
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		      vargs.safe_push (gimple_assign_lhs (new_stmt));
		    }
		  else
		    {
		      k = (TYPE_VECTOR_SUBPARTS (atype)
			   / TYPE_VECTOR_SUBPARTS (arginfo[i].vectype));
		      gcc_assert ((k & (k - 1)) == 0);
		      vec<constructor_elt, va_gc> *ctor_elts;
		      if (k != 1)
			vec_alloc (ctor_elts, k);
		      else
			ctor_elts = NULL;
		      for (l = 0; l < k; l++)
			{
			  if (m == 0 && l == 0)
			    vec_oprnd0
			      = vect_get_vec_def_for_operand (op, stmt, NULL);
			  else
			    vec_oprnd0
			      = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
								arginfo[i].op);
			  arginfo[i].op = vec_oprnd0;
			  if (k == 1)
			    break;
			  CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
						  vec_oprnd0);
			}
		      if (k == 1)
			vargs.safe_push (vec_oprnd0);
		      else
			{
			  vec_oprnd0 = build_constructor (atype, ctor_elts);
			  new_stmt
			    = gimple_build_assign (make_ssa_name (atype, NULL),
						   vec_oprnd0);
			  vect_finish_stmt_generation (stmt, new_stmt, gsi);
			  vargs.safe_push (gimple_assign_lhs (new_stmt));
			}
		    }
		}
	      break;
	    case SIMD_CLONE_ARG_TYPE_UNIFORM:
	      vargs.safe_push (op);
	      break;
	    case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
	      if (j == 0)
		{
		  gimple_seq stmts;
		  arginfo[i].op
		    = force_gimple_operand (arginfo[i].op, &stmts, true,
					    NULL_TREE);
		  if (stmts != NULL)
		    {
		      basic_block new_bb;
		      edge pe = loop_preheader_edge (loop);
		      new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
		      gcc_assert (!new_bb);
		    }
		  tree phi_res = copy_ssa_name (op, NULL);
		  gimple new_phi = create_phi_node (phi_res, loop->header);
		  set_vinfo_for_stmt (new_phi,
				      new_stmt_vec_info (new_phi, loop_vinfo,
							 NULL));
		  add_phi_arg (new_phi, arginfo[i].op,
			       loop_preheader_edge (loop), UNKNOWN_LOCATION);
		  enum tree_code code
		    = POINTER_TYPE_P (TREE_TYPE (op))
		      ? POINTER_PLUS_EXPR : PLUS_EXPR;
		  tree type = POINTER_TYPE_P (TREE_TYPE (op))
			      ? sizetype : TREE_TYPE (op);
		  widest_int cst
		    = wi::mul (bestn->simdclone->args[i].linear_step,
			       ncopies * nunits);
		  tree tcst = wide_int_to_tree (type, cst);
		  tree phi_arg = copy_ssa_name (op, NULL);
		  new_stmt = gimple_build_assign_with_ops (code, phi_arg,
							   phi_res, tcst);
		  gimple_stmt_iterator si = gsi_after_labels (loop->header);
		  gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
		  set_vinfo_for_stmt (new_stmt,
				      new_stmt_vec_info (new_stmt, loop_vinfo,
							 NULL));
		  add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
			       UNKNOWN_LOCATION);
		  arginfo[i].op = phi_res;
		  vargs.safe_push (phi_res);
		}
	      else
		{
		  enum tree_code code
		    = POINTER_TYPE_P (TREE_TYPE (op))
		      ? POINTER_PLUS_EXPR : PLUS_EXPR;
		  tree type = POINTER_TYPE_P (TREE_TYPE (op))
			      ? sizetype : TREE_TYPE (op);
		  widest_int cst
		    = wi::mul (bestn->simdclone->args[i].linear_step,
			       j * nunits);
		  tree tcst = wide_int_to_tree (type, cst);
		  new_temp = make_ssa_name (TREE_TYPE (op), NULL);
		  new_stmt
		    = gimple_build_assign_with_ops (code, new_temp,
						    arginfo[i].op, tcst);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  vargs.safe_push (new_temp);
		}
	      break;
	    case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
	    default:
	      gcc_unreachable ();
	    }
	}
      new_stmt = gimple_build_call_vec (fndecl, vargs);
      if (vec_dest)
	{
	  gcc_assert (ratype || TYPE_VECTOR_SUBPARTS (rtype) == nunits);
	  if (ratype)
	    new_temp = create_tmp_var (ratype, NULL);
	  else if (TYPE_VECTOR_SUBPARTS (vectype)
		   == TYPE_VECTOR_SUBPARTS (rtype))
	    new_temp = make_ssa_name (vec_dest, new_stmt);
	  else
	    new_temp = make_ssa_name (rtype, new_stmt);
	  gimple_call_set_lhs (new_stmt, new_temp);
	}
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (vec_dest)
	{
	  if (TYPE_VECTOR_SUBPARTS (vectype) < nunits)
	    {
	      unsigned int k, l;
	      unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
	      k = nunits / TYPE_VECTOR_SUBPARTS (vectype);
	      gcc_assert ((k & (k - 1)) == 0);
	      for (l = 0; l < k; l++)
		{
		  tree t;
		  if (ratype)
		    {
		      t = build_fold_addr_expr (new_temp);
		      t = build2 (MEM_REF, vectype, t,
				  build_int_cst (TREE_TYPE (t),
						 l * prec / BITS_PER_UNIT));
		    }
		  else
		    t = build3 (BIT_FIELD_REF, vectype, new_temp,
				size_int (prec), bitsize_int (l * prec));
		  new_stmt
		    = gimple_build_assign (make_ssa_name (vectype, NULL), t);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  if (j == 0 && l == 0)
		    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

		  prev_stmt_info = vinfo_for_stmt (new_stmt);
		}

	      if (ratype)
		{
		  tree clobber = build_constructor (ratype, NULL);
		  TREE_THIS_VOLATILE (clobber) = 1;
		  new_stmt = gimple_build_assign (new_temp, clobber);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      continue;
	    }
	  else if (TYPE_VECTOR_SUBPARTS (vectype) > nunits)
	    {
	      unsigned int k = (TYPE_VECTOR_SUBPARTS (vectype)
				/ TYPE_VECTOR_SUBPARTS (rtype));
	      gcc_assert ((k & (k - 1)) == 0);
	      if ((j & (k - 1)) == 0)
		vec_alloc (ret_ctor_elts, k);
	      if (ratype)
		{
		  unsigned int m, o = nunits / TYPE_VECTOR_SUBPARTS (rtype);
		  for (m = 0; m < o; m++)
		    {
		      tree tem = build4 (ARRAY_REF, rtype, new_temp,
					 size_int (m), NULL_TREE, NULL_TREE);
		      new_stmt
			= gimple_build_assign (make_ssa_name (rtype, NULL),
					       tem);
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		      CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
					      gimple_assign_lhs (new_stmt));
		    }
		  tree clobber = build_constructor (ratype, NULL);
		  TREE_THIS_VOLATILE (clobber) = 1;
		  new_stmt = gimple_build_assign (new_temp, clobber);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      else
		CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
	      if ((j & (k - 1)) != k - 1)
		continue;
	      vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
	      new_stmt
		= gimple_build_assign (make_ssa_name (vec_dest, NULL),
				       vec_oprnd0);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);

	      if ((unsigned) j == k - 1)
		STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	      prev_stmt_info = vinfo_for_stmt (new_stmt);
	      continue;
	    }
	  else if (ratype)
	    {
	      tree t = build_fold_addr_expr (new_temp);
	      t = build2 (MEM_REF, vectype, t,
			  build_int_cst (TREE_TYPE (t), 0));
	      new_stmt
		= gimple_build_assign (make_ssa_name (vec_dest, NULL), t);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      tree clobber = build_constructor (ratype, NULL);
	      TREE_THIS_VOLATILE (clobber) = 1;
	      vect_finish_stmt_generation (stmt,
					   gimple_build_assign (new_temp,
								clobber),
					   gsi);
	    }
	}

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vargs.release ();
  arginfo.release ();

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  if (scalar_dest)
    {
      type = TREE_TYPE (scalar_dest);
      if (is_pattern_stmt_p (stmt_info))
	lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
      else
	lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
    }
  else
    new_stmt = gimple_build_nop ();
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, false);
  unlink_stmt_vdef (stmt);

  return true;
}
/* Function vect_gen_widened_results_half

   Create a vector stmt whose code, type of operation, and result
   variable are CODE, OP_TYPE, and VEC_DEST, and whose arguments are
   VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at BSI.
   In the case that CODE is a CALL_EXPR, this means that a call to DECL
   needs to be created (DECL is a function-decl of a target-builtin).
   STMT is the original scalar stmt that we are vectorizing.  */
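/* E.g. widening V8HI operands to V4SI results takes two vector stmts,
   one using the LO variant of the operation for the low half of the
   input and one using the HI variant for the high half; this helper
   emits a single such half.  */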
static gimple
vect_gen_widened_results_half (enum tree_code code,
			       tree decl,
			       tree vec_oprnd0, tree vec_oprnd1, int op_type,
			       tree vec_dest, gimple_stmt_iterator *gsi,
			       gimple stmt)
{
  gimple new_stmt;
  tree new_temp;

  /* Generate half of the widened result:  */
  if (code == CALL_EXPR)
    {
      /* Target specific support  */
      if (op_type == binary_op)
	new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
      else
	new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_call_set_lhs (new_stmt, new_temp);
    }
  else
    {
      /* Generic support */
      gcc_assert (op_type == TREE_CODE_LENGTH (code));
      if (op_type != binary_op)
	vec_oprnd1 = NULL;
      new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
					       vec_oprnd1);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
    }
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return new_stmt;
}
/* Get vectorized definitions for loop-based vectorization.  For the first
   operand we call vect_get_vec_def_for_operand() (with OPRND containing
   scalar operand), and for the rest we get a copy with
   vect_get_vec_def_for_stmt_copy() using the previous vector definition
   (stored in OPRND).  See vect_get_vec_def_for_stmt_copy() for details.
   The vectors are collected into VEC_OPRNDS.  */
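/* E.g. for a two-step narrowing that consumes four vector defs per
   scalar stmt copy, the first def is created from the scalar operand
   and the remaining three are chained stmt copies of it.  */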
static void
vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
			  vec<tree> *vec_oprnds, int multi_step_cvt)
{
  tree vec_oprnd;

  /* Get first vector operand.  */
  /* All the vector operands except the very first one (that is scalar oprnd)
     are stmt copies.  */
  if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
    vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
  else
    vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);

  vec_oprnds->quick_push (vec_oprnd);

  /* Get second vector operand.  */
  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
  vec_oprnds->quick_push (vec_oprnd);

  *oprnd = vec_oprnd;

  /* For conversion in multiple steps, continue to get operands
     recursively.  */
  if (multi_step_cvt)
    vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
}
/* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
   For multi-step conversions store the resulting vectors and call the function
   recursively.  */

static void
vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
				       int multi_step_cvt, gimple stmt,
				       vec<tree> vec_dsts,
				       gimple_stmt_iterator *gsi,
				       slp_tree slp_node, enum tree_code code,
				       stmt_vec_info *prev_stmt_info)
{
  unsigned int i;
  tree vop0, vop1, new_tmp, vec_dest;
  gimple new_stmt;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  vec_dest = vec_dsts.pop ();

  for (i = 0; i < vec_oprnds->length (); i += 2)
    {
      /* Create demotion operation.  */
      vop0 = (*vec_oprnds)[i];
      vop1 = (*vec_oprnds)[i + 1];
      new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
      new_tmp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_tmp);
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (multi_step_cvt)
	/* Store the resulting vector for next recursive call.  */
	(*vec_oprnds)[i/2] = new_tmp;
      else
	{
	  /* This is the last step of the conversion sequence.  Store the
	     vectors in SLP_NODE or in vector info of the scalar statement
	     (or in STMT_VINFO_RELATED_STMT chain).  */
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	  else
	    {
	      if (!*prev_stmt_info)
		STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;

	      *prev_stmt_info = vinfo_for_stmt (new_stmt);
	    }
	}
    }

  /* For multi-step demotion operations we first generate demotion operations
     from the source type to the intermediate types, and then combine the
     results (stored in VEC_OPRNDS) in demotion operation to the destination
     type.  */
  if (multi_step_cvt)
    {
      /* At each level of recursion we have half of the operands we had at the
	 previous level.  */
      vec_oprnds->truncate ((i+1)/2);
      vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
					     stmt, vec_dsts, gsi, slp_node,
					     VEC_PACK_TRUNC_EXPR,
					     prev_stmt_info);
    }

  vec_dsts.quick_push (vec_dest);
}
/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
   and VEC_OPRNDS1 (for binary operations).  For multi-step conversions store
   the resulting vectors and call the function recursively.  */
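/* E.g. promoting two V8HI operand vectors to V4SI produces four result
   vectors, the unpacked low and high halves of each input; they are
   stored back into VEC_OPRNDS0 for a possible next step.  */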
static void
vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
					vec<tree> *vec_oprnds1,
					gimple stmt, tree vec_dest,
					gimple_stmt_iterator *gsi,
					enum tree_code code1,
					enum tree_code code2, tree decl1,
					tree decl2, int op_type)
{
  int i;
  tree vop0, vop1, new_tmp1, new_tmp2;
  gimple new_stmt1, new_stmt2;
  vec<tree> vec_tmp = vNULL;

  vec_tmp.create (vec_oprnds0->length () * 2);
  FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
    {
      if (op_type == binary_op)
	vop1 = (*vec_oprnds1)[i];
      else
	vop1 = NULL_TREE;

      /* Generate the two halves of promotion operation.  */
      new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
						 op_type, vec_dest, gsi, stmt);
      new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
						 op_type, vec_dest, gsi, stmt);
      if (is_gimple_call (new_stmt1))
	{
	  new_tmp1 = gimple_call_lhs (new_stmt1);
	  new_tmp2 = gimple_call_lhs (new_stmt2);
	}
      else
	{
	  new_tmp1 = gimple_assign_lhs (new_stmt1);
	  new_tmp2 = gimple_assign_lhs (new_stmt2);
	}

      /* Store the results for the next step.  */
      vec_tmp.quick_push (new_tmp1);
      vec_tmp.quick_push (new_tmp2);
    }

  vec_oprnds0->release ();
  *vec_oprnds0 = vec_tmp;
}
/* Check if STMT performs a conversion operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
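/* E.g. with 128-bit vectors, (double) f of V4SF operands is a WIDEN
   producing two V2DF vectors per copy, (float) d of V2DF operands is
   the matching NARROW, and V4SI <-> V4SF int/float conversions are
   NONE, handled by a single conversion stmt.  */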
static bool
vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
			 gimple *vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL_TREE;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
  enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
  tree decl1 = NULL_TREE, decl2 = NULL_TREE;
  tree new_temp;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out, vectype_in;
  int ncopies, i, j;
  tree lhs_type, rhs_type;
  enum { NARROW, NONE, WIDEN } modifier;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  tree vop0;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  int multi_step_cvt = 0;
  vec<tree> vec_dsts = vNULL;
  vec<tree> interm_types = vNULL;
  tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
  int op_type;
  enum machine_mode rhs_mode;
  unsigned short fltsz;

  /* Is STMT a vectorizable conversion?  */

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (!CONVERT_EXPR_CODE_P (code)
      && code != FIX_TRUNC_EXPR
      && code != FLOAT_EXPR
      && code != WIDEN_MULT_EXPR
      && code != WIDEN_LSHIFT_EXPR)
    return false;

  op_type = TREE_CODE_LENGTH (code);

  /* Check types of lhs and rhs.  */
  scalar_dest = gimple_assign_lhs (stmt);
  lhs_type = TREE_TYPE (scalar_dest);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  op0 = gimple_assign_rhs1 (stmt);
  rhs_type = TREE_TYPE (op0);

  if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
      && !((INTEGRAL_TYPE_P (lhs_type)
	    && INTEGRAL_TYPE_P (rhs_type))
	   || (SCALAR_FLOAT_TYPE_P (lhs_type)
	       && SCALAR_FLOAT_TYPE_P (rhs_type))))
    return false;

  if ((INTEGRAL_TYPE_P (lhs_type)
       && (TYPE_PRECISION (lhs_type)
	   != GET_MODE_PRECISION (TYPE_MODE (lhs_type))))
      || (INTEGRAL_TYPE_P (rhs_type)
	  && (TYPE_PRECISION (rhs_type)
	      != GET_MODE_PRECISION (TYPE_MODE (rhs_type)))))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "type conversion to/from bit-precision unsupported."
			 "\n");
      return false;
    }

  /* Check the operands of the operation.  */
  if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
			     &def_stmt, &def, &dt[0], &vectype_in))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }
  if (op_type == binary_op)
    {
      bool ok;

      op1 = gimple_assign_rhs2 (stmt);
      gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
      /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
	 OP1.  */
      if (CONSTANT_CLASS_P (op0))
	ok = vect_is_simple_use_1 (op1, stmt, loop_vinfo, bb_vinfo,
				   &def_stmt, &def, &dt[1], &vectype_in);
      else
	ok = vect_is_simple_use (op1, stmt, loop_vinfo, bb_vinfo, &def_stmt,
				 &def, &dt[1]);

      if (!ok)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}
    }

  /* If op0 is an external or constant defs use a vector type of
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "no vectype for scalar type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}

      return false;
    }

  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in < nunits_out)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else
    modifier = WIDEN;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);
  /* Supportable by target?  */
  switch (modifier)
    {
    case NONE:
      if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
	return false;
      if (supportable_convert_operation (code, vectype_out, vectype_in,
					 &decl1, &code1))
	break;
      /* FALLTHRU */
    unsupported:
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "conversion not supported by target.\n");
      return false;

    case WIDEN:
      if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
					  &code1, &code2, &multi_step_cvt,
					  &interm_types))
	{
	  /* Binary widening operation can only be supported directly by the
	     architecture.  */
	  gcc_assert (!(multi_step_cvt && op_type == binary_op));
	  break;
	}

      if (code != FLOAT_EXPR
	  || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
	      <= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
	goto unsupported;

      rhs_mode = TYPE_MODE (rhs_type);
      fltsz = GET_MODE_SIZE (TYPE_MODE (lhs_type));
      for (rhs_mode = GET_MODE_2XWIDER_MODE (TYPE_MODE (rhs_type));
	   rhs_mode != VOIDmode && GET_MODE_SIZE (rhs_mode) <= fltsz;
	   rhs_mode = GET_MODE_2XWIDER_MODE (rhs_mode))
	{
	  cvt_type
	    = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
	  cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
	  if (cvt_type == NULL_TREE)
	    goto unsupported;

	  if (GET_MODE_SIZE (rhs_mode) == fltsz)
	    {
	      if (!supportable_convert_operation (code, vectype_out,
						  cvt_type, &decl1, &codecvt1))
		goto unsupported;
	    }
	  else if (!supportable_widening_operation (code, stmt, vectype_out,
						    cvt_type, &codecvt1,
						    &codecvt2, &multi_step_cvt,
						    &interm_types))
	    continue;
	  else
	    gcc_assert (multi_step_cvt == 0);

	  if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
					      vectype_in, &code1, &code2,
					      &multi_step_cvt, &interm_types))
	    break;
	}

      if (rhs_mode == VOIDmode || GET_MODE_SIZE (rhs_mode) > fltsz)
	goto unsupported;

      if (GET_MODE_SIZE (rhs_mode) == fltsz)
	codecvt2 = ERROR_MARK;
      else
	{
	  multi_step_cvt++;
	  interm_types.safe_push (cvt_type);
	  cvt_type = NULL_TREE;
	}
      break;

    case NARROW:
      gcc_assert (op_type == unary_op);
      if (supportable_narrowing_operation (code, vectype_out, vectype_in,
					   &code1, &multi_step_cvt,
					   &interm_types))
	break;

      if (code != FIX_TRUNC_EXPR
	  || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
	      >= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
	goto unsupported;

      rhs_mode = TYPE_MODE (rhs_type);
      cvt_type
	= build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
      cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
      if (cvt_type == NULL_TREE)
	goto unsupported;
      if (!supportable_convert_operation (code, cvt_type, vectype_in,
					  &decl1, &codecvt1))
	goto unsupported;
      if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
					   &code1, &multi_step_cvt,
					   &interm_types))
	break;
      goto unsupported;

    default:
      gcc_unreachable ();
    }
3678 if (dump_enabled_p ())
3679 dump_printf_loc (MSG_NOTE
, vect_location
,
3680 "=== vectorizable_conversion ===\n");
3681 if (code
== FIX_TRUNC_EXPR
|| code
== FLOAT_EXPR
)
3683 STMT_VINFO_TYPE (stmt_info
) = type_conversion_vec_info_type
;
3684 vect_model_simple_cost (stmt_info
, ncopies
, dt
, NULL
, NULL
);
3686 else if (modifier
== NARROW
)
3688 STMT_VINFO_TYPE (stmt_info
) = type_demotion_vec_info_type
;
3689 vect_model_promotion_demotion_cost (stmt_info
, dt
, multi_step_cvt
);
3693 STMT_VINFO_TYPE (stmt_info
) = type_promotion_vec_info_type
;
3694 vect_model_promotion_demotion_cost (stmt_info
, dt
, multi_step_cvt
);
3696 interm_types
.release ();
3701 if (dump_enabled_p ())
3702 dump_printf_loc (MSG_NOTE
, vect_location
,
3703 "transform conversion. ncopies = %d.\n", ncopies
);
3705 if (op_type
== binary_op
)
3707 if (CONSTANT_CLASS_P (op0
))
3708 op0
= fold_convert (TREE_TYPE (op1
), op0
);
3709 else if (CONSTANT_CLASS_P (op1
))
3710 op1
= fold_convert (TREE_TYPE (op0
), op1
);
  /* In case of multi-step conversion, we first generate conversion operations
     to the intermediate types, and then from those types to the final one.
     We create vector destinations for the intermediate type (TYPES) received
     from supportable_*_operation, and store them in the correct order
     for future use in vect_create_vectorized_*_stmts ().  */
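  /* E.g. a char -> float conversion may be performed as
     char -> short -> int -> float, where the integer promotions come
     from INTERM_TYPES and the final int -> float step uses CVT_TYPE.  */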
  vec_dsts.create (multi_step_cvt + 1);
  vec_dest = vect_create_destination_var (scalar_dest,
					  (cvt_type && modifier == WIDEN)
					  ? cvt_type : vectype_out);
  vec_dsts.quick_push (vec_dest);

  if (multi_step_cvt)
    {
      for (i = interm_types.length () - 1;
	   interm_types.iterate (i, &intermediate_type); i--)
	{
	  vec_dest = vect_create_destination_var (scalar_dest,
						  intermediate_type);
	  vec_dsts.quick_push (vec_dest);
	}
    }

  if (cvt_type)
    vec_dest = vect_create_destination_var (scalar_dest,
					    modifier == WIDEN
					    ? vectype_out : cvt_type);

  if (!slp_node)
    {
      if (modifier == WIDEN)
	{
	  vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
	  if (op_type == binary_op)
	    vec_oprnds1.create (1);
	}
      else if (modifier == NARROW)
	vec_oprnds0.create (
		   2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
    }
  else if (code == WIDEN_LSHIFT_EXPR)
    vec_oprnds1.create (slp_node->vec_stmts_size);

  last_oprnd = op0;
  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; j++)
	{
	  if (j == 0)
	    vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node,
			       -1);
	  else
	    vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);

	  FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	    {
	      /* Arguments are ready, create the new vector stmt.  */
	      if (code1 == CALL_EXPR)
		{
		  new_stmt = gimple_build_call (decl1, 1, vop0);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_call_set_lhs (new_stmt, new_temp);
		}
	      else
		{
		  gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
		  new_stmt = gimple_build_assign_with_ops (code1, vec_dest,
							   vop0, NULL);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_assign_set_lhs (new_stmt, new_temp);
		}

	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      if (slp_node)
		SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	    }

	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
      break;
    case WIDEN:
      /* In case the vectorization factor (VF) is bigger than the number
	 of elements that we can fit in a vectype (nunits), we have to
	 generate more than one vector stmt - i.e - we need to "unroll"
	 the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
	{
	  /* Handle uses.  */
	  if (j == 0)
	    {
	      if (slp_node)
		{
		  if (code == WIDEN_LSHIFT_EXPR)
		    {
		      unsigned int k;

		      vec_oprnd1 = op1;
		      /* Store vec_oprnd1 for every vector stmt to be created
			 for SLP_NODE.  We check during the analysis that all
			 the shift arguments are the same.  */
		      for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
			vec_oprnds1.quick_push (vec_oprnd1);

		      vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0,
					 NULL, slp_node, -1);
		    }
		  else
		    vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
				       &vec_oprnds1, slp_node, -1);
		}
	      else
		{
		  vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
		  vec_oprnds0.quick_push (vec_oprnd0);
		  if (op_type == binary_op)
		    {
		      if (code == WIDEN_LSHIFT_EXPR)
			vec_oprnd1 = op1;
		      else
			vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt,
								   NULL);
		      vec_oprnds1.quick_push (vec_oprnd1);
		    }
		}
	    }
	  else
	    {
	      vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
	      vec_oprnds0.truncate (0);
	      vec_oprnds0.quick_push (vec_oprnd0);
	      if (op_type == binary_op)
		{
		  if (code == WIDEN_LSHIFT_EXPR)
		    vec_oprnd1 = op1;
		  else
		    vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
								 vec_oprnd1);
		  vec_oprnds1.truncate (0);
		  vec_oprnds1.quick_push (vec_oprnd1);
		}
	    }

	  /* Arguments are ready.  Create the new vector stmts.  */
	  for (i = multi_step_cvt; i >= 0; i--)
	    {
	      tree this_dest = vec_dsts[i];
	      enum tree_code c1 = code1, c2 = code2;
	      if (i == 0 && codecvt2 != ERROR_MARK)
		{
		  c1 = codecvt1;
		  c2 = codecvt2;
		}
	      vect_create_vectorized_promotion_stmts (&vec_oprnds0,
						      &vec_oprnds1,
						      stmt, this_dest, gsi,
						      c1, c2, decl1, decl2,
						      op_type);
	    }

	  FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	    {
	      if (cvt_type)
		{
		  if (codecvt1 == CALL_EXPR)
		    {
		      new_stmt = gimple_build_call (decl1, 1, vop0);
		      new_temp = make_ssa_name (vec_dest, new_stmt);
		      gimple_call_set_lhs (new_stmt, new_temp);
		    }
		  else
		    {
		      gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
		      new_temp = make_ssa_name (vec_dest, NULL);
		      new_stmt = gimple_build_assign_with_ops (codecvt1,
							       new_temp,
							       vop0, NULL);
		    }

		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      else
		new_stmt = SSA_NAME_DEF_STMT (vop0);

	      if (slp_node)
		SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	      else
		{
		  if (!prev_stmt_info)
		    STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
		  prev_stmt_info = vinfo_for_stmt (new_stmt);
		}
	    }
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      break;
    case NARROW:
      /* In case the vectorization factor (VF) is bigger than the number
	 of elements that we can fit in a vectype (nunits), we have to
	 generate more than one vector stmt - i.e - we need to "unroll"
	 the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
	{
	  /* Handle uses.  */
	  if (slp_node)
	    vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
			       slp_node, -1);
	  else
	    {
	      vec_oprnds0.truncate (0);
	      vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
					vect_pow2 (multi_step_cvt) - 1);
	    }

	  /* Arguments are ready.  Create the new vector stmts.  */
	  if (cvt_type)
	    FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	      {
		if (codecvt1 == CALL_EXPR)
		  {
		    new_stmt = gimple_build_call (decl1, 1, vop0);
		    new_temp = make_ssa_name (vec_dest, new_stmt);
		    gimple_call_set_lhs (new_stmt, new_temp);
		  }
		else
		  {
		    gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
		    new_temp = make_ssa_name (vec_dest, NULL);
		    new_stmt = gimple_build_assign_with_ops (codecvt1,
							     new_temp,
							     vop0, NULL);
		  }

		vect_finish_stmt_generation (stmt, new_stmt, gsi);
		vec_oprnds0[i] = new_temp;
	      }

	  vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
						 stmt, vec_dsts, gsi,
						 slp_node, code1,
						 &prev_stmt_info);
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      break;
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_dsts.release ();
  interm_types.release ();

  return true;
}
/* Function vectorizable_assignment.

   Check if STMT performs an assignment (copy) that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
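/* E.g. a plain SSA copy a_1 = b_2, a PAREN_EXPR, or a VIEW_CONVERT_EXPR
   that just reinterprets the same bits as another vector type all
   become single vector-to-vector copies here.  */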
static bool
vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
			 gimple *vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree new_temp;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies;
  int i, j;
  vec<tree> vec_oprnds = vNULL;
  tree vop;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  gimple new_stmt = NULL;
  stmt_vec_info prev_stmt_info = NULL;
  enum tree_code code;
  tree vectype_in;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is vectorizable assignment?  */
  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (gimple_assign_single_p (stmt)
      || code == PAREN_EXPR
      || CONVERT_EXPR_CODE_P (code))
    op = gimple_assign_rhs1 (stmt);
  else
    return false;

  if (code == VIEW_CONVERT_EXPR)
    op = TREE_OPERAND (op, 0);

  if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
			     &def_stmt, &def, &dt[0], &vectype_in))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }

  /* We can handle NOP_EXPR conversions that do not change the number
     of elements or the vector size.  */
  if ((CONVERT_EXPR_CODE_P (code)
       || code == VIEW_CONVERT_EXPR)
      && (!vectype_in
	  || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
	  || (GET_MODE_SIZE (TYPE_MODE (vectype))
	      != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
    return false;

  /* We do not handle bit-precision changes.  */
  if ((CONVERT_EXPR_CODE_P (code)
       || code == VIEW_CONVERT_EXPR)
      && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
      && ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
	   != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
	  || ((TYPE_PRECISION (TREE_TYPE (op))
	       != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op))))))
      /* But a conversion that does not change the bit-pattern is ok.  */
      && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
	    > TYPE_PRECISION (TREE_TYPE (op)))
	   && TYPE_UNSIGNED (TREE_TYPE (op))))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "type conversion to/from bit-precision "
			 "unsupported.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_assignment ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /** Transform.  **/
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle use.  */
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
	vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node, -1);
      else
	vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
	{
	  if (CONVERT_EXPR_CODE_P (code)
	      || code == VIEW_CONVERT_EXPR)
	    vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
	  new_stmt = gimple_build_assign (vec_dest, vop);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds.release ();
  return true;
}
/* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
   either as shift by a scalar or by a vector.  */

bool
vect_supportable_shift (enum tree_code code, tree scalar_type)
{
  enum machine_mode vec_mode;
  optab optab;
  int icode;
  tree vectype;

  vectype = get_vectype_for_scalar_type (scalar_type);
  if (!vectype)
    return false;

  optab = optab_for_tree_code (code, vectype, optab_scalar);
  if (!optab
      || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (!optab
	  || (optab_handler (optab, TYPE_MODE (vectype))
	      == CODE_FOR_nothing))
	return false;
    }

  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    return false;

  return true;
}
/* Function vectorizable_shift.

   Check if STMT performs a shift operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
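/* E.g. a[i] = b[i] << 3 has an invariant shift amount and can use the
   vector shifted by scalar optab, while a[i] = b[i] << c[i] needs the
   vector shifted by vector form; the analysis below chooses between
   the two.  */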
4183 vectorizable_shift (gimple stmt
, gimple_stmt_iterator
*gsi
,
4184 gimple
*vec_stmt
, slp_tree slp_node
)
4188 tree op0
, op1
= NULL
;
4189 tree vec_oprnd1
= NULL_TREE
;
4190 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
4192 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
4193 enum tree_code code
;
4194 enum machine_mode vec_mode
;
4198 enum machine_mode optab_op2_mode
;
4201 enum vect_def_type dt
[2] = {vect_unknown_def_type
, vect_unknown_def_type
};
4202 gimple new_stmt
= NULL
;
4203 stmt_vec_info prev_stmt_info
;
4210 vec
<tree
> vec_oprnds0
= vNULL
;
4211 vec
<tree
> vec_oprnds1
= vNULL
;
4214 bool scalar_shift_arg
= true;
4215 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is STMT a vectorizable binary/unary operation?  */
  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
        || code == RROTATE_EXPR))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);
  if (TYPE_PRECISION (TREE_TYPE (scalar_dest))
      != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bit-precision shifts not supported.\n");
      return false;
    }
  op0 = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
                             &def_stmt, &def, &dt[0], &vectype))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }
  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  if (!vectype)
    vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
  if (vec_stmt)
    gcc_assert (vectype);
  if (!vectype)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "no vectype for scalar type\n");
      return false;
    }

  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
  if (nunits_out != nunits_in)
    return false;
  op1 = gimple_assign_rhs2 (stmt);
  if (!vect_is_simple_use_1 (op1, stmt, loop_vinfo, bb_vinfo, &def_stmt,
                             &def, &dt[1], &op1_vectype))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }

  if (loop_vinfo)
    vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  gcc_assert (ncopies >= 1);
  /* Determine whether the shift amount is a vector, or scalar.  If the
     shift/rotate amount is a vector, use the vector/vector shift optabs.  */

  if (dt[1] == vect_internal_def && !slp_node)
    scalar_shift_arg = false;
  else if (dt[1] == vect_constant_def
           || dt[1] == vect_external_def
           || dt[1] == vect_internal_def)
    {
      /* In SLP, need to check whether the shift count is the same,
         in loops if it is a constant or invariant, it is always
         a scalar shift.  */
      if (slp_node)
        {
          vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
          gimple slpstmt;

          FOR_EACH_VEC_ELT (stmts, k, slpstmt)
            if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
              scalar_shift_arg = false;
        }
    }
  else
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "operand mode requires invariant argument.\n");
      return false;
    }
  /* Vector shifted by vector.  */
  if (!scalar_shift_arg)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vector/vector shift/rotate found.\n");

      if (!op1_vectype)
        op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
      if (op1_vectype == NULL_TREE
          || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "unusable type for last operand in"
                             " vector/vector shift/rotate.\n");
          return false;
        }
    }
  /* See if the machine has a vector shifted by scalar insn and if not
     then see if it has a vector shifted by vector insn.  */
  else
    {
      optab = optab_for_tree_code (code, vectype, optab_scalar);
      if (optab
          && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "vector/scalar shift/rotate found.\n");
        }
      else
        {
          optab = optab_for_tree_code (code, vectype, optab_vector);
          if (optab
              && (optab_handler (optab, TYPE_MODE (vectype))
                  != CODE_FOR_nothing))
            {
              scalar_shift_arg = false;

              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location,
                                 "vector/vector shift/rotate found.\n");
              /* Unlike the other binary operators, shifts/rotates have
                 the rhs being int, instead of the same type as the lhs,
                 so make sure the scalar is the right type if we are
                 dealing with vectors of long long/long/short/char.  */
              if (dt[1] == vect_constant_def)
                op1 = fold_convert (TREE_TYPE (vectype), op1);
              else if (!useless_type_conversion_p (TREE_TYPE (vectype),
                                                   TREE_TYPE (op1)))
                {
                  if (slp_node
                      && TYPE_MODE (TREE_TYPE (vectype))
                         != TYPE_MODE (TREE_TYPE (op1)))
                    {
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                         vect_location,
                                         "unusable type for last operand in"
                                         " vector/vector shift/rotate.\n");
                      return false;
                    }
                  if (vec_stmt && !slp_node)
                    {
                      op1 = fold_convert (TREE_TYPE (vectype), op1);
                      op1 = vect_init_vector (stmt, op1,
                                              TREE_TYPE (vectype), NULL);
                    }
                }
            }
        }
    }
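  /* For illustration (editorial sketch): with vectors of long long shifted
     by an int constant,

       long long a[N];
       a[i] = a[i] << 3;

     the rhs 3 has type int while the lhs elements are long long; the
     fold_convert above gives the constant the vector's element type so that
     the invariant vector built by vect_init_vector matches the element type
     the vector/vector shift pattern expects.  */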
  /* Supportable by target?  */
  if (!optab)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "no optab.\n");
      return false;
    }
  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "op not supported by target.\n");
      /* Check only during analysis.  */
      if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
          || (vf < vect_min_worthwhile_factor (code)
              && !vec_stmt))
        return false;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "proceeding using word mode.\n");
    }

  /* Worthwhile without SIMD support?  Check only during analysis.  */
  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && vf < vect_min_worthwhile_factor (code)
      && !vec_stmt)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not worthwhile without SIMD support.\n");
      return false;
    }
  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "=== vectorizable_shift ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /* Transform.  */

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform binary/unary operation.\n");
  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
        {
          if (scalar_shift_arg)
            {
              /* Vector shl and shr insn patterns can be defined with scalar
                 operand 2 (shift operand).  In this case, use constant or loop
                 invariant op1 directly, without extending it to vector mode
                 first.  */
              optab_op2_mode = insn_data[icode].operand[2].mode;
              if (!VECTOR_MODE_P (optab_op2_mode))
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location,
                                     "operand 1 using scalar mode.\n");
                  vec_oprnd1 = op1;
                  vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
                  vec_oprnds1.quick_push (vec_oprnd1);
                  if (slp_node)
                    {
                      /* Store vec_oprnd1 for every vector stmt to be created
                         for SLP_NODE.  We check during the analysis that all
                         the shift arguments are the same.
                         TODO: Allow different constants for different vector
                         stmts generated for an SLP instance.  */
                      for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
                        vec_oprnds1.quick_push (vec_oprnd1);
                    }
                }
            }
          /* vec_oprnd1 is available if operand 1 should be of a scalar-type
             (a special case for certain kind of vector shifts); otherwise,
             operand 1 should be of a vector type (the usual case).  */
          if (vec_oprnd1)
            vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
                               slp_node, -1);
          else
            vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
                               slp_node, -1);
        }
      else
        vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
        {
          vop1 = vec_oprnds1[i];
          new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();

  return true;
}
/* Function vectorizable_operation.

   Check if STMT performs a binary, unary or ternary operation that can
   be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
                        gimple *vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code;
  enum machine_mode vec_mode;
  tree new_temp;
  int op_type;
  optab optab;
  int icode;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  gimple new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out;
  int ncopies;
  int j, i;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  vec<tree> vec_oprnds2 = vNULL;
  tree vop0, vop1, vop2;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  int vf;
  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is STMT a vectorizable binary/unary operation?  */
  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);

  /* For pointer addition, we should use the normal plus for
     the vector addition.  */
  if (code == POINTER_PLUS_EXPR)
    code = PLUS_EXPR;

  /* Support only unary or binary operations.  */
  op_type = TREE_CODE_LENGTH (code);
  if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "num. args = %d (not unary/binary/ternary op).\n",
                         op_type);
      return false;
    }

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Most operations cannot handle bit-precision types without extra
     truncations.  */
  if ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
       != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
      /* Exception are bitwise binary operations.  */
      && code != BIT_IOR_EXPR
      && code != BIT_XOR_EXPR
      && code != BIT_AND_EXPR)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bit-precision arithmetic not supported.\n");
      return false;
    }
  op0 = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
                             &def_stmt, &def, &dt[0], &vectype))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }
  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  if (!vectype)
    vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
  if (vec_stmt)
    gcc_assert (vectype);
  if (!vectype)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "no vectype for scalar type ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                             TREE_TYPE (op0));
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      return false;
    }

  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
  if (nunits_out != nunits_in)
    return false;
  if (op_type == binary_op || op_type == ternary_op)
    {
      op1 = gimple_assign_rhs2 (stmt);
      if (!vect_is_simple_use (op1, stmt, loop_vinfo, bb_vinfo, &def_stmt,
                               &def, &dt[1]))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "use not simple.\n");
          return false;
        }
    }
  if (op_type == ternary_op)
    {
      op2 = gimple_assign_rhs3 (stmt);
      if (!vect_is_simple_use (op2, stmt, loop_vinfo, bb_vinfo, &def_stmt,
                               &def, &dt[2]))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "use not simple.\n");
          return false;
        }
    }

  if (loop_vinfo)
    vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  gcc_assert (ncopies >= 1);
  /* Shifts are handled in vectorizable_shift ().  */
  if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
      || code == RROTATE_EXPR)
    return false;

  /* Supportable by target?  */

  vec_mode = TYPE_MODE (vectype);
  if (code == MULT_HIGHPART_EXPR)
    {
      if (can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype)))
        icode = LAST_INSN_CODE;
      else
        icode = CODE_FOR_nothing;
    }
  else
    {
      optab = optab_for_tree_code (code, vectype, optab_default);
      if (!optab)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "no optab.\n");
          return false;
        }
      icode = (int) optab_handler (optab, vec_mode);
    }

  if (icode == CODE_FOR_nothing)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "op not supported by target.\n");
      /* Check only during analysis.  */
      if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
          || (!vec_stmt && vf < vect_min_worthwhile_factor (code)))
        return false;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "proceeding using word mode.\n");
    }

  /* Worthwhile without SIMD support?  Check only during analysis.  */
  if (!VECTOR_MODE_P (vec_mode)
      && !vec_stmt
      && vf < vect_min_worthwhile_factor (code))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not worthwhile without SIMD support.\n");
      return false;
    }
  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "=== vectorizable_operation ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /* Transform.  */

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform binary/unary operation.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);
  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
     we need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1 (done in vectorizable_load. See more details
             there):
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     step2: vectorize stmt S2 (done here):
        To vectorize stmt S2 we first need to find the relevant vector
        def for the first operand 'x'.  This is, as usual, obtained from
        the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
        that defines 'x' (S1).  This way we find the stmt VS1_0, and the
        relevant vector def 'vx0'.  Having found 'vx0' we can generate
        the vector stmt VS2_0, and as usual, record it in the
        STMT_VINFO_VEC_STMT of stmt S2.
        When creating the second copy (VS2_1), we obtain the relevant vector
        def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
        stmt VS1_0.  This way we find the stmt VS1_1 and the relevant
        vector def 'vx1'.  Using 'vx1' we create stmt VS2_1 and record a
        pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
        Similarly when creating stmts VS2_2 and VS2_3.  This is the resulting
        chain of stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        VS2_0:  vz0 = vx0 + v1  VS2_1           -
        VS2_1:  vz1 = vx1 + v1  VS2_2           -
        VS2_2:  vz2 = vx2 + v1  VS2_3           -
        VS2_3:  vz3 = vx3 + v1  -               -
        S2:     z = x + 1       -               VS2_0  */
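  /* A concrete instance of the unrolling above (editorial sketch): assuming
     128-bit vectors of int (nunits = 4) and VF = 16,

       ncopies = VF / nunits = 16 / 4 = 4

     so the loop below runs four times, and each vector copy is linked to
     the next through STMT_VINFO_RELATED_STMT.  */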
  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
        {
          if (op_type == binary_op || op_type == ternary_op)
            vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
                               slp_node, -1);
          else
            vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
                               slp_node, -1);
          if (op_type == ternary_op)
            {
              vec_oprnds2.create (1);
              vec_oprnds2.quick_push (vect_get_vec_def_for_operand (op2,
                                                                    stmt,
                                                                    NULL));
            }
        }
      else
        {
          vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
          if (op_type == ternary_op)
            {
              tree vec_oprnd = vec_oprnds2.pop ();
              vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
                                                                      vec_oprnd));
            }
        }

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
        {
          vop1 = ((op_type == binary_op || op_type == ternary_op)
                  ? vec_oprnds1[i] : NULL_TREE);
          vop2 = ((op_type == ternary_op)
                  ? vec_oprnds2[i] : NULL_TREE);
          new_stmt = gimple_build_assign_with_ops (code, vec_dest,
                                                   vop0, vop1, vop2);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_oprnds2.release ();

  return true;
}
/* A helper function to ensure data reference DR's base alignment
   for STMT_INFO.  */

static void
ensure_base_align (stmt_vec_info stmt_info, struct data_reference *dr)
{
  if (!dr->aux)
    return;

  if (((dataref_aux *)dr->aux)->base_misaligned)
    {
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      tree base_decl = ((dataref_aux *)dr->aux)->base_decl;

      DECL_ALIGN (base_decl) = TYPE_ALIGN (vectype);
      DECL_USER_ALIGN (base_decl) = 1;
      ((dataref_aux *)dr->aux)->base_misaligned = false;
    }
}
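/* For illustration (editorial sketch): if a data reference is based on a
   declaration whose default alignment is below the vector alignment, e.g.

     static double a[N];   -- possibly only 8-byte aligned by default

   ensure_base_align raises DECL_ALIGN of 'a' to TYPE_ALIGN of the vector
   type (e.g. 16 bytes for a two-element double vector) so the vectorized
   accesses can use aligned loads and stores.  */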
/* Given a vector type VECTYPE returns the VECTOR_CST mask that implements
   reversal of the vector elements.  If that is impossible to do,
   returns NULL.  */

static tree
perm_mask_for_reverse (tree vectype)
{
  int i, nunits;
  unsigned char *sel;

  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  sel = XALLOCAVEC (unsigned char, nunits);

  for (i = 0; i < nunits; ++i)
    sel[i] = nunits - 1 - i;

  return vect_gen_perm_mask (vectype, sel);
}
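/* For example, for a four-element vector the selector built above is
   {3, 2, 1, 0}, so applying the resulting mask with

     vx' = VEC_PERM_EXPR <vx, vx, {3, 2, 1, 0}>

   turns {a, b, c, d} into {d, c, b, a}.  vect_gen_perm_mask returns NULL
   when the target cannot perform that permutation.  */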
/* Function vectorizable_store.

   Check if STMT defines a non-scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
static bool
vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
                    slp_tree slp_node)
{
  tree scalar_dest;
  tree data_ref;
  tree op;
  tree vec_oprnd = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree elem_type;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  enum machine_mode vec_mode;
  tree dummy;
  enum dr_alignment_support alignment_support_scheme;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt;
  stmt_vec_info prev_stmt_info = NULL;
  tree dataref_ptr = NULL_TREE;
  tree dataref_offset = NULL_TREE;
  gimple ptr_incr = NULL;
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies;
  int j;
  gimple next_stmt, first_stmt = NULL;
  bool grouped_store = false;
  bool store_lanes_p = false;
  unsigned int group_size, i;
  vec<tree> dr_chain = vNULL;
  vec<tree> oprnds = vNULL;
  vec<tree> result_chain = vNULL;
  bool inv_p;
  bool negative = false;
  tree offset = NULL_TREE;
  vec<tree> vec_oprnds = vNULL;
  bool slp = (slp_node != NULL);
  unsigned int vec_num;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  tree aggr_type;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);
  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  /* FORNOW.  This restriction should be relaxed.  */
  if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "multiple types in nested loop.\n");
      return false;
    }

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;
  /* Is vectorizable store?  */

  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
      && is_pattern_stmt_p (stmt_info))
    scalar_dest = TREE_OPERAND (scalar_dest, 0);
  if (TREE_CODE (scalar_dest) != ARRAY_REF
      && TREE_CODE (scalar_dest) != BIT_FIELD_REF
      && TREE_CODE (scalar_dest) != INDIRECT_REF
      && TREE_CODE (scalar_dest) != COMPONENT_REF
      && TREE_CODE (scalar_dest) != IMAGPART_EXPR
      && TREE_CODE (scalar_dest) != REALPART_EXPR
      && TREE_CODE (scalar_dest) != MEM_REF)
    return false;

  gcc_assert (gimple_assign_single_p (stmt));
  op = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op, stmt, loop_vinfo, bb_vinfo, &def_stmt,
                           &def, &dt))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }
  elem_type = TREE_TYPE (vectype);
  vec_mode = TYPE_MODE (vectype);

  /* FORNOW.  In some cases can vectorize even if data-type not supported
     (e.g. - array initialization with 0).  */
  if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
    return false;

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  negative =
    tree_int_cst_compare (loop && nested_in_vect_loop_p (loop, stmt)
                          ? STMT_VINFO_DR_STEP (stmt_info) : DR_STEP (dr),
                          size_zero_node) < 0;
  if (negative && ncopies > 1)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "multiple types with negative step.\n");
      return false;
    }
  if (negative)
    {
      gcc_assert (!grouped_store);
      alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
      if (alignment_support_scheme != dr_aligned
          && alignment_support_scheme != dr_unaligned_supported)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "negative step but alignment required.\n");
          return false;
        }
      if (dt != vect_constant_def
          && dt != vect_external_def
          && !perm_mask_for_reverse (vectype))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "negative step and reversing not supported.\n");
          return false;
        }
    }
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      grouped_store = true;
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      if (!slp && !PURE_SLP_STMT (stmt_info))
        {
          group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
          if (vect_store_lanes_supported (vectype, group_size))
            store_lanes_p = true;
          else if (!vect_grouped_store_supported (vectype, group_size))
            return false;
        }

      if (first_stmt == stmt)
        {
          /* STMT is the leader of the group.  Check the operands of all the
             stmts of the group.  */
          next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
          while (next_stmt)
            {
              gcc_assert (gimple_assign_single_p (next_stmt));
              op = gimple_assign_rhs1 (next_stmt);
              if (!vect_is_simple_use (op, next_stmt, loop_vinfo, bb_vinfo,
                                       &def_stmt, &def, &dt))
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "use not simple.\n");
                  return false;
                }
              next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
            }
        }
    }
  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
      vect_model_store_cost (stmt_info, ncopies, store_lanes_p, dt,
                             NULL, NULL, NULL);
      return true;
    }

  /* Transform.  */

  ensure_base_align (stmt_info, dr);
  if (grouped_store)
    {
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));

      GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;

      /* FORNOW */
      gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));

      /* We vectorize all the stmts of the interleaving group when we
         reach the last stmt in the group.  */
      if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
          < GROUP_SIZE (vinfo_for_stmt (first_stmt))
          && !slp)
        {
          *vec_stmt = NULL;
          return true;
        }

      if (slp)
        {
          grouped_store = false;
          /* VEC_NUM is the number of vect stmts to be created for this
             group.  */
          vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
          first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
          first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
          op = gimple_assign_rhs1 (first_stmt);
        }
      else
        /* VEC_NUM is the number of vect stmts to be created for this
           group.  */
        vec_num = group_size;
    }
  else
    {
      first_stmt = stmt;
      first_dr = dr;
      group_size = vec_num = 1;
    }
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform store. ncopies = %d\n", ncopies);

  dr_chain.create (group_size);
  oprnds.create (group_size);

  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
  gcc_assert (alignment_support_scheme);
  /* Targets with store-lane instructions must not require explicit
     realignment.  */
  gcc_assert (!store_lanes_p
              || alignment_support_scheme == dr_aligned
              || alignment_support_scheme == dr_unaligned_supported);

  if (negative)
    offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);

  if (store_lanes_p)
    aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
  else
    aggr_type = vectype;
  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation in
     vect_get_vec_def_for_copy_stmt.  */

  /* In case of interleaving (non-unit grouped access):

        S1:  &base + 2 = x2
        S2:  &base = x0
        S3:  &base + 1 = x1
        S4:  &base + 3 = x3

     We create vectorized stores starting from base address (the access of the
     first stmt in the chain (S2 in the above example), when the last store stmt
     of the chain (S4) is reached:

        VS1: &base = vx2
        VS2: &base + vec_size*1 = vx0
        VS3: &base + vec_size*2 = vx1
        VS4: &base + vec_size*3 = vx3

     Then permutation statements are generated:

        VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
        VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
        ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_store_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_store_chain()).

     In case of both multiple types and interleaving, above vector stores and
     permutation stmts are created for every copy.  The result vector stmts are
     put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
     STMT_VINFO_RELATED_STMT for the next copies.  */
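  /* For illustration (editorial sketch): the situation described above
     arises for a scalar loop that stores interleaved data, e.g.

       for (i = 0; i < n; i++)
         {
           a[2*i]   = x[i];
           a[2*i+1] = y[i];
         }

     The vector defs of x and y are interleaved into memory order by
     vect_permute_store_chain and written with contiguous vector stores.  */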
  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      gimple new_stmt;

      if (j == 0)
        {
          if (slp)
            {
              /* Get vectorized arguments for SLP_NODE.  */
              vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
                                 NULL, slp_node, -1);

              vec_oprnd = vec_oprnds[0];
            }
          else
            {
              /* For interleaved stores we collect vectorized defs for all the
                 stores in the group in DR_CHAIN and OPRNDS.  DR_CHAIN is then
                 used as an input to vect_permute_store_chain(), and OPRNDS as
                 an input to vect_get_vec_def_for_stmt_copy() for the next copy.

                 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
                 OPRNDS are of size 1.  */
              next_stmt = first_stmt;
              for (i = 0; i < group_size; i++)
                {
                  /* Since gaps are not supported for interleaved stores,
                     GROUP_SIZE is the exact number of stmts in the chain.
                     Therefore, NEXT_STMT can't be NULL_TREE.  In case that
                     there is no interleaving, GROUP_SIZE is 1, and only one
                     iteration of the loop will be executed.  */
                  gcc_assert (next_stmt
                              && gimple_assign_single_p (next_stmt));
                  op = gimple_assign_rhs1 (next_stmt);

                  vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
                                                            NULL);
                  dr_chain.quick_push (vec_oprnd);
                  oprnds.quick_push (vec_oprnd);
                  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
                }
            }
          /* We should have caught mismatched types earlier.  */
          gcc_assert (useless_type_conversion_p (vectype,
                                                 TREE_TYPE (vec_oprnd)));
          bool simd_lane_access_p
            = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
          if (simd_lane_access_p
              && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
              && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
              && integer_zerop (DR_OFFSET (first_dr))
              && integer_zerop (DR_INIT (first_dr))
              && alias_sets_conflict_p (get_alias_set (aggr_type),
                                        get_alias_set (DR_REF (first_dr))))
            {
              dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
              dataref_offset = build_int_cst (reference_alias_ptr_type
                                              (DR_REF (first_dr)), 0);
              inv_p = false;
            }
          else
            dataref_ptr
              = vect_create_data_ref_ptr (first_stmt, aggr_type,
                                          simd_lane_access_p ? loop : NULL,
                                          offset, &dummy, gsi, &ptr_incr,
                                          simd_lane_access_p, &inv_p);
          gcc_assert (bb_vinfo || !inv_p);
        }
      else
        {
          /* For interleaved stores we created vectorized defs for all the
             defs stored in OPRNDS in the previous iteration (previous copy).
             DR_CHAIN is then used as an input to vect_permute_store_chain(),
             and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
             next copy.
             If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
             OPRNDS are of size 1.  */
          for (i = 0; i < group_size; i++)
            {
              op = oprnds[i];
              vect_is_simple_use (op, NULL, loop_vinfo, bb_vinfo, &def_stmt,
                                  &def, &dt);
              vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
              dr_chain[i] = vec_oprnd;
              oprnds[i] = vec_oprnd;
            }
          if (dataref_offset)
            dataref_offset
              = int_const_binop (PLUS_EXPR, dataref_offset,
                                 TYPE_SIZE_UNIT (aggr_type));
          else
            dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
                                           TYPE_SIZE_UNIT (aggr_type));
        }
      if (store_lanes_p)
        {
          tree vec_array;

          /* Combine all the vectors into an array.  */
          vec_array = create_vector_array (vectype, vec_num);
          for (i = 0; i < vec_num; i++)
            {
              vec_oprnd = dr_chain[i];
              write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
            }

          /* Emit:
               MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY).  */
          data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
          new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array);
          gimple_call_set_lhs (new_stmt, data_ref);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
        }
      else
        {
          new_stmt = NULL;
          if (grouped_store)
            {
              if (j == 0)
                result_chain.create (group_size);
              /* Permute.  */
              vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
                                        &result_chain);
            }

          next_stmt = first_stmt;
          for (i = 0; i < vec_num; i++)
            {
              unsigned align, misalign;

              if (i > 0)
                /* Bump the vector pointer.  */
                dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                               stmt, NULL_TREE);

              if (slp)
                vec_oprnd = vec_oprnds[i];
              else if (grouped_store)
                /* For grouped stores vectorized defs are interleaved in
                   vect_permute_store_chain().  */
                vec_oprnd = result_chain[i];
              data_ref = build2 (MEM_REF, TREE_TYPE (vec_oprnd), dataref_ptr,
                                 dataref_offset
                                 ? dataref_offset
                                 : build_int_cst (reference_alias_ptr_type
                                                  (DR_REF (first_dr)), 0));
              align = TYPE_ALIGN_UNIT (vectype);
              if (aligned_access_p (first_dr))
                misalign = 0;
              else if (DR_MISALIGNMENT (first_dr) == -1)
                {
                  TREE_TYPE (data_ref)
                    = build_aligned_type (TREE_TYPE (data_ref),
                                          TYPE_ALIGN (elem_type));
                  align = TYPE_ALIGN_UNIT (elem_type);
                  misalign = 0;
                }
              else
                {
                  TREE_TYPE (data_ref)
                    = build_aligned_type (TREE_TYPE (data_ref),
                                          TYPE_ALIGN (elem_type));
                  misalign = DR_MISALIGNMENT (first_dr);
                }
              if (dataref_offset == NULL_TREE)
                set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
                                        misalign);
              if (negative
                  && dt != vect_constant_def
                  && dt != vect_external_def)
                {
                  tree perm_mask = perm_mask_for_reverse (vectype);
                  tree perm_dest
                    = vect_create_destination_var (gimple_assign_rhs1 (stmt),
                                                   vectype);
                  tree new_temp = make_ssa_name (perm_dest, NULL);

                  /* Generate the permute statement.  */
                  gimple perm_stmt
                    = gimple_build_assign_with_ops (VEC_PERM_EXPR, new_temp,
                                                    vec_oprnd, vec_oprnd,
                                                    perm_mask);
                  vect_finish_stmt_generation (stmt, perm_stmt, gsi);

                  perm_stmt = SSA_NAME_DEF_STMT (new_temp);
                  vec_oprnd = new_temp;
                }
              /* Arguments are ready.  Create the new vector stmt.  */
              new_stmt = gimple_build_assign (data_ref, vec_oprnd);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);

              if (slp)
                continue;

              next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
              if (!next_stmt)
                break;
            }
        }

      if (!slp)
        {
          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
    }

  dr_chain.release ();
  oprnds.release ();
  result_chain.release ();
  vec_oprnds.release ();

  return true;
}
/* Given a vector type VECTYPE and permutation SEL returns
   the VECTOR_CST mask that implements the permutation of the
   vector elements.  If that is impossible to do, returns NULL.  */

tree
vect_gen_perm_mask (tree vectype, unsigned char *sel)
{
  tree mask_elt_type, mask_type, mask_vec, *mask_elts;
  int i, nunits;

  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
    return NULL;

  mask_elt_type = lang_hooks.types.type_for_mode
    (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
  mask_type = get_vectype_for_scalar_type (mask_elt_type);

  mask_elts = XALLOCAVEC (tree, nunits);
  for (i = nunits - 1; i >= 0; i--)
    mask_elts[i] = build_int_cst (mask_elt_type, sel[i]);
  mask_vec = build_vector (mask_type, mask_elts);

  return mask_vec;
}
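/* Usage sketch (editorial, hypothetical caller): to build a mask that keeps
   the even-indexed elements of two input vectors, fill SEL with 0, 2, 4, ...
   and let vect_gen_perm_mask validate it against the target:

     unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
     for (i = 0; i < nunits; ++i)
       sel[i] = i * 2;
     mask = vect_gen_perm_mask (vectype, sel);
     if (mask == NULL_TREE)
       -- the target cannot perform this permutation

   perm_mask_for_reverse above and the gather code in vectorizable_load
   below follow this pattern.  */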
/* Given vector variables X and Y that were generated for the scalar
   STMT, generate instructions to permute the vector elements of X and Y
   using permutation mask MASK_VEC, insert them at *GSI and return the
   permuted vector variable.  */

static tree
permute_vec_elements (tree x, tree y, tree mask_vec, gimple stmt,
                      gimple_stmt_iterator *gsi)
{
  tree vectype = TREE_TYPE (x);
  tree perm_dest, data_ref;
  gimple perm_stmt;

  perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
  data_ref = make_ssa_name (perm_dest, NULL);

  /* Generate the permute statement.  */
  perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref,
                                            x, y, mask_vec);
  vect_finish_stmt_generation (stmt, perm_stmt, gsi);

  return data_ref;
}
/* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
   inserting them on the loop's preheader edge.  Returns true if we
   were successful in doing so (and thus STMT can be moved then),
   otherwise returns false.  */

static bool
hoist_defs_of_uses (gimple stmt, struct loop *loop)
{
  ssa_op_iter i;
  tree op;
  bool any = false;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
    {
      gimple def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
          && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
        {
          /* Make sure we don't need to recurse.  While we could do
             so in simple cases when there are more complex use webs
             we don't have an easy way to preserve stmt order to fulfil
             dependencies within them.  */
          tree op2;
          ssa_op_iter i2;
          if (gimple_code (def_stmt) == GIMPLE_PHI)
            return false;
          FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
            {
              gimple def_stmt2 = SSA_NAME_DEF_STMT (op2);
              if (!gimple_nop_p (def_stmt2)
                  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
                return false;
            }
          any = true;
        }
    }

  if (!any)
    return true;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
    {
      gimple def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
          && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
        {
          gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
          gsi_remove (&gsi, false);
          gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
        }
    }

  return true;
}
/* vectorizable_load.

   Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
static bool
vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
                   slp_tree slp_node, slp_instance slp_node_instance)
{
  tree scalar_dest;
  tree vec_dest = NULL;
  tree data_ref = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info prev_stmt_info;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
  bool nested_in_vect_loop = false;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree elem_type;
  tree new_temp;
  enum machine_mode mode;
  gimple new_stmt = NULL;
  tree dummy;
  enum dr_alignment_support alignment_support_scheme;
  tree dataref_ptr = NULL_TREE;
  tree dataref_offset = NULL_TREE;
  gimple ptr_incr = NULL;
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies;
  int i, j, group_size, group_gap;
  tree msq = NULL_TREE, lsq;
  tree offset = NULL_TREE;
  tree realignment_token = NULL_TREE;
  gimple phi = NULL;
  vec<tree> dr_chain = vNULL;
  bool grouped_load = false;
  bool load_lanes_p = false;
  gimple first_stmt;
  bool inv_p;
  bool negative = false;
  bool compute_in_loop = false;
  struct loop *at_loop;
  int vec_num;
  bool slp = (slp_node != NULL);
  bool slp_perm = false;
  enum tree_code code;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  int vf;
  tree aggr_type;
  tree gather_base = NULL_TREE, gather_off = NULL_TREE;
  tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
  int gather_scale = 1;
  enum vect_def_type gather_dt = vect_unknown_def_type;

  if (loop_vinfo)
    {
      loop = LOOP_VINFO_LOOP (loop_vinfo);
      nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
      vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
    }
  else
    vf = 1;
  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  /* FORNOW.  This restriction should be relaxed.  */
  if (nested_in_vect_loop && ncopies > 1)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "multiple types in nested loop.\n");
      return false;
    }

  /* Invalidate assumptions made by dependence analysis when vectorization
     on the unrolled body effectively re-orders stmts.  */
  if (ncopies > 1
      && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
      && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
          > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "cannot perform implicit CSE when unrolling "
                         "with negative dependence distance\n");
      return false;
    }
  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is vectorizable load?  */
  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != ARRAY_REF
      && code != BIT_FIELD_REF
      && code != INDIRECT_REF
      && code != COMPONENT_REF
      && code != IMAGPART_EXPR
      && code != REALPART_EXPR
      && code != MEM_REF
      && TREE_CODE_CLASS (code) != tcc_declaration)
    return false;

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  elem_type = TREE_TYPE (vectype);
  mode = TYPE_MODE (vectype);

  /* FORNOW.  In some cases can vectorize even if data-type not supported
     (e.g. - data copies).  */
  if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Aligned load, but unsupported type.\n");
      return false;
    }
  /* Check if the load is a part of an interleaving chain.  */
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      grouped_load = true;
      /* FORNOW */
      gcc_assert (! nested_in_vect_loop && !STMT_VINFO_GATHER_P (stmt_info));

      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      if (!slp && !PURE_SLP_STMT (stmt_info))
        {
          group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
          if (vect_load_lanes_supported (vectype, group_size))
            load_lanes_p = true;
          else if (!vect_grouped_load_supported (vectype, group_size))
            return false;
        }

      /* Invalidate assumptions made by dependence analysis when vectorization
         on the unrolled body effectively re-orders stmts.  */
      if (!PURE_SLP_STMT (stmt_info)
          && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
          && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
              > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "cannot perform implicit CSE when performing "
                             "group loads with negative dependence distance\n");
          return false;
        }
    }
  if (STMT_VINFO_GATHER_P (stmt_info))
    {
      gimple def_stmt;
      tree def;
      gather_decl = vect_check_gather (stmt, loop_vinfo, &gather_base,
                                       &gather_off, &gather_scale);
      gcc_assert (gather_decl);
      if (!vect_is_simple_use_1 (gather_off, NULL, loop_vinfo, bb_vinfo,
                                 &def_stmt, &def, &gather_dt,
                                 &gather_off_vectype))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "gather index use not simple.\n");
          return false;
        }
    }
  else if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
    ;
  else
    {
      negative = tree_int_cst_compare (nested_in_vect_loop
                                       ? STMT_VINFO_DR_STEP (stmt_info)
                                       : DR_STEP (dr),
                                       size_zero_node) < 0;
      if (negative && ncopies > 1)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "multiple types with negative step.\n");
          return false;
        }
      if (negative)
        {
          if (grouped_load)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "negative step for group load not supported"
                                 "\n");
              return false;
            }
          alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
          if (alignment_support_scheme != dr_aligned
              && alignment_support_scheme != dr_unaligned_supported)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "negative step but alignment required.\n");
              return false;
            }
          if (!perm_mask_for_reverse (vectype))
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "negative step and reversing not supported."
                                 "\n");
              return false;
            }
        }
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
      vect_model_load_cost (stmt_info, ncopies, load_lanes_p, NULL, NULL, NULL);
      return true;
    }
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform load. ncopies = %d\n", ncopies);

  /* Transform.  */

  ensure_base_align (stmt_info, dr);
  if (STMT_VINFO_GATHER_P (stmt_info))
    {
      tree vec_oprnd0 = NULL_TREE, op;
      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
      tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
      tree ptr, mask, var, scale, merge, perm_mask = NULL_TREE;
      tree prev_res = NULL_TREE;
      edge pe = loop_preheader_edge (loop);
      gimple_seq seq;
      basic_block new_bb;
      enum { NARROW, NONE, WIDEN } modifier;
      int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);

      if (nunits == gather_off_nunits)
        modifier = NONE;
      else if (nunits == gather_off_nunits / 2)
        {
          unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
          modifier = WIDEN;

          for (i = 0; i < gather_off_nunits; ++i)
            sel[i] = i | nunits;

          perm_mask = vect_gen_perm_mask (gather_off_vectype, sel);
          gcc_assert (perm_mask != NULL_TREE);
        }
      else if (nunits == gather_off_nunits * 2)
        {
          unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
          modifier = NARROW;

          for (i = 0; i < nunits; ++i)
            sel[i] = i < gather_off_nunits
                     ? i : i + nunits - gather_off_nunits;

          perm_mask = vect_gen_perm_mask (vectype, sel);
          gcc_assert (perm_mask != NULL_TREE);
        }
      else
        gcc_unreachable ();
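      /* For example (editorial sketch): with nunits = 2 and
         gather_off_nunits = 4 (modifier == WIDEN) the selector built above
         is {2, 3, 2, 3} (i | nunits), extracting the upper half of the
         offset vector for every odd copy.  In the opposite case,
         nunits = 4 and gather_off_nunits = 2 (modifier == NARROW), the
         selector is {0, 1, 4, 5}, used below to merge two gather results
         into a single vector.  */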
      rettype = TREE_TYPE (TREE_TYPE (gather_decl));
      srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      scaletype = TREE_VALUE (arglist);
      gcc_checking_assert (types_compatible_p (srctype, rettype));

      vec_dest = vect_create_destination_var (scalar_dest, vectype);

      ptr = fold_convert (ptrtype, gather_base);
      if (!is_gimple_min_invariant (ptr))
        {
          ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
          new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
          gcc_assert (!new_bb);
        }
      /* Currently we support only unconditional gather loads,
         so mask should be all ones.  */
      if (TREE_CODE (masktype) == INTEGER_TYPE)
        mask = build_int_cst (masktype, -1);
      else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
        {
          mask = build_int_cst (TREE_TYPE (masktype), -1);
          mask = build_vector_from_val (masktype, mask);
          mask = vect_init_vector (stmt, mask, masktype, NULL);
        }
      else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
        {
          REAL_VALUE_TYPE r;
          long tmp[6];
          for (j = 0; j < 6; ++j)
            tmp[j] = -1;
          real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
          mask = build_real (TREE_TYPE (masktype), r);
          mask = build_vector_from_val (masktype, mask);
          mask = vect_init_vector (stmt, mask, masktype, NULL);
        }
      else
        gcc_unreachable ();
      scale = build_int_cst (scaletype, gather_scale);

      if (TREE_CODE (TREE_TYPE (rettype)) == INTEGER_TYPE)
        merge = build_int_cst (TREE_TYPE (rettype), 0);
      else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype)))
        {
          REAL_VALUE_TYPE r;
          long tmp[6];
          for (j = 0; j < 6; ++j)
            tmp[j] = 0;
          real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (rettype)));
          merge = build_real (TREE_TYPE (rettype), r);
        }
      else
        gcc_unreachable ();
      merge = build_vector_from_val (rettype, merge);
      merge = vect_init_vector (stmt, merge, rettype, NULL);
      prev_stmt_info = NULL;
      for (j = 0; j < ncopies; ++j)
        {
          if (modifier == WIDEN && (j & 1))
            op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
                                       perm_mask, stmt, gsi);
          else if (j == 0)
            op = vec_oprnd0
              = vect_get_vec_def_for_operand (gather_off, stmt, NULL);
          else
            op = vec_oprnd0
              = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);

          if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
            {
              gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
                          == TYPE_VECTOR_SUBPARTS (idxtype));
              var = vect_get_new_vect_var (idxtype, vect_simple_var, NULL);
              var = make_ssa_name (var, NULL);
              op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
              new_stmt
                = gimple_build_assign_with_ops (VIEW_CONVERT_EXPR, var,
                                                op, NULL_TREE);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              op = var;
            }

          new_stmt
            = gimple_build_call (gather_decl, 5, merge, ptr, op, mask, scale);

          if (!useless_type_conversion_p (vectype, rettype))
            {
              gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
                          == TYPE_VECTOR_SUBPARTS (rettype));
              var = vect_get_new_vect_var (rettype, vect_simple_var, NULL);
              op = make_ssa_name (var, new_stmt);
              gimple_call_set_lhs (new_stmt, op);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              var = make_ssa_name (vec_dest, NULL);
              op = build1 (VIEW_CONVERT_EXPR, vectype, op);
              new_stmt
                = gimple_build_assign_with_ops (VIEW_CONVERT_EXPR, var, op,
                                                NULL_TREE);
            }
          else
            {
              var = make_ssa_name (vec_dest, new_stmt);
              gimple_call_set_lhs (new_stmt, var);
            }

          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          if (modifier == NARROW)
            {
              if ((j & 1) == 0)
                {
                  prev_res = var;
                  continue;
                }
              var = permute_vec_elements (prev_res, var,
                                          perm_mask, stmt, gsi);
              new_stmt = SSA_NAME_DEF_STMT (var);
            }

          if (prev_stmt_info == NULL)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
      return true;
    }
  else if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
    {
      gimple_stmt_iterator incr_gsi;
      bool insert_after;
      gimple incr;
      tree offvar;
      tree ivstep;
      tree running_off;
      vec<constructor_elt, va_gc> *v = NULL;
      gimple_seq stmts = NULL;
      tree stride_base, stride_step, alias_off;

      gcc_assert (!nested_in_vect_loop);

      stride_base
        = fold_build_pointer_plus
            (unshare_expr (DR_BASE_ADDRESS (dr)),
             size_binop (PLUS_EXPR,
                         convert_to_ptrofftype (unshare_expr (DR_OFFSET (dr))),
                         convert_to_ptrofftype (DR_INIT (dr))));
      stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (dr)));

      /* For a load with loop-invariant (but other than power-of-2)
         stride (i.e. not a grouped access) like so:

           for (i = 0; i < n; i += stride)
             ... = array[i];

         we generate a new induction variable and new accesses to
         form a new vector (or vectors, depending on ncopies):

           for (j = 0; ; j += VF*stride)
             tmp1 = array[j];
             tmp2 = array[j + stride];
             ...
             vectemp = {tmp1, tmp2, ...}
             ...  */
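      /* A concrete instance of the scheme above (editorial sketch):
         assuming stride = 3 and VF = 4, the scalar loop reads array[0],
         array[3], array[6], ... and the vectorized loop builds

           vectemp = {array[j], array[j+3], array[j+6], array[j+9]}

         with the new induction variable stepping by VF*stride = 12
         elements per vector iteration.  */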
      ivstep = stride_step;
      ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
                            build_int_cst (TREE_TYPE (ivstep), vf));

      standard_iv_increment_position (loop, &incr_gsi, &insert_after);

      create_iv (stride_base, ivstep, NULL,
                 loop, &incr_gsi, insert_after,
                 &offvar, NULL);
      incr = gsi_stmt (incr_gsi);
      set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));

      stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
      if (stmts)
        gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);

      prev_stmt_info = NULL;
      running_off = offvar;
      alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (dr)), 0);
      for (j = 0; j < ncopies; j++)
        {
          tree vec_inv;

          vec_alloc (v, nunits);
          for (i = 0; i < nunits; i++)
            {
              tree newref, newoff;
              gimple incr;
              newref = build2 (MEM_REF, TREE_TYPE (vectype),
                               running_off, alias_off);

              newref = force_gimple_operand_gsi (gsi, newref, true,
                                                 NULL_TREE, true,
                                                 GSI_SAME_STMT);
              CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, newref);
              newoff = copy_ssa_name (running_off, NULL);
              incr = gimple_build_assign_with_ops (POINTER_PLUS_EXPR, newoff,
                                                   running_off, stride_step);
              vect_finish_stmt_generation (stmt, incr, gsi);

              running_off = newoff;
            }

          vec_inv = build_constructor (vectype, v);
          new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
          new_stmt = SSA_NAME_DEF_STMT (new_temp);

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
      return true;
    }
  if (grouped_load)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      if (slp
          && !SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()
          && first_stmt != SLP_TREE_SCALAR_STMTS (slp_node)[0])
        first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];

      /* Check if the chain of loads is already vectorized.  */
      if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
          /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
             ??? But we can only do so if there is exactly one
             as we have no way to get at the rest.  Leave the CSE
             opportunity alone.
             ??? With the group load eventually participating
             in multiple different permutations (having multiple
             slp nodes which refer to the same group) the CSE
             is even wrong code.  See PR56270.  */
          && !slp)
        {
          *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
          return true;
        }
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      group_gap = 0;

      /* VEC_NUM is the number of vect stmts to be created for this group.  */
      if (slp)
        {
          grouped_load = false;
          vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
          if (SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
            slp_perm = true;
          group_gap = GROUP_GAP (vinfo_for_stmt (first_stmt));
        }
      else
        vec_num = group_size;
    }
  else
    {
      first_stmt = stmt;
      first_dr = dr;
      group_size = vec_num = 1;
      group_gap = 0;
    }
  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
  gcc_assert (alignment_support_scheme);
  /* Targets with load-lane instructions must not require explicit
     realignment.  */
  gcc_assert (!load_lanes_p
              || alignment_support_scheme == dr_aligned
              || alignment_support_scheme == dr_unaligned_supported);
  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
     need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1:
        We first create the vector stmt VS1_0, and, as usual, record a
        pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
        Next, we create the vector stmt VS1_1, and record a pointer to
        it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
        Similarly, for VS1_2 and VS1_3.  This is the resulting chain of
        stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     See in documentation in vect_get_vec_def_for_stmt_copy for how the
     information we recorded in RELATED_STMT field is used to vectorize
     stmt S2.  */
  /* In case of interleaving (non-unit grouped access):

     S1:  x2 = &base + 2
     S2:  x0 = &base
     S3:  x1 = &base + 1
     S4:  x3 = &base + 3

     Vectorized loads are created in the order of memory accesses
     starting from the access of the first stmt of the chain:

     VS1: vx0 = &base
     VS2: vx1 = &base + vec_size*1
     VS3: vx3 = &base + vec_size*2
     VS4: vx4 = &base + vec_size*3

     Then permutation statements are generated:

     VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
     VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
       ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_load_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_load_chain()).
     The generation of permutation stmts and recording them in
     STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().

     In case of both multiple types and interleaving, the vector loads and
     permutation stmts above are created for every copy.  The result vector
     stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
     corresponding STMT_VINFO_RELATED_STMT for the next copies.  */
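  /* For illustration (editorial sketch): the layout above corresponds to a
     scalar loop that reads interleaved data, e.g.

       for (i = 0; i < n; i++)
         {
           x[i] = a[2*i];
           y[i] = a[2*i+1];
         }

     Two contiguous vector loads fetch a[2*i .. 2*i+2*nunits-1], and the
     even/odd VEC_PERM_EXPRs above deinterleave them into the vectors
     for x and y.  */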
  /* If the data reference is aligned (dr_aligned) or potentially unaligned
     on a target that supports unaligned accesses (dr_unaligned_supported)
     we generate the following code:

       p = initial_addr;
       indx = 0;
       loop {
         p = p + indx * vectype_size;
         vec_dest = *(p);
         indx = indx + 1;
       }

     Otherwise, the data reference is potentially unaligned on a target that
     does not support unaligned accesses (dr_explicit_realign_optimized) -
     then generate the following code, in which the data in each iteration is
     obtained by two vector loads, one from the previous iteration, and one
     from the current iteration:

       msq_init = *(floor(p1))
       p2 = initial_addr + VS - 1;
       realignment_token = call target_builtin;
       indx = 0;
       loop {
         p2 = p2 + indx * vectype_size
         lsq = *(floor(p2))
         vec_dest = realign_load (msq, lsq, realignment_token)
         indx = indx + 1;
         msq = lsq;
       }   */
6245 if (nested_in_vect_loop
6246 && (TREE_INT_CST_LOW (DR_STEP (dr
))
6247 % GET_MODE_SIZE (TYPE_MODE (vectype
)) != 0))
6249 gcc_assert (alignment_support_scheme
!= dr_explicit_realign_optimized
);
6250 compute_in_loop
= true;
  if ((alignment_support_scheme == dr_explicit_realign_optimized
       || alignment_support_scheme == dr_explicit_realign)
      && !compute_in_loop)
    {
      msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
                                    alignment_support_scheme, NULL_TREE,
                                    &at_loop);
      if (alignment_support_scheme == dr_explicit_realign_optimized)
        {
          phi = SSA_NAME_DEF_STMT (msq);
          offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
        }
    }
  else
    at_loop = loop;

  if (negative)
    offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
  if (load_lanes_p)
    aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
  else
    aggr_type = vectype;

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* 1. Create the vector or array pointer update chain.  */
      if (j == 0)
        {
          bool simd_lane_access_p
            = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
          if (simd_lane_access_p
              && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
              && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
              && integer_zerop (DR_OFFSET (first_dr))
              && integer_zerop (DR_INIT (first_dr))
              && alias_sets_conflict_p (get_alias_set (aggr_type),
                                        get_alias_set (DR_REF (first_dr)))
              && (alignment_support_scheme == dr_aligned
                  || alignment_support_scheme == dr_unaligned_supported))
            {
              dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
              dataref_offset = build_int_cst (reference_alias_ptr_type
                                              (DR_REF (first_dr)), 0);
              inv_p = false;
            }
          else
            dataref_ptr
              = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
                                          offset, &dummy, gsi, &ptr_incr,
                                          simd_lane_access_p, &inv_p);
        }
      else if (dataref_offset)
        dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
                                          TYPE_SIZE_UNIT (aggr_type));
      else
        dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
                                       TYPE_SIZE_UNIT (aggr_type));

      if (grouped_load || slp_perm)
        dr_chain.create (vec_num);
      if (load_lanes_p)
        {
          tree vec_array;

          vec_array = create_vector_array (vectype, vec_num);

          /* Emit:
               VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]).  */
          data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
          new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
          gimple_call_set_lhs (new_stmt, vec_array);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          /* Extract each vector into an SSA_NAME.  */
          for (i = 0; i < vec_num; i++)
            {
              new_temp = read_vector_array (stmt, gsi, scalar_dest,
                                            vec_array, i);
              dr_chain.quick_push (new_temp);
            }

          /* Record the mapping between SSA_NAMEs and statements.  */
          vect_record_grouped_load_vectors (stmt, dr_chain);
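          /* On targets with structure-load instructions (for example the
             ARM/AArch64 vld2/vld3/vld4 family), IFN_LOAD_LANES loads and
             de-interleaves VEC_NUM vectors in one operation, avoiding the
             explicit VEC_PERM_EXPRs of the generic grouped-load path.  */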
        }
      else
        {
          for (i = 0; i < vec_num; i++)
            {
              if (i > 0)
                dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                               stmt, NULL_TREE);
              /* 2. Create the vector-load in the loop.  */
              switch (alignment_support_scheme)
                {
                case dr_aligned:
                case dr_unaligned_supported:
                  {
                    unsigned int align, misalign;

                    data_ref
                      = build2 (MEM_REF, vectype, dataref_ptr,
                                dataref_offset
                                ? dataref_offset
                                : build_int_cst (reference_alias_ptr_type
                                                 (DR_REF (first_dr)), 0));
                    align = TYPE_ALIGN_UNIT (vectype);
                    if (alignment_support_scheme == dr_aligned)
                      {
                        gcc_assert (aligned_access_p (first_dr));
                        misalign = 0;
                      }
                    else if (DR_MISALIGNMENT (first_dr) == -1)
                      {
                        TREE_TYPE (data_ref)
                          = build_aligned_type (TREE_TYPE (data_ref),
                                                TYPE_ALIGN (elem_type));
                        align = TYPE_ALIGN_UNIT (elem_type);
                        misalign = 0;
                      }
                    else
                      {
                        TREE_TYPE (data_ref)
                          = build_aligned_type (TREE_TYPE (data_ref),
                                                TYPE_ALIGN (elem_type));
                        misalign = DR_MISALIGNMENT (first_dr);
                      }
                    if (dataref_offset == NULL_TREE)
                      set_ptr_info_alignment (get_ptr_info (dataref_ptr),
                                              align, misalign);
                    break;
                  }
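                /* The alignment recorded on DATAREF_PTR above is what allows
                   later RTL expansion to pick an unaligned vector move for
                   this MEM_REF when the access is not known to be fully
                   aligned.  */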
                case dr_explicit_realign:
                  {
                    tree ptr, bump;
                    tree vs_minus_1;

                    vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);

                    if (compute_in_loop)
                      msq = vect_setup_realignment (first_stmt, gsi,
                                                    &realignment_token,
                                                    dr_explicit_realign,
                                                    dataref_ptr, NULL);

                    ptr = copy_ssa_name (dataref_ptr, NULL);
                    new_stmt = gimple_build_assign_with_ops
                                 (BIT_AND_EXPR, ptr, dataref_ptr,
                                  build_int_cst
                                  (TREE_TYPE (dataref_ptr),
                                   -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);
                    data_ref
                      = build2 (MEM_REF, vectype, ptr,
                                build_int_cst (reference_alias_ptr_type
                                               (DR_REF (first_dr)), 0));
                    vec_dest = vect_create_destination_var (scalar_dest,
                                                            vectype);
                    new_stmt = gimple_build_assign (vec_dest, data_ref);
                    new_temp = make_ssa_name (vec_dest, new_stmt);
                    gimple_assign_set_lhs (new_stmt, new_temp);
                    gimple_set_vdef (new_stmt, gimple_vdef (stmt));
                    gimple_set_vuse (new_stmt, gimple_vuse (stmt));
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);
                    msq = new_temp;

                    bump = size_binop (MULT_EXPR, vs_minus_1,
                                       TYPE_SIZE_UNIT (elem_type));
                    ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
                    new_stmt = gimple_build_assign_with_ops
                                 (BIT_AND_EXPR, NULL_TREE, ptr,
                                  build_int_cst
                                  (TREE_TYPE (ptr),
                                   -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
                    ptr = copy_ssa_name (dataref_ptr, new_stmt);
                    gimple_assign_set_lhs (new_stmt, ptr);
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);
                    data_ref
                      = build2 (MEM_REF, vectype, ptr,
                                build_int_cst (reference_alias_ptr_type
                                               (DR_REF (first_dr)), 0));
                    break;
                  }
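                /* Unlike the optimized variant below, this case emits both
                   flooring loads inside the loop body on every iteration; it
                   is used when the realignment state cannot be carried
                   across iterations in a PHI.  */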
                case dr_explicit_realign_optimized:
                  new_temp = copy_ssa_name (dataref_ptr, NULL);
                  new_stmt = gimple_build_assign_with_ops
                               (BIT_AND_EXPR, new_temp, dataref_ptr,
                                build_int_cst
                                (TREE_TYPE (dataref_ptr),
                                 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
                  vect_finish_stmt_generation (stmt, new_stmt, gsi);
                  data_ref
                    = build2 (MEM_REF, vectype, new_temp,
                              build_int_cst (reference_alias_ptr_type
                                             (DR_REF (first_dr)), 0));
                  break;
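                  /* Only the aligned load of the current chunk is emitted
                     here; MSQ, the previous chunk, flows in through the loop
                     PHI created by vect_setup_realignment, so each iteration
                     costs a single vector load.  */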
                default:
                  gcc_unreachable ();
                }
              vec_dest = vect_create_destination_var (scalar_dest, vectype);
              new_stmt = gimple_build_assign (vec_dest, data_ref);
              new_temp = make_ssa_name (vec_dest, new_stmt);
              gimple_assign_set_lhs (new_stmt, new_temp);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              /* 3. Handle explicit realignment if necessary/supported.
                 Create in loop:
                   vec_dest = realign_load (msq, lsq, realignment_token)  */
              if (alignment_support_scheme == dr_explicit_realign_optimized
                  || alignment_support_scheme == dr_explicit_realign)
                {
                  lsq = gimple_assign_lhs (new_stmt);
                  if (!realignment_token)
                    realignment_token = dataref_ptr;
                  vec_dest = vect_create_destination_var (scalar_dest, vectype);
                  new_stmt
                    = gimple_build_assign_with_ops (REALIGN_LOAD_EXPR,
                                                    vec_dest, msq, lsq,
                                                    realignment_token);
                  new_temp = make_ssa_name (vec_dest, new_stmt);
                  gimple_assign_set_lhs (new_stmt, new_temp);
                  vect_finish_stmt_generation (stmt, new_stmt, gsi);

                  if (alignment_support_scheme == dr_explicit_realign_optimized)
                    {
                      gcc_assert (phi);
                      if (i == vec_num - 1 && j == ncopies - 1)
                        add_phi_arg (phi, lsq,
                                     loop_latch_edge (containing_loop),
                                     UNKNOWN_LOCATION);
                      msq = lsq;
                    }
                }
              /* 4. Handle invariant-load.  */
              if (inv_p && !bb_vinfo)
                {
                  gcc_assert (!grouped_load);
                  /* If we have versioned for aliasing or the loop doesn't
                     have any data dependencies that would preclude this,
                     then we are sure this is a loop invariant load and
                     thus we can insert it on the preheader edge.  */
                  if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
                      && !nested_in_vect_loop
                      && hoist_defs_of_uses (stmt, loop))
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_NOTE, vect_location,
                                           "hoisting out of the vectorized "
                                           "loop: ");
                          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
                          dump_printf (MSG_NOTE, "\n");
                        }
                      tree tem = copy_ssa_name (scalar_dest, NULL);
                      gsi_insert_on_edge_immediate
                        (loop_preheader_edge (loop),
                         gimple_build_assign (tem,
                                              unshare_expr
                                                (gimple_assign_rhs1 (stmt))));
                      new_temp = vect_init_vector (stmt, tem, vectype, NULL);
                    }
                  else
                    {
                      gimple_stmt_iterator gsi2 = *gsi;
                      gsi_next (&gsi2);
                      new_temp = vect_init_vector (stmt, scalar_dest,
                                                   vectype, &gsi2);
                    }
                  new_stmt = SSA_NAME_DEF_STMT (new_temp);
                  set_vinfo_for_stmt (new_stmt,
                                      new_stmt_vec_info (new_stmt, loop_vinfo,
                                                         bb_vinfo));
                }

              if (negative)
                {
                  tree perm_mask = perm_mask_for_reverse (vectype);
                  new_temp = permute_vec_elements (new_temp, new_temp,
                                                   perm_mask, stmt, gsi);
                  new_stmt = SSA_NAME_DEF_STMT (new_temp);
                }
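              /* For a negative step the vector is loaded from the lowest
                 address and then reversed: perm_mask_for_reverse produces
                 the { nunits-1, ..., 1, 0 } selector, so the lanes end up
                 in the order the scalar loop would have read them.  */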
              /* Collect vector loads and later create their permutation in
                 vect_transform_grouped_load ().  */
              if (grouped_load || slp_perm)
                dr_chain.quick_push (new_temp);

              /* Store vector loads in the corresponding SLP_NODE.  */
              if (slp && !slp_perm)
                SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
            }
          /* Bump the vector pointer to account for a gap.  */
          if (slp && group_gap != 0)
            {
              tree bump = size_binop (MULT_EXPR,
                                      TYPE_SIZE_UNIT (elem_type),
                                      size_int (group_gap));
              dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                             stmt, bump);
            }
        }

      if (slp && !slp_perm)
        continue;

      if (slp_perm)
        {
          if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
                                             slp_node_instance, false))
            {
              dr_chain.release ();
              return false;
            }
        }
      else
        {
          if (grouped_load)
            {
              if (!load_lanes_p)
                vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
              *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
            }
          else
            {
              if (j == 0)
                STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
              else
                STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
              prev_stmt_info = vinfo_for_stmt (new_stmt);
            }
        }
      dr_chain.release ();
    }

  return true;
}
/* Function vect_is_simple_cond.

   Input:
   LOOP - the loop that is being vectorized.
   COND - Condition that is checked for simple use.

   Output:
   *COMP_VECTYPE - the vector type for the comparison.

   Returns whether a COND can be vectorized.  Checks whether
   condition operands are supportable using vect_is_simple_use.  */

static bool
vect_is_simple_cond (tree cond, gimple stmt, loop_vec_info loop_vinfo,
                     bb_vec_info bb_vinfo, tree *comp_vectype)
{
  tree lhs, rhs;
  tree def;
  enum vect_def_type dt;
  tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;

  if (!COMPARISON_CLASS_P (cond))
    return false;

  lhs = TREE_OPERAND (cond, 0);
  rhs = TREE_OPERAND (cond, 1);

  if (TREE_CODE (lhs) == SSA_NAME)
    {
      gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
      if (!vect_is_simple_use_1 (lhs, stmt, loop_vinfo, bb_vinfo,
                                 &lhs_def_stmt, &def, &dt, &vectype1))
        return false;
    }
  else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
           && TREE_CODE (lhs) != FIXED_CST)
    return false;

  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
      if (!vect_is_simple_use_1 (rhs, stmt, loop_vinfo, bb_vinfo,
                                 &rhs_def_stmt, &def, &dt, &vectype2))
        return false;
    }
  else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
           && TREE_CODE (rhs) != FIXED_CST)
    return false;

  *comp_vectype = vectype1 ? vectype1 : vectype2;
  return true;
}
/* vectorizable_condition.

   Check if STMT is a conditional modify expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
   at GSI.

   When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
   to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
   the else clause if it is 2).

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

bool
vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
                        gimple *vec_stmt, tree reduc_def, int reduc_index,
                        slp_tree slp_node)
{
  tree scalar_dest = NULL_TREE;
  tree vec_dest = NULL_TREE;
  tree cond_expr, then_clause, else_clause;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree comp_vectype = NULL_TREE;
  tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
  tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
  tree vec_compare, vec_cond_expr;
  tree new_temp;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree def;
  enum vect_def_type dt, dts[4];
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies;
  enum tree_code code;
  stmt_vec_info prev_stmt_info = NULL;
  int i, j;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  vec<tree> vec_oprnds2 = vNULL;
  vec<tree> vec_oprnds3 = vNULL;
  tree vec_cmp_type;

  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);
  if (reduc_index && ncopies > 1)
    return false; /* FORNOW */

  if (reduc_index && STMT_SLP_TYPE (stmt_info))
    return false;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
           && reduc_def))
    return false;

  /* FORNOW: not yet supported.  */
  if (STMT_VINFO_LIVE_P (stmt_info))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "value used after loop.\n");
      return false;
    }
  /* Is vectorizable conditional operation?  */
  if (!is_gimple_assign (stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (code != COND_EXPR)
    return false;

  cond_expr = gimple_assign_rhs1 (stmt);
  then_clause = gimple_assign_rhs2 (stmt);
  else_clause = gimple_assign_rhs3 (stmt);

  if (!vect_is_simple_cond (cond_expr, stmt, loop_vinfo, bb_vinfo,
                            &comp_vectype))
    return false;

  if (TREE_CODE (then_clause) == SSA_NAME)
    {
      gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
      if (!vect_is_simple_use (then_clause, stmt, loop_vinfo, bb_vinfo,
                               &then_def_stmt, &def, &dt))
        return false;
    }
  else if (TREE_CODE (then_clause) != INTEGER_CST
           && TREE_CODE (then_clause) != REAL_CST
           && TREE_CODE (then_clause) != FIXED_CST)
    return false;

  if (TREE_CODE (else_clause) == SSA_NAME)
    {
      gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
      if (!vect_is_simple_use (else_clause, stmt, loop_vinfo, bb_vinfo,
                               &else_def_stmt, &def, &dt))
        return false;
    }
  else if (TREE_CODE (else_clause) != INTEGER_CST
           && TREE_CODE (else_clause) != REAL_CST
           && TREE_CODE (else_clause) != FIXED_CST)
    return false;

  unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype)));
  /* The result of a vector comparison should be signed type.  */
  tree cmp_type = build_nonstandard_integer_type (prec, 0);
  vec_cmp_type = get_same_sized_vectype (cmp_type, vectype);
  if (vec_cmp_type == NULL_TREE)
    return false;

  if (!vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
      return expand_vec_cond_expr_p (vectype, comp_vectype);
    }
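  /* expand_vec_cond_expr_p asks the target whether a VEC_COND_EXPR with
     this data vector type and comparison vector type can be expanded (via
     the vcond optabs), so unsupported conditions are rejected here, during
     analysis, rather than at expansion time.  */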
  /* Transform.  */

  if (!slp_node)
    {
      vec_oprnds0.create (1);
      vec_oprnds1.create (1);
      vec_oprnds2.create (1);
      vec_oprnds3.create (1);
    }

  /* Handle def.  */
  scalar_dest = gimple_assign_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle cond expr.  */
  for (j = 0; j < ncopies; j++)
    {
      gimple new_stmt = NULL;
      if (j == 0)
        {
          if (slp_node)
            {
              auto_vec<tree, 4> ops;
              auto_vec<vec<tree>, 4> vec_defs;

              ops.safe_push (TREE_OPERAND (cond_expr, 0));
              ops.safe_push (TREE_OPERAND (cond_expr, 1));
              ops.safe_push (then_clause);
              ops.safe_push (else_clause);
              vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
              vec_oprnds3 = vec_defs.pop ();
              vec_oprnds2 = vec_defs.pop ();
              vec_oprnds1 = vec_defs.pop ();
              vec_oprnds0 = vec_defs.pop ();

              vec_defs.release ();
            }
          else
            {
              gimple gtemp;
              vec_cond_lhs =
                vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0),
                                              stmt, NULL);
              vect_is_simple_use (TREE_OPERAND (cond_expr, 0), stmt,
                                  loop_vinfo, NULL, &gtemp, &def, &dts[0]);

              vec_cond_rhs =
                vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
                                              stmt, NULL);
              vect_is_simple_use (TREE_OPERAND (cond_expr, 1), stmt,
                                  loop_vinfo, NULL, &gtemp, &def, &dts[1]);
              if (reduc_index == 1)
                vec_then_clause = reduc_def;
              else
                {
                  vec_then_clause = vect_get_vec_def_for_operand (then_clause,
                                                                  stmt, NULL);
                  vect_is_simple_use (then_clause, stmt, loop_vinfo,
                                      NULL, &gtemp, &def, &dts[2]);
                }
              if (reduc_index == 2)
                vec_else_clause = reduc_def;
              else
                {
                  vec_else_clause = vect_get_vec_def_for_operand (else_clause,
                                                                  stmt, NULL);
                  vect_is_simple_use (else_clause, stmt, loop_vinfo,
                                      NULL, &gtemp, &def, &dts[3]);
                }
            }
        }
      else
        {
          vec_cond_lhs = vect_get_vec_def_for_stmt_copy (dts[0],
                                                         vec_oprnds0.pop ());
          vec_cond_rhs = vect_get_vec_def_for_stmt_copy (dts[1],
                                                         vec_oprnds1.pop ());
          vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
                                                            vec_oprnds2.pop ());
          vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
                                                            vec_oprnds3.pop ());
        }

      if (!slp_node)
        {
          vec_oprnds0.quick_push (vec_cond_lhs);
          vec_oprnds1.quick_push (vec_cond_rhs);
          vec_oprnds2.quick_push (vec_then_clause);
          vec_oprnds3.quick_push (vec_else_clause);
        }

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
        {
          vec_cond_rhs = vec_oprnds1[i];
          vec_then_clause = vec_oprnds2[i];
          vec_else_clause = vec_oprnds3[i];

          vec_compare = build2 (TREE_CODE (cond_expr), vec_cmp_type,
                                vec_cond_lhs, vec_cond_rhs);
          vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
                                  vec_compare, vec_then_clause,
                                  vec_else_clause);

          new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_oprnds2.release ();
  vec_oprnds3.release ();

  return true;
}
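/* The net effect of the transformation above: a scalar statement like
     x = a < b ? c : d
   becomes, per copy,
     vx = VEC_COND_EXPR <va < vb, vc, vd>
   with the comparison built in VEC_CMP_TYPE (a signed integer vector of
   the same element width as VECTYPE) and the selection done lane-wise.  */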
/* Make sure the statement is vectorizable.  */

bool
vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
  bool ok;
  tree scalar_type, vectype;
  gimple pattern_stmt;
  gimple_seq pattern_def_seq;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
      dump_printf (MSG_NOTE, "\n");
    }

  if (gimple_has_volatile_ops (stmt))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: stmt has volatile operands\n");

      return false;
    }

  /* Skip stmts that do not need to be vectorized.  In loops this is expected
     to include:
     - the COND_EXPR which is the loop exit condition
     - any LABEL_EXPRs in the loop
     - computations that are used only for array indexing or loop control.
     In basic blocks we only analyze statements that are a part of some SLP
     instance, therefore, all the statements are relevant.

     Pattern statement needs to be analyzed instead of the original statement
     if the original statement is not relevant.  Otherwise, we analyze both
     statements.  In basic blocks we are called from some SLP instance
     traversal, don't analyze pattern stmts instead, the pattern stmts
     already will be part of SLP instance.  */

  pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (!STMT_VINFO_RELEVANT_P (stmt_info)
      && !STMT_VINFO_LIVE_P (stmt_info))
    {
      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
          && pattern_stmt
          && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
              || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
        {
          /* Analyze PATTERN_STMT instead of the original stmt.  */
          stmt = pattern_stmt;
          stmt_info = vinfo_for_stmt (pattern_stmt);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "==> examining pattern statement: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
              dump_printf (MSG_NOTE, "\n");
            }
        }
      else
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");

          return true;
        }
    }
  else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
           && node == NULL
           && pattern_stmt
           && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
               || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
    {
      /* Analyze PATTERN_STMT too.  */
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "==> examining pattern statement: ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
          dump_printf (MSG_NOTE, "\n");
        }

      if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node))
        return false;
    }

  if (is_pattern_stmt_p (stmt_info)
      && node == NULL
      && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
    {
      gimple_stmt_iterator si;

      for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
        {
          gimple pattern_def_stmt = gsi_stmt (si);
          if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
              || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
            {
              /* Analyze def stmt of STMT if it's a pattern stmt.  */
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location,
                                   "==> examining pattern def statement: ");
                  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
                  dump_printf (MSG_NOTE, "\n");
                }

              if (!vect_analyze_stmt (pattern_def_stmt,
                                      need_to_vectorize, node))
                return false;
            }
        }
    }

  switch (STMT_VINFO_DEF_TYPE (stmt_info))
    {
    case vect_internal_def:
      break;

    case vect_reduction_def:
    case vect_nested_cycle:
      gcc_assert (!bb_vinfo && (relevance == vect_used_in_outer
                  || relevance == vect_used_in_outer_by_reduction
                  || relevance == vect_unused_in_scope));
      break;

    case vect_induction_def:
    case vect_constant_def:
    case vect_external_def:
    case vect_unknown_def_type:
    default:
      gcc_unreachable ();
    }

  if (bb_vinfo)
    {
      gcc_assert (PURE_SLP_STMT (stmt_info));

      scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "get vectype for scalar type:  ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
          dump_printf (MSG_NOTE, "\n");
        }

      vectype = get_vectype_for_scalar_type (scalar_type);
      if (!vectype)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "not SLPed: unsupported data-type ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 scalar_type);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          return false;
        }

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "vectype:  ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
          dump_printf (MSG_NOTE, "\n");
        }

      STMT_VINFO_VECTYPE (stmt_info) = vectype;
    }

  if (STMT_VINFO_RELEVANT_P (stmt_info))
    {
      gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
      gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
                  || (is_gimple_call (stmt)
                      && gimple_call_lhs (stmt) == NULL_TREE));
      *need_to_vectorize = true;
    }

  ok = true;
  if (!bb_vinfo
      && (STMT_VINFO_RELEVANT_P (stmt_info)
          || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
    ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, NULL)
          || vectorizable_conversion (stmt, NULL, NULL, NULL)
          || vectorizable_shift (stmt, NULL, NULL, NULL)
          || vectorizable_operation (stmt, NULL, NULL, NULL)
          || vectorizable_assignment (stmt, NULL, NULL, NULL)
          || vectorizable_load (stmt, NULL, NULL, NULL, NULL)
          || vectorizable_call (stmt, NULL, NULL, NULL)
          || vectorizable_store (stmt, NULL, NULL, NULL)
          || vectorizable_reduction (stmt, NULL, NULL, NULL)
          || vectorizable_condition (stmt, NULL, NULL, NULL, 0, NULL));
  else
    {
      if (bb_vinfo)
        ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
              || vectorizable_conversion (stmt, NULL, NULL, node)
              || vectorizable_shift (stmt, NULL, NULL, node)
              || vectorizable_operation (stmt, NULL, NULL, node)
              || vectorizable_assignment (stmt, NULL, NULL, node)
              || vectorizable_load (stmt, NULL, NULL, node, NULL)
              || vectorizable_call (stmt, NULL, NULL, node)
              || vectorizable_store (stmt, NULL, NULL, node)
              || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node));
    }

  if (!ok)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "not vectorized: relevant stmt not ");
          dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      return false;
    }

  if (bb_vinfo)
    return true;

  /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
     need extra handling, except for vectorizable reductions.  */
  if (STMT_VINFO_LIVE_P (stmt_info)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    ok = vectorizable_live_operation (stmt, NULL, NULL);

  if (!ok)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "not vectorized: live stmt not ");
          dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      return false;
    }

  return true;
}
/* Function vect_transform_stmt.

   Create a vectorized stmt to replace STMT, and insert it at BSI.  */

bool
vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
                     bool *grouped_store, slp_tree slp_node,
                     slp_instance slp_node_instance)
{
  bool is_store = false;
  gimple vec_stmt = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bool done;

  switch (STMT_VINFO_TYPE (stmt_info))
    {
    case type_demotion_vec_info_type:
    case type_promotion_vec_info_type:
    case type_conversion_vec_info_type:
      done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case induc_vec_info_type:
      gcc_assert (!slp_node);
      done = vectorizable_induction (stmt, gsi, &vec_stmt);
      gcc_assert (done);
      break;

    case shift_vec_info_type:
      done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case op_vec_info_type:
      done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case assignment_vec_info_type:
      done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case load_vec_info_type:
      done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
                                slp_node_instance);
      gcc_assert (done);
      break;

    case store_vec_info_type:
      done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
        {
          /* In case of interleaving, the whole chain is vectorized when the
             last store in the chain is reached.  Store stmts before the last
             one are skipped, and their vec_stmt_info shouldn't be freed
             meanwhile.  */
          *grouped_store = true;
          if (STMT_VINFO_VEC_STMT (stmt_info))
            is_store = true;
        }
      else
        is_store = true;
      break;

    case condition_vec_info_type:
      done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
      gcc_assert (done);
      break;

    case call_vec_info_type:
      done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
      stmt = gsi_stmt (*gsi);
      if (is_gimple_call (stmt)
          && gimple_call_internal_p (stmt)
          && gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
        is_store = true;
      break;

    case call_simd_clone_vec_info_type:
      done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
      stmt = gsi_stmt (*gsi);
      break;

    case reduc_vec_info_type:
      done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    default:
      if (!STMT_VINFO_LIVE_P (stmt_info))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "stmt not supported.\n");
          gcc_unreachable ();
        }
    }

  /* Handle inner-loop stmts whose DEF is used in the loop-nest that
     is being vectorized, but outside the immediately enclosing loop.  */
  if (vec_stmt
      && STMT_VINFO_LOOP_VINFO (stmt_info)
      && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
                                  STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
      && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
          || STMT_VINFO_RELEVANT (stmt_info) ==
                                          vect_used_in_outer_by_reduction))
    {
      struct loop *innerloop = LOOP_VINFO_LOOP (
                                 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
      imm_use_iterator imm_iter;
      use_operand_p use_p;
      tree scalar_dest;
      gimple exit_phi;

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "Record the vdef for outer-loop vectorization.\n");

      /* Find the relevant loop-exit phi-node, and record the vec_stmt there
         (to be used when vectorizing outer-loop stmts that use the DEF of
         STMT).  */
      if (gimple_code (stmt) == GIMPLE_PHI)
        scalar_dest = PHI_RESULT (stmt);
      else
        scalar_dest = gimple_assign_lhs (stmt);

      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
        {
          if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
            {
              exit_phi = USE_STMT (use_p);
              STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
            }
        }
    }

  /* Handle stmts whose DEF is used outside the loop-nest that is
     being vectorized.  */
  if (STMT_VINFO_LIVE_P (stmt_info)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    {
      done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
      gcc_assert (done);
    }

  if (vec_stmt)
    STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;

  return is_store;
}
/* Remove a group of stores (for SLP or interleaving), free their
   stmt_vec_info.  */

void
vect_remove_stores (gimple first_stmt)
{
  gimple next = first_stmt;
  gimple tmp;
  gimple_stmt_iterator next_si;

  while (next)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (next);

      tmp = GROUP_NEXT_ELEMENT (stmt_info);
      if (is_pattern_stmt_p (stmt_info))
        next = STMT_VINFO_RELATED_STMT (stmt_info);
      /* Free the attached stmt_vec_info and remove the stmt.  */
      next_si = gsi_for_stmt (next);
      unlink_stmt_vdef (next);
      gsi_remove (&next_si, true);
      release_defs (next);
      free_stmt_vec_info (next);
      next = tmp;
    }
}
/* Function new_stmt_vec_info.

   Create and initialize a new stmt_vec_info struct for STMT.  */

stmt_vec_info
new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
                   bb_vec_info bb_vinfo)
{
  stmt_vec_info res;
  res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));

  STMT_VINFO_TYPE (res) = undef_vec_info_type;
  STMT_VINFO_STMT (res) = stmt;
  STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
  STMT_VINFO_BB_VINFO (res) = bb_vinfo;
  STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
  STMT_VINFO_LIVE_P (res) = false;
  STMT_VINFO_VECTYPE (res) = NULL;
  STMT_VINFO_VEC_STMT (res) = NULL;
  STMT_VINFO_VECTORIZABLE (res) = true;
  STMT_VINFO_IN_PATTERN_P (res) = false;
  STMT_VINFO_RELATED_STMT (res) = NULL;
  STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
  STMT_VINFO_DATA_REF (res) = NULL;

  STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
  STMT_VINFO_DR_OFFSET (res) = NULL;
  STMT_VINFO_DR_INIT (res) = NULL;
  STMT_VINFO_DR_STEP (res) = NULL;
  STMT_VINFO_DR_ALIGNED_TO (res) = NULL;

  if (gimple_code (stmt) == GIMPLE_PHI
      && is_loop_header_bb_p (gimple_bb (stmt)))
    STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
  else
    STMT_VINFO_DEF_TYPE (res) = vect_internal_def;

  STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
  STMT_SLP_TYPE (res) = loop_vect;
  GROUP_FIRST_ELEMENT (res) = NULL;
  GROUP_NEXT_ELEMENT (res) = NULL;
  GROUP_SIZE (res) = 0;
  GROUP_STORE_COUNT (res) = 0;
  GROUP_GAP (res) = 0;
  GROUP_SAME_DR_STMT (res) = NULL;

  return res;
}
/* Create a hash table for stmt_vec_info.  */

void
init_stmt_vec_info_vec (void)
{
  gcc_assert (!stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.create (50);
}


/* Free hash table for stmt_vec_info.  */

void
free_stmt_vec_info_vec (void)
{
  unsigned int i;
  vec_void_p info;
  FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
    if (info != NULL)
      free_stmt_vec_info (STMT_VINFO_STMT ((stmt_vec_info) info));
  gcc_assert (stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.release ();
}
/* Free stmt vectorization related info.  */

void
free_stmt_vec_info (gimple stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  if (!stmt_info)
    return;

  /* Check if this statement has a related "pattern stmt"
     (introduced by the vectorizer during the pattern recognition
     pass).  Free pattern's stmt_vec_info and def stmt's stmt_vec_info
     too.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      stmt_vec_info patt_info
        = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
      if (patt_info)
        {
          gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
          gimple patt_stmt = STMT_VINFO_STMT (patt_info);
          gimple_set_bb (patt_stmt, NULL);
          tree lhs = gimple_get_lhs (patt_stmt);
          if (TREE_CODE (lhs) == SSA_NAME)
            release_ssa_name (lhs);
          if (seq)
            {
              gimple_stmt_iterator si;
              for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
                {
                  gimple seq_stmt = gsi_stmt (si);
                  gimple_set_bb (seq_stmt, NULL);
                  lhs = gimple_get_lhs (seq_stmt);
                  if (TREE_CODE (lhs) == SSA_NAME)
                    release_ssa_name (lhs);
                  free_stmt_vec_info (seq_stmt);
                }
            }
          free_stmt_vec_info (patt_stmt);
        }
    }

  STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
  set_vinfo_for_stmt (stmt, NULL);
  free (stmt_info);
}
/* Function get_vectype_for_scalar_type_and_size.

   Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
   by the target.  */

static tree
get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
{
  enum machine_mode inner_mode = TYPE_MODE (scalar_type);
  enum machine_mode simd_mode;
  unsigned int nbytes = GET_MODE_SIZE (inner_mode);
  int nunits;
  tree vectype;

  if (nbytes == 0)
    return NULL_TREE;

  if (GET_MODE_CLASS (inner_mode) != MODE_INT
      && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
    return NULL_TREE;

  /* For vector types of elements whose mode precision doesn't
     match their type's precision we use an element type of mode
     precision.  The vectorization routines will have to make sure
     they support the proper result truncation/extension.
     We also make sure to build vector types with INTEGER_TYPE
     component type only.  */
  if (INTEGRAL_TYPE_P (scalar_type)
      && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
          || TREE_CODE (scalar_type) != INTEGER_TYPE))
    scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
                                                  TYPE_UNSIGNED (scalar_type));

  /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
     When the component mode passes the above test simply use a type
     corresponding to that mode.  The theory is that any use that
     would cause problems with this will disable vectorization anyway.  */
  else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
           && !INTEGRAL_TYPE_P (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);

  /* We can't build a vector type of elements with alignment bigger than
     their size.  */
  else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode,
                                                  TYPE_UNSIGNED (scalar_type));

  /* If we fell back to using the mode, fail if there was
     no scalar type for it.  */
  if (scalar_type == NULL_TREE)
    return NULL_TREE;

  /* If no size was supplied use the mode the target prefers.  Otherwise
     lookup a vector mode of the specified size.  */
  if (size == 0)
    simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
  else
    simd_mode = mode_for_vector (inner_mode, size / nbytes);
  nunits = GET_MODE_SIZE (simd_mode) / nbytes;
  if (nunits <= 1)
    return NULL_TREE;

  vectype = build_vector_type (scalar_type, nunits);

  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
    return NULL_TREE;

  return vectype;
}
unsigned int current_vector_size;

/* Function get_vectype_for_scalar_type.

   Returns the vector type corresponding to SCALAR_TYPE as supported
   by the target.  */

tree
get_vectype_for_scalar_type (tree scalar_type)
{
  tree vectype;
  vectype = get_vectype_for_scalar_type_and_size (scalar_type,
                                                  current_vector_size);
  if (vectype
      && current_vector_size == 0)
    current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
  return vectype;
}
/* Function get_same_sized_vectype

   Returns a vector type corresponding to SCALAR_TYPE of size
   VECTOR_TYPE if supported by the target.  */

tree
get_same_sized_vectype (tree scalar_type, tree vector_type)
{
  return get_vectype_for_scalar_type_and_size
           (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
}
/* Function vect_is_simple_use.

   Input:
   LOOP_VINFO - the vect info of the loop that is being vectorized.
   BB_VINFO - the vect info of the basic block that is being vectorized.
   OPERAND - operand of STMT in the loop or bb.
   DEF - the defining stmt in case OPERAND is an SSA_NAME.

   Returns whether a stmt with OPERAND can be vectorized.
   For loops, supportable operands are constants, loop invariants, and operands
   that are defined by the current iteration of the loop.  Unsupportable
   operands are those that are defined by a previous iteration of the loop (as
   is the case in reduction/induction computations).
   For basic blocks, supportable operands are constants and bb invariants.
   For now, operands defined outside the basic block are not supported.  */

bool
vect_is_simple_use (tree operand, gimple stmt, loop_vec_info loop_vinfo,
                    bb_vec_info bb_vinfo, gimple *def_stmt,
                    tree *def, enum vect_def_type *dt)
{
  basic_block bb;
  stmt_vec_info stmt_vinfo;
  struct loop *loop = NULL;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  *def_stmt = NULL;
  *def = NULL_TREE;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "vect_is_simple_use: operand ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
      dump_printf (MSG_NOTE, "\n");
    }

  if (CONSTANT_CLASS_P (operand))
    {
      *dt = vect_constant_def;
      return true;
    }

  if (is_gimple_min_invariant (operand))
    {
      *def = operand;
      *dt = vect_external_def;
      return true;
    }

  if (TREE_CODE (operand) == PAREN_EXPR)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location, "non-associatable copy.\n");
      operand = TREE_OPERAND (operand, 0);
    }

  if (TREE_CODE (operand) != SSA_NAME)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not ssa-name.\n");
      return false;
    }

  *def_stmt = SSA_NAME_DEF_STMT (operand);
  if (*def_stmt == NULL)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "no def_stmt.\n");
      return false;
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
      dump_printf (MSG_NOTE, "\n");
    }

  /* Empty stmt is expected only in case of a function argument.
     (Otherwise - we expect a phi_node or a GIMPLE_ASSIGN).  */
  if (gimple_nop_p (*def_stmt))
    {
      *def = operand;
      *dt = vect_external_def;
      return true;
    }

  bb = gimple_bb (*def_stmt);

  if ((loop && !flow_bb_inside_loop_p (loop, bb))
      || (!loop && bb != BB_VINFO_BB (bb_vinfo))
      || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
    *dt = vect_external_def;
  else
    {
      stmt_vinfo = vinfo_for_stmt (*def_stmt);
      *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
    }

  if (*dt == vect_unknown_def_type
      || (stmt
          && *dt == vect_double_reduction_def
          && gimple_code (stmt) != GIMPLE_PHI))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Unsupported pattern.\n");
      return false;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "type of def: %d.\n", *dt);

  switch (gimple_code (*def_stmt))
    {
    case GIMPLE_PHI:
      *def = gimple_phi_result (*def_stmt);
      break;

    case GIMPLE_ASSIGN:
      *def = gimple_assign_lhs (*def_stmt);
      break;

    case GIMPLE_CALL:
      *def = gimple_call_lhs (*def_stmt);
      if (*def != NULL)
        break;
      /* FALLTHRU */
    default:
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "unsupported defining stmt:\n");
      return false;
    }

  return true;
}
/* Function vect_is_simple_use_1.

   Same as vect_is_simple_use but also determines the vector operand
   type of OPERAND and stores it to *VECTYPE.  If the definition of
   OPERAND is vect_uninitialized_def, vect_constant_def or
   vect_external_def *VECTYPE will be set to NULL_TREE and the caller
   is responsible to compute the best suited vector type for the
   scalar operand.  */

bool
vect_is_simple_use_1 (tree operand, gimple stmt, loop_vec_info loop_vinfo,
                      bb_vec_info bb_vinfo, gimple *def_stmt,
                      tree *def, enum vect_def_type *dt, tree *vectype)
{
  if (!vect_is_simple_use (operand, stmt, loop_vinfo, bb_vinfo, def_stmt,
                           def, dt))
    return false;

  /* Now get a vector type if the def is internal, otherwise supply
     NULL_TREE and leave it up to the caller to figure out a proper
     type for the use stmt.  */
  if (*dt == vect_internal_def
      || *dt == vect_induction_def
      || *dt == vect_reduction_def
      || *dt == vect_double_reduction_def
      || *dt == vect_nested_cycle)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);

      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
          && !STMT_VINFO_RELEVANT (stmt_info)
          && !STMT_VINFO_LIVE_P (stmt_info))
        stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));

      *vectype = STMT_VINFO_VECTYPE (stmt_info);
      gcc_assert (*vectype != NULL_TREE);
    }
  else if (*dt == vect_uninitialized_def
           || *dt == vect_constant_def
           || *dt == vect_external_def)
    *vectype = NULL_TREE;
  else
    gcc_unreachable ();

  return true;
}
/* Function supportable_widening_operation

   Check whether an operation represented by the code CODE is a
   widening operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   producing a result of type VECTYPE_OUT).

   Widening operations we currently support are NOP (CONVERT), FLOAT
   and WIDEN_MULT.  This function checks if these operations are supported
   by the target platform either directly (via vector tree-codes), or via
   target builtins.

   Output:
   - CODE1 and CODE2 are codes of vector operations to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like char->short->int - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   widening operation (short in the above example).  */
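/* Worked example of the output contract above: for a char->int conversion
   the widening cannot be done in one step, so on a target that provides
   vec_unpack for char->short and for short->int the function returns
   CODE1/CODE2 = VEC_UNPACK_LO_EXPR/VEC_UNPACK_HI_EXPR, MULTI_STEP_CVT = 1,
   and INTERM_TYPES = { the short vector type }: first unpack char->short,
   then short->int.  */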
bool
supportable_widening_operation (enum tree_code code, gimple stmt,
                                tree vectype_out, tree vectype_in,
                                enum tree_code *code1, enum tree_code *code2,
                                int *multi_step_cvt,
                                vec<tree> *interm_types)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *vect_loop = NULL;
  enum machine_mode vec_mode;
  enum insn_code icode1, icode2;
  optab optab1, optab2;
  tree vectype = vectype_in;
  tree wide_vectype = vectype_out;
  enum tree_code c1, c2;
  int i;
  tree prev_type, intermediate_type;
  enum machine_mode intermediate_mode, prev_mode;
  optab optab3, optab4;

  *multi_step_cvt = 0;
  if (loop_info)
    vect_loop = LOOP_VINFO_LOOP (loop_info);

  switch (code)
    {
    case WIDEN_MULT_EXPR:
      /* The result of a vectorized widening operation usually requires
         two vectors (because the widened results do not fit into one vector).
         The generated vector results would normally be expected to be
         generated in the same order as in the original scalar computation,
         i.e. if 8 results are generated in each vector iteration, they are
         to be organized as follows:
                vect1: [res1,res2,res3,res4],
                vect2: [res5,res6,res7,res8].

         However, in the special case that the result of the widening
         operation is used in a reduction computation only, the order doesn't
         matter (because when vectorizing a reduction we change the order of
         the computation).  Some targets can take advantage of this and
         generate more efficient code.  For example, targets like Altivec,
         that support widen_mult using a sequence of {mult_even,mult_odd}
         generate the following vectors:
                vect1: [res1,res3,res5,res7],
                vect2: [res2,res4,res6,res8].

         When vectorizing outer-loops, we execute the inner-loop sequentially
         (each vectorized inner-loop iteration contributes to VF outer-loop
         iterations in parallel).  We therefore don't allow changing the
         order of the computation in the inner-loop during outer-loop
         vectorization.  */
      /* TODO: Another case in which order doesn't *really* matter is when we
         widen and then contract again, e.g. (short)((int)x * y >> 8).
         Normally, pack_trunc performs an even/odd permute, whereas the
         repack from an even/odd expansion would be an interleave, which
         would be significantly simpler for e.g. AVX2.  */
      /* In any case, in order to avoid duplicating the code below, recurse
         on VEC_WIDEN_MULT_EVEN_EXPR.  If it succeeds, all the return values
         are properly set up for the caller.  If we fail, we'll continue with
         a VEC_WIDEN_MULT_LO/HI_EXPR check.  */
      if (vect_loop
          && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
          && !nested_in_vect_loop_p (vect_loop, stmt)
          && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
                                             stmt, vectype_out, vectype_in,
                                             code1, code2, multi_step_cvt,
                                             interm_types))
        {
          /* Elements in a vector with vect_used_by_reduction property cannot
             be reordered if the use chain with this property does not have
             the same operation.  One such example is s += a * b, where
             elements in a and b cannot be reordered.  Here we check if the
             vector defined by STMT is only directly used in the reduction
             statement.  */
          tree lhs = gimple_assign_lhs (stmt);
          use_operand_p dummy;
          gimple use_stmt;
          stmt_vec_info use_stmt_info = NULL;
          if (single_imm_use (lhs, &dummy, &use_stmt)
              && (use_stmt_info = vinfo_for_stmt (use_stmt))
              && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
            return true;
        }
      c1 = VEC_WIDEN_MULT_LO_EXPR;
      c2 = VEC_WIDEN_MULT_HI_EXPR;
      break;

    case VEC_WIDEN_MULT_EVEN_EXPR:
      /* Support the recursion induced just above.  */
      c1 = VEC_WIDEN_MULT_EVEN_EXPR;
      c2 = VEC_WIDEN_MULT_ODD_EXPR;
      break;

    case WIDEN_LSHIFT_EXPR:
      c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
      c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
      break;

    CASE_CONVERT:
      c1 = VEC_UNPACK_LO_EXPR;
      c2 = VEC_UNPACK_HI_EXPR;
      break;

    case FLOAT_EXPR:
      c1 = VEC_UNPACK_FLOAT_LO_EXPR;
      c2 = VEC_UNPACK_FLOAT_HI_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
         VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
         computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
    {
      enum tree_code ctmp = c1;
      c1 = c2;
      c2 = ctmp;
    }

  if (code == FIX_TRUNC_EXPR)
    {
      /* The signedness is determined from output operand.  */
      optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
      optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
    }
  else
    {
      optab1 = optab_for_tree_code (c1, vectype, optab_default);
      optab2 = optab_for_tree_code (c2, vectype, optab_default);
    }

  if (!optab1 || !optab2)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
      || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;
  *code2 = c2;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
      && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
    return true;

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */

  prev_type = vectype;
  prev_mode = vec_mode;

  if (!CONVERT_EXPR_CODE_P (code))
    return false;

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in promotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do
     not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      intermediate_type
        = lang_hooks.types.type_for_mode (intermediate_mode,
                                          TYPE_UNSIGNED (prev_type));
      optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
      optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);

      if (!optab3 || !optab4
          || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
          || insn_data[icode1].operand[0].mode != intermediate_mode
          || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
          || insn_data[icode2].operand[0].mode != intermediate_mode
          || ((icode1 = optab_handler (optab3, intermediate_mode))
              == CODE_FOR_nothing)
          || ((icode2 = optab_handler (optab4, intermediate_mode))
              == CODE_FOR_nothing))
        break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
          && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
        return true;

      prev_type = intermediate_type;
      prev_mode = intermediate_mode;
    }

  interm_types->release ();
  return false;
}
/* Function supportable_narrowing_operation

   Check whether an operation represented by the code CODE is a
   narrowing operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   and producing a result of type VECTYPE_OUT).

   Narrowing operations we currently support are NOP (CONVERT) and
   FIX_TRUNC.  This function checks if these operations are supported by
   the target platform directly via vector tree-codes.

   Output:
   - CODE1 is the code of a vector operation to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like int->short->char - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   narrowing operation (short in the above example).   */
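/* For instance (mirroring the widening case above): an int->char narrowing
   typically becomes two VEC_PACK_TRUNC_EXPR steps, int->short then
   short->char, so CODE1 = VEC_PACK_TRUNC_EXPR, MULTI_STEP_CVT = 1 and
   INTERM_TYPES holds the short vector type.  */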
bool
supportable_narrowing_operation (enum tree_code code,
                                 tree vectype_out, tree vectype_in,
                                 enum tree_code *code1, int *multi_step_cvt,
                                 vec<tree> *interm_types)
{
  enum machine_mode vec_mode;
  enum insn_code icode1;
  optab optab1, interm_optab;
  tree vectype = vectype_in;
  tree narrow_vectype = vectype_out;
  enum tree_code c1;
  tree intermediate_type;
  enum machine_mode intermediate_mode, prev_mode;
  int i;
  bool uns;

  *multi_step_cvt = 0;
  switch (code)
    {
    CASE_CONVERT:
      c1 = VEC_PACK_TRUNC_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      c1 = VEC_PACK_FIX_TRUNC_EXPR;
      break;

    case FLOAT_EXPR:
      /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
         tree code and optabs used for computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == FIX_TRUNC_EXPR)
    /* The signedness is determined from output operand.  */
    optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
  else
    optab1 = optab_for_tree_code (c1, vectype, optab_default);

  if (!optab1)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
    return true;

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */
  prev_mode = vec_mode;
  if (code == FIX_TRUNC_EXPR)
    uns = TYPE_UNSIGNED (vectype_out);
  else
    uns = TYPE_UNSIGNED (vectype);

  /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
     conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
     costly than signed.  */
  if (code == FIX_TRUNC_EXPR && uns)
    {
      enum insn_code icode2;

      intermediate_type
        = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
      interm_optab
        = optab_for_tree_code (c1, intermediate_type, optab_default);
      if (interm_optab != unknown_optab
          && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
          && insn_data[icode1].operand[0].mode
             == insn_data[icode2].operand[0].mode)
        {
          uns = false;
          optab1 = interm_optab;
          icode1 = icode2;
        }
    }

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in promotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      intermediate_type
        = lang_hooks.types.type_for_mode (intermediate_mode, uns);
      interm_optab
        = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
                               optab_default);
      if (!interm_optab
          || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
          || insn_data[icode1].operand[0].mode != intermediate_mode
          || ((icode1 = optab_handler (interm_optab, intermediate_mode))
              == CODE_FOR_nothing))
        break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
        return true;

      prev_mode = intermediate_mode;
      optab1 = interm_optab;
    }

  interm_types->release ();
  return false;
}