/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003-2014 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "dumpfile.h"
#include "tm.h"
#include "tree.h"
#include "stor-layout.h"
#include "target.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-manip.h"
#include "cfgloop.h"
#include "tree-ssa-loop.h"
#include "tree-scalar-evolution.h"
#include "expr.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "optabs.h"
#include "diagnostic-core.h"
#include "tree-vectorizer.h"

/* For lang_hooks.types.type_for_mode.  */
#include "langhooks.h"
/* Return the vectorized type for the given statement.  */

tree
stmt_vectype (struct _stmt_vec_info *stmt_info)
{
  return STMT_VINFO_VECTYPE (stmt_info);
}
/* Return TRUE iff the given statement is in an inner loop relative to
   the loop being vectorized.  */

bool
stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
{
  gimple stmt = STMT_VINFO_STMT (stmt_info);
  basic_block bb = gimple_bb (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop;

  if (!loop_vinfo)
    return false;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  return (bb->loop_father == loop->inner);
}
/* Record the cost of a statement, either by directly informing the
   target model or by saving it in a vector for later processing.
   Return a preliminary estimate of the statement's cost.  */

unsigned
record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
		  enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
		  int misalign, enum vect_cost_model_location where)
{
  if (body_cost_vec)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      add_stmt_info_to_vec (body_cost_vec, count, kind,
			    stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
			    misalign);
      return (unsigned)
	(builtin_vectorization_cost (kind, vectype, misalign) * count);
    }
  else
    {
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
      bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
      void *target_cost_data;

      if (loop_vinfo)
	target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
      else
	target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);

      return add_stmt_cost (target_cost_data, count, kind, stmt_info,
			    misalign, where);
    }
}
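/* Illustrative sketch (added for this edit; not part of the original
   file): how a caller is expected to drive the two modes of
   record_stmt_cost.  During analysis a cost vector is passed so the
   entries can be replayed later; passing NULL for the vector would
   instead query the target cost model directly.  The stmt_info variable
   is assumed to be in scope.  */
#if 0
  stmt_vector_for_cost prologue_cost_vec;
  prologue_cost_vec.create (2);
  /* Queue one prologue statement; the return value is only a
     preliminary estimate.  */
  unsigned guess = record_stmt_cost (&prologue_cost_vec, 1, scalar_to_vec,
				     stmt_info, 0, vect_prologue);
  /* ... later the vector is handed to the target via add_stmt_cost.  */
  prologue_cost_vec.release ();
#endif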
/* Return a variable of type ELEM_TYPE[NELEMS].  */

static tree
create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
			 "vect_array");
}
/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT and the vector is associated
   with scalar destination SCALAR_DEST.  */

static tree
read_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
		   tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;
  gimple new_stmt;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return vect_name;
}
/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT.  */

static void
write_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree vect,
		    tree array, unsigned HOST_WIDE_INT n)
{
  tree array_ref;
  gimple new_stmt;

  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
}
/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

static tree
create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
{
  tree mem_ref, alias_ptr_type;

  alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
  return mem_ref;
}
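/* Illustrative sketch (added for this edit; not part of the original
   file): the intended round-trip through the vector-array helpers above,
   as used by the load/store-lanes code paths.  The variables vectype,
   stmt, gsi, scalar_dest and vec are assumed to be in scope.  */
#if 0
  tree array = create_vector_array (vectype, 4);   /* vectype[4]  */
  write_vector_array (stmt, gsi, vec, array, 2);   /* array[2] = vec  */
  tree elt = read_vector_array (stmt, gsi, scalar_dest, array, 2);
#endif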
/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (vec<gimple> *worklist, gimple stmt,
		    enum vect_relevant relevant, bool live_p,
		    bool used_in_pattern)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
  gimple pattern_stmt;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "mark relevant %d, live %d.\n", relevant, live_p);

  /* If this stmt is an original stmt in a pattern, we might need to mark its
     related pattern stmt instead of the original stmt.  However, such stmts
     may have their own uses that are not in any pattern, in such cases the
     stmt itself should be marked.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      bool found = false;
      if (!used_in_pattern)
	{
	  imm_use_iterator imm_iter;
	  use_operand_p use_p;
	  gimple use_stmt;
	  tree lhs;
	  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
	  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

	  if (is_gimple_assign (stmt))
	    lhs = gimple_assign_lhs (stmt);
	  else
	    lhs = gimple_call_lhs (stmt);

	  /* This use is out of pattern use, if LHS has other uses that are
	     pattern uses, we should mark the stmt itself, and not the pattern
	     stmt.  */
	  if (lhs && TREE_CODE (lhs) == SSA_NAME)
	    FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
	      {
		if (is_gimple_debug (USE_STMT (use_p)))
		  continue;
		use_stmt = USE_STMT (use_p);

		if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
		  continue;

		if (vinfo_for_stmt (use_stmt)
		    && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
		  {
		    found = true;
		    break;
		  }
	      }
	}

      if (!found)
	{
	  /* This is the last stmt in a sequence that was detected as a
	     pattern that can potentially be vectorized.  Don't mark the stmt
	     as relevant/live because it's not going to be vectorized.
	     Instead mark the pattern-stmt that replaces it.  */

	  pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "last stmt in pattern. don't mark"
			     " relevant/live.\n");
	  stmt_info = vinfo_for_stmt (pattern_stmt);
	  gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
	  save_relevant = STMT_VINFO_RELEVANT (stmt_info);
	  save_live_p = STMT_VINFO_LIVE_P (stmt_info);
	  stmt = pattern_stmt;
	}
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "already marked relevant/live.\n");
      return;
    }

  worklist->safe_push (stmt);
}
/* Function vect_stmt_relevant_p.

   Return true if STMT in loop that is represented by LOOP_VINFO is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - control stmts in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */

static bool
vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
		      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
	 != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt))
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vec_stmt_relevant_p: stmt has vdefs.\n");
	*relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
	{
	  basic_block bb = gimple_bb (USE_STMT (use_p));
	  if (!flow_bb_inside_loop_p (loop, bb))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "vec_stmt_relevant_p: used out of loop.\n");

	      if (is_gimple_debug (USE_STMT (use_p)))
		continue;

	      /* We expect all such uses to be in the loop exit phis
		 (because of loop closed form)   */
	      gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
	      gcc_assert (bb == single_exit (loop)->dest);

	      *live_p = true;
	    }
	}
    }

  return (*live_p || *relevant);
}
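/* Illustrative example (added for this edit; not from the original
   file): in

     s = 0;
     for (i = 0; i < n; i++)
       s += a[i];	<-- S1
     use (s);		<-- outside the loop

   S1 is "live" because its def is used after the loop (through the
   loop-closed exit phi), and a store such as "a[i] = x" inside the loop
   would be relevant because it has a vdef.  */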
/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref.  FORNOW this means that it's of one of
     the following forms:
     -1- ARRAY_REF = var
     -2- var = ARRAY_REF
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))
    {
      if (is_gimple_call (stmt)
	  && gimple_call_internal_p (stmt))
	switch (gimple_call_internal_fn (stmt))
	  {
	  case IFN_MASK_STORE:
	    operand = gimple_call_arg (stmt, 3);
	    if (operand == use)
	      return true;
	    /* FALLTHRU */
	  case IFN_MASK_LOAD:
	    operand = gimple_call_arg (stmt, 2);
	    if (operand == use)
	      return true;
	    break;
	  default:
	    break;
	  }
      return false;
    }

  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}
/*
   Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
   - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
     be performed.

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
       STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array indexing),
   which does not need to be directly vectorized, then the liveness/relevance
   of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
   skip DEF_STMT because it had already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
   be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */

static bool
process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
	     enum vect_relevant relevant, vec<gimple> *worklist,
	     bool force)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, stmt, loop_vinfo, NULL, &def_stmt, &def, &dt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: unsupported use in stmt.\n");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "reduc-stmt defining reduc-phi in the same nest.\n");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
	dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
		  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
	outer-loop-header-bb:
		d = def_stmt
	inner-loop:
		stmt # use (d)
	outer-loop-tail-bb:
		...		  */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "outer-loop def-stmt defining inner-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
		      vect_used_in_scope : vect_unused_in_scope;
	  break;

	case vect_used_in_outer_by_reduction:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_by_reduction;
	  break;

	case vect_used_in_outer:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_in_scope;
	  break;

	case vect_used_in_scope:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
	outer-loop-header-bb:
		...
	inner-loop:
		d = def_stmt
	outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
		stmt # use (d)		*/
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "inner-loop def-stmt defining outer-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
	    || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
		      vect_used_in_outer_by_reduction : vect_unused_in_scope;
	  break;

	case vect_used_by_reduction:
	  relevant = vect_used_in_outer_by_reduction;
	  break;

	case vect_used_in_scope:
	  relevant = vect_used_in_outer;
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  vect_mark_relevant (worklist, def_stmt, relevant, live_p,
		      is_pattern_stmt_p (stmt_vinfo));
  return true;
}
/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.	 T1 = a[T0]

   3.    j = j + 1

   Stmt 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */

bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple stmt;
  unsigned int i;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple phi;
  bool live_p;
  enum vect_relevant relevant, tmp_relevant;
  enum vect_def_type def_type;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "=== vect_mark_stmts_to_be_vectorized ===\n");

  auto_vec<gimple, 64> worklist;

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  phi = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	      dump_printf (MSG_NOTE, "\n");
	    }

	  if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, phi, relevant, live_p, false);
	}
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  stmt = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	      dump_printf (MSG_NOTE, "\n");
	    }

	  if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, stmt, relevant, live_p, false);
	}
    }

  /* 2. Process_worklist */
  while (worklist.length () > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = worklist.pop ();
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	  dump_printf (MSG_NOTE, "\n");
	}

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines it
	 (DEF_STMT) as relevant/irrelevant and live/dead according to the
	 liveness and relevance properties of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
      live_p = STMT_VINFO_LIVE_P (stmt_vinfo);

      /* Generally, the liveness and relevance properties of STMT are
	 propagated as is to the DEF_STMTs of its USEs:
	  live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
	  relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)

	 One exception is when STMT has been identified as defining a reduction
	 variable; in this case we set the liveness/relevance as follows:
	   live_p = false
	   relevant = vect_used_by_reduction
	 This is because we distinguish between two kinds of relevant stmts -
	 those that are used by a reduction computation, and those that are
	 (also) used by a regular computation.  This allows us later on to
	 identify stmts that are used solely by a reduction, and therefore the
	 order of the results that they produce does not have to be kept.  */

      def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
      tmp_relevant = relevant;
      switch (def_type)
	{
	  case vect_reduction_def:
	    switch (tmp_relevant)
	      {
	        case vect_unused_in_scope:
	          relevant = vect_used_by_reduction;
	          break;

	        case vect_used_by_reduction:
	          if (gimple_code (stmt) == GIMPLE_PHI)
		    break;
	          /* fall through */

	        default:
	          if (dump_enabled_p ())
	            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				     "unsupported use of reduction.\n");
	          return false;
	      }

	    live_p = false;
	    break;

	  case vect_nested_cycle:
	    if (tmp_relevant != vect_unused_in_scope
		&& tmp_relevant != vect_used_in_outer_by_reduction
		&& tmp_relevant != vect_used_in_outer)
	      {
		if (dump_enabled_p ())
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "unsupported use of nested cycle.\n");

		return false;
	      }

	    live_p = false;
	    break;

	  case vect_double_reduction_def:
	    if (tmp_relevant != vect_unused_in_scope
		&& tmp_relevant != vect_used_by_reduction)
	      {
		if (dump_enabled_p ())
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "unsupported use of double reduction.\n");

		return false;
	      }

	    live_p = false;
	    break;

	  default:
	    break;
	}

      if (is_pattern_stmt_p (stmt_vinfo))
	{
	  /* Pattern statements are not inserted into the code, so
	     FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
	     have to scan the RHS or function arguments instead.  */
	  if (is_gimple_assign (stmt))
	    {
	      enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
	      tree op = gimple_assign_rhs1 (stmt);

	      i = 1;
	      if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
		{
		  if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
				    live_p, relevant, &worklist, false)
		      || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
				       live_p, relevant, &worklist, false))
		    return false;
		  i = 2;
		}
	      for (; i < gimple_num_ops (stmt); i++)
		{
		  op = gimple_op (stmt, i);
		  if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
				    &worklist, false))
		    return false;
		}
	    }
	  else if (is_gimple_call (stmt))
	    {
	      for (i = 0; i < gimple_call_num_args (stmt); i++)
		{
		  tree arg = gimple_call_arg (stmt, i);
		  if (!process_use (stmt, arg, loop_vinfo, live_p, relevant,
				    &worklist, false))
		    return false;
		}
	    }
	}
      else
	FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
	  {
	    tree op = USE_FROM_PTR (use_p);
	    if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
			      &worklist, false))
	      return false;
	  }

      if (STMT_VINFO_GATHER_P (stmt_vinfo))
	{
	  tree off;
	  tree decl = vect_check_gather (stmt, loop_vinfo, NULL, &off, NULL);
	  gcc_assert (decl);
	  if (!process_use (stmt, off, loop_vinfo, live_p, relevant,
			    &worklist, true))
	    return false;
	}
    } /* while worklist */

  return true;
}
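/* Illustrative example (added for this edit; not from the original
   file): for

     for (i = 0; i < n; i++)
       {
	 t = a[i];	<-- S2: pulled into the worklist via S3's use of t
	 b[i] = t + 1;	<-- S3: relevant (it alters memory)
	 j = i + 1;	<-- not marked: only used for addressing
       }

   S3 is seeded as relevant by vect_stmt_relevant_p; processing its uses
   then marks S2, while the index update is handled by the loop-control
   and addressing code instead of being vectorized.  */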
/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
			enum vect_def_type *dt,
			stmt_vector_for_cost *prologue_cost_vec,
			stmt_vector_for_cost *body_cost_vec)
{
  int i;
  int inside_cost = 0, prologue_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += record_stmt_cost (prologue_cost_vec, 1, vector_stmt,
					 stmt_info, 0, vect_prologue);

  /* Pass the inside-of-loop statements to the target-specific cost model.  */
  inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
				  stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_simple_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
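/* Worked example (added for this edit; not from the original file):
   with ncopies = 2 and one constant or external operand, the function
   above records one vector_stmt in the prologue and two in the body, so
   under a target model charging 1 per vector_stmt the estimates would be
   prologue_cost = 1 and inside_cost = 2.  */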
/* Model cost for type demotion and promotion operations.  PWR is normally
   zero for single-step promotions and demotions.  It will be one if
   two-step promotion/demotion is required, and so on.  Each additional
   step doubles the number of instructions required.  */

static void
vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
				    enum vect_def_type *dt, int pwr)
{
  int i, tmp;
  int inside_cost = 0, prologue_cost = 0;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  void *target_cost_data;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (loop_vinfo)
    target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
  else
    target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);

  for (i = 0; i < pwr + 1; i++)
    {
      tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
	(i + 1) : i;
      inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
				    vec_promote_demote, stmt_info, 0,
				    vect_body);
    }

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
				      stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_promotion_demotion_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Function vect_cost_group_size

   For grouped load or store, return the group_size only if it is the first
   load or store of a group, else return 1.  This ensures that group size is
   only returned once per group.  */

static int
vect_cost_group_size (stmt_vec_info stmt_info)
{
  gimple first_stmt = GROUP_FIRST_ELEMENT (stmt_info);

  if (first_stmt == STMT_VINFO_STMT (stmt_info))
    return GROUP_SIZE (stmt_info);

  return 1;
}
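/* Illustrative example (added for this edit; not from the original
   file): for a group of four interleaved stores S1..S4 with
   GROUP_FIRST_ELEMENT == S1, vect_cost_group_size returns 4 for S1 and 1
   for S2..S4, so the group overhead is charged exactly once.  */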
/* Function vect_model_store_cost

   Models cost for stores.  In the case of grouped accesses, one access
   has the overhead of the grouped access attributed to it.  */

void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
		       bool store_lanes_p, enum vect_def_type dt,
		       slp_tree slp_node,
		       stmt_vector_for_cost *prologue_cost_vec,
		       stmt_vector_for_cost *body_cost_vec)
{
  int group_size;
  unsigned int inside_cost = 0, prologue_cost = 0;
  struct data_reference *first_dr;
  gimple first_stmt;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (dt == vect_constant_def || dt == vect_external_def)
    prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
				       stmt_info, 0, vect_prologue);

  /* Grouped access?  */
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (slp_node)
	{
	  first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
	  group_size = 1;
	}
      else
	{
	  first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
	  group_size = vect_cost_group_size (stmt_info);
	}

      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a grouped access.  */
  else
    {
      group_size = 1;
      first_dr = STMT_VINFO_DATA_REF (stmt_info);
    }

  /* We assume that the cost of a single store-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate stores.  If a grouped
     access is instead being provided by a permute-and-store operation,
     include the cost of the permutes.  */
  if (!store_lanes_p && group_size > 1)
    {
      /* Uses a high and low interleave operation for each needed permute.  */
      int nstmts = ncopies * exact_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_store_cost: strided group_size = %d .\n",
			 group_size);
    }

  /* Costs of the stores.  */
  vect_get_store_cost (first_dr, ncopies, &inside_cost, body_cost_vec);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_store_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
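/* Worked example (added for this edit; not from the original file):
   for a permute-and-store of a group of size 4 with ncopies = 2, the
   count above is nstmts = 2 * exact_log2 (4) * 4 = 16 vec_perm
   statements, mirroring a log2-depth interleave network applied to each
   copy of the group.  */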
/* Calculate cost of DR's memory access.  */
void
vect_get_store_cost (struct data_reference *dr, int ncopies,
		     unsigned int *inside_cost,
		     stmt_vector_for_cost *body_cost_vec)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vector_store, stmt_info, 0,
					  vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: aligned.\n");
	break;
      }

    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned store.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_store, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: unaligned supported by "
			   "hardware.\n");
	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_store_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_model_load_cost

   Models cost for loads.  In the case of grouped accesses, the last access
   has the overhead of the grouped access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
		      bool load_lanes_p, slp_tree slp_node,
		      stmt_vector_for_cost *prologue_cost_vec,
		      stmt_vector_for_cost *body_cost_vec)
{
  int group_size;
  gimple first_stmt;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
  unsigned int inside_cost = 0, prologue_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* Grouped accesses?  */
  first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && first_stmt && !slp_node)
    {
      group_size = vect_cost_group_size (stmt_info);
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a grouped access.  */
  else
    {
      group_size = 1;
      first_dr = dr;
    }

  /* We assume that the cost of a single load-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate loads.  If a grouped
     access is instead being provided by a load-and-permute operation,
     include the cost of the permutes.  */
  if (!load_lanes_p && group_size > 1)
    {
      /* Uses an even and odd extract operations for each needed permute.  */
      int nstmts = ncopies * exact_log2 (group_size) * group_size;
      inside_cost += record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				       stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_load_cost: strided group_size = %d .\n",
			 group_size);
    }

  /* The loads themselves.  */
  if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
    {
      /* N scalar loads plus gathering them into a vector.  */
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				       scalar_load, stmt_info, 0, vect_body);
      inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
				       stmt_info, 0, vect_body);
    }
  else
    vect_get_load_cost (first_dr, ncopies,
			((!STMT_VINFO_GROUPED_ACCESS (stmt_info))
			 || group_size > 1 || slp_node),
			&inside_cost, &prologue_cost,
			prologue_cost_vec, body_cost_vec, true);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_load_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Calculate cost of DR's memory access.  */
void
vect_get_load_cost (struct data_reference *dr, int ncopies,
		    bool add_realign_cost, unsigned int *inside_cost,
		    unsigned int *prologue_cost,
		    stmt_vector_for_cost *prologue_cost_vec,
		    stmt_vector_for_cost *body_cost_vec,
		    bool record_prologue_costs)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: aligned.\n");

	break;
      }
    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned load.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_load, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned supported by "
			   "hardware.\n");

	break;
      }
    case dr_explicit_realign:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
					  vector_load, stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vec_perm, stmt_info, 0, vect_body);

	/* FIXME: If the misalignment remains fixed across the iterations of
	   the containing loop, the following cost should be added to the
	   prologue costs.  */
	if (targetm.vectorize.builtin_mask_for_load)
	  *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
					    stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign\n");

	break;
      }
    case dr_explicit_realign_optimized:
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned software "
			   "pipelined.\n");

	/* Unaligned software pipeline has a load of an address, an initial
	   load, and possibly a mask operation to "prime" the loop.  However,
	   if this is an access in a group of loads, which provide grouped
	   access, then the above cost should only be considered for one
	   access in the group.  Inside the loop, there is a load op
	   and a realignment op.  */

	if (add_realign_cost && record_prologue_costs)
	  {
	    *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
						vector_stmt, stmt_info,
						0, vect_prologue);
	    if (targetm.vectorize.builtin_mask_for_load)
	      *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
						  vector_stmt, stmt_info,
						  0, vect_prologue);
	  }

	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign optimized"
			   "\n");

	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_load_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
   the loop preheader for the vectorized stmt STMT.  */

static void
vect_init_vector_1 (gimple stmt, gimple new_stmt, gimple_stmt_iterator *gsi)
{
  if (gsi)
    vect_finish_stmt_generation (stmt, new_stmt, gsi);
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
	{
	  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
	  basic_block new_bb;
	  edge pe;

	  if (nested_in_vect_loop_p (loop, stmt))
	    loop = loop->inner;

	  pe = loop_preheader_edge (loop);
	  new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
	  gcc_assert (!new_bb);
	}
      else
	{
	  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
	  basic_block bb;
	  gimple_stmt_iterator gsi_bb_start;

	  gcc_assert (bb_vinfo);
	  bb = BB_VINFO_BB (bb_vinfo);
	  gsi_bb_start = gsi_after_labels (bb);
	  gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
	}
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "created new init_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
      dump_printf (MSG_NOTE, "\n");
    }
}
/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new variable of type
   TYPE with the value VAL.  If TYPE is a vector type and VAL does not have
   vector type a vector with all elements equal to VAL is created first.
   Place the initialization at BSI if it is not NULL.  Otherwise, place the
   initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */

tree
vect_init_vector (gimple stmt, tree val, tree type, gimple_stmt_iterator *gsi)
{
  tree new_var;
  gimple init_stmt;
  tree vec_oprnd;
  tree new_temp;

  if (TREE_CODE (type) == VECTOR_TYPE
      && TREE_CODE (TREE_TYPE (val)) != VECTOR_TYPE)
    {
      if (!types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
	{
	  if (CONSTANT_CLASS_P (val))
	    val = fold_unary (VIEW_CONVERT_EXPR, TREE_TYPE (type), val);
	  else
	    {
	      new_temp = make_ssa_name (TREE_TYPE (type), NULL);
	      init_stmt = gimple_build_assign_with_ops (NOP_EXPR,
							new_temp, val,
							NULL_TREE);
	      vect_init_vector_1 (stmt, init_stmt, gsi);
	      val = new_temp;
	    }
	}
      val = build_vector_from_val (type, val);
    }

  new_var = vect_get_new_vect_var (type, vect_simple_var, "cst_");
  init_stmt = gimple_build_assign (new_var, val);
  new_temp = make_ssa_name (new_var, init_stmt);
  gimple_assign_set_lhs (init_stmt, new_temp);
  vect_init_vector_1 (stmt, init_stmt, gsi);
  vec_oprnd = gimple_assign_lhs (init_stmt);
  return vec_oprnd;
}
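/* Illustrative sketch (added for this edit; not part of the original
   file): splatting the scalar constant 3 into a vector to be used by the
   vectorization of STMT; with GSI == NULL the init stmt lands on the
   loop preheader edge.  The variables stmt and vectype are assumed to be
   in scope.  */
#if 0
  tree vec_cst = vect_init_vector (stmt,
				   build_int_cst (integer_type_node, 3),
				   vectype, NULL);
#endif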
/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT.  This function returns a (vector) def that will be
   used in the vectorized stmt for STMT.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector def
   needs to be introduced.  */

tree
vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
{
  tree vec_oprnd;
  gimple vec_stmt;
  gimple def_stmt;
  stmt_vec_info def_stmt_info = NULL;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  unsigned int nunits;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  tree def;
  enum vect_def_type dt;
  bool is_simple_use;
  tree vector_type;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_get_vec_def_for_operand: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
      dump_printf (MSG_NOTE, "\n");
    }

  is_simple_use = vect_is_simple_use (op, stmt, loop_vinfo, NULL,
				      &def_stmt, &def, &dt);
  gcc_assert (is_simple_use);
  if (dump_enabled_p ())
    {
      int loc_printed = 0;
      if (def)
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "def = ");
	  loc_printed = 1;
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, def);
	  dump_printf (MSG_NOTE, "\n");
	}
      if (def_stmt)
	{
	  if (loc_printed)
	    dump_printf (MSG_NOTE, "  def_stmt = ");
	  else
	    dump_printf_loc (MSG_NOTE, vect_location, "  def_stmt = ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
	  dump_printf (MSG_NOTE, "\n");
	}
    }

  switch (dt)
    {
    /* Case 1: operand is a constant.  */
    case vect_constant_def:
      {
	vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
	gcc_assert (vector_type);
	nunits = TYPE_VECTOR_SUBPARTS (vector_type);

	if (scalar_def)
	  *scalar_def = op;

	/* Create 'vect_cst_ = {cst,cst,...,cst}'  */
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "Create vector_cst. nunits = %d\n", nunits);

	return vect_init_vector (stmt, op, vector_type, NULL);
      }

    /* Case 2: operand is defined outside the loop - loop invariant.  */
    case vect_external_def:
      {
	vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
	gcc_assert (vector_type);

	if (scalar_def)
	  *scalar_def = def;

	/* Create 'vec_inv = {inv,inv,..,inv}'  */
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location, "Create vector_inv.\n");

	return vect_init_vector (stmt, def, vector_type, NULL);
      }

    /* Case 3: operand is defined inside the loop.  */
    case vect_internal_def:
      {
	if (scalar_def)
	  *scalar_def = NULL/* FIXME tuples: def_stmt*/;

	/* Get the def from the vectorized stmt.  */
	def_stmt_info = vinfo_for_stmt (def_stmt);

	vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	/* Get vectorized pattern statement.  */
	if (!vec_stmt
	    && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
	    && !STMT_VINFO_RELEVANT (def_stmt_info))
	  vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
		       STMT_VINFO_RELATED_STMT (def_stmt_info)));
	gcc_assert (vec_stmt);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else if (is_gimple_call (vec_stmt))
	  vec_oprnd = gimple_call_lhs (vec_stmt);
	else
	  vec_oprnd = gimple_assign_lhs (vec_stmt);
	return vec_oprnd;
      }

    /* Case 4: operand is defined by a loop header phi - reduction  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
      {
	struct loop *loop;

	gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
	loop = (gimple_bb (def_stmt))->loop_father;

	/* Get the def before the loop  */
	op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
	return get_initial_def_for_reduction (stmt, op, scalar_def);
      }

    /* Case 5: operand is defined by loop-header phi - induction.  */
    case vect_induction_def:
      {
	gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);

	/* Get the def from the vectorized stmt.  */
	def_stmt_info = vinfo_for_stmt (def_stmt);
	vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else
	  vec_oprnd = gimple_get_lhs (vec_stmt);
	return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_get_vec_def_for_stmt_copy

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
   of the stmt that defines VEC_OPRND.
   DT is the type of the vector def VEC_OPRND.

   Context:
	In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of the
   vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:         vectorized into:        STMT_VINFO_RELATED_STMT

   S1: x = load         VS1.0:  vx.0 = memref0      VS1.1
                        VS1.1:  vx.1 = memref1      VS1.2
                        VS1.2:  vx.2 = memref2      VS1.3
                        VS1.3:  vx.3 = memref3

   S2: z = x + ...      VSnew.0:  vz0 = vx.0 + ...  VSnew.1
                        VSnew.1:  vz1 = vx.1 + ...  VSnew.2
                        VSnew.2:  vz2 = vx.2 + ...  VSnew.3
                        VSnew.3:  vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
	To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

	To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

	For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
	vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
	vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
	vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2);  */

tree
vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
{
  gimple vec_stmt_for_operand;
  stmt_vec_info def_stmt_info;

  /* Do nothing; can reuse same def.  */
  if (dt == vect_external_def || dt == vect_constant_def)
    return vec_oprnd;

  vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
  def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
  gcc_assert (def_stmt_info);
  vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (vec_stmt_for_operand);
  vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
    vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
  else
    vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  return vec_oprnd;
}
/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */

static void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
				 vec<tree> *vec_oprnds0,
				 vec<tree> *vec_oprnds1)
{
  tree vec_oprnd = vec_oprnds0->pop ();

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  vec_oprnds0->quick_push (vec_oprnd);

  if (vec_oprnds1 && vec_oprnds1->length ())
    {
      vec_oprnd = vec_oprnds1->pop ();
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      vec_oprnds1->quick_push (vec_oprnd);
    }
}
/* Get vectorized definitions for OP0 and OP1.
   REDUC_INDEX is the index of reduction operand in case of reduction,
   and -1 otherwise.  */

void
vect_get_vec_defs (tree op0, tree op1, gimple stmt,
		   vec<tree> *vec_oprnds0,
		   vec<tree> *vec_oprnds1,
		   slp_tree slp_node, int reduc_index)
{
  if (slp_node)
    {
      int nops = (op1 == NULL_TREE) ? 1 : 2;
      auto_vec<tree> ops (nops);
      auto_vec<vec<tree> > vec_defs (nops);

      ops.quick_push (op0);
      if (op1)
	ops.quick_push (op1);

      vect_get_slp_defs (ops, slp_node, &vec_defs, reduc_index);

      *vec_oprnds0 = vec_defs[0];
      if (op1)
	*vec_oprnds1 = vec_defs[1];
    }
  else
    {
      tree vec_oprnd;

      vec_oprnds0->create (1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
      vec_oprnds0->quick_push (vec_oprnd);

      if (op1)
	{
	  vec_oprnds1->create (1);
	  vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
	  vec_oprnds1->quick_push (vec_oprnd);
	}
    }
}
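/* Illustrative sketch (added for this edit; not part of the original
   file): fetching the vector defs for both operands of a binary stmt in
   the non-SLP path (slp_node == NULL, reduc_index == -1).  The variables
   op0, op1 and stmt are assumed to be in scope.  */
#if 0
  vec<tree> vec_oprnds0 = vNULL, vec_oprnds1 = vNULL;
  vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1, NULL, -1);
  tree vop0 = vec_oprnds0[0], vop1 = vec_oprnds1[0];
#endif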
/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

void
vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
			     gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);

  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  if (!gsi_end_p (*gsi)
      && gimple_has_mem_ops (vec_stmt))
    {
      gimple at_stmt = gsi_stmt (*gsi);
      tree vuse = gimple_vuse (at_stmt);
      if (vuse && TREE_CODE (vuse) == SSA_NAME)
	{
	  tree vdef = gimple_vdef (at_stmt);
	  gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
	  /* If we have an SSA vuse and insert a store, update virtual
	     SSA form to avoid triggering the renamer.  Do so only
	     if we can easily see all uses - which is what almost always
	     happens with the way vectorized stmts are inserted.  */
	  if ((vdef && TREE_CODE (vdef) == SSA_NAME)
	      && ((is_gimple_assign (vec_stmt)
		   && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
		  || (is_gimple_call (vec_stmt)
		      && !(gimple_call_flags (vec_stmt)
			   & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
	    {
	      tree new_vdef = copy_ssa_name (vuse, vec_stmt);
	      gimple_set_vdef (vec_stmt, new_vdef);
	      SET_USE (gimple_vuse_op (at_stmt), new_vdef);
	    }
	}
    }
  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
						   bb_vinfo));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
      dump_printf (MSG_NOTE, "\n");
    }

  gimple_set_location (vec_stmt, gimple_location (stmt));

  /* While EH edges will generally prevent vectorization, stmt might
     e.g. be in a must-not-throw region.  Ensure newly created stmts
     that could throw are part of the same region.  */
  int lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
    add_stmt_to_eh_lp (vec_stmt, lp_nr);
}
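/* Illustrative sketch (added for this edit; not part of the original
   file): the usual generation pattern around
   vect_finish_stmt_generation - build the vector stmt, give it an SSA
   lhs, then insert it before *GSI so it inherits location and EH region
   from STMT.  The variables vec_dest, rhs, stmt and gsi are assumed to
   be in scope.  */
#if 0
  gimple new_stmt = gimple_build_assign (vec_dest, rhs);
  gimple_assign_set_lhs (new_stmt, make_ssa_name (vec_dest, new_stmt));
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
#endif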
/* Checks if CALL can be vectorized in type VECTYPE.  Returns
   a function declaration if the target has a vectorized version
   of the function, or NULL_TREE if the function cannot be vectorized.  */

tree
vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
{
  tree fndecl = gimple_call_fndecl (call);

  /* We only handle functions that do not read or clobber memory -- i.e.
     const or novops ones.  */
  if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
    return NULL_TREE;

  if (!fndecl
      || TREE_CODE (fndecl) != FUNCTION_DECL
      || !DECL_BUILT_IN (fndecl))
    return NULL_TREE;

  return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
							vectype_in);
}


static tree permute_vec_elements (tree, tree, tree, gimple,
				  gimple_stmt_iterator *);
1731 /* Function vectorizable_mask_load_store.
1733 Check if STMT performs a conditional load or store that can be vectorized.
1734 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1735 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
1736 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1739 vectorizable_mask_load_store (gimple stmt
, gimple_stmt_iterator
*gsi
,
1740 gimple
*vec_stmt
, slp_tree slp_node
)
1742 tree vec_dest
= NULL
;
1743 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
1744 stmt_vec_info prev_stmt_info
;
1745 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
1746 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
1747 bool nested_in_vect_loop
= nested_in_vect_loop_p (loop
, stmt
);
1748 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
);
1749 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
1753 tree dataref_ptr
= NULL_TREE
;
1755 int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
1759 tree gather_base
= NULL_TREE
, gather_off
= NULL_TREE
;
1760 tree gather_off_vectype
= NULL_TREE
, gather_decl
= NULL_TREE
;
1761 int gather_scale
= 1;
1762 enum vect_def_type gather_dt
= vect_unknown_def_type
;
1767 enum vect_def_type dt
;
1769 if (slp_node
!= NULL
)
1772 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
1773 gcc_assert (ncopies
>= 1);
1775 is_store
= gimple_call_internal_fn (stmt
) == IFN_MASK_STORE
;
1776 mask
= gimple_call_arg (stmt
, 2);
1777 if (TYPE_PRECISION (TREE_TYPE (mask
))
1778 != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype
))))
1781 /* FORNOW. This restriction should be relaxed. */
1782 if (nested_in_vect_loop
&& ncopies
> 1)
1784 if (dump_enabled_p ())
1785 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1786 "multiple types in nested loop.");
1790 if (!STMT_VINFO_RELEVANT_P (stmt_info
))
1793 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
1796 if (!STMT_VINFO_DATA_REF (stmt_info
))
1799 elem_type
= TREE_TYPE (vectype
);
1801 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
))
1804 if (STMT_VINFO_STRIDE_LOAD_P (stmt_info
))
1807 if (STMT_VINFO_GATHER_P (stmt_info
))
1811 gather_decl
= vect_check_gather (stmt
, loop_vinfo
, &gather_base
,
1812 &gather_off
, &gather_scale
);
1813 gcc_assert (gather_decl
);
1814 if (!vect_is_simple_use_1 (gather_off
, NULL
, loop_vinfo
, NULL
,
1815 &def_stmt
, &def
, &gather_dt
,
1816 &gather_off_vectype
))
1818 if (dump_enabled_p ())
1819 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1820 "gather index use not simple.");
1824 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gather_decl
));
1826 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist
))));
1827 if (TREE_CODE (masktype
) == INTEGER_TYPE
)
1829 if (dump_enabled_p ())
1830 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1831 "masked gather with integer mask not supported.");
1835 else if (tree_int_cst_compare (nested_in_vect_loop
1836 ? STMT_VINFO_DR_STEP (stmt_info
)
1837 : DR_STEP (dr
), size_zero_node
) <= 0)
1839 else if (!VECTOR_MODE_P (TYPE_MODE (vectype
))
1840 || !can_vec_mask_load_store_p (TYPE_MODE (vectype
), !is_store
))
1843 if (TREE_CODE (mask
) != SSA_NAME
)
1846 if (!vect_is_simple_use (mask
, stmt
, loop_vinfo
, NULL
,
1847 &def_stmt
, &def
, &dt
))
1852 tree rhs
= gimple_call_arg (stmt
, 3);
1853 if (!vect_is_simple_use (rhs
, stmt
, loop_vinfo
, NULL
,
1854 &def_stmt
, &def
, &dt
))
1858 if (!vec_stmt
) /* transformation not required. */
1860 STMT_VINFO_TYPE (stmt_info
) = call_vec_info_type
;
1862 vect_model_store_cost (stmt_info
, ncopies
, false, dt
,
1865 vect_model_load_cost (stmt_info
, ncopies
, false, NULL
, NULL
, NULL
);
1871 if (STMT_VINFO_GATHER_P (stmt_info
))
1873 tree vec_oprnd0
= NULL_TREE
, op
;
1874 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gather_decl
));
1875 tree rettype
, srctype
, ptrtype
, idxtype
, masktype
, scaletype
;
1876 tree ptr
, vec_mask
= NULL_TREE
, mask_op
= NULL_TREE
, var
, scale
;
1877 tree perm_mask
= NULL_TREE
, prev_res
= NULL_TREE
;
1878 tree mask_perm_mask
= NULL_TREE
;
1879 edge pe
= loop_preheader_edge (loop
);
1882 enum { NARROW
, NONE
, WIDEN
} modifier
;
1883 int gather_off_nunits
= TYPE_VECTOR_SUBPARTS (gather_off_vectype
);
1885 rettype
= TREE_TYPE (TREE_TYPE (gather_decl
));
1886 srctype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1887 ptrtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1888 idxtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1889 masktype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1890 scaletype
= TREE_VALUE (arglist
);
1891 gcc_checking_assert (types_compatible_p (srctype
, rettype
)
1892 && types_compatible_p (srctype
, masktype
));
1894 if (nunits
== gather_off_nunits
)
1896 else if (nunits
== gather_off_nunits
/ 2)
1898 unsigned char *sel
= XALLOCAVEC (unsigned char, gather_off_nunits
);
1901 for (i
= 0; i
< gather_off_nunits
; ++i
)
1902 sel
[i
] = i
| nunits
;
1904 perm_mask
= vect_gen_perm_mask (gather_off_vectype
, sel
);
1905 gcc_assert (perm_mask
!= NULL_TREE
);
1907 else if (nunits
== gather_off_nunits
* 2)
1909 unsigned char *sel
= XALLOCAVEC (unsigned char, nunits
);
1912 for (i
= 0; i
< nunits
; ++i
)
1913 sel
[i
] = i
< gather_off_nunits
1914 ? i
: i
+ nunits
- gather_off_nunits
;
1916 perm_mask
= vect_gen_perm_mask (vectype
, sel
);
1917 gcc_assert (perm_mask
!= NULL_TREE
);
1919 for (i
= 0; i
< nunits
; ++i
)
1920 sel
[i
] = i
| gather_off_nunits
;
1921 mask_perm_mask
= vect_gen_perm_mask (masktype
, sel
);
1922 gcc_assert (mask_perm_mask
!= NULL_TREE
);
1927 vec_dest
= vect_create_destination_var (gimple_call_lhs (stmt
), vectype
);
1929 ptr
= fold_convert (ptrtype
, gather_base
);
1930 if (!is_gimple_min_invariant (ptr
))
1932 ptr
= force_gimple_operand (ptr
, &seq
, true, NULL_TREE
);
1933 new_bb
= gsi_insert_seq_on_edge_immediate (pe
, seq
);
1934 gcc_assert (!new_bb
);
1937 scale
= build_int_cst (scaletype
, gather_scale
);
1939 prev_stmt_info
= NULL
;
  for (j = 0; j < ncopies; ++j)
    {
      if (modifier == WIDEN && (j & 1))
	op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
				   perm_mask, stmt, gsi);
      else if (j == 0)
	op = vec_oprnd0
	  = vect_get_vec_def_for_operand (gather_off, stmt, NULL);
      else
	op = vec_oprnd0
	  = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);

      if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
	{
	  gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
		      == TYPE_VECTOR_SUBPARTS (idxtype));
	  var = vect_get_new_vect_var (idxtype, vect_simple_var, NULL);
	  var = make_ssa_name (var, NULL);
	  op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
	  new_stmt
	    = gimple_build_assign_with_ops (VIEW_CONVERT_EXPR, var,
					    op, NULL_TREE);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  op = var;
	}

      if (mask_perm_mask && (j & 1))
	mask_op = permute_vec_elements (mask_op, mask_op,
					mask_perm_mask, stmt, gsi);
      else
	{
	  if (j == 0)
	    vec_mask = vect_get_vec_def_for_operand (mask, stmt, NULL);
	  else
	    {
	      vect_is_simple_use (vec_mask, NULL, loop_vinfo, NULL,
				  &def_stmt, &def, &dt);
	      vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
	    }

	  mask_op = vec_mask;
	  if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
	    {
	      gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op))
			  == TYPE_VECTOR_SUBPARTS (masktype));
	      var = vect_get_new_vect_var (masktype, vect_simple_var,
					   NULL);
	      var = make_ssa_name (var, NULL);
	      mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
	      new_stmt
		= gimple_build_assign_with_ops (VIEW_CONVERT_EXPR, var,
						mask_op, NULL_TREE);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      mask_op = var;
	    }
	}

      new_stmt
	= gimple_build_call (gather_decl, 5, mask_op, ptr, op, mask_op,
			     scale);

      if (!useless_type_conversion_p (vectype, rettype))
	{
	  gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
		      == TYPE_VECTOR_SUBPARTS (rettype));
	  var = vect_get_new_vect_var (rettype, vect_simple_var, NULL);
	  op = make_ssa_name (var, new_stmt);
	  gimple_call_set_lhs (new_stmt, op);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  var = make_ssa_name (vec_dest, NULL);
	  op = build1 (VIEW_CONVERT_EXPR, vectype, op);
	  new_stmt
	    = gimple_build_assign_with_ops (VIEW_CONVERT_EXPR, var, op,
					    NULL_TREE);
	}
      else
	{
	  var = make_ssa_name (vec_dest, new_stmt);
	  gimple_call_set_lhs (new_stmt, var);
	}

      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (modifier == NARROW)
	{
	  if ((j & 1) == 0)
	    {
	      prev_res = var;
	      continue;
	    }
	  var = permute_vec_elements (prev_res, var,
				      perm_mask, stmt, gsi);
	  new_stmt = SSA_NAME_DEF_STMT (var);
	}

      if (prev_stmt_info == NULL)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }
      /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
	 from the IL.  */
      tree lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
      set_vinfo_for_stmt (new_stmt, stmt_info);
      set_vinfo_for_stmt (stmt, NULL);
      STMT_VINFO_STMT (stmt_info) = new_stmt;
      gsi_replace (gsi, new_stmt, true);
      return true;
    }
  else if (is_store)
    {
      tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
      prev_stmt_info = NULL;
      for (i = 0; i < ncopies; i++)
	{
	  unsigned align, misalign;

	  if (i == 0)
	    {
	      tree rhs = gimple_call_arg (stmt, 3);
	      vec_rhs = vect_get_vec_def_for_operand (rhs, stmt, NULL);
	      vec_mask = vect_get_vec_def_for_operand (mask, stmt, NULL);
	      /* We should have catched mismatched types earlier.  */
	      gcc_assert (useless_type_conversion_p (vectype,
						     TREE_TYPE (vec_rhs)));
	      dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
						      NULL_TREE, &dummy, gsi,
						      &ptr_incr, false, &inv_p);
	      gcc_assert (!inv_p);
	    }
	  else
	    {
	      vect_is_simple_use (vec_rhs, NULL, loop_vinfo, NULL, &def_stmt,
				  &def, &dt);
	      vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs);
	      vect_is_simple_use (vec_mask, NULL, loop_vinfo, NULL, &def_stmt,
				  &def, &dt);
	      vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
	      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
					     TYPE_SIZE_UNIT (vectype));
	    }

	  align = TYPE_ALIGN_UNIT (vectype);
	  if (aligned_access_p (dr))
	    misalign = 0;
	  else if (DR_MISALIGNMENT (dr) == -1)
	    {
	      align = TYPE_ALIGN_UNIT (elem_type);
	      misalign = 0;
	    }
	  else
	    misalign = DR_MISALIGNMENT (dr);
	  set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
				  misalign);
	  new_stmt
	    = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
					  gimple_call_arg (stmt, 1),
					  vec_mask, vec_rhs);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (i == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }
  else
    {
      tree vec_mask = NULL_TREE;
      prev_stmt_info = NULL;
      vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
      for (i = 0; i < ncopies; i++)
	{
	  unsigned align, misalign;

	  if (i == 0)
	    {
	      vec_mask = vect_get_vec_def_for_operand (mask, stmt, NULL);
	      dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
						      NULL_TREE, &dummy, gsi,
						      &ptr_incr, false, &inv_p);
	      gcc_assert (!inv_p);
	    }
	  else
	    {
	      vect_is_simple_use (vec_mask, NULL, loop_vinfo, NULL, &def_stmt,
				  &def, &dt);
	      vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
	      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
					     TYPE_SIZE_UNIT (vectype));
	    }

	  align = TYPE_ALIGN_UNIT (vectype);
	  if (aligned_access_p (dr))
	    misalign = 0;
	  else if (DR_MISALIGNMENT (dr) == -1)
	    {
	      align = TYPE_ALIGN_UNIT (elem_type);
	      misalign = 0;
	    }
	  else
	    misalign = DR_MISALIGNMENT (dr);
	  set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
				  misalign);
	  new_stmt
	    = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
					  gimple_call_arg (stmt, 1),
					  vec_mask);
	  gimple_call_set_lhs (new_stmt, make_ssa_name (vec_dest, NULL));
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (i == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }
  if (!is_store)
    {
      /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
	 from the IL.  */
      tree lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
      set_vinfo_for_stmt (new_stmt, stmt_info);
      set_vinfo_for_stmt (stmt, NULL);
      STMT_VINFO_STMT (stmt_info) = new_stmt;
      gsi_replace (gsi, new_stmt, true);
    }

  return true;
}
/* Function vectorizable_call.

   Check if STMT performs a function call that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
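/* An illustrative sketch (names are made up): with a vectorization
   factor of 4 and a target that provides a vector variant VFOO of FOO,
   the scalar call

     S1:  a_1 = foo (b_1);

   in a loop becomes a single vector call

     VS1: va_1 = vfoo (vb_1);

   replacing four scalar invocations per vector-loop iteration.  */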
static bool
vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
		   slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
  tree vectype_out, vectype_in;
  int nunits_in;
  int nunits_out;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  tree fndecl, new_temp, def, rhs_type;
  gimple def_stmt;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  gimple new_stmt = NULL;
  int ncopies, j;
  vec<tree> vargs = vNULL;
  enum { NARROW, NONE, WIDEN } modifier;
  size_t i, nargs;
  tree lhs;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is STMT a vectorizable call?   */
  if (!is_gimple_call (stmt))
    return false;

  if (gimple_call_internal_p (stmt)
      && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
	  || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
    return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
					 slp_node);

  if (gimple_call_lhs (stmt) == NULL_TREE
      || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  gcc_checking_assert (!stmt_can_throw_internal (stmt));

  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Process function arguments.  */
  rhs_type = NULL_TREE;
  vectype_in = NULL_TREE;
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has more than three arguments, we do not have
     interesting builtin functions to vectorize with more than two arguments
     except for fma.  No arguments is also not good.  */
  if (nargs == 0 || nargs > 3)
    return false;

  /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic.  */
  if (gimple_call_internal_p (stmt)
      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
    {
      nargs = 0;
      rhs_type = unsigned_type_node;
    }

  for (i = 0; i < nargs; i++)
    {
      tree opvectype;

      op = gimple_call_arg (stmt, i);

      /* We can only handle calls with arguments of the same type.  */
      if (rhs_type
	  && !types_compatible_p (rhs_type, TREE_TYPE (op)))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "argument types differ.\n");
	  return false;
	}
      if (!rhs_type)
	rhs_type = TREE_TYPE (op);

      if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
				 &def_stmt, &def, &dt[i], &opvectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "use not simple.\n");
	  return false;
	}

      if (!vectype_in)
	vectype_in = opvectype;
      else if (opvectype
	       && opvectype != vectype_in)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "argument vector types differ.\n");
	  return false;
	}
    }

  /* If all arguments are external or constant defs use a vector type with
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "no vectype for scalar type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}

      return false;
    }

  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in == nunits_out / 2)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else if (nunits_out == nunits_in / 2)
    modifier = WIDEN;
  else
    return false;
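  /* For example (illustrative types): vectype_in == V2DF and
     vectype_out == V4SF gives nunits_in == 2 and nunits_out == 4,
     i.e. NARROW -- each vector call consumes two input vectors per
     result vector; V4SF in and V2DF out is the WIDEN case.  */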
  /* For now, we only vectorize functions if a target specific builtin
     is available.  TODO -- in some cases, it might be profitable to
     insert the calls for pieces of the vector, in order to be able
     to vectorize other operations in the loop.  */
  fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
  if (fndecl == NULL_TREE)
    {
      if (gimple_call_internal_p (stmt)
	  && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE
	  && !slp_node
	  && loop_vinfo
	  && LOOP_VINFO_LOOP (loop_vinfo)->simduid
	  && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
	  && LOOP_VINFO_LOOP (loop_vinfo)->simduid
	     == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
	{
	  /* We can handle IFN_GOMP_SIMD_LANE by returning a
	     { 0, 1, 2, ... vf - 1 } vector.  */
	  gcc_assert (nargs == 0);
	}
      else
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "function is not vectorizable.\n");
	  return false;
	}
    }

  gcc_assert (!gimple_vuse (stmt));

  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
                         "\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /** Transform.  **/

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");

  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; ++j)
	{
	  /* Build argument list for the vectorized call.  */
	  if (j == 0)
	    vargs.create (nargs);
	  else
	    vargs.truncate (0);

	  if (slp_node)
	    {
	      auto_vec<vec<tree> > vec_defs (nargs);
	      vec<tree> vec_oprnds0;

	      for (i = 0; i < nargs; i++)
		vargs.quick_push (gimple_call_arg (stmt, i));
	      vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
	      vec_oprnds0 = vec_defs[0];

	      /* Arguments are ready.  Create the new vector stmt.  */
	      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
		{
		  size_t k;
		  for (k = 0; k < nargs; k++)
		    {
		      vec<tree> vec_oprndsk = vec_defs[k];
		      vargs[k] = vec_oprndsk[i];
		    }
		  new_stmt = gimple_build_call_vec (fndecl, vargs);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_call_set_lhs (new_stmt, new_temp);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
		}

	      for (i = 0; i < nargs; i++)
		{
		  vec<tree> vec_oprndsi = vec_defs[i];
		  vec_oprndsi.release ();
		}
	      continue;
	    }

	  for (i = 0; i < nargs; i++)
	    {
	      op = gimple_call_arg (stmt, i);
	      if (j == 0)
		vec_oprnd0
		  = vect_get_vec_def_for_operand (op, stmt, NULL);
	      else
		{
		  vec_oprnd0 = gimple_call_arg (new_stmt, i);
		  vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}

	      vargs.quick_push (vec_oprnd0);
	    }

	  if (gimple_call_internal_p (stmt)
	      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
	    {
	      tree *v = XALLOCAVEC (tree, nunits_out);
	      int k;
	      for (k = 0; k < nunits_out; ++k)
		v[k] = build_int_cst (unsigned_type_node, j * nunits_out + k);
	      tree cst = build_vector (vectype_out, v);
	      tree new_var
		= vect_get_new_vect_var (vectype_out, vect_simple_var, "cst_");
	      gimple init_stmt = gimple_build_assign (new_var, cst);
	      new_temp = make_ssa_name (new_var, init_stmt);
	      gimple_assign_set_lhs (init_stmt, new_temp);
	      vect_init_vector_1 (stmt, init_stmt, NULL);
	      new_temp = make_ssa_name (vec_dest, NULL);
	      new_stmt = gimple_build_assign (new_temp,
					      gimple_assign_lhs (init_stmt));
	    }
	  else
	    {
	      new_stmt = gimple_build_call_vec (fndecl, vargs);
	      new_temp = make_ssa_name (vec_dest, new_stmt);
	      gimple_call_set_lhs (new_stmt, new_temp);
	    }
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}

      break;
    case NARROW:
      for (j = 0; j < ncopies; ++j)
	{
	  /* Build argument list for the vectorized call.  */
	  if (j == 0)
	    vargs.create (nargs * 2);
	  else
	    vargs.truncate (0);

	  if (slp_node)
	    {
	      auto_vec<vec<tree> > vec_defs (nargs);
	      vec<tree> vec_oprnds0;

	      for (i = 0; i < nargs; i++)
		vargs.quick_push (gimple_call_arg (stmt, i));
	      vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
	      vec_oprnds0 = vec_defs[0];

	      /* Arguments are ready.  Create the new vector stmt.  */
	      for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
		{
		  size_t k;
		  vargs.truncate (0);
		  for (k = 0; k < nargs; k++)
		    {
		      vec<tree> vec_oprndsk = vec_defs[k];
		      vargs.quick_push (vec_oprndsk[i]);
		      vargs.quick_push (vec_oprndsk[i + 1]);
		    }
		  new_stmt = gimple_build_call_vec (fndecl, vargs);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_call_set_lhs (new_stmt, new_temp);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
		}

	      for (i = 0; i < nargs; i++)
		{
		  vec<tree> vec_oprndsi = vec_defs[i];
		  vec_oprndsi.release ();
		}
	      continue;
	    }

	  for (i = 0; i < nargs; i++)
	    {
	      op = gimple_call_arg (stmt, i);
	      if (j == 0)
		{
		  vec_oprnd0
		    = vect_get_vec_def_for_operand (op, stmt, NULL);
		  vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}
	      else
		{
		  vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
		  vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
		  vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}

	      vargs.quick_push (vec_oprnd0);
	      vargs.quick_push (vec_oprnd1);
	    }

	  new_stmt = gimple_build_call_vec (fndecl, vargs);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_call_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

      break;
    case WIDEN:
      /* No current target implements this case.  */
      return false;
    }

  vargs.release ();

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  type = TREE_TYPE (scalar_dest);
  if (is_pattern_stmt_p (stmt_info))
    lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
  else
    lhs = gimple_call_lhs (stmt);
  new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, false);

  return true;
}
struct simd_call_arg_info
{
  tree vectype;
  tree op;
  enum vect_def_type dt;
  HOST_WIDE_INT linear_step;
  unsigned int align;
};
/* Function vectorizable_simd_clone_call.

   Check if STMT performs a function call that can be vectorized
   by calling a simd clone of the function.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
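/* Illustrative sketch: for a function declared

     #pragma omp declare simd simdlen(4) notinbranch
     int foo (int x);

   the compiler emits SIMD clones such as _ZGVbN4v_foo taking a V4SI
   argument, so a call a_1 = foo (b_1) in a loop with VF == 4 can be
   replaced by a single call va_1 = _ZGVbN4v_foo (vb_1).  (The exact
   mangling is target- and ABI-dependent; it is shown here only as an
   example.)  */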
static bool
vectorizable_simd_clone_call (gimple stmt, gimple_stmt_iterator *gsi,
			      gimple *vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
  tree vectype;
  unsigned int nunits;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
  tree fndecl, new_temp, def;
  gimple def_stmt;
  gimple new_stmt = NULL;
  int ncopies, j;
  vec<simd_call_arg_info> arginfo = vNULL;
  vec<tree> vargs = vNULL;
  size_t i, nargs;
  tree lhs, rtype, ratype;
  vec<constructor_elt, va_gc> *ret_ctor_elts;

  /* Is STMT a vectorizable call?   */
  if (!is_gimple_call (stmt))
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl == NULL_TREE)
    return false;

  struct cgraph_node *node = cgraph_get_node (fndecl);
  if (node == NULL || node->simd_clones == NULL)
    return false;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  if (gimple_call_lhs (stmt)
      && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  gcc_checking_assert (!stmt_can_throw_internal (stmt));

  vectype = STMT_VINFO_VECTYPE (stmt_info);

  if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
    return false;

  /* FORNOW */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    return false;

  /* Process function arguments.  */
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has zero arguments.  */
  if (nargs == 0)
    return false;

  arginfo.create (nargs);

  for (i = 0; i < nargs; i++)
    {
      simd_call_arg_info thisarginfo;
      affine_iv iv;

      thisarginfo.linear_step = 0;
      thisarginfo.align = 0;
      thisarginfo.op = NULL_TREE;

      op = gimple_call_arg (stmt, i);
      if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
				 &def_stmt, &def, &thisarginfo.dt,
				 &thisarginfo.vectype)
	  || thisarginfo.dt == vect_uninitialized_def)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  arginfo.release ();
	  return false;
	}

      if (thisarginfo.dt == vect_constant_def
	  || thisarginfo.dt == vect_external_def)
	gcc_assert (thisarginfo.vectype == NULL_TREE);
      else
	gcc_assert (thisarginfo.vectype != NULL_TREE);

      if (thisarginfo.dt != vect_constant_def
	  && thisarginfo.dt != vect_external_def
	  && loop_vinfo
	  && TREE_CODE (op) == SSA_NAME
	  && simple_iv (loop, loop_containing_stmt (stmt), op, &iv, false)
	  && tree_fits_shwi_p (iv.step))
	{
	  thisarginfo.linear_step = tree_to_shwi (iv.step);
	  thisarginfo.op = iv.base;
	}
      else if ((thisarginfo.dt == vect_constant_def
		|| thisarginfo.dt == vect_external_def)
	       && POINTER_TYPE_P (TREE_TYPE (op)))
	thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;

      arginfo.quick_push (thisarginfo);
    }
  unsigned int badness = 0;
  struct cgraph_node *bestn = NULL;
  if (STMT_VINFO_SIMD_CLONE_FNDECL (stmt_info))
    bestn = cgraph_get_node (STMT_VINFO_SIMD_CLONE_FNDECL (stmt_info));
  else
    for (struct cgraph_node *n = node->simd_clones; n != NULL;
	 n = n->simdclone->next_clone)
      {
	unsigned int this_badness = 0;
	if (n->simdclone->simdlen
	    > (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
	    || n->simdclone->nargs != nargs)
	  continue;
	if (n->simdclone->simdlen
	    < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo))
	  this_badness += (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
			   - exact_log2 (n->simdclone->simdlen)) * 1024;
	if (n->simdclone->inbranch)
	  this_badness += 2048;
	int target_badness = targetm.simd_clone.usable (n);
	if (target_badness < 0)
	  continue;
	this_badness += target_badness * 512;
	/* FORNOW: Have to add code to add the mask argument.  */
	if (n->simdclone->inbranch)
	  continue;
	for (i = 0; i < nargs; i++)
	  {
	    switch (n->simdclone->args[i].arg_type)
	      {
	      case SIMD_CLONE_ARG_TYPE_VECTOR:
		if (!useless_type_conversion_p
			(n->simdclone->args[i].orig_type,
			 TREE_TYPE (gimple_call_arg (stmt, i))))
		  i = -1;
		else if (arginfo[i].dt == vect_constant_def
			 || arginfo[i].dt == vect_external_def
			 || arginfo[i].linear_step)
		  this_badness += 64;
		break;
	      case SIMD_CLONE_ARG_TYPE_UNIFORM:
		if (arginfo[i].dt != vect_constant_def
		    && arginfo[i].dt != vect_external_def)
		  i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
		if (arginfo[i].dt == vect_constant_def
		    || arginfo[i].dt == vect_external_def
		    || (arginfo[i].linear_step
			!= n->simdclone->args[i].linear_step))
		  i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
		/* FORNOW */
		i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_MASK:
		gcc_unreachable ();
	      }
	    if (i == (size_t) -1)
	      break;
	    if (n->simdclone->args[i].alignment > arginfo[i].align)
	      {
		i = -1;
		break;
	      }
	    if (arginfo[i].align)
	      this_badness += (exact_log2 (arginfo[i].align)
			       - exact_log2 (n->simdclone->args[i].alignment));
	  }
	if (i == (size_t) -1)
	  continue;
	if (bestn == NULL || this_badness < badness)
	  {
	    bestn = n;
	    badness = this_badness;
	  }
      }

  if (bestn == NULL)
    {
      arginfo.release ();
      return false;
    }
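  /* Worked example of the scoring above (illustrative numbers): with a
     vectorization factor of 8, a clone with simdlen 4 accrues
     (exact_log2 (8) - exact_log2 (4)) * 1024 == 1024 badness for
     needing two calls per vector iteration, and an inbranch clone
     accrues a further 2048; the clone with the lowest total wins.  */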
  for (i = 0; i < nargs; i++)
    if ((arginfo[i].dt == vect_constant_def
	 || arginfo[i].dt == vect_external_def)
	&& bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
      {
	arginfo[i].vectype
	  = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
								     i)));
	if (arginfo[i].vectype == NULL
	    || (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
		> bestn->simdclone->simdlen))
	  {
	    arginfo.release ();
	    return false;
	  }
      }

  fndecl = bestn->decl;
  nunits = bestn->simdclone->simdlen;
  ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  /* If the function isn't const, only allow it in simd loops where user
     has asserted that at least nunits consecutive iterations can be
     performed using SIMD instructions.  */
  if ((loop == NULL || (unsigned) loop->safelen < nunits)
      && gimple_vuse (stmt))
    {
      arginfo.release ();
      return false;
    }

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_SIMD_CLONE_FNDECL (stmt_info) = bestn->decl;
      STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_simd_clone_call ===\n");
/*      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
      arginfo.release ();
      return true;
    }

  /** Transform.  **/

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");

  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = NULL_TREE;
  rtype = NULL_TREE;
  ratype = NULL_TREE;
  if (scalar_dest)
    {
      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      rtype = TREE_TYPE (TREE_TYPE (fndecl));
      if (TREE_CODE (rtype) == ARRAY_TYPE)
	{
	  ratype = rtype;
	  rtype = TREE_TYPE (ratype);
	}
    }

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; ++j)
    {
      /* Build argument list for the vectorized call.  */
      if (j == 0)
	vargs.create (nargs);
      else
	vargs.truncate (0);

      for (i = 0; i < nargs; i++)
	{
	  unsigned int k, l, m, o;
	  tree atype;
	  op = gimple_call_arg (stmt, i);
	  switch (bestn->simdclone->args[i].arg_type)
	    {
	    case SIMD_CLONE_ARG_TYPE_VECTOR:
	      atype = bestn->simdclone->args[i].vector_type;
	      o = nunits / TYPE_VECTOR_SUBPARTS (atype);
	      for (m = j * o; m < (j + 1) * o; m++)
		{
		  if (TYPE_VECTOR_SUBPARTS (atype)
		      < TYPE_VECTOR_SUBPARTS (arginfo[i].vectype))
		    {
		      unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
		      k = (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
			   / TYPE_VECTOR_SUBPARTS (atype));
		      gcc_assert ((k & (k - 1)) == 0);
		      if (m == 0)
			vec_oprnd0
			  = vect_get_vec_def_for_operand (op, stmt, NULL);
		      else
			{
			  vec_oprnd0 = arginfo[i].op;
			  if ((m & (k - 1)) == 0)
			    vec_oprnd0
			      = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
								vec_oprnd0);
			}
		      arginfo[i].op = vec_oprnd0;
		      vec_oprnd0
			= build3 (BIT_FIELD_REF, atype, vec_oprnd0,
				  size_int (prec),
				  bitsize_int ((m & (k - 1)) * prec));
		      new_stmt
			= gimple_build_assign (make_ssa_name (atype, NULL),
					       vec_oprnd0);
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		      vargs.safe_push (gimple_assign_lhs (new_stmt));
		    }
		  else
		    {
		      k = (TYPE_VECTOR_SUBPARTS (atype)
			   / TYPE_VECTOR_SUBPARTS (arginfo[i].vectype));
		      gcc_assert ((k & (k - 1)) == 0);
		      vec<constructor_elt, va_gc> *ctor_elts;
		      if (k != 1)
			vec_alloc (ctor_elts, k);
		      else
			ctor_elts = NULL;
		      for (l = 0; l < k; l++)
			{
			  if (m == 0 && l == 0)
			    vec_oprnd0
			      = vect_get_vec_def_for_operand (op, stmt, NULL);
			  else
			    vec_oprnd0
			      = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
								arginfo[i].op);
			  arginfo[i].op = vec_oprnd0;
			  if (k == 1)
			    break;
			  CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
						  vec_oprnd0);
			}
		      if (k == 1)
			vargs.safe_push (vec_oprnd0);
		      else
			{
			  vec_oprnd0 = build_constructor (atype, ctor_elts);
			  new_stmt
			    = gimple_build_assign (make_ssa_name (atype, NULL),
						   vec_oprnd0);
			  vect_finish_stmt_generation (stmt, new_stmt, gsi);
			  vargs.safe_push (gimple_assign_lhs (new_stmt));
			}
		    }
		}
	      break;
	    case SIMD_CLONE_ARG_TYPE_UNIFORM:
	      vargs.safe_push (op);
	      break;
	    case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
	      if (j == 0)
		{
		  gimple_seq stmts;
		  arginfo[i].op
		    = force_gimple_operand (arginfo[i].op, &stmts, true,
					    NULL_TREE);
		  if (stmts != NULL)
		    {
		      basic_block new_bb;
		      edge pe = loop_preheader_edge (loop);
		      new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
		      gcc_assert (!new_bb);
		    }
		  tree phi_res = copy_ssa_name (op, NULL);
		  gimple new_phi = create_phi_node (phi_res, loop->header);
		  set_vinfo_for_stmt (new_phi,
				      new_stmt_vec_info (new_phi, loop_vinfo,
							 NULL));
		  add_phi_arg (new_phi, arginfo[i].op,
			       loop_preheader_edge (loop), UNKNOWN_LOCATION);
		  enum tree_code code
		    = POINTER_TYPE_P (TREE_TYPE (op))
		      ? POINTER_PLUS_EXPR : PLUS_EXPR;
		  tree type = POINTER_TYPE_P (TREE_TYPE (op))
			      ? sizetype : TREE_TYPE (op);
		  widest_int cst
		    = wi::mul (bestn->simdclone->args[i].linear_step,
			       ncopies * nunits);
		  tree tcst = wide_int_to_tree (type, cst);
		  tree phi_arg = copy_ssa_name (op, NULL);
		  new_stmt = gimple_build_assign_with_ops (code, phi_arg,
							   phi_res, tcst);
		  gimple_stmt_iterator si = gsi_after_labels (loop->header);
		  gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
		  set_vinfo_for_stmt (new_stmt,
				      new_stmt_vec_info (new_stmt, loop_vinfo,
							 NULL));
		  add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
			       UNKNOWN_LOCATION);
		  arginfo[i].op = phi_res;
		  vargs.safe_push (phi_res);
		}
	      else
		{
		  enum tree_code code
		    = POINTER_TYPE_P (TREE_TYPE (op))
		      ? POINTER_PLUS_EXPR : PLUS_EXPR;
		  tree type = POINTER_TYPE_P (TREE_TYPE (op))
			      ? sizetype : TREE_TYPE (op);
		  widest_int cst
		    = wi::mul (bestn->simdclone->args[i].linear_step,
			       j * nunits);
		  tree tcst = wide_int_to_tree (type, cst);
		  new_temp = make_ssa_name (TREE_TYPE (op), NULL);
		  new_stmt
		    = gimple_build_assign_with_ops (code, new_temp,
						    arginfo[i].op, tcst);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  vargs.safe_push (new_temp);
		}
	      break;
	    case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
	    default:
	      gcc_unreachable ();
	    }
	}

      new_stmt = gimple_build_call_vec (fndecl, vargs);
      if (vec_dest)
	{
	  gcc_assert (ratype || TYPE_VECTOR_SUBPARTS (rtype) == nunits);
	  if (ratype)
	    new_temp = create_tmp_var (ratype, NULL);
	  else if (TYPE_VECTOR_SUBPARTS (vectype)
		   == TYPE_VECTOR_SUBPARTS (rtype))
	    new_temp = make_ssa_name (vec_dest, new_stmt);
	  else
	    new_temp = make_ssa_name (rtype, new_stmt);
	  gimple_call_set_lhs (new_stmt, new_temp);
	}
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (vec_dest)
	{
	  if (TYPE_VECTOR_SUBPARTS (vectype) < nunits)
	    {
	      unsigned int k, l;
	      unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
	      k = nunits / TYPE_VECTOR_SUBPARTS (vectype);
	      gcc_assert ((k & (k - 1)) == 0);
	      for (l = 0; l < k; l++)
		{
		  tree t;
		  if (ratype)
		    {
		      t = build_fold_addr_expr (new_temp);
		      t = build2 (MEM_REF, vectype, t,
				  build_int_cst (TREE_TYPE (t),
						 l * prec / BITS_PER_UNIT));
		    }
		  else
		    t = build3 (BIT_FIELD_REF, vectype, new_temp,
				size_int (prec), bitsize_int (l * prec));
		  new_stmt
		    = gimple_build_assign (make_ssa_name (vectype, NULL), t);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  if (j == 0 && l == 0)
		    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

		  prev_stmt_info = vinfo_for_stmt (new_stmt);
		}

	      if (ratype)
		{
		  tree clobber = build_constructor (ratype, NULL);
		  TREE_THIS_VOLATILE (clobber) = 1;
		  new_stmt = gimple_build_assign (new_temp, clobber);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      continue;
	    }
	  else if (TYPE_VECTOR_SUBPARTS (vectype) > nunits)
	    {
	      unsigned int k = (TYPE_VECTOR_SUBPARTS (vectype)
				/ TYPE_VECTOR_SUBPARTS (rtype));
	      gcc_assert ((k & (k - 1)) == 0);
	      if ((j & (k - 1)) == 0)
		vec_alloc (ret_ctor_elts, k);
	      if (ratype)
		{
		  unsigned int m, o = nunits / TYPE_VECTOR_SUBPARTS (rtype);
		  for (m = 0; m < o; m++)
		    {
		      tree tem = build4 (ARRAY_REF, rtype, new_temp,
					 size_int (m), NULL_TREE, NULL_TREE);
		      new_stmt
			= gimple_build_assign (make_ssa_name (rtype, NULL),
					       tem);
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		      CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
					      gimple_assign_lhs (new_stmt));
		    }
		  tree clobber = build_constructor (ratype, NULL);
		  TREE_THIS_VOLATILE (clobber) = 1;
		  new_stmt = gimple_build_assign (new_temp, clobber);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      else
		CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
	      if ((j & (k - 1)) != k - 1)
		continue;
	      vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
	      new_stmt
		= gimple_build_assign (make_ssa_name (vec_dest, NULL),
				       vec_oprnd0);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);

	      if ((unsigned) j == k - 1)
		STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	      prev_stmt_info = vinfo_for_stmt (new_stmt);
	      continue;
	    }
	  else if (ratype)
	    {
	      tree t = build_fold_addr_expr (new_temp);
	      t = build2 (MEM_REF, vectype, t,
			  build_int_cst (TREE_TYPE (t), 0));
	      new_stmt
		= gimple_build_assign (make_ssa_name (vec_dest, NULL), t);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      tree clobber = build_constructor (ratype, NULL);
	      TREE_THIS_VOLATILE (clobber) = 1;
	      vect_finish_stmt_generation (stmt,
					   gimple_build_assign (new_temp,
								clobber), gsi);
	    }
	}

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }
  vargs.release ();
  arginfo.release ();

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  if (scalar_dest)
    {
      type = TREE_TYPE (scalar_dest);
      if (is_pattern_stmt_p (stmt_info))
	lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
      else
	lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
    }
  else
    new_stmt = gimple_build_nop ();
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, false);
  unlink_stmt_vdef (stmt);

  return true;
}
/* Function vect_gen_widened_results_half

   Create a vector stmt whose code, type, number of arguments, and result
   variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
   VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at BSI.
   In the case that CODE is a CALL_EXPR, this means that a call to DECL
   needs to be created (DECL is a function-decl of a target-builtin).
   STMT is the original scalar stmt that we are vectorizing.  */
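/* For instance (an illustrative pairing), a widening multiply
   V8HI * V8HI -> eight SI results is emitted as two halves,

     low:  VEC_WIDEN_MULT_LO_EXPR <v0, v1>   producing a V4SI
     high: VEC_WIDEN_MULT_HI_EXPR <v0, v1>   producing a V4SI

   and this helper builds one such half per call.  */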
static gimple
vect_gen_widened_results_half (enum tree_code code,
			       tree decl,
			       tree vec_oprnd0, tree vec_oprnd1, int op_type,
			       tree vec_dest, gimple_stmt_iterator *gsi,
			       gimple stmt)
{
  gimple new_stmt;
  tree new_temp;

  /* Generate half of the widened result:  */
  if (code == CALL_EXPR)
    {
      /* Target specific support  */
      if (op_type == binary_op)
	new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
      else
	new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_call_set_lhs (new_stmt, new_temp);
    }
  else
    {
      /* Generic support */
      gcc_assert (op_type == TREE_CODE_LENGTH (code));
      if (op_type != binary_op)
	vec_oprnd1 = NULL;
      new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
					       vec_oprnd1);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
    }
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return new_stmt;
}
/* Get vectorized definitions for loop-based vectorization.  For the first
   operand we call vect_get_vec_def_for_operand() (with OPRND containing
   scalar operand), and for the rest we get a copy with
   vect_get_vec_def_for_stmt_copy() using the previous vector definition
   (stored in OPRND).  See vect_get_vec_def_for_stmt_copy() for details.
   The vectors are collected into VEC_OPRNDS.  */

static void
vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
			  vec<tree> *vec_oprnds, int multi_step_cvt)
{
  tree vec_oprnd;

  /* Get first vector operand.  */
  /* All the vector operands except the very first one (that is scalar oprnd)
     are stmt copies.  */
  if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
    vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
  else
    vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);

  vec_oprnds->quick_push (vec_oprnd);

  /* Get second vector operand.  */
  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
  vec_oprnds->quick_push (vec_oprnd);

  *oprnd = vec_oprnd;

  /* For conversion in multiple steps, continue to get operands
     recursively.  */
  if (multi_step_cvt)
    vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
}
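/* Note (an informal invariant rather than something checked here): a
   call with MULTI_STEP_CVT == N collects 2 * (N + 1) vector defs, so
   the narrowing path in vectorizable_conversion passes
   vect_pow2 (multi_step_cvt) - 1 to end up with a single final vector
   after the operand count is halved at each demotion step.  */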
/* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
   For multi-step conversions store the resulting vectors and call the function
   recursively.  */

static void
vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
				       int multi_step_cvt, gimple stmt,
				       vec<tree> vec_dsts,
				       gimple_stmt_iterator *gsi,
				       slp_tree slp_node, enum tree_code code,
				       stmt_vec_info *prev_stmt_info)
{
  unsigned int i;
  tree vop0, vop1, new_tmp, vec_dest;
  gimple new_stmt;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  vec_dest = vec_dsts.pop ();

  for (i = 0; i < vec_oprnds->length (); i += 2)
    {
      /* Create demotion operation.  */
      vop0 = (*vec_oprnds)[i];
      vop1 = (*vec_oprnds)[i + 1];
      new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
      new_tmp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_tmp);
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (multi_step_cvt)
	/* Store the resulting vector for next recursive call.  */
	(*vec_oprnds)[i/2] = new_tmp;
      else
	{
	  /* This is the last step of the conversion sequence. Store the
	     vectors in SLP_NODE or in vector info of the scalar statement
	     (or in STMT_VINFO_RELATED_STMT chain).  */
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	  else
	    {
	      if (!*prev_stmt_info)
		STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;

	      *prev_stmt_info = vinfo_for_stmt (new_stmt);
	    }
	}
    }

  /* For multi-step demotion operations we first generate demotion operations
     from the source type to the intermediate types, and then combine the
     results (stored in VEC_OPRNDS) in demotion operation to the destination
     type.  */
  if (multi_step_cvt)
    {
      /* At each level of recursion we have half of the operands we had at the
	 previous level.  */
      vec_oprnds->truncate ((i+1)/2);
      vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
					     stmt, vec_dsts, gsi, slp_node,
					     VEC_PACK_TRUNC_EXPR,
					     prev_stmt_info);
    }

  vec_dsts.quick_push (vec_dest);
}
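/* For example (illustrative types): demoting a pair of V4SI operands
   to one V8HI uses VEC_PACK_TRUNC_EXPR, which truncates and
   concatenates,

     v8hi_1 = VEC_PACK_TRUNC_EXPR <v4si_a, v4si_b>;

   so each recursion level halves the number of vectors in
   VEC_OPRNDS.  */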
/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
   and VEC_OPRNDS1 (for binary operations).  For multi-step conversions store
   the resulting vectors and call the function recursively.  */

static void
vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
					vec<tree> *vec_oprnds1,
					gimple stmt, tree vec_dest,
					gimple_stmt_iterator *gsi,
					enum tree_code code1,
					enum tree_code code2, tree decl1,
					tree decl2, int op_type)
{
  int i;
  tree vop0, vop1, new_tmp1, new_tmp2;
  gimple new_stmt1, new_stmt2;
  vec<tree> vec_tmp = vNULL;

  vec_tmp.create (vec_oprnds0->length () * 2);
  FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
    {
      if (op_type == binary_op)
	vop1 = (*vec_oprnds1)[i];
      else
	vop1 = NULL_TREE;

      /* Generate the two halves of promotion operation.  */
      new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
						 op_type, vec_dest, gsi, stmt);
      new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
						 op_type, vec_dest, gsi, stmt);
      if (is_gimple_call (new_stmt1))
	{
	  new_tmp1 = gimple_call_lhs (new_stmt1);
	  new_tmp2 = gimple_call_lhs (new_stmt2);
	}
      else
	{
	  new_tmp1 = gimple_assign_lhs (new_stmt1);
	  new_tmp2 = gimple_assign_lhs (new_stmt2);
	}

      /* Store the results for the next step.  */
      vec_tmp.quick_push (new_tmp1);
      vec_tmp.quick_push (new_tmp2);
    }

  vec_oprnds0->release ();
  *vec_oprnds0 = vec_tmp;
}
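/* For example (illustrative types): promoting one V8HI operand to two
   V4SI results uses the lo/hi unpack pair

     v4si_lo = VEC_UNPACK_LO_EXPR <v8hi_1>;
     v4si_hi = VEC_UNPACK_HI_EXPR <v8hi_1>;

   which is why VEC_TMP grows to twice the length of VEC_OPRNDS0.  */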
/* Check if STMT performs a conversion operation, that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
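/* An illustrative multi-step case: converting "short" to "float" with
   a V8HI input may first widen V8HI to two V4SI vectors (NOP
   conversions) and then convert each V4SI to V4SF, using the
   intermediate types recorded in INTERM_TYPES below.  */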
static bool
vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
			 gimple *vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL_TREE;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
  enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
  tree decl1 = NULL_TREE, decl2 = NULL_TREE;
  tree new_temp;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out, vectype_in;
  int ncopies, i, j;
  tree lhs_type, rhs_type;
  enum { NARROW, NONE, WIDEN } modifier;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  tree vop0;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  int multi_step_cvt = 0;
  vec<tree> vec_dsts = vNULL;
  vec<tree> interm_types = vNULL;
  tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
  int op_type;
  enum machine_mode rhs_mode;
  unsigned short fltsz;

  /* Is STMT a vectorizable conversion?   */

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (!CONVERT_EXPR_CODE_P (code)
      && code != FIX_TRUNC_EXPR
      && code != FLOAT_EXPR
      && code != WIDEN_MULT_EXPR
      && code != WIDEN_LSHIFT_EXPR)
    return false;

  op_type = TREE_CODE_LENGTH (code);

  /* Check types of lhs and rhs.  */
  scalar_dest = gimple_assign_lhs (stmt);
  lhs_type = TREE_TYPE (scalar_dest);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  op0 = gimple_assign_rhs1 (stmt);
  rhs_type = TREE_TYPE (op0);

  if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
      && !((INTEGRAL_TYPE_P (lhs_type)
	    && INTEGRAL_TYPE_P (rhs_type))
	   || (SCALAR_FLOAT_TYPE_P (lhs_type)
	       && SCALAR_FLOAT_TYPE_P (rhs_type))))
    return false;

  if ((INTEGRAL_TYPE_P (lhs_type)
       && (TYPE_PRECISION (lhs_type)
	   != GET_MODE_PRECISION (TYPE_MODE (lhs_type))))
      || (INTEGRAL_TYPE_P (rhs_type)
	  && (TYPE_PRECISION (rhs_type)
	      != GET_MODE_PRECISION (TYPE_MODE (rhs_type)))))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "type conversion to/from bit-precision unsupported."
                         "\n");
      return false;
    }

  /* Check the operands of the operation.  */
  if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
			     &def_stmt, &def, &dt[0], &vectype_in))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }
  if (op_type == binary_op)
    {
      bool ok;

      op1 = gimple_assign_rhs2 (stmt);
      gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
      /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
	 OP1.  */
      if (CONSTANT_CLASS_P (op0))
	ok = vect_is_simple_use_1 (op1, stmt, loop_vinfo, bb_vinfo,
				   &def_stmt, &def, &dt[1], &vectype_in);
      else
	ok = vect_is_simple_use (op1, stmt, loop_vinfo, bb_vinfo, &def_stmt,
				 &def, &dt[1]);

      if (!ok)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "use not simple.\n");
	  return false;
	}
    }

  /* If op0 is an external or constant defs use a vector type of
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "no vectype for scalar type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}

      return false;
    }

  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in < nunits_out)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else
    modifier = WIDEN;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);
  /* Supportable by target?  */
  switch (modifier)
    {
    case NONE:
      if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
	return false;
      if (supportable_convert_operation (code, vectype_out, vectype_in,
					 &decl1, &code1))
	break;
      /* FALLTHRU */
    unsupported:
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "conversion not supported by target.\n");
      return false;

    case WIDEN:
      if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
					  &code1, &code2, &multi_step_cvt,
					  &interm_types))
	{
	  /* Binary widening operation can only be supported directly by the
	     architecture.  */
	  gcc_assert (!(multi_step_cvt && op_type == binary_op));
	  break;
	}

      if (code != FLOAT_EXPR
	  || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
	      <= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
	goto unsupported;

      rhs_mode = TYPE_MODE (rhs_type);
      fltsz = GET_MODE_SIZE (TYPE_MODE (lhs_type));
      for (rhs_mode = GET_MODE_2XWIDER_MODE (TYPE_MODE (rhs_type));
	   rhs_mode != VOIDmode && GET_MODE_SIZE (rhs_mode) <= fltsz;
	   rhs_mode = GET_MODE_2XWIDER_MODE (rhs_mode))
	{
	  cvt_type
	    = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
	  cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
	  if (cvt_type == NULL_TREE)
	    goto unsupported;

	  if (GET_MODE_SIZE (rhs_mode) == fltsz)
	    {
	      if (!supportable_convert_operation (code, vectype_out,
						  cvt_type, &decl1, &codecvt1))
		goto unsupported;
	    }
	  else if (!supportable_widening_operation (code, stmt, vectype_out,
						    cvt_type, &codecvt1,
						    &codecvt2, &multi_step_cvt,
						    &interm_types))
	    continue;
	  else
	    gcc_assert (multi_step_cvt == 0);

	  if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
					      vectype_in, &code1, &code2,
					      &multi_step_cvt, &interm_types))
	    break;
	}

      if (rhs_mode == VOIDmode || GET_MODE_SIZE (rhs_mode) > fltsz)
	goto unsupported;

      if (GET_MODE_SIZE (rhs_mode) == fltsz)
	codecvt2 = ERROR_MARK;
      else
	{
	  multi_step_cvt++;
	  interm_types.safe_push (cvt_type);
	  cvt_type = NULL_TREE;
	}
      break;

    case NARROW:
      gcc_assert (op_type == unary_op);
      if (supportable_narrowing_operation (code, vectype_out, vectype_in,
					   &code1, &multi_step_cvt,
					   &interm_types))
	break;

      if (code != FIX_TRUNC_EXPR
	  || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
	      >= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
	goto unsupported;

      rhs_mode = TYPE_MODE (rhs_type);
      cvt_type
	= build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
      cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
      if (cvt_type == NULL_TREE)
	goto unsupported;
      if (!supportable_convert_operation (code, cvt_type, vectype_in,
					  &decl1, &codecvt1))
	goto unsupported;
      if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
					   &code1, &multi_step_cvt,
					   &interm_types))
	break;
      goto unsupported;

    default:
      gcc_unreachable ();
    }
  if (!vec_stmt)		/* transformation not required.  */
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_conversion ===\n");
      if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
	{
	  STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
	  vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
	}
      else if (modifier == NARROW)
	{
	  STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
	  vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
	}
      else
	{
	  STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
	  vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
	}
      interm_types.release ();
      return true;
    }

  /** Transform.  **/
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform conversion. ncopies = %d.\n", ncopies);

  if (op_type == binary_op)
    {
      if (CONSTANT_CLASS_P (op0))
	op0 = fold_convert (TREE_TYPE (op1), op0);
      else if (CONSTANT_CLASS_P (op1))
	op1 = fold_convert (TREE_TYPE (op0), op1);
    }

  /* In case of multi-step conversion, we first generate conversion operations
     to the intermediate types, and then from that types to the final one.
     We create vector destinations for the intermediate type (TYPES) received
     from supportable_*_operation, and store them in the correct order
     for future use in vect_create_vectorized_*_stmts ().  */
  vec_dsts.create (multi_step_cvt + 1);
  vec_dest = vect_create_destination_var (scalar_dest,
					  (cvt_type && modifier == WIDEN)
					  ? cvt_type : vectype_out);
  vec_dsts.quick_push (vec_dest);

  if (multi_step_cvt)
    {
      for (i = interm_types.length () - 1;
	   interm_types.iterate (i, &intermediate_type); i--)
	{
	  vec_dest = vect_create_destination_var (scalar_dest,
						  intermediate_type);
	  vec_dsts.quick_push (vec_dest);
	}
    }

  if (cvt_type)
    vec_dest = vect_create_destination_var (scalar_dest,
					    modifier == WIDEN
					    ? vectype_out : cvt_type);

  if (!slp_node)
    {
      if (modifier == WIDEN)
	{
	  vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
	  if (op_type == binary_op)
	    vec_oprnds1.create (1);
	}
      else if (modifier == NARROW)
	vec_oprnds0.create (
		   2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
    }
  else if (code == WIDEN_LSHIFT_EXPR)
    vec_oprnds1.create (slp_node->vec_stmts_size);

  last_oprnd = op0;
  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; j++)
	{
	  if (j == 0)
	    vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node,
			       -1);
	  else
	    vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);

	  FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	    {
	      /* Arguments are ready, create the new vector stmt.  */
	      if (code1 == CALL_EXPR)
		{
		  new_stmt = gimple_build_call (decl1, 1, vop0);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_call_set_lhs (new_stmt, new_temp);
		}
	      else
		{
		  gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
		  new_stmt = gimple_build_assign_with_ops (code1, vec_dest,
							   vop0, NULL);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_assign_set_lhs (new_stmt, new_temp);
		}

	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      if (slp_node)
		SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	    }

	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
      break;
    case WIDEN:
      /* In case the vectorization factor (VF) is bigger than the number
	 of elements that we can fit in a vectype (nunits), we have to
	 generate more than one vector stmt - i.e - we need to "unroll"
	 the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
	{
	  /* Handle uses.  */
	  if (j == 0)
	    {
	      if (slp_node)
		{
		  if (code == WIDEN_LSHIFT_EXPR)
		    {
		      unsigned int k;

		      vec_oprnd1 = op1;
		      /* Store vec_oprnd1 for every vector stmt to be created
			 for SLP_NODE.  We check during the analysis that all
			 the shift arguments are the same.  */
		      for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
			vec_oprnds1.quick_push (vec_oprnd1);

		      vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
					 slp_node, -1);
		    }
		  else
		    vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
				       &vec_oprnds1, slp_node, -1);
		}
	      else
		{
		  vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
		  vec_oprnds0.quick_push (vec_oprnd0);
		  if (op_type == binary_op)
		    {
		      if (code == WIDEN_LSHIFT_EXPR)
			vec_oprnd1 = op1;
		      else
			vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt,
								   NULL);
		      vec_oprnds1.quick_push (vec_oprnd1);
		    }
		}
	    }
	  else
	    {
	      vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
	      vec_oprnds0.truncate (0);
	      vec_oprnds0.quick_push (vec_oprnd0);
	      if (op_type == binary_op)
		{
		  if (code == WIDEN_LSHIFT_EXPR)
		    vec_oprnd1 = op1;
		  else
		    vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
								 vec_oprnd1);
		  vec_oprnds1.truncate (0);
		  vec_oprnds1.quick_push (vec_oprnd1);
		}
	    }

	  /* Arguments are ready.  Create the new vector stmts.  */
	  for (i = multi_step_cvt; i >= 0; i--)
	    {
	      tree this_dest = vec_dsts[i];
	      enum tree_code c1 = code1, c2 = code2;
	      if (i == 0 && codecvt2 != ERROR_MARK)
		{
		  c1 = codecvt1;
		  c2 = codecvt2;
		}
	      vect_create_vectorized_promotion_stmts (&vec_oprnds0,
						      &vec_oprnds1,
						      stmt, this_dest, gsi,
						      c1, c2, decl1, decl2,
						      op_type);
	    }

	  FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	    {
	      if (cvt_type)
		{
		  if (codecvt1 == CALL_EXPR)
		    {
		      new_stmt = gimple_build_call (decl1, 1, vop0);
		      new_temp = make_ssa_name (vec_dest, new_stmt);
		      gimple_call_set_lhs (new_stmt, new_temp);
		    }
		  else
		    {
		      gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
		      new_temp = make_ssa_name (vec_dest, NULL);
		      new_stmt = gimple_build_assign_with_ops (codecvt1,
							       new_temp,
							       vop0, NULL);
		    }

		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      else
		new_stmt = SSA_NAME_DEF_STMT (vop0);

	      if (slp_node)
		SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	      else
		{
		  if (!prev_stmt_info)
		    STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
		  prev_stmt_info = vinfo_for_stmt (new_stmt);
		}
	    }
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      break;
    case NARROW:
      /* In case the vectorization factor (VF) is bigger than the number
	 of elements that we can fit in a vectype (nunits), we have to
	 generate more than one vector stmt - i.e - we need to "unroll"
	 the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
	{
	  /* Handle uses.  */
	  if (slp_node)
	    vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
			       slp_node, -1);
	  else
	    {
	      vec_oprnds0.truncate (0);
	      vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
					vect_pow2 (multi_step_cvt) - 1);
	    }

	  /* Arguments are ready.  Create the new vector stmts.  */
	  if (cvt_type)
	    FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	      {
		if (codecvt1 == CALL_EXPR)
		  {
		    new_stmt = gimple_build_call (decl1, 1, vop0);
		    new_temp = make_ssa_name (vec_dest, new_stmt);
		    gimple_call_set_lhs (new_stmt, new_temp);
		  }
		else
		  {
		    gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
		    new_temp = make_ssa_name (vec_dest, NULL);
		    new_stmt = gimple_build_assign_with_ops (codecvt1, new_temp,
							     vop0, NULL);
		  }

		vect_finish_stmt_generation (stmt, new_stmt, gsi);
		vec_oprnds0[i] = new_temp;
	      }

	  vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
						 stmt, vec_dsts, gsi,
						 slp_node, code1,
						 &prev_stmt_info);
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      break;
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_dsts.release ();
  interm_types.release ();

  return true;
}
/* Function vectorizable_assignment.

   Check if STMT performs an assignment (copy) that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
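/* For example, a no-op conversion such as

     S1: x_1 = (unsigned int) y_2;

   where both types have the same mode is vectorized below as a plain
   copy through a VIEW_CONVERT_EXPR on the vector operand.  */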
static bool
vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
			 gimple *vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree new_temp;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies;
  int i, j;
  vec<tree> vec_oprnds = vNULL;
  tree vop;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  gimple new_stmt = NULL;
  stmt_vec_info prev_stmt_info = NULL;
  enum tree_code code;
  tree vectype_in;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is vectorizable assignment?  */
  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (gimple_assign_single_p (stmt)
      || code == PAREN_EXPR
      || CONVERT_EXPR_CODE_P (code))
    op = gimple_assign_rhs1 (stmt);
  else
    return false;

  if (code == VIEW_CONVERT_EXPR)
    op = TREE_OPERAND (op, 0);

  if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
			     &def_stmt, &def, &dt[0], &vectype_in))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }

  /* We can handle NOP_EXPR conversions that do not change the number
     of elements or the vector size.  */
  if ((CONVERT_EXPR_CODE_P (code)
       || code == VIEW_CONVERT_EXPR)
      && (!vectype_in
	  || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
	  || (GET_MODE_SIZE (TYPE_MODE (vectype))
	      != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
    return false;

  /* We do not handle bit-precision changes.  */
  if ((CONVERT_EXPR_CODE_P (code)
       || code == VIEW_CONVERT_EXPR)
      && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
      && ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
	   != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
	  || ((TYPE_PRECISION (TREE_TYPE (op))
	       != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op))))))
      /* But a conversion that does not change the bit-pattern is ok.  */
      && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
	    > TYPE_PRECISION (TREE_TYPE (op)))
	   && TYPE_UNSIGNED (TREE_TYPE (op))))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "type conversion to/from bit-precision "
                         "unsupported.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
                         "=== vectorizable_assignment ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /** Transform.  **/
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle use.  */
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
	vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node, -1);
      else
	vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);

      /* Arguments are ready. create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
	{
	  if (CONVERT_EXPR_CODE_P (code)
	      || code == VIEW_CONVERT_EXPR)
	    vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
	  new_stmt = gimple_build_assign (vec_dest, vop);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds.release ();
  return true;
}
/* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
   either as shift by a scalar or by a vector.  */

bool
vect_supportable_shift (enum tree_code code, tree scalar_type)
{

  enum machine_mode vec_mode;
  optab optab;
  int icode;
  tree vectype;

  vectype = get_vectype_for_scalar_type (scalar_type);
  if (!vectype)
    return false;

  optab = optab_for_tree_code (code, vectype, optab_scalar);
  if (!optab
      || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (!optab
	  || (optab_handler (optab, TYPE_MODE (vectype))
	      == CODE_FOR_nothing))
	return false;
    }

  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    return false;

  return true;
}
/* Function vectorizable_shift.

   Check if STMT performs a shift operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
static bool
vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
                    gimple *vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL;
  tree vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code;
  enum machine_mode vec_mode;
  tree new_temp;
  optab optab;
  int icode;
  enum machine_mode optab_op2_mode;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out;
  tree op1_vectype;
  int ncopies;
  int j, i;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  tree vop0, vop1;
  unsigned int k;
  bool scalar_shift_arg = true;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  int vf;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is STMT a vectorizable binary/unary operation?   */
  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
	|| code == RROTATE_EXPR))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);
  if (TYPE_PRECISION (TREE_TYPE (scalar_dest))
      != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bit-precision shifts not supported.\n");
      return false;
    }

  op0 = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
                             &def_stmt, &def, &dt[0], &vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }
  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  if (!vectype)
    vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
  if (vec_stmt)
    gcc_assert (vectype);
  if (!vectype)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "no vectype for scalar type\n");
      return false;
    }

  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
  if (nunits_out != nunits_in)
    return false;

  op1 = gimple_assign_rhs2 (stmt);
  if (!vect_is_simple_use_1 (op1, stmt, loop_vinfo, bb_vinfo, &def_stmt,
			     &def, &dt[1], &op1_vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }

  if (loop_vinfo)
    vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  gcc_assert (ncopies >= 1);

  /* Determine whether the shift amount is a vector, or scalar.  If the
     shift/rotate amount is a vector, use the vector/vector shift optabs.  */

  if (dt[1] == vect_internal_def && !slp_node)
    scalar_shift_arg = false;
  else if (dt[1] == vect_constant_def
	   || dt[1] == vect_external_def
	   || dt[1] == vect_internal_def)
    {
      /* In SLP, need to check whether the shift count is the same,
	 in loops if it is a constant or invariant, it is always
	 a scalar shift.  */
      if (slp_node)
	{
	  vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
	  gimple slpstmt;

	  FOR_EACH_VEC_ELT (stmts, k, slpstmt)
	    if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
	      scalar_shift_arg = false;
	}
    }
  else
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "operand mode requires invariant argument.\n");
      return false;
    }

  /* Vector shifted by vector.  */
  if (!scalar_shift_arg)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
                         "vector/vector shift/rotate found.\n");

      if (!op1_vectype)
	op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
      if (op1_vectype == NULL_TREE
	  || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "unusable type for last operand in"
                             " vector/vector shift/rotate.\n");
	  return false;
	}
    }
  /* See if the machine has a vector shifted by scalar insn and if not
4351 then see if it has a vector shifted by vector insn. */
4354 optab
= optab_for_tree_code (code
, vectype
, optab_scalar
);
4356 && optab_handler (optab
, TYPE_MODE (vectype
)) != CODE_FOR_nothing
)
4358 if (dump_enabled_p ())
4359 dump_printf_loc (MSG_NOTE
, vect_location
,
4360 "vector/scalar shift/rotate found.\n");
4364 optab
= optab_for_tree_code (code
, vectype
, optab_vector
);
4366 && (optab_handler (optab
, TYPE_MODE (vectype
))
4367 != CODE_FOR_nothing
))
4369 scalar_shift_arg
= false;
4371 if (dump_enabled_p ())
4372 dump_printf_loc (MSG_NOTE
, vect_location
,
4373 "vector/vector shift/rotate found.\n");
4375 /* Unlike the other binary operators, shifts/rotates have
4376 the rhs being int, instead of the same type as the lhs,
4377 so make sure the scalar is the right type if we are
4378 dealing with vectors of long long/long/short/char. */
4379 if (dt
[1] == vect_constant_def
)
4380 op1
= fold_convert (TREE_TYPE (vectype
), op1
);
4381 else if (!useless_type_conversion_p (TREE_TYPE (vectype
),
4385 && TYPE_MODE (TREE_TYPE (vectype
))
4386 != TYPE_MODE (TREE_TYPE (op1
)))
4388 if (dump_enabled_p ())
4389 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4390 "unusable type for last operand in"
4391 " vector/vector shift/rotate.\n");
4394 if (vec_stmt
&& !slp_node
)
4396 op1
= fold_convert (TREE_TYPE (vectype
), op1
);
4397 op1
= vect_init_vector (stmt
, op1
,
4398 TREE_TYPE (vectype
), NULL
);
4405 /* Supportable by target? */
4408 if (dump_enabled_p ())
4409 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4413 vec_mode
= TYPE_MODE (vectype
);
4414 icode
= (int) optab_handler (optab
, vec_mode
);
4415 if (icode
== CODE_FOR_nothing
)
4417 if (dump_enabled_p ())
4418 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4419 "op not supported by target.\n");
4420 /* Check only during analysis. */
4421 if (GET_MODE_SIZE (vec_mode
) != UNITS_PER_WORD
4422 || (vf
< vect_min_worthwhile_factor (code
)
4425 if (dump_enabled_p ())
4426 dump_printf_loc (MSG_NOTE
, vect_location
,
4427 "proceeding using word mode.\n");
4430 /* Worthwhile without SIMD support? Check only during analysis. */
4431 if (!VECTOR_MODE_P (TYPE_MODE (vectype
))
4432 && vf
< vect_min_worthwhile_factor (code
)
4435 if (dump_enabled_p ())
4436 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4437 "not worthwhile without SIMD support.\n");
4441 if (!vec_stmt
) /* transformation not required. */
4443 STMT_VINFO_TYPE (stmt_info
) = shift_vec_info_type
;
4444 if (dump_enabled_p ())
4445 dump_printf_loc (MSG_NOTE
, vect_location
,
4446 "=== vectorizable_shift ===\n");
4447 vect_model_simple_cost (stmt_info
, ncopies
, dt
, NULL
, NULL
);
4453 if (dump_enabled_p ())
4454 dump_printf_loc (MSG_NOTE
, vect_location
,
4455 "transform binary/unary operation.\n");
4458 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
4460 prev_stmt_info
= NULL
;
4461 for (j
= 0; j
< ncopies
; j
++)
4466 if (scalar_shift_arg
)
4468 /* Vector shl and shr insn patterns can be defined with scalar
4469 operand 2 (shift operand). In this case, use constant or loop
4470 invariant op1 directly, without extending it to vector mode
4472 optab_op2_mode
= insn_data
[icode
].operand
[2].mode
;
4473 if (!VECTOR_MODE_P (optab_op2_mode
))
4475 if (dump_enabled_p ())
4476 dump_printf_loc (MSG_NOTE
, vect_location
,
4477 "operand 1 using scalar mode.\n");
4479 vec_oprnds1
.create (slp_node
? slp_node
->vec_stmts_size
: 1);
4480 vec_oprnds1
.quick_push (vec_oprnd1
);
4483 /* Store vec_oprnd1 for every vector stmt to be created
4484 for SLP_NODE. We check during the analysis that all
4485 the shift arguments are the same.
4486 TODO: Allow different constants for different vector
4487 stmts generated for an SLP instance. */
4488 for (k
= 0; k
< slp_node
->vec_stmts_size
- 1; k
++)
4489 vec_oprnds1
.quick_push (vec_oprnd1
);
4494 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
4495 (a special case for certain kind of vector shifts); otherwise,
4496 operand 1 should be of a vector type (the usual case). */
4498 vect_get_vec_defs (op0
, NULL_TREE
, stmt
, &vec_oprnds0
, NULL
,
4501 vect_get_vec_defs (op0
, op1
, stmt
, &vec_oprnds0
, &vec_oprnds1
,
4505 vect_get_vec_defs_for_stmt_copy (dt
, &vec_oprnds0
, &vec_oprnds1
);
4507 /* Arguments are ready. Create the new vector stmt. */
4508 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vop0
)
4510 vop1
= vec_oprnds1
[i
];
4511 new_stmt
= gimple_build_assign_with_ops (code
, vec_dest
, vop0
, vop1
);
4512 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
4513 gimple_assign_set_lhs (new_stmt
, new_temp
);
4514 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
4516 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
4523 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
4525 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
4526 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
4529 vec_oprnds0
.release ();
4530 vec_oprnds1
.release ();
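
/* Illustrative sketch (not part of GCC): the two shift flavours the
   analysis above distinguishes.  The loops and arrays are hypothetical.

     for (i = 0; i < n; i++)      // shift amount loop-invariant:
       a[i] = b[i] << k;          // optab_scalar, scalar_shift_arg = true

     for (i = 0; i < n; i++)      // shift amount varies per element:
       a[i] = b[i] << c[i];       // optab_vector, scalar_shift_arg = false

   In the scalar case the constant/invariant amount may be used directly
   when insn_data[icode].operand[2].mode is not a vector mode; otherwise
   it is broadcast into a vector by vect_init_vector, as done above.  */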
/* Function vectorizable_operation.

   Check if STMT performs a binary, unary or ternary operation that can
   be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
                        gimple *vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code;
  enum machine_mode vec_mode;
  tree new_temp;
  int op_type;
  optab optab;
  int icode;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  gimple new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out;
  int ncopies;
  int j, i;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  vec<tree> vec_oprnds2 = vNULL;
  tree vop0, vop1, vop2;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  int vf;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is STMT a vectorizable binary/unary operation?   */
  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);

  /* For pointer addition, we should use the normal plus for
     the vector addition.  */
  if (code == POINTER_PLUS_EXPR)
    code = PLUS_EXPR;

  /* Support only unary or binary operations.  */
  op_type = TREE_CODE_LENGTH (code);
  if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "num. args = %d (not unary/binary/ternary op).\n",
                         op_type);
      return false;
    }

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Most operations cannot handle bit-precision types without extra
     truncations.  */
  if ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
       != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
      /* Exception are bitwise binary operations.  */
      && code != BIT_IOR_EXPR
      && code != BIT_XOR_EXPR
      && code != BIT_AND_EXPR)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bit-precision arithmetic not supported.\n");
      return false;
    }

  op0 = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
                             &def_stmt, &def, &dt[0], &vectype))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }
  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  if (!vectype)
    vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
  if (vec_stmt)
    gcc_assert (vectype);
  if (!vectype)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "no vectype for scalar type ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                             TREE_TYPE (op0));
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      return false;
    }

  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
  if (nunits_out != nunits_in)
    return false;

  if (op_type == binary_op || op_type == ternary_op)
    {
      op1 = gimple_assign_rhs2 (stmt);
      if (!vect_is_simple_use (op1, stmt, loop_vinfo, bb_vinfo, &def_stmt,
                               &def, &dt[1]))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "use not simple.\n");
          return false;
        }
    }
  if (op_type == ternary_op)
    {
      op2 = gimple_assign_rhs3 (stmt);
      if (!vect_is_simple_use (op2, stmt, loop_vinfo, bb_vinfo, &def_stmt,
                               &def, &dt[2]))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "use not simple.\n");
          return false;
        }
    }

  if (loop_vinfo)
    vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  gcc_assert (ncopies >= 1);

  /* Shifts are handled in vectorizable_shift ().  */
  if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
      || code == RROTATE_EXPR)
    return false;

  /* Supportable by target?  */

  vec_mode = TYPE_MODE (vectype);
  if (code == MULT_HIGHPART_EXPR)
    {
      if (can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype)))
        icode = LAST_INSN_CODE;
      else
        icode = CODE_FOR_nothing;
    }
  else
    {
      optab = optab_for_tree_code (code, vectype, optab_default);
      if (!optab)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "no optab.\n");
          return false;
        }
      icode = (int) optab_handler (optab, vec_mode);
    }

  if (icode == CODE_FOR_nothing)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "op not supported by target.\n");
      /* Check only during analysis.  */
      if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
          || (!vec_stmt && vf < vect_min_worthwhile_factor (code)))
        return false;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "proceeding using word mode.\n");
    }

  /* Worthwhile without SIMD support?  Check only during analysis.  */
  if (!VECTOR_MODE_P (vec_mode)
      && !vec_stmt
      && vf < vect_min_worthwhile_factor (code))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not worthwhile without SIMD support.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "=== vectorizable_operation ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /** Transform.  **/

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform binary/unary operation.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
     we need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1 (done in vectorizable_load. See more details
             there):
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     step2: vectorize stmt S2 (done here):
        To vectorize stmt S2 we first need to find the relevant vector
        def for the first operand 'x'.  This is, as usual, obtained from
        the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
        that defines 'x' (S1).  This way we find the stmt VS1_0, and the
        relevant vector def 'vx0'.  Having found 'vx0' we can generate
        the vector stmt VS2_0, and as usual, record it in the
        STMT_VINFO_VEC_STMT of stmt S2.
        When creating the second copy (VS2_1), we obtain the relevant vector
        def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
        stmt VS1_0.  This way we find the stmt VS1_1 and the relevant
        vector def 'vx1'.  Using 'vx1' we create stmt VS2_1 and record a
        pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
        Similarly when creating stmts VS2_2 and VS2_3.  This is the resulting
        chain of stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        VS2_0:  vz0 = vx0 + v1  VS2_1           -
        VS2_1:  vz1 = vx1 + v1  VS2_2           -
        VS2_2:  vz2 = vx2 + v1  VS2_3           -
        VS2_3:  vz3 = vx3 + v1  -               -
        S2:     z = x + 1       -               VS2_0  */

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
        {
          if (op_type == binary_op || op_type == ternary_op)
            vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
                               slp_node, -1);
          else
            vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
                               slp_node, -1);
          if (op_type == ternary_op)
            {
              vec_oprnds2.create (1);
              vec_oprnds2.quick_push (vect_get_vec_def_for_operand (op2,
                                                                    stmt,
                                                                    NULL));
            }
        }
      else
        {
          vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
          if (op_type == ternary_op)
            {
              tree vec_oprnd = vec_oprnds2.pop ();
              vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
                                                                      vec_oprnd));
            }
        }

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
        {
          vop1 = ((op_type == binary_op || op_type == ternary_op)
                  ? vec_oprnds1[i] : NULL_TREE);
          vop2 = ((op_type == ternary_op)
                  ? vec_oprnds2[i] : NULL_TREE);
          new_stmt = gimple_build_assign_with_ops (code, vec_dest,
                                                   vop0, vop1, vop2);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_oprnds2.release ();

  return true;
}
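
/* Illustrative sketch (not part of GCC): a binary operation and the
   unrolled vector statements produced when VF=8 and nunits=4, i.e.
   ncopies = 2.  The loop and arrays are hypothetical.

     for (i = 0; i < n; i++)
       z[i] = x[i] + y[i];

   becomes two vector adds per vectorized iteration, chained through
   STMT_VINFO_RELATED_STMT exactly as the comment above describes:

     vz0 = vx0 + vy0;   // copy j == 0, recorded in STMT_VINFO_VEC_STMT
     vz1 = vx1 + vy1;   // copy j == 1, linked via STMT_VINFO_RELATED_STMT  */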
/* A helper function to ensure data reference DR's base alignment
   for STMT_INFO.  */

static void
ensure_base_align (stmt_vec_info stmt_info, struct data_reference *dr)
{
  if (!dr->aux)
    return;

  if (((dataref_aux *)dr->aux)->base_misaligned)
    {
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      tree base_decl = ((dataref_aux *)dr->aux)->base_decl;

      DECL_ALIGN (base_decl) = TYPE_ALIGN (vectype);
      DECL_USER_ALIGN (base_decl) = 1;
      ((dataref_aux *)dr->aux)->base_misaligned = false;
    }
}
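
/* Illustrative sketch (not part of GCC): if data-ref analysis recorded
   that a base declaration is under-aligned for the chosen vector type,
   e.g. a hypothetical

     static double a[N];   // 8-byte aligned where V2DF wants 16

   ensure_base_align raises DECL_ALIGN of 'a' to TYPE_ALIGN of the
   vector type and marks it user-aligned so later passes keep the
   promotion, letting aligned vector loads/stores be emitted.  */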
/* Given a vector type VECTYPE returns the VECTOR_CST mask that implements
   reversal of the vector elements.  If that is impossible to do,
   returns NULL.  */

static tree
perm_mask_for_reverse (tree vectype)
{
  int i, nunits;
  unsigned char *sel;

  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  sel = XALLOCAVEC (unsigned char, nunits);

  for (i = 0; i < nunits; ++i)
    sel[i] = nunits - 1 - i;

  return vect_gen_perm_mask (vectype, sel);
}
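
/* Illustrative sketch (not part of GCC): for a hypothetical 4-element
   vector type the loop above fills SEL with {3, 2, 1, 0}, so the
   returned mask implements

     reversed = VEC_PERM_EXPR <v, v, {3, 2, 1, 0}>;

   or is NULL when the target cannot permute that mode.  */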
/* Function vectorizable_store.

   Check if STMT defines a non scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
                    slp_tree slp_node)
{
  tree scalar_dest;
  tree data_ref;
  tree op;
  tree vec_oprnd = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree elem_type;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  enum machine_mode vec_mode;
  tree dummy;
  enum dr_alignment_support alignment_support_scheme;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt;
  stmt_vec_info prev_stmt_info = NULL;
  tree dataref_ptr = NULL_TREE;
  tree dataref_offset = NULL_TREE;
  gimple ptr_incr = NULL;
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies;
  int j;
  gimple next_stmt, first_stmt = NULL;
  bool grouped_store = false;
  bool store_lanes_p = false;
  unsigned int group_size, i;
  vec<tree> dr_chain = vNULL;
  vec<tree> oprnds = vNULL;
  vec<tree> result_chain = vNULL;
  bool inv_p;
  bool negative = false;
  tree offset = NULL_TREE;
  vec<tree> vec_oprnds = vNULL;
  bool slp = (slp_node != NULL);
  unsigned int vec_num;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  tree aggr_type;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  /* FORNOW. This restriction should be relaxed.  */
  if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "multiple types in nested loop.\n");
      return false;
    }

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is vectorizable store? */

  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
      && is_pattern_stmt_p (stmt_info))
    scalar_dest = TREE_OPERAND (scalar_dest, 0);
  if (TREE_CODE (scalar_dest) != ARRAY_REF
      && TREE_CODE (scalar_dest) != BIT_FIELD_REF
      && TREE_CODE (scalar_dest) != INDIRECT_REF
      && TREE_CODE (scalar_dest) != COMPONENT_REF
      && TREE_CODE (scalar_dest) != IMAGPART_EXPR
      && TREE_CODE (scalar_dest) != REALPART_EXPR
      && TREE_CODE (scalar_dest) != MEM_REF)
    return false;

  gcc_assert (gimple_assign_single_p (stmt));
  op = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op, stmt, loop_vinfo, bb_vinfo, &def_stmt,
                           &def, &dt))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }

  elem_type = TREE_TYPE (vectype);
  vec_mode = TYPE_MODE (vectype);

  /* FORNOW. In some cases can vectorize even if data-type not supported
     (e.g. - array initialization with 0).  */
  if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
    return false;

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  negative =
    tree_int_cst_compare (loop && nested_in_vect_loop_p (loop, stmt)
                          ? STMT_VINFO_DR_STEP (stmt_info) : DR_STEP (dr),
                          size_zero_node) < 0;
  if (negative && ncopies > 1)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "multiple types with negative step.\n");
      return false;
    }

  if (negative)
    {
      gcc_assert (!grouped_store);
      alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
      if (alignment_support_scheme != dr_aligned
          && alignment_support_scheme != dr_unaligned_supported)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "negative step but alignment required.\n");
          return false;
        }
      if (dt != vect_constant_def
          && dt != vect_external_def
          && !perm_mask_for_reverse (vectype))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "negative step and reversing not supported.\n");
          return false;
        }
    }

  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      grouped_store = true;
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      if (!slp && !PURE_SLP_STMT (stmt_info))
        {
          group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
          if (vect_store_lanes_supported (vectype, group_size))
            store_lanes_p = true;
          else if (!vect_grouped_store_supported (vectype, group_size))
            return false;
        }

      if (first_stmt == stmt)
        {
          /* STMT is the leader of the group.  Check the operands of all the
             stmts of the group.  */
          next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
          while (next_stmt)
            {
              gcc_assert (gimple_assign_single_p (next_stmt));
              op = gimple_assign_rhs1 (next_stmt);
              if (!vect_is_simple_use (op, next_stmt, loop_vinfo, bb_vinfo,
                                       &def_stmt, &def, &dt))
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "use not simple.\n");
                  return false;
                }
              next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
            }
        }
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
      vect_model_store_cost (stmt_info, ncopies, store_lanes_p, dt,
                             NULL, NULL, NULL);
      return true;
    }

  /** Transform.  **/

  ensure_base_align (stmt_info, dr);

  if (grouped_store)
    {
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));

      GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;

      /* FORNOW */
      gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));

      /* We vectorize all the stmts of the interleaving group when we
         reach the last stmt in the group.  */
      if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
          < GROUP_SIZE (vinfo_for_stmt (first_stmt))
          && !slp)
        {
          *vec_stmt = NULL;
          return true;
        }

      if (slp)
        {
          grouped_store = false;
          /* VEC_NUM is the number of vect stmts to be created for this
             group.  */
          vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
          first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
          first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
          op = gimple_assign_rhs1 (first_stmt);
        }
      else
        /* VEC_NUM is the number of vect stmts to be created for this
           group.  */
        vec_num = group_size;
    }
  else
    {
      first_stmt = stmt;
      first_dr = dr;
      group_size = vec_num = 1;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform store. ncopies = %d\n", ncopies);

  dr_chain.create (group_size);
  oprnds.create (group_size);

  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
  gcc_assert (alignment_support_scheme);
  /* Targets with store-lane instructions must not require explicit
     realignment.  */
  gcc_assert (!store_lanes_p
              || alignment_support_scheme == dr_aligned
              || alignment_support_scheme == dr_unaligned_supported);

  if (negative)
    offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);

  if (store_lanes_p)
    aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
  else
    aggr_type = vectype;

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation in
     vect_get_vec_def_for_copy_stmt.  */

  /* In case of interleaving (non-unit grouped access):

        S1:  &base + 2 = x2
        S2:  &base = x0
        S3:  &base + 1 = x1
        S4:  &base + 3 = x3

     We create vectorized stores starting from base address (the access of the
     first stmt in the chain (S2 in the above example), when the last store stmt
     of the chain (S4) is reached:

        VS1: &base = vx2
        VS2: &base + vec_size*1 = vx0
        VS3: &base + vec_size*2 = vx1
        VS4: &base + vec_size*3 = vx3

     Then permutation statements are generated:

        VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
        VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
        ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_store_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_store_chain()).

     In case of both multiple types and interleaving, above vector stores and
     permutation stmts are created for every copy.  The result vector stmts are
     put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
     STMT_VINFO_RELATED_STMT for the next copies.
  */

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      gimple new_stmt;

      if (j == 0)
        {
          if (slp)
            {
              /* Get vectorized arguments for SLP_NODE.  */
              vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
                                 NULL, slp_node, -1);

              vec_oprnd = vec_oprnds[0];
            }
          else
            {
              /* For interleaved stores we collect vectorized defs for all the
                 stores in the group in DR_CHAIN and OPRNDS.  DR_CHAIN is then
                 used as an input to vect_permute_store_chain(), and OPRNDS as
                 an input to vect_get_vec_def_for_stmt_copy() for the next copy.

                 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
                 OPRNDS are of size 1.  */
              next_stmt = first_stmt;
              for (i = 0; i < group_size; i++)
                {
                  /* Since gaps are not supported for interleaved stores,
                     GROUP_SIZE is the exact number of stmts in the chain.
                     Therefore, NEXT_STMT can't be NULL_TREE.  In case that
                     there is no interleaving, GROUP_SIZE is 1, and only one
                     iteration of the loop will be executed.  */
                  gcc_assert (next_stmt
                              && gimple_assign_single_p (next_stmt));
                  op = gimple_assign_rhs1 (next_stmt);

                  vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
                                                            NULL);
                  dr_chain.quick_push (vec_oprnd);
                  oprnds.quick_push (vec_oprnd);
                  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
                }
            }

          /* We should have caught mismatched types earlier.  */
          gcc_assert (useless_type_conversion_p (vectype,
                                                 TREE_TYPE (vec_oprnd)));
          bool simd_lane_access_p
            = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
          if (simd_lane_access_p
              && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
              && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
              && integer_zerop (DR_OFFSET (first_dr))
              && integer_zerop (DR_INIT (first_dr))
              && alias_sets_conflict_p (get_alias_set (aggr_type),
                                        get_alias_set (DR_REF (first_dr))))
            {
              dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
              dataref_offset = build_int_cst (reference_alias_ptr_type
                                              (DR_REF (first_dr)), 0);
              inv_p = false;
            }
          else
            dataref_ptr
              = vect_create_data_ref_ptr (first_stmt, aggr_type,
                                          simd_lane_access_p ? loop : NULL,
                                          offset, &dummy, gsi, &ptr_incr,
                                          simd_lane_access_p, &inv_p);
          gcc_assert (bb_vinfo || !inv_p);
        }
      else
        {
          /* For interleaved stores we created vectorized defs for all the
             defs stored in OPRNDS in the previous iteration (previous copy).
             DR_CHAIN is then used as an input to vect_permute_store_chain(),
             and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
             next copy.
             If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
             OPRNDS are of size 1.  */
          for (i = 0; i < group_size; i++)
            {
              op = oprnds[i];
              vect_is_simple_use (op, NULL, loop_vinfo, bb_vinfo, &def_stmt,
                                  &def, &dt);
              vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
              dr_chain[i] = vec_oprnd;
              oprnds[i] = vec_oprnd;
            }
          if (dataref_offset)
            dataref_offset
              = int_const_binop (PLUS_EXPR, dataref_offset,
                                 TYPE_SIZE_UNIT (aggr_type));
          else
            dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
                                           TYPE_SIZE_UNIT (aggr_type));
        }

      if (store_lanes_p)
        {
          tree vec_array;

          /* Combine all the vectors into an array.  */
          vec_array = create_vector_array (vectype, vec_num);
          for (i = 0; i < vec_num; i++)
            {
              vec_oprnd = dr_chain[i];
              write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
            }

          /* Emit:
               MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY).  */
          data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
          new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1,
                                                 vec_array);
          gimple_call_set_lhs (new_stmt, data_ref);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
        }
      else
        {
          new_stmt = NULL;
          if (grouped_store)
            {
              if (j == 0)
                result_chain.create (group_size);
              /* Permute.  */
              vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
                                        &result_chain);
            }

          next_stmt = first_stmt;
          for (i = 0; i < vec_num; i++)
            {
              unsigned align, misalign;

              if (i > 0)
                /* Bump the vector pointer.  */
                dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                               stmt, NULL_TREE);

              if (slp)
                vec_oprnd = vec_oprnds[i];
              else if (grouped_store)
                /* For grouped stores vectorized defs are interleaved in
                   vect_permute_store_chain().  */
                vec_oprnd = result_chain[i];

              data_ref = build2 (MEM_REF, TREE_TYPE (vec_oprnd), dataref_ptr,
                                 dataref_offset
                                 ? dataref_offset
                                 : build_int_cst (reference_alias_ptr_type
                                                  (DR_REF (first_dr)), 0));
              align = TYPE_ALIGN_UNIT (vectype);
              if (aligned_access_p (first_dr))
                misalign = 0;
              else if (DR_MISALIGNMENT (first_dr) == -1)
                {
                  TREE_TYPE (data_ref)
                    = build_aligned_type (TREE_TYPE (data_ref),
                                          TYPE_ALIGN (elem_type));
                  align = TYPE_ALIGN_UNIT (elem_type);
                  misalign = 0;
                }
              else
                {
                  TREE_TYPE (data_ref)
                    = build_aligned_type (TREE_TYPE (data_ref),
                                          TYPE_ALIGN (elem_type));
                  misalign = DR_MISALIGNMENT (first_dr);
                }
              if (dataref_offset == NULL_TREE)
                set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
                                        misalign);

              if (negative
                  && dt != vect_constant_def
                  && dt != vect_external_def)
                {
                  tree perm_mask = perm_mask_for_reverse (vectype);
                  tree perm_dest
                    = vect_create_destination_var (gimple_assign_rhs1 (stmt),
                                                   vectype);
                  tree new_temp = make_ssa_name (perm_dest, NULL);

                  /* Generate the permute statement.  */
                  gimple perm_stmt
                    = gimple_build_assign_with_ops (VEC_PERM_EXPR, new_temp,
                                                    vec_oprnd, vec_oprnd,
                                                    perm_mask);
                  vect_finish_stmt_generation (stmt, perm_stmt, gsi);

                  perm_stmt = SSA_NAME_DEF_STMT (new_temp);
                  vec_oprnd = new_temp;
                }

              /* Arguments are ready.  Create the new vector stmt.  */
              new_stmt = gimple_build_assign (data_ref, vec_oprnd);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);

              if (slp)
                continue;

              next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
              if (!next_stmt)
                break;
            }
        }
      if (!slp)
        {
          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
    }

  dr_chain.release ();
  oprnds.release ();
  result_chain.release ();
  vec_oprnds.release ();

  return true;
}
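
/* Illustrative sketch (not part of GCC): the negative-step case handled
   above.  For a hypothetical loop that stores downwards,

     for (i = n - 1; i >= 0; i--)
       a[i] = x[i];

   DR_STEP is negative, so the access pointer is offset by
   -nunits + 1 elements per vector store and the value vector is first
   reversed with the mask from perm_mask_for_reverse, as in the permute
   statement generated in the inner loop above.  */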
/* Given a vector type VECTYPE and permutation SEL returns
   the VECTOR_CST mask that implements the permutation of the
   vector elements.  If that is impossible to do, returns NULL.  */

tree
vect_gen_perm_mask (tree vectype, unsigned char *sel)
{
  tree mask_elt_type, mask_type, mask_vec, *mask_elts;
  int i, nunits;

  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
    return NULL;

  mask_elt_type = lang_hooks.types.type_for_mode
                    (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
  mask_type = get_vectype_for_scalar_type (mask_elt_type);

  mask_elts = XALLOCAVEC (tree, nunits);
  for (i = nunits - 1; i >= 0; i--)
    mask_elts[i] = build_int_cst (mask_elt_type, sel[i]);
  mask_vec = build_vector (mask_type, mask_elts);

  return mask_vec;
}
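
/* Illustrative sketch (not part of GCC): building an even-elements
   selector for a hypothetical 4-element vector and turning it into a
   mask suitable for VEC_PERM_EXPR:

     unsigned char sel[4] = { 0, 2, 4, 6 };   // even elements of {x, y}
     tree mask = vect_gen_perm_mask (vectype, sel);
     if (mask != NULL_TREE)
       ...  build VEC_PERM_EXPR <x, y, mask>  ...

   A NULL result means can_vec_perm_p rejected the selector for this
   vector mode.  */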
/* Given a vector variable X and Y, that was generated for the scalar
   STMT, generate instructions to permute the vector elements of X and Y
   using permutation mask MASK_VEC, insert them at *GSI and return the
   permuted vector variable.  */

static tree
permute_vec_elements (tree x, tree y, tree mask_vec, gimple stmt,
                      gimple_stmt_iterator *gsi)
{
  tree vectype = TREE_TYPE (x);
  tree perm_dest, data_ref;
  gimple perm_stmt;

  perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
  data_ref = make_ssa_name (perm_dest, NULL);

  /* Generate the permute statement.  */
  perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref,
                                            x, y, mask_vec);
  vect_finish_stmt_generation (stmt, perm_stmt, gsi);

  return data_ref;
}
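
/* Illustrative sketch (not part of GCC): combining two halves of a
   gather result with a hypothetical mask, as the NARROW path in
   vectorizable_load below does:

     tree narrowed = permute_vec_elements (lo_half, hi_half,
                                           perm_mask, stmt, gsi);

   which emits

     narrowed = VEC_PERM_EXPR <lo_half, hi_half, perm_mask>;  */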
/* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
   inserting them on the loops preheader edge.  Returns true if we
   were successful in doing so (and thus STMT can be moved then),
   otherwise returns false.  */

static bool
hoist_defs_of_uses (gimple stmt, struct loop *loop)
{
  ssa_op_iter i;
  tree op;
  bool any = false;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
    {
      gimple def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
          && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
        {
          /* Make sure we don't need to recurse.  While we could do
             so in simple cases when there are more complex use webs
             we don't have an easy way to preserve stmt order to fulfil
             dependencies within them.  */
          tree op2;
          ssa_op_iter i2;
          if (gimple_code (def_stmt) == GIMPLE_PHI)
            return false;
          FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
            {
              gimple def_stmt2 = SSA_NAME_DEF_STMT (op2);
              if (!gimple_nop_p (def_stmt2)
                  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
                return false;
            }
          any = true;
        }
    }

  if (!any)
    return true;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
    {
      gimple def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
          && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
        {
          gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
          gsi_remove (&gsi, false);
          gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
        }
    }

  return true;
}
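
/* Illustrative sketch (not part of GCC): hoist_defs_of_uses guards the
   transformation of an invariant load inside the loop into a load on
   the preheader edge.  For a hypothetical

     loop:
       p = base + off;   // def feeding the load, loop-invariant
       x = *p;           // invariant load STMT to be hoisted

   the first pass above checks that every def feeding STMT (here the def
   of p) is not a PHI and is itself fed only from outside the loop; the
   second pass then moves those defs to the preheader so STMT itself can
   be moved.  */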
5564 /* vectorizable_load.
5566 Check if STMT reads a non scalar data-ref (array/pointer/structure) that
5568 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5569 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
5570 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5573 vectorizable_load (gimple stmt
, gimple_stmt_iterator
*gsi
, gimple
*vec_stmt
,
5574 slp_tree slp_node
, slp_instance slp_node_instance
)
5577 tree vec_dest
= NULL
;
5578 tree data_ref
= NULL
;
5579 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
5580 stmt_vec_info prev_stmt_info
;
5581 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
5582 struct loop
*loop
= NULL
;
5583 struct loop
*containing_loop
= (gimple_bb (stmt
))->loop_father
;
5584 bool nested_in_vect_loop
= false;
5585 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
), *first_dr
= NULL
;
5586 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
5589 enum machine_mode mode
;
5590 gimple new_stmt
= NULL
;
5592 enum dr_alignment_support alignment_support_scheme
;
5593 tree dataref_ptr
= NULL_TREE
;
5594 tree dataref_offset
= NULL_TREE
;
5595 gimple ptr_incr
= NULL
;
5596 int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
5598 int i
, j
, group_size
, group_gap
;
5599 tree msq
= NULL_TREE
, lsq
;
5600 tree offset
= NULL_TREE
;
5601 tree realignment_token
= NULL_TREE
;
5603 vec
<tree
> dr_chain
= vNULL
;
5604 bool grouped_load
= false;
5605 bool load_lanes_p
= false;
5608 bool negative
= false;
5609 bool compute_in_loop
= false;
5610 struct loop
*at_loop
;
5612 bool slp
= (slp_node
!= NULL
);
5613 bool slp_perm
= false;
5614 enum tree_code code
;
5615 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
5618 tree gather_base
= NULL_TREE
, gather_off
= NULL_TREE
;
5619 tree gather_off_vectype
= NULL_TREE
, gather_decl
= NULL_TREE
;
5620 int gather_scale
= 1;
5621 enum vect_def_type gather_dt
= vect_unknown_def_type
;
5625 loop
= LOOP_VINFO_LOOP (loop_vinfo
);
5626 nested_in_vect_loop
= nested_in_vect_loop_p (loop
, stmt
);
5627 vf
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
5632 /* Multiple types in SLP are handled by creating the appropriate number of
5633 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5635 if (slp
|| PURE_SLP_STMT (stmt_info
))
5638 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
5640 gcc_assert (ncopies
>= 1);
5642 /* FORNOW. This restriction should be relaxed. */
5643 if (nested_in_vect_loop
&& ncopies
> 1)
5645 if (dump_enabled_p ())
5646 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5647 "multiple types in nested loop.\n");
5651 /* Invalidate assumptions made by dependence analysis when vectorization
5652 on the unrolled body effectively re-orders stmts. */
5654 && STMT_VINFO_MIN_NEG_DIST (stmt_info
) != 0
5655 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo
)
5656 > STMT_VINFO_MIN_NEG_DIST (stmt_info
)))
5658 if (dump_enabled_p ())
5659 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5660 "cannot perform implicit CSE when unrolling "
5661 "with negative dependence distance\n");
5665 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
5668 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
5671 /* Is vectorizable load? */
5672 if (!is_gimple_assign (stmt
))
5675 scalar_dest
= gimple_assign_lhs (stmt
);
5676 if (TREE_CODE (scalar_dest
) != SSA_NAME
)
5679 code
= gimple_assign_rhs_code (stmt
);
5680 if (code
!= ARRAY_REF
5681 && code
!= BIT_FIELD_REF
5682 && code
!= INDIRECT_REF
5683 && code
!= COMPONENT_REF
5684 && code
!= IMAGPART_EXPR
5685 && code
!= REALPART_EXPR
5687 && TREE_CODE_CLASS (code
) != tcc_declaration
)
5690 if (!STMT_VINFO_DATA_REF (stmt_info
))
5693 elem_type
= TREE_TYPE (vectype
);
5694 mode
= TYPE_MODE (vectype
);
5696 /* FORNOW. In some cases can vectorize even if data-type not supported
5697 (e.g. - data copies). */
5698 if (optab_handler (mov_optab
, mode
) == CODE_FOR_nothing
)
5700 if (dump_enabled_p ())
5701 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5702 "Aligned load, but unsupported type.\n");
5706 /* Check if the load is a part of an interleaving chain. */
5707 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
))
5709 grouped_load
= true;
5711 gcc_assert (! nested_in_vect_loop
&& !STMT_VINFO_GATHER_P (stmt_info
));
5713 first_stmt
= GROUP_FIRST_ELEMENT (stmt_info
);
5714 if (!slp
&& !PURE_SLP_STMT (stmt_info
))
5716 group_size
= GROUP_SIZE (vinfo_for_stmt (first_stmt
));
5717 if (vect_load_lanes_supported (vectype
, group_size
))
5718 load_lanes_p
= true;
5719 else if (!vect_grouped_load_supported (vectype
, group_size
))
5723 /* Invalidate assumptions made by dependence analysis when vectorization
5724 on the unrolled body effectively re-orders stmts. */
5725 if (!PURE_SLP_STMT (stmt_info
)
5726 && STMT_VINFO_MIN_NEG_DIST (stmt_info
) != 0
5727 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo
)
5728 > STMT_VINFO_MIN_NEG_DIST (stmt_info
)))
5730 if (dump_enabled_p ())
5731 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5732 "cannot perform implicit CSE when performing "
5733 "group loads with negative dependence distance\n");
5739 if (STMT_VINFO_GATHER_P (stmt_info
))
5743 gather_decl
= vect_check_gather (stmt
, loop_vinfo
, &gather_base
,
5744 &gather_off
, &gather_scale
);
5745 gcc_assert (gather_decl
);
5746 if (!vect_is_simple_use_1 (gather_off
, NULL
, loop_vinfo
, bb_vinfo
,
5747 &def_stmt
, &def
, &gather_dt
,
5748 &gather_off_vectype
))
5750 if (dump_enabled_p ())
5751 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5752 "gather index use not simple.\n");
5756 else if (STMT_VINFO_STRIDE_LOAD_P (stmt_info
))
5760 negative
= tree_int_cst_compare (nested_in_vect_loop
5761 ? STMT_VINFO_DR_STEP (stmt_info
)
5763 size_zero_node
) < 0;
5764 if (negative
&& ncopies
> 1)
5766 if (dump_enabled_p ())
5767 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5768 "multiple types with negative step.\n");
5776 if (dump_enabled_p ())
5777 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5778 "negative step for group load not supported"
5782 alignment_support_scheme
= vect_supportable_dr_alignment (dr
, false);
5783 if (alignment_support_scheme
!= dr_aligned
5784 && alignment_support_scheme
!= dr_unaligned_supported
)
5786 if (dump_enabled_p ())
5787 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5788 "negative step but alignment required.\n");
5791 if (!perm_mask_for_reverse (vectype
))
5793 if (dump_enabled_p ())
5794 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5795 "negative step and reversing not supported."
5802 if (!vec_stmt
) /* transformation not required. */
5804 STMT_VINFO_TYPE (stmt_info
) = load_vec_info_type
;
5805 vect_model_load_cost (stmt_info
, ncopies
, load_lanes_p
, NULL
, NULL
, NULL
);
5809 if (dump_enabled_p ())
5810 dump_printf_loc (MSG_NOTE
, vect_location
,
5811 "transform load. ncopies = %d\n", ncopies
);
5815 ensure_base_align (stmt_info
, dr
);
5817 if (STMT_VINFO_GATHER_P (stmt_info
))
5819 tree vec_oprnd0
= NULL_TREE
, op
;
5820 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gather_decl
));
5821 tree rettype
, srctype
, ptrtype
, idxtype
, masktype
, scaletype
;
5822 tree ptr
, mask
, var
, scale
, merge
, perm_mask
= NULL_TREE
, prev_res
= NULL_TREE
;
5823 edge pe
= loop_preheader_edge (loop
);
5826 enum { NARROW
, NONE
, WIDEN
} modifier
;
5827 int gather_off_nunits
= TYPE_VECTOR_SUBPARTS (gather_off_vectype
);
5829 if (nunits
== gather_off_nunits
)
5831 else if (nunits
== gather_off_nunits
/ 2)
5833 unsigned char *sel
= XALLOCAVEC (unsigned char, gather_off_nunits
);
5836 for (i
= 0; i
< gather_off_nunits
; ++i
)
5837 sel
[i
] = i
| nunits
;
5839 perm_mask
= vect_gen_perm_mask (gather_off_vectype
, sel
);
5840 gcc_assert (perm_mask
!= NULL_TREE
);
5842 else if (nunits
== gather_off_nunits
* 2)
5844 unsigned char *sel
= XALLOCAVEC (unsigned char, nunits
);
5847 for (i
= 0; i
< nunits
; ++i
)
5848 sel
[i
] = i
< gather_off_nunits
5849 ? i
: i
+ nunits
- gather_off_nunits
;
5851 perm_mask
= vect_gen_perm_mask (vectype
, sel
);
5852 gcc_assert (perm_mask
!= NULL_TREE
);
5858 rettype
= TREE_TYPE (TREE_TYPE (gather_decl
));
5859 srctype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
5860 ptrtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
5861 idxtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
5862 masktype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
5863 scaletype
= TREE_VALUE (arglist
);
5864 gcc_checking_assert (types_compatible_p (srctype
, rettype
));
5866 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
5868 ptr
= fold_convert (ptrtype
, gather_base
);
5869 if (!is_gimple_min_invariant (ptr
))
5871 ptr
= force_gimple_operand (ptr
, &seq
, true, NULL_TREE
);
5872 new_bb
= gsi_insert_seq_on_edge_immediate (pe
, seq
);
5873 gcc_assert (!new_bb
);
5876 /* Currently we support only unconditional gather loads,
5877 so mask should be all ones. */
5878 if (TREE_CODE (masktype
) == INTEGER_TYPE
)
5879 mask
= build_int_cst (masktype
, -1);
5880 else if (TREE_CODE (TREE_TYPE (masktype
)) == INTEGER_TYPE
)
5882 mask
= build_int_cst (TREE_TYPE (masktype
), -1);
5883 mask
= build_vector_from_val (masktype
, mask
);
5884 mask
= vect_init_vector (stmt
, mask
, masktype
, NULL
);
5886 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype
)))
5890 for (j
= 0; j
< 6; ++j
)
5892 real_from_target (&r
, tmp
, TYPE_MODE (TREE_TYPE (masktype
)));
5893 mask
= build_real (TREE_TYPE (masktype
), r
);
5894 mask
= build_vector_from_val (masktype
, mask
);
5895 mask
= vect_init_vector (stmt
, mask
, masktype
, NULL
);
5900 scale
= build_int_cst (scaletype
, gather_scale
);
5902 if (TREE_CODE (TREE_TYPE (rettype
)) == INTEGER_TYPE
)
5903 merge
= build_int_cst (TREE_TYPE (rettype
), 0);
5904 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype
)))
5908 for (j
= 0; j
< 6; ++j
)
5910 real_from_target (&r
, tmp
, TYPE_MODE (TREE_TYPE (rettype
)));
5911 merge
= build_real (TREE_TYPE (rettype
), r
);
5915 merge
= build_vector_from_val (rettype
, merge
);
5916 merge
= vect_init_vector (stmt
, merge
, rettype
, NULL
);
5918 prev_stmt_info
= NULL
;
5919 for (j
= 0; j
< ncopies
; ++j
)
5921 if (modifier
== WIDEN
&& (j
& 1))
5922 op
= permute_vec_elements (vec_oprnd0
, vec_oprnd0
,
5923 perm_mask
, stmt
, gsi
);
5926 = vect_get_vec_def_for_operand (gather_off
, stmt
, NULL
);
5929 = vect_get_vec_def_for_stmt_copy (gather_dt
, vec_oprnd0
);
5931 if (!useless_type_conversion_p (idxtype
, TREE_TYPE (op
)))
5933 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op
))
5934 == TYPE_VECTOR_SUBPARTS (idxtype
));
5935 var
= vect_get_new_vect_var (idxtype
, vect_simple_var
, NULL
);
5936 var
= make_ssa_name (var
, NULL
);
5937 op
= build1 (VIEW_CONVERT_EXPR
, idxtype
, op
);
5939 = gimple_build_assign_with_ops (VIEW_CONVERT_EXPR
, var
,
5941 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
5946 = gimple_build_call (gather_decl
, 5, merge
, ptr
, op
, mask
, scale
);
5948 if (!useless_type_conversion_p (vectype
, rettype
))
5950 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype
)
5951 == TYPE_VECTOR_SUBPARTS (rettype
));
5952 var
= vect_get_new_vect_var (rettype
, vect_simple_var
, NULL
);
5953 op
= make_ssa_name (var
, new_stmt
);
5954 gimple_call_set_lhs (new_stmt
, op
);
5955 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
5956 var
= make_ssa_name (vec_dest
, NULL
);
5957 op
= build1 (VIEW_CONVERT_EXPR
, vectype
, op
);
5959 = gimple_build_assign_with_ops (VIEW_CONVERT_EXPR
, var
, op
,
5964 var
= make_ssa_name (vec_dest
, new_stmt
);
5965 gimple_call_set_lhs (new_stmt
, var
);
5968 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
5970 if (modifier
== NARROW
)
5977 var
= permute_vec_elements (prev_res
, var
,
5978 perm_mask
, stmt
, gsi
);
5979 new_stmt
= SSA_NAME_DEF_STMT (var
);
5982 if (prev_stmt_info
== NULL
)
5983 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
5985 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
5986 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
5990 else if (STMT_VINFO_STRIDE_LOAD_P (stmt_info
))
5992 gimple_stmt_iterator incr_gsi
;
5998 vec
<constructor_elt
, va_gc
> *v
= NULL
;
5999 gimple_seq stmts
= NULL
;
6000 tree stride_base
, stride_step
, alias_off
;
6002 gcc_assert (!nested_in_vect_loop
);
6005 = fold_build_pointer_plus
6006 (unshare_expr (DR_BASE_ADDRESS (dr
)),
6007 size_binop (PLUS_EXPR
,
6008 convert_to_ptrofftype (unshare_expr (DR_OFFSET (dr
))),
6009 convert_to_ptrofftype (DR_INIT (dr
))));
6010 stride_step
= fold_convert (sizetype
, unshare_expr (DR_STEP (dr
)));
6012 /* For a load with loop-invariant (but other than power-of-2)
6013 stride (i.e. not a grouped access) like so:
6015 for (i = 0; i < n; i += stride)
6018 we generate a new induction variable and new accesses to
6019 form a new vector (or vectors, depending on ncopies):
6021 for (j = 0; ; j += VF*stride)
6023 tmp2 = array[j + stride];
6025 vectemp = {tmp1, tmp2, ...}
6028 ivstep
= stride_step
;
6029 ivstep
= fold_build2 (MULT_EXPR
, TREE_TYPE (ivstep
), ivstep
,
6030 build_int_cst (TREE_TYPE (ivstep
), vf
));
6032 standard_iv_increment_position (loop
, &incr_gsi
, &insert_after
);
6034 create_iv (stride_base
, ivstep
, NULL
,
6035 loop
, &incr_gsi
, insert_after
,
6037 incr
= gsi_stmt (incr_gsi
);
6038 set_vinfo_for_stmt (incr
, new_stmt_vec_info (incr
, loop_vinfo
, NULL
));
6040 stride_step
= force_gimple_operand (stride_step
, &stmts
, true, NULL_TREE
);
6042 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop
), stmts
);
6044 prev_stmt_info
= NULL
;
6045 running_off
= offvar
;
6046 alias_off
= build_int_cst (reference_alias_ptr_type (DR_REF (dr
)), 0);
6047 for (j
= 0; j
< ncopies
; j
++)
6051 vec_alloc (v
, nunits
);
6052 for (i
= 0; i
< nunits
; i
++)
6054 tree newref
, newoff
;
6056 newref
= build2 (MEM_REF
, TREE_TYPE (vectype
),
6057 running_off
, alias_off
);
6059 newref
= force_gimple_operand_gsi (gsi
, newref
, true,
6062 CONSTRUCTOR_APPEND_ELT (v
, NULL_TREE
, newref
);
6063 newoff
= copy_ssa_name (running_off
, NULL
);
6064 incr
= gimple_build_assign_with_ops (POINTER_PLUS_EXPR
, newoff
,
6065 running_off
, stride_step
);
6066 vect_finish_stmt_generation (stmt
, incr
, gsi
);
6068 running_off
= newoff
;
6071 vec_inv
= build_constructor (vectype
, v
);
6072 new_temp
= vect_init_vector (stmt
, vec_inv
, vectype
, gsi
);
6073 new_stmt
= SSA_NAME_DEF_STMT (new_temp
);
6076 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
6078 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
6079 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
6086 first_stmt
= GROUP_FIRST_ELEMENT (stmt_info
);
6088 && !SLP_TREE_LOAD_PERMUTATION (slp_node
).exists ()
6089 && first_stmt
!= SLP_TREE_SCALAR_STMTS (slp_node
)[0])
6090 first_stmt
= SLP_TREE_SCALAR_STMTS (slp_node
)[0];
6092 /* Check if the chain of loads is already vectorized. */
6093 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt
))
6094 /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
6095 ??? But we can only do so if there is exactly one
6096 as we have no way to get at the rest. Leave the CSE
6098 ??? With the group load eventually participating
6099 in multiple different permutations (having multiple
6100 slp nodes which refer to the same group) the CSE
6101 is even wrong code. See PR56270. */
6104 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
6107 first_dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt
));
6108 group_size
= GROUP_SIZE (vinfo_for_stmt (first_stmt
));
6110 /* VEC_NUM is the number of vect stmts to be created for this group. */
6113 grouped_load
= false;
6114 vec_num
= SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node
);
6115 if (SLP_TREE_LOAD_PERMUTATION (slp_node
).exists ())
6117 group_gap
= GROUP_GAP (vinfo_for_stmt (first_stmt
));
6121 vec_num
= group_size
;
6129 group_size
= vec_num
= 1;
6133 alignment_support_scheme
= vect_supportable_dr_alignment (first_dr
, false);
6134 gcc_assert (alignment_support_scheme
);
6135 /* Targets with load-lane instructions must not require explicit
6137 gcc_assert (!load_lanes_p
6138 || alignment_support_scheme
== dr_aligned
6139 || alignment_support_scheme
== dr_unaligned_supported
);
6141 /* In case the vectorization factor (VF) is bigger than the number
6142 of elements that we can fit in a vectype (nunits), we have to generate
6143 more than one vector stmt - i.e - we need to "unroll" the
6144 vector stmt by a factor VF/nunits. In doing so, we record a pointer
6145 from one copy of the vector stmt to the next, in the field
6146 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
6147 stages to find the correct vector defs to be used when vectorizing
6148 stmts that use the defs of the current stmt. The example below
6149 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
6150 need to create 4 vectorized stmts):
6152 before vectorization:
6153 RELATED_STMT VEC_STMT
6157 step 1: vectorize stmt S1:
6158 We first create the vector stmt VS1_0, and, as usual, record a
6159 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
6160 Next, we create the vector stmt VS1_1, and record a pointer to
6161 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
6162 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
6164 RELATED_STMT VEC_STMT
6165 VS1_0: vx0 = memref0 VS1_1 -
6166 VS1_1: vx1 = memref1 VS1_2 -
6167 VS1_2: vx2 = memref2 VS1_3 -
6168 VS1_3: vx3 = memref3 - -
6169 S1: x = load - VS1_0
6172 See in documentation in vect_get_vec_def_for_stmt_copy for how the
6173 information we recorded in RELATED_STMT field is used to vectorize
6176 /* In case of interleaving (non-unit grouped access):
6183 Vectorized loads are created in the order of memory accesses
6184 starting from the access of the first stmt of the chain:
6187 VS2: vx1 = &base + vec_size*1
6188 VS3: vx3 = &base + vec_size*2
6189 VS4: vx4 = &base + vec_size*3
6191 Then permutation statements are generated:
6193 VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
6194 VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
6197 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
6198 (the order of the data-refs in the output of vect_permute_load_chain
6199 corresponds to the order of scalar stmts in the interleaving chain - see
6200 the documentation of vect_permute_load_chain()).
6201 The generation of permutation stmts and recording them in
6202 STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().
6204 In case of both multiple types and interleaving, the vector loads and
6205 permutation stmts above are created for every copy. The result vector
6206 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
6207 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
6209 /* If the data reference is aligned (dr_aligned) or potentially unaligned
6210 on a target that supports unaligned accesses (dr_unaligned_supported)
6211 we generate the following code:
6215 p = p + indx * vectype_size;
6220 Otherwise, the data reference is potentially unaligned on a target that
6221 does not support unaligned accesses (dr_explicit_realign_optimized) -
6222 then generate the following code, in which the data in each iteration is
6223 obtained by two vector loads, one from the previous iteration, and one
6224 from the current iteration:
6226 msq_init = *(floor(p1))
6227 p2 = initial_addr + VS - 1;
6228 realignment_token = call target_builtin;
6231 p2 = p2 + indx * vectype_size
6233 vec_dest = realign_load (msq, lsq, realignment_token)
6238 /* If the misalignment remains the same throughout the execution of the
6239 loop, we can create the init_addr and permutation mask at the loop
6240 preheader. Otherwise, it needs to be created inside the loop.
6241 This can only occur when vectorizing memory accesses in the inner-loop
6242 nested within an outer-loop that is being vectorized. */
  if (nested_in_vect_loop
      && (TREE_INT_CST_LOW (DR_STEP (dr))
          % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
    {
      gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
      compute_in_loop = true;
    }

  if ((alignment_support_scheme == dr_explicit_realign_optimized
       || alignment_support_scheme == dr_explicit_realign)
      && !compute_in_loop)
    {
      msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
                                    alignment_support_scheme, NULL_TREE,
                                    &at_loop);
      if (alignment_support_scheme == dr_explicit_realign_optimized)
        {
          phi = SSA_NAME_DEF_STMT (msq);
          offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
        }
    }
  else
    at_loop = loop;

  if (negative)
    offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);

  if (load_lanes_p)
    aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
  else
    aggr_type = vectype;

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* 1. Create the vector or array pointer update chain.  */
      if (j == 0)
        {
          bool simd_lane_access_p
            = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
          if (simd_lane_access_p
              && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
              && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
              && integer_zerop (DR_OFFSET (first_dr))
              && integer_zerop (DR_INIT (first_dr))
              && alias_sets_conflict_p (get_alias_set (aggr_type),
                                        get_alias_set (DR_REF (first_dr)))
              && (alignment_support_scheme == dr_aligned
                  || alignment_support_scheme == dr_unaligned_supported))
            {
              dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
              dataref_offset = build_int_cst (reference_alias_ptr_type
                                              (DR_REF (first_dr)), 0);
              inv_p = false;
            }
          else
            dataref_ptr
              = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
                                          offset, &dummy, gsi, &ptr_incr,
                                          simd_lane_access_p, &inv_p);
        }
      else if (dataref_offset)
        dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
                                          TYPE_SIZE_UNIT (aggr_type));
      else
        dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
                                       TYPE_SIZE_UNIT (aggr_type));

      if (grouped_load || slp_perm)
        dr_chain.create (vec_num);
      if (load_lanes_p)
        {
          tree vec_array;

          vec_array = create_vector_array (vectype, vec_num);

          /* Emit:
               VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]).  */
          data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
          new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
          gimple_call_set_lhs (new_stmt, vec_array);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          /* Extract each vector into an SSA_NAME.  */
          for (i = 0; i < vec_num; i++)
            {
              new_temp = read_vector_array (stmt, gsi, scalar_dest,
                                            vec_array, i);
              dr_chain.quick_push (new_temp);
            }

          /* Record the mapping between SSA_NAMEs and statements.  */
          vect_record_grouped_load_vectors (stmt, dr_chain);
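
          /* Illustrative sketch (not from the original sources): for a
             two-vector group of V4SI loads the code above emits, roughly,

                vect_array = LOAD_LANES (MEM_REF[(int[8] *)ptr]);
                vx0 = vect_array[0];
                vx1 = vect_array[1];

             with vx0/vx1 pushed onto DR_CHAIN.  */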
        }
      else
        {
          for (i = 0; i < vec_num; i++)
            {
              if (i > 0)
                dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                               stmt, NULL_TREE);

              /* 2. Create the vector-load in the loop.  */
              switch (alignment_support_scheme)
                {
                case dr_aligned:
                case dr_unaligned_supported:
                  {
                    unsigned int align, misalign;

                    data_ref
                      = build2 (MEM_REF, vectype, dataref_ptr,
                                dataref_offset
                                ? dataref_offset
                                : build_int_cst (reference_alias_ptr_type
                                                 (DR_REF (first_dr)), 0));
                    align = TYPE_ALIGN_UNIT (vectype);
                    if (alignment_support_scheme == dr_aligned)
                      {
                        gcc_assert (aligned_access_p (first_dr));
                        misalign = 0;
                      }
                    else if (DR_MISALIGNMENT (first_dr) == -1)
                      {
                        TREE_TYPE (data_ref)
                          = build_aligned_type (TREE_TYPE (data_ref),
                                                TYPE_ALIGN (elem_type));
                        align = TYPE_ALIGN_UNIT (elem_type);
                        misalign = 0;
                      }
                    else
                      {
                        TREE_TYPE (data_ref)
                          = build_aligned_type (TREE_TYPE (data_ref),
                                                TYPE_ALIGN (elem_type));
                        misalign = DR_MISALIGNMENT (first_dr);
                      }
                    if (dataref_offset == NULL_TREE)
                      set_ptr_info_alignment (get_ptr_info (dataref_ptr),
                                              align, misalign);
                    break;
                  }
                case dr_explicit_realign:
                  {
                    tree ptr, bump;
                    tree vs_minus_1;

                    vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);

                    if (compute_in_loop)
                      msq = vect_setup_realignment (first_stmt, gsi,
                                                    &realignment_token,
                                                    dr_explicit_realign,
                                                    dataref_ptr, NULL);

                    ptr = copy_ssa_name (dataref_ptr, NULL);
                    new_stmt = gimple_build_assign_with_ops
                                 (BIT_AND_EXPR, ptr, dataref_ptr,
                                  build_int_cst
                                  (TREE_TYPE (dataref_ptr),
                                   -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);
                    data_ref
                      = build2 (MEM_REF, vectype, ptr,
                                build_int_cst (reference_alias_ptr_type
                                               (DR_REF (first_dr)), 0));
                    vec_dest = vect_create_destination_var (scalar_dest,
                                                            vectype);
                    new_stmt = gimple_build_assign (vec_dest, data_ref);
                    new_temp = make_ssa_name (vec_dest, new_stmt);
                    gimple_assign_set_lhs (new_stmt, new_temp);
                    gimple_set_vdef (new_stmt, gimple_vdef (stmt));
                    gimple_set_vuse (new_stmt, gimple_vuse (stmt));
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);
                    msq = new_temp;

                    bump = size_binop (MULT_EXPR, vs_minus_1,
                                       TYPE_SIZE_UNIT (elem_type));
                    ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
                    new_stmt = gimple_build_assign_with_ops
                                 (BIT_AND_EXPR, NULL_TREE, ptr,
                                  build_int_cst
                                  (TREE_TYPE (ptr),
                                   -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
                    ptr = copy_ssa_name (dataref_ptr, new_stmt);
                    gimple_assign_set_lhs (new_stmt, ptr);
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);
                    data_ref
                      = build2 (MEM_REF, vectype, ptr,
                                build_int_cst (reference_alias_ptr_type
                                               (DR_REF (first_dr)), 0));
                    break;
                  }
                case dr_explicit_realign_optimized:
                  new_temp = copy_ssa_name (dataref_ptr, NULL);
                  new_stmt = gimple_build_assign_with_ops
                               (BIT_AND_EXPR, new_temp, dataref_ptr,
                                build_int_cst
                                (TREE_TYPE (dataref_ptr),
                                 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
                  vect_finish_stmt_generation (stmt, new_stmt, gsi);
                  data_ref
                    = build2 (MEM_REF, vectype, new_temp,
                              build_int_cst (reference_alias_ptr_type
                                             (DR_REF (first_dr)), 0));
                  break;
                default:
                  gcc_unreachable ();
                }
              vec_dest = vect_create_destination_var (scalar_dest, vectype);
              new_stmt = gimple_build_assign (vec_dest, data_ref);
              new_temp = make_ssa_name (vec_dest, new_stmt);
              gimple_assign_set_lhs (new_stmt, new_temp);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);

              /* 3. Handle explicit realignment if necessary/supported.
                 Create in loop:
                   vec_dest = realign_load (msq, lsq, realignment_token)  */
              if (alignment_support_scheme == dr_explicit_realign_optimized
                  || alignment_support_scheme == dr_explicit_realign)
                {
                  lsq = gimple_assign_lhs (new_stmt);
                  if (!realignment_token)
                    realignment_token = dataref_ptr;
                  vec_dest = vect_create_destination_var (scalar_dest, vectype);
                  new_stmt
                    = gimple_build_assign_with_ops (REALIGN_LOAD_EXPR,
                                                    vec_dest, msq, lsq,
                                                    realignment_token);
                  new_temp = make_ssa_name (vec_dest, new_stmt);
                  gimple_assign_set_lhs (new_stmt, new_temp);
                  vect_finish_stmt_generation (stmt, new_stmt, gsi);

                  if (alignment_support_scheme == dr_explicit_realign_optimized)
                    {
                      gcc_assert (phi);
                      if (i == vec_num - 1 && j == ncopies - 1)
                        add_phi_arg (phi, lsq,
                                     loop_latch_edge (containing_loop),
                                     UNKNOWN_LOCATION);
                      msq = lsq;
                    }
                }
              /* 4. Handle invariant-load.  */
              if (inv_p && !bb_vinfo)
                {
                  gcc_assert (!grouped_load);
                  /* If we have versioned for aliasing or the loop doesn't
                     have any data dependencies that would preclude this,
                     then we are sure this is a loop invariant load and
                     thus we can insert it on the preheader edge.  */
                  if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
                      && !nested_in_vect_loop
                      && hoist_defs_of_uses (stmt, loop))
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_NOTE, vect_location,
                                           "hoisting out of the vectorized "
                                           "loop: ");
                          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
                          dump_printf (MSG_NOTE, "\n");
                        }
                      tree tem = copy_ssa_name (scalar_dest, NULL);
                      gsi_insert_on_edge_immediate
                        (loop_preheader_edge (loop),
                         gimple_build_assign (tem,
                                              unshare_expr
                                                (gimple_assign_rhs1 (stmt))));
                      new_temp = vect_init_vector (stmt, tem, vectype, NULL);
                    }
                  else
                    {
                      gimple_stmt_iterator gsi2 = *gsi;
                      gsi_next (&gsi2);
                      new_temp = vect_init_vector (stmt, scalar_dest,
                                                   vectype, &gsi2);
                    }
                  new_stmt = SSA_NAME_DEF_STMT (new_temp);
                  set_vinfo_for_stmt (new_stmt,
                                      new_stmt_vec_info (new_stmt, loop_vinfo,
                                                         bb_vinfo));
                }

              if (negative)
                {
                  tree perm_mask = perm_mask_for_reverse (vectype);
                  new_temp = permute_vec_elements (new_temp, new_temp,
                                                   perm_mask, stmt, gsi);
                  new_stmt = SSA_NAME_DEF_STMT (new_temp);
                }
              /* Collect vector loads and later create their permutation in
                 vect_transform_grouped_load ().  */
              if (grouped_load || slp_perm)
                dr_chain.quick_push (new_temp);

              /* Store vector loads in the corresponding SLP_NODE.  */
              if (slp && !slp_perm)
                SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
            }
          /* Bump the vector pointer to account for a gap.  */
          if (slp && group_gap != 0)
            {
              tree bump = size_binop (MULT_EXPR,
                                      TYPE_SIZE_UNIT (elem_type),
                                      size_int (group_gap));
              dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                             stmt, bump);
            }
        }

      if (slp && !slp_perm)
        continue;

      if (slp_perm)
        {
          if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
                                             slp_node_instance, false))
            {
              dr_chain.release ();
              return false;
            }
        }
      else
        {
          if (grouped_load)
            {
              if (!load_lanes_p)
                vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
              *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
            }
          else
            {
              if (j == 0)
                STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
              else
                STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
              prev_stmt_info = vinfo_for_stmt (new_stmt);
            }
        }
      dr_chain.release ();
    }

  return true;
}
/* Function vect_is_simple_cond.

   Input:
   LOOP - the loop that is being vectorized.
   COND - Condition that is checked for simple use.

   Output:
   *COMP_VECTYPE - the vector type for the comparison.

   Returns whether a COND can be vectorized.  Checks whether
   condition operands are supportable using vect_is_simple_use.  */
static bool
vect_is_simple_cond (tree cond, gimple stmt, loop_vec_info loop_vinfo,
                     bb_vec_info bb_vinfo, tree *comp_vectype)
{
  tree lhs, rhs;
  tree def;
  enum vect_def_type dt;
  tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;

  if (!COMPARISON_CLASS_P (cond))
    return false;

  lhs = TREE_OPERAND (cond, 0);
  rhs = TREE_OPERAND (cond, 1);

  if (TREE_CODE (lhs) == SSA_NAME)
    {
      gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
      if (!vect_is_simple_use_1 (lhs, stmt, loop_vinfo, bb_vinfo,
                                 &lhs_def_stmt, &def, &dt, &vectype1))
        return false;
    }
  else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
           && TREE_CODE (lhs) != FIXED_CST)
    return false;

  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
      if (!vect_is_simple_use_1 (rhs, stmt, loop_vinfo, bb_vinfo,
                                 &rhs_def_stmt, &def, &dt, &vectype2))
        return false;
    }
  else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
           && TREE_CODE (rhs) != FIXED_CST)
    return false;

  *comp_vectype = vectype1 ? vectype1 : vectype2;
  return true;
}
/* vectorizable_condition.

   Check if STMT is a conditional modify expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
   at GSI.

   When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
   to be used at REDUC_INDEX (in then clause if REDUC_INDEX is 1, and in
   else clause if it is 2).

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
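/* Illustrative sketch (not from the original comment): a scalar statement

     x = (a < b) ? c : d;

   is replaced by a vector statement of the form

     vx = VEC_COND_EXPR <va < vb, vc, vd>;

   which selects, lane by lane, between whole vectors of then/else values.  */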
bool
vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
                        gimple *vec_stmt, tree reduc_def, int reduc_index,
                        slp_tree slp_node)
{
  tree scalar_dest = NULL_TREE;
  tree vec_dest = NULL_TREE;
  tree cond_expr, then_clause, else_clause;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree comp_vectype = NULL_TREE;
  tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
  tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
  tree vec_compare, vec_cond_expr;
  tree new_temp;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree def;
  enum vect_def_type dt, dts[4];
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies;
  enum tree_code code;
  stmt_vec_info prev_stmt_info = NULL;
  int i, j;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  vec<tree> vec_oprnds2 = vNULL;
  vec<tree> vec_oprnds3 = vNULL;
  tree vec_cmp_type;

  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);
  if (reduc_index && ncopies > 1)
    return false; /* FORNOW */
  if (reduc_index && STMT_SLP_TYPE (stmt_info))
    return false;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
           && reduc_def))
    return false;

  /* FORNOW: not yet supported.  */
  if (STMT_VINFO_LIVE_P (stmt_info))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "value used after loop.\n");
      return false;
    }

  /* Is vectorizable conditional operation?  */
  if (!is_gimple_assign (stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (code != COND_EXPR)
    return false;

  cond_expr = gimple_assign_rhs1 (stmt);
  then_clause = gimple_assign_rhs2 (stmt);
  else_clause = gimple_assign_rhs3 (stmt);

  if (!vect_is_simple_cond (cond_expr, stmt, loop_vinfo, bb_vinfo,
                            &comp_vectype)
      || !comp_vectype)
    return false;

  if (TREE_CODE (then_clause) == SSA_NAME)
    {
      gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
      if (!vect_is_simple_use (then_clause, stmt, loop_vinfo, bb_vinfo,
                               &then_def_stmt, &def, &dt))
        return false;
    }
  else if (TREE_CODE (then_clause) != INTEGER_CST
           && TREE_CODE (then_clause) != REAL_CST
           && TREE_CODE (then_clause) != FIXED_CST)
    return false;

  if (TREE_CODE (else_clause) == SSA_NAME)
    {
      gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
      if (!vect_is_simple_use (else_clause, stmt, loop_vinfo, bb_vinfo,
                               &else_def_stmt, &def, &dt))
        return false;
    }
  else if (TREE_CODE (else_clause) != INTEGER_CST
           && TREE_CODE (else_clause) != REAL_CST
           && TREE_CODE (else_clause) != FIXED_CST)
    return false;

  unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype)));
  /* The result of a vector comparison should be of signed type.  */
  tree cmp_type = build_nonstandard_integer_type (prec, 0);
  vec_cmp_type = get_same_sized_vectype (cmp_type, vectype);
  if (vec_cmp_type == NULL_TREE)
    return false;
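
  /* Illustrative note (not from the original sources): for V4SF operands,
     the comparison type computed above is V4SI - a vector of signed 32-bit
     integer masks, each element all-ones or all-zeros depending on the
     lane's comparison result.  */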

  if (!vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
      return expand_vec_cond_expr_p (vectype, comp_vectype);
    }

  /* Transform.  */

  if (!slp_node)
    {
      vec_oprnds0.create (1);
      vec_oprnds1.create (1);
      vec_oprnds2.create (1);
      vec_oprnds3.create (1);
    }

  /* Handle def.  */
  scalar_dest = gimple_assign_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype);
  /* Handle cond expr.  */
  for (j = 0; j < ncopies; j++)
    {
      gimple new_stmt = NULL;
      if (j == 0)
        {
          if (slp_node)
            {
              auto_vec<tree, 4> ops;
              auto_vec<vec<tree>, 4> vec_defs;

              ops.safe_push (TREE_OPERAND (cond_expr, 0));
              ops.safe_push (TREE_OPERAND (cond_expr, 1));
              ops.safe_push (then_clause);
              ops.safe_push (else_clause);
              vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
              vec_oprnds3 = vec_defs.pop ();
              vec_oprnds2 = vec_defs.pop ();
              vec_oprnds1 = vec_defs.pop ();
              vec_oprnds0 = vec_defs.pop ();

              ops.release ();
              vec_defs.release ();
            }
          else
            {
              gimple gtemp;
              vec_cond_lhs
                = vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0),
                                                stmt, NULL);
              vect_is_simple_use (TREE_OPERAND (cond_expr, 0), stmt,
                                  loop_vinfo, NULL, &gtemp, &def, &dts[0]);

              vec_cond_rhs
                = vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
                                                stmt, NULL);
              vect_is_simple_use (TREE_OPERAND (cond_expr, 1), stmt,
                                  loop_vinfo, NULL, &gtemp, &def, &dts[1]);
              if (reduc_index == 1)
                vec_then_clause = reduc_def;
              else
                {
                  vec_then_clause
                    = vect_get_vec_def_for_operand (then_clause,
                                                    stmt, NULL);
                  vect_is_simple_use (then_clause, stmt, loop_vinfo,
                                      NULL, &gtemp, &def, &dts[2]);
                }
              if (reduc_index == 2)
                vec_else_clause = reduc_def;
              else
                {
                  vec_else_clause
                    = vect_get_vec_def_for_operand (else_clause,
                                                    stmt, NULL);
                  vect_is_simple_use (else_clause, stmt, loop_vinfo,
                                      NULL, &gtemp, &def, &dts[3]);
                }
            }
        }
      else
        {
          vec_cond_lhs = vect_get_vec_def_for_stmt_copy (dts[0],
                                                         vec_oprnds0.pop ());
          vec_cond_rhs = vect_get_vec_def_for_stmt_copy (dts[1],
                                                         vec_oprnds1.pop ());
          vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
                                                            vec_oprnds2.pop ());
          vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
                                                            vec_oprnds3.pop ());
        }

      if (!slp_node)
        {
          vec_oprnds0.quick_push (vec_cond_lhs);
          vec_oprnds1.quick_push (vec_cond_rhs);
          vec_oprnds2.quick_push (vec_then_clause);
          vec_oprnds3.quick_push (vec_else_clause);
        }

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
        {
          vec_cond_rhs = vec_oprnds1[i];
          vec_then_clause = vec_oprnds2[i];
          vec_else_clause = vec_oprnds3[i];

          vec_compare = build2 (TREE_CODE (cond_expr), vec_cmp_type,
                                vec_cond_lhs, vec_cond_rhs);
          vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
                                  vec_compare, vec_then_clause,
                                  vec_else_clause);

          new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_oprnds2.release ();
  vec_oprnds3.release ();

  return true;
}

/* Make sure the statement is vectorizable.  */

bool
vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
  bool ok;
  tree scalar_type, vectype;
  gimple pattern_stmt;
  gimple_seq pattern_def_seq;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
      dump_printf (MSG_NOTE, "\n");
    }

  if (gimple_has_volatile_ops (stmt))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: stmt has volatile operands\n");

      return false;
    }
  /* Skip stmts that do not need to be vectorized.  In loops this is expected
     to include:
     - the COND_EXPR which is the loop exit condition
     - any LABEL_EXPRs in the loop
     - computations that are used only for array indexing or loop control.
     In basic blocks we only analyze statements that are a part of some SLP
     instance, therefore, all the statements are relevant.

     Pattern statement needs to be analyzed instead of the original statement
     if the original statement is not relevant.  Otherwise, we analyze both
     statements.  In basic blocks we are called from some SLP instance
     traversal, don't analyze pattern stmts instead, the pattern stmts
     already will be part of SLP instance.  */
  pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (!STMT_VINFO_RELEVANT_P (stmt_info)
      && !STMT_VINFO_LIVE_P (stmt_info))
    {
      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
          && pattern_stmt
          && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
              || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
        {
          /* Analyze PATTERN_STMT instead of the original stmt.  */
          stmt = pattern_stmt;
          stmt_info = vinfo_for_stmt (pattern_stmt);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "==> examining pattern statement: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
              dump_printf (MSG_NOTE, "\n");
            }
        }
      else
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");

          return true;
        }
    }
  else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
           && node == NULL
           && pattern_stmt
           && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
               || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
    {
      /* Analyze PATTERN_STMT too.  */
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "==> examining pattern statement: ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
          dump_printf (MSG_NOTE, "\n");
        }

      if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node))
        return false;
    }

  if (is_pattern_stmt_p (stmt_info)
      && node == NULL
      && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
    {
      gimple_stmt_iterator si;

      for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
        {
          gimple pattern_def_stmt = gsi_stmt (si);
          if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
              || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
            {
              /* Analyze def stmt of STMT if it's a pattern stmt.  */
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location,
                                   "==> examining pattern def statement: ");
                  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
                  dump_printf (MSG_NOTE, "\n");
                }

              if (!vect_analyze_stmt (pattern_def_stmt,
                                      need_to_vectorize, node))
                return false;
            }
        }
    }
  switch (STMT_VINFO_DEF_TYPE (stmt_info))
    {
    case vect_internal_def:
      break;

    case vect_reduction_def:
    case vect_nested_cycle:
      gcc_assert (!bb_vinfo && (relevance == vect_used_in_outer
                  || relevance == vect_used_in_outer_by_reduction
                  || relevance == vect_unused_in_scope));
      break;

    case vect_induction_def:
    case vect_constant_def:
    case vect_external_def:
    case vect_unknown_def_type:
    default:
      gcc_unreachable ();
    }

  if (bb_vinfo)
    {
      gcc_assert (PURE_SLP_STMT (stmt_info));
      scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "get vectype for scalar type: ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
          dump_printf (MSG_NOTE, "\n");
        }

      vectype = get_vectype_for_scalar_type (scalar_type);
      if (!vectype)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "not SLPed: unsupported data-type ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 scalar_type);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          return false;
        }

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
          dump_printf (MSG_NOTE, "\n");
        }

      STMT_VINFO_VECTYPE (stmt_info) = vectype;
    }
  if (STMT_VINFO_RELEVANT_P (stmt_info))
    {
      gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
      gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
                  || (is_gimple_call (stmt)
                      && gimple_call_lhs (stmt) == NULL_TREE));
      *need_to_vectorize = true;
    }
  ok = true;
  if (!bb_vinfo
      && (STMT_VINFO_RELEVANT_P (stmt_info)
          || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
    ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, NULL)
          || vectorizable_conversion (stmt, NULL, NULL, NULL)
          || vectorizable_shift (stmt, NULL, NULL, NULL)
          || vectorizable_operation (stmt, NULL, NULL, NULL)
          || vectorizable_assignment (stmt, NULL, NULL, NULL)
          || vectorizable_load (stmt, NULL, NULL, NULL, NULL)
          || vectorizable_call (stmt, NULL, NULL, NULL)
          || vectorizable_store (stmt, NULL, NULL, NULL)
          || vectorizable_reduction (stmt, NULL, NULL, NULL)
          || vectorizable_condition (stmt, NULL, NULL, NULL, 0, NULL));
  else
    {
      if (bb_vinfo)
        ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
              || vectorizable_conversion (stmt, NULL, NULL, node)
              || vectorizable_shift (stmt, NULL, NULL, node)
              || vectorizable_operation (stmt, NULL, NULL, node)
              || vectorizable_assignment (stmt, NULL, NULL, node)
              || vectorizable_load (stmt, NULL, NULL, node, NULL)
              || vectorizable_call (stmt, NULL, NULL, node)
              || vectorizable_store (stmt, NULL, NULL, node)
              || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node));
    }

  if (!ok)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "not vectorized: relevant stmt not ");
          dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      return false;
    }

  if (bb_vinfo)
    return true;
  /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
     need extra handling, except for vectorizable reductions.  */
  if (STMT_VINFO_LIVE_P (stmt_info)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    ok = vectorizable_live_operation (stmt, NULL, NULL);

  if (!ok)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "not vectorized: live stmt not ");
          dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      return false;
    }

  return true;
}

/* Function vect_transform_stmt.

   Create a vectorized stmt to replace STMT, and insert it at GSI.  */
bool
vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
                     bool *grouped_store, slp_tree slp_node,
                     slp_instance slp_node_instance)
{
  bool is_store = false;
  gimple vec_stmt = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bool done;
  switch (STMT_VINFO_TYPE (stmt_info))
    {
    case type_demotion_vec_info_type:
    case type_promotion_vec_info_type:
    case type_conversion_vec_info_type:
      done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case induc_vec_info_type:
      gcc_assert (!slp_node);
      done = vectorizable_induction (stmt, gsi, &vec_stmt);
      gcc_assert (done);
      break;

    case shift_vec_info_type:
      done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case op_vec_info_type:
      done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case assignment_vec_info_type:
      done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case load_vec_info_type:
      done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
                                slp_node_instance);
      gcc_assert (done);
      break;

    case store_vec_info_type:
      done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
        {
          /* In case of interleaving, the whole chain is vectorized when the
             last store in the chain is reached.  Store stmts before the last
             one are skipped, and their vec_stmt_info shouldn't be freed
             meanwhile.  */
          *grouped_store = true;
          if (STMT_VINFO_VEC_STMT (stmt_info))
            is_store = true;
        }
      else
        is_store = true;
      break;

    case condition_vec_info_type:
      done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
      gcc_assert (done);
      break;

    case call_vec_info_type:
      done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
      stmt = gsi_stmt (*gsi);
      if (is_gimple_call (stmt)
          && gimple_call_internal_p (stmt)
          && gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
        is_store = true;
      break;

    case call_simd_clone_vec_info_type:
      done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
      stmt = gsi_stmt (*gsi);
      break;

    case reduc_vec_info_type:
      done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    default:
      if (!STMT_VINFO_LIVE_P (stmt_info))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "stmt not supported.\n");
          gcc_unreachable ();
        }
    }
  /* Handle inner-loop stmts whose DEF is used in the loop-nest that
     is being vectorized, but outside the immediately enclosing loop.  */
  if (vec_stmt
      && STMT_VINFO_LOOP_VINFO (stmt_info)
      && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
                                  STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
      && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
          || STMT_VINFO_RELEVANT (stmt_info) ==
                                        vect_used_in_outer_by_reduction))
    {
      struct loop *innerloop = LOOP_VINFO_LOOP (
                                 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
      imm_use_iterator imm_iter;
      use_operand_p use_p;
      tree scalar_dest;
      gimple exit_phi;

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "Record the vdef for outer-loop vectorization.\n");

      /* Find the relevant loop-exit phi-node, and record the vec_stmt there
         (to be used when vectorizing outer-loop stmts that use the DEF of
         STMT).  */
      if (gimple_code (stmt) == GIMPLE_PHI)
        scalar_dest = PHI_RESULT (stmt);
      else
        scalar_dest = gimple_assign_lhs (stmt);

      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
        {
          if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
            {
              exit_phi = USE_STMT (use_p);
              STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
            }
        }
    }
  /* Handle stmts whose DEF is used outside the loop-nest that is
     being vectorized.  */
  if (STMT_VINFO_LIVE_P (stmt_info)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    {
      done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
      gcc_assert (done);
    }

  if (vec_stmt)
    STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;

  return is_store;
}

/* Remove a group of stores (for SLP or interleaving), free their
   stmt_vec_info.  */

void
vect_remove_stores (gimple first_stmt)
{
  gimple next = first_stmt;
  gimple tmp;
  gimple_stmt_iterator next_si;

  while (next)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (next);

      tmp = GROUP_NEXT_ELEMENT (stmt_info);
      if (is_pattern_stmt_p (stmt_info))
        next = STMT_VINFO_RELATED_STMT (stmt_info);
      /* Free the attached stmt_vec_info and remove the stmt.  */
      next_si = gsi_for_stmt (next);
      unlink_stmt_vdef (next);
      gsi_remove (&next_si, true);
      release_defs (next);
      free_stmt_vec_info (next);
      next = tmp;
    }
}

/* Function new_stmt_vec_info.

   Create and initialize a new stmt_vec_info struct for STMT.  */
stmt_vec_info
new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
                   bb_vec_info bb_vinfo)
{
  stmt_vec_info res;
  res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));

  STMT_VINFO_TYPE (res) = undef_vec_info_type;
  STMT_VINFO_STMT (res) = stmt;
  STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
  STMT_VINFO_BB_VINFO (res) = bb_vinfo;
  STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
  STMT_VINFO_LIVE_P (res) = false;
  STMT_VINFO_VECTYPE (res) = NULL;
  STMT_VINFO_VEC_STMT (res) = NULL;
  STMT_VINFO_VECTORIZABLE (res) = true;
  STMT_VINFO_IN_PATTERN_P (res) = false;
  STMT_VINFO_RELATED_STMT (res) = NULL;
  STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
  STMT_VINFO_DATA_REF (res) = NULL;

  STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
  STMT_VINFO_DR_OFFSET (res) = NULL;
  STMT_VINFO_DR_INIT (res) = NULL;
  STMT_VINFO_DR_STEP (res) = NULL;
  STMT_VINFO_DR_ALIGNED_TO (res) = NULL;

  if (gimple_code (stmt) == GIMPLE_PHI
      && is_loop_header_bb_p (gimple_bb (stmt)))
    STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
  else
    STMT_VINFO_DEF_TYPE (res) = vect_internal_def;

  STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
  STMT_SLP_TYPE (res) = loop_vect;
  GROUP_FIRST_ELEMENT (res) = NULL;
  GROUP_NEXT_ELEMENT (res) = NULL;
  GROUP_SIZE (res) = 0;
  GROUP_STORE_COUNT (res) = 0;
  GROUP_GAP (res) = 0;
  GROUP_SAME_DR_STMT (res) = NULL;

  return res;
}

/* Create a hash table for stmt_vec_info.  */

void
init_stmt_vec_info_vec (void)
{
  gcc_assert (!stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.create (50);
}

/* Free hash table for stmt_vec_info.  */

void
free_stmt_vec_info_vec (void)
{
  unsigned int i;
  vec_void_p info;
  FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
    if (info != NULL)
      free_stmt_vec_info (STMT_VINFO_STMT ((stmt_vec_info) info));
  gcc_assert (stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.release ();
}

/* Free stmt vectorization related info.  */

void
free_stmt_vec_info (gimple stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  if (!stmt_info)
    return;
  /* Check if this statement has a related "pattern stmt"
     (introduced by the vectorizer during the pattern recognition
     pass).  Free pattern's stmt_vec_info and def stmt's stmt_vec_info
     too.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      stmt_vec_info patt_info
        = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
      if (patt_info)
        {
          gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
          gimple patt_stmt = STMT_VINFO_STMT (patt_info);
          gimple_set_bb (patt_stmt, NULL);
          tree lhs = gimple_get_lhs (patt_stmt);
          if (TREE_CODE (lhs) == SSA_NAME)
            release_ssa_name (lhs);
          if (seq)
            {
              gimple_stmt_iterator si;
              for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
                {
                  gimple seq_stmt = gsi_stmt (si);
                  gimple_set_bb (seq_stmt, NULL);
                  lhs = gimple_get_lhs (patt_stmt);
                  if (TREE_CODE (lhs) == SSA_NAME)
                    release_ssa_name (lhs);
                  free_stmt_vec_info (seq_stmt);
                }
            }
          free_stmt_vec_info (patt_stmt);
        }
    }

  STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
  set_vinfo_for_stmt (stmt, NULL);
  free (stmt_info);
}

/* Function get_vectype_for_scalar_type_and_size.

   Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
   by the target.  */
static tree
get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
{
  enum machine_mode inner_mode = TYPE_MODE (scalar_type);
  enum machine_mode simd_mode;
  unsigned int nbytes = GET_MODE_SIZE (inner_mode);
  int nunits;
  tree vectype;

  if (nbytes == 0)
    return NULL_TREE;

  if (GET_MODE_CLASS (inner_mode) != MODE_INT
      && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
    return NULL_TREE;

  /* For vector types of elements whose mode precision doesn't
     match their type's precision we use an element type of mode
     precision.  The vectorization routines will have to make sure
     they support the proper result truncation/extension.
     We also make sure to build vector types with INTEGER_TYPE
     component type only.  */
  if (INTEGRAL_TYPE_P (scalar_type)
      && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
          || TREE_CODE (scalar_type) != INTEGER_TYPE))
    scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
                                                  TYPE_UNSIGNED (scalar_type));

  /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
     When the component mode passes the above test simply use a type
     corresponding to that mode.  The theory is that any use that
     would cause problems with this will disable vectorization anyway.  */
  else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
           && !INTEGRAL_TYPE_P (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);

  /* We can't build a vector type of elements with alignment bigger than
     their size.  */
  else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode,
                                                  TYPE_UNSIGNED (scalar_type));

  /* If we fell back to using the mode, fail if there was
     no scalar type for it.  */
  if (scalar_type == NULL_TREE)
    return NULL_TREE;

  /* If no size was supplied use the mode the target prefers.  Otherwise
     lookup a vector mode of the specified size.  */
  if (size == 0)
    simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
  else
    simd_mode = mode_for_vector (inner_mode, size / nbytes);
  nunits = GET_MODE_SIZE (simd_mode) / nbytes;
  if (nunits <= 1)
    return NULL_TREE;

  vectype = build_vector_type (scalar_type, nunits);

  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
    return NULL_TREE;

  return vectype;
}

unsigned int current_vector_size;

/* Function get_vectype_for_scalar_type.

   Returns the vector type corresponding to SCALAR_TYPE as supported
   by the target.  */

tree
get_vectype_for_scalar_type (tree scalar_type)
{
  tree vectype;
  vectype = get_vectype_for_scalar_type_and_size (scalar_type,
                                                  current_vector_size);
  if (vectype
      && current_vector_size == 0)
    current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
  return vectype;
}

/* Function get_same_sized_vectype

   Returns a vector type corresponding to SCALAR_TYPE of size
   VECTOR_TYPE if supported by the target.  */

tree
get_same_sized_vectype (tree scalar_type, tree vector_type)
{
  return get_vectype_for_scalar_type_and_size
           (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
}

/* Function vect_is_simple_use.

   Input:
   LOOP_VINFO - the vect info of the loop that is being vectorized.
   BB_VINFO - the vect info of the basic block that is being vectorized.
   OPERAND - operand of STMT in the loop or bb.
   DEF - the defining stmt in case OPERAND is an SSA_NAME.

   Returns whether a stmt with OPERAND can be vectorized.
   For loops, supportable operands are constants, loop invariants, and operands
   that are defined by the current iteration of the loop.  Unsupportable
   operands are those that are defined by a previous iteration of the loop (as
   is the case in reduction/induction computations).
   For basic blocks, supportable operands are constants and bb invariants.
   For now, operands defined outside the basic block are not supported.  */
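/* Illustrative sketch (not from the original comment): in

     for (i = 0; i < n; i++)
       {
         x = a[i] + c;    <- c is loop-invariant: a supportable operand
         s = s + x;       <- s is defined by a previous iteration: not a
       }                     simple use, it must be handled as a reduction.  */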
bool
vect_is_simple_use (tree operand, gimple stmt, loop_vec_info loop_vinfo,
                    bb_vec_info bb_vinfo, gimple *def_stmt,
                    tree *def, enum vect_def_type *dt)
{
  basic_block bb;
  stmt_vec_info stmt_vinfo;
  struct loop *loop = NULL;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  *def_stmt = NULL;
  *def = NULL_TREE;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "vect_is_simple_use: operand ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
      dump_printf (MSG_NOTE, "\n");
    }

  if (CONSTANT_CLASS_P (operand))
    {
      *dt = vect_constant_def;
      return true;
    }

  if (is_gimple_min_invariant (operand))
    {
      *def = operand;
      *dt = vect_external_def;
      return true;
    }

  if (TREE_CODE (operand) == PAREN_EXPR)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location, "non-associatable copy.\n");
      operand = TREE_OPERAND (operand, 0);
    }

  if (TREE_CODE (operand) != SSA_NAME)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not ssa-name.\n");
      return false;
    }

  *def_stmt = SSA_NAME_DEF_STMT (operand);
  if (*def_stmt == NULL)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "no def_stmt.\n");
      return false;
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
      dump_printf (MSG_NOTE, "\n");
    }

  /* Empty stmt is expected only in case of a function argument.
     (Otherwise - we expect a phi_node or a GIMPLE_ASSIGN).  */
  if (gimple_nop_p (*def_stmt))
    {
      *def = operand;
      *dt = vect_external_def;
      return true;
    }

  bb = gimple_bb (*def_stmt);

  if ((loop && !flow_bb_inside_loop_p (loop, bb))
      || (!loop && bb != BB_VINFO_BB (bb_vinfo))
      || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
    *dt = vect_external_def;
  else
    {
      stmt_vinfo = vinfo_for_stmt (*def_stmt);
      *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
    }

  if (*dt == vect_unknown_def_type
      || (stmt
          && *dt == vect_double_reduction_def
          && gimple_code (stmt) != GIMPLE_PHI))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Unsupported pattern.\n");
      return false;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "type of def: %d.\n", *dt);

  switch (gimple_code (*def_stmt))
    {
    case GIMPLE_PHI:
      *def = gimple_phi_result (*def_stmt);
      break;

    case GIMPLE_ASSIGN:
      *def = gimple_assign_lhs (*def_stmt);
      break;

    case GIMPLE_CALL:
      *def = gimple_call_lhs (*def_stmt);
      if (*def != NULL)
        break;
      /* FALLTHRU */
    default:
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "unsupported defining stmt:\n");
      return false;
    }

  return true;
}

/* Function vect_is_simple_use_1.

   Same as vect_is_simple_use but also determines the vector operand
   type of OPERAND and stores it to *VECTYPE.  If the definition of
   OPERAND is vect_uninitialized_def, vect_constant_def or
   vect_external_def *VECTYPE will be set to NULL_TREE and the caller
   is responsible to compute the best suited vector type for the
   scalar operand.  */
bool
vect_is_simple_use_1 (tree operand, gimple stmt, loop_vec_info loop_vinfo,
                      bb_vec_info bb_vinfo, gimple *def_stmt,
                      tree *def, enum vect_def_type *dt, tree *vectype)
{
  if (!vect_is_simple_use (operand, stmt, loop_vinfo, bb_vinfo, def_stmt,
                           def, dt))
    return false;

  /* Now get a vector type if the def is internal, otherwise supply
     NULL_TREE and leave it up to the caller to figure out a proper
     type for the use stmt.  */
  if (*dt == vect_internal_def
      || *dt == vect_induction_def
      || *dt == vect_reduction_def
      || *dt == vect_double_reduction_def
      || *dt == vect_nested_cycle)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);

      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
          && !STMT_VINFO_RELEVANT (stmt_info)
          && !STMT_VINFO_LIVE_P (stmt_info))
        stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));

      *vectype = STMT_VINFO_VECTYPE (stmt_info);
      gcc_assert (*vectype != NULL_TREE);
    }
  else if (*dt == vect_uninitialized_def
           || *dt == vect_constant_def
           || *dt == vect_external_def)
    *vectype = NULL_TREE;
  else
    gcc_unreachable ();

  return true;
}

/* Function supportable_widening_operation

   Check whether an operation represented by the code CODE is a
   widening operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   producing a result of type VECTYPE_OUT).

   Widening operations we currently support are NOP (CONVERT), FLOAT
   and WIDEN_MULT.  This function checks if these operations are supported
   by the target platform either directly (via vector tree-codes), or via
   target builtins.

   Output:
   - CODE1 and CODE2 are codes of vector operations to be used when
     vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
     case of multi-step conversion (like char->short->int - in that case
     MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
     widening operation (short in the above example).  */
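/* Illustrative sketch (not from the original comment): a char->int
   conversion on V16QI input heading for V4SI output would go through a
   V8HI intermediate, so on success the outputs would be, e.g.,
   CODE1/CODE2 = VEC_UNPACK_LO_EXPR/VEC_UNPACK_HI_EXPR, MULTI_STEP_CVT = 1
   and INTERM_TYPES = { vector(8) short int }.  */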
bool
supportable_widening_operation (enum tree_code code, gimple stmt,
                                tree vectype_out, tree vectype_in,
                                enum tree_code *code1, enum tree_code *code2,
                                int *multi_step_cvt,
                                vec<tree> *interm_types)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *vect_loop = NULL;
  enum machine_mode vec_mode;
  enum insn_code icode1, icode2;
  optab optab1, optab2;
  tree vectype = vectype_in;
  tree wide_vectype = vectype_out;
  enum tree_code c1, c2;
  int i;
  tree prev_type, intermediate_type;
  enum machine_mode intermediate_mode, prev_mode;
  optab optab3, optab4;

  *multi_step_cvt = 0;
  if (loop_info)
    vect_loop = LOOP_VINFO_LOOP (loop_info);

  switch (code)
    {
    case WIDEN_MULT_EXPR:
      /* The result of a vectorized widening operation usually requires
         two vectors (because the widened results do not fit into one vector).
         The generated vector results would normally be expected to be
         generated in the same order as in the original scalar computation,
         i.e. if 8 results are generated in each vector iteration, they are
         to be organized as follows:
                vect1: [res1,res2,res3,res4],
                vect2: [res5,res6,res7,res8].

         However, in the special case that the result of the widening
         operation is used in a reduction computation only, the order doesn't
         matter (because when vectorizing a reduction we change the order of
         the computation).  Some targets can take advantage of this and
         generate more efficient code.  For example, targets like Altivec,
         that support widen_mult using a sequence of {mult_even,mult_odd}
         generate the following vectors:
                vect1: [res1,res3,res5,res7],
                vect2: [res2,res4,res6,res8].

         When vectorizing outer-loops, we execute the inner-loop sequentially
         (each vectorized inner-loop iteration contributes to VF outer-loop
         iterations in parallel).  We therefore don't allow changing the
         order of the computation in the inner-loop during outer-loop
         vectorization.  */
      /* TODO: Another case in which order doesn't *really* matter is when we
         widen and then contract again, e.g. (short)((int)x * y >> 8).
         Normally, pack_trunc performs an even/odd permute, whereas the
         repack from an even/odd expansion would be an interleave, which
         would be significantly simpler for e.g. AVX2.  */
      /* In any case, in order to avoid duplicating the code below, recurse
         on VEC_WIDEN_MULT_EVEN_EXPR.  If it succeeds, all the return values
         are properly set up for the caller.  If we fail, we'll continue with
         a VEC_WIDEN_MULT_LO/HI_EXPR check.  */
      if (vect_loop
          && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
          && !nested_in_vect_loop_p (vect_loop, stmt)
          && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
                                             stmt, vectype_out, vectype_in,
                                             code1, code2, multi_step_cvt,
                                             interm_types))
        {
          /* Elements in a vector with vect_used_by_reduction property cannot
             be reordered if the use chain with this property does not have
             the same operation.  One such example is s += a * b, where
             elements in a and b cannot be reordered.  Here we check if the
             vector defined by STMT is only directly used in the reduction
             statement.  */
          tree lhs = gimple_assign_lhs (stmt);
          use_operand_p dummy;
          gimple use_stmt;
          stmt_vec_info use_stmt_info = NULL;
          if (single_imm_use (lhs, &dummy, &use_stmt)
              && (use_stmt_info = vinfo_for_stmt (use_stmt))
              && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
            return true;
        }
      c1 = VEC_WIDEN_MULT_LO_EXPR;
      c2 = VEC_WIDEN_MULT_HI_EXPR;
      break;

    case VEC_WIDEN_MULT_EVEN_EXPR:
      /* Support the recursion induced just above.  */
      c1 = VEC_WIDEN_MULT_EVEN_EXPR;
      c2 = VEC_WIDEN_MULT_ODD_EXPR;
      break;

    case WIDEN_LSHIFT_EXPR:
      c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
      c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
      break;

    CASE_CONVERT:
      c1 = VEC_UNPACK_LO_EXPR;
      c2 = VEC_UNPACK_HI_EXPR;
      break;

    case FLOAT_EXPR:
      c1 = VEC_UNPACK_FLOAT_LO_EXPR;
      c2 = VEC_UNPACK_FLOAT_HI_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
         VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
         computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
    {
      enum tree_code ctmp = c1;
      c1 = c2;
      c2 = ctmp;
    }
  if (code == FIX_TRUNC_EXPR)
    {
      /* The signedness is determined from output operand.  */
      optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
      optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
    }
  else
    {
      optab1 = optab_for_tree_code (c1, vectype, optab_default);
      optab2 = optab_for_tree_code (c2, vectype, optab_default);
    }

  if (!optab1 || !optab2)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
      || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;
  *code2 = c2;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
      && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
    return true;

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */

  prev_type = vectype;
  prev_mode = vec_mode;

  if (!CONVERT_EXPR_CODE_P (code))
    return false;

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in promotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do
     not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      intermediate_type
        = lang_hooks.types.type_for_mode (intermediate_mode,
                                          TYPE_UNSIGNED (prev_type));
      optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
      optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);

      if (!optab3 || !optab4
          || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
          || insn_data[icode1].operand[0].mode != intermediate_mode
          || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
          || insn_data[icode2].operand[0].mode != intermediate_mode
          || ((icode1 = optab_handler (optab3, intermediate_mode))
              == CODE_FOR_nothing)
          || ((icode2 = optab_handler (optab4, intermediate_mode))
              == CODE_FOR_nothing))
        break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
          && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
        return true;

      prev_type = intermediate_type;
      prev_mode = intermediate_mode;
    }

  interm_types->release ();
  return false;
}

/* Function supportable_narrowing_operation

   Check whether an operation represented by the code CODE is a
   narrowing operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   and producing a result of type VECTYPE_OUT).

   Narrowing operations we currently support are NOP (CONVERT) and
   FIX_TRUNC.  This function checks if these operations are supported by
   the target platform directly via vector tree-codes.

   Output:
   - CODE1 is the code of a vector operation to be used when
     vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
     case of multi-step conversion (like int->short->char - in that case
     MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
     narrowing operation (short in the above example).  */
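/* Illustrative sketch (not from the original comment): an int->char
   conversion on V4SI input heading for V16QI output would go through a
   V8HI intermediate, so on success the outputs would be, e.g.,
   CODE1 = VEC_PACK_TRUNC_EXPR, MULTI_STEP_CVT = 1 and
   INTERM_TYPES = { vector(8) short int }.  */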
bool
supportable_narrowing_operation (enum tree_code code,
                                 tree vectype_out, tree vectype_in,
                                 enum tree_code *code1, int *multi_step_cvt,
                                 vec<tree> *interm_types)
{
  enum machine_mode vec_mode;
  enum insn_code icode1;
  optab optab1, interm_optab;
  tree vectype = vectype_in;
  tree narrow_vectype = vectype_out;
  enum tree_code c1;
  tree intermediate_type;
  enum machine_mode intermediate_mode, prev_mode;
  int i;
  bool uns;

  *multi_step_cvt = 0;
  switch (code)
    {
    CASE_CONVERT:
      c1 = VEC_PACK_TRUNC_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      c1 = VEC_PACK_FIX_TRUNC_EXPR;
      break;

    case FLOAT_EXPR:
      /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
         tree code and optabs used for computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == FIX_TRUNC_EXPR)
    /* The signedness is determined from output operand.  */
    optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
  else
    optab1 = optab_for_tree_code (c1, vectype, optab_default);

  if (!optab1)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
    return true;

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */
  prev_mode = vec_mode;
  if (code == FIX_TRUNC_EXPR)
    uns = TYPE_UNSIGNED (vectype_out);
  else
    uns = TYPE_UNSIGNED (vectype);

  /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
     conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
     costly than signed.  */
  if (code == FIX_TRUNC_EXPR && uns)
    {
      enum insn_code icode2;

      intermediate_type
        = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
      interm_optab
        = optab_for_tree_code (c1, intermediate_type, optab_default);
      if (interm_optab != unknown_optab
          && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
          && insn_data[icode1].operand[0].mode
             == insn_data[icode2].operand[0].mode)
        {
          uns = false;
          optab1 = interm_optab;
          icode1 = icode2;
        }
    }

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in promotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      intermediate_type
        = lang_hooks.types.type_for_mode (intermediate_mode, uns);
      interm_optab
        = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
                               optab_default);
      if (!interm_optab
          || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
          || insn_data[icode1].operand[0].mode != intermediate_mode
          || ((icode1 = optab_handler (interm_optab, intermediate_mode))
              == CODE_FOR_nothing))
        break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
        return true;

      prev_mode = intermediate_mode;
      optab1 = interm_optab;
    }

  interm_types->release ();
  return false;
}