Fix PR/46316
[official-gcc.git] / gcc / tree-vect-stmts.c
blob 3d18dfe95c1c335bfe2b7987bba240c07d08250d
1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4 Contributed by Dorit Naishlos <dorit@il.ibm.com>
5 and Ira Rosen <irar@il.ibm.com>
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "ggc.h"
28 #include "tree.h"
29 #include "target.h"
30 #include "basic-block.h"
31 #include "tree-pretty-print.h"
32 #include "gimple-pretty-print.h"
33 #include "tree-flow.h"
34 #include "tree-dump.h"
35 #include "cfgloop.h"
36 #include "cfglayout.h"
37 #include "expr.h"
38 #include "recog.h"
39 #include "optabs.h"
40 #include "diagnostic-core.h"
41 #include "toplev.h"
42 #include "tree-vectorizer.h"
43 #include "langhooks.h"
46 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
48 /* Function vect_mark_relevant.
50 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
52 static void
53 vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt,
54 enum vect_relevant relevant, bool live_p)
56 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
57 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
58 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
60 if (vect_print_dump_info (REPORT_DETAILS))
61 fprintf (vect_dump, "mark relevant %d, live %d.", relevant, live_p);
63 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
65 gimple pattern_stmt;
67 /* This is the last stmt in a sequence that was detected as a
68 pattern that can potentially be vectorized. Don't mark the stmt
69 as relevant/live because it's not going to be vectorized.
70 Instead mark the pattern-stmt that replaces it. */
72 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
74 if (vect_print_dump_info (REPORT_DETAILS))
75 fprintf (vect_dump, "last stmt in pattern. don't mark relevant/live.");
76 stmt_info = vinfo_for_stmt (pattern_stmt);
77 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
78 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
79 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
80 stmt = pattern_stmt;
83 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
84 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
85 STMT_VINFO_RELEVANT (stmt_info) = relevant;
87 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
88 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
90 if (vect_print_dump_info (REPORT_DETAILS))
91 fprintf (vect_dump, "already marked relevant/live.");
92 return;
95 VEC_safe_push (gimple, heap, *worklist, stmt);
99 /* Function vect_stmt_relevant_p.
101 Return true if STMT, in the loop represented by LOOP_VINFO, is
102 "relevant for vectorization".
104 A stmt is considered "relevant for vectorization" if:
105 - it has uses outside the loop.
106 - it has vdefs (it alters memory).
107 - it is a control stmt in the loop (other than the exit condition).
109 CHECKME: what other side effects would the vectorizer allow? */
111 static bool
112 vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
113 enum vect_relevant *relevant, bool *live_p)
115 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
116 ssa_op_iter op_iter;
117 imm_use_iterator imm_iter;
118 use_operand_p use_p;
119 def_operand_p def_p;
121 *relevant = vect_unused_in_scope;
122 *live_p = false;
124 /* cond stmt other than loop exit cond. */
125 if (is_ctrl_stmt (stmt)
126 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
127 != loop_exit_ctrl_vec_info_type)
128 *relevant = vect_used_in_scope;
130 /* changing memory. */
131 if (gimple_code (stmt) != GIMPLE_PHI)
132 if (gimple_vdef (stmt))
134 if (vect_print_dump_info (REPORT_DETAILS))
135 fprintf (vect_dump, "vec_stmt_relevant_p: stmt has vdefs.");
136 *relevant = vect_used_in_scope;
139 /* uses outside the loop. */
140 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
142 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
144 basic_block bb = gimple_bb (USE_STMT (use_p));
145 if (!flow_bb_inside_loop_p (loop, bb))
147 if (vect_print_dump_info (REPORT_DETAILS))
148 fprintf (vect_dump, "vec_stmt_relevant_p: used out of loop.");
150 if (is_gimple_debug (USE_STMT (use_p)))
151 continue;
153 /* We expect all such uses to be in the loop exit phis
154 (because of loop closed form).  */
155 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
156 gcc_assert (bb == single_exit (loop)->dest);
158 *live_p = true;
163 return (*live_p || *relevant);
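/* For illustration: in

     for (i = 0; i < n; i++)
       {
         a[i] = b[i] + 1;     <-- alters memory, so it is marked
                                  vect_used_in_scope
         s_3 = s_2 + b[i];
       }
     ... = s_3;               <-- s_3 is used after the loop, so its
                                  defining stmt is marked live

   Out-of-loop uses are expected only in the exit-block phis created by
   loop-closed SSA form, as the asserts above verify.  */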
167 /* Function exist_non_indexing_operands_for_use_p
169 USE is one of the uses attached to STMT. Check if USE is
170 used in STMT for anything other than indexing an array. */
172 static bool
173 exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
175 tree operand;
176 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
178 /* USE corresponds to some operand in STMT. If there is no data
179 reference in STMT, then any operand that corresponds to USE
180 is not indexing an array. */
181 if (!STMT_VINFO_DATA_REF (stmt_info))
182 return true;
184 /* STMT has a data_ref. FORNOW this means that it's one of
185 the following forms:
186 -1- ARRAY_REF = var
187 -2- var = ARRAY_REF
188 (This should have been verified in analyze_data_refs).
190 'var' in the second case corresponds to a def, not a use,
191 so USE cannot correspond to any operands that are not used
192 for array indexing.
194 Therefore, all we need to check is if STMT falls into the
195 first case, and whether var corresponds to USE. */
197 if (!gimple_assign_copy_p (stmt))
198 return false;
199 if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
200 return false;
201 operand = gimple_assign_rhs1 (stmt);
202 if (TREE_CODE (operand) != SSA_NAME)
203 return false;
205 if (operand == use)
206 return true;
208 return false;
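/* For example, given the store  a[i_1] = x_2  this function returns
   true for USE == x_2 (the value being stored) and false for
   USE == i_1, which is only used to compute the array address.  */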
213 /* Function process_use.
215 Inputs:
216 - a USE in STMT in a loop represented by LOOP_VINFO
217 - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
218 that defined USE. This is done by calling mark_relevant and passing it
219 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
221 Outputs:
222 Generally, LIVE_P and RELEVANT are used to define the liveness and
223 relevance info of the DEF_STMT of this USE:
224 STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
225 STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
226 Exceptions:
227 - case 1: If USE is used only for address computations (e.g. array indexing),
228 which does not need to be directly vectorized, then the liveness/relevance
229 of the respective DEF_STMT is left unchanged.
230 - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
231 skip DEF_STMT because it has already been processed.
232 - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
233 be modified accordingly.
235 Return true if everything is as expected. Return false otherwise. */
237 static bool
238 process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
239 enum vect_relevant relevant, VEC(gimple,heap) **worklist)
241 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
242 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
243 stmt_vec_info dstmt_vinfo;
244 basic_block bb, def_bb;
245 tree def;
246 gimple def_stmt;
247 enum vect_def_type dt;
249 /* case 1: we are only interested in uses that need to be vectorized. Uses
250 that are used for address computation are not considered relevant. */
251 if (!exist_non_indexing_operands_for_use_p (use, stmt))
252 return true;
254 if (!vect_is_simple_use (use, loop_vinfo, NULL, &def_stmt, &def, &dt))
256 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
257 fprintf (vect_dump, "not vectorized: unsupported use in stmt.");
258 return false;
261 if (!def_stmt || gimple_nop_p (def_stmt))
262 return true;
264 def_bb = gimple_bb (def_stmt);
265 if (!flow_bb_inside_loop_p (loop, def_bb))
267 if (vect_print_dump_info (REPORT_DETAILS))
268 fprintf (vect_dump, "def_stmt is out of loop.");
269 return true;
272 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
273 DEF_STMT must have already been processed, because this should be the
274 only way that STMT, which is a reduction-phi, was put in the worklist,
275 as there should be no other uses for DEF_STMT in the loop. So we just
276 check that everything is as expected, and we are done. */
277 dstmt_vinfo = vinfo_for_stmt (def_stmt);
278 bb = gimple_bb (stmt);
279 if (gimple_code (stmt) == GIMPLE_PHI
280 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
281 && gimple_code (def_stmt) != GIMPLE_PHI
282 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
283 && bb->loop_father == def_bb->loop_father)
285 if (vect_print_dump_info (REPORT_DETAILS))
286 fprintf (vect_dump, "reduc-stmt defining reduc-phi in the same nest.");
287 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
288 dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
289 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
290 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
291 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
292 return true;
295 /* case 3a: outer-loop stmt defining an inner-loop stmt:
296 outer-loop-header-bb:
297 d = def_stmt
298 inner-loop:
299 stmt # use (d)
300 outer-loop-tail-bb:
301 ... */
302 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
304 if (vect_print_dump_info (REPORT_DETAILS))
305 fprintf (vect_dump, "outer-loop def-stmt defining inner-loop stmt.");
307 switch (relevant)
309 case vect_unused_in_scope:
310 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
311 vect_used_in_scope : vect_unused_in_scope;
312 break;
314 case vect_used_in_outer_by_reduction:
315 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
316 relevant = vect_used_by_reduction;
317 break;
319 case vect_used_in_outer:
320 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
321 relevant = vect_used_in_scope;
322 break;
324 case vect_used_in_scope:
325 break;
327 default:
328 gcc_unreachable ();
332 /* case 3b: inner-loop stmt defining an outer-loop stmt:
333 outer-loop-header-bb:
335 inner-loop:
336 d = def_stmt
337 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
338 stmt # use (d) */
339 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
341 if (vect_print_dump_info (REPORT_DETAILS))
342 fprintf (vect_dump, "inner-loop def-stmt defining outer-loop stmt.");
344 switch (relevant)
346 case vect_unused_in_scope:
347 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
348 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
349 vect_used_in_outer_by_reduction : vect_unused_in_scope;
350 break;
352 case vect_used_by_reduction:
353 relevant = vect_used_in_outer_by_reduction;
354 break;
356 case vect_used_in_scope:
357 relevant = vect_used_in_outer;
358 break;
360 default:
361 gcc_unreachable ();
365 vect_mark_relevant (worklist, def_stmt, relevant, live_p);
366 return true;
370 /* Function vect_mark_stmts_to_be_vectorized.
372 Not all stmts in the loop need to be vectorized. For example:
374 for i...
375 for j...
376 1. T0 = i + j
377 2. T1 = a[T0]
379 3. j = j + 1
381 Stmts 1 and 3 do not need to be vectorized, because loop control and
382 addressing of vectorized data-refs are handled differently.
384 This pass detects such stmts. */
386 bool
387 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
389 VEC(gimple,heap) *worklist;
390 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
391 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
392 unsigned int nbbs = loop->num_nodes;
393 gimple_stmt_iterator si;
394 gimple stmt;
395 unsigned int i;
396 stmt_vec_info stmt_vinfo;
397 basic_block bb;
398 gimple phi;
399 bool live_p;
400 enum vect_relevant relevant, tmp_relevant;
401 enum vect_def_type def_type;
403 if (vect_print_dump_info (REPORT_DETAILS))
404 fprintf (vect_dump, "=== vect_mark_stmts_to_be_vectorized ===");
406 worklist = VEC_alloc (gimple, heap, 64);
408 /* 1. Init worklist. */
409 for (i = 0; i < nbbs; i++)
411 bb = bbs[i];
412 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
414 phi = gsi_stmt (si);
415 if (vect_print_dump_info (REPORT_DETAILS))
417 fprintf (vect_dump, "init: phi relevant? ");
418 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
421 if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
422 vect_mark_relevant (&worklist, phi, relevant, live_p);
424 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
426 stmt = gsi_stmt (si);
427 if (vect_print_dump_info (REPORT_DETAILS))
429 fprintf (vect_dump, "init: stmt relevant? ");
430 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
433 if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
434 vect_mark_relevant (&worklist, stmt, relevant, live_p);
438 /* 2. Process_worklist */
439 while (VEC_length (gimple, worklist) > 0)
441 use_operand_p use_p;
442 ssa_op_iter iter;
444 stmt = VEC_pop (gimple, worklist);
445 if (vect_print_dump_info (REPORT_DETAILS))
447 fprintf (vect_dump, "worklist: examine stmt: ");
448 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
451 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
452 (DEF_STMT) as relevant/irrelevant and live/dead according to the
453 liveness and relevance properties of STMT. */
454 stmt_vinfo = vinfo_for_stmt (stmt);
455 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
456 live_p = STMT_VINFO_LIVE_P (stmt_vinfo);
458 /* Generally, the liveness and relevance properties of STMT are
459 propagated as is to the DEF_STMTs of its USEs:
460 live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
461 relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)
463 One exception is when STMT has been identified as defining a reduction
464 variable; in this case we set the liveness/relevance as follows:
465 live_p = false
466 relevant = vect_used_by_reduction
467 This is because we distinguish between two kinds of relevant stmts -
468 those that are used by a reduction computation, and those that are
469 (also) used by a regular computation. This allows us later on to
470 identify stmts that are used solely by a reduction, in which case the
471 order of the results they produce does not have to be preserved.
473 def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
474 tmp_relevant = relevant;
475 switch (def_type)
477 case vect_reduction_def:
478 switch (tmp_relevant)
480 case vect_unused_in_scope:
481 relevant = vect_used_by_reduction;
482 break;
484 case vect_used_by_reduction:
485 if (gimple_code (stmt) == GIMPLE_PHI)
486 break;
487 /* fall through */
489 default:
490 if (vect_print_dump_info (REPORT_DETAILS))
491 fprintf (vect_dump, "unsupported use of reduction.");
493 VEC_free (gimple, heap, worklist);
494 return false;
497 live_p = false;
498 break;
500 case vect_nested_cycle:
501 if (tmp_relevant != vect_unused_in_scope
502 && tmp_relevant != vect_used_in_outer_by_reduction
503 && tmp_relevant != vect_used_in_outer)
505 if (vect_print_dump_info (REPORT_DETAILS))
506 fprintf (vect_dump, "unsupported use of nested cycle.");
508 VEC_free (gimple, heap, worklist);
509 return false;
512 live_p = false;
513 break;
515 case vect_double_reduction_def:
516 if (tmp_relevant != vect_unused_in_scope
517 && tmp_relevant != vect_used_by_reduction)
519 if (vect_print_dump_info (REPORT_DETAILS))
520 fprintf (vect_dump, "unsupported use of double reduction.");
522 VEC_free (gimple, heap, worklist);
523 return false;
526 live_p = false;
527 break;
529 default:
530 break;
533 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
535 tree op = USE_FROM_PTR (use_p);
536 if (!process_use (stmt, op, loop_vinfo, live_p, relevant, &worklist))
538 VEC_free (gimple, heap, worklist);
539 return false;
542 } /* while worklist */
544 VEC_free (gimple, heap, worklist);
545 return true;
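/* For illustration, in the loop

     for (i = 0; i < n; i++)
       a[i] = b[i] * c;

   the store to a[i] is found relevant by vect_stmt_relevant_p (it has a
   vdef) and seeds the worklist; processing its uses then marks the
   multiplication and the load from b[i] as vect_used_in_scope, while the
   induction increment and the address computations stay unmarked and are
   handled outside of statement vectorization.  */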
549 /* Get the cost by calling the target's vectorization cost builtin. */
551 static inline
552 int vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
554 tree dummy_type = NULL;
555 int dummy = 0;
557 return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
558 dummy_type, dummy);
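/* For example, vect_get_stmt_cost (vector_load) asks the target for the
   cost of one vector load; the vectype and misalignment arguments are not
   meaningful for these generic queries, hence the dummies above.  */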
562 /* Get cost for STMT. */
564 int
565 cost_for_stmt (gimple stmt)
567 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
569 switch (STMT_VINFO_TYPE (stmt_info))
571 case load_vec_info_type:
572 return vect_get_stmt_cost (scalar_load);
573 case store_vec_info_type:
574 return vect_get_stmt_cost (scalar_store);
575 case op_vec_info_type:
576 case condition_vec_info_type:
577 case assignment_vec_info_type:
578 case reduc_vec_info_type:
579 case induc_vec_info_type:
580 case type_promotion_vec_info_type:
581 case type_demotion_vec_info_type:
582 case type_conversion_vec_info_type:
583 case call_vec_info_type:
584 return vect_get_stmt_cost (scalar_stmt);
585 case undef_vec_info_type:
586 default:
587 gcc_unreachable ();
591 /* Function vect_model_simple_cost.
593 Models cost for simple operations, i.e. those that only emit ncopies of a
594 single op. Right now, this does not account for multiple insns that could
595 be generated for the single vector op. We will handle that shortly. */
597 void
598 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
599 enum vect_def_type *dt, slp_tree slp_node)
601 int i;
602 int inside_cost = 0, outside_cost = 0;
604 /* The SLP costs were already calculated during SLP tree build. */
605 if (PURE_SLP_STMT (stmt_info))
606 return;
608 inside_cost = ncopies * vect_get_stmt_cost (vector_stmt);
610 /* FORNOW: Assuming maximum 2 args per stmt. */
611 for (i = 0; i < 2; i++)
613 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
614 outside_cost += vect_get_stmt_cost (vector_stmt);
617 if (vect_print_dump_info (REPORT_COST))
618 fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, "
619 "outside_cost = %d .", inside_cost, outside_cost);
621 /* Set the costs either in STMT_INFO or SLP_NODE (if it exists). */
622 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
623 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
627 /* Function vect_cost_strided_group_size
629 For strided load or store, return the group_size only if it is the first
630 load or store of a group, else return 1. This ensures that group size is
631 only returned once per group. */
633 static int
634 vect_cost_strided_group_size (stmt_vec_info stmt_info)
636 gimple first_stmt = DR_GROUP_FIRST_DR (stmt_info);
638 if (first_stmt == STMT_VINFO_STMT (stmt_info))
639 return DR_GROUP_SIZE (stmt_info);
641 return 1;
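/* For example, for a group of four interleaved stores a[4i], a[4i+1],
   a[4i+2], a[4i+3], this returns 4 for the first store in the group and
   1 for the remaining three, so the whole-group overhead is charged
   only once.  */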
645 /* Function vect_model_store_cost
647 Models cost for stores. In the case of strided accesses, one access
648 has the overhead of the strided access attributed to it. */
650 void
651 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
652 enum vect_def_type dt, slp_tree slp_node)
654 int group_size;
655 unsigned int inside_cost = 0, outside_cost = 0;
656 struct data_reference *first_dr;
657 gimple first_stmt;
659 /* The SLP costs were already calculated during SLP tree build. */
660 if (PURE_SLP_STMT (stmt_info))
661 return;
663 if (dt == vect_constant_def || dt == vect_external_def)
664 outside_cost = vect_get_stmt_cost (scalar_to_vec);
666 /* Strided access? */
667 if (DR_GROUP_FIRST_DR (stmt_info))
669 if (slp_node)
671 first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
672 group_size = 1;
674 else
676 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
677 group_size = vect_cost_strided_group_size (stmt_info);
680 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
682 /* Not a strided access. */
683 else
685 group_size = 1;
686 first_dr = STMT_VINFO_DATA_REF (stmt_info);
689 /* Is this an access in a group of stores, which provide strided access?
690 If so, add in the cost of the permutes. */
691 if (group_size > 1)
693 /* Uses a high and low interleave operation for each needed permute. */
694 inside_cost = ncopies * exact_log2(group_size) * group_size
695 * vect_get_stmt_cost (vector_stmt);
697 if (vect_print_dump_info (REPORT_COST))
698 fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .",
699 group_size);
703 /* Costs of the stores. */
704 vect_get_store_cost (first_dr, ncopies, &inside_cost);
706 if (vect_print_dump_info (REPORT_COST))
707 fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, "
708 "outside_cost = %d .", inside_cost, outside_cost);
710 /* Set the costs either in STMT_INFO or SLP_NODE (if it exists). */
711 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
712 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
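/* Worked example (costs are target-dependent, numbers illustrative):
   for a non-SLP group of 4 interleaved stores with ncopies == 1, the
   permute cost above is 1 * log2(4) * 4 = 8 vector_stmt operations for
   the high/low interleaves, plus the cost of the vector stores
   themselves from vect_get_store_cost.  */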
716 /* Calculate cost of DR's memory access. */
717 void
718 vect_get_store_cost (struct data_reference *dr, int ncopies,
719 unsigned int *inside_cost)
721 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
723 switch (alignment_support_scheme)
725 case dr_aligned:
727 *inside_cost += ncopies * vect_get_stmt_cost (vector_store);
729 if (vect_print_dump_info (REPORT_COST))
730 fprintf (vect_dump, "vect_model_store_cost: aligned.");
732 break;
735 case dr_unaligned_supported:
737 gimple stmt = DR_STMT (dr);
738 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
739 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
741 /* Here, we assign an additional cost for the unaligned store. */
742 *inside_cost += ncopies
743 * targetm.vectorize.builtin_vectorization_cost (unaligned_store,
744 vectype, DR_MISALIGNMENT (dr));
746 if (vect_print_dump_info (REPORT_COST))
747 fprintf (vect_dump, "vect_model_store_cost: unaligned supported by "
748 "hardware.");
750 break;
753 default:
754 gcc_unreachable ();
759 /* Function vect_model_load_cost
761 Models cost for loads. In the case of strided accesses, the last access
762 has the overhead of the strided access attributed to it. Since unaligned
763 accesses are supported for loads, we also account for the costs of the
764 access scheme chosen. */
766 void
767 vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)
770 int group_size;
771 gimple first_stmt;
772 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
773 unsigned int inside_cost = 0, outside_cost = 0;
775 /* The SLP costs were already calculated during SLP tree build. */
776 if (PURE_SLP_STMT (stmt_info))
777 return;
779 /* Strided accesses? */
780 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
781 if (first_stmt && !slp_node)
783 group_size = vect_cost_strided_group_size (stmt_info);
784 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
786 /* Not a strided access. */
787 else
789 group_size = 1;
790 first_dr = dr;
793 /* Is this an access in a group of loads providing strided access?
794 If so, add in the cost of the permutes. */
795 if (group_size > 1)
797 /* Uses even and odd extract operations for each needed permute. */
798 inside_cost = ncopies * exact_log2(group_size) * group_size
799 * vect_get_stmt_cost (vector_stmt);
801 if (vect_print_dump_info (REPORT_COST))
802 fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .",
803 group_size);
806 /* The loads themselves. */
807 vect_get_load_cost (first_dr, ncopies,
808 ((!DR_GROUP_FIRST_DR (stmt_info)) || group_size > 1 || slp_node),
809 &inside_cost, &outside_cost);
811 if (vect_print_dump_info (REPORT_COST))
812 fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
813 "outside_cost = %d .", inside_cost, outside_cost);
815 /* Set the costs either in STMT_INFO or SLP_NODE (if it exists). */
816 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
817 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
821 /* Calculate cost of DR's memory access. */
822 void
823 vect_get_load_cost (struct data_reference *dr, int ncopies,
824 bool add_realign_cost, unsigned int *inside_cost,
825 unsigned int *outside_cost)
827 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
829 switch (alignment_support_scheme)
831 case dr_aligned:
833 *inside_cost += ncopies * vect_get_stmt_cost (vector_load);
835 if (vect_print_dump_info (REPORT_COST))
836 fprintf (vect_dump, "vect_model_load_cost: aligned.");
838 break;
840 case dr_unaligned_supported:
842 gimple stmt = DR_STMT (dr);
843 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
844 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
846 /* Here, we assign an additional cost for the unaligned load. */
847 *inside_cost += ncopies
848 * targetm.vectorize.builtin_vectorization_cost (unaligned_load,
849 vectype, DR_MISALIGNMENT (dr));
850 if (vect_print_dump_info (REPORT_COST))
851 fprintf (vect_dump, "vect_model_load_cost: unaligned supported by "
852 "hardware.");
854 break;
856 case dr_explicit_realign:
858 *inside_cost += ncopies * (2 * vect_get_stmt_cost (vector_load)
859 + vect_get_stmt_cost (vector_stmt));
861 /* FIXME: If the misalignment remains fixed across the iterations of
862 the containing loop, the following cost should be added to the
863 outside costs. */
864 if (targetm.vectorize.builtin_mask_for_load)
865 *inside_cost += vect_get_stmt_cost (vector_stmt);
867 break;
869 case dr_explicit_realign_optimized:
871 if (vect_print_dump_info (REPORT_COST))
872 fprintf (vect_dump, "vect_model_load_cost: unaligned software "
873 "pipelined.");
875 /* Unaligned software pipeline has a load of an address, an initial
876 load, and possibly a mask operation to "prime" the loop. However,
877 if this is an access in a group of loads, which provide strided
878 access, then the above cost should only be considered for one
879 access in the group. Inside the loop, there is a load op
880 and a realignment op. */
882 if (add_realign_cost)
884 *outside_cost = 2 * vect_get_stmt_cost (vector_stmt);
885 if (targetm.vectorize.builtin_mask_for_load)
886 *outside_cost += vect_get_stmt_cost (vector_stmt);
889 *inside_cost += ncopies * (vect_get_stmt_cost (vector_load)
890 + vect_get_stmt_cost (vector_stmt));
891 break;
894 default:
895 gcc_unreachable ();
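/* Summary of the alignment schemes handled above (illustrative):
   - dr_aligned: one vector_load per copy.
   - dr_unaligned_supported: one target-costed unaligned_load per copy.
   - dr_explicit_realign: two vector loads plus a realignment stmt per
     copy, and possibly a mask computation.
   - dr_explicit_realign_optimized: one load plus a realignment stmt per
     copy inside the loop, with the address/initial-load/mask setup
     counted once outside the loop (only for the first access in a
     group).  */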
900 /* Function vect_init_vector.
902 Insert a new stmt (INIT_STMT) that initializes a new vector variable with
903 the vector elements of VECTOR_VAR. Place the initialization at GSI if it
904 is not NULL. Otherwise, place the initialization at the loop preheader.
905 Return the DEF of INIT_STMT.
906 It will be used in the vectorization of STMT. */
908 tree
909 vect_init_vector (gimple stmt, tree vector_var, tree vector_type,
910 gimple_stmt_iterator *gsi)
912 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
913 tree new_var;
914 gimple init_stmt;
915 tree vec_oprnd;
916 edge pe;
917 tree new_temp;
918 basic_block new_bb;
920 new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
921 add_referenced_var (new_var);
922 init_stmt = gimple_build_assign (new_var, vector_var);
923 new_temp = make_ssa_name (new_var, init_stmt);
924 gimple_assign_set_lhs (init_stmt, new_temp);
926 if (gsi)
927 vect_finish_stmt_generation (stmt, init_stmt, gsi);
928 else
930 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
932 if (loop_vinfo)
934 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
936 if (nested_in_vect_loop_p (loop, stmt))
937 loop = loop->inner;
939 pe = loop_preheader_edge (loop);
940 new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
941 gcc_assert (!new_bb);
943 else
945 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
946 basic_block bb;
947 gimple_stmt_iterator gsi_bb_start;
949 gcc_assert (bb_vinfo);
950 bb = BB_VINFO_BB (bb_vinfo);
951 gsi_bb_start = gsi_after_labels (bb);
952 gsi_insert_before (&gsi_bb_start, init_stmt, GSI_SAME_STMT);
956 if (vect_print_dump_info (REPORT_DETAILS))
958 fprintf (vect_dump, "created new init_stmt: ");
959 print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
962 vec_oprnd = gimple_assign_lhs (init_stmt);
963 return vec_oprnd;
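/* Typical use (see vect_get_vec_def_for_operand below): a constant
   operand is broadcast with build_vector_from_val and then materialized
   in the preheader by calling

     vect_init_vector (stmt, vec_cst, vector_type, NULL);

   Passing a non-NULL GSI instead inserts the initialization right before
   that statement.  */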
967 /* Function vect_get_vec_def_for_operand.
969 OP is an operand in STMT. This function returns a (vector) def that will be
970 used in the vectorized stmt for STMT.
972 In the case that OP is an SSA_NAME which is defined in the loop, then
973 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
975 In case OP is an invariant or constant, a new stmt that creates a vector def
976 needs to be introduced. */
978 tree
979 vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
981 tree vec_oprnd;
982 gimple vec_stmt;
983 gimple def_stmt;
984 stmt_vec_info def_stmt_info = NULL;
985 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
986 unsigned int nunits;
987 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
988 tree vec_inv;
989 tree vec_cst;
990 tree t = NULL_TREE;
991 tree def;
992 int i;
993 enum vect_def_type dt;
994 bool is_simple_use;
995 tree vector_type;
997 if (vect_print_dump_info (REPORT_DETAILS))
999 fprintf (vect_dump, "vect_get_vec_def_for_operand: ");
1000 print_generic_expr (vect_dump, op, TDF_SLIM);
1003 is_simple_use = vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def,
1004 &dt);
1005 gcc_assert (is_simple_use);
1006 if (vect_print_dump_info (REPORT_DETAILS))
1008 if (def)
1010 fprintf (vect_dump, "def = ");
1011 print_generic_expr (vect_dump, def, TDF_SLIM);
1013 if (def_stmt)
1015 fprintf (vect_dump, " def_stmt = ");
1016 print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
1020 switch (dt)
1022 /* Case 1: operand is a constant. */
1023 case vect_constant_def:
1025 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
1026 gcc_assert (vector_type);
1027 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
1029 if (scalar_def)
1030 *scalar_def = op;
1032 /* Create 'vect_cst_ = {cst,cst,...,cst}' */
1033 if (vect_print_dump_info (REPORT_DETAILS))
1034 fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits);
1036 vec_cst = build_vector_from_val (vector_type, op);
1037 return vect_init_vector (stmt, vec_cst, vector_type, NULL);
1040 /* Case 2: operand is defined outside the loop - loop invariant. */
1041 case vect_external_def:
1043 vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
1044 gcc_assert (vector_type);
1045 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
1047 if (scalar_def)
1048 *scalar_def = def;
1050 /* Create 'vec_inv = {inv,inv,..,inv}' */
1051 if (vect_print_dump_info (REPORT_DETAILS))
1052 fprintf (vect_dump, "Create vector_inv.");
1054 for (i = nunits - 1; i >= 0; --i)
1056 t = tree_cons (NULL_TREE, def, t);
1059 /* FIXME: use build_constructor directly. */
1060 vec_inv = build_constructor_from_list (vector_type, t);
1061 return vect_init_vector (stmt, vec_inv, vector_type, NULL);
1064 /* Case 3: operand is defined inside the loop. */
1065 case vect_internal_def:
1067 if (scalar_def)
1068 *scalar_def = NULL/* FIXME tuples: def_stmt*/;
1070 /* Get the def from the vectorized stmt. */
1071 def_stmt_info = vinfo_for_stmt (def_stmt);
1072 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1073 gcc_assert (vec_stmt);
1074 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1075 vec_oprnd = PHI_RESULT (vec_stmt);
1076 else if (is_gimple_call (vec_stmt))
1077 vec_oprnd = gimple_call_lhs (vec_stmt);
1078 else
1079 vec_oprnd = gimple_assign_lhs (vec_stmt);
1080 return vec_oprnd;
1083 /* Case 4: operand is defined by a loop header phi - reduction */
1084 case vect_reduction_def:
1085 case vect_double_reduction_def:
1086 case vect_nested_cycle:
1088 struct loop *loop;
1090 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1091 loop = (gimple_bb (def_stmt))->loop_father;
1093 /* Get the def before the loop */
1094 op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
1095 return get_initial_def_for_reduction (stmt, op, scalar_def);
1098 /* Case 5: operand is defined by loop-header phi - induction. */
1099 case vect_induction_def:
1101 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1103 /* Get the def from the vectorized stmt. */
1104 def_stmt_info = vinfo_for_stmt (def_stmt);
1105 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1106 gcc_assert (vec_stmt && gimple_code (vec_stmt) == GIMPLE_PHI);
1107 vec_oprnd = PHI_RESULT (vec_stmt);
1108 return vec_oprnd;
1111 default:
1112 gcc_unreachable ();
1117 /* Function vect_get_vec_def_for_stmt_copy
1119 Return a vector-def for an operand. This function is used when the
1120 vectorized stmt to be created (by the caller to this function) is a "copy"
1121 created in case the vectorized result cannot fit in one vector, and several
1122 copies of the vector-stmt are required. In this case the vector-def is
1123 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1124 of the stmt that defines VEC_OPRND.
1125 DT is the type of the vector def VEC_OPRND.
1127 Context:
1128 In case the vectorization factor (VF) is bigger than the number
1129 of elements that can fit in a vectype (nunits), we have to generate
1130 more than one vector stmt to vectorize the scalar stmt. This situation
1131 arises when there are multiple data-types operated upon in the loop; the
1132 smallest data-type determines the VF, and as a result, when vectorizing
1133 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1134 vector stmt (each computing a vector of 'nunits' results, and together
1135 computing 'VF' results in each iteration). This function is called when
1136 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1137 which VF=16 and nunits=4, so the number of copies required is 4):
1139 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1141 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1142 VS1.1: vx.1 = memref1 VS1.2
1143 VS1.2: vx.2 = memref2 VS1.3
1144 VS1.3: vx.3 = memref3
1146 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1147 VSnew.1: vz1 = vx.1 + ... VSnew.2
1148 VSnew.2: vz2 = vx.2 + ... VSnew.3
1149 VSnew.3: vz3 = vx.3 + ...
1151 The vectorization of S1 is explained in vectorizable_load.
1152 The vectorization of S2:
1153 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1154 the function 'vect_get_vec_def_for_operand' is called to
1155 get the relevant vector-def for each operand of S2. For operand x it
1156 returns the vector-def 'vx.0'.
1158 To create the remaining copies of the vector-stmt (VSnew.j), this
1159 function is called to get the relevant vector-def for each operand. It is
1160 obtained from the respective VS1.j stmt, which is recorded in the
1161 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1163 For example, to obtain the vector-def 'vx.1' in order to create the
1164 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1165 Given 'vx.0' we obtain the stmt that defines it ('VS1.0'); from the
1166 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1167 and return its def ('vx.1').
1168 Overall, to create the above sequence this function will be called 3 times:
1169 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1170 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1171 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1173 tree
1174 vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1176 gimple vec_stmt_for_operand;
1177 stmt_vec_info def_stmt_info;
1179 /* Do nothing; can reuse same def. */
1180 if (dt == vect_external_def || dt == vect_constant_def )
1181 return vec_oprnd;
1183 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1184 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1185 gcc_assert (def_stmt_info);
1186 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1187 gcc_assert (vec_stmt_for_operand);
1188 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1189 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1190 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1191 else
1192 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1193 return vec_oprnd;
1197 /* Get vectorized definitions for the operands to create a copy of an original
1198 stmt. See vect_get_vec_def_for_stmt_copy () for details. */
1200 static void
1201 vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
1202 VEC(tree,heap) **vec_oprnds0,
1203 VEC(tree,heap) **vec_oprnds1)
1205 tree vec_oprnd = VEC_pop (tree, *vec_oprnds0);
1207 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
1208 VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
1210 if (vec_oprnds1 && *vec_oprnds1)
1212 vec_oprnd = VEC_pop (tree, *vec_oprnds1);
1213 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
1214 VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
1219 /* Get vectorized definitions for OP0 and OP1; if SLP_NODE is not NULL,
1220 take the definitions from the SLP node. */
1222 static void
1223 vect_get_vec_defs (tree op0, tree op1, gimple stmt,
1224 VEC(tree,heap) **vec_oprnds0, VEC(tree,heap) **vec_oprnds1,
1225 slp_tree slp_node)
1227 if (slp_node)
1228 vect_get_slp_defs (op0, op1, slp_node, vec_oprnds0, vec_oprnds1, -1);
1229 else
1231 tree vec_oprnd;
1233 *vec_oprnds0 = VEC_alloc (tree, heap, 1);
1234 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
1235 VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
1237 if (op1)
1239 *vec_oprnds1 = VEC_alloc (tree, heap, 1);
1240 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
1241 VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
1247 /* Function vect_finish_stmt_generation.
1249 Insert VEC_STMT before GSI, create a stmt_vec_info for it, and set its location from the stmt at GSI. */
1251 void
1252 vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
1253 gimple_stmt_iterator *gsi)
1255 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1256 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1257 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1259 gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
1261 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1263 set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
1264 bb_vinfo));
1266 if (vect_print_dump_info (REPORT_DETAILS))
1268 fprintf (vect_dump, "add new stmt: ");
1269 print_gimple_stmt (vect_dump, vec_stmt, 0, TDF_SLIM);
1272 gimple_set_location (vec_stmt, gimple_location (gsi_stmt (*gsi)));
1275 /* Checks if CALL can be vectorized in types VECTYPE_OUT / VECTYPE_IN. Returns
1276 a function declaration if the target has a vectorized version
1277 of the function, or NULL_TREE if the function cannot be vectorized. */
1279 tree
1280 vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
1282 tree fndecl = gimple_call_fndecl (call);
1284 /* We only handle functions that do not read or clobber memory -- i.e.
1285 const or novops ones. */
1286 if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
1287 return NULL_TREE;
1289 if (!fndecl
1290 || TREE_CODE (fndecl) != FUNCTION_DECL
1291 || !DECL_BUILT_IN (fndecl))
1292 return NULL_TREE;
1294 return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
1295 vectype_in);
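/* For example, a call to a const math builtin such as sqrt can be
   replaced by a target vector builtin if
   targetm.vectorize.builtin_vectorized_function returns a decl for the
   given input/output vector types; calls that read or clobber memory
   are rejected up front.  */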
1298 /* Function vectorizable_call.
1300 Check if STMT performs a function call that can be vectorized.
1301 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1302 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
1303 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1305 static bool
1306 vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
1308 tree vec_dest;
1309 tree scalar_dest;
1310 tree op, type;
1311 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1312 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
1313 tree vectype_out, vectype_in;
1314 int nunits_in;
1315 int nunits_out;
1316 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1317 tree fndecl, new_temp, def, rhs_type;
1318 gimple def_stmt;
1319 enum vect_def_type dt[3]
1320 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
1321 gimple new_stmt = NULL;
1322 int ncopies, j;
1323 VEC(tree, heap) *vargs = NULL;
1324 enum { NARROW, NONE, WIDEN } modifier;
1325 size_t i, nargs;
1327 /* FORNOW: unsupported in basic block SLP. */
1328 gcc_assert (loop_vinfo);
1330 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1331 return false;
1333 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1334 return false;
1336 /* FORNOW: SLP not supported. */
1337 if (STMT_SLP_TYPE (stmt_info))
1338 return false;
1340 /* Is STMT a vectorizable call? */
1341 if (!is_gimple_call (stmt))
1342 return false;
1344 if (TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
1345 return false;
1347 if (stmt_could_throw_p (stmt))
1348 return false;
1350 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
1352 /* Process function arguments. */
1353 rhs_type = NULL_TREE;
1354 vectype_in = NULL_TREE;
1355 nargs = gimple_call_num_args (stmt);
1357 /* Bail out if the function has more than three arguments; we do not have
1358 interesting builtin functions to vectorize with more than two arguments
1359 except for fma. A call with no arguments is not supported either. */
1360 if (nargs == 0 || nargs > 3)
1361 return false;
1363 for (i = 0; i < nargs; i++)
1365 tree opvectype;
1367 op = gimple_call_arg (stmt, i);
1369 /* We can only handle calls with arguments of the same type. */
1370 if (rhs_type
1371 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
1373 if (vect_print_dump_info (REPORT_DETAILS))
1374 fprintf (vect_dump, "argument types differ.");
1375 return false;
1377 if (!rhs_type)
1378 rhs_type = TREE_TYPE (op);
1380 if (!vect_is_simple_use_1 (op, loop_vinfo, NULL,
1381 &def_stmt, &def, &dt[i], &opvectype))
1383 if (vect_print_dump_info (REPORT_DETAILS))
1384 fprintf (vect_dump, "use not simple.");
1385 return false;
1388 if (!vectype_in)
1389 vectype_in = opvectype;
1390 else if (opvectype
1391 && opvectype != vectype_in)
1393 if (vect_print_dump_info (REPORT_DETAILS))
1394 fprintf (vect_dump, "argument vector types differ.");
1395 return false;
1398 /* If all arguments are external or constant defs, use a vector type with
1399 the same size as the output vector type. */
1400 if (!vectype_in)
1401 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
1402 if (vec_stmt)
1403 gcc_assert (vectype_in);
1404 if (!vectype_in)
1406 if (vect_print_dump_info (REPORT_DETAILS))
1408 fprintf (vect_dump, "no vectype for scalar type ");
1409 print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
1412 return false;
1415 /* FORNOW */
1416 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1417 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1418 if (nunits_in == nunits_out / 2)
1419 modifier = NARROW;
1420 else if (nunits_out == nunits_in)
1421 modifier = NONE;
1422 else if (nunits_out == nunits_in / 2)
1423 modifier = WIDEN;
1424 else
1425 return false;
1427 /* For now, we only vectorize functions if a target specific builtin
1428 is available. TODO -- in some cases, it might be profitable to
1429 insert the calls for pieces of the vector, in order to be able
1430 to vectorize other operations in the loop. */
1431 fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
1432 if (fndecl == NULL_TREE)
1434 if (vect_print_dump_info (REPORT_DETAILS))
1435 fprintf (vect_dump, "function is not vectorizable.");
1437 return false;
1440 gcc_assert (!gimple_vuse (stmt));
1442 if (modifier == NARROW)
1443 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1444 else
1445 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1447 /* Sanity check: make sure that at least one copy of the vectorized stmt
1448 needs to be generated. */
1449 gcc_assert (ncopies >= 1);
1451 if (!vec_stmt) /* transformation not required. */
1453 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
1454 if (vect_print_dump_info (REPORT_DETAILS))
1455 fprintf (vect_dump, "=== vectorizable_call ===");
1456 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1457 return true;
1460 /** Transform. **/
1462 if (vect_print_dump_info (REPORT_DETAILS))
1463 fprintf (vect_dump, "transform operation.");
1465 /* Handle def. */
1466 scalar_dest = gimple_call_lhs (stmt);
1467 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1469 prev_stmt_info = NULL;
1470 switch (modifier)
1472 case NONE:
1473 for (j = 0; j < ncopies; ++j)
1475 /* Build argument list for the vectorized call. */
1476 if (j == 0)
1477 vargs = VEC_alloc (tree, heap, nargs);
1478 else
1479 VEC_truncate (tree, vargs, 0);
1481 for (i = 0; i < nargs; i++)
1483 op = gimple_call_arg (stmt, i);
1484 if (j == 0)
1485 vec_oprnd0
1486 = vect_get_vec_def_for_operand (op, stmt, NULL);
1487 else
1489 vec_oprnd0 = gimple_call_arg (new_stmt, i);
1490 vec_oprnd0
1491 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1494 VEC_quick_push (tree, vargs, vec_oprnd0);
1497 new_stmt = gimple_build_call_vec (fndecl, vargs);
1498 new_temp = make_ssa_name (vec_dest, new_stmt);
1499 gimple_call_set_lhs (new_stmt, new_temp);
1501 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1502 mark_symbols_for_renaming (new_stmt);
1504 if (j == 0)
1505 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1506 else
1507 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1509 prev_stmt_info = vinfo_for_stmt (new_stmt);
1512 break;
1514 case NARROW:
1515 for (j = 0; j < ncopies; ++j)
1517 /* Build argument list for the vectorized call. */
1518 if (j == 0)
1519 vargs = VEC_alloc (tree, heap, nargs * 2);
1520 else
1521 VEC_truncate (tree, vargs, 0);
1523 for (i = 0; i < nargs; i++)
1525 op = gimple_call_arg (stmt, i);
1526 if (j == 0)
1528 vec_oprnd0
1529 = vect_get_vec_def_for_operand (op, stmt, NULL);
1530 vec_oprnd1
1531 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1533 else
1535 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i);
1536 vec_oprnd0
1537 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
1538 vec_oprnd1
1539 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1542 VEC_quick_push (tree, vargs, vec_oprnd0);
1543 VEC_quick_push (tree, vargs, vec_oprnd1);
1546 new_stmt = gimple_build_call_vec (fndecl, vargs);
1547 new_temp = make_ssa_name (vec_dest, new_stmt);
1548 gimple_call_set_lhs (new_stmt, new_temp);
1550 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1551 mark_symbols_for_renaming (new_stmt);
1553 if (j == 0)
1554 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1555 else
1556 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1558 prev_stmt_info = vinfo_for_stmt (new_stmt);
1561 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1563 break;
1565 case WIDEN:
1566 /* No current target implements this case. */
1567 return false;
1570 VEC_free (tree, heap, vargs);
1572 /* Update the exception handling table with the vector stmt if necessary. */
1573 if (maybe_clean_or_replace_eh_stmt (stmt, *vec_stmt))
1574 gimple_purge_dead_eh_edges (gimple_bb (stmt));
1576 /* The call in STMT might prevent it from being removed in DCE.
1577 We however cannot remove it here, due to the way the SSA name
1578 it defines is mapped to the new definition. So just replace
1579 the RHS of the statement with something harmless. */
1581 type = TREE_TYPE (scalar_dest);
1582 new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
1583 build_zero_cst (type));
1584 set_vinfo_for_stmt (new_stmt, stmt_info);
1585 set_vinfo_for_stmt (stmt, NULL);
1586 STMT_VINFO_STMT (stmt_info) = new_stmt;
1587 gsi_replace (gsi, new_stmt, false);
1588 SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
1590 return true;
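/* Note on the modifiers handled above: NONE means the input and output
   vector types hold the same number of elements, so one vector call is
   emitted per copy.  NARROW means the output vector type holds twice as
   many elements as the input type, so each scalar argument contributes
   two vector defs per call (hence the nargs * 2 argument list).  WIDEN
   is not implemented for calls.  */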
1594 /* Function vect_gen_widened_results_half
1596 Create a vector stmt whose code, number of arguments, and result
1597 variable are CODE, OP_TYPE, and VEC_DEST, and whose arguments are
1598 VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at GSI.
1599 In the case that CODE is a CALL_EXPR, this means that a call to DECL
1600 needs to be created (DECL is a function-decl of a target-builtin).
1601 STMT is the original scalar stmt that we are vectorizing. */
1603 static gimple
1604 vect_gen_widened_results_half (enum tree_code code,
1605 tree decl,
1606 tree vec_oprnd0, tree vec_oprnd1, int op_type,
1607 tree vec_dest, gimple_stmt_iterator *gsi,
1608 gimple stmt)
1610 gimple new_stmt;
1611 tree new_temp;
1613 /* Generate half of the widened result: */
1614 if (code == CALL_EXPR)
1616 /* Target specific support */
1617 if (op_type == binary_op)
1618 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
1619 else
1620 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
1621 new_temp = make_ssa_name (vec_dest, new_stmt);
1622 gimple_call_set_lhs (new_stmt, new_temp);
1624 else
1626 /* Generic support */
1627 gcc_assert (op_type == TREE_CODE_LENGTH (code));
1628 if (op_type != binary_op)
1629 vec_oprnd1 = NULL;
1630 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
1631 vec_oprnd1);
1632 new_temp = make_ssa_name (vec_dest, new_stmt);
1633 gimple_assign_set_lhs (new_stmt, new_temp);
1635 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1637 return new_stmt;
1641 /* Check if STMT performs a conversion operation that can be vectorized.
1642 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1643 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
1644 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1646 static bool
1647 vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
1648 gimple *vec_stmt, slp_tree slp_node)
1650 tree vec_dest;
1651 tree scalar_dest;
1652 tree op0;
1653 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1654 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1655 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1656 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
1657 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
1658 tree new_temp;
1659 tree def;
1660 gimple def_stmt;
1661 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1662 gimple new_stmt = NULL;
1663 stmt_vec_info prev_stmt_info;
1664 int nunits_in;
1665 int nunits_out;
1666 tree vectype_out, vectype_in;
1667 int ncopies, j;
1668 tree rhs_type;
1669 tree builtin_decl;
1670 enum { NARROW, NONE, WIDEN } modifier;
1671 int i;
1672 VEC(tree,heap) *vec_oprnds0 = NULL;
1673 tree vop0;
1674 VEC(tree,heap) *dummy = NULL;
1675 int dummy_int;
1677 /* Is STMT a vectorizable conversion? */
1679 /* FORNOW: unsupported in basic block SLP. */
1680 gcc_assert (loop_vinfo);
1682 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1683 return false;
1685 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1686 return false;
1688 if (!is_gimple_assign (stmt))
1689 return false;
1691 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
1692 return false;
1694 code = gimple_assign_rhs_code (stmt);
1695 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
1696 return false;
1698 /* Check types of lhs and rhs. */
1699 scalar_dest = gimple_assign_lhs (stmt);
1700 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
1702 op0 = gimple_assign_rhs1 (stmt);
1703 rhs_type = TREE_TYPE (op0);
1704 /* Check the operands of the operation. */
1705 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
1706 &def_stmt, &def, &dt[0], &vectype_in))
1708 if (vect_print_dump_info (REPORT_DETAILS))
1709 fprintf (vect_dump, "use not simple.");
1710 return false;
1712 /* If op0 is an external or constant def, use a vector type of
1713 the same size as the output vector type. */
1714 if (!vectype_in)
1715 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
1716 if (vec_stmt)
1717 gcc_assert (vectype_in);
1718 if (!vectype_in)
1720 if (vect_print_dump_info (REPORT_DETAILS))
1722 fprintf (vect_dump, "no vectype for scalar type ");
1723 print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
1726 return false;
1729 /* FORNOW */
1730 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1731 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1732 if (nunits_in == nunits_out / 2)
1733 modifier = NARROW;
1734 else if (nunits_out == nunits_in)
1735 modifier = NONE;
1736 else if (nunits_out == nunits_in / 2)
1737 modifier = WIDEN;
1738 else
1739 return false;
1741 if (modifier == NARROW)
1742 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1743 else
1744 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1746 /* Multiple types in SLP are handled by creating the appropriate number of
1747 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1748 case of SLP. */
1749 if (slp_node)
1750 ncopies = 1;
1752 /* Sanity check: make sure that at least one copy of the vectorized stmt
1753 needs to be generated. */
1754 gcc_assert (ncopies >= 1);
1756 /* Supportable by target? */
1757 if ((modifier == NONE
1758 && !targetm.vectorize.builtin_conversion (code, vectype_out, vectype_in))
1759 || (modifier == WIDEN
1760 && !supportable_widening_operation (code, stmt,
1761 vectype_out, vectype_in,
1762 &decl1, &decl2,
1763 &code1, &code2,
1764 &dummy_int, &dummy))
1765 || (modifier == NARROW
1766 && !supportable_narrowing_operation (code, vectype_out, vectype_in,
1767 &code1, &dummy_int, &dummy)))
1769 if (vect_print_dump_info (REPORT_DETAILS))
1770 fprintf (vect_dump, "conversion not supported by target.");
1771 return false;
1774 if (modifier != NONE)
1776 /* FORNOW: SLP not supported. */
1777 if (STMT_SLP_TYPE (stmt_info))
1778 return false;
1781 if (!vec_stmt) /* transformation not required. */
1783 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
1784 return true;
1787 /** Transform. **/
1788 if (vect_print_dump_info (REPORT_DETAILS))
1789 fprintf (vect_dump, "transform conversion.");
1791 /* Handle def. */
1792 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1794 if (modifier == NONE && !slp_node)
1795 vec_oprnds0 = VEC_alloc (tree, heap, 1);
1797 prev_stmt_info = NULL;
1798 switch (modifier)
1800 case NONE:
1801 for (j = 0; j < ncopies; j++)
1803 if (j == 0)
1804 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
1805 else
1806 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
1808 builtin_decl =
1809 targetm.vectorize.builtin_conversion (code,
1810 vectype_out, vectype_in);
1811 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
1813 /* Arguments are ready. Create the new vector stmt. */
1814 new_stmt = gimple_build_call (builtin_decl, 1, vop0);
1815 new_temp = make_ssa_name (vec_dest, new_stmt);
1816 gimple_call_set_lhs (new_stmt, new_temp);
1817 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1818 if (slp_node)
1819 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
1822 if (j == 0)
1823 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1824 else
1825 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1826 prev_stmt_info = vinfo_for_stmt (new_stmt);
1828 break;
1830 case WIDEN:
1831 /* In case the vectorization factor (VF) is bigger than the number
1832 of elements that we can fit in a vectype (nunits), we have to
1833 generate more than one vector stmt - i.e., we need to "unroll"
1834 the vector stmt by a factor VF/nunits. */
1835 for (j = 0; j < ncopies; j++)
1837 if (j == 0)
1838 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1839 else
1840 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1842 /* Generate first half of the widened result: */
1843 new_stmt
1844 = vect_gen_widened_results_half (code1, decl1,
1845 vec_oprnd0, vec_oprnd1,
1846 unary_op, vec_dest, gsi, stmt);
1847 if (j == 0)
1848 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1849 else
1850 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1851 prev_stmt_info = vinfo_for_stmt (new_stmt);
1853 /* Generate second half of the widened result: */
1854 new_stmt
1855 = vect_gen_widened_results_half (code2, decl2,
1856 vec_oprnd0, vec_oprnd1,
1857 unary_op, vec_dest, gsi, stmt);
1858 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1859 prev_stmt_info = vinfo_for_stmt (new_stmt);
1861 break;
1863 case NARROW:
1864 /* In case the vectorization factor (VF) is bigger than the number
1865 of elements that we can fit in a vectype (nunits), we have to
1866 generate more than one vector stmt - i.e., we need to "unroll"
1867 the vector stmt by a factor VF/nunits. */
1868 for (j = 0; j < ncopies; j++)
1870 /* Handle uses. */
1871 if (j == 0)
1873 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1874 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1876 else
1878 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1);
1879 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1882 /* Arguments are ready. Create the new vector stmt. */
1883 new_stmt = gimple_build_assign_with_ops (code1, vec_dest, vec_oprnd0,
1884 vec_oprnd1);
1885 new_temp = make_ssa_name (vec_dest, new_stmt);
1886 gimple_assign_set_lhs (new_stmt, new_temp);
1887 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1889 if (j == 0)
1890 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1891 else
1892 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1894 prev_stmt_info = vinfo_for_stmt (new_stmt);
1897 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1900 if (vec_oprnds0)
1901 VEC_free (tree, heap, vec_oprnds0);
1903 return true;
1907 /* Function vectorizable_assignment.
1909 Check if STMT performs an assignment (copy) that can be vectorized.
1910 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1911 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1912 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1914 static bool
1915 vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
1916 gimple *vec_stmt, slp_tree slp_node)
1918 tree vec_dest;
1919 tree scalar_dest;
1920 tree op;
1921 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1922 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1923 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1924 tree new_temp;
1925 tree def;
1926 gimple def_stmt;
1927 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1928 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
1929 int ncopies;
1930 int i, j;
1931 VEC(tree,heap) *vec_oprnds = NULL;
1932 tree vop;
1933 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1934 gimple new_stmt = NULL;
1935 stmt_vec_info prev_stmt_info = NULL;
1936 enum tree_code code;
1937 tree vectype_in;
1939 /* Multiple types in SLP are handled by creating the appropriate number of
1940 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1941 case of SLP. */
1942 if (slp_node)
1943 ncopies = 1;
1944 else
1945 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
1947 gcc_assert (ncopies >= 1);
1949 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
1950 return false;
1952 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1953 return false;
1955 /* Is vectorizable assignment? */
1956 if (!is_gimple_assign (stmt))
1957 return false;
1959 scalar_dest = gimple_assign_lhs (stmt);
1960 if (TREE_CODE (scalar_dest) != SSA_NAME)
1961 return false;
1963 code = gimple_assign_rhs_code (stmt);
1964 if (gimple_assign_single_p (stmt)
1965 || code == PAREN_EXPR
1966 || CONVERT_EXPR_CODE_P (code))
1967 op = gimple_assign_rhs1 (stmt);
1968 else
1969 return false;
1971 if (!vect_is_simple_use_1 (op, loop_vinfo, bb_vinfo,
1972 &def_stmt, &def, &dt[0], &vectype_in))
1974 if (vect_print_dump_info (REPORT_DETAILS))
1975 fprintf (vect_dump, "use not simple.");
1976 return false;
1979 /* We can handle NOP_EXPR conversions that do not change the number
1980 of elements or the vector size. */
1981 if (CONVERT_EXPR_CODE_P (code)
1982 && (!vectype_in
1983 || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
1984 || (GET_MODE_SIZE (TYPE_MODE (vectype))
1985 != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
1986 return false;
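/* Such a conversion is vectorized as a plain copy: the transform below
   wraps the operand in a VIEW_CONVERT_EXPR so its bits are simply
   reinterpreted in the destination vector type. */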
1988 if (!vec_stmt) /* transformation not required. */
1990 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
1991 if (vect_print_dump_info (REPORT_DETAILS))
1992 fprintf (vect_dump, "=== vectorizable_assignment ===");
1993 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1994 return true;
1997 /** Transform. **/
1998 if (vect_print_dump_info (REPORT_DETAILS))
1999 fprintf (vect_dump, "transform assignment.");
2001 /* Handle def. */
2002 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2004 /* Handle use. */
2005 for (j = 0; j < ncopies; j++)
2007 /* Handle uses. */
2008 if (j == 0)
2009 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
2010 else
2011 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
2013 /* Arguments are ready. Create the new vector stmt. */
2014 FOR_EACH_VEC_ELT (tree, vec_oprnds, i, vop)
2016 if (CONVERT_EXPR_CODE_P (code))
2017 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
2018 new_stmt = gimple_build_assign (vec_dest, vop);
2019 new_temp = make_ssa_name (vec_dest, new_stmt);
2020 gimple_assign_set_lhs (new_stmt, new_temp);
2021 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2022 if (slp_node)
2023 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2026 if (slp_node)
2027 continue;
2029 if (j == 0)
2030 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2031 else
2032 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2034 prev_stmt_info = vinfo_for_stmt (new_stmt);
2037 VEC_free (tree, heap, vec_oprnds);
2038 return true;
2042 /* Function vectorizable_shift.
2044 Check if STMT performs a shift operation that can be vectorized.
2045 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2046 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2047 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2049 static bool
2050 vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
2051 gimple *vec_stmt, slp_tree slp_node)
2053 tree vec_dest;
2054 tree scalar_dest;
2055 tree op0, op1 = NULL;
2056 tree vec_oprnd1 = NULL_TREE;
2057 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2058 tree vectype;
2059 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2060 enum tree_code code;
2061 enum machine_mode vec_mode;
2062 tree new_temp;
2063 optab optab;
2064 int icode;
2065 enum machine_mode optab_op2_mode;
2066 tree def;
2067 gimple def_stmt;
2068 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2069 gimple new_stmt = NULL;
2070 stmt_vec_info prev_stmt_info;
2071 int nunits_in;
2072 int nunits_out;
2073 tree vectype_out;
2074 int ncopies;
2075 int j, i;
2076 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
2077 tree vop0, vop1;
2078 unsigned int k;
2079 bool scalar_shift_arg = false;
2080 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2081 int vf;
2083 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2084 return false;
2086 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2087 return false;
2089 /* Is STMT a vectorizable binary/unary operation? */
2090 if (!is_gimple_assign (stmt))
2091 return false;
2093 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2094 return false;
2096 code = gimple_assign_rhs_code (stmt);
2098 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
2099 || code == RROTATE_EXPR))
2100 return false;
2102 scalar_dest = gimple_assign_lhs (stmt);
2103 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2105 op0 = gimple_assign_rhs1 (stmt);
2106 if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
2107 &def_stmt, &def, &dt[0], &vectype))
2109 if (vect_print_dump_info (REPORT_DETAILS))
2110 fprintf (vect_dump, "use not simple.");
2111 return false;
2113 /* If op0 is an external or constant def use a vector type with
2114 the same size as the output vector type. */
2115 if (!vectype)
2116 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2117 if (vec_stmt)
2118 gcc_assert (vectype);
2119 if (!vectype)
2121 if (vect_print_dump_info (REPORT_DETAILS))
2123 fprintf (vect_dump, "no vectype for scalar type ");
2124 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2127 return false;
2130 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2131 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
2132 if (nunits_out != nunits_in)
2133 return false;
2135 op1 = gimple_assign_rhs2 (stmt);
2136 if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt[1]))
2138 if (vect_print_dump_info (REPORT_DETAILS))
2139 fprintf (vect_dump, "use not simple.");
2140 return false;
2143 if (loop_vinfo)
2144 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2145 else
2146 vf = 1;
2148 /* Multiple types in SLP are handled by creating the appropriate number of
2149 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2150 case of SLP. */
2151 if (slp_node)
2152 ncopies = 1;
2153 else
2154 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2156 gcc_assert (ncopies >= 1);
2158 /* Determine whether the shift amount is a vector, or scalar. If the
2159 shift/rotate amount is a vector, use the vector/vector shift optabs. */
2161 /* Vector shifted by vector. */
2162 if (dt[1] == vect_internal_def)
2164 optab = optab_for_tree_code (code, vectype, optab_vector);
2165 if (vect_print_dump_info (REPORT_DETAILS))
2166 fprintf (vect_dump, "vector/vector shift/rotate found.");
2168 /* See if the machine has a vector shifted by scalar insn and if not
2169 then see if it has a vector shifted by vector insn. */
2170 else if (dt[1] == vect_constant_def || dt[1] == vect_external_def)
2172 optab = optab_for_tree_code (code, vectype, optab_scalar);
2173 if (optab
2174 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
2176 scalar_shift_arg = true;
2177 if (vect_print_dump_info (REPORT_DETAILS))
2178 fprintf (vect_dump, "vector/scalar shift/rotate found.");
2180 else
2182 optab = optab_for_tree_code (code, vectype, optab_vector);
2183 if (optab
2184 && (optab_handler (optab, TYPE_MODE (vectype))
2185 != CODE_FOR_nothing))
2187 if (vect_print_dump_info (REPORT_DETAILS))
2188 fprintf (vect_dump, "vector/vector shift/rotate found.");
2190 /* Unlike the other binary operators, shifts/rotates have
2191 the rhs being int, instead of the same type as the lhs,
2192 so make sure the scalar is the right type if we are
2193 dealing with vectors of short/char.  */
2194 if (dt[1] == vect_constant_def)
2195 op1 = fold_convert (TREE_TYPE (vectype), op1);
2199 else
2201 if (vect_print_dump_info (REPORT_DETAILS))
2202 fprintf (vect_dump, "operand mode requires invariant argument.");
2203 return false;
2206 /* Supportable by target? */
2207 if (!optab)
2209 if (vect_print_dump_info (REPORT_DETAILS))
2210 fprintf (vect_dump, "no optab.");
2211 return false;
2213 vec_mode = TYPE_MODE (vectype);
2214 icode = (int) optab_handler (optab, vec_mode);
2215 if (icode == CODE_FOR_nothing)
2217 if (vect_print_dump_info (REPORT_DETAILS))
2218 fprintf (vect_dump, "op not supported by target.");
2219 /* Check only during analysis. */
2220 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
2221 || (vf < vect_min_worthwhile_factor (code)
2222 && !vec_stmt))
2223 return false;
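/* Otherwise the vector occupies exactly one machine word, so later
   lowering can still open-code the operation on word_mode. */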
2224 if (vect_print_dump_info (REPORT_DETAILS))
2225 fprintf (vect_dump, "proceeding using word mode.");
2228 /* Worthwhile without SIMD support? Check only during analysis. */
2229 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2230 && vf < vect_min_worthwhile_factor (code)
2231 && !vec_stmt)
2233 if (vect_print_dump_info (REPORT_DETAILS))
2234 fprintf (vect_dump, "not worthwhile without SIMD support.");
2235 return false;
2238 if (!vec_stmt) /* transformation not required. */
2240 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
2241 if (vect_print_dump_info (REPORT_DETAILS))
2242 fprintf (vect_dump, "=== vectorizable_shift ===");
2243 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2244 return true;
2247 /** Transform. **/
2249 if (vect_print_dump_info (REPORT_DETAILS))
2250 fprintf (vect_dump, "transform binary/unary operation.");
2252 /* Handle def. */
2253 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2255 /* Allocate VECs for vector operands. In case of SLP, vector operands are
2256 created in the previous stages of the recursion, so no allocation is
2257 needed, except for the case of shift with scalar shift argument. In that
2258 case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
2259 be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
2260 In case of loop-based vectorization we allocate VECs of size 1. We
2261 allocate VEC_OPRNDS1 only in case of binary operation. */
2262 if (!slp_node)
2264 vec_oprnds0 = VEC_alloc (tree, heap, 1);
2265 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2267 else if (scalar_shift_arg)
2268 vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);
2270 prev_stmt_info = NULL;
2271 for (j = 0; j < ncopies; j++)
2273 /* Handle uses. */
2274 if (j == 0)
2276 if (scalar_shift_arg)
2278 /* Vector shl and shr insn patterns can be defined with scalar
2279 operand 2 (shift operand). In this case, use constant or loop
2280 invariant op1 directly, without extending it to vector mode
2281 first. */
2282 optab_op2_mode = insn_data[icode].operand[2].mode;
2283 if (!VECTOR_MODE_P (optab_op2_mode))
2285 if (vect_print_dump_info (REPORT_DETAILS))
2286 fprintf (vect_dump, "operand 1 using scalar mode.");
2287 vec_oprnd1 = op1;
2288 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2289 if (slp_node)
2291 /* Store vec_oprnd1 for every vector stmt to be created
2292 for SLP_NODE. We check during the analysis that all
2293 the shift arguments are the same.
2294 TODO: Allow different constants for different vector
2295 stmts generated for an SLP instance. */
2296 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
2297 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2302 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
2303 (a special case for certain kinds of vector shifts); otherwise,
2304 operand 1 should be of a vector type (the usual case). */
2305 if (vec_oprnd1)
2306 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
2307 slp_node);
2308 else
2309 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
2310 slp_node);
2312 else
2313 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
2315 /* Arguments are ready. Create the new vector stmt. */
2316 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
2318 vop1 = VEC_index (tree, vec_oprnds1, i);
2319 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2320 new_temp = make_ssa_name (vec_dest, new_stmt);
2321 gimple_assign_set_lhs (new_stmt, new_temp);
2322 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2323 if (slp_node)
2324 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2327 if (slp_node)
2328 continue;
2330 if (j == 0)
2331 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2332 else
2333 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2334 prev_stmt_info = vinfo_for_stmt (new_stmt);
2337 VEC_free (tree, heap, vec_oprnds0);
2338 VEC_free (tree, heap, vec_oprnds1);
2340 return true;
2344 /* Function vectorizable_operation.
2346 Check if STMT performs a binary, unary or ternary operation that can
2347 be vectorized.
2348 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2349 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2350 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2352 static bool
2353 vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
2354 gimple *vec_stmt, slp_tree slp_node)
2356 tree vec_dest;
2357 tree scalar_dest;
2358 tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
2359 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2360 tree vectype;
2361 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2362 enum tree_code code;
2363 enum machine_mode vec_mode;
2364 tree new_temp;
2365 int op_type;
2366 optab optab;
2367 int icode;
2368 tree def;
2369 gimple def_stmt;
2370 enum vect_def_type dt[3]
2371 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
2372 gimple new_stmt = NULL;
2373 stmt_vec_info prev_stmt_info;
2374 int nunits_in;
2375 int nunits_out;
2376 tree vectype_out;
2377 int ncopies;
2378 int j, i;
2379 VEC(tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL, *vec_oprnds2 = NULL;
2380 tree vop0, vop1, vop2;
2381 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2382 int vf;
2384 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2385 return false;
2387 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2388 return false;
2390 /* Is STMT a vectorizable binary/unary operation? */
2391 if (!is_gimple_assign (stmt))
2392 return false;
2394 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2395 return false;
2397 code = gimple_assign_rhs_code (stmt);
2399 /* For pointer addition, we should use the normal plus for
2400 the vector addition. */
2401 if (code == POINTER_PLUS_EXPR)
2402 code = PLUS_EXPR;
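/* (There is no vector counterpart of POINTER_PLUS_EXPR - its second
   operand is sizetype - so the pointer elements are simply added as
   integers.) */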
2404 /* Support only unary, binary and ternary operations. */
2405 op_type = TREE_CODE_LENGTH (code);
2406 if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
2408 if (vect_print_dump_info (REPORT_DETAILS))
2409 fprintf (vect_dump, "num. args = %d (not unary/binary/ternary op).",
2410 op_type);
2411 return false;
2414 scalar_dest = gimple_assign_lhs (stmt);
2415 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2417 op0 = gimple_assign_rhs1 (stmt);
2418 if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
2419 &def_stmt, &def, &dt[0], &vectype))
2421 if (vect_print_dump_info (REPORT_DETAILS))
2422 fprintf (vect_dump, "use not simple.");
2423 return false;
2425 /* If op0 is an external or constant def use a vector type with
2426 the same size as the output vector type. */
2427 if (!vectype)
2428 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2429 if (vec_stmt)
2430 gcc_assert (vectype);
2431 if (!vectype)
2433 if (vect_print_dump_info (REPORT_DETAILS))
2435 fprintf (vect_dump, "no vectype for scalar type ");
2436 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2439 return false;
2442 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2443 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
2444 if (nunits_out != nunits_in)
2445 return false;
2447 if (op_type == binary_op || op_type == ternary_op)
2449 op1 = gimple_assign_rhs2 (stmt);
2450 if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def,
2451 &dt[1]))
2453 if (vect_print_dump_info (REPORT_DETAILS))
2454 fprintf (vect_dump, "use not simple.");
2455 return false;
2458 if (op_type == ternary_op)
2460 op2 = gimple_assign_rhs3 (stmt);
2461 if (!vect_is_simple_use (op2, loop_vinfo, bb_vinfo, &def_stmt, &def,
2462 &dt[2]))
2464 if (vect_print_dump_info (REPORT_DETAILS))
2465 fprintf (vect_dump, "use not simple.");
2466 return false;
2470 if (loop_vinfo)
2471 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2472 else
2473 vf = 1;
2475 /* Multiple types in SLP are handled by creating the appropriate number of
2476 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2477 case of SLP. */
2478 if (slp_node)
2479 ncopies = 1;
2480 else
2481 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2483 gcc_assert (ncopies >= 1);
2485 /* Shifts are handled in vectorizable_shift (). */
2486 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
2487 || code == RROTATE_EXPR)
2488 return false;
2490 optab = optab_for_tree_code (code, vectype, optab_default);
2492 /* Supportable by target? */
2493 if (!optab)
2495 if (vect_print_dump_info (REPORT_DETAILS))
2496 fprintf (vect_dump, "no optab.");
2497 return false;
2499 vec_mode = TYPE_MODE (vectype);
2500 icode = (int) optab_handler (optab, vec_mode);
2501 if (icode == CODE_FOR_nothing)
2503 if (vect_print_dump_info (REPORT_DETAILS))
2504 fprintf (vect_dump, "op not supported by target.");
2505 /* Check only during analysis. */
2506 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
2507 || (vf < vect_min_worthwhile_factor (code)
2508 && !vec_stmt))
2509 return false;
2510 if (vect_print_dump_info (REPORT_DETAILS))
2511 fprintf (vect_dump, "proceeding using word mode.");
2514 /* Worthwhile without SIMD support? Check only during analysis. */
2515 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2516 && vf < vect_min_worthwhile_factor (code)
2517 && !vec_stmt)
2519 if (vect_print_dump_info (REPORT_DETAILS))
2520 fprintf (vect_dump, "not worthwhile without SIMD support.");
2521 return false;
2524 if (!vec_stmt) /* transformation not required. */
2526 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
2527 if (vect_print_dump_info (REPORT_DETAILS))
2528 fprintf (vect_dump, "=== vectorizable_operation ===");
2529 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2530 return true;
2533 /** Transform. **/
2535 if (vect_print_dump_info (REPORT_DETAILS))
2536 fprintf (vect_dump, "transform binary/unary operation.");
2538 /* Handle def. */
2539 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2541 /* Allocate VECs for vector operands. In case of SLP, vector operands are
2542 created in the previous stages of the recursion, so no allocation is
2543 needed. In case of loop-based vectorization we allocate VECs of size 1.
2544 VEC_OPRNDS1 is allocated only for binary and ternary operations, and
2545 VEC_OPRNDS2 only for ternary operations. */
2548 if (!slp_node)
2550 vec_oprnds0 = VEC_alloc (tree, heap, 1);
2551 if (op_type == binary_op || op_type == ternary_op)
2552 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2553 if (op_type == ternary_op)
2554 vec_oprnds2 = VEC_alloc (tree, heap, 1);
2557 /* In case the vectorization factor (VF) is bigger than the number
2558 of elements that we can fit in a vectype (nunits), we have to generate
2559 more than one vector stmt - i.e - we need to "unroll" the
2560 vector stmt by a factor VF/nunits. In doing so, we record a pointer
2561 from one copy of the vector stmt to the next, in the field
2562 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
2563 stages to find the correct vector defs to be used when vectorizing
2564 stmts that use the defs of the current stmt. The example below
2565 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
2566 we need to create 4 vectorized stmts):
2568 before vectorization:
2569 RELATED_STMT VEC_STMT
2570 S1: x = memref - -
2571 S2: z = x + 1 - -
2573 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
2574 there):
2575 RELATED_STMT VEC_STMT
2576 VS1_0: vx0 = memref0 VS1_1 -
2577 VS1_1: vx1 = memref1 VS1_2 -
2578 VS1_2: vx2 = memref2 VS1_3 -
2579 VS1_3: vx3 = memref3 - -
2580 S1: x = load - VS1_0
2581 S2: z = x + 1 - -
2583 step2: vectorize stmt S2 (done here):
2584 To vectorize stmt S2 we first need to find the relevant vector
2585 def for the first operand 'x'. This is, as usual, obtained from
2586 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
2587 that defines 'x' (S1). This way we find the stmt VS1_0, and the
2588 relevant vector def 'vx0'. Having found 'vx0' we can generate
2589 the vector stmt VS2_0, and as usual, record it in the
2590 STMT_VINFO_VEC_STMT of stmt S2.
2591 When creating the second copy (VS2_1), we obtain the relevant vector
2592 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
2593 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
2594 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
2595 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
2596 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
2597 chain of stmts and pointers:
2598 RELATED_STMT VEC_STMT
2599 VS1_0: vx0 = memref0 VS1_1 -
2600 VS1_1: vx1 = memref1 VS1_2 -
2601 VS1_2: vx2 = memref2 VS1_3 -
2602 VS1_3: vx3 = memref3 - -
2603 S1: x = load - VS1_0
2604 VS2_0: vz0 = vx0 + v1 VS2_1 -
2605 VS2_1: vz1 = vx1 + v1 VS2_2 -
2606 VS2_2: vz2 = vx2 + v1 VS2_3 -
2607 VS2_3: vz3 = vx3 + v1 - -
2608 S2: z = x + 1 - VS2_0 */
2610 prev_stmt_info = NULL;
2611 for (j = 0; j < ncopies; j++)
2613 /* Handle uses. */
2614 if (j == 0)
2616 if (op_type == binary_op || op_type == ternary_op)
2617 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
2618 slp_node);
2619 else
2620 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
2621 slp_node);
2622 if (op_type == ternary_op)
2624 vec_oprnds2 = VEC_alloc (tree, heap, 1);
2625 VEC_quick_push (tree, vec_oprnds2,
2626 vect_get_vec_def_for_operand (op2, stmt, NULL));
2629 else
2631 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
2632 if (op_type == ternary_op)
2634 tree vec_oprnd = VEC_pop (tree, vec_oprnds2);
2635 VEC_quick_push (tree, vec_oprnds2,
2636 vect_get_vec_def_for_stmt_copy (dt[2],
2637 vec_oprnd));
2641 /* Arguments are ready. Create the new vector stmt. */
2642 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
2644 vop1 = ((op_type == binary_op || op_type == ternary_op)
2645 ? VEC_index (tree, vec_oprnds1, i) : NULL_TREE);
2646 vop2 = ((op_type == ternary_op)
2647 ? VEC_index (tree, vec_oprnds2, i) : NULL_TREE);
2648 new_stmt = gimple_build_assign_with_ops3 (code, vec_dest,
2649 vop0, vop1, vop2);
2650 new_temp = make_ssa_name (vec_dest, new_stmt);
2651 gimple_assign_set_lhs (new_stmt, new_temp);
2652 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2653 if (slp_node)
2654 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2657 if (slp_node)
2658 continue;
2660 if (j == 0)
2661 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2662 else
2663 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2664 prev_stmt_info = vinfo_for_stmt (new_stmt);
2667 VEC_free (tree, heap, vec_oprnds0);
2668 if (vec_oprnds1)
2669 VEC_free (tree, heap, vec_oprnds1);
2670 if (vec_oprnds2)
2671 VEC_free (tree, heap, vec_oprnds2);
2673 return true;
2677 /* Get vectorized definitions for loop-based vectorization. For the first
2678 operand we call vect_get_vec_def_for_operand() (with OPRND containing
2679 scalar operand), and for the rest we get a copy with
2680 vect_get_vec_def_for_stmt_copy() using the previous vector definition
2681 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
2682 The vectors are collected into VEC_OPRNDS. */
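/* Note: each invocation pushes two vector defs and then recurses
   MULTI_STEP_CVT more times, so 2 * (MULTI_STEP_CVT + 1) defs end up
   in VEC_OPRNDS. */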
2684 static void
2685 vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
2686 VEC (tree, heap) **vec_oprnds, int multi_step_cvt)
2688 tree vec_oprnd;
2690 /* Get first vector operand. */
2691 /* All the vector operands except the very first one (that is scalar oprnd)
2692 are stmt copies. */
2693 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
2694 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
2695 else
2696 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
2698 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2700 /* Get second vector operand. */
2701 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
2702 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2704 *oprnd = vec_oprnd;
2706 /* For conversion in multiple steps, continue to get operands
2707 recursively. */
2708 if (multi_step_cvt)
2709 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
2713 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
2714 For multi-step conversions store the resulting vectors and call the function
2715 recursively. */
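/* For example, demoting four int vectors v0..v3 to char in two steps
   first packs (v0,v1) and (v2,v3) into two short vectors, truncates
   VEC_OPRNDS to those two results, and then recurses once more to pack
   them into the final char vector. */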
2717 static void
2718 vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
2719 int multi_step_cvt, gimple stmt,
2720 VEC (tree, heap) *vec_dsts,
2721 gimple_stmt_iterator *gsi,
2722 slp_tree slp_node, enum tree_code code,
2723 stmt_vec_info *prev_stmt_info)
2725 unsigned int i;
2726 tree vop0, vop1, new_tmp, vec_dest;
2727 gimple new_stmt;
2728 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2730 vec_dest = VEC_pop (tree, vec_dsts);
2732 for (i = 0; i < VEC_length (tree, *vec_oprnds); i += 2)
2734 /* Create demotion operation. */
2735 vop0 = VEC_index (tree, *vec_oprnds, i);
2736 vop1 = VEC_index (tree, *vec_oprnds, i + 1);
2737 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2738 new_tmp = make_ssa_name (vec_dest, new_stmt);
2739 gimple_assign_set_lhs (new_stmt, new_tmp);
2740 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2742 if (multi_step_cvt)
2743 /* Store the resulting vector for next recursive call. */
2744 VEC_replace (tree, *vec_oprnds, i/2, new_tmp);
2745 else
2747 /* This is the last step of the conversion sequence. Store the
2748 vectors in SLP_NODE or in vector info of the scalar statement
2749 (or in STMT_VINFO_RELATED_STMT chain). */
2750 if (slp_node)
2751 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2752 else
2754 if (!*prev_stmt_info)
2755 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2756 else
2757 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
2759 *prev_stmt_info = vinfo_for_stmt (new_stmt);
2764 /* For multi-step demotion operations we first generate demotion operations
2765 from the source type to the intermediate types, and then combine the
2766 results (stored in VEC_OPRNDS) in demotion operation to the destination
2767 type. */
2768 if (multi_step_cvt)
2770 /* At each level of recursion we have half of the operands we had at the
2771 previous level. */
2772 VEC_truncate (tree, *vec_oprnds, (i+1)/2);
2773 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
2774 stmt, vec_dsts, gsi, slp_node,
2775 code, prev_stmt_info);
2780 /* Function vectorizable_type_demotion
2782 Check if STMT performs a binary or unary operation that involves
2783 type demotion, and if it can be vectorized.
2784 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2785 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2786 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2788 static bool
2789 vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
2790 gimple *vec_stmt, slp_tree slp_node)
2792 tree vec_dest;
2793 tree scalar_dest;
2794 tree op0;
2795 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2796 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2797 enum tree_code code, code1 = ERROR_MARK;
2798 tree def;
2799 gimple def_stmt;
2800 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2801 stmt_vec_info prev_stmt_info;
2802 int nunits_in;
2803 int nunits_out;
2804 tree vectype_out;
2805 int ncopies;
2806 int j, i;
2807 tree vectype_in;
2808 int multi_step_cvt = 0;
2809 VEC (tree, heap) *vec_oprnds0 = NULL;
2810 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2811 tree last_oprnd, intermediate_type;
2813 /* FORNOW: not supported by basic block SLP vectorization. */
2814 gcc_assert (loop_vinfo);
2816 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2817 return false;
2819 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2820 return false;
2822 /* Is STMT a vectorizable type-demotion operation? */
2823 if (!is_gimple_assign (stmt))
2824 return false;
2826 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2827 return false;
2829 code = gimple_assign_rhs_code (stmt);
2830 if (!CONVERT_EXPR_CODE_P (code))
2831 return false;
2833 scalar_dest = gimple_assign_lhs (stmt);
2834 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2836 /* Check the operands of the operation. */
2837 op0 = gimple_assign_rhs1 (stmt);
2838 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2839 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2840 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2841 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2842 && CONVERT_EXPR_CODE_P (code))))
2843 return false;
2844 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
2845 &def_stmt, &def, &dt[0], &vectype_in))
2847 if (vect_print_dump_info (REPORT_DETAILS))
2848 fprintf (vect_dump, "use not simple.");
2849 return false;
2851 /* If op0 is an external def use a vector type with the
2852 same size as the output vector type if possible. */
2853 if (!vectype_in)
2854 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2855 if (vec_stmt)
2856 gcc_assert (vectype_in);
2857 if (!vectype_in)
2859 if (vect_print_dump_info (REPORT_DETAILS))
2861 fprintf (vect_dump, "no vectype for scalar type ");
2862 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2865 return false;
2868 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2869 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2870 if (nunits_in >= nunits_out)
2871 return false;
2873 /* Multiple types in SLP are handled by creating the appropriate number of
2874 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2875 case of SLP. */
2876 if (slp_node)
2877 ncopies = 1;
2878 else
2879 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2880 gcc_assert (ncopies >= 1);
2882 /* Supportable by target? */
2883 if (!supportable_narrowing_operation (code, vectype_out, vectype_in,
2884 &code1, &multi_step_cvt, &interm_types))
2885 return false;
2887 if (!vec_stmt) /* transformation not required. */
2889 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
2890 if (vect_print_dump_info (REPORT_DETAILS))
2891 fprintf (vect_dump, "=== vectorizable_demotion ===");
2892 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2893 return true;
2896 /** Transform. **/
2897 if (vect_print_dump_info (REPORT_DETAILS))
2898 fprintf (vect_dump, "transform type demotion operation. ncopies = %d.",
2899 ncopies);
2901 /* In case of multi-step demotion, we first generate demotion operations to
2902 the intermediate types, and then from those types to the final one.
2903 We create vector destinations for the intermediate types (TYPES) received
2904 from supportable_narrowing_operation, and store them in the correct order
2905 for future use in vect_create_vectorized_demotion_stmts(). */
2906 if (multi_step_cvt)
2907 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
2908 else
2909 vec_dsts = VEC_alloc (tree, heap, 1);
2911 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2912 VEC_quick_push (tree, vec_dsts, vec_dest);
2914 if (multi_step_cvt)
2916 for (i = VEC_length (tree, interm_types) - 1;
2917 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
2919 vec_dest = vect_create_destination_var (scalar_dest,
2920 intermediate_type);
2921 VEC_quick_push (tree, vec_dsts, vec_dest);
2925 /* In case the vectorization factor (VF) is bigger than the number
2926 of elements that we can fit in a vectype (nunits), we have to generate
2927 more than one vector stmt - i.e - we need to "unroll" the
2928 vector stmt by a factor VF/nunits. */
2929 last_oprnd = op0;
2930 prev_stmt_info = NULL;
2931 for (j = 0; j < ncopies; j++)
2933 /* Handle uses. */
2934 if (slp_node)
2935 vect_get_slp_defs (op0, NULL_TREE, slp_node, &vec_oprnds0, NULL, -1);
2936 else
2938 VEC_free (tree, heap, vec_oprnds0);
2939 vec_oprnds0 = VEC_alloc (tree, heap,
2940 (multi_step_cvt ? vect_pow2 (multi_step_cvt) * 2 : 2));
2941 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
2942 vect_pow2 (multi_step_cvt) - 1);
2945 /* Arguments are ready. Create the new vector stmts. */
2946 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
2947 vect_create_vectorized_demotion_stmts (&vec_oprnds0,
2948 multi_step_cvt, stmt, tmp_vec_dsts,
2949 gsi, slp_node, code1,
2950 &prev_stmt_info);
2953 VEC_free (tree, heap, vec_oprnds0);
2954 VEC_free (tree, heap, vec_dsts);
2955 VEC_free (tree, heap, tmp_vec_dsts);
2956 VEC_free (tree, heap, interm_types);
2958 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2959 return true;
2963 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
2964 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
2965 the resulting vectors and call the function recursively. */
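/* For example, with a two-step char -> int promotion each char vector is
   first unpacked (via CODE1/CODE2) into two short vectors; those results
   replace *VEC_OPRNDS0 and the recursive call unpacks each of them again
   into two int vectors. */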
2967 static void
2968 vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
2969 VEC (tree, heap) **vec_oprnds1,
2970 int multi_step_cvt, gimple stmt,
2971 VEC (tree, heap) *vec_dsts,
2972 gimple_stmt_iterator *gsi,
2973 slp_tree slp_node, enum tree_code code1,
2974 enum tree_code code2, tree decl1,
2975 tree decl2, int op_type,
2976 stmt_vec_info *prev_stmt_info)
2978 int i;
2979 tree vop0, vop1, new_tmp1, new_tmp2, vec_dest;
2980 gimple new_stmt1, new_stmt2;
2981 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2982 VEC (tree, heap) *vec_tmp;
2984 vec_dest = VEC_pop (tree, vec_dsts);
2985 vec_tmp = VEC_alloc (tree, heap, VEC_length (tree, *vec_oprnds0) * 2);
2987 FOR_EACH_VEC_ELT (tree, *vec_oprnds0, i, vop0)
2989 if (op_type == binary_op)
2990 vop1 = VEC_index (tree, *vec_oprnds1, i);
2991 else
2992 vop1 = NULL_TREE;
2994 /* Generate the two halves of promotion operation. */
2995 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
2996 op_type, vec_dest, gsi, stmt);
2997 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
2998 op_type, vec_dest, gsi, stmt);
2999 if (is_gimple_call (new_stmt1))
3001 new_tmp1 = gimple_call_lhs (new_stmt1);
3002 new_tmp2 = gimple_call_lhs (new_stmt2);
3004 else
3006 new_tmp1 = gimple_assign_lhs (new_stmt1);
3007 new_tmp2 = gimple_assign_lhs (new_stmt2);
3010 if (multi_step_cvt)
3012 /* Store the results for the recursive call. */
3013 VEC_quick_push (tree, vec_tmp, new_tmp1);
3014 VEC_quick_push (tree, vec_tmp, new_tmp2);
3016 else
3018 /* Last step of the promotion sequence - store the results. */
3019 if (slp_node)
3021 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt1);
3022 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt2);
3024 else
3026 if (!*prev_stmt_info)
3027 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt1;
3028 else
3029 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt1;
3031 *prev_stmt_info = vinfo_for_stmt (new_stmt1);
3032 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt2;
3033 *prev_stmt_info = vinfo_for_stmt (new_stmt2);
3038 if (multi_step_cvt)
3040 /* For a multi-step promotion operation we call the function recursively
3041 for every stage: we start from the input type, create promotion
3042 operations to the intermediate types, and then create promotions to
3043 the output type. */
3044 *vec_oprnds0 = VEC_copy (tree, heap, vec_tmp);
3045 vect_create_vectorized_promotion_stmts (vec_oprnds0, vec_oprnds1,
3046 multi_step_cvt - 1, stmt,
3047 vec_dsts, gsi, slp_node, code1,
3048 code2, decl1, decl2, op_type,
3049 prev_stmt_info);
3052 VEC_free (tree, heap, vec_tmp);
3056 /* Function vectorizable_type_promotion
3058 Check if STMT performs a binary or unary operation that involves
3059 type promotion, and if it can be vectorized.
3060 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3061 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3062 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3064 static bool
3065 vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
3066 gimple *vec_stmt, slp_tree slp_node)
3068 tree vec_dest;
3069 tree scalar_dest;
3070 tree op0, op1 = NULL;
3071 tree vec_oprnd0=NULL, vec_oprnd1=NULL;
3072 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3073 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3074 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
3075 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
3076 int op_type;
3077 tree def;
3078 gimple def_stmt;
3079 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
3080 stmt_vec_info prev_stmt_info;
3081 int nunits_in;
3082 int nunits_out;
3083 tree vectype_out;
3084 int ncopies;
3085 int j, i;
3086 tree vectype_in;
3087 tree intermediate_type = NULL_TREE;
3088 int multi_step_cvt = 0;
3089 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
3090 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
3092 /* FORNOW: not supported by basic block SLP vectorization. */
3093 gcc_assert (loop_vinfo);
3095 if (!STMT_VINFO_RELEVANT_P (stmt_info))
3096 return false;
3098 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3099 return false;
3101 /* Is STMT a vectorizable type-promotion operation? */
3102 if (!is_gimple_assign (stmt))
3103 return false;
3105 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
3106 return false;
3108 code = gimple_assign_rhs_code (stmt);
3109 if (!CONVERT_EXPR_CODE_P (code)
3110 && code != WIDEN_MULT_EXPR)
3111 return false;
3113 scalar_dest = gimple_assign_lhs (stmt);
3114 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3116 /* Check the operands of the operation. */
3117 op0 = gimple_assign_rhs1 (stmt);
3118 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
3119 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
3120 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
3121 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
3122 && CONVERT_EXPR_CODE_P (code))))
3123 return false;
3124 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
3125 &def_stmt, &def, &dt[0], &vectype_in))
3127 if (vect_print_dump_info (REPORT_DETAILS))
3128 fprintf (vect_dump, "use not simple.");
3129 return false;
3131 /* If op0 is an external or constant def use a vector type with
3132 the same size as the output vector type. */
3133 if (!vectype_in)
3134 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
3135 if (vec_stmt)
3136 gcc_assert (vectype_in);
3137 if (!vectype_in)
3139 if (vect_print_dump_info (REPORT_DETAILS))
3141 fprintf (vect_dump, "no vectype for scalar type ");
3142 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
3145 return false;
3148 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
3149 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
3150 if (nunits_in <= nunits_out)
3151 return false;
3153 /* Multiple types in SLP are handled by creating the appropriate number of
3154 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3155 case of SLP. */
3156 if (slp_node)
3157 ncopies = 1;
3158 else
3159 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
3161 gcc_assert (ncopies >= 1);
3163 op_type = TREE_CODE_LENGTH (code);
3164 if (op_type == binary_op)
3166 op1 = gimple_assign_rhs2 (stmt);
3167 if (!vect_is_simple_use (op1, loop_vinfo, NULL, &def_stmt, &def, &dt[1]))
3169 if (vect_print_dump_info (REPORT_DETAILS))
3170 fprintf (vect_dump, "use not simple.");
3171 return false;
3175 /* Supportable by target? */
3176 if (!supportable_widening_operation (code, stmt, vectype_out, vectype_in,
3177 &decl1, &decl2, &code1, &code2,
3178 &multi_step_cvt, &interm_types))
3179 return false;
3181 /* Binary widening operation can only be supported directly by the
3182 architecture. */
3183 gcc_assert (!(multi_step_cvt && op_type == binary_op));
3185 if (!vec_stmt) /* transformation not required. */
3187 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
3188 if (vect_print_dump_info (REPORT_DETAILS))
3189 fprintf (vect_dump, "=== vectorizable_promotion ===");
3190 vect_model_simple_cost (stmt_info, 2*ncopies, dt, NULL);
3191 return true;
3194 /** Transform. **/
3196 if (vect_print_dump_info (REPORT_DETAILS))
3197 fprintf (vect_dump, "transform type promotion operation. ncopies = %d.",
3198 ncopies);
3200 /* Handle def. */
3201 /* In case of multi-step promotion, we first generate promotion operations
3202 to the intermediate types, and then from those types to the final one.
3203 We store vector destinations in VEC_DSTS in the correct order for
3204 recursive creation of promotion operations in
3205 vect_create_vectorized_promotion_stmts(). Vector destinations are created
3206 according to TYPES received from supportable_widening_operation(). */
3207 if (multi_step_cvt)
3208 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
3209 else
3210 vec_dsts = VEC_alloc (tree, heap, 1);
3212 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
3213 VEC_quick_push (tree, vec_dsts, vec_dest);
3215 if (multi_step_cvt)
3217 for (i = VEC_length (tree, interm_types) - 1;
3218 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
3220 vec_dest = vect_create_destination_var (scalar_dest,
3221 intermediate_type);
3222 VEC_quick_push (tree, vec_dsts, vec_dest);
3226 if (!slp_node)
3228 vec_oprnds0 = VEC_alloc (tree, heap,
3229 (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
3230 if (op_type == binary_op)
3231 vec_oprnds1 = VEC_alloc (tree, heap, 1);
3234 /* In case the vectorization factor (VF) is bigger than the number
3235 of elements that we can fit in a vectype (nunits), we have to generate
3236 more than one vector stmt - i.e - we need to "unroll" the
3237 vector stmt by a factor VF/nunits. */
3239 prev_stmt_info = NULL;
3240 for (j = 0; j < ncopies; j++)
3242 /* Handle uses. */
3243 if (j == 0)
3245 if (slp_node)
3246 vect_get_slp_defs (op0, op1, slp_node, &vec_oprnds0,
3247 &vec_oprnds1, -1);
3248 else
3250 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
3251 VEC_quick_push (tree, vec_oprnds0, vec_oprnd0);
3252 if (op_type == binary_op)
3254 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL);
3255 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
3259 else
3261 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
3262 VEC_replace (tree, vec_oprnds0, 0, vec_oprnd0);
3263 if (op_type == binary_op)
3265 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1);
3266 VEC_replace (tree, vec_oprnds1, 0, vec_oprnd1);
3270 /* Arguments are ready. Create the new vector stmts. */
3271 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
3272 vect_create_vectorized_promotion_stmts (&vec_oprnds0, &vec_oprnds1,
3273 multi_step_cvt, stmt,
3274 tmp_vec_dsts,
3275 gsi, slp_node, code1, code2,
3276 decl1, decl2, op_type,
3277 &prev_stmt_info);
3280 VEC_free (tree, heap, vec_dsts);
3281 VEC_free (tree, heap, tmp_vec_dsts);
3282 VEC_free (tree, heap, interm_types);
3283 VEC_free (tree, heap, vec_oprnds0);
3284 VEC_free (tree, heap, vec_oprnds1);
3286 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3287 return true;
3291 /* Function vectorizable_store.
3293 Check if STMT defines a non scalar data-ref (array/pointer/structure) that
3294 can be vectorized.
3295 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3296 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3297 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3299 static bool
3300 vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
3301 slp_tree slp_node)
3303 tree scalar_dest;
3304 tree data_ref;
3305 tree op;
3306 tree vec_oprnd = NULL_TREE;
3307 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3308 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
3309 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3310 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3311 struct loop *loop = NULL;
3312 enum machine_mode vec_mode;
3313 tree dummy;
3314 enum dr_alignment_support alignment_support_scheme;
3315 tree def;
3316 gimple def_stmt;
3317 enum vect_def_type dt;
3318 stmt_vec_info prev_stmt_info = NULL;
3319 tree dataref_ptr = NULL_TREE;
3320 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3321 int ncopies;
3322 int j;
3323 gimple next_stmt, first_stmt = NULL;
3324 bool strided_store = false;
3325 unsigned int group_size, i;
3326 VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
3327 bool inv_p;
3328 VEC(tree,heap) *vec_oprnds = NULL;
3329 bool slp = (slp_node != NULL);
3330 unsigned int vec_num;
3331 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3333 if (loop_vinfo)
3334 loop = LOOP_VINFO_LOOP (loop_vinfo);
3336 /* Multiple types in SLP are handled by creating the appropriate number of
3337 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3338 case of SLP. */
3339 if (slp)
3340 ncopies = 1;
3341 else
3342 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3344 gcc_assert (ncopies >= 1);
3346 /* FORNOW. This restriction should be relaxed. */
3347 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
3349 if (vect_print_dump_info (REPORT_DETAILS))
3350 fprintf (vect_dump, "multiple types in nested loop.");
3351 return false;
3354 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3355 return false;
3357 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3358 return false;
3360 /* Is vectorizable store? */
3362 if (!is_gimple_assign (stmt))
3363 return false;
3365 scalar_dest = gimple_assign_lhs (stmt);
3366 if (TREE_CODE (scalar_dest) != ARRAY_REF
3367 && TREE_CODE (scalar_dest) != INDIRECT_REF
3368 && TREE_CODE (scalar_dest) != COMPONENT_REF
3369 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
3370 && TREE_CODE (scalar_dest) != REALPART_EXPR
3371 && TREE_CODE (scalar_dest) != MEM_REF)
3372 return false;
3374 gcc_assert (gimple_assign_single_p (stmt));
3375 op = gimple_assign_rhs1 (stmt);
3376 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt))
3378 if (vect_print_dump_info (REPORT_DETAILS))
3379 fprintf (vect_dump, "use not simple.");
3380 return false;
3383 /* The scalar rhs type needs to be trivially convertible to the vector
3384 component type. This should always be the case. */
3385 if (!useless_type_conversion_p (TREE_TYPE (vectype), TREE_TYPE (op)))
3387 if (vect_print_dump_info (REPORT_DETAILS))
3388 fprintf (vect_dump, "??? operands of different types");
3389 return false;
3392 vec_mode = TYPE_MODE (vectype);
3393 /* FORNOW. In some cases we can vectorize even if the data-type is not
3394 supported (e.g. - array initialization with 0). */
3395 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
3396 return false;
3398 if (!STMT_VINFO_DATA_REF (stmt_info))
3399 return false;
3401 if (tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0)
3403 if (vect_print_dump_info (REPORT_DETAILS))
3404 fprintf (vect_dump, "negative step for store.");
3405 return false;
3408 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3410 strided_store = true;
3411 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
3412 if (!vect_strided_store_supported (vectype)
3413 && !PURE_SLP_STMT (stmt_info) && !slp)
3414 return false;
3416 if (first_stmt == stmt)
3418 /* STMT is the leader of the group. Check the operands of all the
3419 stmts of the group. */
3420 next_stmt = DR_GROUP_NEXT_DR (stmt_info);
3421 while (next_stmt)
3423 gcc_assert (gimple_assign_single_p (next_stmt));
3424 op = gimple_assign_rhs1 (next_stmt);
3425 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt,
3426 &def, &dt))
3428 if (vect_print_dump_info (REPORT_DETAILS))
3429 fprintf (vect_dump, "use not simple.");
3430 return false;
3432 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3437 if (!vec_stmt) /* transformation not required. */
3439 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
3440 vect_model_store_cost (stmt_info, ncopies, dt, NULL);
3441 return true;
3444 /** Transform. **/
3446 if (strided_store)
3448 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3449 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3451 DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
3453 /* FORNOW */
3454 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
3456 /* We vectorize all the stmts of the interleaving group when we
3457 reach the last stmt in the group. */
3458 if (DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
3459 < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))
3460 && !slp)
3462 *vec_stmt = NULL;
3463 return true;
3466 if (slp)
3468 strided_store = false;
3469 /* VEC_NUM is the number of vect stmts to be created for this
3470 group. */
3471 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3472 first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
3473 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3475 else
3476 /* VEC_NUM is the number of vect stmts to be created for this
3477 group. */
3478 vec_num = group_size;
3480 else
3482 first_stmt = stmt;
3483 first_dr = dr;
3484 group_size = vec_num = 1;
3487 if (vect_print_dump_info (REPORT_DETAILS))
3488 fprintf (vect_dump, "transform store. ncopies = %d",ncopies);
3490 dr_chain = VEC_alloc (tree, heap, group_size);
3491 oprnds = VEC_alloc (tree, heap, group_size);
3493 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
3494 gcc_assert (alignment_support_scheme);
3496 /* In case the vectorization factor (VF) is bigger than the number
3497 of elements that we can fit in a vectype (nunits), we have to generate
3498 more than one vector stmt - i.e - we need to "unroll" the
3499 vector stmt by a factor VF/nunits. For more details see documentation in
3500 vect_get_vec_def_for_copy_stmt. */
3502 /* In case of interleaving (non-unit strided access):
3504 S1: &base + 2 = x2
3505 S2: &base = x0
3506 S3: &base + 1 = x1
3507 S4: &base + 3 = x3
3509 We create vectorized stores starting from base address (the access of the
3510 first stmt in the chain (S2 in the above example), when the last store stmt
3511 of the chain (S4) is reached:
3513 VS1: &base = vx2
3514 VS2: &base + vec_size*1 = vx0
3515 VS3: &base + vec_size*2 = vx1
3516 VS4: &base + vec_size*3 = vx3
3518 Then permutation statements are generated:
3520 VS5: vx5 = VEC_INTERLEAVE_HIGH_EXPR < vx0, vx3 >
3521 VS6: vx6 = VEC_INTERLEAVE_LOW_EXPR < vx0, vx3 >
3524 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3525 (the order of the data-refs in the output of vect_permute_store_chain
3526 corresponds to the order of scalar stmts in the interleaving chain - see
3527 the documentation of vect_permute_store_chain()).
3529 In case of both multiple types and interleaving, above vector stores and
3530 permutation stmts are created for every copy. The result vector stmts are
3531 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
3532 STMT_VINFO_RELATED_STMT for the next copies.
3535 prev_stmt_info = NULL;
3536 for (j = 0; j < ncopies; j++)
3538 gimple new_stmt;
3539 gimple ptr_incr;
3541 if (j == 0)
3543 if (slp)
3545 /* Get vectorized arguments for SLP_NODE. */
3546 vect_get_slp_defs (NULL_TREE, NULL_TREE, slp_node, &vec_oprnds,
3547 NULL, -1);
3549 vec_oprnd = VEC_index (tree, vec_oprnds, 0);
3551 else
3553 /* For interleaved stores we collect vectorized defs for all the
3554 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
3555 used as an input to vect_permute_store_chain(), and OPRNDS as
3556 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
3558 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3559 OPRNDS are of size 1. */
3560 next_stmt = first_stmt;
3561 for (i = 0; i < group_size; i++)
3563 /* Since gaps are not supported for interleaved stores,
3564 GROUP_SIZE is the exact number of stmts in the chain.
3565 Therefore, NEXT_STMT can't be NULL_TREE. In case that
3566 there is no interleaving, GROUP_SIZE is 1, and only one
3567 iteration of the loop will be executed. */
3568 gcc_assert (next_stmt
3569 && gimple_assign_single_p (next_stmt));
3570 op = gimple_assign_rhs1 (next_stmt);
3572 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
3573 NULL);
3574 VEC_quick_push(tree, dr_chain, vec_oprnd);
3575 VEC_quick_push(tree, oprnds, vec_oprnd);
3576 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3580 /* We should have caught mismatched types earlier. */
3581 gcc_assert (useless_type_conversion_p (vectype,
3582 TREE_TYPE (vec_oprnd)));
3583 dataref_ptr = vect_create_data_ref_ptr (first_stmt, NULL, NULL_TREE,
3584 &dummy, &ptr_incr, false,
3585 &inv_p);
3586 gcc_assert (bb_vinfo || !inv_p);
3588 else
3590 /* For interleaved stores we created vectorized defs for all the
3591 defs stored in OPRNDS in the previous iteration (previous copy).
3592 DR_CHAIN is then used as an input to vect_permute_store_chain(),
3593 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
3594 next copy.
3595 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3596 OPRNDS are of size 1. */
3597 for (i = 0; i < group_size; i++)
3599 op = VEC_index (tree, oprnds, i);
3600 vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def,
3601 &dt);
3602 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
3603 VEC_replace(tree, dr_chain, i, vec_oprnd);
3604 VEC_replace(tree, oprnds, i, vec_oprnd);
3606 dataref_ptr =
3607 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
3610 if (strided_store)
3612 result_chain = VEC_alloc (tree, heap, group_size);
3613 /* Permute. */
3614 if (!vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
3615 &result_chain))
3616 return false;
3619 next_stmt = first_stmt;
3620 for (i = 0; i < vec_num; i++)
3622 struct ptr_info_def *pi;
3624 if (i > 0)
3625 /* Bump the vector pointer. */
3626 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3627 NULL_TREE);
3629 if (slp)
3630 vec_oprnd = VEC_index (tree, vec_oprnds, i);
3631 else if (strided_store)
3632 /* For strided stores vectorized defs are interleaved in
3633 vect_permute_store_chain(). */
3634 vec_oprnd = VEC_index (tree, result_chain, i);
3636 data_ref = build2 (MEM_REF, TREE_TYPE (vec_oprnd), dataref_ptr,
3637 build_int_cst (reference_alias_ptr_type
3638 (DR_REF (first_dr)), 0));
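/* Record what is known about the pointer's alignment: an aligned access
   keeps the full vector alignment, an unknown misalignment (-1) falls
   back to element alignment, and a known misalignment is recorded for
   the expander. */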
3639 pi = get_ptr_info (dataref_ptr);
3640 pi->align = TYPE_ALIGN_UNIT (vectype);
3641 if (aligned_access_p (first_dr))
3642 pi->misalign = 0;
3643 else if (DR_MISALIGNMENT (first_dr) == -1)
3645 TREE_TYPE (data_ref)
3646 = build_aligned_type (TREE_TYPE (data_ref),
3647 TYPE_ALIGN (TREE_TYPE (vectype)));
3648 pi->align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
3649 pi->misalign = 0;
3651 else
3653 TREE_TYPE (data_ref)
3654 = build_aligned_type (TREE_TYPE (data_ref),
3655 TYPE_ALIGN (TREE_TYPE (vectype)));
3656 pi->misalign = DR_MISALIGNMENT (first_dr);
3659 /* Arguments are ready. Create the new vector stmt. */
3660 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
3661 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3662 mark_symbols_for_renaming (new_stmt);
3664 if (slp)
3665 continue;
3667 if (j == 0)
3668 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3669 else
3670 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3672 prev_stmt_info = vinfo_for_stmt (new_stmt);
3673 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3674 if (!next_stmt)
3675 break;
3679 VEC_free (tree, heap, dr_chain);
3680 VEC_free (tree, heap, oprnds);
3681 if (result_chain)
3682 VEC_free (tree, heap, result_chain);
3683 if (vec_oprnds)
3684 VEC_free (tree, heap, vec_oprnds);
3686 return true;
3689 /* Given a vector type VECTYPE returns a builtin DECL to be used
3690 for vector permutation and stores a mask into *MASK that implements
3691 reversal of the vector elements. If that is impossible to do,
3692 NULL is returned (and *MASK is unchanged). */
3694 static tree
3695 perm_mask_for_reverse (tree vectype, tree *mask)
3697 tree builtin_decl;
3698 tree mask_element_type, mask_type;
3699 tree mask_vec = NULL;
3700 int i;
3701 int nunits;
3702 if (!targetm.vectorize.builtin_vec_perm)
3703 return NULL;
3705 builtin_decl = targetm.vectorize.builtin_vec_perm (vectype,
3706 &mask_element_type);
3707 if (!builtin_decl || !mask_element_type)
3708 return NULL;
3710 mask_type = get_vectype_for_scalar_type (mask_element_type);
3711 nunits = TYPE_VECTOR_SUBPARTS (vectype);
3712 if (!mask_type
3713 || TYPE_VECTOR_SUBPARTS (vectype) != TYPE_VECTOR_SUBPARTS (mask_type))
3714 return NULL;
3716 for (i = 0; i < nunits; i++)
3717 mask_vec = tree_cons (NULL, build_int_cst (mask_element_type, i), mask_vec);
3718 mask_vec = build_vector (mask_type, mask_vec);
3720 if (!targetm.vectorize.builtin_vec_perm_ok (vectype, mask_vec))
3721 return NULL;
3722 if (mask)
3723 *mask = mask_vec;
3724 return builtin_decl;
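/* Editorial illustration, not part of the original sources: for a
   four-element vector the mask built above amounts to a reversing
   permutation, roughly

       x    = { a, b, c, d }
       mask = { 3, 2, 1, 0 }
       builtin_vec_perm (x, x, mask)  -->  { d, c, b, a }

   provided the target's builtin_vec_perm_ok hook accepts that mask.  */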
3727 /* Given a vector variable X, that was generated for the scalar LHS of
3728 STMT, generate instructions to reverse the vector elements of X,
3729 insert them at *GSI and return the permuted vector variable. */
3731 static tree
3732 reverse_vec_elements (tree x, gimple stmt, gimple_stmt_iterator *gsi)
3734 tree vectype = TREE_TYPE (x);
3735 tree mask_vec, builtin_decl;
3736 tree perm_dest, data_ref;
3737 gimple perm_stmt;
3739 builtin_decl = perm_mask_for_reverse (vectype, &mask_vec);
3741 perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
3743 /* Generate the permute statement. */
3744 perm_stmt = gimple_build_call (builtin_decl, 3, x, x, mask_vec);
3745 if (!useless_type_conversion_p (vectype,
3746 TREE_TYPE (TREE_TYPE (builtin_decl))))
3748 tree tem = create_tmp_reg (TREE_TYPE (TREE_TYPE (builtin_decl)), NULL);
3749 tem = make_ssa_name (tem, perm_stmt);
3750 gimple_call_set_lhs (perm_stmt, tem);
3751 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
3752 perm_stmt = gimple_build_assign (NULL_TREE,
3753 build1 (VIEW_CONVERT_EXPR,
3754 vectype, tem));
3756 data_ref = make_ssa_name (perm_dest, perm_stmt);
3757 gimple_set_lhs (perm_stmt, data_ref);
3758 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
3760 return data_ref;
3763 /* vectorizable_load.
3765 Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
3766 can be vectorized.
3767 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3768 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3769 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3771 static bool
3772 vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
3773 slp_tree slp_node, slp_instance slp_node_instance)
3775 tree scalar_dest;
3776 tree vec_dest = NULL;
3777 tree data_ref = NULL;
3778 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3779 stmt_vec_info prev_stmt_info;
3780 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3781 struct loop *loop = NULL;
3782 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
3783 bool nested_in_vect_loop = false;
3784 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
3785 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3786 tree new_temp;
3787 enum machine_mode mode;
3788 gimple new_stmt = NULL;
3789 tree dummy;
3790 enum dr_alignment_support alignment_support_scheme;
3791 tree dataref_ptr = NULL_TREE;
3792 gimple ptr_incr;
3793 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3794 int ncopies;
3795 int i, j, group_size;
3796 tree msq = NULL_TREE, lsq;
3797 tree offset = NULL_TREE;
3798 tree realignment_token = NULL_TREE;
3799 gimple phi = NULL;
3800 VEC(tree,heap) *dr_chain = NULL;
3801 bool strided_load = false;
3802 gimple first_stmt;
3803 tree scalar_type;
3804 bool inv_p;
3805 bool negative;
3806 bool compute_in_loop = false;
3807 struct loop *at_loop;
3808 int vec_num;
3809 bool slp = (slp_node != NULL);
3810 bool slp_perm = false;
3811 enum tree_code code;
3812 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3813 int vf;
3815 if (loop_vinfo)
3817 loop = LOOP_VINFO_LOOP (loop_vinfo);
3818 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
3819 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3821 else
3822 vf = 1;
3824 /* Multiple types in SLP are handled by creating the appropriate number of
3825 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3826 case of SLP. */
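/* Editorial example: with a vectorization factor of 8 and a vectype of
   4 units (nunits == 4), ncopies == 8 / 4 == 2, i.e. two vector loads
   are generated for each scalar load in the loop body.  */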
3827 if (slp)
3828 ncopies = 1;
3829 else
3830 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3832 gcc_assert (ncopies >= 1);
3834 /* FORNOW. This restriction should be relaxed. */
3835 if (nested_in_vect_loop && ncopies > 1)
3837 if (vect_print_dump_info (REPORT_DETAILS))
3838 fprintf (vect_dump, "multiple types in nested loop.");
3839 return false;
3842 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3843 return false;
3845 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3846 return false;
3848 /* Is vectorizable load? */
3849 if (!is_gimple_assign (stmt))
3850 return false;
3852 scalar_dest = gimple_assign_lhs (stmt);
3853 if (TREE_CODE (scalar_dest) != SSA_NAME)
3854 return false;
3856 code = gimple_assign_rhs_code (stmt);
3857 if (code != ARRAY_REF
3858 && code != INDIRECT_REF
3859 && code != COMPONENT_REF
3860 && code != IMAGPART_EXPR
3861 && code != REALPART_EXPR
3862 && code != MEM_REF)
3863 return false;
3865 if (!STMT_VINFO_DATA_REF (stmt_info))
3866 return false;
3868 negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
3869 if (negative && ncopies > 1)
3871 if (vect_print_dump_info (REPORT_DETAILS))
3872 fprintf (vect_dump, "multiple types with negative step.");
3873 return false;
3876 scalar_type = TREE_TYPE (DR_REF (dr));
3877 mode = TYPE_MODE (vectype);
3879 /* FORNOW. In some cases we can vectorize even if the data-type is not
3880 supported (e.g. data copies). */
3881 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
3883 if (vect_print_dump_info (REPORT_DETAILS))
3884 fprintf (vect_dump, "Aligned load, but unsupported type.");
3885 return false;
3888 /* The vector component type needs to be trivially convertible to the
3889 scalar lhs. This should always be the case. */
3890 if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), TREE_TYPE (vectype)))
3892 if (vect_print_dump_info (REPORT_DETAILS))
3893 fprintf (vect_dump, "??? operands of different types");
3894 return false;
3897 /* Check if the load is a part of an interleaving chain. */
3898 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3900 strided_load = true;
3901 /* FORNOW */
3902 gcc_assert (! nested_in_vect_loop);
3904 /* Check if interleaving is supported. */
3905 if (!vect_strided_load_supported (vectype)
3906 && !PURE_SLP_STMT (stmt_info) && !slp)
3907 return false;
3910 if (negative)
3912 gcc_assert (!strided_load);
3913 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
3914 if (alignment_support_scheme != dr_aligned
3915 && alignment_support_scheme != dr_unaligned_supported)
3917 if (vect_print_dump_info (REPORT_DETAILS))
3918 fprintf (vect_dump, "negative step but alignment required.");
3919 return false;
3921 if (!perm_mask_for_reverse (vectype, NULL))
3923 if (vect_print_dump_info (REPORT_DETAILS))
3924 fprintf (vect_dump, "negative step and reversing not supported.");
3925 return false;
3929 if (!vec_stmt) /* transformation not required. */
3931 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
3932 vect_model_load_cost (stmt_info, ncopies, NULL);
3933 return true;
3936 if (vect_print_dump_info (REPORT_DETAILS))
3937 fprintf (vect_dump, "transform load.");
3939 /** Transform. **/
3941 if (strided_load)
3943 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
3944 /* Check if the chain of loads is already vectorized. */
3945 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt)))
3947 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3948 return true;
3950 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3951 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3953 /* VEC_NUM is the number of vect stmts to be created for this group. */
3954 if (slp)
3956 strided_load = false;
3957 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3958 if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance))
3959 slp_perm = true;
3961 else
3962 vec_num = group_size;
3964 dr_chain = VEC_alloc (tree, heap, vec_num);
3966 else
3968 first_stmt = stmt;
3969 first_dr = dr;
3970 group_size = vec_num = 1;
3973 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
3974 gcc_assert (alignment_support_scheme);
3976 /* In case the vectorization factor (VF) is bigger than the number
3977 of elements that we can fit in a vectype (nunits), we have to generate
3978 more than one vector stmt - i.e - we need to "unroll" the
3979 vector stmt by a factor VF/nunits. In doing so, we record a pointer
3980 from one copy of the vector stmt to the next, in the field
3981 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
3982 stages to find the correct vector defs to be used when vectorizing
3983 stmts that use the defs of the current stmt. The example below
3984 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
3985 need to create 4 vectorized stmts):
3987 before vectorization:
3988 RELATED_STMT VEC_STMT
3989 S1: x = memref - -
3990 S2: z = x + 1 - -
3992 step 1: vectorize stmt S1:
3993 We first create the vector stmt VS1_0, and, as usual, record a
3994 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
3995 Next, we create the vector stmt VS1_1, and record a pointer to
3996 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
3997 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
3998 stmts and pointers:
3999 RELATED_STMT VEC_STMT
4000 VS1_0: vx0 = memref0 VS1_1 -
4001 VS1_1: vx1 = memref1 VS1_2 -
4002 VS1_2: vx2 = memref2 VS1_3 -
4003 VS1_3: vx3 = memref3 - -
4004 S1: x = load - VS1_0
4005 S2: z = x + 1 - -
4007 See in documentation in vect_get_vec_def_for_stmt_copy for how the
4008 information we recorded in RELATED_STMT field is used to vectorize
4009 stmt S2. */
4011 /* In case of interleaving (non-unit strided access):
4013 S1: x2 = &base + 2
4014 S2: x0 = &base
4015 S3: x1 = &base + 1
4016 S4: x3 = &base + 3
4018 Vectorized loads are created in the order of memory accesses
4019 starting from the access of the first stmt of the chain:
4021 VS1: vx0 = &base
4022 VS2: vx1 = &base + vec_size*1
4023 VS3: vx2 = &base + vec_size*2
4024 VS4: vx3 = &base + vec_size*3
4026 Then permutation statements are generated:
4028 VS5: vx5 = VEC_EXTRACT_EVEN_EXPR < vx0, vx1 >
4029 VS6: vx6 = VEC_EXTRACT_ODD_EXPR < vx0, vx1 >
4032 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
4033 (the order of the data-refs in the output of vect_permute_load_chain
4034 corresponds to the order of scalar stmts in the interleaving chain - see
4035 the documentation of vect_permute_load_chain()).
4036 The generation of permutation stmts and recording them in
4037 STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().
4039 In case of both multiple types and interleaving, the vector loads and
4040 permutation stmts above are created for every copy. The result vector
4041 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
4042 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
4044 /* If the data reference is aligned (dr_aligned) or potentially unaligned
4045 on a target that supports unaligned accesses (dr_unaligned_supported)
4046 we generate the following code:
4047 p = initial_addr;
4048 indx = 0;
4049 loop {
4050 p = p + indx * vectype_size;
4051 vec_dest = *(p);
4052 indx = indx + 1;
4055 Otherwise, the data reference is potentially unaligned on a target that
4056 does not support unaligned accesses (dr_explicit_realign_optimized) -
4057 then generate the following code, in which the data in each iteration is
4058 obtained by two vector loads, one from the previous iteration, and one
4059 from the current iteration:
4060 p1 = initial_addr;
4061 msq_init = *(floor(p1))
4062 p2 = initial_addr + VS - 1;
4063 realignment_token = call target_builtin;
4064 indx = 0;
4065 loop {
4066 p2 = p2 + indx * vectype_size
4067 lsq = *(floor(p2))
4068 vec_dest = realign_load (msq, lsq, realignment_token)
4069 indx = indx + 1;
4070 msq = lsq;
4071 } */
4073 /* If the misalignment remains the same throughout the execution of the
4074 loop, we can create the init_addr and permutation mask at the loop
4075 preheader. Otherwise, it needs to be created inside the loop.
4076 This can only occur when vectorizing memory accesses in the inner-loop
4077 nested within an outer-loop that is being vectorized. */
4079 if (loop && nested_in_vect_loop_p (loop, stmt)
4080 && (TREE_INT_CST_LOW (DR_STEP (dr))
4081 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
4083 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
4084 compute_in_loop = true;
4087 if ((alignment_support_scheme == dr_explicit_realign_optimized
4088 || alignment_support_scheme == dr_explicit_realign)
4089 && !compute_in_loop)
4091 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
4092 alignment_support_scheme, NULL_TREE,
4093 &at_loop);
4094 if (alignment_support_scheme == dr_explicit_realign_optimized)
4096 phi = SSA_NAME_DEF_STMT (msq);
4097 offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
4100 else
4101 at_loop = loop;
4103 if (negative)
4104 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
4106 prev_stmt_info = NULL;
4107 for (j = 0; j < ncopies; j++)
4109 /* 1. Create the vector pointer update chain. */
4110 if (j == 0)
4111 dataref_ptr = vect_create_data_ref_ptr (first_stmt,
4112 at_loop, offset,
4113 &dummy, &ptr_incr, false,
4114 &inv_p);
4115 else
4116 dataref_ptr =
4117 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
4119 for (i = 0; i < vec_num; i++)
4121 if (i > 0)
4122 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
4123 NULL_TREE);
4125 /* 2. Create the vector-load in the loop. */
4126 switch (alignment_support_scheme)
4128 case dr_aligned:
4129 case dr_unaligned_supported:
4131 struct ptr_info_def *pi;
4132 data_ref
4133 = build2 (MEM_REF, vectype, dataref_ptr,
4134 build_int_cst (reference_alias_ptr_type
4135 (DR_REF (first_dr)), 0));
4136 pi = get_ptr_info (dataref_ptr);
4137 pi->align = TYPE_ALIGN_UNIT (vectype);
4138 if (alignment_support_scheme == dr_aligned)
4140 gcc_assert (aligned_access_p (first_dr));
4141 pi->misalign = 0;
4143 else if (DR_MISALIGNMENT (first_dr) == -1)
4145 TREE_TYPE (data_ref)
4146 = build_aligned_type (TREE_TYPE (data_ref),
4147 TYPE_ALIGN (TREE_TYPE (vectype)));
4148 pi->align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
4149 pi->misalign = 0;
4151 else
4153 TREE_TYPE (data_ref)
4154 = build_aligned_type (TREE_TYPE (data_ref),
4155 TYPE_ALIGN (TREE_TYPE (vectype)));
4156 pi->misalign = DR_MISALIGNMENT (first_dr);
4158 break;
4160 case dr_explicit_realign:
4162 tree ptr, bump;
4163 tree vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
4165 if (compute_in_loop)
4166 msq = vect_setup_realignment (first_stmt, gsi,
4167 &realignment_token,
4168 dr_explicit_realign,
4169 dataref_ptr, NULL);
4171 new_stmt = gimple_build_assign_with_ops
4172 (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
4173 build_int_cst
4174 (TREE_TYPE (dataref_ptr),
4175 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
4176 ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
4177 gimple_assign_set_lhs (new_stmt, ptr);
4178 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4179 data_ref
4180 = build2 (MEM_REF, vectype, ptr,
4181 build_int_cst (reference_alias_ptr_type
4182 (DR_REF (first_dr)), 0));
4183 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4184 new_stmt = gimple_build_assign (vec_dest, data_ref);
4185 new_temp = make_ssa_name (vec_dest, new_stmt);
4186 gimple_assign_set_lhs (new_stmt, new_temp);
4187 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
4188 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
4189 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4190 msq = new_temp;
4192 bump = size_binop (MULT_EXPR, vs_minus_1,
4193 TYPE_SIZE_UNIT (scalar_type));
4194 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
4195 new_stmt = gimple_build_assign_with_ops
4196 (BIT_AND_EXPR, NULL_TREE, ptr,
4197 build_int_cst
4198 (TREE_TYPE (ptr),
4199 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
4200 ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
4201 gimple_assign_set_lhs (new_stmt, ptr);
4202 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4203 data_ref
4204 = build2 (MEM_REF, vectype, ptr,
4205 build_int_cst (reference_alias_ptr_type
4206 (DR_REF (first_dr)), 0));
4207 break;
4209 case dr_explicit_realign_optimized:
4210 new_stmt = gimple_build_assign_with_ops
4211 (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
4212 build_int_cst
4213 (TREE_TYPE (dataref_ptr),
4214 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
4215 new_temp = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
4216 gimple_assign_set_lhs (new_stmt, new_temp);
4217 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4218 data_ref
4219 = build2 (MEM_REF, vectype, new_temp,
4220 build_int_cst (reference_alias_ptr_type
4221 (DR_REF (first_dr)), 0));
4222 break;
4223 default:
4224 gcc_unreachable ();
4226 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4227 new_stmt = gimple_build_assign (vec_dest, data_ref);
4228 new_temp = make_ssa_name (vec_dest, new_stmt);
4229 gimple_assign_set_lhs (new_stmt, new_temp);
4230 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4231 mark_symbols_for_renaming (new_stmt);
4233 /* 3. Handle explicit realignment if necessary/supported. Create in
4234 loop: vec_dest = realign_load (msq, lsq, realignment_token) */
4235 if (alignment_support_scheme == dr_explicit_realign_optimized
4236 || alignment_support_scheme == dr_explicit_realign)
4238 tree tmp;
4240 lsq = gimple_assign_lhs (new_stmt);
4241 if (!realignment_token)
4242 realignment_token = dataref_ptr;
4243 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4244 tmp = build3 (REALIGN_LOAD_EXPR, vectype, msq, lsq,
4245 realignment_token);
4246 new_stmt = gimple_build_assign (vec_dest, tmp);
4247 new_temp = make_ssa_name (vec_dest, new_stmt);
4248 gimple_assign_set_lhs (new_stmt, new_temp);
4249 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4251 if (alignment_support_scheme == dr_explicit_realign_optimized)
4253 gcc_assert (phi);
4254 if (i == vec_num - 1 && j == ncopies - 1)
4255 add_phi_arg (phi, lsq, loop_latch_edge (containing_loop),
4256 UNKNOWN_LOCATION);
4257 msq = lsq;
4261 /* 4. Handle invariant-load. */
4262 if (inv_p && !bb_vinfo)
4264 gcc_assert (!strided_load);
4265 gcc_assert (nested_in_vect_loop_p (loop, stmt));
4266 if (j == 0)
4268 int k;
4269 tree t = NULL_TREE;
4270 tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);
4272 /* CHECKME: bitpos depends on endianness? */
4273 bitpos = bitsize_zero_node;
4274 vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
4275 bitsize, bitpos);
4276 vec_dest =
4277 vect_create_destination_var (scalar_dest, NULL_TREE);
4278 new_stmt = gimple_build_assign (vec_dest, vec_inv);
4279 new_temp = make_ssa_name (vec_dest, new_stmt);
4280 gimple_assign_set_lhs (new_stmt, new_temp);
4281 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4283 for (k = nunits - 1; k >= 0; --k)
4284 t = tree_cons (NULL_TREE, new_temp, t);
4285 /* FIXME: use build_constructor directly. */
4286 vec_inv = build_constructor_from_list (vectype, t);
4287 new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
4288 new_stmt = SSA_NAME_DEF_STMT (new_temp);
4290 else
4291 gcc_unreachable (); /* FORNOW. */
4294 if (negative)
4296 new_temp = reverse_vec_elements (new_temp, stmt, gsi);
4297 new_stmt = SSA_NAME_DEF_STMT (new_temp);
4300 /* Collect vector loads and later create their permutation in
4301 vect_transform_strided_load (). */
4302 if (strided_load || slp_perm)
4303 VEC_quick_push (tree, dr_chain, new_temp);
4305 /* Store vector loads in the corresponding SLP_NODE. */
4306 if (slp && !slp_perm)
4307 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
4310 if (slp && !slp_perm)
4311 continue;
4313 if (slp_perm)
4315 if (!vect_transform_slp_perm_load (stmt, dr_chain, gsi, vf,
4316 slp_node_instance, false))
4318 VEC_free (tree, heap, dr_chain);
4319 return false;
4322 else
4324 if (strided_load)
4326 if (!vect_transform_strided_load (stmt, dr_chain, group_size, gsi))
4327 return false;
4329 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4330 VEC_free (tree, heap, dr_chain);
4331 dr_chain = VEC_alloc (tree, heap, group_size);
4333 else
4335 if (j == 0)
4336 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4337 else
4338 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4339 prev_stmt_info = vinfo_for_stmt (new_stmt);
4344 if (dr_chain)
4345 VEC_free (tree, heap, dr_chain);
4347 return true;
4350 /* Function vect_is_simple_cond.
4352 Input:
4353 LOOP - the loop that is being vectorized.
4354 COND - Condition that is checked for simple use.
4356 Returns whether a COND can be vectorized. Checks whether
4357 condition operands are supportable using vect_is_simple_use. */
4359 static bool
4360 vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo)
4362 tree lhs, rhs;
4363 tree def;
4364 enum vect_def_type dt;
4366 if (!COMPARISON_CLASS_P (cond))
4367 return false;
4369 lhs = TREE_OPERAND (cond, 0);
4370 rhs = TREE_OPERAND (cond, 1);
4372 if (TREE_CODE (lhs) == SSA_NAME)
4374 gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
4375 if (!vect_is_simple_use (lhs, loop_vinfo, NULL, &lhs_def_stmt, &def,
4376 &dt))
4377 return false;
4379 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
4380 && TREE_CODE (lhs) != FIXED_CST)
4381 return false;
4383 if (TREE_CODE (rhs) == SSA_NAME)
4385 gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
4386 if (!vect_is_simple_use (rhs, loop_vinfo, NULL, &rhs_def_stmt, &def,
4387 &dt))
4388 return false;
4390 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
4391 && TREE_CODE (rhs) != FIXED_CST)
4392 return false;
4394 return true;
4397 /* vectorizable_condition.
4399 Check if STMT is conditional modify expression that can be vectorized.
4400 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4401 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
4402 at GSI.
4404 When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
4405 to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
4406 the else clause if it is 2).
4408 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
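/* Editorial sketch, not part of the original sources: a typical stmt this
   function handles comes from a loop such as

       for (i = 0; i < n; i++)
         x[i] = a[i] < b[i] ? c[i] : d[i];

   and (per copy) is vectorized roughly as

       vx = VEC_COND_EXPR <va < vb, vc, vd>;  */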
4410 bool
4411 vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
4412 gimple *vec_stmt, tree reduc_def, int reduc_index)
4414 tree scalar_dest = NULL_TREE;
4415 tree vec_dest = NULL_TREE;
4416 tree op = NULL_TREE;
4417 tree cond_expr, then_clause, else_clause;
4418 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4419 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4420 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
4421 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
4422 tree vec_compare, vec_cond_expr;
4423 tree new_temp;
4424 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4425 enum machine_mode vec_mode;
4426 tree def;
4427 enum vect_def_type dt, dts[4];
4428 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4429 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4430 enum tree_code code;
4431 stmt_vec_info prev_stmt_info = NULL;
4432 int j;
4434 /* FORNOW: unsupported in basic block SLP. */
4435 gcc_assert (loop_vinfo);
4437 gcc_assert (ncopies >= 1);
4438 if (reduc_index && ncopies > 1)
4439 return false; /* FORNOW */
4441 if (!STMT_VINFO_RELEVANT_P (stmt_info))
4442 return false;
4444 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4445 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
4446 && reduc_def))
4447 return false;
4449 /* FORNOW: SLP not supported. */
4450 if (STMT_SLP_TYPE (stmt_info))
4451 return false;
4453 /* FORNOW: not yet supported. */
4454 if (STMT_VINFO_LIVE_P (stmt_info))
4456 if (vect_print_dump_info (REPORT_DETAILS))
4457 fprintf (vect_dump, "value used after loop.");
4458 return false;
4461 /* Is vectorizable conditional operation? */
4462 if (!is_gimple_assign (stmt))
4463 return false;
4465 code = gimple_assign_rhs_code (stmt);
4467 if (code != COND_EXPR)
4468 return false;
4470 gcc_assert (gimple_assign_single_p (stmt));
4471 op = gimple_assign_rhs1 (stmt);
4472 cond_expr = TREE_OPERAND (op, 0);
4473 then_clause = TREE_OPERAND (op, 1);
4474 else_clause = TREE_OPERAND (op, 2);
4476 if (!vect_is_simple_cond (cond_expr, loop_vinfo))
4477 return false;
4479 /* We do not handle two different vector types for the condition
4480 and the values. */
4481 if (!types_compatible_p (TREE_TYPE (TREE_OPERAND (cond_expr, 0)),
4482 TREE_TYPE (vectype)))
4483 return false;
4485 if (TREE_CODE (then_clause) == SSA_NAME)
4487 gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
4488 if (!vect_is_simple_use (then_clause, loop_vinfo, NULL,
4489 &then_def_stmt, &def, &dt))
4490 return false;
4492 else if (TREE_CODE (then_clause) != INTEGER_CST
4493 && TREE_CODE (then_clause) != REAL_CST
4494 && TREE_CODE (then_clause) != FIXED_CST)
4495 return false;
4497 if (TREE_CODE (else_clause) == SSA_NAME)
4499 gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
4500 if (!vect_is_simple_use (else_clause, loop_vinfo, NULL,
4501 &else_def_stmt, &def, &dt))
4502 return false;
4504 else if (TREE_CODE (else_clause) != INTEGER_CST
4505 && TREE_CODE (else_clause) != REAL_CST
4506 && TREE_CODE (else_clause) != FIXED_CST)
4507 return false;
4510 vec_mode = TYPE_MODE (vectype);
4512 if (!vec_stmt)
4514 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
4515 return expand_vec_cond_expr_p (TREE_TYPE (op), vec_mode);
4518 /* Transform */
4520 /* Handle def. */
4521 scalar_dest = gimple_assign_lhs (stmt);
4522 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4524 /* Handle cond expr. */
4525 for (j = 0; j < ncopies; j++)
4527 gimple new_stmt;
4528 if (j == 0)
4530 gimple gtemp;
4531 vec_cond_lhs =
4532 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0),
4533 stmt, NULL);
4534 vect_is_simple_use (TREE_OPERAND (cond_expr, 0), loop_vinfo,
4535 NULL, &gtemp, &def, &dts[0]);
4536 vec_cond_rhs =
4537 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
4538 stmt, NULL);
4539 vect_is_simple_use (TREE_OPERAND (cond_expr, 1), loop_vinfo,
4540 NULL, &gtemp, &def, &dts[1]);
4541 if (reduc_index == 1)
4542 vec_then_clause = reduc_def;
4543 else
4545 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
4546 stmt, NULL);
4547 vect_is_simple_use (then_clause, loop_vinfo,
4548 NULL, &gtemp, &def, &dts[2]);
4550 if (reduc_index == 2)
4551 vec_else_clause = reduc_def;
4552 else
4554 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
4555 stmt, NULL);
4556 vect_is_simple_use (else_clause, loop_vinfo,
4557 NULL, &gtemp, &def, &dts[3]);
4560 else
4562 vec_cond_lhs = vect_get_vec_def_for_stmt_copy (dts[0], vec_cond_lhs);
4563 vec_cond_rhs = vect_get_vec_def_for_stmt_copy (dts[1], vec_cond_rhs);
4564 vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
4565 vec_then_clause);
4566 vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
4567 vec_else_clause);
4570 /* Arguments are ready. Create the new vector stmt. */
4571 vec_compare = build2 (TREE_CODE (cond_expr), vectype,
4572 vec_cond_lhs, vec_cond_rhs);
4573 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
4574 vec_compare, vec_then_clause, vec_else_clause);
4576 new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
4577 new_temp = make_ssa_name (vec_dest, new_stmt);
4578 gimple_assign_set_lhs (new_stmt, new_temp);
4579 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4580 if (j == 0)
4581 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4582 else
4583 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4585 prev_stmt_info = vinfo_for_stmt (new_stmt);
4588 return true;
4592 /* Make sure the statement is vectorizable. */
4594 bool
4595 vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
4597 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4598 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4599 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
4600 bool ok;
4601 tree scalar_type, vectype;
4603 if (vect_print_dump_info (REPORT_DETAILS))
4605 fprintf (vect_dump, "==> examining statement: ");
4606 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4609 if (gimple_has_volatile_ops (stmt))
4611 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4612 fprintf (vect_dump, "not vectorized: stmt has volatile operands");
4614 return false;
4617 /* Skip stmts that do not need to be vectorized. In loops this is expected
4618 to include:
4619 - the COND_EXPR which is the loop exit condition
4620 - any LABEL_EXPRs in the loop
4621 - computations that are used only for array indexing or loop control.
4622 In basic blocks we only analyze statements that are a part of some SLP
4623 instance, therefore, all the statements are relevant. */
4625 if (!STMT_VINFO_RELEVANT_P (stmt_info)
4626 && !STMT_VINFO_LIVE_P (stmt_info))
4628 if (vect_print_dump_info (REPORT_DETAILS))
4629 fprintf (vect_dump, "irrelevant.");
4631 return true;
4634 switch (STMT_VINFO_DEF_TYPE (stmt_info))
4636 case vect_internal_def:
4637 break;
4639 case vect_reduction_def:
4640 case vect_nested_cycle:
4641 gcc_assert (!bb_vinfo && (relevance == vect_used_in_outer
4642 || relevance == vect_used_in_outer_by_reduction
4643 || relevance == vect_unused_in_scope));
4644 break;
4646 case vect_induction_def:
4647 case vect_constant_def:
4648 case vect_external_def:
4649 case vect_unknown_def_type:
4650 default:
4651 gcc_unreachable ();
4654 if (bb_vinfo)
4656 gcc_assert (PURE_SLP_STMT (stmt_info));
4658 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
4659 if (vect_print_dump_info (REPORT_DETAILS))
4661 fprintf (vect_dump, "get vectype for scalar type: ");
4662 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4665 vectype = get_vectype_for_scalar_type (scalar_type);
4666 if (!vectype)
4668 if (vect_print_dump_info (REPORT_DETAILS))
4670 fprintf (vect_dump, "not SLPed: unsupported data-type ");
4671 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4673 return false;
4676 if (vect_print_dump_info (REPORT_DETAILS))
4678 fprintf (vect_dump, "vectype: ");
4679 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4682 STMT_VINFO_VECTYPE (stmt_info) = vectype;
4685 if (STMT_VINFO_RELEVANT_P (stmt_info))
4687 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
4688 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
4689 *need_to_vectorize = true;
4692 ok = true;
4693 if (!bb_vinfo
4694 && (STMT_VINFO_RELEVANT_P (stmt_info)
4695 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
4696 ok = (vectorizable_type_promotion (stmt, NULL, NULL, NULL)
4697 || vectorizable_type_demotion (stmt, NULL, NULL, NULL)
4698 || vectorizable_conversion (stmt, NULL, NULL, NULL)
4699 || vectorizable_shift (stmt, NULL, NULL, NULL)
4700 || vectorizable_operation (stmt, NULL, NULL, NULL)
4701 || vectorizable_assignment (stmt, NULL, NULL, NULL)
4702 || vectorizable_load (stmt, NULL, NULL, NULL, NULL)
4703 || vectorizable_call (stmt, NULL, NULL)
4704 || vectorizable_store (stmt, NULL, NULL, NULL)
4705 || vectorizable_reduction (stmt, NULL, NULL, NULL)
4706 || vectorizable_condition (stmt, NULL, NULL, NULL, 0));
4707 else
4709 if (bb_vinfo)
4710 ok = (vectorizable_shift (stmt, NULL, NULL, node)
4711 || vectorizable_operation (stmt, NULL, NULL, node)
4712 || vectorizable_assignment (stmt, NULL, NULL, node)
4713 || vectorizable_load (stmt, NULL, NULL, node, NULL)
4714 || vectorizable_store (stmt, NULL, NULL, node));
4717 if (!ok)
4719 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4721 fprintf (vect_dump, "not vectorized: relevant stmt not ");
4722 fprintf (vect_dump, "supported: ");
4723 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4726 return false;
4729 if (bb_vinfo)
4730 return true;
4732 /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
4733 need extra handling, except for vectorizable reductions. */
4734 if (STMT_VINFO_LIVE_P (stmt_info)
4735 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4736 ok = vectorizable_live_operation (stmt, NULL, NULL);
4738 if (!ok)
4740 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4742 fprintf (vect_dump, "not vectorized: live stmt not ");
4743 fprintf (vect_dump, "supported: ");
4744 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4747 return false;
4750 if (!PURE_SLP_STMT (stmt_info))
4752 /* Groups of strided accesses whose size is not a power of 2 are not
4753 vectorizable yet using loop-vectorization. Therefore, if this stmt
4754 feeds non-SLP-able stmts (i.e., this stmt has to be both SLPed and
4755 loop-based vectorized), the loop cannot be vectorized. */
4756 if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
4757 && exact_log2 (DR_GROUP_SIZE (vinfo_for_stmt (
4758 DR_GROUP_FIRST_DR (stmt_info)))) == -1)
4760 if (vect_print_dump_info (REPORT_DETAILS))
4762 fprintf (vect_dump, "not vectorized: the size of group "
4763 "of strided accesses is not a power of 2");
4764 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4767 return false;
4771 return true;
4775 /* Function vect_transform_stmt.
4777 Create a vectorized stmt to replace STMT, and insert it at BSI. */
4779 bool
4780 vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
4781 bool *strided_store, slp_tree slp_node,
4782 slp_instance slp_node_instance)
4784 bool is_store = false;
4785 gimple vec_stmt = NULL;
4786 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4787 gimple orig_stmt_in_pattern, orig_scalar_stmt = stmt;
4788 bool done;
4790 switch (STMT_VINFO_TYPE (stmt_info))
4792 case type_demotion_vec_info_type:
4793 done = vectorizable_type_demotion (stmt, gsi, &vec_stmt, slp_node);
4794 gcc_assert (done);
4795 break;
4797 case type_promotion_vec_info_type:
4798 done = vectorizable_type_promotion (stmt, gsi, &vec_stmt, slp_node);
4799 gcc_assert (done);
4800 break;
4802 case type_conversion_vec_info_type:
4803 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
4804 gcc_assert (done);
4805 break;
4807 case induc_vec_info_type:
4808 gcc_assert (!slp_node);
4809 done = vectorizable_induction (stmt, gsi, &vec_stmt);
4810 gcc_assert (done);
4811 break;
4813 case shift_vec_info_type:
4814 done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
4815 gcc_assert (done);
4816 break;
4818 case op_vec_info_type:
4819 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
4820 gcc_assert (done);
4821 break;
4823 case assignment_vec_info_type:
4824 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
4825 gcc_assert (done);
4826 break;
4828 case load_vec_info_type:
4829 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
4830 slp_node_instance);
4831 gcc_assert (done);
4832 break;
4834 case store_vec_info_type:
4835 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
4836 gcc_assert (done);
4837 if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && !slp_node)
4839 /* In case of interleaving, the whole chain is vectorized when the
4840 last store in the chain is reached. Store stmts before the last
4841 one are skipped, and their stmt_vec_info shouldn't be freed
4842 meanwhile. */
4843 *strided_store = true;
4844 if (STMT_VINFO_VEC_STMT (stmt_info))
4845 is_store = true;
4847 else
4848 is_store = true;
4849 break;
4851 case condition_vec_info_type:
4852 gcc_assert (!slp_node);
4853 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0);
4854 gcc_assert (done);
4855 break;
4857 case call_vec_info_type:
4858 gcc_assert (!slp_node);
4859 done = vectorizable_call (stmt, gsi, &vec_stmt);
4860 stmt = gsi_stmt (*gsi);
4861 break;
4863 case reduc_vec_info_type:
4864 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
4865 gcc_assert (done);
4866 break;
4868 default:
4869 if (!STMT_VINFO_LIVE_P (stmt_info))
4871 if (vect_print_dump_info (REPORT_DETAILS))
4872 fprintf (vect_dump, "stmt not supported.");
4873 gcc_unreachable ();
4877 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
4878 is being vectorized, but outside the immediately enclosing loop. */
4879 if (vec_stmt
4880 && STMT_VINFO_LOOP_VINFO (stmt_info)
4881 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
4882 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
4883 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
4884 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
4885 || STMT_VINFO_RELEVANT (stmt_info) ==
4886 vect_used_in_outer_by_reduction))
4888 struct loop *innerloop = LOOP_VINFO_LOOP (
4889 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
4890 imm_use_iterator imm_iter;
4891 use_operand_p use_p;
4892 tree scalar_dest;
4893 gimple exit_phi;
4895 if (vect_print_dump_info (REPORT_DETAILS))
4896 fprintf (vect_dump, "Record the vdef for outer-loop vectorization.");
4898 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
4899 (to be used when vectorizing outer-loop stmts that use the DEF of
4900 STMT). */
4901 if (gimple_code (stmt) == GIMPLE_PHI)
4902 scalar_dest = PHI_RESULT (stmt);
4903 else
4904 scalar_dest = gimple_assign_lhs (stmt);
4906 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4908 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
4910 exit_phi = USE_STMT (use_p);
4911 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
4916 /* Handle stmts whose DEF is used outside the loop-nest that is
4917 being vectorized. */
4918 if (STMT_VINFO_LIVE_P (stmt_info)
4919 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4921 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
4922 gcc_assert (done);
4925 if (vec_stmt)
4927 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
4928 orig_stmt_in_pattern = STMT_VINFO_RELATED_STMT (stmt_info);
4929 if (orig_stmt_in_pattern)
4931 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt_in_pattern);
4932 /* STMT was inserted by the vectorizer to replace a computation idiom.
4933 ORIG_STMT_IN_PATTERN is a stmt in the original sequence that
4934 computed this idiom. We need to record a pointer to VEC_STMT in
4935 the stmt_info of ORIG_STMT_IN_PATTERN. See more details in the
4936 documentation of vect_pattern_recog. */
4937 if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
4939 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo)
4940 == orig_scalar_stmt);
4941 STMT_VINFO_VEC_STMT (stmt_vinfo) = vec_stmt;
4946 return is_store;
4950 /* Remove a group of stores (for SLP or interleaving), free their
4951 stmt_vec_info. */
4953 void
4954 vect_remove_stores (gimple first_stmt)
4956 gimple next = first_stmt;
4957 gimple tmp;
4958 gimple_stmt_iterator next_si;
4960 while (next)
4962 /* Free the attached stmt_vec_info and remove the stmt. */
4963 next_si = gsi_for_stmt (next);
4964 gsi_remove (&next_si, true);
4965 tmp = DR_GROUP_NEXT_DR (vinfo_for_stmt (next));
4966 free_stmt_vec_info (next);
4967 next = tmp;
4972 /* Function new_stmt_vec_info.
4974 Create and initialize a new stmt_vec_info struct for STMT. */
4976 stmt_vec_info
4977 new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
4978 bb_vec_info bb_vinfo)
4980 stmt_vec_info res;
4981 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
4983 STMT_VINFO_TYPE (res) = undef_vec_info_type;
4984 STMT_VINFO_STMT (res) = stmt;
4985 STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
4986 STMT_VINFO_BB_VINFO (res) = bb_vinfo;
4987 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
4988 STMT_VINFO_LIVE_P (res) = false;
4989 STMT_VINFO_VECTYPE (res) = NULL;
4990 STMT_VINFO_VEC_STMT (res) = NULL;
4991 STMT_VINFO_VECTORIZABLE (res) = true;
4992 STMT_VINFO_IN_PATTERN_P (res) = false;
4993 STMT_VINFO_RELATED_STMT (res) = NULL;
4994 STMT_VINFO_DATA_REF (res) = NULL;
4996 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
4997 STMT_VINFO_DR_OFFSET (res) = NULL;
4998 STMT_VINFO_DR_INIT (res) = NULL;
4999 STMT_VINFO_DR_STEP (res) = NULL;
5000 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
5002 if (gimple_code (stmt) == GIMPLE_PHI
5003 && is_loop_header_bb_p (gimple_bb (stmt)))
5004 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
5005 else
5006 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
5008 STMT_VINFO_SAME_ALIGN_REFS (res) = VEC_alloc (dr_p, heap, 5);
5009 STMT_VINFO_INSIDE_OF_LOOP_COST (res) = 0;
5010 STMT_VINFO_OUTSIDE_OF_LOOP_COST (res) = 0;
5011 STMT_SLP_TYPE (res) = loop_vect;
5012 DR_GROUP_FIRST_DR (res) = NULL;
5013 DR_GROUP_NEXT_DR (res) = NULL;
5014 DR_GROUP_SIZE (res) = 0;
5015 DR_GROUP_STORE_COUNT (res) = 0;
5016 DR_GROUP_GAP (res) = 0;
5017 DR_GROUP_SAME_DR_STMT (res) = NULL;
5018 DR_GROUP_READ_WRITE_DEPENDENCE (res) = false;
5020 return res;
5024 /* Create a vector for storing stmt_vec_info structs. */
5026 void
5027 init_stmt_vec_info_vec (void)
5029 gcc_assert (!stmt_vec_info_vec);
5030 stmt_vec_info_vec = VEC_alloc (vec_void_p, heap, 50);
5034 /* Free the vector of stmt_vec_info structs. */
5036 void
5037 free_stmt_vec_info_vec (void)
5039 gcc_assert (stmt_vec_info_vec);
5040 VEC_free (vec_void_p, heap, stmt_vec_info_vec);
5044 /* Free stmt vectorization related info. */
5046 void
5047 free_stmt_vec_info (gimple stmt)
5049 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5051 if (!stmt_info)
5052 return;
5054 VEC_free (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
5055 set_vinfo_for_stmt (stmt, NULL);
5056 free (stmt_info);
5060 /* Function get_vectype_for_scalar_type_and_size.
5062 Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
5063 by the target. */
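/* Editorial example: for SCALAR_TYPE 'int' (4 bytes) and SIZE 16 this
   returns a 4-unit vector type such as V4SI; with SIZE == 0 the number
   of units is derived from the target's preferred SIMD mode instead.  */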
5065 static tree
5066 get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
5068 enum machine_mode inner_mode = TYPE_MODE (scalar_type);
5069 enum machine_mode simd_mode;
5070 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
5071 int nunits;
5072 tree vectype;
5074 if (nbytes == 0)
5075 return NULL_TREE;
5077 /* We can't build a vector type of elements with alignment bigger than
5078 their size. */
5079 if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
5080 return NULL_TREE;
5082 /* If we'd build a vector type of elements whose mode precision doesn't
5083 match their type's precision, we'll get mismatched types on vector
5084 extracts via BIT_FIELD_REFs. This effectively means we disable
5085 vectorization of bool and/or enum types in some languages. */
5086 if (INTEGRAL_TYPE_P (scalar_type)
5087 && GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type))
5088 return NULL_TREE;
5090 if (GET_MODE_CLASS (inner_mode) != MODE_INT
5091 && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
5092 return NULL_TREE;
5094 /* If no size was supplied use the mode the target prefers. Otherwise
5095 lookup a vector mode of the specified size. */
5096 if (size == 0)
5097 simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
5098 else
5099 simd_mode = mode_for_vector (inner_mode, size / nbytes);
5100 nunits = GET_MODE_SIZE (simd_mode) / nbytes;
5101 if (nunits <= 1)
5102 return NULL_TREE;
5104 vectype = build_vector_type (scalar_type, nunits);
5105 if (vect_print_dump_info (REPORT_DETAILS))
5107 fprintf (vect_dump, "get vectype with %d units of type ", nunits);
5108 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
5111 if (!vectype)
5112 return NULL_TREE;
5114 if (vect_print_dump_info (REPORT_DETAILS))
5116 fprintf (vect_dump, "vectype: ");
5117 print_generic_expr (vect_dump, vectype, TDF_SLIM);
5120 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
5121 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
5123 if (vect_print_dump_info (REPORT_DETAILS))
5124 fprintf (vect_dump, "mode not supported by target.");
5125 return NULL_TREE;
5128 return vectype;
5131 unsigned int current_vector_size;
5133 /* Function get_vectype_for_scalar_type.
5135 Returns the vector type corresponding to SCALAR_TYPE as supported
5136 by the target. */
5138 tree
5139 get_vectype_for_scalar_type (tree scalar_type)
5141 tree vectype;
5142 vectype = get_vectype_for_scalar_type_and_size (scalar_type,
5143 current_vector_size);
5144 if (vectype
5145 && current_vector_size == 0)
5146 current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
5147 return vectype;
5150 /* Function get_same_sized_vectype
5152 Returns a vector type corresponding to SCALAR_TYPE, with the same
5153 size as VECTOR_TYPE, if supported by the target. */
5155 tree
5156 get_same_sized_vectype (tree scalar_type, tree vector_type)
5158 return get_vectype_for_scalar_type_and_size
5159 (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
5162 /* Function vect_is_simple_use.
5164 Input:
5165 LOOP_VINFO - the vect info of the loop that is being vectorized.
5166 BB_VINFO - the vect info of the basic block that is being vectorized.
5167 OPERAND - operand of a stmt in the loop or bb.
5168 DEF - the defining stmt in case OPERAND is an SSA_NAME.
5170 Returns whether a stmt with OPERAND can be vectorized.
5171 For loops, supportable operands are constants, loop invariants, and operands
5172 that are defined by the current iteration of the loop. Unsupportable
5173 operands are those that are defined by a previous iteration of the loop (as
5174 is the case in reduction/induction computations).
5175 For basic blocks, supportable operands are constants and bb invariants.
5176 For now, operands defined outside the basic block are not supported. */
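/* Editorial example of the classification below: in

       for (i = 0; i < n; i++)
         a[i] = b[i] * c + 5;

   the constant 5 is vect_constant_def, C (defined before the loop) is
   vect_external_def, and the value loaded from b[i] (defined by a stmt
   inside the loop) is vect_internal_def.  */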
5178 bool
5179 vect_is_simple_use (tree operand, loop_vec_info loop_vinfo,
5180 bb_vec_info bb_vinfo, gimple *def_stmt,
5181 tree *def, enum vect_def_type *dt)
5183 basic_block bb;
5184 stmt_vec_info stmt_vinfo;
5185 struct loop *loop = NULL;
5187 if (loop_vinfo)
5188 loop = LOOP_VINFO_LOOP (loop_vinfo);
5190 *def_stmt = NULL;
5191 *def = NULL_TREE;
5193 if (vect_print_dump_info (REPORT_DETAILS))
5195 fprintf (vect_dump, "vect_is_simple_use: operand ");
5196 print_generic_expr (vect_dump, operand, TDF_SLIM);
5199 if (TREE_CODE (operand) == INTEGER_CST || TREE_CODE (operand) == REAL_CST)
5201 *dt = vect_constant_def;
5202 return true;
5205 if (is_gimple_min_invariant (operand))
5207 *def = operand;
5208 *dt = vect_external_def;
5209 return true;
5212 if (TREE_CODE (operand) == PAREN_EXPR)
5214 if (vect_print_dump_info (REPORT_DETAILS))
5215 fprintf (vect_dump, "non-associatable copy.");
5216 operand = TREE_OPERAND (operand, 0);
5219 if (TREE_CODE (operand) != SSA_NAME)
5221 if (vect_print_dump_info (REPORT_DETAILS))
5222 fprintf (vect_dump, "not ssa-name.");
5223 return false;
5226 *def_stmt = SSA_NAME_DEF_STMT (operand);
5227 if (*def_stmt == NULL)
5229 if (vect_print_dump_info (REPORT_DETAILS))
5230 fprintf (vect_dump, "no def_stmt.");
5231 return false;
5234 if (vect_print_dump_info (REPORT_DETAILS))
5236 fprintf (vect_dump, "def_stmt: ");
5237 print_gimple_stmt (vect_dump, *def_stmt, 0, TDF_SLIM);
5240 /* Empty stmt is expected only in case of a function argument.
5241 (Otherwise - we expect a phi_node or a GIMPLE_ASSIGN). */
5242 if (gimple_nop_p (*def_stmt))
5244 *def = operand;
5245 *dt = vect_external_def;
5246 return true;
5249 bb = gimple_bb (*def_stmt);
5251 if ((loop && !flow_bb_inside_loop_p (loop, bb))
5252 || (!loop && bb != BB_VINFO_BB (bb_vinfo))
5253 || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
5254 *dt = vect_external_def;
5255 else
5257 stmt_vinfo = vinfo_for_stmt (*def_stmt);
5258 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
5261 if (*dt == vect_unknown_def_type)
5263 if (vect_print_dump_info (REPORT_DETAILS))
5264 fprintf (vect_dump, "Unsupported pattern.");
5265 return false;
5268 if (vect_print_dump_info (REPORT_DETAILS))
5269 fprintf (vect_dump, "type of def: %d.",*dt);
5271 switch (gimple_code (*def_stmt))
5273 case GIMPLE_PHI:
5274 *def = gimple_phi_result (*def_stmt);
5275 break;
5277 case GIMPLE_ASSIGN:
5278 *def = gimple_assign_lhs (*def_stmt);
5279 break;
5281 case GIMPLE_CALL:
5282 *def = gimple_call_lhs (*def_stmt);
5283 if (*def != NULL)
5284 break;
5285 /* FALLTHRU */
5286 default:
5287 if (vect_print_dump_info (REPORT_DETAILS))
5288 fprintf (vect_dump, "unsupported defining stmt: ");
5289 return false;
5292 return true;
5295 /* Function vect_is_simple_use_1.
5297 Same as vect_is_simple_use but also determines the vector operand
5298 type of OPERAND and stores it to *VECTYPE. If the definition of
5299 OPERAND is vect_uninitialized_def, vect_constant_def or
5300 vect_external_def *VECTYPE will be set to NULL_TREE and the caller
5301 is responsible for computing the best suited vector type for the
5302 scalar operand. */
5304 bool
5305 vect_is_simple_use_1 (tree operand, loop_vec_info loop_vinfo,
5306 bb_vec_info bb_vinfo, gimple *def_stmt,
5307 tree *def, enum vect_def_type *dt, tree *vectype)
5309 if (!vect_is_simple_use (operand, loop_vinfo, bb_vinfo, def_stmt, def, dt))
5310 return false;
5312 /* Now get a vector type if the def is internal, otherwise supply
5313 NULL_TREE and leave it up to the caller to figure out a proper
5314 type for the use stmt. */
5315 if (*dt == vect_internal_def
5316 || *dt == vect_induction_def
5317 || *dt == vect_reduction_def
5318 || *dt == vect_double_reduction_def
5319 || *dt == vect_nested_cycle)
5321 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
5322 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
5323 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
5324 *vectype = STMT_VINFO_VECTYPE (stmt_info);
5325 gcc_assert (*vectype != NULL_TREE);
5327 else if (*dt == vect_uninitialized_def
5328 || *dt == vect_constant_def
5329 || *dt == vect_external_def)
5330 *vectype = NULL_TREE;
5331 else
5332 gcc_unreachable ();
5334 return true;
5338 /* Function supportable_widening_operation
5340 Check whether an operation represented by the code CODE is a
5341 widening operation that is supported by the target platform in
5342 vector form (i.e., when operating on arguments of type VECTYPE_IN
5343 producing a result of type VECTYPE_OUT).
5345 Widening operations we currently support are NOP (CONVERT), FLOAT
5346 and WIDEN_MULT. This function checks if these operations are supported
5347 by the target platform either directly (via vector tree-codes), or via
5348 target builtins.
5350 Output:
5351 - CODE1 and CODE2 are codes of vector operations to be used when
5352 vectorizing the operation, if available.
5353 - DECL1 and DECL2 are decls of target builtin functions to be used
5354 when vectorizing the operation, if available. In this case,
5355 CODE1 and CODE2 are CALL_EXPR.
5356 - MULTI_STEP_CVT determines the number of required intermediate steps in
5357 case of multi-step conversion (like char->short->int - in that case
5358 MULTI_STEP_CVT will be 1).
5359 - INTERM_TYPES contains the intermediate type required to perform the
5360 widening operation (short in the above example). */
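/* Editorial example, not part of the original sources: widening a vector
   of chars to ints usually cannot be done with a single VEC_UNPACK pair;
   in that case one intermediate step is recorded, e.g. MULTI_STEP_CVT == 1
   with INTERM_TYPES holding the short vector type, so the conversion is
   performed as char->short->int, two unpack stages per result vector.  */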
5362 bool
5363 supportable_widening_operation (enum tree_code code, gimple stmt,
5364 tree vectype_out, tree vectype_in,
5365 tree *decl1, tree *decl2,
5366 enum tree_code *code1, enum tree_code *code2,
5367 int *multi_step_cvt,
5368 VEC (tree, heap) **interm_types)
5370 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5371 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
5372 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
5373 bool ordered_p;
5374 enum machine_mode vec_mode;
5375 enum insn_code icode1, icode2;
5376 optab optab1, optab2;
5377 tree vectype = vectype_in;
5378 tree wide_vectype = vectype_out;
5379 enum tree_code c1, c2;
5381 /* The result of a vectorized widening operation usually requires two vectors
5382 (because the widened results do not fit in one vector). The generated
5383 vector results would normally be expected to be generated in the same
5384 order as in the original scalar computation, i.e. if 8 results are
5385 generated in each vector iteration, they are to be organized as follows:
5386 vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].
5388 However, in the special case that the result of the widening operation is
5389 used in a reduction computation only, the order doesn't matter (because
5390 when vectorizing a reduction we change the order of the computation).
5391 Some targets can take advantage of this and generate more efficient code.
5392 For example, targets like Altivec, that support widen_mult using a sequence
5393 of {mult_even,mult_odd} generate the following vectors:
5394 vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8].
5396 When vectorizing outer-loops, we execute the inner-loop sequentially
5397 (each vectorized inner-loop iteration contributes to VF outer-loop
5398 iterations in parallel). We therefore don't allow changing the order
5399 of the computation in the inner-loop during outer-loop vectorization. */
5401 if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
5402 && !nested_in_vect_loop_p (vect_loop, stmt))
5403 ordered_p = false;
5404 else
5405 ordered_p = true;
5407 if (!ordered_p
5408 && code == WIDEN_MULT_EXPR
5409 && targetm.vectorize.builtin_mul_widen_even
5410 && targetm.vectorize.builtin_mul_widen_even (vectype)
5411 && targetm.vectorize.builtin_mul_widen_odd
5412 && targetm.vectorize.builtin_mul_widen_odd (vectype))
5414 if (vect_print_dump_info (REPORT_DETAILS))
5415 fprintf (vect_dump, "Unordered widening operation detected.");
5417 *code1 = *code2 = CALL_EXPR;
5418 *decl1 = targetm.vectorize.builtin_mul_widen_even (vectype);
5419 *decl2 = targetm.vectorize.builtin_mul_widen_odd (vectype);
5420 return true;
5423 switch (code)
5425 case WIDEN_MULT_EXPR:
5426 if (BYTES_BIG_ENDIAN)
5428 c1 = VEC_WIDEN_MULT_HI_EXPR;
5429 c2 = VEC_WIDEN_MULT_LO_EXPR;
5431 else
5433 c2 = VEC_WIDEN_MULT_HI_EXPR;
5434 c1 = VEC_WIDEN_MULT_LO_EXPR;
5436 break;
5438 CASE_CONVERT:
5439 if (BYTES_BIG_ENDIAN)
5441 c1 = VEC_UNPACK_HI_EXPR;
5442 c2 = VEC_UNPACK_LO_EXPR;
5444 else
5446 c2 = VEC_UNPACK_HI_EXPR;
5447 c1 = VEC_UNPACK_LO_EXPR;
5449 break;
5451 case FLOAT_EXPR:
5452 if (BYTES_BIG_ENDIAN)
5454 c1 = VEC_UNPACK_FLOAT_HI_EXPR;
5455 c2 = VEC_UNPACK_FLOAT_LO_EXPR;
5457 else
5459 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
5460 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
5462 break;
5464 case FIX_TRUNC_EXPR:
5465 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
5466 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
5467 computing the operation. */
5468 return false;
5470 default:
5471 gcc_unreachable ();
5474 if (code == FIX_TRUNC_EXPR)
5476 /* The signedness is determined from output operand. */
5477 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
5478 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
5480 else
5482 optab1 = optab_for_tree_code (c1, vectype, optab_default);
5483 optab2 = optab_for_tree_code (c2, vectype, optab_default);
5486 if (!optab1 || !optab2)
5487 return false;
5489 vec_mode = TYPE_MODE (vectype);
5490 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
5491 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
5492 return false;
5494 /* Check if it's a multi-step conversion that can be done using intermediate
5495 types. */
5496 if (insn_data[icode1].operand[0].mode != TYPE_MODE (wide_vectype)
5497 || insn_data[icode2].operand[0].mode != TYPE_MODE (wide_vectype))
5499 int i;
5500 tree prev_type = vectype, intermediate_type;
5501 enum machine_mode intermediate_mode, prev_mode = vec_mode;
5502 optab optab3, optab4;
5504 if (!CONVERT_EXPR_CODE_P (code))
5505 return false;
5507 *code1 = c1;
5508 *code2 = c2;
5510 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
5511 intermediate steps in the promotion sequence. We try
5512 MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
5513 not. */
5514 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
5515 for (i = 0; i < 3; i++)
5517 intermediate_mode = insn_data[icode1].operand[0].mode;
5518 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
5519 TYPE_UNSIGNED (prev_type));
5520 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
5521 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
5523 if (!optab3 || !optab4
5524 || ((icode1 = optab_handler (optab1, prev_mode))
5525 == CODE_FOR_nothing)
5526 || insn_data[icode1].operand[0].mode != intermediate_mode
5527 || ((icode2 = optab_handler (optab2, prev_mode))
5528 == CODE_FOR_nothing)
5529 || insn_data[icode2].operand[0].mode != intermediate_mode
5530 || ((icode1 = optab_handler (optab3, intermediate_mode))
5531 == CODE_FOR_nothing)
5532 || ((icode2 = optab_handler (optab4, intermediate_mode))
5533 == CODE_FOR_nothing))
5534 return false;
5536 VEC_quick_push (tree, *interm_types, intermediate_type);
5537 (*multi_step_cvt)++;
5539 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
5540 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
5541 return true;
5543 prev_type = intermediate_type;
5544 prev_mode = intermediate_mode;
5545 }
5547 return false;
5548 }
5550 *code1 = c1;
5551 *code2 = c2;
5552 return true;
5553 }
5556 /* Function supportable_narrowing_operation
5558 Check whether an operation represented by the code CODE is a
5559 narrowing operation that is supported by the target platform in
5560 vector form (i.e., when operating on arguments of type VECTYPE_IN
5561 and producing a result of type VECTYPE_OUT).
5563 Narrowing operations we currently support are NOP (CONVERT) and
5564 FIX_TRUNC. This function checks if these operations are supported by
5565 the target platform directly via vector tree-codes.
5567 Output:
5568 - CODE1 is the code of a vector operation to be used when
5569 vectorizing the operation, if available.
5570 - MULTI_STEP_CVT determines the number of required intermediate steps in
5571 case of multi-step conversion (like int->short->char - in that case
5572 MULTI_STEP_CVT will be 1).
5573 - INTERM_TYPES contains the intermediate type required to perform the
5574 narrowing operation (short in the above example). */
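/* For example (illustrative): narrowing ints to chars with 16-byte vectors.
   VEC_PACK_TRUNC_EXPR on two vectors of 4 ints produces a vector of
   8 shorts, not the wanted vector of 16 chars, so one intermediate step is
   needed: INTERM_TYPES will hold the short vector type, MULTI_STEP_CVT will
   be 1, and packing is applied once more on the short vectors to reach the
   char vectors.  */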
5576 bool
5577 supportable_narrowing_operation (enum tree_code code,
5578 tree vectype_out, tree vectype_in,
5579 enum tree_code *code1, int *multi_step_cvt,
5580 VEC (tree, heap) **interm_types)
5581 {
5582 enum machine_mode vec_mode;
5583 enum insn_code icode1;
5584 optab optab1, interm_optab;
5585 tree vectype = vectype_in;
5586 tree narrow_vectype = vectype_out;
5587 enum tree_code c1;
5588 tree intermediate_type, prev_type;
5589 int i;
5591 switch (code)
5592 {
5593 CASE_CONVERT:
5594 c1 = VEC_PACK_TRUNC_EXPR;
5595 break;
5597 case FIX_TRUNC_EXPR:
5598 c1 = VEC_PACK_FIX_TRUNC_EXPR;
5599 break;
5601 case FLOAT_EXPR:
5602 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
5603 tree code and optabs used for computing the operation. */
5604 return false;
5606 default:
5607 gcc_unreachable ();
5608 }
5610 if (code == FIX_TRUNC_EXPR)
5611 /* The signedness is determined from the output operand. */
5612 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
5613 else
5614 optab1 = optab_for_tree_code (c1, vectype, optab_default);
5616 if (!optab1)
5617 return false;
5619 vec_mode = TYPE_MODE (vectype);
5620 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
5621 return false;
5623 /* Check if it's a multi-step conversion that can be done using intermediate
5624 types. */
5625 if (insn_data[icode1].operand[0].mode != TYPE_MODE (narrow_vectype))
5626 {
5627 enum machine_mode intermediate_mode, prev_mode = vec_mode;
5629 *code1 = c1;
5630 prev_type = vectype;
5631 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
5632 intermediate steps in the narrowing sequence. We try
5633 MAX_INTERM_CVT_STEPS steps to get to NARROW_VECTYPE, and fail if we do
5634 not. */
5635 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
5636 for (i = 0; i < 3; i++)
5637 {
5638 intermediate_mode = insn_data[icode1].operand[0].mode;
5639 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
5640 TYPE_UNSIGNED (prev_type));
5641 interm_optab = optab_for_tree_code (c1, intermediate_type,
5642 optab_default);
5643 if (!interm_optab
5644 || ((icode1 = optab_handler (optab1, prev_mode))
5645 == CODE_FOR_nothing)
5646 || insn_data[icode1].operand[0].mode != intermediate_mode
5647 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
5648 == CODE_FOR_nothing))
5649 return false;
5651 VEC_quick_push (tree, *interm_types, intermediate_type);
5652 (*multi_step_cvt)++;
5654 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
5655 return true;
5657 prev_type = intermediate_type;
5658 prev_mode = intermediate_mode;
5659 }
5661 return false;
5662 }
5664 *code1 = c1;
5665 return true;
5666 }
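/* Hypothetical usage sketch (not one of the actual call sites): a caller
   vectorizing an int -> char conversion could query the routine like so,
   with V16QI_TYPE and V4SI_TYPE standing for vector types obtained from
   get_vectype_for_scalar_type:

       enum tree_code c;
       int steps = 0;
       VEC (tree, heap) *itypes = NULL;

       if (supportable_narrowing_operation (NOP_EXPR, V16QI_TYPE, V4SI_TYPE,
                                            &c, &steps, &itypes))
         gcc_assert (c == VEC_PACK_TRUNC_EXPR);

   On most targets this yields steps == 1 with itypes holding the short
   vector type used in between.  STEPS must start at zero because the
   routine only increments *MULTI_STEP_CVT.  */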