gcc/tree-vect-stmts.c
1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software
3 Foundation, Inc.
4 Contributed by Dorit Naishlos <dorit@il.ibm.com>
5 and Ira Rosen <irar@il.ibm.com>
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "ggc.h"
28 #include "tree.h"
29 #include "target.h"
30 #include "basic-block.h"
31 #include "diagnostic.h"
32 #include "tree-flow.h"
33 #include "tree-dump.h"
34 #include "cfgloop.h"
35 #include "cfglayout.h"
36 #include "expr.h"
37 #include "recog.h"
38 #include "optabs.h"
39 #include "toplev.h"
40 #include "tree-vectorizer.h"
41 #include "langhooks.h"
44 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
46 /* Function vect_mark_relevant.
48 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
50 static void
51 vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt,
52 enum vect_relevant relevant, bool live_p)
54 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
55 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
56 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
58 if (vect_print_dump_info (REPORT_DETAILS))
59 fprintf (vect_dump, "mark relevant %d, live %d.", relevant, live_p);
61 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
63 gimple pattern_stmt;
65 /* This is the last stmt in a sequence that was detected as a
66 pattern that can potentially be vectorized. Don't mark the stmt
67 as relevant/live because it's not going to be vectorized.
68 Instead mark the pattern-stmt that replaces it. */
70 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
72 if (vect_print_dump_info (REPORT_DETAILS))
73 fprintf (vect_dump, "last stmt in pattern. don't mark relevant/live.");
74 stmt_info = vinfo_for_stmt (pattern_stmt);
75 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
76 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
77 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
78 stmt = pattern_stmt;
81 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
82 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
83 STMT_VINFO_RELEVANT (stmt_info) = relevant;
85 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
86 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
88 if (vect_print_dump_info (REPORT_DETAILS))
89 fprintf (vect_dump, "already marked relevant/live.");
90 return;
93 VEC_safe_push (gimple, heap, *worklist, stmt);
97 /* Function vect_stmt_relevant_p.
99 Return true if STMT in loop that is represented by LOOP_VINFO is
100 "relevant for vectorization".
102 A stmt is considered "relevant for vectorization" if:
103 - it has uses outside the loop.
104 - it has vdefs (it alters memory).
105 - control stmts in the loop (except for the exit condition).
107 CHECKME: what other side effects would the vectorizer allow? */
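/* For example, in

     for (i = 0; i < N; i++)
       {
         a[i] = t_4;            <-- has a vdef (alters memory): relevant
         sum_5 = sum_3 + x_2;   <-- sum_5 is used after the loop: live
       }
     ... = sum_5;

   both stmts in the loop body are picked up by this test, while the
   loop-exit condition itself is not considered relevant.  */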
109 static bool
110 vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
111 enum vect_relevant *relevant, bool *live_p)
113 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
114 ssa_op_iter op_iter;
115 imm_use_iterator imm_iter;
116 use_operand_p use_p;
117 def_operand_p def_p;
119 *relevant = vect_unused_in_scope;
120 *live_p = false;
122 /* cond stmt other than loop exit cond. */
123 if (is_ctrl_stmt (stmt)
124 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
125 != loop_exit_ctrl_vec_info_type)
126 *relevant = vect_used_in_scope;
128 /* changing memory. */
129 if (gimple_code (stmt) != GIMPLE_PHI)
130 if (gimple_vdef (stmt))
132 if (vect_print_dump_info (REPORT_DETAILS))
133 fprintf (vect_dump, "vec_stmt_relevant_p: stmt has vdefs.");
134 *relevant = vect_used_in_scope;
137 /* uses outside the loop. */
138 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
140 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
142 basic_block bb = gimple_bb (USE_STMT (use_p));
143 if (!flow_bb_inside_loop_p (loop, bb))
145 if (vect_print_dump_info (REPORT_DETAILS))
146 fprintf (vect_dump, "vec_stmt_relevant_p: used out of loop.");
148 /* We expect all such uses to be in the loop exit phis
149 (because of loop closed form) */
150 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
151 gcc_assert (bb == single_exit (loop)->dest);
153 *live_p = true;
158 return (*live_p || *relevant);
162 /* Function exist_non_indexing_operands_for_use_p
164 USE is one of the uses attached to STMT. Check if USE is
165 used in STMT for anything other than indexing an array. */
167 static bool
168 exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
170 tree operand;
171 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
173 /* USE corresponds to some operand in STMT. If there is no data
174 reference in STMT, then any operand that corresponds to USE
175 is not indexing an array. */
176 if (!STMT_VINFO_DATA_REF (stmt_info))
177 return true;
 179   /* STMT has a data_ref. FORNOW this means that it is one of
180 the following forms:
181 -1- ARRAY_REF = var
182 -2- var = ARRAY_REF
183 (This should have been verified in analyze_data_refs).
185 'var' in the second case corresponds to a def, not a use,
186 so USE cannot correspond to any operands that are not used
187 for array indexing.
189 Therefore, all we need to check is if STMT falls into the
190 first case, and whether var corresponds to USE. */
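      /* For instance, given the store "a[i_7] = x_3", this returns true for
         the use "x_3" (the stored value) and false for "i_7", which is only
         used to index the array; for the load "x_3 = a[i_7]" it returns
         false for "i_7" as well.  */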
192 if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
193 return false;
195 if (!gimple_assign_copy_p (stmt))
196 return false;
197 operand = gimple_assign_rhs1 (stmt);
199 if (TREE_CODE (operand) != SSA_NAME)
200 return false;
202 if (operand == use)
203 return true;
205 return false;
 210 /* Function process_use.
212 Inputs:
213 - a USE in STMT in a loop represented by LOOP_VINFO
214 - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
215 that defined USE. This is done by calling mark_relevant and passing it
216 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
218 Outputs:
219 Generally, LIVE_P and RELEVANT are used to define the liveness and
220 relevance info of the DEF_STMT of this USE:
221 STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
222 STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
223 Exceptions:
224 - case 1: If USE is used only for address computations (e.g. array indexing),
225 which does not need to be directly vectorized, then the liveness/relevance
226 of the respective DEF_STMT is left unchanged.
227 - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
 228    skip DEF_STMT because it has already been processed.
229 - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
230 be modified accordingly.
232 Return true if everything is as expected. Return false otherwise. */
234 static bool
235 process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
236 enum vect_relevant relevant, VEC(gimple,heap) **worklist)
238 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
239 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
240 stmt_vec_info dstmt_vinfo;
241 basic_block bb, def_bb;
242 tree def;
243 gimple def_stmt;
244 enum vect_def_type dt;
246 /* case 1: we are only interested in uses that need to be vectorized. Uses
247 that are used for address computation are not considered relevant. */
248 if (!exist_non_indexing_operands_for_use_p (use, stmt))
249 return true;
251 if (!vect_is_simple_use (use, loop_vinfo, NULL, &def_stmt, &def, &dt))
253 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
254 fprintf (vect_dump, "not vectorized: unsupported use in stmt.");
255 return false;
258 if (!def_stmt || gimple_nop_p (def_stmt))
259 return true;
261 def_bb = gimple_bb (def_stmt);
262 if (!flow_bb_inside_loop_p (loop, def_bb))
264 if (vect_print_dump_info (REPORT_DETAILS))
265 fprintf (vect_dump, "def_stmt is out of loop.");
266 return true;
269 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
270 DEF_STMT must have already been processed, because this should be the
271 only way that STMT, which is a reduction-phi, was put in the worklist,
272 as there should be no other uses for DEF_STMT in the loop. So we just
273 check that everything is as expected, and we are done. */
274 dstmt_vinfo = vinfo_for_stmt (def_stmt);
275 bb = gimple_bb (stmt);
276 if (gimple_code (stmt) == GIMPLE_PHI
277 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
278 && gimple_code (def_stmt) != GIMPLE_PHI
279 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
280 && bb->loop_father == def_bb->loop_father)
282 if (vect_print_dump_info (REPORT_DETAILS))
283 fprintf (vect_dump, "reduc-stmt defining reduc-phi in the same nest.");
284 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
285 dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
286 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
287 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
288 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
289 return true;
292 /* case 3a: outer-loop stmt defining an inner-loop stmt:
293 outer-loop-header-bb:
294 d = def_stmt
295 inner-loop:
296 stmt # use (d)
297 outer-loop-tail-bb:
298 ... */
299 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
301 if (vect_print_dump_info (REPORT_DETAILS))
302 fprintf (vect_dump, "outer-loop def-stmt defining inner-loop stmt.");
304 switch (relevant)
306 case vect_unused_in_scope:
307 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
308 vect_used_in_scope : vect_unused_in_scope;
309 break;
311 case vect_used_in_outer_by_reduction:
312 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
313 relevant = vect_used_by_reduction;
314 break;
316 case vect_used_in_outer:
317 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
318 relevant = vect_used_in_scope;
319 break;
321 case vect_used_in_scope:
322 break;
324 default:
325 gcc_unreachable ();
329 /* case 3b: inner-loop stmt defining an outer-loop stmt:
330 outer-loop-header-bb:
332 inner-loop:
333 d = def_stmt
334 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
335 stmt # use (d) */
336 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
338 if (vect_print_dump_info (REPORT_DETAILS))
339 fprintf (vect_dump, "inner-loop def-stmt defining outer-loop stmt.");
341 switch (relevant)
343 case vect_unused_in_scope:
344 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
345 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
346 vect_used_in_outer_by_reduction : vect_unused_in_scope;
347 break;
349 case vect_used_by_reduction:
350 relevant = vect_used_in_outer_by_reduction;
351 break;
353 case vect_used_in_scope:
354 relevant = vect_used_in_outer;
355 break;
357 default:
358 gcc_unreachable ();
362 vect_mark_relevant (worklist, def_stmt, relevant, live_p);
363 return true;
367 /* Function vect_mark_stmts_to_be_vectorized.
369 Not all stmts in the loop need to be vectorized. For example:
371 for i...
372 for j...
373 1. T0 = i + j
374 2. T1 = a[T0]
376 3. j = j + 1
378 Stmt 1 and 3 do not need to be vectorized, because loop control and
379 addressing of vectorized data-refs are handled differently.
381 This pass detects such stmts. */
383 bool
384 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
386 VEC(gimple,heap) *worklist;
387 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
388 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
389 unsigned int nbbs = loop->num_nodes;
390 gimple_stmt_iterator si;
391 gimple stmt;
392 unsigned int i;
393 stmt_vec_info stmt_vinfo;
394 basic_block bb;
395 gimple phi;
396 bool live_p;
397 enum vect_relevant relevant, tmp_relevant;
398 enum vect_def_type def_type;
400 if (vect_print_dump_info (REPORT_DETAILS))
401 fprintf (vect_dump, "=== vect_mark_stmts_to_be_vectorized ===");
403 worklist = VEC_alloc (gimple, heap, 64);
405 /* 1. Init worklist. */
406 for (i = 0; i < nbbs; i++)
408 bb = bbs[i];
409 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
411 phi = gsi_stmt (si);
412 if (vect_print_dump_info (REPORT_DETAILS))
414 fprintf (vect_dump, "init: phi relevant? ");
415 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
418 if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
419 vect_mark_relevant (&worklist, phi, relevant, live_p);
421 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
423 stmt = gsi_stmt (si);
424 if (vect_print_dump_info (REPORT_DETAILS))
426 fprintf (vect_dump, "init: stmt relevant? ");
427 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
430 if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
431 vect_mark_relevant (&worklist, stmt, relevant, live_p);
435 /* 2. Process_worklist */
436 while (VEC_length (gimple, worklist) > 0)
438 use_operand_p use_p;
439 ssa_op_iter iter;
441 stmt = VEC_pop (gimple, worklist);
442 if (vect_print_dump_info (REPORT_DETAILS))
444 fprintf (vect_dump, "worklist: examine stmt: ");
445 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
448 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
449 (DEF_STMT) as relevant/irrelevant and live/dead according to the
450 liveness and relevance properties of STMT. */
451 stmt_vinfo = vinfo_for_stmt (stmt);
452 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
453 live_p = STMT_VINFO_LIVE_P (stmt_vinfo);
455 /* Generally, the liveness and relevance properties of STMT are
456 propagated as is to the DEF_STMTs of its USEs:
457 live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
458 relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)
460 One exception is when STMT has been identified as defining a reduction
461 variable; in this case we set the liveness/relevance as follows:
462 live_p = false
463 relevant = vect_used_by_reduction
464 This is because we distinguish between two kinds of relevant stmts -
465 those that are used by a reduction computation, and those that are
466 (also) used by a regular computation. This allows us later on to
467 identify stmts that are used solely by a reduction, and therefore the
468 order of the results that they produce does not have to be kept. */
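         For instance, in the summation

           for (i = 0; i < N; i++)
             {
               t_1 = a[i] * b[i];
               sum_2 = sum_3 + t_1;
             }

         the stmt computing t_1 feeds only the reduction, so its defining
         stmt is marked vect_used_by_reduction rather than
         vect_used_in_scope, and the order in which its vector results are
         produced may later be changed.  */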
470 def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
471 tmp_relevant = relevant;
472 switch (def_type)
474 case vect_reduction_def:
475 switch (tmp_relevant)
477 case vect_unused_in_scope:
478 relevant = vect_used_by_reduction;
479 break;
481 case vect_used_by_reduction:
482 if (gimple_code (stmt) == GIMPLE_PHI)
483 break;
484 /* fall through */
486 default:
487 if (vect_print_dump_info (REPORT_DETAILS))
488 fprintf (vect_dump, "unsupported use of reduction.");
490 VEC_free (gimple, heap, worklist);
491 return false;
494 live_p = false;
495 break;
497 case vect_nested_cycle:
498 if (tmp_relevant != vect_unused_in_scope
499 && tmp_relevant != vect_used_in_outer_by_reduction
500 && tmp_relevant != vect_used_in_outer)
502 if (vect_print_dump_info (REPORT_DETAILS))
503 fprintf (vect_dump, "unsupported use of nested cycle.");
505 VEC_free (gimple, heap, worklist);
506 return false;
509 live_p = false;
510 break;
512 case vect_double_reduction_def:
513 if (tmp_relevant != vect_unused_in_scope
514 && tmp_relevant != vect_used_by_reduction)
516 if (vect_print_dump_info (REPORT_DETAILS))
517 fprintf (vect_dump, "unsupported use of double reduction.");
519 VEC_free (gimple, heap, worklist);
520 return false;
523 live_p = false;
524 break;
526 default:
527 break;
530 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
532 tree op = USE_FROM_PTR (use_p);
533 if (!process_use (stmt, op, loop_vinfo, live_p, relevant, &worklist))
535 VEC_free (gimple, heap, worklist);
536 return false;
539 } /* while worklist */
541 VEC_free (gimple, heap, worklist);
542 return true;
547 cost_for_stmt (gimple stmt)
549 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
551 switch (STMT_VINFO_TYPE (stmt_info))
553 case load_vec_info_type:
554 return TARG_SCALAR_LOAD_COST;
555 case store_vec_info_type:
556 return TARG_SCALAR_STORE_COST;
557 case op_vec_info_type:
558 case condition_vec_info_type:
559 case assignment_vec_info_type:
560 case reduc_vec_info_type:
561 case induc_vec_info_type:
562 case type_promotion_vec_info_type:
563 case type_demotion_vec_info_type:
564 case type_conversion_vec_info_type:
565 case call_vec_info_type:
566 return TARG_SCALAR_STMT_COST;
567 case undef_vec_info_type:
568 default:
569 gcc_unreachable ();
573 /* Function vect_model_simple_cost.
575 Models cost for simple operations, i.e. those that only emit ncopies of a
576 single op. Right now, this does not account for multiple insns that could
577 be generated for the single vector op. We will handle that shortly. */
579 void
580 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
581 enum vect_def_type *dt, slp_tree slp_node)
583 int i;
584 int inside_cost = 0, outside_cost = 0;
586 /* The SLP costs were already calculated during SLP tree build. */
587 if (PURE_SLP_STMT (stmt_info))
588 return;
590 inside_cost = ncopies * TARG_VEC_STMT_COST;
592 /* FORNOW: Assuming maximum 2 args per stmts. */
593 for (i = 0; i < 2; i++)
595 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
596 outside_cost += TARG_SCALAR_TO_VEC_COST;
599 if (vect_print_dump_info (REPORT_COST))
600 fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, "
601 "outside_cost = %d .", inside_cost, outside_cost);
603 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
604 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
605 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
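/* For example, with ncopies == 2 and both operands defined outside the loop
   (vect_external_def), the model above gives
     inside_cost  = 2 * TARG_VEC_STMT_COST
     outside_cost = 2 * TARG_SCALAR_TO_VEC_COST
   since each invariant operand is broadcast to a vector once, outside the
   loop.  */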
609 /* Function vect_cost_strided_group_size
611 For strided load or store, return the group_size only if it is the first
612 load or store of a group, else return 1. This ensures that group size is
613 only returned once per group. */
615 static int
616 vect_cost_strided_group_size (stmt_vec_info stmt_info)
618 gimple first_stmt = DR_GROUP_FIRST_DR (stmt_info);
620 if (first_stmt == STMT_VINFO_STMT (stmt_info))
621 return DR_GROUP_SIZE (stmt_info);
623 return 1;
627 /* Function vect_model_store_cost
629 Models cost for stores. In the case of strided accesses, one access
630 has the overhead of the strided access attributed to it. */
632 void
633 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
634 enum vect_def_type dt, slp_tree slp_node)
636 int group_size;
637 int inside_cost = 0, outside_cost = 0;
639 /* The SLP costs were already calculated during SLP tree build. */
640 if (PURE_SLP_STMT (stmt_info))
641 return;
643 if (dt == vect_constant_def || dt == vect_external_def)
644 outside_cost = TARG_SCALAR_TO_VEC_COST;
646 /* Strided access? */
647 if (DR_GROUP_FIRST_DR (stmt_info) && !slp_node)
648 group_size = vect_cost_strided_group_size (stmt_info);
649 /* Not a strided access. */
650 else
651 group_size = 1;
653 /* Is this an access in a group of stores, which provide strided access?
654 If so, add in the cost of the permutes. */
655 if (group_size > 1)
657 /* Uses a high and low interleave operation for each needed permute. */
658 inside_cost = ncopies * exact_log2(group_size) * group_size
659 * TARG_VEC_STMT_COST;
661 if (vect_print_dump_info (REPORT_COST))
662 fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .",
663 group_size);
667 /* Costs of the stores. */
668 inside_cost += ncopies * TARG_VEC_STORE_COST;
670 if (vect_print_dump_info (REPORT_COST))
671 fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, "
672 "outside_cost = %d .", inside_cost, outside_cost);
674 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
675 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
676 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
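/* As a worked example of the interleaving cost above: for a strided store
   group with group_size == 4 and ncopies == 1, the permute cost is
     1 * exact_log2 (4) * 4 * TARG_VEC_STMT_COST = 8 * TARG_VEC_STMT_COST,
   on top of the ncopies * TARG_VEC_STORE_COST charged for the stores
   themselves.  */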
680 /* Function vect_model_load_cost
682 Models cost for loads. In the case of strided accesses, the last access
683 has the overhead of the strided access attributed to it. Since unaligned
684 accesses are supported for loads, we also account for the costs of the
685 access scheme chosen. */
687 void
688 vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)
691 int group_size;
 692   int alignment_support_scheme;
693 gimple first_stmt;
694 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
695 int inside_cost = 0, outside_cost = 0;
697 /* The SLP costs were already calculated during SLP tree build. */
698 if (PURE_SLP_STMT (stmt_info))
699 return;
701 /* Strided accesses? */
702 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
703 if (first_stmt && !slp_node)
705 group_size = vect_cost_strided_group_size (stmt_info);
706 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
708 /* Not a strided access. */
709 else
711 group_size = 1;
712 first_dr = dr;
 715   alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
717 /* Is this an access in a group of loads providing strided access?
718 If so, add in the cost of the permutes. */
719 if (group_size > 1)
 721       /* Uses even and odd extract operations for each needed permute.  */
722 inside_cost = ncopies * exact_log2(group_size) * group_size
723 * TARG_VEC_STMT_COST;
725 if (vect_print_dump_info (REPORT_COST))
726 fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .",
727 group_size);
731 /* The loads themselves. */
 732   switch (alignment_support_scheme)
734 case dr_aligned:
736 inside_cost += ncopies * TARG_VEC_LOAD_COST;
738 if (vect_print_dump_info (REPORT_COST))
739 fprintf (vect_dump, "vect_model_load_cost: aligned.");
741 break;
743 case dr_unaligned_supported:
745 /* Here, we assign an additional cost for the unaligned load. */
746 inside_cost += ncopies * TARG_VEC_UNALIGNED_LOAD_COST;
748 if (vect_print_dump_info (REPORT_COST))
749 fprintf (vect_dump, "vect_model_load_cost: unaligned supported by "
750 "hardware.");
752 break;
754 case dr_explicit_realign:
756 inside_cost += ncopies * (2*TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST);
758 /* FIXME: If the misalignment remains fixed across the iterations of
759 the containing loop, the following cost should be added to the
760 outside costs. */
761 if (targetm.vectorize.builtin_mask_for_load)
762 inside_cost += TARG_VEC_STMT_COST;
764 break;
766 case dr_explicit_realign_optimized:
768 if (vect_print_dump_info (REPORT_COST))
769 fprintf (vect_dump, "vect_model_load_cost: unaligned software "
770 "pipelined.");
 772       /* The unaligned software pipeline has a load of an address, an initial
773 load, and possibly a mask operation to "prime" the loop. However,
774 if this is an access in a group of loads, which provide strided
775 access, then the above cost should only be considered for one
776 access in the group. Inside the loop, there is a load op
777 and a realignment op. */
779 if ((!DR_GROUP_FIRST_DR (stmt_info)) || group_size > 1 || slp_node)
781 outside_cost = 2*TARG_VEC_STMT_COST;
782 if (targetm.vectorize.builtin_mask_for_load)
783 outside_cost += TARG_VEC_STMT_COST;
786 inside_cost += ncopies * (TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST);
788 break;
791 default:
792 gcc_unreachable ();
795 if (vect_print_dump_info (REPORT_COST))
796 fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
797 "outside_cost = %d .", inside_cost, outside_cost);
799 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
800 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
801 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
805 /* Function vect_init_vector.
807 Insert a new stmt (INIT_STMT) that initializes a new vector variable with
808 the vector elements of VECTOR_VAR. Place the initialization at BSI if it
809 is not NULL. Otherwise, place the initialization at the loop preheader.
810 Return the DEF of INIT_STMT.
811 It will be used in the vectorization of STMT. */
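/* For instance, vectorizing "x = a[i] + 3" with a four-element (V4SI)
   vector type leads to an init stmt of the form

     vect_cst_ = { 3, 3, 3, 3 };

   placed in the loop preheader (GSI being NULL in that case), and the new
   SSA name is then used as the vector operand of the addition.  */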
813 tree
814 vect_init_vector (gimple stmt, tree vector_var, tree vector_type,
815 gimple_stmt_iterator *gsi)
817 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
818 tree new_var;
819 gimple init_stmt;
820 tree vec_oprnd;
821 edge pe;
822 tree new_temp;
823 basic_block new_bb;
825 new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
826 add_referenced_var (new_var);
827 init_stmt = gimple_build_assign (new_var, vector_var);
828 new_temp = make_ssa_name (new_var, init_stmt);
829 gimple_assign_set_lhs (init_stmt, new_temp);
831 if (gsi)
832 vect_finish_stmt_generation (stmt, init_stmt, gsi);
833 else
835 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
837 if (loop_vinfo)
839 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
841 if (nested_in_vect_loop_p (loop, stmt))
842 loop = loop->inner;
844 pe = loop_preheader_edge (loop);
845 new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
846 gcc_assert (!new_bb);
848 else
850 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
851 basic_block bb;
852 gimple_stmt_iterator gsi_bb_start;
854 gcc_assert (bb_vinfo);
855 bb = BB_VINFO_BB (bb_vinfo);
856 gsi_bb_start = gsi_after_labels (bb);
857 gsi_insert_before (&gsi_bb_start, init_stmt, GSI_SAME_STMT);
861 if (vect_print_dump_info (REPORT_DETAILS))
863 fprintf (vect_dump, "created new init_stmt: ");
864 print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
867 vec_oprnd = gimple_assign_lhs (init_stmt);
868 return vec_oprnd;
872 /* Function vect_get_vec_def_for_operand.
874 OP is an operand in STMT. This function returns a (vector) def that will be
875 used in the vectorized stmt for STMT.
877 In the case that OP is an SSA_NAME which is defined in the loop, then
878 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
880 In case OP is an invariant or constant, a new stmt that creates a vector def
881 needs to be introduced. */
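/* E.g. for the scalar stmt "z_5 = x_4 + y_3", where x_4 is loaded inside
   the loop and y_3 is loop invariant, the operand x_4 resolves to the lhs
   of the vectorized load (case 3 below), while y_3 gives rise to a new
   'vec_inv = {y_3, y_3, ..., y_3}' built in the preheader (case 2).  */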
883 tree
884 vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
886 tree vec_oprnd;
887 gimple vec_stmt;
888 gimple def_stmt;
889 stmt_vec_info def_stmt_info = NULL;
890 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
891 tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
892 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
893 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
894 tree vec_inv;
895 tree vec_cst;
896 tree t = NULL_TREE;
897 tree def;
898 int i;
899 enum vect_def_type dt;
900 bool is_simple_use;
901 tree vector_type;
903 if (vect_print_dump_info (REPORT_DETAILS))
905 fprintf (vect_dump, "vect_get_vec_def_for_operand: ");
906 print_generic_expr (vect_dump, op, TDF_SLIM);
909 is_simple_use = vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def,
910 &dt);
911 gcc_assert (is_simple_use);
912 if (vect_print_dump_info (REPORT_DETAILS))
914 if (def)
916 fprintf (vect_dump, "def = ");
917 print_generic_expr (vect_dump, def, TDF_SLIM);
919 if (def_stmt)
921 fprintf (vect_dump, " def_stmt = ");
922 print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
926 switch (dt)
928 /* Case 1: operand is a constant. */
929 case vect_constant_def:
931 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
932 gcc_assert (vector_type);
934 if (scalar_def)
935 *scalar_def = op;
937 /* Create 'vect_cst_ = {cst,cst,...,cst}' */
938 if (vect_print_dump_info (REPORT_DETAILS))
939 fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits);
941 for (i = nunits - 1; i >= 0; --i)
943 t = tree_cons (NULL_TREE, op, t);
945 vec_cst = build_vector (vector_type, t);
946 return vect_init_vector (stmt, vec_cst, vector_type, NULL);
949 /* Case 2: operand is defined outside the loop - loop invariant. */
950 case vect_external_def:
952 vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
953 gcc_assert (vector_type);
954 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
956 if (scalar_def)
957 *scalar_def = def;
959 /* Create 'vec_inv = {inv,inv,..,inv}' */
960 if (vect_print_dump_info (REPORT_DETAILS))
961 fprintf (vect_dump, "Create vector_inv.");
963 for (i = nunits - 1; i >= 0; --i)
965 t = tree_cons (NULL_TREE, def, t);
968 /* FIXME: use build_constructor directly. */
969 vec_inv = build_constructor_from_list (vector_type, t);
970 return vect_init_vector (stmt, vec_inv, vector_type, NULL);
973 /* Case 3: operand is defined inside the loop. */
974 case vect_internal_def:
976 if (scalar_def)
977 *scalar_def = NULL/* FIXME tuples: def_stmt*/;
979 /* Get the def from the vectorized stmt. */
980 def_stmt_info = vinfo_for_stmt (def_stmt);
981 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
982 gcc_assert (vec_stmt);
983 if (gimple_code (vec_stmt) == GIMPLE_PHI)
984 vec_oprnd = PHI_RESULT (vec_stmt);
985 else if (is_gimple_call (vec_stmt))
986 vec_oprnd = gimple_call_lhs (vec_stmt);
987 else
988 vec_oprnd = gimple_assign_lhs (vec_stmt);
989 return vec_oprnd;
992 /* Case 4: operand is defined by a loop header phi - reduction */
993 case vect_reduction_def:
994 case vect_double_reduction_def:
995 case vect_nested_cycle:
997 struct loop *loop;
999 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1000 loop = (gimple_bb (def_stmt))->loop_father;
1002 /* Get the def before the loop */
1003 op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
1004 return get_initial_def_for_reduction (stmt, op, scalar_def);
1007 /* Case 5: operand is defined by loop-header phi - induction. */
1008 case vect_induction_def:
1010 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1012 /* Get the def from the vectorized stmt. */
1013 def_stmt_info = vinfo_for_stmt (def_stmt);
1014 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1015 gcc_assert (vec_stmt && gimple_code (vec_stmt) == GIMPLE_PHI);
1016 vec_oprnd = PHI_RESULT (vec_stmt);
1017 return vec_oprnd;
1020 default:
1021 gcc_unreachable ();
1026 /* Function vect_get_vec_def_for_stmt_copy
1028 Return a vector-def for an operand. This function is used when the
1029 vectorized stmt to be created (by the caller to this function) is a "copy"
1030 created in case the vectorized result cannot fit in one vector, and several
1031 copies of the vector-stmt are required. In this case the vector-def is
1032 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1033 of the stmt that defines VEC_OPRND.
1034 DT is the type of the vector def VEC_OPRND.
1036 Context:
1037 In case the vectorization factor (VF) is bigger than the number
1038 of elements that can fit in a vectype (nunits), we have to generate
1039 more than one vector stmt to vectorize the scalar stmt. This situation
1040 arises when there are multiple data-types operated upon in the loop; the
1041 smallest data-type determines the VF, and as a result, when vectorizing
1042 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1043 vector stmt (each computing a vector of 'nunits' results, and together
1044 computing 'VF' results in each iteration). This function is called when
1045 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1046 which VF=16 and nunits=4, so the number of copies required is 4):
1048 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1050 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1051 VS1.1: vx.1 = memref1 VS1.2
1052 VS1.2: vx.2 = memref2 VS1.3
1053 VS1.3: vx.3 = memref3
1055 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1056 VSnew.1: vz1 = vx.1 + ... VSnew.2
1057 VSnew.2: vz2 = vx.2 + ... VSnew.3
1058 VSnew.3: vz3 = vx.3 + ...
1060 The vectorization of S1 is explained in vectorizable_load.
1061 The vectorization of S2:
1062 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1063 the function 'vect_get_vec_def_for_operand' is called to
1064 get the relevant vector-def for each operand of S2. For operand x it
1065 returns the vector-def 'vx.0'.
1067 To create the remaining copies of the vector-stmt (VSnew.j), this
1068 function is called to get the relevant vector-def for each operand. It is
1069 obtained from the respective VS1.j stmt, which is recorded in the
1070 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1072 For example, to obtain the vector-def 'vx.1' in order to create the
1073 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1074 Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
1075 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1076 and return its def ('vx.1').
1077 Overall, to create the above sequence this function will be called 3 times:
1078 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1079 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1080 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1082 tree
1083 vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1085 gimple vec_stmt_for_operand;
1086 stmt_vec_info def_stmt_info;
1088 /* Do nothing; can reuse same def. */
1089 if (dt == vect_external_def || dt == vect_constant_def )
1090 return vec_oprnd;
1092 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1093 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1094 gcc_assert (def_stmt_info);
1095 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1096 gcc_assert (vec_stmt_for_operand);
1097 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1098 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1099 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1100 else
1101 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1102 return vec_oprnd;
1106 /* Get vectorized definitions for the operands to create a copy of an original
1107 stmt. See vect_get_vec_def_for_stmt_copy() for details. */
1109 static void
1110 vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
1111 VEC(tree,heap) **vec_oprnds0,
1112 VEC(tree,heap) **vec_oprnds1)
1114 tree vec_oprnd = VEC_pop (tree, *vec_oprnds0);
1116 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
1117 VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
1119 if (vec_oprnds1 && *vec_oprnds1)
1121 vec_oprnd = VEC_pop (tree, *vec_oprnds1);
1122 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
1123 VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
1128 /* Get vectorized definitions for OP0 and OP1, or SLP_NODE if it is not NULL. */
1130 static void
1131 vect_get_vec_defs (tree op0, tree op1, gimple stmt,
1132 VEC(tree,heap) **vec_oprnds0, VEC(tree,heap) **vec_oprnds1,
1133 slp_tree slp_node)
1135 if (slp_node)
1136 vect_get_slp_defs (slp_node, vec_oprnds0, vec_oprnds1);
1137 else
1139 tree vec_oprnd;
1141 *vec_oprnds0 = VEC_alloc (tree, heap, 1);
1142 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
1143 VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
1145 if (op1)
1147 *vec_oprnds1 = VEC_alloc (tree, heap, 1);
1148 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
1149 VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
1155 /* Function vect_finish_stmt_generation.
1157 Insert a new stmt. */
1159 void
1160 vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
1161 gimple_stmt_iterator *gsi)
1163 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1164 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1165 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1167 gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
1169 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1171 set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
1172 bb_vinfo));
1174 if (vect_print_dump_info (REPORT_DETAILS))
1176 fprintf (vect_dump, "add new stmt: ");
1177 print_gimple_stmt (vect_dump, vec_stmt, 0, TDF_SLIM);
1180 gimple_set_location (vec_stmt, gimple_location (gsi_stmt (*gsi)));
1183 /* Checks if CALL can be vectorized in type VECTYPE. Returns
1184 a function declaration if the target has a vectorized version
1185 of the function, or NULL_TREE if the function cannot be vectorized. */
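/* For instance, on a target that provides a vector square root, a call to
   sqrtf inside the loop may be mapped by this hook to the corresponding
   vector built-in; without such a target hook the call simply remains
   unvectorizable.  */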
1187 tree
1188 vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
1190 tree fndecl = gimple_call_fndecl (call);
1191 enum built_in_function code;
1193 /* We only handle functions that do not read or clobber memory -- i.e.
1194 const or novops ones. */
1195 if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
1196 return NULL_TREE;
1198 if (!fndecl
1199 || TREE_CODE (fndecl) != FUNCTION_DECL
1200 || !DECL_BUILT_IN (fndecl))
1201 return NULL_TREE;
1203 code = DECL_FUNCTION_CODE (fndecl);
1204 return targetm.vectorize.builtin_vectorized_function (code, vectype_out,
1205 vectype_in);
1208 /* Function vectorizable_call.
1210 Check if STMT performs a function call that can be vectorized.
1211 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1212 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1213 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1215 static bool
1216 vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
1218 tree vec_dest;
1219 tree scalar_dest;
1220 tree op, type;
1221 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1222 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
1223 tree vectype_out, vectype_in;
1224 int nunits_in;
1225 int nunits_out;
1226 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1227 tree fndecl, new_temp, def, rhs_type, lhs_type;
1228 gimple def_stmt;
1229 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1230 gimple new_stmt = NULL;
1231 int ncopies, j;
1232 VEC(tree, heap) *vargs = NULL;
1233 enum { NARROW, NONE, WIDEN } modifier;
1234 size_t i, nargs;
1236 /* FORNOW: unsupported in basic block SLP. */
1237 gcc_assert (loop_vinfo);
1239 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1240 return false;
1242 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1243 return false;
1245 /* FORNOW: SLP not supported. */
1246 if (STMT_SLP_TYPE (stmt_info))
1247 return false;
1249 /* Is STMT a vectorizable call? */
1250 if (!is_gimple_call (stmt))
1251 return false;
1253 if (TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
1254 return false;
1256 /* Process function arguments. */
1257 rhs_type = NULL_TREE;
1258 nargs = gimple_call_num_args (stmt);
1260   /* Bail out if the function has more than two arguments; we do
1261      not have interesting builtin functions to vectorize with more
1262      than two arguments.  Having no arguments is also not good.  */
1263 if (nargs == 0 || nargs > 2)
1264 return false;
1266 for (i = 0; i < nargs; i++)
1268 op = gimple_call_arg (stmt, i);
1270 /* We can only handle calls with arguments of the same type. */
1271 if (rhs_type
1272 && rhs_type != TREE_TYPE (op))
1274 if (vect_print_dump_info (REPORT_DETAILS))
1275 fprintf (vect_dump, "argument types differ.");
1276 return false;
1278 rhs_type = TREE_TYPE (op);
1280 if (!vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def, &dt[i]))
1282 if (vect_print_dump_info (REPORT_DETAILS))
1283 fprintf (vect_dump, "use not simple.");
1284 return false;
1288 vectype_in = get_vectype_for_scalar_type (rhs_type);
1289 if (!vectype_in)
1290 return false;
1291 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1293 lhs_type = TREE_TYPE (gimple_call_lhs (stmt));
1294 vectype_out = get_vectype_for_scalar_type (lhs_type);
1295 if (!vectype_out)
1296 return false;
1297 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1299 /* FORNOW */
1300 if (nunits_in == nunits_out / 2)
1301 modifier = NARROW;
1302 else if (nunits_out == nunits_in)
1303 modifier = NONE;
1304 else if (nunits_out == nunits_in / 2)
1305 modifier = WIDEN;
1306 else
1307 return false;
1309 /* For now, we only vectorize functions if a target specific builtin
1310 is available. TODO -- in some cases, it might be profitable to
1311 insert the calls for pieces of the vector, in order to be able
1312 to vectorize other operations in the loop. */
1313 fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
1314 if (fndecl == NULL_TREE)
1316 if (vect_print_dump_info (REPORT_DETAILS))
1317 fprintf (vect_dump, "function is not vectorizable.");
1319 return false;
1322 gcc_assert (!gimple_vuse (stmt));
1324 if (modifier == NARROW)
1325 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1326 else
1327 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1329 /* Sanity check: make sure that at least one copy of the vectorized stmt
1330 needs to be generated. */
1331 gcc_assert (ncopies >= 1);
1333 if (!vec_stmt) /* transformation not required. */
1335 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
1336 if (vect_print_dump_info (REPORT_DETAILS))
1337 fprintf (vect_dump, "=== vectorizable_call ===");
1338 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1339 return true;
1342 /** Transform. **/
1344 if (vect_print_dump_info (REPORT_DETAILS))
1345 fprintf (vect_dump, "transform operation.");
1347 /* Handle def. */
1348 scalar_dest = gimple_call_lhs (stmt);
1349 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1351 prev_stmt_info = NULL;
1352 switch (modifier)
1354 case NONE:
1355 for (j = 0; j < ncopies; ++j)
1357 /* Build argument list for the vectorized call. */
1358 if (j == 0)
1359 vargs = VEC_alloc (tree, heap, nargs);
1360 else
1361 VEC_truncate (tree, vargs, 0);
1363 for (i = 0; i < nargs; i++)
1365 op = gimple_call_arg (stmt, i);
1366 if (j == 0)
1367 vec_oprnd0
1368 = vect_get_vec_def_for_operand (op, stmt, NULL);
1369 else
1371 vec_oprnd0 = gimple_call_arg (new_stmt, i);
1372 vec_oprnd0
1373 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1376 VEC_quick_push (tree, vargs, vec_oprnd0);
1379 new_stmt = gimple_build_call_vec (fndecl, vargs);
1380 new_temp = make_ssa_name (vec_dest, new_stmt);
1381 gimple_call_set_lhs (new_stmt, new_temp);
1383 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1385 if (j == 0)
1386 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1387 else
1388 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1390 prev_stmt_info = vinfo_for_stmt (new_stmt);
1393 break;
1395 case NARROW:
1396 for (j = 0; j < ncopies; ++j)
1398 /* Build argument list for the vectorized call. */
1399 if (j == 0)
1400 vargs = VEC_alloc (tree, heap, nargs * 2);
1401 else
1402 VEC_truncate (tree, vargs, 0);
1404 for (i = 0; i < nargs; i++)
1406 op = gimple_call_arg (stmt, i);
1407 if (j == 0)
1409 vec_oprnd0
1410 = vect_get_vec_def_for_operand (op, stmt, NULL);
1411 vec_oprnd1
1412 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1414 else
1416 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i);
1417 vec_oprnd0
1418 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
1419 vec_oprnd1
1420 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1423 VEC_quick_push (tree, vargs, vec_oprnd0);
1424 VEC_quick_push (tree, vargs, vec_oprnd1);
1427 new_stmt = gimple_build_call_vec (fndecl, vargs);
1428 new_temp = make_ssa_name (vec_dest, new_stmt);
1429 gimple_call_set_lhs (new_stmt, new_temp);
1431 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1433 if (j == 0)
1434 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1435 else
1436 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1438 prev_stmt_info = vinfo_for_stmt (new_stmt);
1441 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1443 break;
1445 case WIDEN:
1446 /* No current target implements this case. */
1447 return false;
1450 VEC_free (tree, heap, vargs);
1452 /* Update the exception handling table with the vector stmt if necessary. */
1453 if (maybe_clean_or_replace_eh_stmt (stmt, *vec_stmt))
1454 gimple_purge_dead_eh_edges (gimple_bb (stmt));
1456 /* The call in STMT might prevent it from being removed in dce.
1457 We however cannot remove it here, due to the way the ssa name
1458 it defines is mapped to the new definition. So just replace
1459 rhs of the statement with something harmless. */
1461 type = TREE_TYPE (scalar_dest);
1462 new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
1463 fold_convert (type, integer_zero_node));
1464 set_vinfo_for_stmt (new_stmt, stmt_info);
1465 set_vinfo_for_stmt (stmt, NULL);
1466 STMT_VINFO_STMT (stmt_info) = new_stmt;
1467 gsi_replace (gsi, new_stmt, false);
1468 SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
1470 return true;
1474 /* Function vect_gen_widened_results_half
1476    Create a vector stmt whose code, operand kind, and result variable are
1477    given by CODE, OP_TYPE, and VEC_DEST, and whose arguments are VEC_OPRND0
1478    and VEC_OPRND1.  The new vector stmt is to be inserted at BSI.
1479 In the case that CODE is a CALL_EXPR, this means that a call to DECL
1480 needs to be created (DECL is a function-decl of a target-builtin).
1481 STMT is the original scalar stmt that we are vectorizing. */
1483 static gimple
1484 vect_gen_widened_results_half (enum tree_code code,
1485 tree decl,
1486 tree vec_oprnd0, tree vec_oprnd1, int op_type,
1487 tree vec_dest, gimple_stmt_iterator *gsi,
1488 gimple stmt)
1490 gimple new_stmt;
1491 tree new_temp;
1493 /* Generate half of the widened result: */
1494 if (code == CALL_EXPR)
1496 /* Target specific support */
1497 if (op_type == binary_op)
1498 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
1499 else
1500 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
1501 new_temp = make_ssa_name (vec_dest, new_stmt);
1502 gimple_call_set_lhs (new_stmt, new_temp);
1504 else
1506 /* Generic support */
1507 gcc_assert (op_type == TREE_CODE_LENGTH (code));
1508 if (op_type != binary_op)
1509 vec_oprnd1 = NULL;
1510 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
1511 vec_oprnd1);
1512 new_temp = make_ssa_name (vec_dest, new_stmt);
1513 gimple_assign_set_lhs (new_stmt, new_temp);
1515 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1517 return new_stmt;
1521 /* Check if STMT performs a conversion operation that can be vectorized.
1522 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1523 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1524 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1526 static bool
1527 vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
1528 gimple *vec_stmt, slp_tree slp_node)
1530 tree vec_dest;
1531 tree scalar_dest;
1532 tree op0;
1533 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1534 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1535 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1536 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
1537 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
1538 tree new_temp;
1539 tree def;
1540 gimple def_stmt;
1541 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1542 gimple new_stmt = NULL;
1543 stmt_vec_info prev_stmt_info;
1544 int nunits_in;
1545 int nunits_out;
1546 tree vectype_out, vectype_in;
1547 int ncopies, j;
1548 tree expr;
1549 tree rhs_type, lhs_type;
1550 tree builtin_decl;
1551 enum { NARROW, NONE, WIDEN } modifier;
1552 int i;
1553 VEC(tree,heap) *vec_oprnds0 = NULL;
1554 tree vop0;
1555 tree integral_type;
1556 VEC(tree,heap) *dummy = NULL;
1557 int dummy_int;
1559 /* Is STMT a vectorizable conversion? */
1561 /* FORNOW: unsupported in basic block SLP. */
1562 gcc_assert (loop_vinfo);
1564 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1565 return false;
1567 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1568 return false;
1570 if (!is_gimple_assign (stmt))
1571 return false;
1573 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
1574 return false;
1576 code = gimple_assign_rhs_code (stmt);
1577 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
1578 return false;
1580 /* Check types of lhs and rhs. */
1581 op0 = gimple_assign_rhs1 (stmt);
1582 rhs_type = TREE_TYPE (op0);
1583 vectype_in = get_vectype_for_scalar_type (rhs_type);
1584 if (!vectype_in)
1585 return false;
1586 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1588 scalar_dest = gimple_assign_lhs (stmt);
1589 lhs_type = TREE_TYPE (scalar_dest);
1590 vectype_out = get_vectype_for_scalar_type (lhs_type);
1591 if (!vectype_out)
1592 return false;
1593 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1595 /* FORNOW */
1596 if (nunits_in == nunits_out / 2)
1597 modifier = NARROW;
1598 else if (nunits_out == nunits_in)
1599 modifier = NONE;
1600 else if (nunits_out == nunits_in / 2)
1601 modifier = WIDEN;
1602 else
1603 return false;
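  /* With 128-bit vectors, for example, a FLOAT_EXPR from int (V4SI,
     nunits_in == 4) to double (V2DF, nunits_out == 2) is a WIDEN
     conversion, a FIX_TRUNC_EXPR from double (V2DF) to int (V4SI) is a
     NARROW one, and float (V4SF) <-> int (V4SI) conversions are NONE.  */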
1605 if (modifier == NONE)
1606 gcc_assert (STMT_VINFO_VECTYPE (stmt_info) == vectype_out);
1608 /* Bail out if the types are both integral or non-integral. */
1609 if ((INTEGRAL_TYPE_P (rhs_type) && INTEGRAL_TYPE_P (lhs_type))
1610 || (!INTEGRAL_TYPE_P (rhs_type) && !INTEGRAL_TYPE_P (lhs_type)))
1611 return false;
1613 integral_type = INTEGRAL_TYPE_P (rhs_type) ? vectype_in : vectype_out;
1615 if (modifier == NARROW)
1616 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1617 else
1618 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1620 /* FORNOW: SLP with multiple types is not supported. The SLP analysis verifies
1621 this, so we can safely override NCOPIES with 1 here. */
1622 if (slp_node)
1623 ncopies = 1;
1625 /* Sanity check: make sure that at least one copy of the vectorized stmt
1626 needs to be generated. */
1627 gcc_assert (ncopies >= 1);
1629 /* Check the operands of the operation. */
1630 if (!vect_is_simple_use (op0, loop_vinfo, NULL, &def_stmt, &def, &dt[0]))
1632 if (vect_print_dump_info (REPORT_DETAILS))
1633 fprintf (vect_dump, "use not simple.");
1634 return false;
1637 /* Supportable by target? */
1638 if ((modifier == NONE
1639 && !targetm.vectorize.builtin_conversion (code, integral_type))
1640 || (modifier == WIDEN
1641 && !supportable_widening_operation (code, stmt, vectype_in,
1642 &decl1, &decl2,
1643 &code1, &code2,
1644 &dummy_int, &dummy))
1645 || (modifier == NARROW
1646 && !supportable_narrowing_operation (code, stmt, vectype_in,
1647 &code1, &dummy_int, &dummy)))
1649 if (vect_print_dump_info (REPORT_DETAILS))
1650 fprintf (vect_dump, "conversion not supported by target.");
1651 return false;
1654 if (modifier != NONE)
1656 STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
1657 /* FORNOW: SLP not supported. */
1658 if (STMT_SLP_TYPE (stmt_info))
1659 return false;
1662 if (!vec_stmt) /* transformation not required. */
1664 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
1665 return true;
1668 /** Transform. **/
1669 if (vect_print_dump_info (REPORT_DETAILS))
1670 fprintf (vect_dump, "transform conversion.");
1672 /* Handle def. */
1673 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1675 if (modifier == NONE && !slp_node)
1676 vec_oprnds0 = VEC_alloc (tree, heap, 1);
1678 prev_stmt_info = NULL;
1679 switch (modifier)
1681 case NONE:
1682 for (j = 0; j < ncopies; j++)
1684 if (j == 0)
1685 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
1686 else
1687 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
1689 builtin_decl =
1690 targetm.vectorize.builtin_conversion (code, integral_type);
1691 for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vop0); i++)
1693           /* Arguments are ready.  Create the new vector stmt.  */
1694 new_stmt = gimple_build_call (builtin_decl, 1, vop0);
1695 new_temp = make_ssa_name (vec_dest, new_stmt);
1696 gimple_call_set_lhs (new_stmt, new_temp);
1697 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1698 if (slp_node)
1699 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
1702 if (j == 0)
1703 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1704 else
1705 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1706 prev_stmt_info = vinfo_for_stmt (new_stmt);
1708 break;
1710 case WIDEN:
1711 /* In case the vectorization factor (VF) is bigger than the number
1712 of elements that we can fit in a vectype (nunits), we have to
1713 generate more than one vector stmt - i.e - we need to "unroll"
1714 the vector stmt by a factor VF/nunits. */
1715 for (j = 0; j < ncopies; j++)
1717 if (j == 0)
1718 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1719 else
1720 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1722 STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
1724 /* Generate first half of the widened result: */
1725 new_stmt
1726 = vect_gen_widened_results_half (code1, decl1,
1727 vec_oprnd0, vec_oprnd1,
1728 unary_op, vec_dest, gsi, stmt);
1729 if (j == 0)
1730 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1731 else
1732 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1733 prev_stmt_info = vinfo_for_stmt (new_stmt);
1735 /* Generate second half of the widened result: */
1736 new_stmt
1737 = vect_gen_widened_results_half (code2, decl2,
1738 vec_oprnd0, vec_oprnd1,
1739 unary_op, vec_dest, gsi, stmt);
1740 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1741 prev_stmt_info = vinfo_for_stmt (new_stmt);
1743 break;
1745 case NARROW:
1746 /* In case the vectorization factor (VF) is bigger than the number
1747 of elements that we can fit in a vectype (nunits), we have to
1748 generate more than one vector stmt - i.e - we need to "unroll"
1749 the vector stmt by a factor VF/nunits. */
1750 for (j = 0; j < ncopies; j++)
1752 /* Handle uses. */
1753 if (j == 0)
1755 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1756 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1758 else
1760 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1);
1761 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1764 /* Arguments are ready. Create the new vector stmt. */
1765 expr = build2 (code1, vectype_out, vec_oprnd0, vec_oprnd1);
1766 new_stmt = gimple_build_assign_with_ops (code1, vec_dest, vec_oprnd0,
1767 vec_oprnd1);
1768 new_temp = make_ssa_name (vec_dest, new_stmt);
1769 gimple_assign_set_lhs (new_stmt, new_temp);
1770 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1772 if (j == 0)
1773 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1774 else
1775 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1777 prev_stmt_info = vinfo_for_stmt (new_stmt);
1780 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1783 if (vec_oprnds0)
1784 VEC_free (tree, heap, vec_oprnds0);
1786 return true;
1788 /* Function vectorizable_assignment.
1790 Check if STMT performs an assignment (copy) that can be vectorized.
1791 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1792 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1793 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1795 static bool
1796 vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
1797 gimple *vec_stmt, slp_tree slp_node)
1799 tree vec_dest;
1800 tree scalar_dest;
1801 tree op;
1802 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1803 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1804 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1805 tree new_temp;
1806 tree def;
1807 gimple def_stmt;
1808 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1809 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
1810 int ncopies;
1811 int i;
1812 VEC(tree,heap) *vec_oprnds = NULL;
1813 tree vop;
1814 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1816 /* Multiple types in SLP are handled by creating the appropriate number of
1817 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1818 case of SLP. */
1819 if (slp_node)
1820 ncopies = 1;
1821 else
1822 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
1824 gcc_assert (ncopies >= 1);
1825 if (ncopies > 1)
1826 return false; /* FORNOW */
1828 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
1829 return false;
1831 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1832 return false;
1834 /* Is vectorizable assignment? */
1835 if (!is_gimple_assign (stmt))
1836 return false;
1838 scalar_dest = gimple_assign_lhs (stmt);
1839 if (TREE_CODE (scalar_dest) != SSA_NAME)
1840 return false;
1842 if (gimple_assign_single_p (stmt)
1843 || gimple_assign_rhs_code (stmt) == PAREN_EXPR)
1844 op = gimple_assign_rhs1 (stmt);
1845 else
1846 return false;
1848 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt[0]))
1850 if (vect_print_dump_info (REPORT_DETAILS))
1851 fprintf (vect_dump, "use not simple.");
1852 return false;
1855 if (!vec_stmt) /* transformation not required. */
1857 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
1858 if (vect_print_dump_info (REPORT_DETAILS))
1859 fprintf (vect_dump, "=== vectorizable_assignment ===");
1860 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1861 return true;
1864 /** Transform. **/
1865 if (vect_print_dump_info (REPORT_DETAILS))
1866 fprintf (vect_dump, "transform assignment.");
1868 /* Handle def. */
1869 vec_dest = vect_create_destination_var (scalar_dest, vectype);
1871 /* Handle use. */
1872 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
1874 /* Arguments are ready. Create the new vector stmt. */
1875 for (i = 0; VEC_iterate (tree, vec_oprnds, i, vop); i++)
1877 *vec_stmt = gimple_build_assign (vec_dest, vop);
1878 new_temp = make_ssa_name (vec_dest, *vec_stmt);
1879 gimple_assign_set_lhs (*vec_stmt, new_temp);
1880 vect_finish_stmt_generation (stmt, *vec_stmt, gsi);
1881 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt;
1883 if (slp_node)
1884 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), *vec_stmt);
1887 VEC_free (tree, heap, vec_oprnds);
1888 return true;
1891 /* Function vectorizable_operation.
1893 Check if STMT performs a binary or unary operation that can be vectorized.
1894 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1895 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1896 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1898 static bool
1899 vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
1900 gimple *vec_stmt, slp_tree slp_node)
1902 tree vec_dest;
1903 tree scalar_dest;
1904 tree op0, op1 = NULL;
1905 tree vec_oprnd1 = NULL_TREE;
1906 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1907 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1908 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1909 enum tree_code code;
1910 enum machine_mode vec_mode;
1911 tree new_temp;
1912 int op_type;
1913 optab optab;
1914 int icode;
1915 enum machine_mode optab_op2_mode;
1916 tree def;
1917 gimple def_stmt;
1918 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1919 gimple new_stmt = NULL;
1920 stmt_vec_info prev_stmt_info;
1921 int nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
1922 int nunits_out;
1923 tree vectype_out;
1924 int ncopies;
1925 int j, i;
1926 VEC(tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
1927 tree vop0, vop1;
1928 unsigned int k;
1929 bool shift_p = false;
1930 bool scalar_shift_arg = false;
1931 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1932 int vf;
1934 if (loop_vinfo)
1935 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1936 else
1937 /* FORNOW: multiple types are not supported in basic block SLP. */
1938 vf = nunits_in;
1940 /* Multiple types in SLP are handled by creating the appropriate number of
1941 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1942 case of SLP. */
1943 if (slp_node)
1944 ncopies = 1;
1945 else
1946 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1948 gcc_assert (ncopies >= 1);
1950 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
1951 return false;
1953 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1954 return false;
1956 /* Is STMT a vectorizable binary/unary operation? */
1957 if (!is_gimple_assign (stmt))
1958 return false;
1960 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
1961 return false;
1963 scalar_dest = gimple_assign_lhs (stmt);
1964 vectype_out = get_vectype_for_scalar_type (TREE_TYPE (scalar_dest));
1965 if (!vectype_out)
1966 return false;
1967 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1968 if (nunits_out != nunits_in)
1969 return false;
1971 code = gimple_assign_rhs_code (stmt);
1973 /* For pointer addition, we should use the normal plus for
1974 the vector addition. */
1975 if (code == POINTER_PLUS_EXPR)
1976 code = PLUS_EXPR;
1978 /* Support only unary or binary operations. */
1979 op_type = TREE_CODE_LENGTH (code);
1980 if (op_type != unary_op && op_type != binary_op)
1982 if (vect_print_dump_info (REPORT_DETAILS))
1983 fprintf (vect_dump, "num. args = %d (not unary/binary op).", op_type);
1984 return false;
1987 op0 = gimple_assign_rhs1 (stmt);
1988 if (!vect_is_simple_use (op0, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt[0]))
1990 if (vect_print_dump_info (REPORT_DETAILS))
1991 fprintf (vect_dump, "use not simple.");
1992 return false;
1995 if (op_type == binary_op)
1997 op1 = gimple_assign_rhs2 (stmt);
1998 if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def,
1999 &dt[1]))
2001 if (vect_print_dump_info (REPORT_DETAILS))
2002 fprintf (vect_dump, "use not simple.");
2003 return false;
2007 /* If this is a shift/rotate, determine whether the shift amount is a vector,
2008 or scalar. If the shift/rotate amount is a vector, use the vector/vector
2009 shift optabs. */
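/* A sketch of the two cases (the scalar loops are for illustration only;
   the optab actually used depends on the operand definitions checked below
   and on what the target provides):

     for (i=0; i<n; i++)  c[i] = a[i] << b[i];   shift amount is a vector
                                                  def - query optab_vector.
     for (i=0; i<n; i++)  c[i] = a[i] << k;      k is a constant or loop
                                                  invariant - prefer
                                                  optab_scalar. */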
2010 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
2011 || code == RROTATE_EXPR)
2013 shift_p = true;
2015 /* vector shifted by vector */
2016 if (dt[1] == vect_internal_def)
2018 optab = optab_for_tree_code (code, vectype, optab_vector);
2019 if (vect_print_dump_info (REPORT_DETAILS))
2020 fprintf (vect_dump, "vector/vector shift/rotate found.");
2023 /* See if the machine has a vector shifted by scalar insn, and if not,
2024 then see if it has a vector shifted by vector insn. */
2025 else if (dt[1] == vect_constant_def || dt[1] == vect_external_def)
2027 optab = optab_for_tree_code (code, vectype, optab_scalar);
2028 if (optab
2029 && (optab_handler (optab, TYPE_MODE (vectype))->insn_code
2030 != CODE_FOR_nothing))
2032 scalar_shift_arg = true;
2033 if (vect_print_dump_info (REPORT_DETAILS))
2034 fprintf (vect_dump, "vector/scalar shift/rotate found.");
2036 else
2038 optab = optab_for_tree_code (code, vectype, optab_vector);
2039 if (optab
2040 && (optab_handler (optab, TYPE_MODE (vectype))->insn_code
2041 != CODE_FOR_nothing))
2043 if (vect_print_dump_info (REPORT_DETAILS))
2044 fprintf (vect_dump, "vector/vector shift/rotate found.");
2046 /* Unlike the other binary operators, shifts/rotates have
2047 the rhs being int, instead of the same type as the lhs,
2048 so make sure the scalar is the right type if we are
2049 dealing with vectors of short/char. */
2050 if (dt[1] == vect_constant_def)
2051 op1 = fold_convert (TREE_TYPE (vectype), op1);
2056 else
2058 if (vect_print_dump_info (REPORT_DETAILS))
2059 fprintf (vect_dump, "operand mode requires invariant argument.");
2060 return false;
2063 else
2064 optab = optab_for_tree_code (code, vectype, optab_default);
2066 /* Supportable by target? */
2067 if (!optab)
2069 if (vect_print_dump_info (REPORT_DETAILS))
2070 fprintf (vect_dump, "no optab.");
2071 return false;
2073 vec_mode = TYPE_MODE (vectype);
2074 icode = (int) optab_handler (optab, vec_mode)->insn_code;
2075 if (icode == CODE_FOR_nothing)
2077 if (vect_print_dump_info (REPORT_DETAILS))
2078 fprintf (vect_dump, "op not supported by target.");
2079 /* Check only during analysis. */
2080 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
2081 || (vf < vect_min_worthwhile_factor (code)
2082 && !vec_stmt))
2083 return false;
2084 if (vect_print_dump_info (REPORT_DETAILS))
2085 fprintf (vect_dump, "proceeding using word mode.");
2088 /* Worthwhile without SIMD support? Check only during analysis. */
2089 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2090 && vf < vect_min_worthwhile_factor (code)
2091 && !vec_stmt)
2093 if (vect_print_dump_info (REPORT_DETAILS))
2094 fprintf (vect_dump, "not worthwhile without SIMD support.");
2095 return false;
2098 if (!vec_stmt) /* transformation not required. */
2100 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
2101 if (vect_print_dump_info (REPORT_DETAILS))
2102 fprintf (vect_dump, "=== vectorizable_operation ===");
2103 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2104 return true;
2107 /** Transform. **/
2109 if (vect_print_dump_info (REPORT_DETAILS))
2110 fprintf (vect_dump, "transform binary/unary operation.");
2112 /* Handle def. */
2113 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2115 /* Allocate VECs for vector operands. In case of SLP, vector operands are
2116 created in the previous stages of the recursion, so no allocation is
2117 needed, except for the case of a shift with a scalar shift argument. In that
2118 case we store the scalar operand in VEC_OPRNDS1 once for every vector stmt to
2119 be created for the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE times.
2120 In case of loop-based vectorization we allocate VECs of size 1. We
2121 allocate VEC_OPRNDS1 only in case of binary operation. */
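/* For example (the sizes are illustrative only): an SLP node that needs
   SLP_NODE->VEC_STMTS_SIZE == 4 vector stmts and has a scalar shift argument
   gets a VEC_OPRNDS1 of size 4 holding four copies of that scalar operand,
   whereas in loop-based vectorization both VECs are of size 1. */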
2122 if (!slp_node)
2124 vec_oprnds0 = VEC_alloc (tree, heap, 1);
2125 if (op_type == binary_op)
2126 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2128 else if (scalar_shift_arg)
2129 vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);
2131 /* In case the vectorization factor (VF) is bigger than the number
2132 of elements that we can fit in a vectype (nunits), we have to generate
2133 more than one vector stmt - i.e - we need to "unroll" the
2134 vector stmt by a factor VF/nunits. In doing so, we record a pointer
2135 from one copy of the vector stmt to the next, in the field
2136 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
2137 stages to find the correct vector defs to be used when vectorizing
2138 stmts that use the defs of the current stmt. The example below illustrates
2139 the vectorization process when VF=16 and nunits=4 (i.e - we need to create
2140 4 vectorized stmts):
2142 before vectorization:
2143 RELATED_STMT VEC_STMT
2144 S1: x = memref - -
2145 S2: z = x + 1 - -
2147 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
2148 there):
2149 RELATED_STMT VEC_STMT
2150 VS1_0: vx0 = memref0 VS1_1 -
2151 VS1_1: vx1 = memref1 VS1_2 -
2152 VS1_2: vx2 = memref2 VS1_3 -
2153 VS1_3: vx3 = memref3 - -
2154 S1: x = load - VS1_0
2155 S2: z = x + 1 - -
2157 step2: vectorize stmt S2 (done here):
2158 To vectorize stmt S2 we first need to find the relevant vector
2159 def for the first operand 'x'. This is, as usual, obtained from
2160 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
2161 that defines 'x' (S1). This way we find the stmt VS1_0, and the
2162 relevant vector def 'vx0'. Having found 'vx0' we can generate
2163 the vector stmt VS2_0, and as usual, record it in the
2164 STMT_VINFO_VEC_STMT of stmt S2.
2165 When creating the second copy (VS2_1), we obtain the relevant vector
2166 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
2167 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
2168 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
2169 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
2170 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
2171 chain of stmts and pointers:
2172 RELATED_STMT VEC_STMT
2173 VS1_0: vx0 = memref0 VS1_1 -
2174 VS1_1: vx1 = memref1 VS1_2 -
2175 VS1_2: vx2 = memref2 VS1_3 -
2176 VS1_3: vx3 = memref3 - -
2177 S1: x = load - VS1_0
2178 VS2_0: vz0 = vx0 + v1 VS2_1 -
2179 VS2_1: vz1 = vx1 + v1 VS2_2 -
2180 VS2_2: vz2 = vx2 + v1 VS2_3 -
2181 VS2_3: vz3 = vx3 + v1 - -
2182 S2: z = x + 1 - VS2_0 */
2184 prev_stmt_info = NULL;
2185 for (j = 0; j < ncopies; j++)
2187 /* Handle uses. */
2188 if (j == 0)
2190 if (op_type == binary_op && scalar_shift_arg)
2192 /* Vector shl and shr insn patterns can be defined with scalar
2193 operand 2 (shift operand). In this case, use constant or loop
2194 invariant op1 directly, without extending it to vector mode
2195 first. */
2196 optab_op2_mode = insn_data[icode].operand[2].mode;
2197 if (!VECTOR_MODE_P (optab_op2_mode))
2199 if (vect_print_dump_info (REPORT_DETAILS))
2200 fprintf (vect_dump, "operand 1 using scalar mode.");
2201 vec_oprnd1 = op1;
2202 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2203 if (slp_node)
2205 /* Store vec_oprnd1 for every vector stmt to be created
2206 for SLP_NODE. We check during the analysis that all the
2207 shift arguments are the same.
2208 TODO: Allow different constants for different vector
2209 stmts generated for an SLP instance. */
2210 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
2211 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2216 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
2217 (a special case for certain kind of vector shifts); otherwise,
2218 operand 1 should be of a vector type (the usual case). */
2219 if (op_type == binary_op && !vec_oprnd1)
2220 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
2221 slp_node);
2222 else
2223 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
2224 slp_node);
2226 else
2227 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
2229 /* Arguments are ready. Create the new vector stmt. */
2230 for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vop0); i++)
2232 vop1 = ((op_type == binary_op)
2233 ? VEC_index (tree, vec_oprnds1, i) : NULL);
2234 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2235 new_temp = make_ssa_name (vec_dest, new_stmt);
2236 gimple_assign_set_lhs (new_stmt, new_temp);
2237 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2238 if (slp_node)
2239 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2242 if (slp_node)
2243 continue;
2245 if (j == 0)
2246 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2247 else
2248 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2249 prev_stmt_info = vinfo_for_stmt (new_stmt);
2252 VEC_free (tree, heap, vec_oprnds0);
2253 if (vec_oprnds1)
2254 VEC_free (tree, heap, vec_oprnds1);
2256 return true;
2260 /* Get vectorized definitions for loop-based vectorization. For the first
2261 operand we call vect_get_vec_def_for_operand() (with OPRND containing
2262 scalar operand), and for the rest we get a copy with
2263 vect_get_vec_def_for_stmt_copy() using the previous vector definition
2264 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
2265 The vectors are collected into VEC_OPRNDS. */
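/* For example (the operand names are illustrative only): for a single-step
   conversion this pushes two defs, VX0 obtained from the scalar operand and
   VX1 obtained as a copy of VX0; with MULTI_STEP_CVT == 1 the recursive call
   pushes two further copies, VX2 and VX3, so VEC_OPRNDS ends up holding four
   defs. */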
2267 static void
2268 vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
2269 VEC (tree, heap) **vec_oprnds, int multi_step_cvt)
2271 tree vec_oprnd;
2273 /* Get first vector operand. */
2274 /* All the vector operands except the very first one (which is the scalar
2275 operand) are stmt copies. */
2276 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
2277 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
2278 else
2279 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
2281 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2283 /* Get second vector operand. */
2284 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
2285 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2287 *oprnd = vec_oprnd;
2289 /* For conversion in multiple steps, continue to get operands
2290 recursively. */
2291 if (multi_step_cvt)
2292 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
2296 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
2297 For multi-step conversions store the resulting vectors and call the function
2298 recursively. */
2300 static void
2301 vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
2302 int multi_step_cvt, gimple stmt,
2303 VEC (tree, heap) *vec_dsts,
2304 gimple_stmt_iterator *gsi,
2305 slp_tree slp_node, enum tree_code code,
2306 stmt_vec_info *prev_stmt_info)
2308 unsigned int i;
2309 tree vop0, vop1, new_tmp, vec_dest;
2310 gimple new_stmt;
2311 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2313 vec_dest = VEC_pop (tree, vec_dsts);
2315 for (i = 0; i < VEC_length (tree, *vec_oprnds); i += 2)
2317 /* Create demotion operation. */
2318 vop0 = VEC_index (tree, *vec_oprnds, i);
2319 vop1 = VEC_index (tree, *vec_oprnds, i + 1);
2320 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2321 new_tmp = make_ssa_name (vec_dest, new_stmt);
2322 gimple_assign_set_lhs (new_stmt, new_tmp);
2323 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2325 if (multi_step_cvt)
2326 /* Store the resulting vector for next recursive call. */
2327 VEC_replace (tree, *vec_oprnds, i/2, new_tmp);
2328 else
2330 /* This is the last step of the conversion sequence. Store the
2331 vectors in SLP_NODE or in vector info of the scalar statement
2332 (or in STMT_VINFO_RELATED_STMT chain). */
2333 if (slp_node)
2334 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2335 else
2337 if (!*prev_stmt_info)
2338 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2339 else
2340 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
2342 *prev_stmt_info = vinfo_for_stmt (new_stmt);
2347 /* For multi-step demotion operations we first generate demotion operations
2348 from the source type to the intermediate types, and then combine the
2349 results (stored in VEC_OPRNDS) in a demotion operation to the destination
2350 type. */
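/* Illustrative sketch (the vector modes are typical, not guaranteed):
   narrowing int to char with four V4SI inputs first produces two V8HI
   vectors, and the recursive call then combines those into one V16QI
   vector - each level halves the number of operands. */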
2351 if (multi_step_cvt)
2353 /* At each level of recursion we have half of the operands we had at the
2354 previous level. */
2355 VEC_truncate (tree, *vec_oprnds, (i+1)/2);
2356 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
2357 stmt, vec_dsts, gsi, slp_node,
2358 code, prev_stmt_info);
2363 /* Function vectorizable_type_demotion
2365 Check if STMT performs a binary or unary operation that involves
2366 type demotion, and if it can be vectorized.
2367 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2368 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2369 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2371 static bool
2372 vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
2373 gimple *vec_stmt, slp_tree slp_node)
2375 tree vec_dest;
2376 tree scalar_dest;
2377 tree op0;
2378 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2379 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2380 enum tree_code code, code1 = ERROR_MARK;
2381 tree def;
2382 gimple def_stmt;
2383 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2384 stmt_vec_info prev_stmt_info;
2385 int nunits_in;
2386 int nunits_out;
2387 tree vectype_out;
2388 int ncopies;
2389 int j, i;
2390 tree vectype_in;
2391 int multi_step_cvt = 0;
2392 VEC (tree, heap) *vec_oprnds0 = NULL;
2393 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2394 tree last_oprnd, intermediate_type;
2396 /* FORNOW: not supported by basic block SLP vectorization. */
2397 gcc_assert (loop_vinfo);
2399 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2400 return false;
2402 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2403 return false;
2405 /* Is STMT a vectorizable type-demotion operation? */
2406 if (!is_gimple_assign (stmt))
2407 return false;
2409 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2410 return false;
2412 code = gimple_assign_rhs_code (stmt);
2413 if (!CONVERT_EXPR_CODE_P (code))
2414 return false;
2416 op0 = gimple_assign_rhs1 (stmt);
2417 vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op0));
2418 if (!vectype_in)
2419 return false;
2420 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2422 scalar_dest = gimple_assign_lhs (stmt);
2423 vectype_out = get_vectype_for_scalar_type (TREE_TYPE (scalar_dest));
2424 if (!vectype_out)
2425 return false;
2426 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2427 if (nunits_in >= nunits_out)
2428 return false;
2430 /* Multiple types in SLP are handled by creating the appropriate number of
2431 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2432 case of SLP. */
2433 if (slp_node)
2434 ncopies = 1;
2435 else
2436 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2437 gcc_assert (ncopies >= 1);
2439 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2440 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2441 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2442 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2443 && CONVERT_EXPR_CODE_P (code))))
2444 return false;
2446 /* Check the operands of the operation. */
2447 if (!vect_is_simple_use (op0, loop_vinfo, NULL, &def_stmt, &def, &dt[0]))
2449 if (vect_print_dump_info (REPORT_DETAILS))
2450 fprintf (vect_dump, "use not simple.");
2451 return false;
2454 /* Supportable by target? */
2455 if (!supportable_narrowing_operation (code, stmt, vectype_in, &code1,
2456 &multi_step_cvt, &interm_types))
2457 return false;
2459 STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
2461 if (!vec_stmt) /* transformation not required. */
2463 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
2464 if (vect_print_dump_info (REPORT_DETAILS))
2465 fprintf (vect_dump, "=== vectorizable_demotion ===");
2466 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2467 return true;
2470 /** Transform. **/
2471 if (vect_print_dump_info (REPORT_DETAILS))
2472 fprintf (vect_dump, "transform type demotion operation. ncopies = %d.",
2473 ncopies);
2475 /* In case of multi-step demotion, we first generate demotion operations to
2476 the intermediate types, and then from those types to the final one.
2477 We create vector destinations for the intermediate type (TYPES) received
2478 from supportable_narrowing_operation, and store them in the correct order
2479 for future use in vect_create_vectorized_demotion_stmts(). */
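/* For example (the types are illustrative only): when demoting int to char
   through an intermediate short type, VEC_DSTS holds the char vector
   destination first and the short vector destination last, so that
   vect_create_vectorized_demotion_stmts() pops the short destination for
   the first step and the char destination for the final step. */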
2480 if (multi_step_cvt)
2481 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
2482 else
2483 vec_dsts = VEC_alloc (tree, heap, 1);
2485 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2486 VEC_quick_push (tree, vec_dsts, vec_dest);
2488 if (multi_step_cvt)
2490 for (i = VEC_length (tree, interm_types) - 1;
2491 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
2493 vec_dest = vect_create_destination_var (scalar_dest,
2494 intermediate_type);
2495 VEC_quick_push (tree, vec_dsts, vec_dest);
2499 /* In case the vectorization factor (VF) is bigger than the number
2500 of elements that we can fit in a vectype (nunits), we have to generate
2501 more than one vector stmt - i.e - we need to "unroll" the
2502 vector stmt by a factor VF/nunits. */
2503 last_oprnd = op0;
2504 prev_stmt_info = NULL;
2505 for (j = 0; j < ncopies; j++)
2507 /* Handle uses. */
2508 if (slp_node)
2509 vect_get_slp_defs (slp_node, &vec_oprnds0, NULL);
2510 else
2512 VEC_free (tree, heap, vec_oprnds0);
2513 vec_oprnds0 = VEC_alloc (tree, heap,
2514 (multi_step_cvt ? vect_pow2 (multi_step_cvt) * 2 : 2));
2515 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
2516 vect_pow2 (multi_step_cvt) - 1);
2519 /* Arguments are ready. Create the new vector stmts. */
2520 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
2521 vect_create_vectorized_demotion_stmts (&vec_oprnds0,
2522 multi_step_cvt, stmt, tmp_vec_dsts,
2523 gsi, slp_node, code1,
2524 &prev_stmt_info);
2527 VEC_free (tree, heap, vec_oprnds0);
2528 VEC_free (tree, heap, vec_dsts);
2529 VEC_free (tree, heap, tmp_vec_dsts);
2530 VEC_free (tree, heap, interm_types);
2532 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2533 return true;
2537 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
2538 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
2539 the resulting vectors and call the function recursively. */
2541 static void
2542 vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
2543 VEC (tree, heap) **vec_oprnds1,
2544 int multi_step_cvt, gimple stmt,
2545 VEC (tree, heap) *vec_dsts,
2546 gimple_stmt_iterator *gsi,
2547 slp_tree slp_node, enum tree_code code1,
2548 enum tree_code code2, tree decl1,
2549 tree decl2, int op_type,
2550 stmt_vec_info *prev_stmt_info)
2552 int i;
2553 tree vop0, vop1, new_tmp1, new_tmp2, vec_dest;
2554 gimple new_stmt1, new_stmt2;
2555 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2556 VEC (tree, heap) *vec_tmp;
2558 vec_dest = VEC_pop (tree, vec_dsts);
2559 vec_tmp = VEC_alloc (tree, heap, VEC_length (tree, *vec_oprnds0) * 2);
2561 for (i = 0; VEC_iterate (tree, *vec_oprnds0, i, vop0); i++)
2563 if (op_type == binary_op)
2564 vop1 = VEC_index (tree, *vec_oprnds1, i);
2565 else
2566 vop1 = NULL_TREE;
2568 /* Generate the two halves of promotion operation. */
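/* Illustrative sketch (the codes and modes are typical, not guaranteed):
   for a char to short promotion of a V16QI operand, CODE1/CODE2 are usually
   VEC_UNPACK_LO_EXPR/VEC_UNPACK_HI_EXPR and each half yields a V8HI vector,
   so every input vector produces two output vectors. */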
2569 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
2570 op_type, vec_dest, gsi, stmt);
2571 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
2572 op_type, vec_dest, gsi, stmt);
2573 if (is_gimple_call (new_stmt1))
2575 new_tmp1 = gimple_call_lhs (new_stmt1);
2576 new_tmp2 = gimple_call_lhs (new_stmt2);
2578 else
2580 new_tmp1 = gimple_assign_lhs (new_stmt1);
2581 new_tmp2 = gimple_assign_lhs (new_stmt2);
2584 if (multi_step_cvt)
2586 /* Store the results for the recursive call. */
2587 VEC_quick_push (tree, vec_tmp, new_tmp1);
2588 VEC_quick_push (tree, vec_tmp, new_tmp2);
2590 else
2592 /* Last step of the promotion sequence - store the results. */
2593 if (slp_node)
2595 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt1);
2596 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt2);
2598 else
2600 if (!*prev_stmt_info)
2601 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt1;
2602 else
2603 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt1;
2605 *prev_stmt_info = vinfo_for_stmt (new_stmt1);
2606 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt2;
2607 *prev_stmt_info = vinfo_for_stmt (new_stmt2);
2612 if (multi_step_cvt)
2614 /* For a multi-step promotion operation we call the function recursively
2615 for every stage. We start from the input type, create promotion
2616 operations to the intermediate types, and then create promotions
2617 to the output type. */
2618 *vec_oprnds0 = VEC_copy (tree, heap, vec_tmp);
2619 VEC_free (tree, heap, vec_tmp);
2620 vect_create_vectorized_promotion_stmts (vec_oprnds0, vec_oprnds1,
2621 multi_step_cvt - 1, stmt,
2622 vec_dsts, gsi, slp_node, code1,
2623 code2, decl1, decl2, op_type,
2624 prev_stmt_info);
2629 /* Function vectorizable_type_promotion
2631 Check if STMT performs a binary or unary operation that involves
2632 type promotion, and if it can be vectorized.
2633 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2634 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2635 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2637 static bool
2638 vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
2639 gimple *vec_stmt, slp_tree slp_node)
2641 tree vec_dest;
2642 tree scalar_dest;
2643 tree op0, op1 = NULL;
2644 tree vec_oprnd0 = NULL, vec_oprnd1 = NULL;
2645 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2646 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2647 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
2648 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
2649 int op_type;
2650 tree def;
2651 gimple def_stmt;
2652 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2653 stmt_vec_info prev_stmt_info;
2654 int nunits_in;
2655 int nunits_out;
2656 tree vectype_out;
2657 int ncopies;
2658 int j, i;
2659 tree vectype_in;
2660 tree intermediate_type = NULL_TREE;
2661 int multi_step_cvt = 0;
2662 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
2663 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2665 /* FORNOW: not supported by basic block SLP vectorization. */
2666 gcc_assert (loop_vinfo);
2668 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2669 return false;
2671 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2672 return false;
2674 /* Is STMT a vectorizable type-promotion operation? */
2675 if (!is_gimple_assign (stmt))
2676 return false;
2678 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2679 return false;
2681 code = gimple_assign_rhs_code (stmt);
2682 if (!CONVERT_EXPR_CODE_P (code)
2683 && code != WIDEN_MULT_EXPR)
2684 return false;
2686 op0 = gimple_assign_rhs1 (stmt);
2687 vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op0));
2688 if (!vectype_in)
2689 return false;
2690 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2692 scalar_dest = gimple_assign_lhs (stmt);
2693 vectype_out = get_vectype_for_scalar_type (TREE_TYPE (scalar_dest));
2694 if (!vectype_out)
2695 return false;
2696 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2697 if (nunits_in <= nunits_out)
2698 return false;
2700 /* Multiple types in SLP are handled by creating the appropriate number of
2701 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2702 case of SLP. */
2703 if (slp_node)
2704 ncopies = 1;
2705 else
2706 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2708 gcc_assert (ncopies >= 1);
2710 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2711 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2712 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2713 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2714 && CONVERT_EXPR_CODE_P (code))))
2715 return false;
2717 /* Check the operands of the operation. */
2718 if (!vect_is_simple_use (op0, loop_vinfo, NULL, &def_stmt, &def, &dt[0]))
2720 if (vect_print_dump_info (REPORT_DETAILS))
2721 fprintf (vect_dump, "use not simple.");
2722 return false;
2725 op_type = TREE_CODE_LENGTH (code);
2726 if (op_type == binary_op)
2728 op1 = gimple_assign_rhs2 (stmt);
2729 if (!vect_is_simple_use (op1, loop_vinfo, NULL, &def_stmt, &def, &dt[1]))
2731 if (vect_print_dump_info (REPORT_DETAILS))
2732 fprintf (vect_dump, "use not simple.");
2733 return false;
2737 /* Supportable by target? */
2738 if (!supportable_widening_operation (code, stmt, vectype_in,
2739 &decl1, &decl2, &code1, &code2,
2740 &multi_step_cvt, &interm_types))
2741 return false;
2743 /* Binary widening operation can only be supported directly by the
2744 architecture. */
2745 gcc_assert (!(multi_step_cvt && op_type == binary_op));
2747 STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
2749 if (!vec_stmt) /* transformation not required. */
2751 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
2752 if (vect_print_dump_info (REPORT_DETAILS))
2753 fprintf (vect_dump, "=== vectorizable_promotion ===");
2754 vect_model_simple_cost (stmt_info, 2*ncopies, dt, NULL);
2755 return true;
2758 /** Transform. **/
2760 if (vect_print_dump_info (REPORT_DETAILS))
2761 fprintf (vect_dump, "transform type promotion operation. ncopies = %d.",
2762 ncopies);
2764 /* Handle def. */
2765 /* In case of multi-step promotion, we first generate promotion operations
2766 to the intermediate types, and then from those types to the final one.
2767 We store the vector destinations in VEC_DSTS in the correct order for
2768 recursive creation of promotion operations in
2769 vect_create_vectorized_promotion_stmts(). Vector destinations are created
2770 according to TYPES received from supportable_widening_operation(). */
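/* For example (the types are illustrative only): when promoting char to int
   through an intermediate short type, VEC_DSTS holds the int vector
   destination first and the short vector destination last, so the recursion
   in vect_create_vectorized_promotion_stmts() pops the short destination
   for the first (char to short) step and the int destination for the final
   step. */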
2771 if (multi_step_cvt)
2772 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
2773 else
2774 vec_dsts = VEC_alloc (tree, heap, 1);
2776 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2777 VEC_quick_push (tree, vec_dsts, vec_dest);
2779 if (multi_step_cvt)
2781 for (i = VEC_length (tree, interm_types) - 1;
2782 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
2784 vec_dest = vect_create_destination_var (scalar_dest,
2785 intermediate_type);
2786 VEC_quick_push (tree, vec_dsts, vec_dest);
2790 if (!slp_node)
2792 vec_oprnds0 = VEC_alloc (tree, heap,
2793 (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
2794 if (op_type == binary_op)
2795 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2798 /* In case the vectorization factor (VF) is bigger than the number
2799 of elements that we can fit in a vectype (nunits), we have to generate
2800 more than one vector stmt - i.e - we need to "unroll" the
2801 vector stmt by a factor VF/nunits. */
2803 prev_stmt_info = NULL;
2804 for (j = 0; j < ncopies; j++)
2806 /* Handle uses. */
2807 if (j == 0)
2809 if (slp_node)
2810 vect_get_slp_defs (slp_node, &vec_oprnds0, &vec_oprnds1);
2811 else
2813 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
2814 VEC_quick_push (tree, vec_oprnds0, vec_oprnd0);
2815 if (op_type == binary_op)
2817 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL);
2818 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2822 else
2824 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
2825 VEC_replace (tree, vec_oprnds0, 0, vec_oprnd0);
2826 if (op_type == binary_op)
2828 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1);
2829 VEC_replace (tree, vec_oprnds1, 0, vec_oprnd1);
2833 /* Arguments are ready. Create the new vector stmts. */
2834 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
2835 vect_create_vectorized_promotion_stmts (&vec_oprnds0, &vec_oprnds1,
2836 multi_step_cvt, stmt,
2837 tmp_vec_dsts,
2838 gsi, slp_node, code1, code2,
2839 decl1, decl2, op_type,
2840 &prev_stmt_info);
2843 VEC_free (tree, heap, vec_dsts);
2844 VEC_free (tree, heap, tmp_vec_dsts);
2845 VEC_free (tree, heap, interm_types);
2846 VEC_free (tree, heap, vec_oprnds0);
2847 VEC_free (tree, heap, vec_oprnds1);
2849 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2850 return true;
2854 /* Function vectorizable_store.
2856 Check if STMT defines a non-scalar data-ref (array/pointer/structure) that
2857 can be vectorized.
2858 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2859 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2860 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2862 static bool
2863 vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
2864 slp_tree slp_node)
2866 tree scalar_dest;
2867 tree data_ref;
2868 tree op;
2869 tree vec_oprnd = NULL_TREE;
2870 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2871 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
2872 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2873 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2874 struct loop *loop = NULL;
2875 enum machine_mode vec_mode;
2876 tree dummy;
2877 enum dr_alignment_support alignment_support_scheme;
2878 tree def;
2879 gimple def_stmt;
2880 enum vect_def_type dt;
2881 stmt_vec_info prev_stmt_info = NULL;
2882 tree dataref_ptr = NULL_TREE;
2883 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
2884 int ncopies;
2885 int j;
2886 gimple next_stmt, first_stmt = NULL;
2887 bool strided_store = false;
2888 unsigned int group_size, i;
2889 VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
2890 bool inv_p;
2891 VEC(tree,heap) *vec_oprnds = NULL;
2892 bool slp = (slp_node != NULL);
2893 stmt_vec_info first_stmt_vinfo;
2894 unsigned int vec_num;
2895 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2897 if (loop_vinfo)
2898 loop = LOOP_VINFO_LOOP (loop_vinfo);
2900 /* Multiple types in SLP are handled by creating the appropriate number of
2901 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2902 case of SLP. */
2903 if (slp)
2904 ncopies = 1;
2905 else
2906 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
2908 gcc_assert (ncopies >= 1);
2910 /* FORNOW. This restriction should be relaxed. */
2911 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
2913 if (vect_print_dump_info (REPORT_DETAILS))
2914 fprintf (vect_dump, "multiple types in nested loop.");
2915 return false;
2918 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2919 return false;
2921 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2922 return false;
2924 /* Is vectorizable store? */
2926 if (!is_gimple_assign (stmt))
2927 return false;
2929 scalar_dest = gimple_assign_lhs (stmt);
2930 if (TREE_CODE (scalar_dest) != ARRAY_REF
2931 && TREE_CODE (scalar_dest) != INDIRECT_REF
2932 && TREE_CODE (scalar_dest) != COMPONENT_REF
2933 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
2934 && TREE_CODE (scalar_dest) != REALPART_EXPR)
2935 return false;
2937 gcc_assert (gimple_assign_single_p (stmt));
2938 op = gimple_assign_rhs1 (stmt);
2939 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt))
2941 if (vect_print_dump_info (REPORT_DETAILS))
2942 fprintf (vect_dump, "use not simple.");
2943 return false;
2946 /* The scalar rhs type needs to be trivially convertible to the vector
2947 component type. This should always be the case. */
2948 if (!useless_type_conversion_p (TREE_TYPE (vectype), TREE_TYPE (op)))
2950 if (vect_print_dump_info (REPORT_DETAILS))
2951 fprintf (vect_dump, "??? operands of different types");
2952 return false;
2955 vec_mode = TYPE_MODE (vectype);
2956 /* FORNOW. In some cases we can vectorize even if the data type is not
2957 supported (e.g. - array initialization with 0). */
2958 if (optab_handler (mov_optab, (int)vec_mode)->insn_code == CODE_FOR_nothing)
2959 return false;
2961 if (!STMT_VINFO_DATA_REF (stmt_info))
2962 return false;
2964 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
2966 strided_store = true;
2967 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
2968 if (!vect_strided_store_supported (vectype)
2969 && !PURE_SLP_STMT (stmt_info) && !slp)
2970 return false;
2972 if (first_stmt == stmt)
2974 /* STMT is the leader of the group. Check the operands of all the
2975 stmts of the group. */
2976 next_stmt = DR_GROUP_NEXT_DR (stmt_info);
2977 while (next_stmt)
2979 gcc_assert (gimple_assign_single_p (next_stmt));
2980 op = gimple_assign_rhs1 (next_stmt);
2981 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt,
2982 &def, &dt))
2984 if (vect_print_dump_info (REPORT_DETAILS))
2985 fprintf (vect_dump, "use not simple.");
2986 return false;
2988 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
2993 if (!vec_stmt) /* transformation not required. */
2995 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
2996 vect_model_store_cost (stmt_info, ncopies, dt, NULL);
2997 return true;
3000 /** Transform. **/
3002 if (strided_store)
3004 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3005 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3007 DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
3009 /* FORNOW */
3010 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
3012 /* We vectorize all the stmts of the interleaving group when we
3013 reach the last stmt in the group. */
3014 if (DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
3015 < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))
3016 && !slp)
3018 *vec_stmt = NULL;
3019 return true;
3022 if (slp)
3023 strided_store = false;
3025 /* VEC_NUM is the number of vect stmts to be created for this group. */
3026 if (slp)
3027 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3028 else
3029 vec_num = group_size;
3031 else
3033 first_stmt = stmt;
3034 first_dr = dr;
3035 group_size = vec_num = 1;
3036 first_stmt_vinfo = stmt_info;
3039 if (vect_print_dump_info (REPORT_DETAILS))
3040 fprintf (vect_dump, "transform store. ncopies = %d",ncopies);
3042 dr_chain = VEC_alloc (tree, heap, group_size);
3043 oprnds = VEC_alloc (tree, heap, group_size);
3045 alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
3046 gcc_assert (alignment_support_scheme);
3048 /* In case the vectorization factor (VF) is bigger than the number
3049 of elements that we can fit in a vectype (nunits), we have to generate
3050 more than one vector stmt - i.e - we need to "unroll" the
3051 vector stmt by a factor VF/nunits. For more details see documentation in
3052 vect_get_vec_def_for_copy_stmt. */
3054 /* In case of interleaving (non-unit strided access):
3056 S1: &base + 2 = x2
3057 S2: &base = x0
3058 S3: &base + 1 = x1
3059 S4: &base + 3 = x3
3061 We create vectorized stores starting from base address (the access of the
3062 first stmt in the chain (S2 in the above example), when the last store stmt
3063 of the chain (S4) is reached:
3065 VS1: &base = vx2
3066 VS2: &base + vec_size*1 = vx0
3067 VS3: &base + vec_size*2 = vx1
3068 VS4: &base + vec_size*3 = vx3
3070 Then permutation statements are generated:
3072 VS5: vx5 = VEC_INTERLEAVE_HIGH_EXPR < vx0, vx3 >
3073 VS6: vx6 = VEC_INTERLEAVE_LOW_EXPR < vx0, vx3 >
3076 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3077 (the order of the data-refs in the output of vect_permute_store_chain
3078 corresponds to the order of scalar stmts in the interleaving chain - see
3079 the documentation of vect_permute_store_chain()).
3081 In case of both multiple types and interleaving, above vector stores and
3082 permutation stmts are created for every copy. The result vector stmts are
3083 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
3084 STMT_VINFO_RELATED_STMT for the next copies.
3087 prev_stmt_info = NULL;
3088 for (j = 0; j < ncopies; j++)
3090 gimple new_stmt;
3091 gimple ptr_incr;
3093 if (j == 0)
3095 if (slp)
3097 /* Get vectorized arguments for SLP_NODE. */
3098 vect_get_slp_defs (slp_node, &vec_oprnds, NULL);
3100 vec_oprnd = VEC_index (tree, vec_oprnds, 0);
3102 else
3104 /* For interleaved stores we collect vectorized defs for all the
3105 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
3106 used as an input to vect_permute_store_chain(), and OPRNDS as
3107 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
3109 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3110 OPRNDS are of size 1. */
3111 next_stmt = first_stmt;
3112 for (i = 0; i < group_size; i++)
3114 /* Since gaps are not supported for interleaved stores,
3115 GROUP_SIZE is the exact number of stmts in the chain.
3116 Therefore, NEXT_STMT can't be NULL_TREE. In case that
3117 there is no interleaving, GROUP_SIZE is 1, and only one
3118 iteration of the loop will be executed. */
3119 gcc_assert (next_stmt
3120 && gimple_assign_single_p (next_stmt));
3121 op = gimple_assign_rhs1 (next_stmt);
3123 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
3124 NULL);
3125 VEC_quick_push (tree, dr_chain, vec_oprnd);
3126 VEC_quick_push (tree, oprnds, vec_oprnd);
3127 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3131 /* We should have caught mismatched types earlier. */
3132 gcc_assert (useless_type_conversion_p (vectype,
3133 TREE_TYPE (vec_oprnd)));
3134 dataref_ptr = vect_create_data_ref_ptr (first_stmt, NULL, NULL_TREE,
3135 &dummy, &ptr_incr, false,
3136 &inv_p);
3137 gcc_assert (bb_vinfo || !inv_p);
3139 else
3141 /* For interleaved stores we created vectorized defs for all the
3142 defs stored in OPRNDS in the previous iteration (previous copy).
3143 DR_CHAIN is then used as an input to vect_permute_store_chain(),
3144 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
3145 next copy.
3146 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3147 OPRNDS are of size 1. */
3148 for (i = 0; i < group_size; i++)
3150 op = VEC_index (tree, oprnds, i);
3151 vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def,
3152 &dt);
3153 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
3154 VEC_replace (tree, dr_chain, i, vec_oprnd);
3155 VEC_replace (tree, oprnds, i, vec_oprnd);
3157 dataref_ptr =
3158 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
3161 if (strided_store)
3163 result_chain = VEC_alloc (tree, heap, group_size);
3164 /* Permute. */
3165 if (!vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
3166 &result_chain))
3167 return false;
3170 next_stmt = first_stmt;
3171 for (i = 0; i < vec_num; i++)
3173 if (i > 0)
3174 /* Bump the vector pointer. */
3175 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3176 NULL_TREE);
3178 if (slp)
3179 vec_oprnd = VEC_index (tree, vec_oprnds, i);
3180 else if (strided_store)
3181 /* For strided stores vectorized defs are interleaved in
3182 vect_permute_store_chain(). */
3183 vec_oprnd = VEC_index (tree, result_chain, i);
3185 if (aligned_access_p (first_dr))
3186 data_ref = build_fold_indirect_ref (dataref_ptr);
3187 else
3189 int mis = DR_MISALIGNMENT (first_dr);
3190 tree tmis = (mis == -1 ? size_zero_node : size_int (mis));
3191 tmis = size_binop (MULT_EXPR, tmis, size_int (BITS_PER_UNIT));
3192 data_ref = build2 (MISALIGNED_INDIRECT_REF, vectype, dataref_ptr, tmis);
3195 /* If accesses through a pointer to vectype do not alias the original
3196 memory reference we have a problem. This should never happen. */
3197 gcc_assert (alias_sets_conflict_p (get_alias_set (data_ref),
3198 get_alias_set (gimple_assign_lhs (stmt))));
3200 /* Arguments are ready. Create the new vector stmt. */
3201 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
3202 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3203 mark_symbols_for_renaming (new_stmt);
3205 if (slp)
3206 continue;
3208 if (j == 0)
3209 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3210 else
3211 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3213 prev_stmt_info = vinfo_for_stmt (new_stmt);
3214 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3215 if (!next_stmt)
3216 break;
3220 VEC_free (tree, heap, dr_chain);
3221 VEC_free (tree, heap, oprnds);
3222 if (result_chain)
3223 VEC_free (tree, heap, result_chain);
3225 return true;
3228 /* vectorizable_load.
3230 Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
3231 can be vectorized.
3232 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3233 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3234 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3236 static bool
3237 vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
3238 slp_tree slp_node, slp_instance slp_node_instance)
3240 tree scalar_dest;
3241 tree vec_dest = NULL;
3242 tree data_ref = NULL;
3243 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3244 stmt_vec_info prev_stmt_info;
3245 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3246 struct loop *loop = NULL;
3247 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
3248 bool nested_in_vect_loop = false;
3249 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
3250 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3251 tree new_temp;
3252 int mode;
3253 gimple new_stmt = NULL;
3254 tree dummy;
3255 enum dr_alignment_support alignment_support_scheme;
3256 tree dataref_ptr = NULL_TREE;
3257 gimple ptr_incr;
3258 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3259 int ncopies;
3260 int i, j, group_size;
3261 tree msq = NULL_TREE, lsq;
3262 tree offset = NULL_TREE;
3263 tree realignment_token = NULL_TREE;
3264 gimple phi = NULL;
3265 VEC(tree,heap) *dr_chain = NULL;
3266 bool strided_load = false;
3267 gimple first_stmt;
3268 tree scalar_type;
3269 bool inv_p;
3270 bool compute_in_loop = false;
3271 struct loop *at_loop;
3272 int vec_num;
3273 bool slp = (slp_node != NULL);
3274 bool slp_perm = false;
3275 enum tree_code code;
3276 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3277 int vf;
3279 if (loop_vinfo)
3281 loop = LOOP_VINFO_LOOP (loop_vinfo);
3282 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
3283 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3285 else
3286 /* FORNOW: multiple types are not supported in basic block SLP. */
3287 vf = nunits;
3289 /* Multiple types in SLP are handled by creating the appropriate number of
3290 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3291 case of SLP. */
3292 if (slp)
3293 ncopies = 1;
3294 else
3295 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3297 gcc_assert (ncopies >= 1);
3299 /* FORNOW. This restriction should be relaxed. */
3300 if (nested_in_vect_loop && ncopies > 1)
3302 if (vect_print_dump_info (REPORT_DETAILS))
3303 fprintf (vect_dump, "multiple types in nested loop.");
3304 return false;
3307 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3308 return false;
3310 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3311 return false;
3313 /* Is vectorizable load? */
3314 if (!is_gimple_assign (stmt))
3315 return false;
3317 scalar_dest = gimple_assign_lhs (stmt);
3318 if (TREE_CODE (scalar_dest) != SSA_NAME)
3319 return false;
3321 code = gimple_assign_rhs_code (stmt);
3322 if (code != ARRAY_REF
3323 && code != INDIRECT_REF
3324 && code != COMPONENT_REF
3325 && code != IMAGPART_EXPR
3326 && code != REALPART_EXPR)
3327 return false;
3329 if (!STMT_VINFO_DATA_REF (stmt_info))
3330 return false;
3332 scalar_type = TREE_TYPE (DR_REF (dr));
3333 mode = (int) TYPE_MODE (vectype);
3335 /* FORNOW. In some cases we can vectorize even if the data type is not
3336 supported (e.g. - data copies). */
3337 if (optab_handler (mov_optab, mode)->insn_code == CODE_FOR_nothing)
3339 if (vect_print_dump_info (REPORT_DETAILS))
3340 fprintf (vect_dump, "Aligned load, but unsupported type.");
3341 return false;
3344 /* The vector component type needs to be trivially convertible to the
3345 scalar lhs. This should always be the case. */
3346 if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), TREE_TYPE (vectype)))
3348 if (vect_print_dump_info (REPORT_DETAILS))
3349 fprintf (vect_dump, "??? operands of different types");
3350 return false;
3353 /* Check if the load is a part of an interleaving chain. */
3354 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3356 strided_load = true;
3357 /* FORNOW */
3358 gcc_assert (! nested_in_vect_loop);
3360 /* Check if interleaving is supported. */
3361 if (!vect_strided_load_supported (vectype)
3362 && !PURE_SLP_STMT (stmt_info) && !slp)
3363 return false;
3366 if (!vec_stmt) /* transformation not required. */
3368 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
3369 vect_model_load_cost (stmt_info, ncopies, NULL);
3370 return true;
3373 if (vect_print_dump_info (REPORT_DETAILS))
3374 fprintf (vect_dump, "transform load.");
3376 /** Transform. **/
3378 if (strided_load)
3380 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
3381 /* Check if the chain of loads is already vectorized. */
3382 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt)))
3384 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3385 return true;
3387 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3388 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3390 /* VEC_NUM is the number of vect stmts to be created for this group. */
3391 if (slp)
3393 strided_load = false;
3394 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3395 if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance))
3396 slp_perm = true;
3398 else
3399 vec_num = group_size;
3401 dr_chain = VEC_alloc (tree, heap, vec_num);
3403 else
3405 first_stmt = stmt;
3406 first_dr = dr;
3407 group_size = vec_num = 1;
3410 alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
3411 gcc_assert (alignment_support_scheme);
3413 /* In case the vectorization factor (VF) is bigger than the number
3414 of elements that we can fit in a vectype (nunits), we have to generate
3415 more than one vector stmt - i.e - we need to "unroll" the
3416 vector stmt by a factor VF/nunits. In doing so, we record a pointer
3417 from one copy of the vector stmt to the next, in the field
3418 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
3419 stages to find the correct vector defs to be used when vectorizing
3420 stmts that use the defs of the current stmt. The example below illustrates
3421 the vectorization process when VF=16 and nunits=4 (i.e - we need to create
3422 4 vectorized stmts):
3424 before vectorization:
3425 RELATED_STMT VEC_STMT
3426 S1: x = memref - -
3427 S2: z = x + 1 - -
3429 step 1: vectorize stmt S1:
3430 We first create the vector stmt VS1_0, and, as usual, record a
3431 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
3432 Next, we create the vector stmt VS1_1, and record a pointer to
3433 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
3434 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
3435 stmts and pointers:
3436 RELATED_STMT VEC_STMT
3437 VS1_0: vx0 = memref0 VS1_1 -
3438 VS1_1: vx1 = memref1 VS1_2 -
3439 VS1_2: vx2 = memref2 VS1_3 -
3440 VS1_3: vx3 = memref3 - -
3441 S1: x = load - VS1_0
3442 S2: z = x + 1 - -
3444 See in documentation in vect_get_vec_def_for_stmt_copy for how the
3445 information we recorded in RELATED_STMT field is used to vectorize
3446 stmt S2. */
3448 /* In case of interleaving (non-unit strided access):
3450 S1: x2 = &base + 2
3451 S2: x0 = &base
3452 S3: x1 = &base + 1
3453 S4: x3 = &base + 3
3455 Vectorized loads are created in the order of memory accesses
3456 starting from the access of the first stmt of the chain:
3458 VS1: vx0 = &base
3459 VS2: vx1 = &base + vec_size*1
3460 VS3: vx2 = &base + vec_size*2
3461 VS4: vx3 = &base + vec_size*3
3463 Then permutation statements are generated:
3465 VS5: vx5 = VEC_EXTRACT_EVEN_EXPR < vx0, vx1 >
3466 VS6: vx6 = VEC_EXTRACT_ODD_EXPR < vx0, vx1 >
3469 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3470 (the order of the data-refs in the output of vect_permute_load_chain
3471 corresponds to the order of scalar stmts in the interleaving chain - see
3472 the documentation of vect_permute_load_chain()).
3473 The generation of permutation stmts and recording them in
3474 STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().
3476 In case of both multiple types and interleaving, the vector loads and
3477 permutation stmts above are created for every copy. The result vector stmts
3478 are put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
3479 STMT_VINFO_RELATED_STMT for the next copies. */
3481 /* If the data reference is aligned (dr_aligned) or potentially unaligned
3482 on a target that supports unaligned accesses (dr_unaligned_supported)
3483 we generate the following code:
3484 p = initial_addr;
3485 indx = 0;
3486 loop {
3487 p = p + indx * vectype_size;
3488 vec_dest = *(p);
3489 indx = indx + 1;
3492 Otherwise, the data reference is potentially unaligned on a target that
3493 does not support unaligned accesses (dr_explicit_realign_optimized) -
3494 then generate the following code, in which the data in each iteration is
3495 obtained by two vector loads, one from the previous iteration, and one
3496 from the current iteration:
3497 p1 = initial_addr;
3498 msq_init = *(floor(p1))
3499 p2 = initial_addr + VS - 1;
3500 realignment_token = call target_builtin;
3501 indx = 0;
3502 loop {
3503 p2 = p2 + indx * vectype_size
3504 lsq = *(floor(p2))
3505 vec_dest = realign_load (msq, lsq, realignment_token)
3506 indx = indx + 1;
3507 msq = lsq;
3508 } */
3510 /* If the misalignment remains the same throughout the execution of the
3511 loop, we can create the init_addr and permutation mask at the loop
3512 preheader. Otherwise, it needs to be created inside the loop.
3513 This can only occur when vectorizing memory accesses in the inner-loop
3514 nested within an outer-loop that is being vectorized. */
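/* For example (the layout is illustrative only): when the outer loop over i
   is vectorized and the inner loop loads a[i][j], a row length that is not
   a multiple of the vector size makes DR_STEP not a multiple of the vector
   size either, so the misalignment changes from one outer-loop iteration to
   the next and the realignment data must be computed inside the loop. */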
3516 if (loop && nested_in_vect_loop_p (loop, stmt)
3517 && (TREE_INT_CST_LOW (DR_STEP (dr))
3518 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
3520 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
3521 compute_in_loop = true;
3524 if ((alignment_support_scheme == dr_explicit_realign_optimized
3525 || alignment_support_scheme == dr_explicit_realign)
3526 && !compute_in_loop)
3528 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
3529 alignment_support_scheme, NULL_TREE,
3530 &at_loop);
3531 if (alignment_support_scheme == dr_explicit_realign_optimized)
3533 phi = SSA_NAME_DEF_STMT (msq);
3534 offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
3537 else
3538 at_loop = loop;
3540 prev_stmt_info = NULL;
3541 for (j = 0; j < ncopies; j++)
3543 /* 1. Create the vector pointer update chain. */
3544 if (j == 0)
3545 dataref_ptr = vect_create_data_ref_ptr (first_stmt,
3546 at_loop, offset,
3547 &dummy, &ptr_incr, false,
3548 &inv_p);
3549 else
3550 dataref_ptr =
3551 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
3553 for (i = 0; i < vec_num; i++)
3555 if (i > 0)
3556 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3557 NULL_TREE);
3559 /* 2. Create the vector-load in the loop. */
3560 switch (alignment_support_scheme)
3562 case dr_aligned:
3563 gcc_assert (aligned_access_p (first_dr));
3564 data_ref = build_fold_indirect_ref (dataref_ptr);
3565 break;
3566 case dr_unaligned_supported:
3568 int mis = DR_MISALIGNMENT (first_dr);
3569 tree tmis = (mis == -1 ? size_zero_node : size_int (mis));
3571 tmis = size_binop (MULT_EXPR, tmis, size_int (BITS_PER_UNIT));
3572 data_ref =
3573 build2 (MISALIGNED_INDIRECT_REF, vectype, dataref_ptr, tmis);
3574 break;
3576 case dr_explicit_realign:
3578 tree ptr, bump;
3579 tree vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
3581 if (compute_in_loop)
3582 msq = vect_setup_realignment (first_stmt, gsi,
3583 &realignment_token,
3584 dr_explicit_realign,
3585 dataref_ptr, NULL);
3587 data_ref = build1 (ALIGN_INDIRECT_REF, vectype, dataref_ptr);
3588 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3589 new_stmt = gimple_build_assign (vec_dest, data_ref);
3590 new_temp = make_ssa_name (vec_dest, new_stmt);
3591 gimple_assign_set_lhs (new_stmt, new_temp);
3592 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
3593 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
3594 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3595 msq = new_temp;
3597 bump = size_binop (MULT_EXPR, vs_minus_1,
3598 TYPE_SIZE_UNIT (scalar_type));
3599 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
3600 data_ref = build1 (ALIGN_INDIRECT_REF, vectype, ptr);
3601 break;
3603 case dr_explicit_realign_optimized:
3604 data_ref = build1 (ALIGN_INDIRECT_REF, vectype, dataref_ptr);
3605 break;
3606 default:
3607 gcc_unreachable ();
3609 /* If accesses through a pointer to vectype do not alias the original
3610 memory reference we have a problem. This should never happen. */
3611 gcc_assert (alias_sets_conflict_p (get_alias_set (data_ref),
3612 get_alias_set (gimple_assign_rhs1 (stmt))));
3613 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3614 new_stmt = gimple_build_assign (vec_dest, data_ref);
3615 new_temp = make_ssa_name (vec_dest, new_stmt);
3616 gimple_assign_set_lhs (new_stmt, new_temp);
3617 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3618 mark_symbols_for_renaming (new_stmt);
3620 /* 3. Handle explicit realignment if necessary/supported. Create in
3621 loop: vec_dest = realign_load (msq, lsq, realignment_token) */
3622 if (alignment_support_scheme == dr_explicit_realign_optimized
3623 || alignment_support_scheme == dr_explicit_realign)
3625 tree tmp;
3627 lsq = gimple_assign_lhs (new_stmt);
3628 if (!realignment_token)
3629 realignment_token = dataref_ptr;
3630 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3631 tmp = build3 (REALIGN_LOAD_EXPR, vectype, msq, lsq,
3632 realignment_token);
3633 new_stmt = gimple_build_assign (vec_dest, tmp);
3634 new_temp = make_ssa_name (vec_dest, new_stmt);
3635 gimple_assign_set_lhs (new_stmt, new_temp);
3636 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3638 if (alignment_support_scheme == dr_explicit_realign_optimized)
3640 gcc_assert (phi);
3641 if (i == vec_num - 1 && j == ncopies - 1)
3642 add_phi_arg (phi, lsq, loop_latch_edge (containing_loop),
3643 UNKNOWN_LOCATION);
3644 msq = lsq;
3648 /* 4. Handle invariant-load. */
3649 if (inv_p && !bb_vinfo)
3651 gcc_assert (!strided_load);
3652 gcc_assert (nested_in_vect_loop_p (loop, stmt));
3653 if (j == 0)
3655 int k;
3656 tree t = NULL_TREE;
3657 tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);
3659 /* CHECKME: bitpos depends on endianness? */
3660 bitpos = bitsize_zero_node;
3661 vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
3662 bitsize, bitpos);
3663 vec_dest =
3664 vect_create_destination_var (scalar_dest, NULL_TREE);
3665 new_stmt = gimple_build_assign (vec_dest, vec_inv);
3666 new_temp = make_ssa_name (vec_dest, new_stmt);
3667 gimple_assign_set_lhs (new_stmt, new_temp);
3668 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3670 for (k = nunits - 1; k >= 0; --k)
3671 t = tree_cons (NULL_TREE, new_temp, t);
3672 /* FIXME: use build_constructor directly. */
3673 vec_inv = build_constructor_from_list (vectype, t);
3674 new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
3675 new_stmt = SSA_NAME_DEF_STMT (new_temp);
3677 else
3678 gcc_unreachable (); /* FORNOW. */
3681 /* Collect vector loads and later create their permutation in
3682 vect_transform_strided_load (). */
3683 if (strided_load || slp_perm)
3684 VEC_quick_push (tree, dr_chain, new_temp);
3686 /* Store vector loads in the corresponding SLP_NODE. */
3687 if (slp && !slp_perm)
3688 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
3691 if (slp && !slp_perm)
3692 continue;
3694 if (slp_perm)
3696 if (!vect_transform_slp_perm_load (stmt, dr_chain, gsi, vf,
3697 slp_node_instance, false))
3699 VEC_free (tree, heap, dr_chain);
3700 return false;
3703 else
3705 if (strided_load)
3707 if (!vect_transform_strided_load (stmt, dr_chain, group_size, gsi))
3708 return false;
3710 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3711 VEC_free (tree, heap, dr_chain);
3712 dr_chain = VEC_alloc (tree, heap, group_size);
3714 else
3716 if (j == 0)
3717 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3718 else
3719 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3720 prev_stmt_info = vinfo_for_stmt (new_stmt);
3725 if (dr_chain)
3726 VEC_free (tree, heap, dr_chain);
3728 return true;
3731 /* Function vect_is_simple_cond.
3733 Input:
3734 LOOP - the loop that is being vectorized.
3735 COND - Condition that is checked for simple use.
3737 Returns whether COND can be vectorized. Checks whether the
3738 condition operands are supportable using vect_is_simple_use. */
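 /* For example (illustration only), a condition such as "a_5 < 10" is simple
    when a_5 has a supportable definition (constant, loop invariant, or a stmt
    of the current iteration) and the other operand is a constant; a condition
    whose operand has an unsupported definition is rejected.  */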
3740 static bool
3741 vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo)
3743 tree lhs, rhs;
3744 tree def;
3745 enum vect_def_type dt;
3747 if (!COMPARISON_CLASS_P (cond))
3748 return false;
3750 lhs = TREE_OPERAND (cond, 0);
3751 rhs = TREE_OPERAND (cond, 1);
3753 if (TREE_CODE (lhs) == SSA_NAME)
3755 gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
3756 if (!vect_is_simple_use (lhs, loop_vinfo, NULL, &lhs_def_stmt, &def,
3757 &dt))
3758 return false;
3760 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
3761 && TREE_CODE (lhs) != FIXED_CST)
3762 return false;
3764 if (TREE_CODE (rhs) == SSA_NAME)
3766 gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
3767 if (!vect_is_simple_use (rhs, loop_vinfo, NULL, &rhs_def_stmt, &def,
3768 &dt))
3769 return false;
3771 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
3772 && TREE_CODE (rhs) != FIXED_CST)
3773 return false;
3775 return true;
3778 /* vectorizable_condition.
3780 Check if STMT is a conditional modify expression that can be vectorized.
3781 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3782 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
3783 at GSI.
3785 When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
3786 to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
3787 the else clause if it is 2).
3789 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
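 /* For illustration (not taken from this file), a scalar statement

        x_1 = a_2 < b_3 ? c_4 : d_5;

    is replaced below by a vector statement of the form

        vx = VEC_COND_EXPR <va < vb, vc, vd>;

    built from the vectorized condition operands and clauses.  */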
3791 bool
3792 vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
3793 gimple *vec_stmt, tree reduc_def, int reduc_index)
3795 tree scalar_dest = NULL_TREE;
3796 tree vec_dest = NULL_TREE;
3797 tree op = NULL_TREE;
3798 tree cond_expr, then_clause, else_clause;
3799 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3800 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3801 tree vec_cond_lhs, vec_cond_rhs, vec_then_clause, vec_else_clause;
3802 tree vec_compare, vec_cond_expr;
3803 tree new_temp;
3804 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3805 enum machine_mode vec_mode;
3806 tree def;
3807 enum vect_def_type dt;
3808 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3809 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3810 enum tree_code code;
3812 /* FORNOW: unsupported in basic block SLP. */
3813 gcc_assert (loop_vinfo);
3815 gcc_assert (ncopies >= 1);
3816 if (ncopies > 1)
3817 return false; /* FORNOW */
3819 if (!STMT_VINFO_RELEVANT_P (stmt_info))
3820 return false;
3822 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
3823 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
3824 && reduc_def))
3825 return false;
3827 /* FORNOW: SLP not supported. */
3828 if (STMT_SLP_TYPE (stmt_info))
3829 return false;
3831 /* FORNOW: not yet supported. */
3832 if (STMT_VINFO_LIVE_P (stmt_info))
3834 if (vect_print_dump_info (REPORT_DETAILS))
3835 fprintf (vect_dump, "value used after loop.");
3836 return false;
3839 /* Is this a vectorizable conditional operation? */
3840 if (!is_gimple_assign (stmt))
3841 return false;
3843 code = gimple_assign_rhs_code (stmt);
3845 if (code != COND_EXPR)
3846 return false;
3848 gcc_assert (gimple_assign_single_p (stmt));
3849 op = gimple_assign_rhs1 (stmt);
3850 cond_expr = TREE_OPERAND (op, 0);
3851 then_clause = TREE_OPERAND (op, 1);
3852 else_clause = TREE_OPERAND (op, 2);
3854 if (!vect_is_simple_cond (cond_expr, loop_vinfo))
3855 return false;
3857 /* We do not handle two different vector types for the condition
3858 and the values. */
3859 if (TREE_TYPE (TREE_OPERAND (cond_expr, 0)) != TREE_TYPE (vectype))
3860 return false;
3862 if (TREE_CODE (then_clause) == SSA_NAME)
3864 gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
3865 if (!vect_is_simple_use (then_clause, loop_vinfo, NULL,
3866 &then_def_stmt, &def, &dt))
3867 return false;
3869 else if (TREE_CODE (then_clause) != INTEGER_CST
3870 && TREE_CODE (then_clause) != REAL_CST
3871 && TREE_CODE (then_clause) != FIXED_CST)
3872 return false;
3874 if (TREE_CODE (else_clause) == SSA_NAME)
3876 gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
3877 if (!vect_is_simple_use (else_clause, loop_vinfo, NULL,
3878 &else_def_stmt, &def, &dt))
3879 return false;
3881 else if (TREE_CODE (else_clause) != INTEGER_CST
3882 && TREE_CODE (else_clause) != REAL_CST
3883 && TREE_CODE (else_clause) != FIXED_CST)
3884 return false;
3887 vec_mode = TYPE_MODE (vectype);
3889 if (!vec_stmt)
3891 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
3892 return expand_vec_cond_expr_p (TREE_TYPE (op), vec_mode);
3895 /* Transform. */
3897 /* Handle def. */
3898 scalar_dest = gimple_assign_lhs (stmt);
3899 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3901 /* Handle cond expr. */
3902 vec_cond_lhs =
3903 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0), stmt, NULL);
3904 vec_cond_rhs =
3905 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1), stmt, NULL);
3906 if (reduc_index == 1)
3907 vec_then_clause = reduc_def;
3908 else
3909 vec_then_clause = vect_get_vec_def_for_operand (then_clause, stmt, NULL);
3910 if (reduc_index == 2)
3911 vec_else_clause = reduc_def;
3912 else
3913 vec_else_clause = vect_get_vec_def_for_operand (else_clause, stmt, NULL);
3915 /* Arguments are ready. Create the new vector stmt. */
3916 vec_compare = build2 (TREE_CODE (cond_expr), vectype,
3917 vec_cond_lhs, vec_cond_rhs);
3918 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
3919 vec_compare, vec_then_clause, vec_else_clause);
3921 *vec_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
3922 new_temp = make_ssa_name (vec_dest, *vec_stmt);
3923 gimple_assign_set_lhs (*vec_stmt, new_temp);
3924 vect_finish_stmt_generation (stmt, *vec_stmt, gsi);
3926 return true;
3930 /* Make sure the statement is vectorizable. */
3932 bool
3933 vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
3935 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3936 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3937 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
3938 bool ok;
3939 HOST_WIDE_INT dummy;
3940 tree scalar_type, vectype;
3942 if (vect_print_dump_info (REPORT_DETAILS))
3944 fprintf (vect_dump, "==> examining statement: ");
3945 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
3948 if (gimple_has_volatile_ops (stmt))
3950 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
3951 fprintf (vect_dump, "not vectorized: stmt has volatile operands");
3953 return false;
3956 /* Skip stmts that do not need to be vectorized. In loops this is expected
3957 to include:
3958 - the COND_EXPR which is the loop exit condition
3959 - any LABEL_EXPRs in the loop
3960 - computations that are used only for array indexing or loop control.
3961 In basic blocks we only analyze statements that are a part of some SLP
3962 instance, therefore, all the statements are relevant. */
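 /* E.g. (illustration only) an index update such as "i_7 = i_6 + 1" that is
    used only to address the data references is neither relevant nor live,
    and is therefore skipped here.  */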
3964 if (!STMT_VINFO_RELEVANT_P (stmt_info)
3965 && !STMT_VINFO_LIVE_P (stmt_info))
3967 if (vect_print_dump_info (REPORT_DETAILS))
3968 fprintf (vect_dump, "irrelevant.");
3970 return true;
3973 switch (STMT_VINFO_DEF_TYPE (stmt_info))
3975 case vect_internal_def:
3976 break;
3978 case vect_reduction_def:
3979 case vect_nested_cycle:
3980 gcc_assert (!bb_vinfo && (relevance == vect_used_in_outer
3981 || relevance == vect_used_in_outer_by_reduction
3982 || relevance == vect_unused_in_scope));
3983 break;
3985 case vect_induction_def:
3986 case vect_constant_def:
3987 case vect_external_def:
3988 case vect_unknown_def_type:
3989 default:
3990 gcc_unreachable ();
3993 if (bb_vinfo)
3995 gcc_assert (PURE_SLP_STMT (stmt_info));
3997 scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy);
3998 if (vect_print_dump_info (REPORT_DETAILS))
4000 fprintf (vect_dump, "get vectype for scalar type: ");
4001 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4004 vectype = get_vectype_for_scalar_type (scalar_type);
4005 if (!vectype)
4007 if (vect_print_dump_info (REPORT_DETAILS))
4009 fprintf (vect_dump, "not SLPed: unsupported data-type ");
4010 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4012 return false;
4015 if (vect_print_dump_info (REPORT_DETAILS))
4017 fprintf (vect_dump, "vectype: ");
4018 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4021 STMT_VINFO_VECTYPE (stmt_info) = vectype;
4024 if (STMT_VINFO_RELEVANT_P (stmt_info))
4026 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
4027 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
4028 *need_to_vectorize = true;
4031 ok = true;
4032 if (!bb_vinfo
4033 && (STMT_VINFO_RELEVANT_P (stmt_info)
4034 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
4035 ok = (vectorizable_type_promotion (stmt, NULL, NULL, NULL)
4036 || vectorizable_type_demotion (stmt, NULL, NULL, NULL)
4037 || vectorizable_conversion (stmt, NULL, NULL, NULL)
4038 || vectorizable_operation (stmt, NULL, NULL, NULL)
4039 || vectorizable_assignment (stmt, NULL, NULL, NULL)
4040 || vectorizable_load (stmt, NULL, NULL, NULL, NULL)
4041 || vectorizable_call (stmt, NULL, NULL)
4042 || vectorizable_store (stmt, NULL, NULL, NULL)
4043 || vectorizable_reduction (stmt, NULL, NULL)
4044 || vectorizable_condition (stmt, NULL, NULL, NULL, 0));
4045 else
4047 if (bb_vinfo)
4048 ok = (vectorizable_operation (stmt, NULL, NULL, node)
4049 || vectorizable_assignment (stmt, NULL, NULL, node)
4050 || vectorizable_load (stmt, NULL, NULL, node, NULL)
4051 || vectorizable_store (stmt, NULL, NULL, node));
4054 if (!ok)
4056 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4058 fprintf (vect_dump, "not vectorized: relevant stmt not ");
4059 fprintf (vect_dump, "supported: ");
4060 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4063 return false;
4066 if (bb_vinfo)
4067 return true;
4069 /* Stmts that are (also) "live" (i.e., that are used outside the loop)
4070 need extra handling, except for vectorizable reductions. */
4071 if (STMT_VINFO_LIVE_P (stmt_info)
4072 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4073 ok = vectorizable_live_operation (stmt, NULL, NULL);
4075 if (!ok)
4077 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4079 fprintf (vect_dump, "not vectorized: live stmt not ");
4080 fprintf (vect_dump, "supported: ");
4081 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4084 return false;
4087 if (!PURE_SLP_STMT (stmt_info))
4089 /* Groups of strided accesses whose size is not a power of 2 cannot yet
4090 be vectorized using loop-based vectorization. Therefore, if this stmt
4091 feeds non-SLP-able stmts (i.e., this stmt has to be both SLPed and
4092 loop-based vectorized), the loop cannot be vectorized. */
4093 if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
4094 && exact_log2 (DR_GROUP_SIZE (vinfo_for_stmt (
4095 DR_GROUP_FIRST_DR (stmt_info)))) == -1)
4097 if (vect_print_dump_info (REPORT_DETAILS))
4099 fprintf (vect_dump, "not vectorized: the size of group "
4100 "of strided accesses is not a power of 2");
4101 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4104 return false;
4108 return true;
4112 /* Function vect_transform_stmt.
4114 Create a vectorized stmt to replace STMT, and insert it at BSI. */
4116 bool
4117 vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
4118 bool *strided_store, slp_tree slp_node,
4119 slp_instance slp_node_instance)
4121 bool is_store = false;
4122 gimple vec_stmt = NULL;
4123 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4124 gimple orig_stmt_in_pattern;
4125 bool done;
4127 switch (STMT_VINFO_TYPE (stmt_info))
4129 case type_demotion_vec_info_type:
4130 done = vectorizable_type_demotion (stmt, gsi, &vec_stmt, slp_node);
4131 gcc_assert (done);
4132 break;
4134 case type_promotion_vec_info_type:
4135 done = vectorizable_type_promotion (stmt, gsi, &vec_stmt, slp_node);
4136 gcc_assert (done);
4137 break;
4139 case type_conversion_vec_info_type:
4140 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
4141 gcc_assert (done);
4142 break;
4144 case induc_vec_info_type:
4145 gcc_assert (!slp_node);
4146 done = vectorizable_induction (stmt, gsi, &vec_stmt);
4147 gcc_assert (done);
4148 break;
4150 case op_vec_info_type:
4151 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
4152 gcc_assert (done);
4153 break;
4155 case assignment_vec_info_type:
4156 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
4157 gcc_assert (done);
4158 break;
4160 case load_vec_info_type:
4161 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
4162 slp_node_instance);
4163 gcc_assert (done);
4164 break;
4166 case store_vec_info_type:
4167 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
4168 gcc_assert (done);
4169 if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && !slp_node)
4171 /* In case of interleaving, the whole chain is vectorized when the
4172 last store in the chain is reached. Store stmts before the last
4173 one are skipped, and their stmt_vec_info shouldn't be freed
4174 meanwhile. */
4175 *strided_store = true;
4176 if (STMT_VINFO_VEC_STMT (stmt_info))
4177 is_store = true;
4179 else
4180 is_store = true;
4181 break;
4183 case condition_vec_info_type:
4184 gcc_assert (!slp_node);
4185 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0);
4186 gcc_assert (done);
4187 break;
4189 case call_vec_info_type:
4190 gcc_assert (!slp_node);
4191 done = vectorizable_call (stmt, gsi, &vec_stmt);
4192 break;
4194 case reduc_vec_info_type:
4195 gcc_assert (!slp_node);
4196 done = vectorizable_reduction (stmt, gsi, &vec_stmt);
4197 gcc_assert (done);
4198 break;
4200 default:
4201 if (!STMT_VINFO_LIVE_P (stmt_info))
4203 if (vect_print_dump_info (REPORT_DETAILS))
4204 fprintf (vect_dump, "stmt not supported.");
4205 gcc_unreachable ();
4209 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
4210 is being vectorized, but outside the immediately enclosing loop. */
4211 if (vec_stmt
4212 && STMT_VINFO_LOOP_VINFO (stmt_info)
4213 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
4214 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
4215 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
4216 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
4217 || STMT_VINFO_RELEVANT (stmt_info) ==
4218 vect_used_in_outer_by_reduction))
4220 struct loop *innerloop = LOOP_VINFO_LOOP (
4221 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
4222 imm_use_iterator imm_iter;
4223 use_operand_p use_p;
4224 tree scalar_dest;
4225 gimple exit_phi;
4227 if (vect_print_dump_info (REPORT_DETAILS))
4228 fprintf (vect_dump, "Record the vdef for outer-loop vectorization.");
4230 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
4231 (to be used when vectorizing outer-loop stmts that use the DEF of
4232 STMT). */
4233 if (gimple_code (stmt) == GIMPLE_PHI)
4234 scalar_dest = PHI_RESULT (stmt);
4235 else
4236 scalar_dest = gimple_assign_lhs (stmt);
4238 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4240 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
4242 exit_phi = USE_STMT (use_p);
4243 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
4248 /* Handle stmts whose DEF is used outside the loop-nest that is
4249 being vectorized. */
4250 if (STMT_VINFO_LIVE_P (stmt_info)
4251 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4253 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
4254 gcc_assert (done);
4257 if (vec_stmt)
4259 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
4260 orig_stmt_in_pattern = STMT_VINFO_RELATED_STMT (stmt_info);
4261 if (orig_stmt_in_pattern)
4263 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt_in_pattern);
4264 /* STMT was inserted by the vectorizer to replace a computation idiom.
4265 ORIG_STMT_IN_PATTERN is a stmt in the original sequence that
4266 computed this idiom. We need to record a pointer to VEC_STMT in
4267 the stmt_info of ORIG_STMT_IN_PATTERN. See more details in the
4268 documentation of vect_pattern_recog. */
4269 if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
4271 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
4272 STMT_VINFO_VEC_STMT (stmt_vinfo) = vec_stmt;
4277 return is_store;
4281 /* Remove a group of stores (for SLP or interleaving), free their
4282 stmt_vec_info. */
4284 void
4285 vect_remove_stores (gimple first_stmt)
4287 gimple next = first_stmt;
4288 gimple tmp;
4289 gimple_stmt_iterator next_si;
4291 while (next)
4293 /* Free the attached stmt_vec_info and remove the stmt. */
4294 next_si = gsi_for_stmt (next);
4295 gsi_remove (&next_si, true);
4296 tmp = DR_GROUP_NEXT_DR (vinfo_for_stmt (next));
4297 free_stmt_vec_info (next);
4298 next = tmp;
4303 /* Function new_stmt_vec_info.
4305 Create and initialize a new stmt_vec_info struct for STMT. */
4307 stmt_vec_info
4308 new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
4309 bb_vec_info bb_vinfo)
4311 stmt_vec_info res;
4312 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
4314 STMT_VINFO_TYPE (res) = undef_vec_info_type;
4315 STMT_VINFO_STMT (res) = stmt;
4316 STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
4317 STMT_VINFO_BB_VINFO (res) = bb_vinfo;
4318 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
4319 STMT_VINFO_LIVE_P (res) = false;
4320 STMT_VINFO_VECTYPE (res) = NULL;
4321 STMT_VINFO_VEC_STMT (res) = NULL;
4322 STMT_VINFO_IN_PATTERN_P (res) = false;
4323 STMT_VINFO_RELATED_STMT (res) = NULL;
4324 STMT_VINFO_DATA_REF (res) = NULL;
4326 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
4327 STMT_VINFO_DR_OFFSET (res) = NULL;
4328 STMT_VINFO_DR_INIT (res) = NULL;
4329 STMT_VINFO_DR_STEP (res) = NULL;
4330 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
4332 if (gimple_code (stmt) == GIMPLE_PHI
4333 && is_loop_header_bb_p (gimple_bb (stmt)))
4334 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
4335 else
4336 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
4338 STMT_VINFO_SAME_ALIGN_REFS (res) = VEC_alloc (dr_p, heap, 5);
4339 STMT_VINFO_INSIDE_OF_LOOP_COST (res) = 0;
4340 STMT_VINFO_OUTSIDE_OF_LOOP_COST (res) = 0;
4341 STMT_SLP_TYPE (res) = loop_vect;
4342 DR_GROUP_FIRST_DR (res) = NULL;
4343 DR_GROUP_NEXT_DR (res) = NULL;
4344 DR_GROUP_SIZE (res) = 0;
4345 DR_GROUP_STORE_COUNT (res) = 0;
4346 DR_GROUP_GAP (res) = 0;
4347 DR_GROUP_SAME_DR_STMT (res) = NULL;
4348 DR_GROUP_READ_WRITE_DEPENDENCE (res) = false;
4350 return res;
4354 /* Create the vector used to store statement vectorization information. */
4356 void
4357 init_stmt_vec_info_vec (void)
4359 gcc_assert (!stmt_vec_info_vec);
4360 stmt_vec_info_vec = VEC_alloc (vec_void_p, heap, 50);
4364 /* Free the vector used to store statement vectorization information. */
4366 void
4367 free_stmt_vec_info_vec (void)
4369 gcc_assert (stmt_vec_info_vec);
4370 VEC_free (vec_void_p, heap, stmt_vec_info_vec);
4374 /* Free stmt vectorization related info. */
4376 void
4377 free_stmt_vec_info (gimple stmt)
4379 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4381 if (!stmt_info)
4382 return;
4384 VEC_free (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
4385 set_vinfo_for_stmt (stmt, NULL);
4386 free (stmt_info);
4390 /* Function get_vectype_for_scalar_type.
4392 Returns the vector type corresponding to SCALAR_TYPE as supported
4393 by the target. */
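 /* For instance (illustration only), on a target whose SIMD word is 16 bytes,
    a 4-byte "int" yields a 4-unit vector type (e.g. V4SI), while a scalar
    type at least as wide as the SIMD word yields NULL_TREE.  */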
4395 tree
4396 get_vectype_for_scalar_type (tree scalar_type)
4398 enum machine_mode inner_mode = TYPE_MODE (scalar_type);
4399 int nbytes = GET_MODE_SIZE (inner_mode);
4400 int nunits;
4401 tree vectype;
4403 if (nbytes == 0 || nbytes >= UNITS_PER_SIMD_WORD (inner_mode))
4404 return NULL_TREE;
4406 /* FORNOW: Only a single vector size per mode (UNITS_PER_SIMD_WORD)
4407 is expected. */
4408 nunits = UNITS_PER_SIMD_WORD (inner_mode) / nbytes;
4410 vectype = build_vector_type (scalar_type, nunits);
4411 if (vect_print_dump_info (REPORT_DETAILS))
4413 fprintf (vect_dump, "get vectype with %d units of type ", nunits);
4414 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4417 if (!vectype)
4418 return NULL_TREE;
4420 if (vect_print_dump_info (REPORT_DETAILS))
4422 fprintf (vect_dump, "vectype: ");
4423 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4426 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
4427 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
4429 if (vect_print_dump_info (REPORT_DETAILS))
4430 fprintf (vect_dump, "mode not supported by target.");
4431 return NULL_TREE;
4434 return vectype;
4437 /* Function vect_is_simple_use.
4439 Input:
4440 LOOP_VINFO - the vect info of the loop that is being vectorized.
4441 BB_VINFO - the vect info of the basic block that is being vectorized.
4442 OPERAND - operand of a stmt in the loop or bb.
4443 DEF_STMT - the defining stmt in case OPERAND is an SSA_NAME; DEF - its defined value.
4445 Returns whether a stmt with OPERAND can be vectorized.
4446 For loops, supportable operands are constants, loop invariants, and operands
4447 that are defined by the current iteration of the loop. Unsupportable
4448 operands are those that are defined by a previous iteration of the loop (as
4449 is the case in reduction/induction computations).
4450 For basic blocks, supportable operands are constants and bb invariants.
4451 For now, operands defined outside the basic block are not supported. */
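 /* Illustration: for the use "b_4" in "a_8 = b_4 + 1" inside the loop, an
    integer constant operand is classified as vect_constant_def, a value
    defined outside the loop as vect_external_def, and an SSA name defined
    inside the loop inherits the def type recorded for its defining stmt.  */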
4453 bool
4454 vect_is_simple_use (tree operand, loop_vec_info loop_vinfo,
4455 bb_vec_info bb_vinfo, gimple *def_stmt,
4456 tree *def, enum vect_def_type *dt)
4458 basic_block bb;
4459 stmt_vec_info stmt_vinfo;
4460 struct loop *loop = NULL;
4462 if (loop_vinfo)
4463 loop = LOOP_VINFO_LOOP (loop_vinfo);
4465 *def_stmt = NULL;
4466 *def = NULL_TREE;
4468 if (vect_print_dump_info (REPORT_DETAILS))
4470 fprintf (vect_dump, "vect_is_simple_use: operand ");
4471 print_generic_expr (vect_dump, operand, TDF_SLIM);
4474 if (TREE_CODE (operand) == INTEGER_CST || TREE_CODE (operand) == REAL_CST)
4476 *dt = vect_constant_def;
4477 return true;
4480 if (is_gimple_min_invariant (operand))
4482 *def = operand;
4483 *dt = vect_external_def;
4484 return true;
4487 if (TREE_CODE (operand) == PAREN_EXPR)
4489 if (vect_print_dump_info (REPORT_DETAILS))
4490 fprintf (vect_dump, "non-associatable copy.");
4491 operand = TREE_OPERAND (operand, 0);
4494 if (TREE_CODE (operand) != SSA_NAME)
4496 if (vect_print_dump_info (REPORT_DETAILS))
4497 fprintf (vect_dump, "not ssa-name.");
4498 return false;
4501 *def_stmt = SSA_NAME_DEF_STMT (operand);
4502 if (*def_stmt == NULL)
4504 if (vect_print_dump_info (REPORT_DETAILS))
4505 fprintf (vect_dump, "no def_stmt.");
4506 return false;
4509 if (vect_print_dump_info (REPORT_DETAILS))
4511 fprintf (vect_dump, "def_stmt: ");
4512 print_gimple_stmt (vect_dump, *def_stmt, 0, TDF_SLIM);
4515 /* An empty stmt is expected only in case of a function argument.
4516 (Otherwise we expect a phi node or a GIMPLE_ASSIGN.) */
4517 if (gimple_nop_p (*def_stmt))
4519 *def = operand;
4520 *dt = vect_external_def;
4521 return true;
4524 bb = gimple_bb (*def_stmt);
4526 if ((loop && !flow_bb_inside_loop_p (loop, bb))
4527 || (!loop && bb != BB_VINFO_BB (bb_vinfo))
4528 || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
4529 *dt = vect_external_def;
4530 else
4532 stmt_vinfo = vinfo_for_stmt (*def_stmt);
4533 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
4536 if (*dt == vect_unknown_def_type)
4538 if (vect_print_dump_info (REPORT_DETAILS))
4539 fprintf (vect_dump, "Unsupported pattern.");
4540 return false;
4543 if (vect_print_dump_info (REPORT_DETAILS))
4544 fprintf (vect_dump, "type of def: %d.",*dt);
4546 switch (gimple_code (*def_stmt))
4548 case GIMPLE_PHI:
4549 *def = gimple_phi_result (*def_stmt);
4550 break;
4552 case GIMPLE_ASSIGN:
4553 *def = gimple_assign_lhs (*def_stmt);
4554 break;
4556 case GIMPLE_CALL:
4557 *def = gimple_call_lhs (*def_stmt);
4558 if (*def != NULL)
4559 break;
4560 /* FALLTHRU */
4561 default:
4562 if (vect_print_dump_info (REPORT_DETAILS))
4563 fprintf (vect_dump, "unsupported defining stmt: ");
4564 return false;
4567 return true;
4571 /* Function supportable_widening_operation
4573 Check whether an operation represented by the code CODE is a
4574 widening operation that is supported by the target platform in
4575 vector form (i.e., when operating on arguments of type VECTYPE).
4577 Widening operations we currently support are NOP (CONVERT), FLOAT
4578 and WIDEN_MULT. This function checks if these operations are supported
4579 by the target platform either directly (via vector tree-codes), or via
4580 target builtins.
4582 Output:
4583 - CODE1 and CODE2 are codes of vector operations to be used when
4584 vectorizing the operation, if available.
4585 - DECL1 and DECL2 are decls of target builtin functions to be used
4586 when vectorizing the operation, if available. In this case,
4587 CODE1 and CODE2 are CALL_EXPR.
4588 - MULTI_STEP_CVT determines the number of required intermediate steps in
4589 case of multi-step conversion (like char->short->int - in that case
4590 MULTI_STEP_CVT will be 1).
4591 - INTERM_TYPES contains the intermediate type required to perform the
4592 widening operation (short in the above example). */
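 /* For example (illustration only), widening a vector of chars to a vector
    of ints goes through a vector of shorts: MULTI_STEP_CVT is returned as 1,
    INTERM_TYPES holds the short vector type, and for a conversion CODE1/CODE2
    are the VEC_UNPACK_LO/HI codes.  */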
4594 bool
4595 supportable_widening_operation (enum tree_code code, gimple stmt, tree vectype,
4596 tree *decl1, tree *decl2,
4597 enum tree_code *code1, enum tree_code *code2,
4598 int *multi_step_cvt,
4599 VEC (tree, heap) **interm_types)
4601 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4602 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
4603 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
4604 bool ordered_p;
4605 enum machine_mode vec_mode;
4606 enum insn_code icode1, icode2;
4607 optab optab1, optab2;
4608 tree type = gimple_expr_type (stmt);
4609 tree wide_vectype = get_vectype_for_scalar_type (type);
4610 enum tree_code c1, c2;
4612 /* The result of a vectorized widening operation usually requires two vectors
4613 (because the widened results do not fit in one vector). The generated
4614 vector results are normally expected to appear in the same
4615 order as in the original scalar computation, i.e. if 8 results are
4616 generated in each vector iteration, they are to be organized as follows:
4617 vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].
4619 However, in the special case that the result of the widening operation is
4620 used in a reduction computation only, the order doesn't matter (because
4621 when vectorizing a reduction we change the order of the computation).
4622 Some targets can take advantage of this and generate more efficient code.
4623 For example, targets like Altivec, that support widen_mult using a sequence
4624 of {mult_even,mult_odd} generate the following vectors:
4625 vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8].
4627 When vectorizing outer-loops, we execute the inner-loop sequentially
4628 (each vectorized inner-loop iteration contributes to VF outer-loop
4629 iterations in parallel). We therefore don't allow changing the order
4630 of the computation in the inner-loop during outer-loop vectorization. */
4632 if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
4633 && !nested_in_vect_loop_p (vect_loop, stmt))
4634 ordered_p = false;
4635 else
4636 ordered_p = true;
4638 if (!ordered_p
4639 && code == WIDEN_MULT_EXPR
4640 && targetm.vectorize.builtin_mul_widen_even
4641 && targetm.vectorize.builtin_mul_widen_even (vectype)
4642 && targetm.vectorize.builtin_mul_widen_odd
4643 && targetm.vectorize.builtin_mul_widen_odd (vectype))
4645 if (vect_print_dump_info (REPORT_DETAILS))
4646 fprintf (vect_dump, "Unordered widening operation detected.");
4648 *code1 = *code2 = CALL_EXPR;
4649 *decl1 = targetm.vectorize.builtin_mul_widen_even (vectype);
4650 *decl2 = targetm.vectorize.builtin_mul_widen_odd (vectype);
4651 return true;
4654 switch (code)
4656 case WIDEN_MULT_EXPR:
4657 if (BYTES_BIG_ENDIAN)
4659 c1 = VEC_WIDEN_MULT_HI_EXPR;
4660 c2 = VEC_WIDEN_MULT_LO_EXPR;
4662 else
4664 c2 = VEC_WIDEN_MULT_HI_EXPR;
4665 c1 = VEC_WIDEN_MULT_LO_EXPR;
4667 break;
4669 CASE_CONVERT:
4670 if (BYTES_BIG_ENDIAN)
4672 c1 = VEC_UNPACK_HI_EXPR;
4673 c2 = VEC_UNPACK_LO_EXPR;
4675 else
4677 c2 = VEC_UNPACK_HI_EXPR;
4678 c1 = VEC_UNPACK_LO_EXPR;
4680 break;
4682 case FLOAT_EXPR:
4683 if (BYTES_BIG_ENDIAN)
4685 c1 = VEC_UNPACK_FLOAT_HI_EXPR;
4686 c2 = VEC_UNPACK_FLOAT_LO_EXPR;
4688 else
4690 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
4691 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
4693 break;
4695 case FIX_TRUNC_EXPR:
4696 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
4697 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
4698 computing the operation. */
4699 return false;
4701 default:
4702 gcc_unreachable ();
4705 if (code == FIX_TRUNC_EXPR)
4707 /* The signedness is determined from the output operand. */
4708 optab1 = optab_for_tree_code (c1, type, optab_default);
4709 optab2 = optab_for_tree_code (c2, type, optab_default);
4711 else
4713 optab1 = optab_for_tree_code (c1, vectype, optab_default);
4714 optab2 = optab_for_tree_code (c2, vectype, optab_default);
4717 if (!optab1 || !optab2)
4718 return false;
4720 vec_mode = TYPE_MODE (vectype);
4721 if ((icode1 = optab_handler (optab1, vec_mode)->insn_code) == CODE_FOR_nothing
4722 || (icode2 = optab_handler (optab2, vec_mode)->insn_code)
4723 == CODE_FOR_nothing)
4724 return false;
4726 /* Check if it's a multi-step conversion that can be done using intermediate
4727 types. */
4728 if (insn_data[icode1].operand[0].mode != TYPE_MODE (wide_vectype)
4729 || insn_data[icode2].operand[0].mode != TYPE_MODE (wide_vectype))
4731 int i;
4732 tree prev_type = vectype, intermediate_type;
4733 enum machine_mode intermediate_mode, prev_mode = vec_mode;
4734 optab optab3, optab4;
4736 if (!CONVERT_EXPR_CODE_P (code))
4737 return false;
4739 *code1 = c1;
4740 *code2 = c2;
4742 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
4743 intermediate steps in the promotion sequence. We try MAX_INTERM_CVT_STEPS
4744 steps to get to WIDE_VECTYPE, and fail if we do not. */
4745 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
4746 for (i = 0; i < 3; i++)
4748 intermediate_mode = insn_data[icode1].operand[0].mode;
4749 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
4750 TYPE_UNSIGNED (prev_type));
4751 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
4752 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
4754 if (!optab3 || !optab4
4755 || (icode1 = optab1->handlers[(int) prev_mode].insn_code)
4756 == CODE_FOR_nothing
4757 || insn_data[icode1].operand[0].mode != intermediate_mode
4758 || (icode2 = optab2->handlers[(int) prev_mode].insn_code)
4759 == CODE_FOR_nothing
4760 || insn_data[icode2].operand[0].mode != intermediate_mode
4761 || (icode1 = optab3->handlers[(int) intermediate_mode].insn_code)
4762 == CODE_FOR_nothing
4763 || (icode2 = optab4->handlers[(int) intermediate_mode].insn_code)
4764 == CODE_FOR_nothing)
4765 return false;
4767 VEC_quick_push (tree, *interm_types, intermediate_type);
4768 (*multi_step_cvt)++;
4770 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
4771 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
4772 return true;
4774 prev_type = intermediate_type;
4775 prev_mode = intermediate_mode;
4778 return false;
4781 *code1 = c1;
4782 *code2 = c2;
4783 return true;
4787 /* Function supportable_narrowing_operation
4789 Check whether an operation represented by the code CODE is a
4790 narrowing operation that is supported by the target platform in
4791 vector form (i.e., when operating on arguments of type VECTYPE).
4793 Narrowing operations we currently support are NOP (CONVERT) and
4794 FIX_TRUNC. This function checks if these operations are supported by
4795 the target platform directly via vector tree-codes.
4797 Output:
4798 - CODE1 is the code of a vector operation to be used when
4799 vectorizing the operation, if available.
4800 - MULTI_STEP_CVT determines the number of required intermediate steps in
4801 case of multi-step conversion (like int->short->char - in that case
4802 MULTI_STEP_CVT will be 1).
4803 - INTERM_TYPES contains the intermediate type required to perform the
4804 narrowing operation (short in the above example). */
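 /* For example (illustration only), narrowing a vector of ints to a vector
    of chars goes through a vector of shorts: MULTI_STEP_CVT is returned as 1,
    INTERM_TYPES holds the short vector type, and CODE1 is
    VEC_PACK_TRUNC_EXPR.  */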
4806 bool
4807 supportable_narrowing_operation (enum tree_code code,
4808 const_gimple stmt, tree vectype,
4809 enum tree_code *code1, int *multi_step_cvt,
4810 VEC (tree, heap) **interm_types)
4812 enum machine_mode vec_mode;
4813 enum insn_code icode1;
4814 optab optab1, interm_optab;
4815 tree type = gimple_expr_type (stmt);
4816 tree narrow_vectype = get_vectype_for_scalar_type (type);
4817 enum tree_code c1;
4818 tree intermediate_type, prev_type;
4819 int i;
4821 switch (code)
4823 CASE_CONVERT:
4824 c1 = VEC_PACK_TRUNC_EXPR;
4825 break;
4827 case FIX_TRUNC_EXPR:
4828 c1 = VEC_PACK_FIX_TRUNC_EXPR;
4829 break;
4831 case FLOAT_EXPR:
4832 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
4833 tree code and optabs used for computing the operation. */
4834 return false;
4836 default:
4837 gcc_unreachable ();
4840 if (code == FIX_TRUNC_EXPR)
4841 /* The signedness is determined from the output operand. */
4842 optab1 = optab_for_tree_code (c1, type, optab_default);
4843 else
4844 optab1 = optab_for_tree_code (c1, vectype, optab_default);
4846 if (!optab1)
4847 return false;
4849 vec_mode = TYPE_MODE (vectype);
4850 if ((icode1 = optab_handler (optab1, vec_mode)->insn_code)
4851 == CODE_FOR_nothing)
4852 return false;
4854 /* Check if it's a multi-step conversion that can be done using intermediate
4855 types. */
4856 if (insn_data[icode1].operand[0].mode != TYPE_MODE (narrow_vectype))
4858 enum machine_mode intermediate_mode, prev_mode = vec_mode;
4860 *code1 = c1;
4861 prev_type = vectype;
4862 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
4863 intermediate steps in the narrowing sequence. We try MAX_INTERM_CVT_STEPS
4864 to get to NARROW_VECTYPE, and fail if we do not. */
4865 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
4866 for (i = 0; i < 3; i++)
4868 intermediate_mode = insn_data[icode1].operand[0].mode;
4869 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
4870 TYPE_UNSIGNED (prev_type));
4871 interm_optab = optab_for_tree_code (c1, intermediate_type,
4872 optab_default);
4873 if (!interm_optab
4874 || (icode1 = optab1->handlers[(int) prev_mode].insn_code)
4875 == CODE_FOR_nothing
4876 || insn_data[icode1].operand[0].mode != intermediate_mode
4877 || (icode1
4878 = interm_optab->handlers[(int) intermediate_mode].insn_code)
4879 == CODE_FOR_nothing)
4880 return false;
4882 VEC_quick_push (tree, *interm_types, intermediate_type);
4883 (*multi_step_cvt)++;
4885 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
4886 return true;
4888 prev_type = intermediate_type;
4889 prev_mode = intermediate_mode;
4892 return false;
4895 *code1 = c1;
4896 return true;