gcc/tree-vect-stmts.c
1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software
3 Foundation, Inc.
4 Contributed by Dorit Naishlos <dorit@il.ibm.com>
5 and Ira Rosen <irar@il.ibm.com>
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "ggc.h"
28 #include "tree.h"
29 #include "target.h"
30 #include "basic-block.h"
31 #include "diagnostic.h"
32 #include "tree-flow.h"
33 #include "tree-dump.h"
34 #include "cfgloop.h"
35 #include "cfglayout.h"
36 #include "expr.h"
37 #include "recog.h"
38 #include "optabs.h"
39 #include "toplev.h"
40 #include "tree-vectorizer.h"
41 #include "langhooks.h"
44 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
46 /* Function vect_mark_relevant.
48 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
50 static void
51 vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt,
52 enum vect_relevant relevant, bool live_p)
54 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
55 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
56 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
58 if (vect_print_dump_info (REPORT_DETAILS))
59 fprintf (vect_dump, "mark relevant %d, live %d.", relevant, live_p);
61 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
63 gimple pattern_stmt;
65 /* This is the last stmt in a sequence that was detected as a
66 pattern that can potentially be vectorized. Don't mark the stmt
67 as relevant/live because it's not going to be vectorized.
68 Instead mark the pattern-stmt that replaces it. */
70 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
72 if (vect_print_dump_info (REPORT_DETAILS))
73 fprintf (vect_dump, "last stmt in pattern. don't mark relevant/live.");
74 stmt_info = vinfo_for_stmt (pattern_stmt);
75 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
76 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
77 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
78 stmt = pattern_stmt;
81 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
82 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
83 STMT_VINFO_RELEVANT (stmt_info) = relevant;
85 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
86 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
88 if (vect_print_dump_info (REPORT_DETAILS))
89 fprintf (vect_dump, "already marked relevant/live.");
90 return;
93 VEC_safe_push (gimple, heap, *worklist, stmt);
97 /* Function vect_stmt_relevant_p.
99 Return true if STMT in loop that is represented by LOOP_VINFO is
100 "relevant for vectorization".
102 A stmt is considered "relevant for vectorization" if:
103 - it has uses outside the loop.
104 - it has vdefs (it alters memory).
105 - control stmts in the loop (except for the exit condition).
107 CHECKME: what other side effects would the vectorizer allow? */
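/* For example, in

     for (i = 0; i < n; i++)
       a[i] = b[i] + x;

   the store to a[i] alters memory (has a vdef), so it is relevant even
   though its result has no uses after the loop.  */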
109 static bool
110 vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
111 enum vect_relevant *relevant, bool *live_p)
113 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
114 ssa_op_iter op_iter;
115 imm_use_iterator imm_iter;
116 use_operand_p use_p;
117 def_operand_p def_p;
119 *relevant = vect_unused_in_scope;
120 *live_p = false;
122 /* cond stmt other than loop exit cond. */
123 if (is_ctrl_stmt (stmt)
124 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
125 != loop_exit_ctrl_vec_info_type)
126 *relevant = vect_used_in_scope;
128 /* changing memory. */
129 if (gimple_code (stmt) != GIMPLE_PHI)
130 if (gimple_vdef (stmt))
132 if (vect_print_dump_info (REPORT_DETAILS))
133 fprintf (vect_dump, "vec_stmt_relevant_p: stmt has vdefs.");
134 *relevant = vect_used_in_scope;
137 /* uses outside the loop. */
138 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
140 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
142 basic_block bb = gimple_bb (USE_STMT (use_p));
143 if (!flow_bb_inside_loop_p (loop, bb))
145 if (vect_print_dump_info (REPORT_DETAILS))
146 fprintf (vect_dump, "vec_stmt_relevant_p: used out of loop.");
148 /* We expect all such uses to be in the loop exit phis
 149                (because of loop-closed SSA form).  */
150 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
151 gcc_assert (bb == single_exit (loop)->dest);
153 *live_p = true;
158 return (*live_p || *relevant);
162 /* Function exist_non_indexing_operands_for_use_p
164 USE is one of the uses attached to STMT. Check if USE is
165 used in STMT for anything other than indexing an array. */
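/* For example, given the store  a[i_1] = x_2:  USE == x_2 is a
   non-indexing use (the stored value), so we return true, whereas
   USE == i_1 only computes the array index, so we return false.  */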
167 static bool
168 exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
170 tree operand;
171 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
173 /* USE corresponds to some operand in STMT. If there is no data
174 reference in STMT, then any operand that corresponds to USE
175 is not indexing an array. */
176 if (!STMT_VINFO_DATA_REF (stmt_info))
177 return true;
 179   /* STMT has a data_ref. FORNOW this means that it's of one of
180 the following forms:
181 -1- ARRAY_REF = var
182 -2- var = ARRAY_REF
183 (This should have been verified in analyze_data_refs).
185 'var' in the second case corresponds to a def, not a use,
186 so USE cannot correspond to any operands that are not used
187 for array indexing.
189 Therefore, all we need to check is if STMT falls into the
190 first case, and whether var corresponds to USE. */
192 if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
193 return false;
195 if (!gimple_assign_copy_p (stmt))
196 return false;
197 operand = gimple_assign_rhs1 (stmt);
199 if (TREE_CODE (operand) != SSA_NAME)
200 return false;
202 if (operand == use)
203 return true;
205 return false;
 210 /* Function process_use.
212 Inputs:
213 - a USE in STMT in a loop represented by LOOP_VINFO
214 - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
215 that defined USE. This is done by calling mark_relevant and passing it
216 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
218 Outputs:
219 Generally, LIVE_P and RELEVANT are used to define the liveness and
220 relevance info of the DEF_STMT of this USE:
221 STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
222 STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
223 Exceptions:
224 - case 1: If USE is used only for address computations (e.g. array indexing),
225 which does not need to be directly vectorized, then the liveness/relevance
226 of the respective DEF_STMT is left unchanged.
227 - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
 228    skip DEF_STMT because it has already been processed.
229 - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
230 be modified accordingly.
232 Return true if everything is as expected. Return false otherwise. */
234 static bool
235 process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
236 enum vect_relevant relevant, VEC(gimple,heap) **worklist)
238 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
239 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
240 stmt_vec_info dstmt_vinfo;
241 basic_block bb, def_bb;
242 tree def;
243 gimple def_stmt;
244 enum vect_def_type dt;
246 /* case 1: we are only interested in uses that need to be vectorized. Uses
247 that are used for address computation are not considered relevant. */
248 if (!exist_non_indexing_operands_for_use_p (use, stmt))
249 return true;
251 if (!vect_is_simple_use (use, loop_vinfo, NULL, &def_stmt, &def, &dt))
253 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
254 fprintf (vect_dump, "not vectorized: unsupported use in stmt.");
255 return false;
258 if (!def_stmt || gimple_nop_p (def_stmt))
259 return true;
261 def_bb = gimple_bb (def_stmt);
262 if (!flow_bb_inside_loop_p (loop, def_bb))
264 if (vect_print_dump_info (REPORT_DETAILS))
265 fprintf (vect_dump, "def_stmt is out of loop.");
266 return true;
269 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
270 DEF_STMT must have already been processed, because this should be the
271 only way that STMT, which is a reduction-phi, was put in the worklist,
272 as there should be no other uses for DEF_STMT in the loop. So we just
273 check that everything is as expected, and we are done. */
274 dstmt_vinfo = vinfo_for_stmt (def_stmt);
275 bb = gimple_bb (stmt);
276 if (gimple_code (stmt) == GIMPLE_PHI
277 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
278 && gimple_code (def_stmt) != GIMPLE_PHI
279 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
280 && bb->loop_father == def_bb->loop_father)
282 if (vect_print_dump_info (REPORT_DETAILS))
283 fprintf (vect_dump, "reduc-stmt defining reduc-phi in the same nest.");
284 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
285 dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
286 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
287 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
288 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
289 return true;
292 /* case 3a: outer-loop stmt defining an inner-loop stmt:
293 outer-loop-header-bb:
294 d = def_stmt
295 inner-loop:
296 stmt # use (d)
297 outer-loop-tail-bb:
298 ... */
299 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
301 if (vect_print_dump_info (REPORT_DETAILS))
302 fprintf (vect_dump, "outer-loop def-stmt defining inner-loop stmt.");
303 switch (relevant)
305 case vect_unused_in_scope:
306 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def) ?
307 vect_used_by_reduction : vect_unused_in_scope;
308 break;
309 case vect_used_in_outer_by_reduction:
310 relevant = vect_used_by_reduction;
311 break;
312 case vect_used_in_outer:
313 relevant = vect_used_in_scope;
314 break;
315 case vect_used_by_reduction:
316 case vect_used_in_scope:
317 break;
319 default:
320 gcc_unreachable ();
324 /* case 3b: inner-loop stmt defining an outer-loop stmt:
325 outer-loop-header-bb:
327 inner-loop:
328 d = def_stmt
329 outer-loop-tail-bb:
330 stmt # use (d) */
331 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
333 if (vect_print_dump_info (REPORT_DETAILS))
334 fprintf (vect_dump, "inner-loop def-stmt defining outer-loop stmt.");
335 switch (relevant)
337 case vect_unused_in_scope:
338 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def) ?
339 vect_used_in_outer_by_reduction : vect_unused_in_scope;
340 break;
342 case vect_used_in_outer_by_reduction:
343 case vect_used_in_outer:
344 break;
346 case vect_used_by_reduction:
347 relevant = vect_used_in_outer_by_reduction;
348 break;
350 case vect_used_in_scope:
351 relevant = vect_used_in_outer;
352 break;
354 default:
355 gcc_unreachable ();
359 vect_mark_relevant (worklist, def_stmt, relevant, live_p);
360 return true;
364 /* Function vect_mark_stmts_to_be_vectorized.
366 Not all stmts in the loop need to be vectorized. For example:
368 for i...
369 for j...
370 1. T0 = i + j
371 2. T1 = a[T0]
373 3. j = j + 1
 375    Stmts 1 and 3 do not need to be vectorized, because loop control and
376 addressing of vectorized data-refs are handled differently.
378 This pass detects such stmts. */
380 bool
381 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
383 VEC(gimple,heap) *worklist;
384 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
385 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
386 unsigned int nbbs = loop->num_nodes;
387 gimple_stmt_iterator si;
388 gimple stmt;
389 unsigned int i;
390 stmt_vec_info stmt_vinfo;
391 basic_block bb;
392 gimple phi;
393 bool live_p;
394 enum vect_relevant relevant;
396 if (vect_print_dump_info (REPORT_DETAILS))
397 fprintf (vect_dump, "=== vect_mark_stmts_to_be_vectorized ===");
399 worklist = VEC_alloc (gimple, heap, 64);
401 /* 1. Init worklist. */
402 for (i = 0; i < nbbs; i++)
404 bb = bbs[i];
405 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
407 phi = gsi_stmt (si);
408 if (vect_print_dump_info (REPORT_DETAILS))
410 fprintf (vect_dump, "init: phi relevant? ");
411 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
414 if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
415 vect_mark_relevant (&worklist, phi, relevant, live_p);
417 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
419 stmt = gsi_stmt (si);
420 if (vect_print_dump_info (REPORT_DETAILS))
422 fprintf (vect_dump, "init: stmt relevant? ");
423 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
426 if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
427 vect_mark_relevant (&worklist, stmt, relevant, live_p);
431 /* 2. Process_worklist */
432 while (VEC_length (gimple, worklist) > 0)
434 use_operand_p use_p;
435 ssa_op_iter iter;
437 stmt = VEC_pop (gimple, worklist);
438 if (vect_print_dump_info (REPORT_DETAILS))
440 fprintf (vect_dump, "worklist: examine stmt: ");
441 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
444 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
445 (DEF_STMT) as relevant/irrelevant and live/dead according to the
446 liveness and relevance properties of STMT. */
447 stmt_vinfo = vinfo_for_stmt (stmt);
448 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
449 live_p = STMT_VINFO_LIVE_P (stmt_vinfo);
451 /* Generally, the liveness and relevance properties of STMT are
452 propagated as is to the DEF_STMTs of its USEs:
453 live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
454 relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)
456 One exception is when STMT has been identified as defining a reduction
457 variable; in this case we set the liveness/relevance as follows:
458 live_p = false
459 relevant = vect_used_by_reduction
460 This is because we distinguish between two kinds of relevant stmts -
461 those that are used by a reduction computation, and those that are
462 (also) used by a regular computation. This allows us later on to
463 identify stmts that are used solely by a reduction, and therefore the
464 order of the results that they produce does not have to be kept.
466 Reduction phis are expected to be used by a reduction stmt, or by
 467         a stmt in an outer loop; other reduction stmts are expected to be
468 in the loop, and possibly used by a stmt in an outer loop.
469 Here are the expected values of "relevant" for reduction phis/stmts:
 471         relevance:                             phi     stmt
 472         vect_unused_in_scope                           ok
 473         vect_used_in_outer_by_reduction        ok      ok
 474         vect_used_in_outer                     ok      ok
 475         vect_used_by_reduction                 ok
 476         vect_used_in_scope                                       */
478 if (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def)
480 enum vect_relevant tmp_relevant = relevant;
481 switch (tmp_relevant)
483 case vect_unused_in_scope:
484 gcc_assert (gimple_code (stmt) != GIMPLE_PHI);
485 relevant = vect_used_by_reduction;
486 break;
488 case vect_used_in_outer_by_reduction:
489 case vect_used_in_outer:
490 gcc_assert (gimple_code (stmt) != GIMPLE_ASSIGN
491 || (gimple_assign_rhs_code (stmt) != WIDEN_SUM_EXPR
492 && (gimple_assign_rhs_code (stmt)
493 != DOT_PROD_EXPR)));
494 break;
496 case vect_used_by_reduction:
497 if (gimple_code (stmt) == GIMPLE_PHI)
498 break;
499 /* fall through */
500 case vect_used_in_scope:
501 default:
502 if (vect_print_dump_info (REPORT_DETAILS))
503 fprintf (vect_dump, "unsupported use of reduction.");
504 VEC_free (gimple, heap, worklist);
505 return false;
507 live_p = false;
510 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
512 tree op = USE_FROM_PTR (use_p);
513 if (!process_use (stmt, op, loop_vinfo, live_p, relevant, &worklist))
515 VEC_free (gimple, heap, worklist);
516 return false;
519 } /* while worklist */
521 VEC_free (gimple, heap, worklist);
522 return true;
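/* Return the cost of executing STMT once as a scalar stmt, according to
   its vectorization type: loads and stores get the target's scalar
   load/store cost, everything else the generic scalar stmt cost.  */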
 526 int
 527 cost_for_stmt (gimple stmt)
529 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
531 switch (STMT_VINFO_TYPE (stmt_info))
533 case load_vec_info_type:
534 return TARG_SCALAR_LOAD_COST;
535 case store_vec_info_type:
536 return TARG_SCALAR_STORE_COST;
537 case op_vec_info_type:
538 case condition_vec_info_type:
539 case assignment_vec_info_type:
540 case reduc_vec_info_type:
541 case induc_vec_info_type:
542 case type_promotion_vec_info_type:
543 case type_demotion_vec_info_type:
544 case type_conversion_vec_info_type:
545 case call_vec_info_type:
546 return TARG_SCALAR_STMT_COST;
547 case undef_vec_info_type:
548 default:
549 gcc_unreachable ();
553 /* Function vect_model_simple_cost.
555 Models cost for simple operations, i.e. those that only emit ncopies of a
556 single op. Right now, this does not account for multiple insns that could
557 be generated for the single vector op. We will handle that shortly. */
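/* For example, with ncopies == 2 and one constant or loop-invariant
   operand, this models inside_cost = 2 * TARG_VEC_STMT_COST and
   outside_cost = TARG_SCALAR_TO_VEC_COST (one scalar-to-vector splat
   hoisted out of the loop).  */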
559 void
560 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
561 enum vect_def_type *dt, slp_tree slp_node)
563 int i;
564 int inside_cost = 0, outside_cost = 0;
566 /* The SLP costs were already calculated during SLP tree build. */
567 if (PURE_SLP_STMT (stmt_info))
568 return;
570 inside_cost = ncopies * TARG_VEC_STMT_COST;
572 /* FORNOW: Assuming maximum 2 args per stmts. */
573 for (i = 0; i < 2; i++)
575 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
576 outside_cost += TARG_SCALAR_TO_VEC_COST;
579 if (vect_print_dump_info (REPORT_COST))
580 fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, "
581 "outside_cost = %d .", inside_cost, outside_cost);
583 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
584 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
585 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
589 /* Function vect_cost_strided_group_size
591 For strided load or store, return the group_size only if it is the first
592 load or store of a group, else return 1. This ensures that group size is
593 only returned once per group. */
595 static int
596 vect_cost_strided_group_size (stmt_vec_info stmt_info)
598 gimple first_stmt = DR_GROUP_FIRST_DR (stmt_info);
600 if (first_stmt == STMT_VINFO_STMT (stmt_info))
601 return DR_GROUP_SIZE (stmt_info);
603 return 1;
607 /* Function vect_model_store_cost
609 Models cost for stores. In the case of strided accesses, one access
610 has the overhead of the strided access attributed to it. */
612 void
613 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
614 enum vect_def_type dt, slp_tree slp_node)
616 int group_size;
617 int inside_cost = 0, outside_cost = 0;
619 /* The SLP costs were already calculated during SLP tree build. */
620 if (PURE_SLP_STMT (stmt_info))
621 return;
623 if (dt == vect_constant_def || dt == vect_external_def)
624 outside_cost = TARG_SCALAR_TO_VEC_COST;
626 /* Strided access? */
627 if (DR_GROUP_FIRST_DR (stmt_info) && !slp_node)
628 group_size = vect_cost_strided_group_size (stmt_info);
629 /* Not a strided access. */
630 else
631 group_size = 1;
633 /* Is this an access in a group of stores, which provide strided access?
634 If so, add in the cost of the permutes. */
635 if (group_size > 1)
 637       /* Uses high and low interleave operations for each needed permute. */
638 inside_cost = ncopies * exact_log2(group_size) * group_size
639 * TARG_VEC_STMT_COST;
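      /* For example, a group of 4 stores with ncopies == 1 needs
         exact_log2 (4) * 4 = 8 interleave stmts.  */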
641 if (vect_print_dump_info (REPORT_COST))
642 fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .",
643 group_size);
647 /* Costs of the stores. */
648 inside_cost += ncopies * TARG_VEC_STORE_COST;
650 if (vect_print_dump_info (REPORT_COST))
651 fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, "
652 "outside_cost = %d .", inside_cost, outside_cost);
654 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
655 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
656 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
660 /* Function vect_model_load_cost
662 Models cost for loads. In the case of strided accesses, the last access
663 has the overhead of the strided access attributed to it. Since unaligned
664 accesses are supported for loads, we also account for the costs of the
665 access scheme chosen. */
667 void
668 vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)
671 int group_size;
 672   int alignment_support_scheme;
673 gimple first_stmt;
674 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
675 int inside_cost = 0, outside_cost = 0;
677 /* The SLP costs were already calculated during SLP tree build. */
678 if (PURE_SLP_STMT (stmt_info))
679 return;
681 /* Strided accesses? */
682 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
683 if (first_stmt && !slp_node)
685 group_size = vect_cost_strided_group_size (stmt_info);
686 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
688 /* Not a strided access. */
689 else
691 group_size = 1;
692 first_dr = dr;
 695   alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
697 /* Is this an access in a group of loads providing strided access?
698 If so, add in the cost of the permutes. */
699 if (group_size > 1)
 701       /* Uses even and odd extract operations for each needed permute. */
702 inside_cost = ncopies * exact_log2(group_size) * group_size
703 * TARG_VEC_STMT_COST;
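      /* For example, a group of 4 loads with ncopies == 1 needs
         exact_log2 (4) * 4 = 8 extract stmts.  */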
705 if (vect_print_dump_info (REPORT_COST))
706 fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .",
707 group_size);
711 /* The loads themselves. */
 712   switch (alignment_support_scheme)
714 case dr_aligned:
716 inside_cost += ncopies * TARG_VEC_LOAD_COST;
718 if (vect_print_dump_info (REPORT_COST))
719 fprintf (vect_dump, "vect_model_load_cost: aligned.");
721 break;
723 case dr_unaligned_supported:
725 /* Here, we assign an additional cost for the unaligned load. */
726 inside_cost += ncopies * TARG_VEC_UNALIGNED_LOAD_COST;
728 if (vect_print_dump_info (REPORT_COST))
729 fprintf (vect_dump, "vect_model_load_cost: unaligned supported by "
730 "hardware.");
732 break;
734 case dr_explicit_realign:
736 inside_cost += ncopies * (2*TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST);
738 /* FIXME: If the misalignment remains fixed across the iterations of
739 the containing loop, the following cost should be added to the
740 outside costs. */
741 if (targetm.vectorize.builtin_mask_for_load)
742 inside_cost += TARG_VEC_STMT_COST;
744 break;
746 case dr_explicit_realign_optimized:
748 if (vect_print_dump_info (REPORT_COST))
749 fprintf (vect_dump, "vect_model_load_cost: unaligned software "
750 "pipelined.");
752 /* Unaligned software pipeline has a load of an address, an initial
753 load, and possibly a mask operation to "prime" the loop. However,
754 if this is an access in a group of loads, which provide strided
755 access, then the above cost should only be considered for one
756 access in the group. Inside the loop, there is a load op
757 and a realignment op. */
759 if ((!DR_GROUP_FIRST_DR (stmt_info)) || group_size > 1 || slp_node)
761 outside_cost = 2*TARG_VEC_STMT_COST;
762 if (targetm.vectorize.builtin_mask_for_load)
763 outside_cost += TARG_VEC_STMT_COST;
766 inside_cost += ncopies * (TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST);
768 break;
771 default:
772 gcc_unreachable ();
775 if (vect_print_dump_info (REPORT_COST))
776 fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
777 "outside_cost = %d .", inside_cost, outside_cost);
779 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
780 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
781 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
785 /* Function vect_init_vector.
787 Insert a new stmt (INIT_STMT) that initializes a new vector variable with
788 the vector elements of VECTOR_VAR. Place the initialization at BSI if it
789 is not NULL. Otherwise, place the initialization at the loop preheader.
790 Return the DEF of INIT_STMT.
791 It will be used in the vectorization of STMT. */
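/* For example, for a uniform V4SI constant C this emits an init stmt of
   the form  vect_var = { C, C, C, C };  either at GSI or on the
   loop-preheader edge, and returns its SSA result.  */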
793 tree
794 vect_init_vector (gimple stmt, tree vector_var, tree vector_type,
795 gimple_stmt_iterator *gsi)
797 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
798 tree new_var;
799 gimple init_stmt;
800 tree vec_oprnd;
801 edge pe;
802 tree new_temp;
803 basic_block new_bb;
805 new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
806 add_referenced_var (new_var);
807 init_stmt = gimple_build_assign (new_var, vector_var);
808 new_temp = make_ssa_name (new_var, init_stmt);
809 gimple_assign_set_lhs (init_stmt, new_temp);
811 if (gsi)
812 vect_finish_stmt_generation (stmt, init_stmt, gsi);
813 else
815 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
817 if (loop_vinfo)
819 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
821 if (nested_in_vect_loop_p (loop, stmt))
822 loop = loop->inner;
824 pe = loop_preheader_edge (loop);
825 new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
826 gcc_assert (!new_bb);
828 else
830 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
831 basic_block bb;
832 gimple_stmt_iterator gsi_bb_start;
834 gcc_assert (bb_vinfo);
835 bb = BB_VINFO_BB (bb_vinfo);
836 gsi_bb_start = gsi_after_labels (bb);
837 gsi_insert_before (&gsi_bb_start, init_stmt, GSI_SAME_STMT);
841 if (vect_print_dump_info (REPORT_DETAILS))
843 fprintf (vect_dump, "created new init_stmt: ");
844 print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
847 vec_oprnd = gimple_assign_lhs (init_stmt);
848 return vec_oprnd;
852 /* Function vect_get_vec_def_for_operand.
854 OP is an operand in STMT. This function returns a (vector) def that will be
855 used in the vectorized stmt for STMT.
857 In the case that OP is an SSA_NAME which is defined in the loop, then
858 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
860 In case OP is an invariant or constant, a new stmt that creates a vector def
861 needs to be introduced. */
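/* For example, for  x_1 = y_2 + z_3  where y_2 is defined by a stmt that
   has already been vectorized, the def for y_2 comes from the
   STMT_VINFO_VEC_STMT of its defining stmt; if z_3 is loop-invariant, a
   new  vec_inv = { z_3, ..., z_3 }  is created in the preheader.  */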
863 tree
864 vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
866 tree vec_oprnd;
867 gimple vec_stmt;
868 gimple def_stmt;
869 stmt_vec_info def_stmt_info = NULL;
870 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
871 tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
872 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
873 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
874 tree vec_inv;
875 tree vec_cst;
876 tree t = NULL_TREE;
877 tree def;
878 int i;
879 enum vect_def_type dt;
880 bool is_simple_use;
881 tree vector_type;
883 if (vect_print_dump_info (REPORT_DETAILS))
885 fprintf (vect_dump, "vect_get_vec_def_for_operand: ");
886 print_generic_expr (vect_dump, op, TDF_SLIM);
889 is_simple_use = vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def,
890 &dt);
891 gcc_assert (is_simple_use);
892 if (vect_print_dump_info (REPORT_DETAILS))
894 if (def)
896 fprintf (vect_dump, "def = ");
897 print_generic_expr (vect_dump, def, TDF_SLIM);
899 if (def_stmt)
901 fprintf (vect_dump, " def_stmt = ");
902 print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
906 switch (dt)
908 /* Case 1: operand is a constant. */
909 case vect_constant_def:
911 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
912 gcc_assert (vector_type);
914 if (scalar_def)
915 *scalar_def = op;
917 /* Create 'vect_cst_ = {cst,cst,...,cst}' */
918 if (vect_print_dump_info (REPORT_DETAILS))
919 fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits);
921 for (i = nunits - 1; i >= 0; --i)
923 t = tree_cons (NULL_TREE, op, t);
925 vec_cst = build_vector (vector_type, t);
926 return vect_init_vector (stmt, vec_cst, vector_type, NULL);
929 /* Case 2: operand is defined outside the loop - loop invariant. */
930 case vect_external_def:
932 vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
933 gcc_assert (vector_type);
934 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
936 if (scalar_def)
937 *scalar_def = def;
939 /* Create 'vec_inv = {inv,inv,..,inv}' */
940 if (vect_print_dump_info (REPORT_DETAILS))
941 fprintf (vect_dump, "Create vector_inv.");
943 for (i = nunits - 1; i >= 0; --i)
945 t = tree_cons (NULL_TREE, def, t);
948 /* FIXME: use build_constructor directly. */
949 vec_inv = build_constructor_from_list (vector_type, t);
950 return vect_init_vector (stmt, vec_inv, vector_type, NULL);
953 /* Case 3: operand is defined inside the loop. */
954 case vect_internal_def:
956 if (scalar_def)
957 *scalar_def = NULL/* FIXME tuples: def_stmt*/;
959 /* Get the def from the vectorized stmt. */
960 def_stmt_info = vinfo_for_stmt (def_stmt);
961 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
962 gcc_assert (vec_stmt);
963 if (gimple_code (vec_stmt) == GIMPLE_PHI)
964 vec_oprnd = PHI_RESULT (vec_stmt);
965 else if (is_gimple_call (vec_stmt))
966 vec_oprnd = gimple_call_lhs (vec_stmt);
967 else
968 vec_oprnd = gimple_assign_lhs (vec_stmt);
969 return vec_oprnd;
972 /* Case 4: operand is defined by a loop header phi - reduction */
973 case vect_reduction_def:
975 struct loop *loop;
977 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
978 loop = (gimple_bb (def_stmt))->loop_father;
980 /* Get the def before the loop */
981 op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
982 return get_initial_def_for_reduction (stmt, op, scalar_def);
985 /* Case 5: operand is defined by loop-header phi - induction. */
986 case vect_induction_def:
988 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
990 /* Get the def from the vectorized stmt. */
991 def_stmt_info = vinfo_for_stmt (def_stmt);
992 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
993 gcc_assert (vec_stmt && gimple_code (vec_stmt) == GIMPLE_PHI);
994 vec_oprnd = PHI_RESULT (vec_stmt);
995 return vec_oprnd;
998 default:
999 gcc_unreachable ();
1004 /* Function vect_get_vec_def_for_stmt_copy
1006 Return a vector-def for an operand. This function is used when the
1007 vectorized stmt to be created (by the caller to this function) is a "copy"
1008 created in case the vectorized result cannot fit in one vector, and several
1009 copies of the vector-stmt are required. In this case the vector-def is
1010 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1011 of the stmt that defines VEC_OPRND.
1012 DT is the type of the vector def VEC_OPRND.
1014 Context:
1015 In case the vectorization factor (VF) is bigger than the number
1016 of elements that can fit in a vectype (nunits), we have to generate
1017 more than one vector stmt to vectorize the scalar stmt. This situation
1018 arises when there are multiple data-types operated upon in the loop; the
1019 smallest data-type determines the VF, and as a result, when vectorizing
1020 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1021 vector stmt (each computing a vector of 'nunits' results, and together
1022 computing 'VF' results in each iteration). This function is called when
1023 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1024 which VF=16 and nunits=4, so the number of copies required is 4):
1026 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1028 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1029 VS1.1: vx.1 = memref1 VS1.2
1030 VS1.2: vx.2 = memref2 VS1.3
1031 VS1.3: vx.3 = memref3
1033 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1034 VSnew.1: vz1 = vx.1 + ... VSnew.2
1035 VSnew.2: vz2 = vx.2 + ... VSnew.3
1036 VSnew.3: vz3 = vx.3 + ...
1038 The vectorization of S1 is explained in vectorizable_load.
1039 The vectorization of S2:
1040 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1041 the function 'vect_get_vec_def_for_operand' is called to
1042 get the relevant vector-def for each operand of S2. For operand x it
1043 returns the vector-def 'vx.0'.
1045 To create the remaining copies of the vector-stmt (VSnew.j), this
1046 function is called to get the relevant vector-def for each operand. It is
1047 obtained from the respective VS1.j stmt, which is recorded in the
1048 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1050 For example, to obtain the vector-def 'vx.1' in order to create the
1051 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1052 Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
1053 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1054 and return its def ('vx.1').
1055 Overall, to create the above sequence this function will be called 3 times:
1056 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1057 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1058 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1060 tree
1061 vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1063 gimple vec_stmt_for_operand;
1064 stmt_vec_info def_stmt_info;
1066 /* Do nothing; can reuse same def. */
1067 if (dt == vect_external_def || dt == vect_constant_def )
1068 return vec_oprnd;
1070 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1071 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1072 gcc_assert (def_stmt_info);
1073 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1074 gcc_assert (vec_stmt_for_operand);
1075 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1076 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1077 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1078 else
1079 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1080 return vec_oprnd;
1084 /* Get vectorized definitions for the operands to create a copy of an original
1085 stmt. See vect_get_vec_def_for_stmt_copy() for details. */
1087 static void
1088 vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
1089 VEC(tree,heap) **vec_oprnds0,
1090 VEC(tree,heap) **vec_oprnds1)
1092 tree vec_oprnd = VEC_pop (tree, *vec_oprnds0);
1094 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
1095 VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
1097 if (vec_oprnds1 && *vec_oprnds1)
1099 vec_oprnd = VEC_pop (tree, *vec_oprnds1);
1100 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
1101 VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
1106 /* Get vectorized definitions for OP0 and OP1, or SLP_NODE if it is not NULL. */
1108 static void
1109 vect_get_vec_defs (tree op0, tree op1, gimple stmt,
1110 VEC(tree,heap) **vec_oprnds0, VEC(tree,heap) **vec_oprnds1,
1111 slp_tree slp_node)
1113 if (slp_node)
1114 vect_get_slp_defs (slp_node, vec_oprnds0, vec_oprnds1);
1115 else
1117 tree vec_oprnd;
1119 *vec_oprnds0 = VEC_alloc (tree, heap, 1);
1120 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
1121 VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
1123 if (op1)
1125 *vec_oprnds1 = VEC_alloc (tree, heap, 1);
1126 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
1127 VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
1133 /* Function vect_finish_stmt_generation.
1135 Insert a new stmt. */
1137 void
1138 vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
1139 gimple_stmt_iterator *gsi)
1141 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1142 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1143 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1145 gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
1147 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1149 set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
1150 bb_vinfo));
1152 if (vect_print_dump_info (REPORT_DETAILS))
1154 fprintf (vect_dump, "add new stmt: ");
1155 print_gimple_stmt (vect_dump, vec_stmt, 0, TDF_SLIM);
1158 gimple_set_location (vec_stmt, gimple_location (gsi_stmt (*gsi)));
1161 /* Checks if CALL can be vectorized in type VECTYPE. Returns
1162 a function declaration if the target has a vectorized version
1163 of the function, or NULL_TREE if the function cannot be vectorized. */
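/* For example, a call to a math builtin such as sqrt may be replaced by
   the vectorized variant the target advertises through
   targetm.vectorize.builtin_vectorized_function, if any.  */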
1165 tree
1166 vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
1168 tree fndecl = gimple_call_fndecl (call);
1169 enum built_in_function code;
1171 /* We only handle functions that do not read or clobber memory -- i.e.
1172 const or novops ones. */
1173 if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
1174 return NULL_TREE;
1176 if (!fndecl
1177 || TREE_CODE (fndecl) != FUNCTION_DECL
1178 || !DECL_BUILT_IN (fndecl))
1179 return NULL_TREE;
1181 code = DECL_FUNCTION_CODE (fndecl);
1182 return targetm.vectorize.builtin_vectorized_function (code, vectype_out,
1183 vectype_in);
1186 /* Function vectorizable_call.
1188 Check if STMT performs a function call that can be vectorized.
1189 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1190 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1191 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1193 static bool
1194 vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
1196 tree vec_dest;
1197 tree scalar_dest;
1198 tree op, type;
1199 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1200 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
1201 tree vectype_out, vectype_in;
1202 int nunits_in;
1203 int nunits_out;
1204 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1205 tree fndecl, new_temp, def, rhs_type, lhs_type;
1206 gimple def_stmt;
1207 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1208 gimple new_stmt;
1209 int ncopies, j;
1210 VEC(tree, heap) *vargs = NULL;
1211 enum { NARROW, NONE, WIDEN } modifier;
1212 size_t i, nargs;
1214 /* FORNOW: unsupported in basic block SLP. */
1215 gcc_assert (loop_vinfo);
1217 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1218 return false;
1220 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1221 return false;
1223 /* FORNOW: SLP not supported. */
1224 if (STMT_SLP_TYPE (stmt_info))
1225 return false;
1227 /* Is STMT a vectorizable call? */
1228 if (!is_gimple_call (stmt))
1229 return false;
1231 if (TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
1232 return false;
1234 /* Process function arguments. */
1235 rhs_type = NULL_TREE;
1236 nargs = gimple_call_num_args (stmt);
 1238   /* Bail out if the function has more than two arguments; we
1239 do not have interesting builtin functions to vectorize with
1240 more than two arguments. No arguments is also not good. */
1241 if (nargs == 0 || nargs > 2)
1242 return false;
1244 for (i = 0; i < nargs; i++)
1246 op = gimple_call_arg (stmt, i);
1248 /* We can only handle calls with arguments of the same type. */
1249 if (rhs_type
1250 && rhs_type != TREE_TYPE (op))
1252 if (vect_print_dump_info (REPORT_DETAILS))
1253 fprintf (vect_dump, "argument types differ.");
1254 return false;
1256 rhs_type = TREE_TYPE (op);
1258 if (!vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def, &dt[i]))
1260 if (vect_print_dump_info (REPORT_DETAILS))
1261 fprintf (vect_dump, "use not simple.");
1262 return false;
1266 vectype_in = get_vectype_for_scalar_type (rhs_type);
1267 if (!vectype_in)
1268 return false;
1269 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1271 lhs_type = TREE_TYPE (gimple_call_lhs (stmt));
1272 vectype_out = get_vectype_for_scalar_type (lhs_type);
1273 if (!vectype_out)
1274 return false;
1275 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1277 /* FORNOW */
1278 if (nunits_in == nunits_out / 2)
1279 modifier = NARROW;
1280 else if (nunits_out == nunits_in)
1281 modifier = NONE;
1282 else if (nunits_out == nunits_in / 2)
1283 modifier = WIDEN;
1284 else
1285 return false;
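  /* For example, with V2DF arguments (nunits_in == 2) and a V4SF result
     (nunits_out == 4) the call is a narrowing operation (NARROW); equal
     unit counts give NONE.  */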
1287 /* For now, we only vectorize functions if a target specific builtin
1288 is available. TODO -- in some cases, it might be profitable to
1289 insert the calls for pieces of the vector, in order to be able
1290 to vectorize other operations in the loop. */
1291 fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
1292 if (fndecl == NULL_TREE)
1294 if (vect_print_dump_info (REPORT_DETAILS))
1295 fprintf (vect_dump, "function is not vectorizable.");
1297 return false;
1300 gcc_assert (!gimple_vuse (stmt));
1302 if (modifier == NARROW)
1303 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1304 else
1305 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1307 /* Sanity check: make sure that at least one copy of the vectorized stmt
1308 needs to be generated. */
1309 gcc_assert (ncopies >= 1);
1311 if (!vec_stmt) /* transformation not required. */
1313 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
1314 if (vect_print_dump_info (REPORT_DETAILS))
1315 fprintf (vect_dump, "=== vectorizable_call ===");
1316 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1317 return true;
1320 /** Transform. **/
1322 if (vect_print_dump_info (REPORT_DETAILS))
1323 fprintf (vect_dump, "transform operation.");
1325 /* Handle def. */
1326 scalar_dest = gimple_call_lhs (stmt);
1327 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1329 prev_stmt_info = NULL;
1330 switch (modifier)
1332 case NONE:
1333 for (j = 0; j < ncopies; ++j)
1335 /* Build argument list for the vectorized call. */
1336 if (j == 0)
1337 vargs = VEC_alloc (tree, heap, nargs);
1338 else
1339 VEC_truncate (tree, vargs, 0);
1341 for (i = 0; i < nargs; i++)
1343 op = gimple_call_arg (stmt, i);
1344 if (j == 0)
1345 vec_oprnd0
1346 = vect_get_vec_def_for_operand (op, stmt, NULL);
1347 else
1348 vec_oprnd0
1349 = vect_get_vec_def_for_stmt_copy (dt[nargs], vec_oprnd0);
1351 VEC_quick_push (tree, vargs, vec_oprnd0);
1354 new_stmt = gimple_build_call_vec (fndecl, vargs);
1355 new_temp = make_ssa_name (vec_dest, new_stmt);
1356 gimple_call_set_lhs (new_stmt, new_temp);
1358 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1360 if (j == 0)
1361 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1362 else
1363 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1365 prev_stmt_info = vinfo_for_stmt (new_stmt);
1368 break;
1370 case NARROW:
1371 for (j = 0; j < ncopies; ++j)
1373 /* Build argument list for the vectorized call. */
1374 if (j == 0)
1375 vargs = VEC_alloc (tree, heap, nargs * 2);
1376 else
1377 VEC_truncate (tree, vargs, 0);
1379 for (i = 0; i < nargs; i++)
1381 op = gimple_call_arg (stmt, i);
1382 if (j == 0)
1384 vec_oprnd0
1385 = vect_get_vec_def_for_operand (op, stmt, NULL);
1386 vec_oprnd1
1387 = vect_get_vec_def_for_stmt_copy (dt[nargs], vec_oprnd0);
1389 else
1391 vec_oprnd0
1392 = vect_get_vec_def_for_stmt_copy (dt[nargs], vec_oprnd1);
1393 vec_oprnd1
1394 = vect_get_vec_def_for_stmt_copy (dt[nargs], vec_oprnd0);
1397 VEC_quick_push (tree, vargs, vec_oprnd0);
1398 VEC_quick_push (tree, vargs, vec_oprnd1);
1401 new_stmt = gimple_build_call_vec (fndecl, vargs);
1402 new_temp = make_ssa_name (vec_dest, new_stmt);
1403 gimple_call_set_lhs (new_stmt, new_temp);
1405 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1407 if (j == 0)
1408 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1409 else
1410 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1412 prev_stmt_info = vinfo_for_stmt (new_stmt);
1415 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1417 break;
1419 case WIDEN:
1420 /* No current target implements this case. */
1421 return false;
1424 VEC_free (tree, heap, vargs);
1426 /* Update the exception handling table with the vector stmt if necessary. */
1427 if (maybe_clean_or_replace_eh_stmt (stmt, *vec_stmt))
1428 gimple_purge_dead_eh_edges (gimple_bb (stmt));
1430 /* The call in STMT might prevent it from being removed in dce.
1431 We however cannot remove it here, due to the way the ssa name
1432 it defines is mapped to the new definition. So just replace
1433 rhs of the statement with something harmless. */
1435 type = TREE_TYPE (scalar_dest);
1436 new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
1437 fold_convert (type, integer_zero_node));
1438 set_vinfo_for_stmt (new_stmt, stmt_info);
1439 set_vinfo_for_stmt (stmt, NULL);
1440 STMT_VINFO_STMT (stmt_info) = new_stmt;
1441 gsi_replace (gsi, new_stmt, false);
1442 SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
1444 return true;
1448 /* Function vect_gen_widened_results_half
1450 Create a vector stmt whose code, type, number of arguments, and result
1451 variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
1452 VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at BSI.
1453 In the case that CODE is a CALL_EXPR, this means that a call to DECL
1454 needs to be created (DECL is a function-decl of a target-builtin).
1455 STMT is the original scalar stmt that we are vectorizing. */
1457 static gimple
1458 vect_gen_widened_results_half (enum tree_code code,
1459 tree decl,
1460 tree vec_oprnd0, tree vec_oprnd1, int op_type,
1461 tree vec_dest, gimple_stmt_iterator *gsi,
1462 gimple stmt)
1464 gimple new_stmt;
1465 tree new_temp;
1467 /* Generate half of the widened result: */
1468 if (code == CALL_EXPR)
1470 /* Target specific support */
1471 if (op_type == binary_op)
1472 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
1473 else
1474 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
1475 new_temp = make_ssa_name (vec_dest, new_stmt);
1476 gimple_call_set_lhs (new_stmt, new_temp);
1478 else
1480 /* Generic support */
1481 gcc_assert (op_type == TREE_CODE_LENGTH (code));
1482 if (op_type != binary_op)
1483 vec_oprnd1 = NULL;
1484 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
1485 vec_oprnd1);
1486 new_temp = make_ssa_name (vec_dest, new_stmt);
1487 gimple_assign_set_lhs (new_stmt, new_temp);
1489 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1491 return new_stmt;
1495 /* Check if STMT performs a conversion operation, that can be vectorized.
1496 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1497 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1498 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1500 static bool
1501 vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
1502 gimple *vec_stmt, slp_tree slp_node)
1504 tree vec_dest;
1505 tree scalar_dest;
1506 tree op0;
1507 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1508 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1509 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1510 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
1511 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
1512 tree new_temp;
1513 tree def;
1514 gimple def_stmt;
1515 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1516 gimple new_stmt = NULL;
1517 stmt_vec_info prev_stmt_info;
1518 int nunits_in;
1519 int nunits_out;
1520 tree vectype_out, vectype_in;
1521 int ncopies, j;
1522 tree expr;
1523 tree rhs_type, lhs_type;
1524 tree builtin_decl;
1525 enum { NARROW, NONE, WIDEN } modifier;
1526 int i;
1527 VEC(tree,heap) *vec_oprnds0 = NULL;
1528 tree vop0;
1529 tree integral_type;
1530 VEC(tree,heap) *dummy = NULL;
1531 int dummy_int;
1533 /* Is STMT a vectorizable conversion? */
1535 /* FORNOW: unsupported in basic block SLP. */
1536 gcc_assert (loop_vinfo);
1538 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1539 return false;
1541 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1542 return false;
1544 if (!is_gimple_assign (stmt))
1545 return false;
1547 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
1548 return false;
1550 code = gimple_assign_rhs_code (stmt);
1551 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
1552 return false;
1554 /* Check types of lhs and rhs. */
1555 op0 = gimple_assign_rhs1 (stmt);
1556 rhs_type = TREE_TYPE (op0);
1557 vectype_in = get_vectype_for_scalar_type (rhs_type);
1558 if (!vectype_in)
1559 return false;
1560 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1562 scalar_dest = gimple_assign_lhs (stmt);
1563 lhs_type = TREE_TYPE (scalar_dest);
1564 vectype_out = get_vectype_for_scalar_type (lhs_type);
1565 if (!vectype_out)
1566 return false;
1567 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1569 /* FORNOW */
1570 if (nunits_in == nunits_out / 2)
1571 modifier = NARROW;
1572 else if (nunits_out == nunits_in)
1573 modifier = NONE;
1574 else if (nunits_out == nunits_in / 2)
1575 modifier = WIDEN;
1576 else
1577 return false;
1579 if (modifier == NONE)
1580 gcc_assert (STMT_VINFO_VECTYPE (stmt_info) == vectype_out);
1582 /* Bail out if the types are both integral or non-integral. */
1583 if ((INTEGRAL_TYPE_P (rhs_type) && INTEGRAL_TYPE_P (lhs_type))
1584 || (!INTEGRAL_TYPE_P (rhs_type) && !INTEGRAL_TYPE_P (lhs_type)))
1585 return false;
1587 integral_type = INTEGRAL_TYPE_P (rhs_type) ? vectype_in : vectype_out;
1589 if (modifier == NARROW)
1590 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1591 else
1592 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1594 /* FORNOW: SLP with multiple types is not supported. The SLP analysis verifies
1595 this, so we can safely override NCOPIES with 1 here. */
1596 if (slp_node)
1597 ncopies = 1;
1599 /* Sanity check: make sure that at least one copy of the vectorized stmt
1600 needs to be generated. */
1601 gcc_assert (ncopies >= 1);
1603 /* Check the operands of the operation. */
1604 if (!vect_is_simple_use (op0, loop_vinfo, NULL, &def_stmt, &def, &dt[0]))
1606 if (vect_print_dump_info (REPORT_DETAILS))
1607 fprintf (vect_dump, "use not simple.");
1608 return false;
1611 /* Supportable by target? */
1612 if ((modifier == NONE
1613 && !targetm.vectorize.builtin_conversion (code, integral_type))
1614 || (modifier == WIDEN
1615 && !supportable_widening_operation (code, stmt, vectype_in,
1616 &decl1, &decl2,
1617 &code1, &code2,
1618 &dummy_int, &dummy))
1619 || (modifier == NARROW
1620 && !supportable_narrowing_operation (code, stmt, vectype_in,
1621 &code1, &dummy_int, &dummy)))
1623 if (vect_print_dump_info (REPORT_DETAILS))
1624 fprintf (vect_dump, "conversion not supported by target.");
1625 return false;
1628 if (modifier != NONE)
1630 STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
1631 /* FORNOW: SLP not supported. */
1632 if (STMT_SLP_TYPE (stmt_info))
1633 return false;
1636 if (!vec_stmt) /* transformation not required. */
1638 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
1639 return true;
1642 /** Transform. **/
1643 if (vect_print_dump_info (REPORT_DETAILS))
1644 fprintf (vect_dump, "transform conversion.");
1646 /* Handle def. */
1647 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1649 if (modifier == NONE && !slp_node)
1650 vec_oprnds0 = VEC_alloc (tree, heap, 1);
1652 prev_stmt_info = NULL;
1653 switch (modifier)
1655 case NONE:
1656 for (j = 0; j < ncopies; j++)
1658 if (j == 0)
1659 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
1660 else
1661 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
1663 builtin_decl =
1664 targetm.vectorize.builtin_conversion (code, integral_type);
1665 for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vop0); i++)
1667 /* Arguments are ready. create the new vector stmt. */
1668 new_stmt = gimple_build_call (builtin_decl, 1, vop0);
1669 new_temp = make_ssa_name (vec_dest, new_stmt);
1670 gimple_call_set_lhs (new_stmt, new_temp);
1671 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1672 if (slp_node)
1673 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
1676 if (j == 0)
1677 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1678 else
1679 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1680 prev_stmt_info = vinfo_for_stmt (new_stmt);
1682 break;
1684 case WIDEN:
1685 /* In case the vectorization factor (VF) is bigger than the number
1686 of elements that we can fit in a vectype (nunits), we have to
1687 generate more than one vector stmt - i.e - we need to "unroll"
1688 the vector stmt by a factor VF/nunits. */
1689 for (j = 0; j < ncopies; j++)
1691 if (j == 0)
1692 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1693 else
1694 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1696 STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
1698 /* Generate first half of the widened result: */
1699 new_stmt
1700 = vect_gen_widened_results_half (code1, decl1,
1701 vec_oprnd0, vec_oprnd1,
1702 unary_op, vec_dest, gsi, stmt);
1703 if (j == 0)
1704 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1705 else
1706 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1707 prev_stmt_info = vinfo_for_stmt (new_stmt);
1709 /* Generate second half of the widened result: */
1710 new_stmt
1711 = vect_gen_widened_results_half (code2, decl2,
1712 vec_oprnd0, vec_oprnd1,
1713 unary_op, vec_dest, gsi, stmt);
1714 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1715 prev_stmt_info = vinfo_for_stmt (new_stmt);
1717 break;
1719 case NARROW:
1720 /* In case the vectorization factor (VF) is bigger than the number
1721 of elements that we can fit in a vectype (nunits), we have to
1722 generate more than one vector stmt - i.e - we need to "unroll"
1723 the vector stmt by a factor VF/nunits. */
1724 for (j = 0; j < ncopies; j++)
1726 /* Handle uses. */
1727 if (j == 0)
1729 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1730 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1732 else
1734 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1);
1735 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1738 /* Arguments are ready. Create the new vector stmt. */
1739 expr = build2 (code1, vectype_out, vec_oprnd0, vec_oprnd1);
1740 new_stmt = gimple_build_assign_with_ops (code1, vec_dest, vec_oprnd0,
1741 vec_oprnd1);
1742 new_temp = make_ssa_name (vec_dest, new_stmt);
1743 gimple_assign_set_lhs (new_stmt, new_temp);
1744 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1746 if (j == 0)
1747 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1748 else
1749 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1751 prev_stmt_info = vinfo_for_stmt (new_stmt);
1754 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1757 if (vec_oprnds0)
1758 VEC_free (tree, heap, vec_oprnds0);
1760 return true;
1762 /* Function vectorizable_assignment.
1764 Check if STMT performs an assignment (copy) that can be vectorized.
1765 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1766 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1767 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1769 static bool
1770 vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
1771 gimple *vec_stmt, slp_tree slp_node)
1773 tree vec_dest;
1774 tree scalar_dest;
1775 tree op;
1776 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1777 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1778 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1779 tree new_temp;
1780 tree def;
1781 gimple def_stmt;
1782 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1783 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
1784 int ncopies;
1785 int i;
1786 VEC(tree,heap) *vec_oprnds = NULL;
1787 tree vop;
1788 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1790 /* Multiple types in SLP are handled by creating the appropriate number of
1791 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1792 case of SLP. */
1793 if (slp_node)
1794 ncopies = 1;
1795 else
1796 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
1798 gcc_assert (ncopies >= 1);
1799 if (ncopies > 1)
1800 return false; /* FORNOW */
1802 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
1803 return false;
1805 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1806 return false;
1808 /* Is vectorizable assignment? */
1809 if (!is_gimple_assign (stmt))
1810 return false;
1812 scalar_dest = gimple_assign_lhs (stmt);
1813 if (TREE_CODE (scalar_dest) != SSA_NAME)
1814 return false;
1816 if (gimple_assign_single_p (stmt)
1817 || gimple_assign_rhs_code (stmt) == PAREN_EXPR)
1818 op = gimple_assign_rhs1 (stmt);
1819 else
1820 return false;
1822 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt[0]))
1824 if (vect_print_dump_info (REPORT_DETAILS))
1825 fprintf (vect_dump, "use not simple.");
1826 return false;
1829 if (!vec_stmt) /* transformation not required. */
1831 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
1832 if (vect_print_dump_info (REPORT_DETAILS))
1833 fprintf (vect_dump, "=== vectorizable_assignment ===");
1834 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1835 return true;
1838 /** Transform. **/
1839 if (vect_print_dump_info (REPORT_DETAILS))
1840 fprintf (vect_dump, "transform assignment.");
1842 /* Handle def. */
1843 vec_dest = vect_create_destination_var (scalar_dest, vectype);
1845 /* Handle use. */
1846 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
1848 /* Arguments are ready. create the new vector stmt. */
1849 for (i = 0; VEC_iterate (tree, vec_oprnds, i, vop); i++)
1851 *vec_stmt = gimple_build_assign (vec_dest, vop);
1852 new_temp = make_ssa_name (vec_dest, *vec_stmt);
1853 gimple_assign_set_lhs (*vec_stmt, new_temp);
1854 vect_finish_stmt_generation (stmt, *vec_stmt, gsi);
1855 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt;
1857 if (slp_node)
1858 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), *vec_stmt);
1861 VEC_free (tree, heap, vec_oprnds);
1862 return true;
1865 /* Function vectorizable_operation.
1867 Check if STMT performs a binary or unary operation that can be vectorized.
1868 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1869 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1870 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1872 static bool
1873 vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
1874 gimple *vec_stmt, slp_tree slp_node)
1876 tree vec_dest;
1877 tree scalar_dest;
1878 tree op0, op1 = NULL;
1879 tree vec_oprnd1 = NULL_TREE;
1880 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1881 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1882 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1883 enum tree_code code;
1884 enum machine_mode vec_mode;
1885 tree new_temp;
1886 int op_type;
1887 optab optab;
1888 int icode;
1889 enum machine_mode optab_op2_mode;
1890 tree def;
1891 gimple def_stmt;
1892 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1893 gimple new_stmt = NULL;
1894 stmt_vec_info prev_stmt_info;
1895 int nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
1896 int nunits_out;
1897 tree vectype_out;
1898 int ncopies;
1899 int j, i;
1900 VEC(tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
1901 tree vop0, vop1;
1902 unsigned int k;
1903 bool shift_p = false;
1904 bool scalar_shift_arg = false;
1905 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1906 int vf;
1908 if (loop_vinfo)
1909 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1910 else
1911 /* FORNOW: multiple types are not supported in basic block SLP. */
1912 vf = nunits_in;
1914 /* Multiple types in SLP are handled by creating the appropriate number of
1915 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1916 case of SLP. */
1917 if (slp_node)
1918 ncopies = 1;
1919 else
1920 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1922 gcc_assert (ncopies >= 1);
1924 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
1925 return false;
1927 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1928 return false;
1930 /* Is STMT a vectorizable binary/unary operation? */
1931 if (!is_gimple_assign (stmt))
1932 return false;
1934 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
1935 return false;
1937 scalar_dest = gimple_assign_lhs (stmt);
1938 vectype_out = get_vectype_for_scalar_type (TREE_TYPE (scalar_dest));
1939 if (!vectype_out)
1940 return false;
1941 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1942 if (nunits_out != nunits_in)
1943 return false;
1945 code = gimple_assign_rhs_code (stmt);
1947 /* For pointer addition, we should use the normal plus for
1948 the vector addition. */
1949 if (code == POINTER_PLUS_EXPR)
1950 code = PLUS_EXPR;
1952 /* Support only unary or binary operations. */
1953 op_type = TREE_CODE_LENGTH (code);
1954 if (op_type != unary_op && op_type != binary_op)
1956 if (vect_print_dump_info (REPORT_DETAILS))
1957 fprintf (vect_dump, "num. args = %d (not unary/binary op).", op_type);
1958 return false;
1961 op0 = gimple_assign_rhs1 (stmt);
1962 if (!vect_is_simple_use (op0, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt[0]))
1964 if (vect_print_dump_info (REPORT_DETAILS))
1965 fprintf (vect_dump, "use not simple.");
1966 return false;
1969 if (op_type == binary_op)
1971 op1 = gimple_assign_rhs2 (stmt);
1972 if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def,
1973 &dt[1]))
1975 if (vect_print_dump_info (REPORT_DETAILS))
1976 fprintf (vect_dump, "use not simple.");
1977 return false;
1981 /* If this is a shift/rotate, determine whether the shift amount is a vector,
1982 or scalar. If the shift/rotate amount is a vector, use the vector/vector
1983 shift optabs. */
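/* For example, in "a[i] << b[i]" the shift amount is defined inside the
   loop (vect_internal_def), so the vector/vector optab is required; in
   "a[i] << n" with a loop-invariant or constant n (vect_external_def or
   vect_constant_def) the vector/scalar form is tried first below.  */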
1984 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
1985 || code == RROTATE_EXPR)
1987 shift_p = true;
1989 /* vector shifted by vector */
1990 if (dt[1] == vect_internal_def)
1992 optab = optab_for_tree_code (code, vectype, optab_vector);
1993 if (vect_print_dump_info (REPORT_DETAILS))
1994 fprintf (vect_dump, "vector/vector shift/rotate found.");
1997 /* See if the machine has a vector shifted by scalar insn, and if not,
1998 then see if it has a vector shifted by vector insn. */
1999 else if (dt[1] == vect_constant_def || dt[1] == vect_external_def)
2001 optab = optab_for_tree_code (code, vectype, optab_scalar);
2002 if (optab
2003 && (optab_handler (optab, TYPE_MODE (vectype))->insn_code
2004 != CODE_FOR_nothing))
2006 scalar_shift_arg = true;
2007 if (vect_print_dump_info (REPORT_DETAILS))
2008 fprintf (vect_dump, "vector/scalar shift/rotate found.");
2010 else
2012 optab = optab_for_tree_code (code, vectype, optab_vector);
2013 if (optab
2014 && (optab_handler (optab, TYPE_MODE (vectype))->insn_code
2015 != CODE_FOR_nothing))
2017 if (vect_print_dump_info (REPORT_DETAILS))
2018 fprintf (vect_dump, "vector/vector shift/rotate found.");
2020 /* Unlike the other binary operators, shifts/rotates have
2021 the rhs being int, instead of the same type as the lhs,
2022 so make sure the scalar is the right type if we are
2023 dealing with vectors of short/char. */
2024 if (dt[1] == vect_constant_def)
2025 op1 = fold_convert (TREE_TYPE (vectype), op1);
2030 else
2032 if (vect_print_dump_info (REPORT_DETAILS))
2033 fprintf (vect_dump, "operand mode requires invariant argument.");
2034 return false;
2037 else
2038 optab = optab_for_tree_code (code, vectype, optab_default);
2040 /* Supportable by target? */
2041 if (!optab)
2043 if (vect_print_dump_info (REPORT_DETAILS))
2044 fprintf (vect_dump, "no optab.");
2045 return false;
2047 vec_mode = TYPE_MODE (vectype);
2048 icode = (int) optab_handler (optab, vec_mode)->insn_code;
2049 if (icode == CODE_FOR_nothing)
2051 if (vect_print_dump_info (REPORT_DETAILS))
2052 fprintf (vect_dump, "op not supported by target.");
2053 /* Check only during analysis. */
2054 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
2055 || (vf < vect_min_worthwhile_factor (code)
2056 && !vec_stmt))
2057 return false;
2058 if (vect_print_dump_info (REPORT_DETAILS))
2059 fprintf (vect_dump, "proceeding using word mode.");
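/* We only get here when the whole vector fits in one word
   (GET_MODE_SIZE (vec_mode) == UNITS_PER_WORD), e.g. an 8-byte vector
   mode on a target with 8-byte words; larger vectors were rejected
   above.  */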
2062 /* Worthwhile without SIMD support? Check only during analysis. */
2063 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2064 && vf < vect_min_worthwhile_factor (code)
2065 && !vec_stmt)
2067 if (vect_print_dump_info (REPORT_DETAILS))
2068 fprintf (vect_dump, "not worthwhile without SIMD support.");
2069 return false;
2072 if (!vec_stmt) /* transformation not required. */
2074 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
2075 if (vect_print_dump_info (REPORT_DETAILS))
2076 fprintf (vect_dump, "=== vectorizable_operation ===");
2077 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2078 return true;
2081 /** Transform. **/
2083 if (vect_print_dump_info (REPORT_DETAILS))
2084 fprintf (vect_dump, "transform binary/unary operation.");
2086 /* Handle def. */
2087 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2089 /* Allocate VECs for vector operands. In case of SLP, vector operands are
2090 created in the previous stages of the recursion, so no allocation is
2091 needed, except for the case of shift with scalar shift argument. In that
2092 case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
2093 be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
2094 In case of loop-based vectorization we allocate VECs of size 1. We
2095 allocate VEC_OPRNDS1 only in case of binary operation. */
2096 if (!slp_node)
2098 vec_oprnds0 = VEC_alloc (tree, heap, 1);
2099 if (op_type == binary_op)
2100 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2102 else if (scalar_shift_arg)
2103 vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);
2105 /* In case the vectorization factor (VF) is bigger than the number
2106 of elements that we can fit in a vectype (nunits), we have to generate
2107 more than one vector stmt - i.e - we need to "unroll" the
2108 vector stmt by a factor VF/nunits. In doing so, we record a pointer
2109 from one copy of the vector stmt to the next, in the field
2110 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
2111 stages to find the correct vector defs to be used when vectorizing
2112 stmts that use the defs of the current stmt. The example below illustrates
2113 the vectorization process when VF=16 and nunits=4 (i.e - we need to create
2114 4 vectorized stmts):
2116 before vectorization:
2117 RELATED_STMT VEC_STMT
2118 S1: x = memref - -
2119 S2: z = x + 1 - -
2121 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
2122 there):
2123 RELATED_STMT VEC_STMT
2124 VS1_0: vx0 = memref0 VS1_1 -
2125 VS1_1: vx1 = memref1 VS1_2 -
2126 VS1_2: vx2 = memref2 VS1_3 -
2127 VS1_3: vx3 = memref3 - -
2128 S1: x = load - VS1_0
2129 S2: z = x + 1 - -
2131 step2: vectorize stmt S2 (done here):
2132 To vectorize stmt S2 we first need to find the relevant vector
2133 def for the first operand 'x'. This is, as usual, obtained from
2134 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
2135 that defines 'x' (S1). This way we find the stmt VS1_0, and the
2136 relevant vector def 'vx0'. Having found 'vx0' we can generate
2137 the vector stmt VS2_0, and as usual, record it in the
2138 STMT_VINFO_VEC_STMT of stmt S2.
2139 When creating the second copy (VS2_1), we obtain the relevant vector
2140 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
2141 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
2142 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
2143 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
2144 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
2145 chain of stmts and pointers:
2146 RELATED_STMT VEC_STMT
2147 VS1_0: vx0 = memref0 VS1_1 -
2148 VS1_1: vx1 = memref1 VS1_2 -
2149 VS1_2: vx2 = memref2 VS1_3 -
2150 VS1_3: vx3 = memref3 - -
2151 S1: x = load - VS1_0
2152 VS2_0: vz0 = vx0 + v1 VS2_1 -
2153 VS2_1: vz1 = vx1 + v1 VS2_2 -
2154 VS2_2: vz2 = vx2 + v1 VS2_3 -
2155 VS2_3: vz3 = vx3 + v1 - -
2156 S2: z = x + 1 - VS2_0 */
2158 prev_stmt_info = NULL;
2159 for (j = 0; j < ncopies; j++)
2161 /* Handle uses. */
2162 if (j == 0)
2164 if (op_type == binary_op && scalar_shift_arg)
2166 /* Vector shl and shr insn patterns can be defined with scalar
2167 operand 2 (shift operand). In this case, use constant or loop
2168 invariant op1 directly, without extending it to vector mode
2169 first. */
2170 optab_op2_mode = insn_data[icode].operand[2].mode;
2171 if (!VECTOR_MODE_P (optab_op2_mode))
2173 if (vect_print_dump_info (REPORT_DETAILS))
2174 fprintf (vect_dump, "operand 1 using scalar mode.");
2175 vec_oprnd1 = op1;
2176 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2177 if (slp_node)
2179 /* Store vec_oprnd1 for every vector stmt to be created
2180 for SLP_NODE. We check during the analysis that all the
2181 shift arguments are the same.
2182 TODO: Allow different constants for different vector
2183 stmts generated for an SLP instance. */
2184 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
2185 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2190 /* vec_oprnd1 is available if operand 1 should be of a scalar type
2191 (a special case for certain kinds of vector shifts); otherwise,
2192 operand 1 should be of a vector type (the usual case). */
2193 if (op_type == binary_op && !vec_oprnd1)
2194 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
2195 slp_node);
2196 else
2197 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
2198 slp_node);
2200 else
2201 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
2203 /* Arguments are ready. Create the new vector stmt. */
2204 for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vop0); i++)
2206 vop1 = ((op_type == binary_op)
2207 ? VEC_index (tree, vec_oprnds1, i) : NULL);
2208 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2209 new_temp = make_ssa_name (vec_dest, new_stmt);
2210 gimple_assign_set_lhs (new_stmt, new_temp);
2211 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2212 if (slp_node)
2213 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2216 if (slp_node)
2217 continue;
2219 if (j == 0)
2220 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2221 else
2222 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2223 prev_stmt_info = vinfo_for_stmt (new_stmt);
2226 VEC_free (tree, heap, vec_oprnds0);
2227 if (vec_oprnds1)
2228 VEC_free (tree, heap, vec_oprnds1);
2230 return true;
2234 /* Get vectorized definitions for loop-based vectorization. For the first
2235 operand we call vect_get_vec_def_for_operand() (with OPRND containing
2236 scalar operand), and for the rest we get a copy with
2237 vect_get_vec_def_for_stmt_copy() using the previous vector definition
2238 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
2239 The vectors are collected into VEC_OPRNDS. */
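/* For example, when called with MULTI_STEP_CVT == 1 this function recurses
   once, pushing two vector defs at each level, so four defs end up in
   VEC_OPRNDS.  */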
2241 static void
2242 vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
2243 VEC (tree, heap) **vec_oprnds, int multi_step_cvt)
2245 tree vec_oprnd;
2247 /* Get first vector operand. */
2248 /* All the vector operands except the very first one (that is scalar oprnd)
2249 are stmt copies. */
2250 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
2251 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
2252 else
2253 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
2255 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2257 /* Get second vector operand. */
2258 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
2259 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2261 *oprnd = vec_oprnd;
2263 /* For conversion in multiple steps, continue to get operands
2264 recursively. */
2265 if (multi_step_cvt)
2266 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
2270 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
2271 For multi-step conversions store the resulting vectors and call the function
2272 recursively. */
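/* Each stmt created below packs a pair of input vectors (VOP0, VOP1) into
   a single vector of the next narrower type - e.g. two V4SI operands into
   one V8HI result - so the number of live vectors is halved at every
   step.  */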
2274 static void
2275 vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
2276 int multi_step_cvt, gimple stmt,
2277 VEC (tree, heap) *vec_dsts,
2278 gimple_stmt_iterator *gsi,
2279 slp_tree slp_node, enum tree_code code,
2280 stmt_vec_info *prev_stmt_info)
2282 unsigned int i;
2283 tree vop0, vop1, new_tmp, vec_dest;
2284 gimple new_stmt;
2285 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2287 vec_dest = VEC_pop (tree, vec_dsts);
2289 for (i = 0; i < VEC_length (tree, *vec_oprnds); i += 2)
2291 /* Create demotion operation. */
2292 vop0 = VEC_index (tree, *vec_oprnds, i);
2293 vop1 = VEC_index (tree, *vec_oprnds, i + 1);
2294 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2295 new_tmp = make_ssa_name (vec_dest, new_stmt);
2296 gimple_assign_set_lhs (new_stmt, new_tmp);
2297 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2299 if (multi_step_cvt)
2300 /* Store the resulting vector for next recursive call. */
2301 VEC_replace (tree, *vec_oprnds, i/2, new_tmp);
2302 else
2304 /* This is the last step of the conversion sequence. Store the
2305 vectors in SLP_NODE or in vector info of the scalar statement
2306 (or in STMT_VINFO_RELATED_STMT chain). */
2307 if (slp_node)
2308 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2309 else
2311 if (!*prev_stmt_info)
2312 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2313 else
2314 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
2316 *prev_stmt_info = vinfo_for_stmt (new_stmt);
2321 /* For multi-step demotion operations we first generate demotion operations
2322 from the source type to the intermediate types, and then combine the
2323 results (stored in VEC_OPRNDS) in demotion operation to the destination
2324 type. */
2325 if (multi_step_cvt)
2327 /* At each level of recursion we have half of the operands we had at the
2328 previous level. */
2329 VEC_truncate (tree, *vec_oprnds, (i+1)/2);
2330 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
2331 stmt, vec_dsts, gsi, slp_node,
2332 code, prev_stmt_info);
2337 /* Function vectorizable_type_demotion
2339 Check if STMT performs a binary or unary operation that involves
2340 type demotion, and if it can be vectorized.
2341 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2342 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2343 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2345 static bool
2346 vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
2347 gimple *vec_stmt, slp_tree slp_node)
2349 tree vec_dest;
2350 tree scalar_dest;
2351 tree op0;
2352 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2353 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2354 enum tree_code code, code1 = ERROR_MARK;
2355 tree def;
2356 gimple def_stmt;
2357 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2358 stmt_vec_info prev_stmt_info;
2359 int nunits_in;
2360 int nunits_out;
2361 tree vectype_out;
2362 int ncopies;
2363 int j, i;
2364 tree vectype_in;
2365 int multi_step_cvt = 0;
2366 VEC (tree, heap) *vec_oprnds0 = NULL;
2367 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2368 tree last_oprnd, intermediate_type;
2370 /* FORNOW: not supported by basic block SLP vectorization. */
2371 gcc_assert (loop_vinfo);
2373 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2374 return false;
2376 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2377 return false;
2379 /* Is STMT a vectorizable type-demotion operation? */
2380 if (!is_gimple_assign (stmt))
2381 return false;
2383 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2384 return false;
2386 code = gimple_assign_rhs_code (stmt);
2387 if (!CONVERT_EXPR_CODE_P (code))
2388 return false;
2390 op0 = gimple_assign_rhs1 (stmt);
2391 vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op0));
2392 if (!vectype_in)
2393 return false;
2394 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2396 scalar_dest = gimple_assign_lhs (stmt);
2397 vectype_out = get_vectype_for_scalar_type (TREE_TYPE (scalar_dest));
2398 if (!vectype_out)
2399 return false;
2400 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2401 if (nunits_in >= nunits_out)
2402 return false;
2404 /* Multiple types in SLP are handled by creating the appropriate number of
2405 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2406 case of SLP. */
2407 if (slp_node)
2408 ncopies = 1;
2409 else
2410 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2411 gcc_assert (ncopies >= 1);
2413 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2414 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2415 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2416 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2417 && CONVERT_EXPR_CODE_P (code))))
2418 return false;
2420 /* Check the operands of the operation. */
2421 if (!vect_is_simple_use (op0, loop_vinfo, NULL, &def_stmt, &def, &dt[0]))
2423 if (vect_print_dump_info (REPORT_DETAILS))
2424 fprintf (vect_dump, "use not simple.");
2425 return false;
2428 /* Supportable by target? */
2429 if (!supportable_narrowing_operation (code, stmt, vectype_in, &code1,
2430 &multi_step_cvt, &interm_types))
2431 return false;
2433 STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
2435 if (!vec_stmt) /* transformation not required. */
2437 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
2438 if (vect_print_dump_info (REPORT_DETAILS))
2439 fprintf (vect_dump, "=== vectorizable_demotion ===");
2440 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2441 return true;
2444 /** Transform. **/
2445 if (vect_print_dump_info (REPORT_DETAILS))
2446 fprintf (vect_dump, "transform type demotion operation. ncopies = %d.",
2447 ncopies);
2449 /* In case of multi-step demotion, we first generate demotion operations to
2450 the intermediate types, and then from those types to the final one.
2451 We create vector destinations for the intermediate types (TYPES) received
2452 from supportable_narrowing_operation, and store them in the correct order
2453 for future use in vect_create_vectorized_demotion_stmts(). */
2454 if (multi_step_cvt)
2455 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
2456 else
2457 vec_dsts = VEC_alloc (tree, heap, 1);
2459 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2460 VEC_quick_push (tree, vec_dsts, vec_dest);
2462 if (multi_step_cvt)
2464 for (i = VEC_length (tree, interm_types) - 1;
2465 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
2467 vec_dest = vect_create_destination_var (scalar_dest,
2468 intermediate_type);
2469 VEC_quick_push (tree, vec_dsts, vec_dest);
2473 /* In case the vectorization factor (VF) is bigger than the number
2474 of elements that we can fit in a vectype (nunits), we have to generate
2475 more than one vector stmt - i.e - we need to "unroll" the
2476 vector stmt by a factor VF/nunits. */
2477 last_oprnd = op0;
2478 prev_stmt_info = NULL;
2479 for (j = 0; j < ncopies; j++)
2481 /* Handle uses. */
2482 if (slp_node)
2483 vect_get_slp_defs (slp_node, &vec_oprnds0, NULL);
2484 else
2486 VEC_free (tree, heap, vec_oprnds0);
2487 vec_oprnds0 = VEC_alloc (tree, heap,
2488 (multi_step_cvt ? vect_pow2 (multi_step_cvt) * 2 : 2));
2489 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
2490 vect_pow2 (multi_step_cvt) - 1);
2493 /* Arguments are ready. Create the new vector stmts. */
2494 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
2495 vect_create_vectorized_demotion_stmts (&vec_oprnds0,
2496 multi_step_cvt, stmt, tmp_vec_dsts,
2497 gsi, slp_node, code1,
2498 &prev_stmt_info);
2501 VEC_free (tree, heap, vec_oprnds0);
2502 VEC_free (tree, heap, vec_dsts);
2503 VEC_free (tree, heap, tmp_vec_dsts);
2504 VEC_free (tree, heap, interm_types);
2506 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2507 return true;
2511 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
2512 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
2513 the resulting vectors and call the function recursively. */
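/* Each input vector is widened into two result vectors - the low and high
   halves produced by CODE1/CODE2 (or by the target builtins DECL1/DECL2) -
   e.g. one V8HI operand yields two V4SI results, so the number of vectors
   doubles at every step.  */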
2515 static void
2516 vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
2517 VEC (tree, heap) **vec_oprnds1,
2518 int multi_step_cvt, gimple stmt,
2519 VEC (tree, heap) *vec_dsts,
2520 gimple_stmt_iterator *gsi,
2521 slp_tree slp_node, enum tree_code code1,
2522 enum tree_code code2, tree decl1,
2523 tree decl2, int op_type,
2524 stmt_vec_info *prev_stmt_info)
2526 int i;
2527 tree vop0, vop1, new_tmp1, new_tmp2, vec_dest;
2528 gimple new_stmt1, new_stmt2;
2529 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2530 VEC (tree, heap) *vec_tmp;
2532 vec_dest = VEC_pop (tree, vec_dsts);
2533 vec_tmp = VEC_alloc (tree, heap, VEC_length (tree, *vec_oprnds0) * 2);
2535 for (i = 0; VEC_iterate (tree, *vec_oprnds0, i, vop0); i++)
2537 if (op_type == binary_op)
2538 vop1 = VEC_index (tree, *vec_oprnds1, i);
2539 else
2540 vop1 = NULL_TREE;
2542 /* Generate the two halves of the promotion operation. */
2543 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
2544 op_type, vec_dest, gsi, stmt);
2545 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
2546 op_type, vec_dest, gsi, stmt);
2547 if (is_gimple_call (new_stmt1))
2549 new_tmp1 = gimple_call_lhs (new_stmt1);
2550 new_tmp2 = gimple_call_lhs (new_stmt2);
2552 else
2554 new_tmp1 = gimple_assign_lhs (new_stmt1);
2555 new_tmp2 = gimple_assign_lhs (new_stmt2);
2558 if (multi_step_cvt)
2560 /* Store the results for the recursive call. */
2561 VEC_quick_push (tree, vec_tmp, new_tmp1);
2562 VEC_quick_push (tree, vec_tmp, new_tmp2);
2564 else
2566 /* Last step of the promotion sequence - store the results. */
2567 if (slp_node)
2569 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt1);
2570 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt2);
2572 else
2574 if (!*prev_stmt_info)
2575 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt1;
2576 else
2577 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt1;
2579 *prev_stmt_info = vinfo_for_stmt (new_stmt1);
2580 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt2;
2581 *prev_stmt_info = vinfo_for_stmt (new_stmt2);
2586 if (multi_step_cvt)
2588 /* For a multi-step promotion operation we call the function
2589 recursively for every stage. We start from the input type,
2590 create promotion operations to the intermediate types, and then
2591 create promotions to the output type. */
2592 *vec_oprnds0 = VEC_copy (tree, heap, vec_tmp);
2593 VEC_free (tree, heap, vec_tmp);
2594 vect_create_vectorized_promotion_stmts (vec_oprnds0, vec_oprnds1,
2595 multi_step_cvt - 1, stmt,
2596 vec_dsts, gsi, slp_node, code1,
2597 code2, decl1, decl2, op_type,
2598 prev_stmt_info);
2603 /* Function vectorizable_type_promotion
2605 Check if STMT performs a binary or unary operation that involves
2606 type promotion, and if it can be vectorized.
2607 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2608 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2609 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2611 static bool
2612 vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
2613 gimple *vec_stmt, slp_tree slp_node)
2615 tree vec_dest;
2616 tree scalar_dest;
2617 tree op0, op1 = NULL;
2618 tree vec_oprnd0=NULL, vec_oprnd1=NULL;
2619 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2620 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2621 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
2622 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
2623 int op_type;
2624 tree def;
2625 gimple def_stmt;
2626 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2627 stmt_vec_info prev_stmt_info;
2628 int nunits_in;
2629 int nunits_out;
2630 tree vectype_out;
2631 int ncopies;
2632 int j, i;
2633 tree vectype_in;
2634 tree intermediate_type = NULL_TREE;
2635 int multi_step_cvt = 0;
2636 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
2637 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2639 /* FORNOW: not supported by basic block SLP vectorization. */
2640 gcc_assert (loop_vinfo);
2642 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2643 return false;
2645 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2646 return false;
2648 /* Is STMT a vectorizable type-promotion operation? */
2649 if (!is_gimple_assign (stmt))
2650 return false;
2652 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2653 return false;
2655 code = gimple_assign_rhs_code (stmt);
2656 if (!CONVERT_EXPR_CODE_P (code)
2657 && code != WIDEN_MULT_EXPR)
2658 return false;
2660 op0 = gimple_assign_rhs1 (stmt);
2661 vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op0));
2662 if (!vectype_in)
2663 return false;
2664 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2666 scalar_dest = gimple_assign_lhs (stmt);
2667 vectype_out = get_vectype_for_scalar_type (TREE_TYPE (scalar_dest));
2668 if (!vectype_out)
2669 return false;
2670 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2671 if (nunits_in <= nunits_out)
2672 return false;
2674 /* Multiple types in SLP are handled by creating the appropriate number of
2675 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2676 case of SLP. */
2677 if (slp_node)
2678 ncopies = 1;
2679 else
2680 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2682 gcc_assert (ncopies >= 1);
2684 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2685 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2686 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2687 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2688 && CONVERT_EXPR_CODE_P (code))))
2689 return false;
2691 /* Check the operands of the operation. */
2692 if (!vect_is_simple_use (op0, loop_vinfo, NULL, &def_stmt, &def, &dt[0]))
2694 if (vect_print_dump_info (REPORT_DETAILS))
2695 fprintf (vect_dump, "use not simple.");
2696 return false;
2699 op_type = TREE_CODE_LENGTH (code);
2700 if (op_type == binary_op)
2702 op1 = gimple_assign_rhs2 (stmt);
2703 if (!vect_is_simple_use (op1, loop_vinfo, NULL, &def_stmt, &def, &dt[1]))
2705 if (vect_print_dump_info (REPORT_DETAILS))
2706 fprintf (vect_dump, "use not simple.");
2707 return false;
2711 /* Supportable by target? */
2712 if (!supportable_widening_operation (code, stmt, vectype_in,
2713 &decl1, &decl2, &code1, &code2,
2714 &multi_step_cvt, &interm_types))
2715 return false;
2717 /* Binary widening operation can only be supported directly by the
2718 architecture. */
2719 gcc_assert (!(multi_step_cvt && op_type == binary_op));
2721 STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
2723 if (!vec_stmt) /* transformation not required. */
2725 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
2726 if (vect_print_dump_info (REPORT_DETAILS))
2727 fprintf (vect_dump, "=== vectorizable_promotion ===");
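/* The cost is 2*ncopies because every copy of a promotion stmt produces
   its result in two half-width vector stmts (see
   vect_create_vectorized_promotion_stmts).  */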
2728 vect_model_simple_cost (stmt_info, 2*ncopies, dt, NULL);
2729 return true;
2732 /** Transform. **/
2734 if (vect_print_dump_info (REPORT_DETAILS))
2735 fprintf (vect_dump, "transform type promotion operation. ncopies = %d.",
2736 ncopies);
2738 /* Handle def. */
2739 /* In case of multi-step promotion, we first generate promotion operations
2740 to the intermediate types, and then from those types to the final one.
2741 We store the vector destinations in VEC_DSTS in the correct order for
2742 recursive creation of promotion operations in
2743 vect_create_vectorized_promotion_stmts(). Vector destinations are created
2744 according to TYPES received from supportable_widening_operation(). */
2745 if (multi_step_cvt)
2746 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
2747 else
2748 vec_dsts = VEC_alloc (tree, heap, 1);
2750 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2751 VEC_quick_push (tree, vec_dsts, vec_dest);
2753 if (multi_step_cvt)
2755 for (i = VEC_length (tree, interm_types) - 1;
2756 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
2758 vec_dest = vect_create_destination_var (scalar_dest,
2759 intermediate_type);
2760 VEC_quick_push (tree, vec_dsts, vec_dest);
2764 if (!slp_node)
2766 vec_oprnds0 = VEC_alloc (tree, heap,
2767 (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
2768 if (op_type == binary_op)
2769 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2772 /* In case the vectorization factor (VF) is bigger than the number
2773 of elements that we can fit in a vectype (nunits), we have to generate
2774 more than one vector stmt - i.e - we need to "unroll" the
2775 vector stmt by a factor VF/nunits. */
2777 prev_stmt_info = NULL;
2778 for (j = 0; j < ncopies; j++)
2780 /* Handle uses. */
2781 if (j == 0)
2783 if (slp_node)
2784 vect_get_slp_defs (slp_node, &vec_oprnds0, &vec_oprnds1);
2785 else
2787 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
2788 VEC_quick_push (tree, vec_oprnds0, vec_oprnd0);
2789 if (op_type == binary_op)
2791 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL);
2792 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2796 else
2798 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
2799 VEC_replace (tree, vec_oprnds0, 0, vec_oprnd0);
2800 if (op_type == binary_op)
2802 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1);
2803 VEC_replace (tree, vec_oprnds1, 0, vec_oprnd1);
2807 /* Arguments are ready. Create the new vector stmts. */
2808 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
2809 vect_create_vectorized_promotion_stmts (&vec_oprnds0, &vec_oprnds1,
2810 multi_step_cvt, stmt,
2811 tmp_vec_dsts,
2812 gsi, slp_node, code1, code2,
2813 decl1, decl2, op_type,
2814 &prev_stmt_info);
2817 VEC_free (tree, heap, vec_dsts);
2818 VEC_free (tree, heap, tmp_vec_dsts);
2819 VEC_free (tree, heap, interm_types);
2820 VEC_free (tree, heap, vec_oprnds0);
2821 VEC_free (tree, heap, vec_oprnds1);
2823 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2824 return true;
2828 /* Function vectorizable_store.
2830 Check if STMT defines a non scalar data-ref (array/pointer/structure) that
2831 can be vectorized.
2832 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2833 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2834 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2836 static bool
2837 vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
2838 slp_tree slp_node)
2840 tree scalar_dest;
2841 tree data_ref;
2842 tree op;
2843 tree vec_oprnd = NULL_TREE;
2844 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2845 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
2846 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2847 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2848 struct loop *loop = NULL;
2849 enum machine_mode vec_mode;
2850 tree dummy;
2851 enum dr_alignment_support alignment_support_scheme;
2852 tree def;
2853 gimple def_stmt;
2854 enum vect_def_type dt;
2855 stmt_vec_info prev_stmt_info = NULL;
2856 tree dataref_ptr = NULL_TREE;
2857 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
2858 int ncopies;
2859 int j;
2860 gimple next_stmt, first_stmt = NULL;
2861 bool strided_store = false;
2862 unsigned int group_size, i;
2863 VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
2864 bool inv_p;
2865 VEC(tree,heap) *vec_oprnds = NULL;
2866 bool slp = (slp_node != NULL);
2867 stmt_vec_info first_stmt_vinfo;
2868 unsigned int vec_num;
2869 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2871 if (loop_vinfo)
2872 loop = LOOP_VINFO_LOOP (loop_vinfo);
2874 /* Multiple types in SLP are handled by creating the appropriate number of
2875 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2876 case of SLP. */
2877 if (slp)
2878 ncopies = 1;
2879 else
2880 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
2882 gcc_assert (ncopies >= 1);
2884 /* FORNOW. This restriction should be relaxed. */
2885 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
2887 if (vect_print_dump_info (REPORT_DETAILS))
2888 fprintf (vect_dump, "multiple types in nested loop.");
2889 return false;
2892 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2893 return false;
2895 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2896 return false;
2898 /* Is vectorizable store? */
2900 if (!is_gimple_assign (stmt))
2901 return false;
2903 scalar_dest = gimple_assign_lhs (stmt);
2904 if (TREE_CODE (scalar_dest) != ARRAY_REF
2905 && TREE_CODE (scalar_dest) != INDIRECT_REF
2906 && TREE_CODE (scalar_dest) != COMPONENT_REF
2907 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
2908 && TREE_CODE (scalar_dest) != REALPART_EXPR)
2909 return false;
2911 gcc_assert (gimple_assign_single_p (stmt));
2912 op = gimple_assign_rhs1 (stmt);
2913 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt))
2915 if (vect_print_dump_info (REPORT_DETAILS))
2916 fprintf (vect_dump, "use not simple.");
2917 return false;
2920 /* The scalar rhs type needs to be trivially convertible to the vector
2921 component type. This should always be the case. */
2922 if (!useless_type_conversion_p (TREE_TYPE (vectype), TREE_TYPE (op)))
2924 if (vect_print_dump_info (REPORT_DETAILS))
2925 fprintf (vect_dump, "??? operands of different types");
2926 return false;
2929 vec_mode = TYPE_MODE (vectype);
2930 /* FORNOW. In some cases we can vectorize even if the data-type is not
2931 supported (e.g. array initialization with 0). */
2932 if (optab_handler (mov_optab, (int)vec_mode)->insn_code == CODE_FOR_nothing)
2933 return false;
2935 if (!STMT_VINFO_DATA_REF (stmt_info))
2936 return false;
2938 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
2940 strided_store = true;
2941 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
2942 if (!vect_strided_store_supported (vectype)
2943 && !PURE_SLP_STMT (stmt_info) && !slp)
2944 return false;
2946 if (first_stmt == stmt)
2948 /* STMT is the leader of the group. Check the operands of all the
2949 stmts of the group. */
2950 next_stmt = DR_GROUP_NEXT_DR (stmt_info);
2951 while (next_stmt)
2953 gcc_assert (gimple_assign_single_p (next_stmt));
2954 op = gimple_assign_rhs1 (next_stmt);
2955 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt,
2956 &def, &dt))
2958 if (vect_print_dump_info (REPORT_DETAILS))
2959 fprintf (vect_dump, "use not simple.");
2960 return false;
2962 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
2967 if (!vec_stmt) /* transformation not required. */
2969 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
2970 vect_model_store_cost (stmt_info, ncopies, dt, NULL);
2971 return true;
2974 /** Transform. **/
2976 if (strided_store)
2978 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
2979 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
2981 DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
2983 /* FORNOW */
2984 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
2986 /* We vectorize all the stmts of the interleaving group when we
2987 reach the last stmt in the group. */
2988 if (DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
2989 < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))
2990 && !slp)
2992 *vec_stmt = NULL;
2993 return true;
2996 if (slp)
2997 strided_store = false;
2999 /* VEC_NUM is the number of vect stmts to be created for this group. */
3000 if (slp)
3001 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3002 else
3003 vec_num = group_size;
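/* E.g., for an interleaved group of 4 scalar stores vectorized without SLP,
   VEC_NUM is 4: each copy emits 4 vector stores, one for each permuted
   vector in RESULT_CHAIN.  */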
3005 else
3007 first_stmt = stmt;
3008 first_dr = dr;
3009 group_size = vec_num = 1;
3010 first_stmt_vinfo = stmt_info;
3013 if (vect_print_dump_info (REPORT_DETAILS))
3014 fprintf (vect_dump, "transform store. ncopies = %d",ncopies);
3016 dr_chain = VEC_alloc (tree, heap, group_size);
3017 oprnds = VEC_alloc (tree, heap, group_size);
3019 alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
3020 gcc_assert (alignment_support_scheme);
3022 /* In case the vectorization factor (VF) is bigger than the number
3023 of elements that we can fit in a vectype (nunits), we have to generate
3024 more than one vector stmt - i.e - we need to "unroll" the
3025 vector stmt by a factor VF/nunits. For more details see documentation in
3026 vect_get_vec_def_for_stmt_copy. */
3028 /* In case of interleaving (non-unit strided access):
3030 S1: &base + 2 = x2
3031 S2: &base = x0
3032 S3: &base + 1 = x1
3033 S4: &base + 3 = x3
3035 We create vectorized stores starting from base address (the access of the
3036 first stmt in the chain (S2 in the above example), when the last store stmt
3037 of the chain (S4) is reached:
3039 VS1: &base = vx2
3040 VS2: &base + vec_size*1 = vx0
3041 VS3: &base + vec_size*2 = vx1
3042 VS4: &base + vec_size*3 = vx3
3044 Then permutation statements are generated:
3046 VS5: vx5 = VEC_INTERLEAVE_HIGH_EXPR < vx0, vx3 >
3047 VS6: vx6 = VEC_INTERLEAVE_LOW_EXPR < vx0, vx3 >
3050 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3051 (the order of the data-refs in the output of vect_permute_store_chain
3052 corresponds to the order of scalar stmts in the interleaving chain - see
3053 the documentation of vect_permute_store_chain()).
3055 In case of both multiple types and interleaving, above vector stores and
3056 permutation stmts are created for every copy. The result vector stmts are
3057 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
3058 STMT_VINFO_RELATED_STMT for the next copies.
3061 prev_stmt_info = NULL;
3062 for (j = 0; j < ncopies; j++)
3064 gimple new_stmt;
3065 gimple ptr_incr;
3067 if (j == 0)
3069 if (slp)
3071 /* Get vectorized arguments for SLP_NODE. */
3072 vect_get_slp_defs (slp_node, &vec_oprnds, NULL);
3074 vec_oprnd = VEC_index (tree, vec_oprnds, 0);
3076 else
3078 /* For interleaved stores we collect vectorized defs for all the
3079 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
3080 used as an input to vect_permute_store_chain(), and OPRNDS as
3081 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
3083 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3084 OPRNDS are of size 1. */
3085 next_stmt = first_stmt;
3086 for (i = 0; i < group_size; i++)
3088 /* Since gaps are not supported for interleaved stores,
3089 GROUP_SIZE is the exact number of stmts in the chain.
3090 Therefore, NEXT_STMT can't be NULL_TREE. In case that
3091 there is no interleaving, GROUP_SIZE is 1, and only one
3092 iteration of the loop will be executed. */
3093 gcc_assert (next_stmt
3094 && gimple_assign_single_p (next_stmt));
3095 op = gimple_assign_rhs1 (next_stmt);
3097 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
3098 NULL);
3099 VEC_quick_push(tree, dr_chain, vec_oprnd);
3100 VEC_quick_push(tree, oprnds, vec_oprnd);
3101 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3105 /* We should have caught mismatched types earlier. */
3106 gcc_assert (useless_type_conversion_p (vectype,
3107 TREE_TYPE (vec_oprnd)));
3108 dataref_ptr = vect_create_data_ref_ptr (first_stmt, NULL, NULL_TREE,
3109 &dummy, &ptr_incr, false,
3110 &inv_p);
3111 gcc_assert (bb_vinfo || !inv_p);
3113 else
3115 /* For interleaved stores we created vectorized defs for all the
3116 defs stored in OPRNDS in the previous iteration (previous copy).
3117 DR_CHAIN is then used as an input to vect_permute_store_chain(),
3118 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
3119 next copy.
3120 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3121 OPRNDS are of size 1. */
3122 for (i = 0; i < group_size; i++)
3124 op = VEC_index (tree, oprnds, i);
3125 vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def,
3126 &dt);
3127 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
3128 VEC_replace(tree, dr_chain, i, vec_oprnd);
3129 VEC_replace(tree, oprnds, i, vec_oprnd);
3131 dataref_ptr =
3132 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
3135 if (strided_store)
3137 result_chain = VEC_alloc (tree, heap, group_size);
3138 /* Permute. */
3139 if (!vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
3140 &result_chain))
3141 return false;
3144 next_stmt = first_stmt;
3145 for (i = 0; i < vec_num; i++)
3147 if (i > 0)
3148 /* Bump the vector pointer. */
3149 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3150 NULL_TREE);
3152 if (slp)
3153 vec_oprnd = VEC_index (tree, vec_oprnds, i);
3154 else if (strided_store)
3155 /* For strided stores vectorized defs are interleaved in
3156 vect_permute_store_chain(). */
3157 vec_oprnd = VEC_index (tree, result_chain, i);
3159 if (aligned_access_p (first_dr))
3160 data_ref = build_fold_indirect_ref (dataref_ptr);
3161 else
3163 int mis = DR_MISALIGNMENT (first_dr);
3164 tree tmis = (mis == -1 ? size_zero_node : size_int (mis));
3165 tmis = size_binop (MULT_EXPR, tmis, size_int (BITS_PER_UNIT));
3166 data_ref = build2 (MISALIGNED_INDIRECT_REF, vectype, dataref_ptr, tmis);
3169 /* If accesses through a pointer to vectype do not alias the original
3170 memory reference we have a problem. This should never happen. */
3171 gcc_assert (alias_sets_conflict_p (get_alias_set (data_ref),
3172 get_alias_set (gimple_assign_lhs (stmt))));
3174 /* Arguments are ready. Create the new vector stmt. */
3175 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
3176 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3177 mark_symbols_for_renaming (new_stmt);
3179 if (slp)
3180 continue;
3182 if (j == 0)
3183 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3184 else
3185 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3187 prev_stmt_info = vinfo_for_stmt (new_stmt);
3188 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3189 if (!next_stmt)
3190 break;
3194 VEC_free (tree, heap, dr_chain);
3195 VEC_free (tree, heap, oprnds);
3196 if (result_chain)
3197 VEC_free (tree, heap, result_chain);
3199 return true;
3202 /* vectorizable_load.
3204 Check if STMT reads a non scalar data-ref (array/pointer/structure) that
3205 can be vectorized.
3206 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3207 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3208 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3210 static bool
3211 vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
3212 slp_tree slp_node, slp_instance slp_node_instance)
3214 tree scalar_dest;
3215 tree vec_dest = NULL;
3216 tree data_ref = NULL;
3217 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3218 stmt_vec_info prev_stmt_info;
3219 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3220 struct loop *loop = NULL;
3221 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
3222 bool nested_in_vect_loop = false;
3223 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
3224 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3225 tree new_temp;
3226 int mode;
3227 gimple new_stmt = NULL;
3228 tree dummy;
3229 enum dr_alignment_support alignment_support_scheme;
3230 tree dataref_ptr = NULL_TREE;
3231 gimple ptr_incr;
3232 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3233 int ncopies;
3234 int i, j, group_size;
3235 tree msq = NULL_TREE, lsq;
3236 tree offset = NULL_TREE;
3237 tree realignment_token = NULL_TREE;
3238 gimple phi = NULL;
3239 VEC(tree,heap) *dr_chain = NULL;
3240 bool strided_load = false;
3241 gimple first_stmt;
3242 tree scalar_type;
3243 bool inv_p;
3244 bool compute_in_loop = false;
3245 struct loop *at_loop;
3246 int vec_num;
3247 bool slp = (slp_node != NULL);
3248 bool slp_perm = false;
3249 enum tree_code code;
3250 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3251 int vf;
3253 if (loop_vinfo)
3255 loop = LOOP_VINFO_LOOP (loop_vinfo);
3256 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
3257 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3259 else
3260 /* FORNOW: multiple types are not supported in basic block SLP. */
3261 vf = nunits;
3263 /* Multiple types in SLP are handled by creating the appropriate number of
3264 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3265 case of SLP. */
3266 if (slp)
3267 ncopies = 1;
3268 else
3269 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3271 gcc_assert (ncopies >= 1);
3273 /* FORNOW. This restriction should be relaxed. */
3274 if (nested_in_vect_loop && ncopies > 1)
3276 if (vect_print_dump_info (REPORT_DETAILS))
3277 fprintf (vect_dump, "multiple types in nested loop.");
3278 return false;
3281 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3282 return false;
3284 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3285 return false;
3287 /* Is vectorizable load? */
3288 if (!is_gimple_assign (stmt))
3289 return false;
3291 scalar_dest = gimple_assign_lhs (stmt);
3292 if (TREE_CODE (scalar_dest) != SSA_NAME)
3293 return false;
3295 code = gimple_assign_rhs_code (stmt);
3296 if (code != ARRAY_REF
3297 && code != INDIRECT_REF
3298 && code != COMPONENT_REF
3299 && code != IMAGPART_EXPR
3300 && code != REALPART_EXPR)
3301 return false;
3303 if (!STMT_VINFO_DATA_REF (stmt_info))
3304 return false;
3306 scalar_type = TREE_TYPE (DR_REF (dr));
3307 mode = (int) TYPE_MODE (vectype);
3309 /* FORNOW. In some cases we can vectorize even if the data-type is not
3310 supported (e.g. data copies). */
3311 if (optab_handler (mov_optab, mode)->insn_code == CODE_FOR_nothing)
3313 if (vect_print_dump_info (REPORT_DETAILS))
3314 fprintf (vect_dump, "Aligned load, but unsupported type.");
3315 return false;
3318 /* The vector component type needs to be trivially convertible to the
3319 scalar lhs. This should always be the case. */
3320 if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), TREE_TYPE (vectype)))
3322 if (vect_print_dump_info (REPORT_DETAILS))
3323 fprintf (vect_dump, "??? operands of different types");
3324 return false;
3327 /* Check if the load is a part of an interleaving chain. */
3328 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3330 strided_load = true;
3331 /* FORNOW */
3332 gcc_assert (! nested_in_vect_loop);
3334 /* Check if interleaving is supported. */
3335 if (!vect_strided_load_supported (vectype)
3336 && !PURE_SLP_STMT (stmt_info) && !slp)
3337 return false;
3340 if (!vec_stmt) /* transformation not required. */
3342 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
3343 vect_model_load_cost (stmt_info, ncopies, NULL);
3344 return true;
3347 if (vect_print_dump_info (REPORT_DETAILS))
3348 fprintf (vect_dump, "transform load.");
3350 /** Transform. **/
3352 if (strided_load)
3354 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
3355 /* Check if the chain of loads is already vectorized. */
3356 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt)))
3358 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3359 return true;
3361 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3362 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3364 /* VEC_NUM is the number of vect stmts to be created for this group. */
3365 if (slp)
3367 strided_load = false;
3368 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3369 if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance))
3370 slp_perm = true;
3372 else
3373 vec_num = group_size;
3375 dr_chain = VEC_alloc (tree, heap, vec_num);
3377 else
3379 first_stmt = stmt;
3380 first_dr = dr;
3381 group_size = vec_num = 1;
3384 alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
3385 gcc_assert (alignment_support_scheme);
3387 /* In case the vectorization factor (VF) is bigger than the number
3388 of elements that we can fit in a vectype (nunits), we have to generate
3389 more than one vector stmt - i.e - we need to "unroll" the
3390 vector stmt by a factor VF/nunits. In doing so, we record a pointer
3391 from one copy of the vector stmt to the next, in the field
3392 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
3393 stages to find the correct vector defs to be used when vectorizing
3394 stmts that use the defs of the current stmt. The example below illustrates
3395 the vectorization process when VF=16 and nunits=4 (i.e - we need to create
3396 4 vectorized stmts):
3398 before vectorization:
3399 RELATED_STMT VEC_STMT
3400 S1: x = memref - -
3401 S2: z = x + 1 - -
3403 step 1: vectorize stmt S1:
3404 We first create the vector stmt VS1_0, and, as usual, record a
3405 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
3406 Next, we create the vector stmt VS1_1, and record a pointer to
3407 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
3408 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
3409 stmts and pointers:
3410 RELATED_STMT VEC_STMT
3411 VS1_0: vx0 = memref0 VS1_1 -
3412 VS1_1: vx1 = memref1 VS1_2 -
3413 VS1_2: vx2 = memref2 VS1_3 -
3414 VS1_3: vx3 = memref3 - -
3415 S1: x = load - VS1_0
3416 S2: z = x + 1 - -
3418 See the documentation of vect_get_vec_def_for_stmt_copy for how the
3419 information we recorded in the RELATED_STMT field is used to vectorize
3420 stmt S2. */
3422 /* In case of interleaving (non-unit strided access):
3424 S1: x2 = &base + 2
3425 S2: x0 = &base
3426 S3: x1 = &base + 1
3427 S4: x3 = &base + 3
3429 Vectorized loads are created in the order of memory accesses
3430 starting from the access of the first stmt of the chain:
3432 VS1: vx0 = &base
3433 VS2: vx1 = &base + vec_size*1
3434 VS3: vx3 = &base + vec_size*2
3435 VS4: vx4 = &base + vec_size*3
3437 Then permutation statements are generated:
3439 VS5: vx5 = VEC_EXTRACT_EVEN_EXPR < vx0, vx1 >
3440 VS6: vx6 = VEC_EXTRACT_ODD_EXPR < vx0, vx1 >
3443 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3444 (the order of the data-refs in the output of vect_permute_load_chain
3445 corresponds to the order of scalar stmts in the interleaving chain - see
3446 the documentation of vect_permute_load_chain()).
3447 The generation of permutation stmts and recording them in
3448 STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().
3450 In case of both multiple types and interleaving, the vector loads and
3451 permutation stmts above are created for every copy. The result vector stmts
3452 are put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
3453 STMT_VINFO_RELATED_STMT for the next copies. */
3455 /* If the data reference is aligned (dr_aligned) or potentially unaligned
3456 on a target that supports unaligned accesses (dr_unaligned_supported)
3457 we generate the following code:
3458 p = initial_addr;
3459 indx = 0;
3460 loop {
3461 p = p + indx * vectype_size;
3462 vec_dest = *(p);
3463 indx = indx + 1;
3466 Otherwise, the data reference is potentially unaligned on a target that
3467 does not support unaligned accesses (dr_explicit_realign_optimized) -
3468 then generate the following code, in which the data in each iteration is
3469 obtained by two vector loads, one from the previous iteration, and one
3470 from the current iteration:
3471 p1 = initial_addr;
3472 msq_init = *(floor(p1))
3473 p2 = initial_addr + VS - 1;
3474 realignment_token = call target_builtin;
3475 indx = 0;
3476 loop {
3477 p2 = p2 + indx * vectype_size
3478 lsq = *(floor(p2))
3479 vec_dest = realign_load (msq, lsq, realignment_token)
3480 indx = indx + 1;
3481 msq = lsq;
3482 } */
3484 /* If the misalignment remains the same throughout the execution of the
3485 loop, we can create the init_addr and permutation mask at the loop
3486 preheader. Otherwise, it needs to be created inside the loop.
3487 This can only occur when vectorizing memory accesses in the inner-loop
3488 nested within an outer-loop that is being vectorized. */
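/* The check below detects this case: if the step of the data reference is
   not a multiple of the vector size, the misalignment differs between
   iterations, so COMPUTE_IN_LOOP is set and the realignment data cannot
   be hoisted to the preheader.  */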
3490 if (loop && nested_in_vect_loop_p (loop, stmt)
3491 && (TREE_INT_CST_LOW (DR_STEP (dr))
3492 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
3494 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
3495 compute_in_loop = true;
3498 if ((alignment_support_scheme == dr_explicit_realign_optimized
3499 || alignment_support_scheme == dr_explicit_realign)
3500 && !compute_in_loop)
3502 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
3503 alignment_support_scheme, NULL_TREE,
3504 &at_loop);
3505 if (alignment_support_scheme == dr_explicit_realign_optimized)
3507 phi = SSA_NAME_DEF_STMT (msq);
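/* OFFSET is VS - 1 elements, matching the "p2 = initial_addr + VS - 1"
   computation in the scheme above, so that the second floor-aligned load
   (LSQ) covers the remainder of the data.  */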
3508 offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
3511 else
3512 at_loop = loop;
3514 prev_stmt_info = NULL;
3515 for (j = 0; j < ncopies; j++)
3517 /* 1. Create the vector pointer update chain. */
3518 if (j == 0)
3519 dataref_ptr = vect_create_data_ref_ptr (first_stmt,
3520 at_loop, offset,
3521 &dummy, &ptr_incr, false,
3522 &inv_p);
3523 else
3524 dataref_ptr =
3525 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
3527 for (i = 0; i < vec_num; i++)
3529 if (i > 0)
3530 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3531 NULL_TREE);
3533 /* 2. Create the vector-load in the loop. */
3534 switch (alignment_support_scheme)
3536 case dr_aligned:
3537 gcc_assert (aligned_access_p (first_dr));
3538 data_ref = build_fold_indirect_ref (dataref_ptr);
3539 break;
3540 case dr_unaligned_supported:
3542 int mis = DR_MISALIGNMENT (first_dr);
3543 tree tmis = (mis == -1 ? size_zero_node : size_int (mis));
3545 tmis = size_binop (MULT_EXPR, tmis, size_int(BITS_PER_UNIT));
3546 data_ref =
3547 build2 (MISALIGNED_INDIRECT_REF, vectype, dataref_ptr, tmis);
3548 break;
3550 case dr_explicit_realign:
3552 tree ptr, bump;
3553 tree vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
3555 if (compute_in_loop)
3556 msq = vect_setup_realignment (first_stmt, gsi,
3557 &realignment_token,
3558 dr_explicit_realign,
3559 dataref_ptr, NULL);
3561 data_ref = build1 (ALIGN_INDIRECT_REF, vectype, dataref_ptr);
3562 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3563 new_stmt = gimple_build_assign (vec_dest, data_ref);
3564 new_temp = make_ssa_name (vec_dest, new_stmt);
3565 gimple_assign_set_lhs (new_stmt, new_temp);
3566 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
3567 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
3568 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3569 msq = new_temp;
3571 bump = size_binop (MULT_EXPR, vs_minus_1,
3572 TYPE_SIZE_UNIT (scalar_type));
3573 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
3574 data_ref = build1 (ALIGN_INDIRECT_REF, vectype, ptr);
3575 break;
3577 case dr_explicit_realign_optimized:
3578 data_ref = build1 (ALIGN_INDIRECT_REF, vectype, dataref_ptr);
3579 break;
3580 default:
3581 gcc_unreachable ();
3583 /* If accesses through a pointer to vectype do not alias the original
3584 memory reference, we have a problem. This should never happen. */
3585 gcc_assert (alias_sets_conflict_p (get_alias_set (data_ref),
3586 get_alias_set (gimple_assign_rhs1 (stmt))));
3587 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3588 new_stmt = gimple_build_assign (vec_dest, data_ref);
3589 new_temp = make_ssa_name (vec_dest, new_stmt);
3590 gimple_assign_set_lhs (new_stmt, new_temp);
3591 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3592 mark_symbols_for_renaming (new_stmt);
3594 /* 3. Handle explicit realignment if necessary/supported. Create in
3595 loop: vec_dest = realign_load (msq, lsq, realignment_token) */
3596 if (alignment_support_scheme == dr_explicit_realign_optimized
3597 || alignment_support_scheme == dr_explicit_realign)
3599 tree tmp;
3601 lsq = gimple_assign_lhs (new_stmt);
3602 if (!realignment_token)
3603 realignment_token = dataref_ptr;
3604 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3605 tmp = build3 (REALIGN_LOAD_EXPR, vectype, msq, lsq,
3606 realignment_token);
3607 new_stmt = gimple_build_assign (vec_dest, tmp);
3608 new_temp = make_ssa_name (vec_dest, new_stmt);
3609 gimple_assign_set_lhs (new_stmt, new_temp);
3610 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3612 if (alignment_support_scheme == dr_explicit_realign_optimized)
3614 gcc_assert (phi);
3615 if (i == vec_num - 1 && j == ncopies - 1)
3616 add_phi_arg (phi, lsq, loop_latch_edge (containing_loop));
3617 msq = lsq;
3621 /* 4. Handle invariant-load. */
3622 if (inv_p && !bb_vinfo)
3624 gcc_assert (!strided_load);
3625 gcc_assert (nested_in_vect_loop_p (loop, stmt));
3626 if (j == 0)
3628 int k;
3629 tree t = NULL_TREE;
3630 tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);
3632 /* CHECKME: bitpos depends on endianness? */
3633 bitpos = bitsize_zero_node;
3634 vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
3635 bitsize, bitpos);
3636 vec_dest =
3637 vect_create_destination_var (scalar_dest, NULL_TREE);
3638 new_stmt = gimple_build_assign (vec_dest, vec_inv);
3639 new_temp = make_ssa_name (vec_dest, new_stmt);
3640 gimple_assign_set_lhs (new_stmt, new_temp);
3641 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3643 for (k = nunits - 1; k >= 0; --k)
3644 t = tree_cons (NULL_TREE, new_temp, t);
3645 /* FIXME: use build_constructor directly. */
3646 vec_inv = build_constructor_from_list (vectype, t);
3647 new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
3648 new_stmt = SSA_NAME_DEF_STMT (new_temp);
3650 else
3651 gcc_unreachable (); /* FORNOW. */
3654 /* Collect vector loads and later create their permutation in
3655 vect_transform_strided_load (). */
3656 if (strided_load || slp_perm)
3657 VEC_quick_push (tree, dr_chain, new_temp);
3659 /* Store vector loads in the corresponding SLP_NODE. */
3660 if (slp && !slp_perm)
3661 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
3664 if (slp && !slp_perm)
3665 continue;
3667 if (slp_perm)
3669 if (!vect_transform_slp_perm_load (stmt, dr_chain, gsi, vf,
3670 slp_node_instance, false))
3672 VEC_free (tree, heap, dr_chain);
3673 return false;
3676 else
3678 if (strided_load)
3680 if (!vect_transform_strided_load (stmt, dr_chain, group_size, gsi))
3681 return false;
3683 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3684 VEC_free (tree, heap, dr_chain);
3685 dr_chain = VEC_alloc (tree, heap, group_size);
3687 else
3689 if (j == 0)
3690 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3691 else
3692 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3693 prev_stmt_info = vinfo_for_stmt (new_stmt);
3698 if (dr_chain)
3699 VEC_free (tree, heap, dr_chain);
3701 return true;
3704 /* Function vect_is_simple_cond.
3706 Input:
3707 LOOP - the loop that is being vectorized.
3708 COND - Condition that is checked for simple use.
3710 Returns whether a COND can be vectorized. Checks whether
3711 condition operands are supportable using vect_is_simple_use. */
3713 static bool
3714 vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo)
3716 tree lhs, rhs;
3717 tree def;
3718 enum vect_def_type dt;
3720 if (!COMPARISON_CLASS_P (cond))
3721 return false;
3723 lhs = TREE_OPERAND (cond, 0);
3724 rhs = TREE_OPERAND (cond, 1);
3726 if (TREE_CODE (lhs) == SSA_NAME)
3728 gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
3729 if (!vect_is_simple_use (lhs, loop_vinfo, NULL, &lhs_def_stmt, &def,
3730 &dt))
3731 return false;
3733 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
3734 && TREE_CODE (lhs) != FIXED_CST)
3735 return false;
3737 if (TREE_CODE (rhs) == SSA_NAME)
3739 gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
3740 if (!vect_is_simple_use (rhs, loop_vinfo, NULL, &rhs_def_stmt, &def,
3741 &dt))
3742 return false;
3744 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
3745 && TREE_CODE (rhs) != FIXED_CST)
3746 return false;
3748 return true;
3751 /* vectorizable_condition.
3753 Check if STMT is conditional modify expression that can be vectorized.
3754 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3755 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
3756 at BSI.
3758 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
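/* Illustrative sketch (editorial, not taken from the sources): a scalar
   statement such as

       x_1 = a_2 < b_3 ? c_4 : d_5;

   is replaced (for ncopies == 1) by a single vector statement of the form

       vx_1 = VEC_COND_EXPR <va_2 < vb_3, vc_4, vd_5>;

   where va_2, vb_3, vc_4 and vd_5 are the vector defs obtained for the
   corresponding scalar operands with vect_get_vec_def_for_operand.  */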
3760 static bool
3761 vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
3762 gimple *vec_stmt)
3764 tree scalar_dest = NULL_TREE;
3765 tree vec_dest = NULL_TREE;
3766 tree op = NULL_TREE;
3767 tree cond_expr, then_clause, else_clause;
3768 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3769 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3770 tree vec_cond_lhs, vec_cond_rhs, vec_then_clause, vec_else_clause;
3771 tree vec_compare, vec_cond_expr;
3772 tree new_temp;
3773 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3774 enum machine_mode vec_mode;
3775 tree def;
3776 enum vect_def_type dt;
3777 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3778 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3779 enum tree_code code;
3781 /* FORNOW: unsupported in basic block SLP. */
3782 gcc_assert (loop_vinfo);
3784 gcc_assert (ncopies >= 1);
3785 if (ncopies > 1)
3786 return false; /* FORNOW */
3788 if (!STMT_VINFO_RELEVANT_P (stmt_info))
3789 return false;
3791 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3792 return false;
3794 /* FORNOW: SLP not supported. */
3795 if (STMT_SLP_TYPE (stmt_info))
3796 return false;
3798 /* FORNOW: not yet supported. */
3799 if (STMT_VINFO_LIVE_P (stmt_info))
3801 if (vect_print_dump_info (REPORT_DETAILS))
3802 fprintf (vect_dump, "value used after loop.");
3803 return false;
3806 /* Is this a vectorizable conditional operation? */
3807 if (!is_gimple_assign (stmt))
3808 return false;
3810 code = gimple_assign_rhs_code (stmt);
3812 if (code != COND_EXPR)
3813 return false;
3815 gcc_assert (gimple_assign_single_p (stmt));
3816 op = gimple_assign_rhs1 (stmt);
3817 cond_expr = TREE_OPERAND (op, 0);
3818 then_clause = TREE_OPERAND (op, 1);
3819 else_clause = TREE_OPERAND (op, 2);
3821 if (!vect_is_simple_cond (cond_expr, loop_vinfo))
3822 return false;
3824 /* We do not handle two different vector types for the condition
3825 and the values. */
3826 if (TREE_TYPE (TREE_OPERAND (cond_expr, 0)) != TREE_TYPE (vectype))
3827 return false;
3829 if (TREE_CODE (then_clause) == SSA_NAME)
3831 gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
3832 if (!vect_is_simple_use (then_clause, loop_vinfo, NULL,
3833 &then_def_stmt, &def, &dt))
3834 return false;
3836 else if (TREE_CODE (then_clause) != INTEGER_CST
3837 && TREE_CODE (then_clause) != REAL_CST
3838 && TREE_CODE (then_clause) != FIXED_CST)
3839 return false;
3841 if (TREE_CODE (else_clause) == SSA_NAME)
3843 gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
3844 if (!vect_is_simple_use (else_clause, loop_vinfo, NULL,
3845 &else_def_stmt, &def, &dt))
3846 return false;
3848 else if (TREE_CODE (else_clause) != INTEGER_CST
3849 && TREE_CODE (else_clause) != REAL_CST
3850 && TREE_CODE (else_clause) != FIXED_CST)
3851 return false;
3854 vec_mode = TYPE_MODE (vectype);
3856 if (!vec_stmt)
3858 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
3859 return expand_vec_cond_expr_p (op, vec_mode);
3862 /* Transform */
3864 /* Handle def. */
3865 scalar_dest = gimple_assign_lhs (stmt);
3866 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3868 /* Handle cond expr. */
3869 vec_cond_lhs =
3870 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0), stmt, NULL);
3871 vec_cond_rhs =
3872 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1), stmt, NULL);
3873 vec_then_clause = vect_get_vec_def_for_operand (then_clause, stmt, NULL);
3874 vec_else_clause = vect_get_vec_def_for_operand (else_clause, stmt, NULL);
3876 /* Arguments are ready. Create the new vector stmt. */
3877 vec_compare = build2 (TREE_CODE (cond_expr), vectype,
3878 vec_cond_lhs, vec_cond_rhs);
3879 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
3880 vec_compare, vec_then_clause, vec_else_clause);
3882 *vec_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
3883 new_temp = make_ssa_name (vec_dest, *vec_stmt);
3884 gimple_assign_set_lhs (*vec_stmt, new_temp);
3885 vect_finish_stmt_generation (stmt, *vec_stmt, gsi);
3887 return true;
3891 /* Make sure the statement is vectorizable. */
3893 bool
3894 vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
3896 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3897 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3898 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
3899 bool ok;
3900 HOST_WIDE_INT dummy;
3901 tree scalar_type, vectype;
3903 if (vect_print_dump_info (REPORT_DETAILS))
3905 fprintf (vect_dump, "==> examining statement: ");
3906 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
3909 /* Skip stmts that do not need to be vectorized. In loops this is expected
3910 to include:
3911 - the COND_EXPR which is the loop exit condition
3912 - any LABEL_EXPRs in the loop
3913 - computations that are used only for array indexing or loop control.
3914 In basic blocks we only analyze statements that are a part of some SLP
3915 instance; therefore, all the statements are relevant. */
3917 if (!STMT_VINFO_RELEVANT_P (stmt_info)
3918 && !STMT_VINFO_LIVE_P (stmt_info))
3920 if (vect_print_dump_info (REPORT_DETAILS))
3921 fprintf (vect_dump, "irrelevant.");
3923 return true;
3926 switch (STMT_VINFO_DEF_TYPE (stmt_info))
3928 case vect_internal_def:
3929 break;
3931 case vect_reduction_def:
3932 gcc_assert (!bb_vinfo && (relevance == vect_used_in_outer
3933 || relevance == vect_used_in_outer_by_reduction
3934 || relevance == vect_unused_in_scope));
3935 break;
3937 case vect_induction_def:
3938 case vect_constant_def:
3939 case vect_external_def:
3940 case vect_unknown_def_type:
3941 default:
3942 gcc_unreachable ();
3945 if (bb_vinfo)
3947 gcc_assert (PURE_SLP_STMT (stmt_info));
3949 scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy);
3950 if (vect_print_dump_info (REPORT_DETAILS))
3952 fprintf (vect_dump, "get vectype for scalar type: ");
3953 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
3956 vectype = get_vectype_for_scalar_type (scalar_type);
3957 if (!vectype)
3959 if (vect_print_dump_info (REPORT_DETAILS))
3961 fprintf (vect_dump, "not SLPed: unsupported data-type ");
3962 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
3964 return false;
3967 if (vect_print_dump_info (REPORT_DETAILS))
3969 fprintf (vect_dump, "vectype: ");
3970 print_generic_expr (vect_dump, vectype, TDF_SLIM);
3973 STMT_VINFO_VECTYPE (stmt_info) = vectype;
3976 if (STMT_VINFO_RELEVANT_P (stmt_info))
3978 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
3979 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
3980 *need_to_vectorize = true;
3983 ok = true;
3984 if (!bb_vinfo
3985 && (STMT_VINFO_RELEVANT_P (stmt_info)
3986 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
3987 ok = (vectorizable_type_promotion (stmt, NULL, NULL, NULL)
3988 || vectorizable_type_demotion (stmt, NULL, NULL, NULL)
3989 || vectorizable_conversion (stmt, NULL, NULL, NULL)
3990 || vectorizable_operation (stmt, NULL, NULL, NULL)
3991 || vectorizable_assignment (stmt, NULL, NULL, NULL)
3992 || vectorizable_load (stmt, NULL, NULL, NULL, NULL)
3993 || vectorizable_call (stmt, NULL, NULL)
3994 || vectorizable_store (stmt, NULL, NULL, NULL)
3995 || vectorizable_condition (stmt, NULL, NULL)
3996 || vectorizable_reduction (stmt, NULL, NULL));
3997 else
3999 if (bb_vinfo)
4000 ok = (vectorizable_operation (stmt, NULL, NULL, node)
4001 || vectorizable_assignment (stmt, NULL, NULL, node)
4002 || vectorizable_load (stmt, NULL, NULL, node, NULL)
4003 || vectorizable_store (stmt, NULL, NULL, node));
4006 if (!ok)
4008 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4010 fprintf (vect_dump, "not vectorized: relevant stmt not ");
4011 fprintf (vect_dump, "supported: ");
4012 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4015 return false;
4018 if (bb_vinfo)
4019 return true;
4021 /* Stmts that are (also) "live" (i.e., used outside of the loop)
4022 need extra handling, except for vectorizable reductions. */
4023 if (STMT_VINFO_LIVE_P (stmt_info)
4024 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4025 ok = vectorizable_live_operation (stmt, NULL, NULL);
4027 if (!ok)
4029 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4031 fprintf (vect_dump, "not vectorized: live stmt not ");
4032 fprintf (vect_dump, "supported: ");
4033 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4036 return false;
4039 if (!PURE_SLP_STMT (stmt_info))
4041 /* Groups of strided accesses whose size is not a power of 2 are not
4042 vectorizable yet using loop-vectorization. Therefore, if this stmt
4043 feeds non-SLP-able stmts (i.e., this stmt has to be both SLPed and
4044 loop-based vectorized), the loop cannot be vectorized. */
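      /* Illustrative example (editorial): a group of three interleaved
         accesses such as  a[3*i], a[3*i+1], a[3*i+2]  has DR_GROUP_SIZE == 3
         and exact_log2 (3) == -1, so it is rejected here, whereas a group
         of 2 or 4 accesses passes the check below.  */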
4045 if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
4046 && exact_log2 (DR_GROUP_SIZE (vinfo_for_stmt (
4047 DR_GROUP_FIRST_DR (stmt_info)))) == -1)
4049 if (vect_print_dump_info (REPORT_DETAILS))
4051 fprintf (vect_dump, "not vectorized: the size of group "
4052 "of strided accesses is not a power of 2");
4053 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4056 return false;
4060 return true;
4064 /* Function vect_transform_stmt.
4066 Create a vectorized stmt to replace STMT, and insert it at BSI. */
4068 bool
4069 vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
4070 bool *strided_store, slp_tree slp_node,
4071 slp_instance slp_node_instance)
4073 bool is_store = false;
4074 gimple vec_stmt = NULL;
4075 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4076 gimple orig_stmt_in_pattern;
4077 bool done;
4079 switch (STMT_VINFO_TYPE (stmt_info))
4081 case type_demotion_vec_info_type:
4082 done = vectorizable_type_demotion (stmt, gsi, &vec_stmt, slp_node);
4083 gcc_assert (done);
4084 break;
4086 case type_promotion_vec_info_type:
4087 done = vectorizable_type_promotion (stmt, gsi, &vec_stmt, slp_node);
4088 gcc_assert (done);
4089 break;
4091 case type_conversion_vec_info_type:
4092 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
4093 gcc_assert (done);
4094 break;
4096 case induc_vec_info_type:
4097 gcc_assert (!slp_node);
4098 done = vectorizable_induction (stmt, gsi, &vec_stmt);
4099 gcc_assert (done);
4100 break;
4102 case op_vec_info_type:
4103 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
4104 gcc_assert (done);
4105 break;
4107 case assignment_vec_info_type:
4108 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
4109 gcc_assert (done);
4110 break;
4112 case load_vec_info_type:
4113 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
4114 slp_node_instance);
4115 gcc_assert (done);
4116 break;
4118 case store_vec_info_type:
4119 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
4120 gcc_assert (done);
4121 if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && !slp_node)
4123 /* In case of interleaving, the whole chain is vectorized when the
4124 last store in the chain is reached. Store stmts before the last
4125 one are skipped, and their stmt_vec_info shouldn't be freed
4126 meanwhile. */
4127 *strided_store = true;
4128 if (STMT_VINFO_VEC_STMT (stmt_info))
4129 is_store = true;
4131 else
4132 is_store = true;
4133 break;
4135 case condition_vec_info_type:
4136 gcc_assert (!slp_node);
4137 done = vectorizable_condition (stmt, gsi, &vec_stmt);
4138 gcc_assert (done);
4139 break;
4141 case call_vec_info_type:
4142 gcc_assert (!slp_node);
4143 done = vectorizable_call (stmt, gsi, &vec_stmt);
4144 break;
4146 case reduc_vec_info_type:
4147 gcc_assert (!slp_node);
4148 done = vectorizable_reduction (stmt, gsi, &vec_stmt);
4149 gcc_assert (done);
4150 break;
4152 default:
4153 if (!STMT_VINFO_LIVE_P (stmt_info))
4155 if (vect_print_dump_info (REPORT_DETAILS))
4156 fprintf (vect_dump, "stmt not supported.");
4157 gcc_unreachable ();
4161 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
4162 is being vectorized, but outside the immediately enclosing loop. */
4163 if (vec_stmt
4164 && STMT_VINFO_LOOP_VINFO (stmt_info)
4165 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
4166 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
4167 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
4168 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
4169 || STMT_VINFO_RELEVANT (stmt_info) ==
4170 vect_used_in_outer_by_reduction))
4172 struct loop *innerloop = LOOP_VINFO_LOOP (
4173 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
4174 imm_use_iterator imm_iter;
4175 use_operand_p use_p;
4176 tree scalar_dest;
4177 gimple exit_phi;
4179 if (vect_print_dump_info (REPORT_DETAILS))
4180 fprintf (vect_dump, "Record the vdef for outer-loop vectorization.");
4182 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
4183 (to be used when vectorizing outer-loop stmts that use the DEF of
4184 STMT). */
4185 if (gimple_code (stmt) == GIMPLE_PHI)
4186 scalar_dest = PHI_RESULT (stmt);
4187 else
4188 scalar_dest = gimple_assign_lhs (stmt);
4190 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4192 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
4194 exit_phi = USE_STMT (use_p);
4195 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
4200 /* Handle stmts whose DEF is used outside the loop-nest that is
4201 being vectorized. */
4202 if (STMT_VINFO_LIVE_P (stmt_info)
4203 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4205 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
4206 gcc_assert (done);
4209 if (vec_stmt)
4211 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
4212 orig_stmt_in_pattern = STMT_VINFO_RELATED_STMT (stmt_info);
4213 if (orig_stmt_in_pattern)
4215 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt_in_pattern);
4216 /* STMT was inserted by the vectorizer to replace a computation idiom.
4217 ORIG_STMT_IN_PATTERN is a stmt in the original sequence that
4218 computed this idiom. We need to record a pointer to VEC_STMT in
4219 the stmt_info of ORIG_STMT_IN_PATTERN. See more details in the
4220 documentation of vect_pattern_recog. */
4221 if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
4223 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
4224 STMT_VINFO_VEC_STMT (stmt_vinfo) = vec_stmt;
4229 return is_store;
4233 /* Remove a group of stores (for SLP or interleaving), free their
4234 stmt_vec_info. */
4236 void
4237 vect_remove_stores (gimple first_stmt)
4239 gimple next = first_stmt;
4240 gimple tmp;
4241 gimple_stmt_iterator next_si;
4243 while (next)
4245 /* Free the attached stmt_vec_info and remove the stmt. */
4246 next_si = gsi_for_stmt (next);
4247 gsi_remove (&next_si, true);
4248 tmp = DR_GROUP_NEXT_DR (vinfo_for_stmt (next));
4249 free_stmt_vec_info (next);
4250 next = tmp;
4255 /* Function new_stmt_vec_info.
4257 Create and initialize a new stmt_vec_info struct for STMT. */
4259 stmt_vec_info
4260 new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
4261 bb_vec_info bb_vinfo)
4263 stmt_vec_info res;
4264 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
4266 STMT_VINFO_TYPE (res) = undef_vec_info_type;
4267 STMT_VINFO_STMT (res) = stmt;
4268 STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
4269 STMT_VINFO_BB_VINFO (res) = bb_vinfo;
4270 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
4271 STMT_VINFO_LIVE_P (res) = false;
4272 STMT_VINFO_VECTYPE (res) = NULL;
4273 STMT_VINFO_VEC_STMT (res) = NULL;
4274 STMT_VINFO_IN_PATTERN_P (res) = false;
4275 STMT_VINFO_RELATED_STMT (res) = NULL;
4276 STMT_VINFO_DATA_REF (res) = NULL;
4278 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
4279 STMT_VINFO_DR_OFFSET (res) = NULL;
4280 STMT_VINFO_DR_INIT (res) = NULL;
4281 STMT_VINFO_DR_STEP (res) = NULL;
4282 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
4284 if (gimple_code (stmt) == GIMPLE_PHI
4285 && is_loop_header_bb_p (gimple_bb (stmt)))
4286 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
4287 else
4288 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
4290 STMT_VINFO_SAME_ALIGN_REFS (res) = VEC_alloc (dr_p, heap, 5);
4291 STMT_VINFO_INSIDE_OF_LOOP_COST (res) = 0;
4292 STMT_VINFO_OUTSIDE_OF_LOOP_COST (res) = 0;
4293 STMT_SLP_TYPE (res) = loop_vect;
4294 DR_GROUP_FIRST_DR (res) = NULL;
4295 DR_GROUP_NEXT_DR (res) = NULL;
4296 DR_GROUP_SIZE (res) = 0;
4297 DR_GROUP_STORE_COUNT (res) = 0;
4298 DR_GROUP_GAP (res) = 0;
4299 DR_GROUP_SAME_DR_STMT (res) = NULL;
4300 DR_GROUP_READ_WRITE_DEPENDENCE (res) = false;
4302 return res;
4306 /* Create the vector (stmt_vec_info_vec) that holds stmt_vec_info structs. */
4308 void
4309 init_stmt_vec_info_vec (void)
4311 gcc_assert (!stmt_vec_info_vec);
4312 stmt_vec_info_vec = VEC_alloc (vec_void_p, heap, 50);
4316 /* Free the vector that holds the stmt_vec_info structs. */
4318 void
4319 free_stmt_vec_info_vec (void)
4321 gcc_assert (stmt_vec_info_vec);
4322 VEC_free (vec_void_p, heap, stmt_vec_info_vec);
4326 /* Free stmt vectorization related info. */
4328 void
4329 free_stmt_vec_info (gimple stmt)
4331 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4333 if (!stmt_info)
4334 return;
4336 VEC_free (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
4337 set_vinfo_for_stmt (stmt, NULL);
4338 free (stmt_info);
4342 /* Function get_vectype_for_scalar_type.
4344 Returns the vector type corresponding to SCALAR_TYPE as supported
4345 by the target. */
4347 tree
4348 get_vectype_for_scalar_type (tree scalar_type)
4350 enum machine_mode inner_mode = TYPE_MODE (scalar_type);
4351 int nbytes = GET_MODE_SIZE (inner_mode);
4352 int nunits;
4353 tree vectype;
4355 if (nbytes == 0 || nbytes >= UNITS_PER_SIMD_WORD (inner_mode))
4356 return NULL_TREE;
4358 /* FORNOW: Only a single vector size per mode (UNITS_PER_SIMD_WORD)
4359 is expected. */
4360 nunits = UNITS_PER_SIMD_WORD (inner_mode) / nbytes;
4362 vectype = build_vector_type (scalar_type, nunits);
4363 if (vect_print_dump_info (REPORT_DETAILS))
4365 fprintf (vect_dump, "get vectype with %d units of type ", nunits);
4366 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4369 if (!vectype)
4370 return NULL_TREE;
4372 if (vect_print_dump_info (REPORT_DETAILS))
4374 fprintf (vect_dump, "vectype: ");
4375 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4378 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
4379 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
4381 if (vect_print_dump_info (REPORT_DETAILS))
4382 fprintf (vect_dump, "mode not supported by target.");
4383 return NULL_TREE;
4386 return vectype;
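/* Worked example (editorial, assuming a target whose UNITS_PER_SIMD_WORD is
   16 bytes): for SCALAR_TYPE int (nbytes == 4) we get nunits == 4 and a
   4-element integer vector type; for double (nbytes == 8), nunits == 2; a
   scalar type of 16 bytes or more yields NULL_TREE because no vector with
   more than one element fits in the SIMD word.  */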
4389 /* Function vect_is_simple_use.
4391 Input:
4392 LOOP_VINFO - the vect info of the loop that is being vectorized.
4393 BB_VINFO - the vect info of the basic block that is being vectorized.
4394 OPERAND - operand of a stmt in the loop or bb.
4395 DEF - the defining stmt in case OPERAND is an SSA_NAME.
4397 Returns whether a stmt with OPERAND can be vectorized.
4398 For loops, supportable operands are constants, loop invariants, and operands
4399 that are defined by the current iteration of the loop. Unsupportable
4400 operands are those that are defined by a previous iteration of the loop (as
4401 is the case in reduction/induction computations).
4402 For basic blocks, supportable operands are constants and bb invariants.
4403 For now, operands defined outside the basic block are not supported. */
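/* Illustrative classification (an editorial sketch, not from the sources):
   in a loop computing  a[i] = b[i] * c + 5 , the constant 5 yields
   vect_constant_def, a loop-invariant 'c' defined before the loop yields
   vect_external_def, and the load of b[i], defined by a stmt of the current
   iteration, yields vect_internal_def.  An operand carried over from a
   previous iteration, as in  s = s + b[i] , is the reduction/induction case
   described above.  */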
4405 bool
4406 vect_is_simple_use (tree operand, loop_vec_info loop_vinfo,
4407 bb_vec_info bb_vinfo, gimple *def_stmt,
4408 tree *def, enum vect_def_type *dt)
4410 basic_block bb;
4411 stmt_vec_info stmt_vinfo;
4412 struct loop *loop = NULL;
4414 if (loop_vinfo)
4415 loop = LOOP_VINFO_LOOP (loop_vinfo);
4417 *def_stmt = NULL;
4418 *def = NULL_TREE;
4420 if (vect_print_dump_info (REPORT_DETAILS))
4422 fprintf (vect_dump, "vect_is_simple_use: operand ");
4423 print_generic_expr (vect_dump, operand, TDF_SLIM);
4426 if (TREE_CODE (operand) == INTEGER_CST || TREE_CODE (operand) == REAL_CST)
4428 *dt = vect_constant_def;
4429 return true;
4432 if (is_gimple_min_invariant (operand))
4434 *def = operand;
4435 *dt = vect_external_def;
4436 return true;
4439 if (TREE_CODE (operand) == PAREN_EXPR)
4441 if (vect_print_dump_info (REPORT_DETAILS))
4442 fprintf (vect_dump, "non-associatable copy.");
4443 operand = TREE_OPERAND (operand, 0);
4446 if (TREE_CODE (operand) != SSA_NAME)
4448 if (vect_print_dump_info (REPORT_DETAILS))
4449 fprintf (vect_dump, "not ssa-name.");
4450 return false;
4453 *def_stmt = SSA_NAME_DEF_STMT (operand);
4454 if (*def_stmt == NULL)
4456 if (vect_print_dump_info (REPORT_DETAILS))
4457 fprintf (vect_dump, "no def_stmt.");
4458 return false;
4461 if (vect_print_dump_info (REPORT_DETAILS))
4463 fprintf (vect_dump, "def_stmt: ");
4464 print_gimple_stmt (vect_dump, *def_stmt, 0, TDF_SLIM);
4467 /* Empty stmt is expected only in case of a function argument.
4468 (Otherwise we expect a PHI node or a GIMPLE_ASSIGN.) */
4469 if (gimple_nop_p (*def_stmt))
4471 *def = operand;
4472 *dt = vect_external_def;
4473 return true;
4476 bb = gimple_bb (*def_stmt);
4478 if ((loop && !flow_bb_inside_loop_p (loop, bb))
4479 || (!loop && bb != BB_VINFO_BB (bb_vinfo))
4480 || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
4481 *dt = vect_external_def;
4482 else
4484 stmt_vinfo = vinfo_for_stmt (*def_stmt);
4485 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
4488 if (*dt == vect_unknown_def_type)
4490 if (vect_print_dump_info (REPORT_DETAILS))
4491 fprintf (vect_dump, "Unsupported pattern.");
4492 return false;
4495 if (vect_print_dump_info (REPORT_DETAILS))
4496 fprintf (vect_dump, "type of def: %d.", *dt);
4498 switch (gimple_code (*def_stmt))
4500 case GIMPLE_PHI:
4501 *def = gimple_phi_result (*def_stmt);
4502 break;
4504 case GIMPLE_ASSIGN:
4505 *def = gimple_assign_lhs (*def_stmt);
4506 break;
4508 case GIMPLE_CALL:
4509 *def = gimple_call_lhs (*def_stmt);
4510 if (*def != NULL)
4511 break;
4512 /* FALLTHRU */
4513 default:
4514 if (vect_print_dump_info (REPORT_DETAILS))
4515 fprintf (vect_dump, "unsupported defining stmt: ");
4516 return false;
4519 return true;
4523 /* Function supportable_widening_operation
4525 Check whether an operation represented by the code CODE is a
4526 widening operation that is supported by the target platform in
4527 vector form (i.e., when operating on arguments of type VECTYPE).
4529 Widening operations we currently support are NOP (CONVERT), FLOAT
4530 and WIDEN_MULT. This function checks if these operations are supported
4531 by the target platform either directly (via vector tree-codes), or via
4532 target builtins.
4534 Output:
4535 - CODE1 and CODE2 are codes of vector operations to be used when
4536 vectorizing the operation, if available.
4537 - DECL1 and DECL2 are decls of target builtin functions to be used
4538 when vectorizing the operation, if available. In this case,
4539 CODE1 and CODE2 are CALL_EXPR.
4540 - MULTI_STEP_CVT determines the number of required intermediate steps in
4541 case of multi-step conversion (like char->short->int - in that case
4542 MULTI_STEP_CVT will be 1).
4543 - INTERM_TYPES contains the intermediate type required to perform the
4544 widening operation (short in the above example). */
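/* Example (editorial restatement of the char->short->int case above,
   assuming 16-byte vectors): widening a vector of 16 chars to ints needs
   the intermediate type short.  The first VEC_UNPACK_HI/LO_EXPR step
   produces two 8-element short vectors, and a second step unpacks each of
   those into two 4-element int vectors, so MULTI_STEP_CVT is 1 and
   INTERM_TYPES holds the short vector type.  */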
4546 bool
4547 supportable_widening_operation (enum tree_code code, gimple stmt, tree vectype,
4548 tree *decl1, tree *decl2,
4549 enum tree_code *code1, enum tree_code *code2,
4550 int *multi_step_cvt,
4551 VEC (tree, heap) **interm_types)
4553 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4554 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
4555 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
4556 bool ordered_p;
4557 enum machine_mode vec_mode;
4558 enum insn_code icode1, icode2;
4559 optab optab1, optab2;
4560 tree type = gimple_expr_type (stmt);
4561 tree wide_vectype = get_vectype_for_scalar_type (type);
4562 enum tree_code c1, c2;
4564 /* The result of a vectorized widening operation usually requires two vectors
4565 (because the widened results do not fit in one vector). The generated
4566 vector results would normally be expected to be generated in the same
4567 order as in the original scalar computation, i.e. if 8 results are
4568 generated in each vector iteration, they are to be organized as follows:
4569 vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].
4571 However, in the special case that the result of the widening operation is
4572 used in a reduction computation only, the order doesn't matter (because
4573 when vectorizing a reduction we change the order of the computation).
4574 Some targets can take advantage of this and generate more efficient code.
4575 For example, targets like Altivec, that support widen_mult using a sequence
4576 of {mult_even,mult_odd} generate the following vectors:
4577 vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8].
4579 When vectorizing outer-loops, we execute the inner-loop sequentially
4580 (each vectorized inner-loop iteration contributes to VF outer-loop
4581 iterations in parallel). We therefore don't allow changing the order
4582 of the computation in the inner-loop during outer-loop vectorization. */
4584 if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
4585 && !nested_in_vect_loop_p (vect_loop, stmt))
4586 ordered_p = false;
4587 else
4588 ordered_p = true;
4590 if (!ordered_p
4591 && code == WIDEN_MULT_EXPR
4592 && targetm.vectorize.builtin_mul_widen_even
4593 && targetm.vectorize.builtin_mul_widen_even (vectype)
4594 && targetm.vectorize.builtin_mul_widen_odd
4595 && targetm.vectorize.builtin_mul_widen_odd (vectype))
4597 if (vect_print_dump_info (REPORT_DETAILS))
4598 fprintf (vect_dump, "Unordered widening operation detected.");
4600 *code1 = *code2 = CALL_EXPR;
4601 *decl1 = targetm.vectorize.builtin_mul_widen_even (vectype);
4602 *decl2 = targetm.vectorize.builtin_mul_widen_odd (vectype);
4603 return true;
4606 switch (code)
4608 case WIDEN_MULT_EXPR:
4609 if (BYTES_BIG_ENDIAN)
4611 c1 = VEC_WIDEN_MULT_HI_EXPR;
4612 c2 = VEC_WIDEN_MULT_LO_EXPR;
4614 else
4616 c2 = VEC_WIDEN_MULT_HI_EXPR;
4617 c1 = VEC_WIDEN_MULT_LO_EXPR;
4619 break;
4621 CASE_CONVERT:
4622 if (BYTES_BIG_ENDIAN)
4624 c1 = VEC_UNPACK_HI_EXPR;
4625 c2 = VEC_UNPACK_LO_EXPR;
4627 else
4629 c2 = VEC_UNPACK_HI_EXPR;
4630 c1 = VEC_UNPACK_LO_EXPR;
4632 break;
4634 case FLOAT_EXPR:
4635 if (BYTES_BIG_ENDIAN)
4637 c1 = VEC_UNPACK_FLOAT_HI_EXPR;
4638 c2 = VEC_UNPACK_FLOAT_LO_EXPR;
4640 else
4642 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
4643 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
4645 break;
4647 case FIX_TRUNC_EXPR:
4648 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
4649 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
4650 computing the operation. */
4651 return false;
4653 default:
4654 gcc_unreachable ();
4657 if (code == FIX_TRUNC_EXPR)
4659 /* The signedness is determined from output operand. */
4660 optab1 = optab_for_tree_code (c1, type, optab_default);
4661 optab2 = optab_for_tree_code (c2, type, optab_default);
4663 else
4665 optab1 = optab_for_tree_code (c1, vectype, optab_default);
4666 optab2 = optab_for_tree_code (c2, vectype, optab_default);
4669 if (!optab1 || !optab2)
4670 return false;
4672 vec_mode = TYPE_MODE (vectype);
4673 if ((icode1 = optab_handler (optab1, vec_mode)->insn_code) == CODE_FOR_nothing
4674 || (icode2 = optab_handler (optab2, vec_mode)->insn_code)
4675 == CODE_FOR_nothing)
4676 return false;
4678 /* Check if it's a multi-step conversion that can be done using intermediate
4679 types. */
4680 if (insn_data[icode1].operand[0].mode != TYPE_MODE (wide_vectype)
4681 || insn_data[icode2].operand[0].mode != TYPE_MODE (wide_vectype))
4683 int i;
4684 tree prev_type = vectype, intermediate_type;
4685 enum machine_mode intermediate_mode, prev_mode = vec_mode;
4686 optab optab3, optab4;
4688 if (!CONVERT_EXPR_CODE_P (code))
4689 return false;
4691 *code1 = c1;
4692 *code2 = c2;
4694 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
4695 intermediate steps in the promotion sequence. We try MAX_INTERM_CVT_STEPS
4696 to get to WIDE_VECTYPE, and fail if we do not. */
4697 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
4698 for (i = 0; i < 3; i++)
4700 intermediate_mode = insn_data[icode1].operand[0].mode;
4701 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
4702 TYPE_UNSIGNED (prev_type));
4703 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
4704 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
4706 if (!optab3 || !optab4
4707 || (icode1 = optab1->handlers[(int) prev_mode].insn_code)
4708 == CODE_FOR_nothing
4709 || insn_data[icode1].operand[0].mode != intermediate_mode
4710 || (icode2 = optab2->handlers[(int) prev_mode].insn_code)
4711 == CODE_FOR_nothing
4712 || insn_data[icode2].operand[0].mode != intermediate_mode
4713 || (icode1 = optab3->handlers[(int) intermediate_mode].insn_code)
4714 == CODE_FOR_nothing
4715 || (icode2 = optab4->handlers[(int) intermediate_mode].insn_code)
4716 == CODE_FOR_nothing)
4717 return false;
4719 VEC_quick_push (tree, *interm_types, intermediate_type);
4720 (*multi_step_cvt)++;
4722 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
4723 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
4724 return true;
4726 prev_type = intermediate_type;
4727 prev_mode = intermediate_mode;
4730 return false;
4733 *code1 = c1;
4734 *code2 = c2;
4735 return true;
4739 /* Function supportable_narrowing_operation
4741 Check whether an operation represented by the code CODE is a
4742 narrowing operation that is supported by the target platform in
4743 vector form (i.e., when operating on arguments of type VECTYPE).
4745 Narrowing operations we currently support are NOP (CONVERT) and
4746 FIX_TRUNC. This function checks if these operations are supported by
4747 the target platform directly via vector tree-codes.
4749 Output:
4750 - CODE1 is the code of a vector operation to be used when
4751 vectorizing the operation, if available.
4752 - MULTI_STEP_CVT determines the number of required intermediate steps in
4753 case of multi-step conversion (like int->short->char - in that case
4754 MULTI_STEP_CVT will be 1).
4755 - INTERM_TYPES contains the intermediate type required to perform the
4756 narrowing operation (short in the above example). */
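/* Example (editorial restatement of the int->short->char case above,
   assuming 16-byte vectors): two 4-element int vectors are first packed
   into one 8-element short vector with VEC_PACK_TRUNC_EXPR, and two such
   short vectors are then packed into one 16-element char vector, so
   MULTI_STEP_CVT is 1 and INTERM_TYPES holds the short vector type.  */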
4758 bool
4759 supportable_narrowing_operation (enum tree_code code,
4760 const_gimple stmt, tree vectype,
4761 enum tree_code *code1, int *multi_step_cvt,
4762 VEC (tree, heap) **interm_types)
4764 enum machine_mode vec_mode;
4765 enum insn_code icode1;
4766 optab optab1, interm_optab;
4767 tree type = gimple_expr_type (stmt);
4768 tree narrow_vectype = get_vectype_for_scalar_type (type);
4769 enum tree_code c1;
4770 tree intermediate_type, prev_type;
4771 int i;
4773 switch (code)
4775 CASE_CONVERT:
4776 c1 = VEC_PACK_TRUNC_EXPR;
4777 break;
4779 case FIX_TRUNC_EXPR:
4780 c1 = VEC_PACK_FIX_TRUNC_EXPR;
4781 break;
4783 case FLOAT_EXPR:
4784 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
4785 tree code and optabs used for computing the operation. */
4786 return false;
4788 default:
4789 gcc_unreachable ();
4792 if (code == FIX_TRUNC_EXPR)
4793 /* The signedness is determined from output operand. */
4794 optab1 = optab_for_tree_code (c1, type, optab_default);
4795 else
4796 optab1 = optab_for_tree_code (c1, vectype, optab_default);
4798 if (!optab1)
4799 return false;
4801 vec_mode = TYPE_MODE (vectype);
4802 if ((icode1 = optab_handler (optab1, vec_mode)->insn_code)
4803 == CODE_FOR_nothing)
4804 return false;
4806 /* Check if it's a multi-step conversion that can be done using intermediate
4807 types. */
4808 if (insn_data[icode1].operand[0].mode != TYPE_MODE (narrow_vectype))
4810 enum machine_mode intermediate_mode, prev_mode = vec_mode;
4812 *code1 = c1;
4813 prev_type = vectype;
4814 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
4815 intermediate steps in the demotion sequence. We try MAX_INTERM_CVT_STEPS
4816 to get to NARROW_VECTYPE, and fail if we do not. */
4817 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
4818 for (i = 0; i < 3; i++)
4820 intermediate_mode = insn_data[icode1].operand[0].mode;
4821 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
4822 TYPE_UNSIGNED (prev_type));
4823 interm_optab = optab_for_tree_code (c1, intermediate_type,
4824 optab_default);
4825 if (!interm_optab
4826 || (icode1 = optab1->handlers[(int) prev_mode].insn_code)
4827 == CODE_FOR_nothing
4828 || insn_data[icode1].operand[0].mode != intermediate_mode
4829 || (icode1
4830 = interm_optab->handlers[(int) intermediate_mode].insn_code)
4831 == CODE_FOR_nothing)
4832 return false;
4834 VEC_quick_push (tree, *interm_types, intermediate_type);
4835 (*multi_step_cvt)++;
4837 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
4838 return true;
4840 prev_type = intermediate_type;
4841 prev_mode = intermediate_mode;
4844 return false;
4847 *code1 = c1;
4848 return true;