/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "target.h"
#include "basic-block.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "cfgloop.h"
#include "cfglayout.h"
#include "expr.h"
#include "recog.h"
#include "optabs.h"
#include "toplev.h"
#include "tree-vectorizer.h"
#include "langhooks.h"

/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt,
                    enum vect_relevant relevant, bool live_p)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "mark relevant %d, live %d.", relevant, live_p);

  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      gimple pattern_stmt;

      /* This is the last stmt in a sequence that was detected as a
         pattern that can potentially be vectorized.  Don't mark the stmt
         as relevant/live because it's not going to be vectorized.
         Instead mark the pattern-stmt that replaces it.  */

      pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "last stmt in pattern. don't mark relevant/live.");
      stmt_info = vinfo_for_stmt (pattern_stmt);
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
      save_relevant = STMT_VINFO_RELEVANT (stmt_info);
      save_live_p = STMT_VINFO_LIVE_P (stmt_info);
      stmt = pattern_stmt;
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "already marked relevant/live.");
      return;
    }

  VEC_safe_push (gimple, heap, *worklist, stmt);
}
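
/* For example, worklist initialization in vect_mark_stmts_to_be_vectorized
   (below) pushes every relevant stmt this way:

     if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
       vect_mark_relevant (&worklist, stmt, relevant, live_p);  */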

/* Function vect_stmt_relevant_p.

   Return true if STMT in loop that is represented by LOOP_VINFO is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - control stmts in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */

static bool
vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
                      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
         != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt))
      {
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "vec_stmt_relevant_p: stmt has vdefs.");
        *relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
        {
          basic_block bb = gimple_bb (USE_STMT (use_p));
          if (!flow_bb_inside_loop_p (loop, bb))
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "vec_stmt_relevant_p: used out of loop.");

              if (is_gimple_debug (USE_STMT (use_p)))
                continue;

              /* We expect all such uses to be in the loop exit phis
                 (because of loop closed form)   */
              gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
              gcc_assert (bb == single_exit (loop)->dest);

              *live_p = true;
            }
        }
    }

  return (*live_p || *relevant);
}
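
/* A minimal sketch of a stmt that is "live" but not otherwise relevant,
   assuming a loop in loop-closed SSA form:

     for (i = 0; i < n; i++)
       s = a[i];    <-- defines s; no use inside the loop
     ... = s;       <-- reached through a loop-exit phi, so *live_p is set  */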

/* Function exist_non_indexing_operands_for_use_p.

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref.  FORNOW this means that it's of one of
     the following forms:
     -1- ARRAY_REF = var
     -2- var = ARRAY_REF
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))
    return false;
  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}
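
/* For example, in "a[j] = x" (form -1- above) the use of 'x' is the copied
   operand, so this returns true for 'x'; the use of 'j' only indexes the
   array, so this returns false for 'j'.  */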

/* Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
       STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array
     indexing), which does not need to be directly vectorized, then the
     liveness/relevance of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
     skip DEF_STMT because it had already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
     be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */

static bool
process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
             enum vect_relevant relevant, VEC(gimple,heap) **worklist)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, loop_vinfo, NULL, &def_stmt, &def, &dt))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: unsupported use in stmt.");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "def_stmt is out of loop.");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "reduc-stmt defining reduc-phi in the same nest.");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
        dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
                  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
        outer-loop-header-bb:
                d = def_stmt
        inner-loop:
                stmt # use (d)
        outer-loop-tail-bb:
                ...               */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "outer-loop def-stmt defining inner-loop stmt.");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
                      vect_used_in_scope : vect_unused_in_scope;
          break;

        case vect_used_in_outer_by_reduction:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_by_reduction;
          break;

        case vect_used_in_outer:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_in_scope;
          break;

        case vect_used_in_scope:
          break;

        default:
          gcc_unreachable ();
        }
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
        outer-loop-header-bb:
                ...
        inner-loop:
                d = def_stmt
        outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
                stmt # use (d)          */
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "inner-loop def-stmt defining outer-loop stmt.");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
            || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
                      vect_used_in_outer_by_reduction : vect_unused_in_scope;
          break;

        case vect_used_by_reduction:
          relevant = vect_used_in_outer_by_reduction;
          break;

        case vect_used_in_scope:
          relevant = vect_used_in_outer;
          break;

        default:
          gcc_unreachable ();
        }
    }

  vect_mark_relevant (worklist, def_stmt, relevant, live_p);
  return true;
}

/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.    T1 = a[T0]

   3.    j = j + 1

   Stmts 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */

bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  VEC(gimple,heap) *worklist;
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple stmt;
  unsigned int i;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple phi;
  bool live_p;
  enum vect_relevant relevant, tmp_relevant;
  enum vect_def_type def_type;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_mark_stmts_to_be_vectorized ===");

  worklist = VEC_alloc (gimple, heap, 64);

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "init: phi relevant? ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }

          if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, phi, relevant, live_p);
        }
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          stmt = gsi_stmt (si);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "init: stmt relevant? ");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }

          if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, stmt, relevant, live_p);
        }
    }

  /* 2. Process_worklist.  */
  while (VEC_length (gimple, worklist) > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = VEC_pop (gimple, worklist);
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "worklist: examine stmt: ");
          print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
        }

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines
         it (DEF_STMT) as relevant/irrelevant and live/dead according to the
         liveness and relevance properties of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
      live_p = STMT_VINFO_LIVE_P (stmt_vinfo);

      /* Generally, the liveness and relevance properties of STMT are
         propagated as is to the DEF_STMTs of its USEs:
          live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
          relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)

         One exception is when STMT has been identified as defining a
         reduction variable; in this case we set the liveness/relevance
         as follows:
           live_p = false
           relevant = vect_used_by_reduction
         This is because we distinguish between two kinds of relevant stmts -
         those that are used by a reduction computation, and those that are
         (also) used by a regular computation.  This allows us later on to
         identify stmts that are used solely by a reduction, and therefore the
         order of the results that they produce does not have to be kept.  */

      def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
      tmp_relevant = relevant;
      switch (def_type)
        {
        case vect_reduction_def:
          switch (tmp_relevant)
            {
            case vect_unused_in_scope:
              relevant = vect_used_by_reduction;
              break;

            case vect_used_by_reduction:
              if (gimple_code (stmt) == GIMPLE_PHI)
                break;
              /* fall through */

            default:
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "unsupported use of reduction.");

              VEC_free (gimple, heap, worklist);
              return false;
            }

          live_p = false;
          break;

        case vect_nested_cycle:
          if (tmp_relevant != vect_unused_in_scope
              && tmp_relevant != vect_used_in_outer_by_reduction
              && tmp_relevant != vect_used_in_outer)
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "unsupported use of nested cycle.");

              VEC_free (gimple, heap, worklist);
              return false;
            }

          live_p = false;
          break;

        case vect_double_reduction_def:
          if (tmp_relevant != vect_unused_in_scope
              && tmp_relevant != vect_used_by_reduction)
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "unsupported use of double reduction.");

              VEC_free (gimple, heap, worklist);
              return false;
            }

          live_p = false;
          break;

        default:
          break;
        }

      FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
        {
          tree op = USE_FROM_PTR (use_p);
          if (!process_use (stmt, op, loop_vinfo, live_p, relevant, &worklist))
            {
              VEC_free (gimple, heap, worklist);
              return false;
            }
        }
    } /* while worklist */

  VEC_free (gimple, heap, worklist);
  return true;
}

int
cost_for_stmt (gimple stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (STMT_VINFO_TYPE (stmt_info))
    {
    case load_vec_info_type:
      return TARG_SCALAR_LOAD_COST;
    case store_vec_info_type:
      return TARG_SCALAR_STORE_COST;
    case op_vec_info_type:
    case condition_vec_info_type:
    case assignment_vec_info_type:
    case reduc_vec_info_type:
    case induc_vec_info_type:
    case type_promotion_vec_info_type:
    case type_demotion_vec_info_type:
    case type_conversion_vec_info_type:
    case call_vec_info_type:
      return TARG_SCALAR_STMT_COST;
    case undef_vec_info_type:
    default:
      gcc_unreachable ();
    }
}

/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
                        enum vect_def_type *dt, slp_tree slp_node)
{
  int i;
  int inside_cost = 0, outside_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  inside_cost = ncopies * TARG_VEC_STMT_COST;

  /* FORNOW: Assuming maximum 2 args per stmt.  */
  for (i = 0; i < 2; i++)
    {
      if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
        outside_cost += TARG_SCALAR_TO_VEC_COST;
    }

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, "
             "outside_cost = %d .", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}

/* Function vect_cost_strided_group_size.

   For strided load or store, return the group_size only if it is the first
   load or store of a group, else return 1.  This ensures that group size is
   only returned once per group.  */

static int
vect_cost_strided_group_size (stmt_vec_info stmt_info)
{
  gimple first_stmt = DR_GROUP_FIRST_DR (stmt_info);

  if (first_stmt == STMT_VINFO_STMT (stmt_info))
    return DR_GROUP_SIZE (stmt_info);

  return 1;
}
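
/* For example, in a group of four interleaved stores only the stmt recorded
   as DR_GROUP_FIRST_DR reports group_size = 4; the other three stores each
   report 1, so the group overhead is counted exactly once.  */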

/* Function vect_model_store_cost.

   Models cost for stores.  In the case of strided accesses, one access
   has the overhead of the strided access attributed to it.  */

void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
                       enum vect_def_type dt, slp_tree slp_node)
{
  int group_size;
  int inside_cost = 0, outside_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (dt == vect_constant_def || dt == vect_external_def)
    outside_cost = TARG_SCALAR_TO_VEC_COST;

  /* Strided access?  */
  if (DR_GROUP_FIRST_DR (stmt_info) && !slp_node)
    group_size = vect_cost_strided_group_size (stmt_info);
  /* Not a strided access.  */
  else
    group_size = 1;

  /* Is this an access in a group of stores, which provide strided access?
     If so, add in the cost of the permutes.  */
  if (group_size > 1)
    {
      /* Uses a high and low interleave operation for each needed permute.  */
      inside_cost = ncopies * exact_log2 (group_size) * group_size
                    * TARG_VEC_STMT_COST;
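
      /* E.g. ncopies = 1 and group_size = 4 gives exact_log2 (4) * 4 = 8
         interleave stmts, each costed at TARG_VEC_STMT_COST.  */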

      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .",
                 group_size);
    }

  /* Costs of the stores.  */
  inside_cost += ncopies * TARG_VEC_STORE_COST;

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, "
             "outside_cost = %d .", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}

/* Function vect_model_load_cost.

   Models cost for loads.  In the case of strided accesses, the last access
   has the overhead of the strided access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)
{
  int group_size;
  int alignment_support_scheme;
  gimple first_stmt;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
  int inside_cost = 0, outside_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* Strided accesses?  */
  first_stmt = DR_GROUP_FIRST_DR (stmt_info);
  if (first_stmt && !slp_node)
    {
      group_size = vect_cost_strided_group_size (stmt_info);
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a strided access.  */
  else
    {
      group_size = 1;
      first_dr = dr;
    }

  alignment_support_scheme = vect_supportable_dr_alignment (first_dr);

  /* Is this an access in a group of loads providing strided access?
     If so, add in the cost of the permutes.  */
  if (group_size > 1)
    {
      /* Uses even and odd extract operations for each needed permute.  */
      inside_cost = ncopies * exact_log2 (group_size) * group_size
                    * TARG_VEC_STMT_COST;

      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .",
                 group_size);
    }

  /* The loads themselves.  */
  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
        inside_cost += ncopies * TARG_VEC_LOAD_COST;

        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_load_cost: aligned.");

        break;
      }
    case dr_unaligned_supported:
      {
        /* Here, we assign an additional cost for the unaligned load.  */
        inside_cost += ncopies * TARG_VEC_UNALIGNED_LOAD_COST;

        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_load_cost: unaligned supported by "
                   "hardware.");

        break;
      }
    case dr_explicit_realign:
      {
        inside_cost += ncopies * (2 * TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST);

        /* FIXME: If the misalignment remains fixed across the iterations of
           the containing loop, the following cost should be added to the
           outside costs.  */
        if (targetm.vectorize.builtin_mask_for_load)
          inside_cost += TARG_VEC_STMT_COST;

        break;
      }
    case dr_explicit_realign_optimized:
      {
        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_load_cost: unaligned software "
                   "pipelined.");

        /* Unaligned software pipeline has a load of an address, an initial
           load, and possibly a mask operation to "prime" the loop.  However,
           if this is an access in a group of loads, which provide strided
           access, then the above cost should only be considered for one
           access in the group.  Inside the loop, there is a load op
           and a realignment op.  */

        if ((!DR_GROUP_FIRST_DR (stmt_info)) || group_size > 1 || slp_node)
          {
            outside_cost = 2 * TARG_VEC_STMT_COST;
            if (targetm.vectorize.builtin_mask_for_load)
              outside_cost += TARG_VEC_STMT_COST;
          }

        inside_cost += ncopies * (TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST);

        break;
      }

    default:
      gcc_unreachable ();
    }

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
             "outside_cost = %d .", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}

/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new vector variable with
   the vector elements of VECTOR_VAR.  Place the initialization at GSI if it
   is not NULL.  Otherwise, place the initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */

tree
vect_init_vector (gimple stmt, tree vector_var, tree vector_type,
                  gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  tree new_var;
  gimple init_stmt;
  tree vec_oprnd;
  edge pe;
  tree new_temp;
  basic_block new_bb;

  new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
  add_referenced_var (new_var);
  init_stmt = gimple_build_assign (new_var, vector_var);
  new_temp = make_ssa_name (new_var, init_stmt);
  gimple_assign_set_lhs (init_stmt, new_temp);

  if (gsi)
    vect_finish_stmt_generation (stmt, init_stmt, gsi);
  else
    {
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
        {
          struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

          if (nested_in_vect_loop_p (loop, stmt))
            loop = loop->inner;

          pe = loop_preheader_edge (loop);
          new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
          gcc_assert (!new_bb);
        }
      else
        {
          bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
          basic_block bb;
          gimple_stmt_iterator gsi_bb_start;

          gcc_assert (bb_vinfo);
          bb = BB_VINFO_BB (bb_vinfo);
          gsi_bb_start = gsi_after_labels (bb);
          gsi_insert_before (&gsi_bb_start, init_stmt, GSI_SAME_STMT);
        }
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "created new init_stmt: ");
      print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
    }

  vec_oprnd = gimple_assign_lhs (init_stmt);
  return vec_oprnd;
}
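
/* A typical use, taken from the constant-operand case in
   vect_get_vec_def_for_operand below: the vector constant is built and then
   materialized on the preheader edge (GSI passed as NULL):

     vec_cst = build_vector (vector_type, t);
     return vect_init_vector (stmt, vec_cst, vector_type, NULL);  */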

/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT.  This function returns a (vector) def that will
   be used in the vectorized stmt for STMT.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector
   def needs to be introduced.  */

tree
vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
{
  tree vec_oprnd;
  gimple vec_stmt;
  gimple def_stmt;
  stmt_vec_info def_stmt_info = NULL;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
  unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  tree vec_inv;
  tree vec_cst;
  tree t = NULL_TREE;
  tree def;
  int i;
  enum vect_def_type dt;
  bool is_simple_use;
  tree vector_type;

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vect_get_vec_def_for_operand: ");
      print_generic_expr (vect_dump, op, TDF_SLIM);
    }

  is_simple_use = vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def,
                                      &dt);
  gcc_assert (is_simple_use);
  if (vect_print_dump_info (REPORT_DETAILS))
    {
      if (def)
        {
          fprintf (vect_dump, "def = ");
          print_generic_expr (vect_dump, def, TDF_SLIM);
        }
      if (def_stmt)
        {
          fprintf (vect_dump, " def_stmt = ");
          print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
        }
    }

  switch (dt)
    {
    /* Case 1: operand is a constant.  */
    case vect_constant_def:
      {
        vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
        gcc_assert (vector_type);

        if (scalar_def)
          *scalar_def = op;

        /* Create 'vect_cst_ = {cst,cst,...,cst}'  */
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits);

        for (i = nunits - 1; i >= 0; --i)
          {
            t = tree_cons (NULL_TREE, op, t);
          }
        vec_cst = build_vector (vector_type, t);
        return vect_init_vector (stmt, vec_cst, vector_type, NULL);
      }

    /* Case 2: operand is defined outside the loop - loop invariant.  */
    case vect_external_def:
      {
        vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
        gcc_assert (vector_type);
        nunits = TYPE_VECTOR_SUBPARTS (vector_type);

        if (scalar_def)
          *scalar_def = def;

        /* Create 'vec_inv = {inv,inv,..,inv}'  */
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "Create vector_inv.");

        for (i = nunits - 1; i >= 0; --i)
          {
            t = tree_cons (NULL_TREE, def, t);
          }

        /* FIXME: use build_constructor directly.  */
        vec_inv = build_constructor_from_list (vector_type, t);
        return vect_init_vector (stmt, vec_inv, vector_type, NULL);
      }

    /* Case 3: operand is defined inside the loop.  */
    case vect_internal_def:
      {
        if (scalar_def)
          *scalar_def = NULL/* FIXME tuples: def_stmt*/;

        /* Get the def from the vectorized stmt.  */
        def_stmt_info = vinfo_for_stmt (def_stmt);
        vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
        gcc_assert (vec_stmt);
        if (gimple_code (vec_stmt) == GIMPLE_PHI)
          vec_oprnd = PHI_RESULT (vec_stmt);
        else if (is_gimple_call (vec_stmt))
          vec_oprnd = gimple_call_lhs (vec_stmt);
        else
          vec_oprnd = gimple_assign_lhs (vec_stmt);
        return vec_oprnd;
      }

    /* Case 4: operand is defined by a loop-header phi - reduction.  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
      {
        struct loop *loop;

        gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
        loop = (gimple_bb (def_stmt))->loop_father;

        /* Get the def before the loop.  */
        op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
        return get_initial_def_for_reduction (stmt, op, scalar_def);
      }

    /* Case 5: operand is defined by loop-header phi - induction.  */
    case vect_induction_def:
      {
        gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);

        /* Get the def from the vectorized stmt.  */
        def_stmt_info = vinfo_for_stmt (def_stmt);
        vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
        gcc_assert (vec_stmt && gimple_code (vec_stmt) == GIMPLE_PHI);
        vec_oprnd = PHI_RESULT (vec_stmt);
        return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}

/* Function vect_get_vec_def_for_stmt_copy

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT
   field of the stmt that defines VEC_OPRND.
   DT is the type of the vector def VEC_OPRND.

   Context:
        In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of
   the vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:         vectorized into:        STMT_VINFO_RELATED_STMT

   S1: x = load         VS1.0:  vx.0 = memref0      VS1.1
                        VS1.1:  vx.1 = memref1      VS1.2
                        VS1.2:  vx.2 = memref2      VS1.3
                        VS1.3:  vx.3 = memref3

   S2: z = x + ...      VSnew.0:  vz0 = vx.0 + ...  VSnew.1
                        VSnew.1:  vz1 = vx.1 + ...  VSnew.2
                        VSnew.2:  vz2 = vx.2 + ...  VSnew.3
                        VSnew.3:  vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
        To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

        To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

        For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx.0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
        vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
        vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
        vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2);  */

tree
vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
{
  gimple vec_stmt_for_operand;
  stmt_vec_info def_stmt_info;

  /* Do nothing; can reuse same def.  */
  if (dt == vect_external_def || dt == vect_constant_def)
    return vec_oprnd;

  vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
  def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
  gcc_assert (def_stmt_info);
  vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (vec_stmt_for_operand);
  if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
    vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
  else
    vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  return vec_oprnd;
}

/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */

static void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
                                 VEC(tree,heap) **vec_oprnds0,
                                 VEC(tree,heap) **vec_oprnds1)
{
  tree vec_oprnd = VEC_pop (tree, *vec_oprnds0);

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);

  if (vec_oprnds1 && *vec_oprnds1)
    {
      vec_oprnd = VEC_pop (tree, *vec_oprnds1);
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
    }
}

/* Get vectorized definitions for OP0 and OP1, or SLP_NODE if it is not
   NULL.  */

static void
vect_get_vec_defs (tree op0, tree op1, gimple stmt,
                   VEC(tree,heap) **vec_oprnds0, VEC(tree,heap) **vec_oprnds1,
                   slp_tree slp_node)
{
  if (slp_node)
    vect_get_slp_defs (slp_node, vec_oprnds0, vec_oprnds1, -1);
  else
    {
      tree vec_oprnd;

      *vec_oprnds0 = VEC_alloc (tree, heap, 1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
      VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);

      if (op1)
        {
          *vec_oprnds1 = VEC_alloc (tree, heap, 1);
          vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
          VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
        }
    }
}

/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

void
vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
                             gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);

  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
                                                   bb_vinfo));

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "add new stmt: ");
      print_gimple_stmt (vect_dump, vec_stmt, 0, TDF_SLIM);
    }

  gimple_set_location (vec_stmt, gimple_location (gsi_stmt (*gsi)));
}

/* Checks if CALL can be vectorized in type VECTYPE.  Returns
   a function declaration if the target has a vectorized version
   of the function, or NULL_TREE if the function cannot be vectorized.  */

tree
vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
{
  tree fndecl = gimple_call_fndecl (call);

  /* We only handle functions that do not read or clobber memory -- i.e.
     const or novops ones.  */
  if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
    return NULL_TREE;

  if (!fndecl
      || TREE_CODE (fndecl) != FUNCTION_DECL
      || !DECL_BUILT_IN (fndecl))
    return NULL_TREE;

  return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
                                                        vectype_in);
}

/* Function vectorizable_call.

   Check if STMT performs a function call that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
{
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
  tree vectype_out, vectype_in;
  int nunits_in;
  int nunits_out;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree fndecl, new_temp, def, rhs_type;
  gimple def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple new_stmt = NULL;
  int ncopies, j;
  VEC(tree, heap) *vargs = NULL;
  enum { NARROW, NONE, WIDEN } modifier;
  size_t i, nargs;

  /* FORNOW: unsupported in basic block SLP.  */
  gcc_assert (loop_vinfo);

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* FORNOW: SLP not supported.  */
  if (STMT_SLP_TYPE (stmt_info))
    return false;

  /* Is STMT a vectorizable call?  */
  if (!is_gimple_call (stmt))
    return false;

  if (TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Process function arguments.  */
  rhs_type = NULL_TREE;
  vectype_in = NULL_TREE;
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has more than two arguments, we
     do not have interesting builtin functions to vectorize with
     more than two arguments.  No arguments is also not good.  */
  if (nargs == 0 || nargs > 2)
    return false;

  for (i = 0; i < nargs; i++)
    {
      tree opvectype;

      op = gimple_call_arg (stmt, i);

      /* We can only handle calls with arguments of the same type.  */
      if (rhs_type
          && !types_compatible_p (rhs_type, TREE_TYPE (op)))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "argument types differ.");
          return false;
        }
      if (!rhs_type)
        rhs_type = TREE_TYPE (op);

      if (!vect_is_simple_use_1 (op, loop_vinfo, NULL,
                                 &def_stmt, &def, &dt[i], &opvectype))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "use not simple.");
          return false;
        }

      if (!vectype_in)
        vectype_in = opvectype;
      else if (opvectype
               && opvectype != vectype_in)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "argument vector types differ.");
          return false;
        }
    }
  /* If all arguments are external or constant defs use a vector type with
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "no vectype for scalar type ");
          print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
        }

      return false;
    }

  /* FORNOW */
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in == nunits_out / 2)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else if (nunits_out == nunits_in / 2)
    modifier = WIDEN;
  else
    return false;
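
  /* For instance, four-element input vectors with an eight-element output
     vector give nunits_in == nunits_out / 2, i.e. NARROW; equal subpart
     counts give NONE, and the inverse ratio gives WIDEN.  */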

  /* For now, we only vectorize functions if a target specific builtin
     is available.  TODO -- in some cases, it might be profitable to
     insert the calls for pieces of the vector, in order to be able
     to vectorize other operations in the loop.  */
  fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
  if (fndecl == NULL_TREE)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "function is not vectorizable.");

      return false;
    }

  gcc_assert (!gimple_vuse (stmt));

  if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
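
  /* E.g. with VF = 8, nunits_in = 4 and modifier == NONE this yields
     ncopies = 8 / 4 = 2 vector calls per scalar call.  */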

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "=== vectorizable_call ===");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
      return true;
    }

  /** Transform.  **/

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform operation.");

  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; ++j)
        {
          /* Build argument list for the vectorized call.  */
          if (j == 0)
            vargs = VEC_alloc (tree, heap, nargs);
          else
            VEC_truncate (tree, vargs, 0);

          for (i = 0; i < nargs; i++)
            {
              op = gimple_call_arg (stmt, i);
              if (j == 0)
                vec_oprnd0
                  = vect_get_vec_def_for_operand (op, stmt, NULL);
              else
                {
                  vec_oprnd0 = gimple_call_arg (new_stmt, i);
                  vec_oprnd0
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }

              VEC_quick_push (tree, vargs, vec_oprnd0);
            }

          new_stmt = gimple_build_call_vec (fndecl, vargs);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_call_set_lhs (new_stmt, new_temp);

          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          mark_symbols_for_renaming (new_stmt);

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      break;

    case NARROW:
      for (j = 0; j < ncopies; ++j)
        {
          /* Build argument list for the vectorized call.  */
          if (j == 0)
            vargs = VEC_alloc (tree, heap, nargs * 2);
          else
            VEC_truncate (tree, vargs, 0);

          for (i = 0; i < nargs; i++)
            {
              op = gimple_call_arg (stmt, i);
              if (j == 0)
                {
                  vec_oprnd0
                    = vect_get_vec_def_for_operand (op, stmt, NULL);
                  vec_oprnd1
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }
              else
                {
                  vec_oprnd1 = gimple_call_arg (new_stmt, 2*i);
                  vec_oprnd0
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
                  vec_oprnd1
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }

              VEC_quick_push (tree, vargs, vec_oprnd0);
              VEC_quick_push (tree, vargs, vec_oprnd1);
            }

          new_stmt = gimple_build_call_vec (fndecl, vargs);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_call_set_lhs (new_stmt, new_temp);

          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          mark_symbols_for_renaming (new_stmt);

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

      break;

    case WIDEN:
      /* No current target implements this case.  */
      return false;
    }

  VEC_free (tree, heap, vargs);

  /* Update the exception handling table with the vector stmt if necessary.  */
  if (maybe_clean_or_replace_eh_stmt (stmt, *vec_stmt))
    gimple_purge_dead_eh_edges (gimple_bb (stmt));

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  type = TREE_TYPE (scalar_dest);
  new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
                                  fold_convert (type, integer_zero_node));
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, false);
  SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;

  return true;
}

/* Function vect_gen_widened_results_half

   Create a vector stmt whose code, type, number of arguments, and result
   variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
   VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at GSI.
   In the case that CODE is a CALL_EXPR, this means that a call to DECL
   needs to be created (DECL is a function-decl of a target-builtin).
   STMT is the original scalar stmt that we are vectorizing.  */

static gimple
vect_gen_widened_results_half (enum tree_code code,
                               tree decl,
                               tree vec_oprnd0, tree vec_oprnd1, int op_type,
                               tree vec_dest, gimple_stmt_iterator *gsi,
                               gimple stmt)
{
  gimple new_stmt;
  tree new_temp;

  /* Generate half of the widened result:  */
  if (code == CALL_EXPR)
    {
      /* Target specific support  */
      if (op_type == binary_op)
        new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
      else
        new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_call_set_lhs (new_stmt, new_temp);
    }
  else
    {
      /* Generic support */
      gcc_assert (op_type == TREE_CODE_LENGTH (code));
      if (op_type != binary_op)
        vec_oprnd1 = NULL;
      new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
                                               vec_oprnd1);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
    }
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return new_stmt;
}

/* Check if STMT performs a conversion operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
                         gimple *vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
  tree decl1 = NULL_TREE, decl2 = NULL_TREE;
  tree new_temp;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out, vectype_in;
  int ncopies, j;
  tree rhs_type;
  tree builtin_decl;
  enum { NARROW, NONE, WIDEN } modifier;
  int i;
  VEC(tree,heap) *vec_oprnds0 = NULL;
  tree vop0;
  VEC(tree,heap) *dummy = NULL;
  int dummy_int;

  /* Is STMT a vectorizable conversion?  */

  /* FORNOW: unsupported in basic block SLP.  */
  gcc_assert (loop_vinfo);

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
    return false;

  /* Check types of lhs and rhs.  */
  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  op0 = gimple_assign_rhs1 (stmt);
  rhs_type = TREE_TYPE (op0);
  /* Check the operands of the operation.  */
  if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
                             &def_stmt, &def, &dt[0], &vectype_in))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "use not simple.");
      return false;
    }
  /* If op0 is an external or constant defs use a vector type of
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "no vectype for scalar type ");
          print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
        }

      return false;
    }

  /* FORNOW */
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in == nunits_out / 2)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else if (nunits_out == nunits_in / 2)
    modifier = WIDEN;
  else
    return false;

  if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* FORNOW: SLP with multiple types is not supported.  The SLP analysis
     verifies this, so we can safely override NCOPIES with 1 here.  */
  if (slp_node)
    ncopies = 1;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  /* Supportable by target?  */
  if ((modifier == NONE
       && !targetm.vectorize.builtin_conversion (code, vectype_out,
                                                 vectype_in))
      || (modifier == WIDEN
          && !supportable_widening_operation (code, stmt,
                                              vectype_out, vectype_in,
                                              &decl1, &decl2,
                                              &code1, &code2,
                                              &dummy_int, &dummy))
      || (modifier == NARROW
          && !supportable_narrowing_operation (code, vectype_out, vectype_in,
                                               &code1, &dummy_int, &dummy)))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "conversion not supported by target.");
      return false;
    }

  if (modifier != NONE)
    {
      /* FORNOW: SLP not supported.  */
      if (STMT_SLP_TYPE (stmt_info))
        return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
      return true;
    }

  /** Transform.  **/
  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform conversion.");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  if (modifier == NONE && !slp_node)
    vec_oprnds0 = VEC_alloc (tree, heap, 1);

  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; j++)
        {
          if (j == 0)
            vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
          else
            vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);

          builtin_decl =
            targetm.vectorize.builtin_conversion (code,
                                                  vectype_out, vectype_in);
          for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vop0); i++)
            {
              /* Arguments are ready.  Create the new vector stmt.  */
              new_stmt = gimple_build_call (builtin_decl, 1, vop0);
              new_temp = make_ssa_name (vec_dest, new_stmt);
              gimple_call_set_lhs (new_stmt, new_temp);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              if (slp_node)
                VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node),
                                new_stmt);
            }

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
      break;

    case WIDEN:
      /* In case the vectorization factor (VF) is bigger than the number
         of elements that we can fit in a vectype (nunits), we have to
         generate more than one vector stmt - i.e - we need to "unroll"
         the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
        {
          if (j == 0)
            vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
          else
            vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);

          /* Generate first half of the widened result:  */
          new_stmt
            = vect_gen_widened_results_half (code1, decl1,
                                             vec_oprnd0, vec_oprnd1,
                                             unary_op, vec_dest, gsi, stmt);
          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);

          /* Generate second half of the widened result:  */
          new_stmt
            = vect_gen_widened_results_half (code2, decl2,
                                             vec_oprnd0, vec_oprnd1,
                                             unary_op, vec_dest, gsi, stmt);
          STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
      break;

    case NARROW:
      /* In case the vectorization factor (VF) is bigger than the number
         of elements that we can fit in a vectype (nunits), we have to
         generate more than one vector stmt - i.e - we need to "unroll"
         the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
        {
          /* Handle uses.  */
          if (j == 0)
            {
              vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
              vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
            }
          else
            {
              vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1);
              vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
            }

          /* Arguments are ready.  Create the new vector stmt.  */
          new_stmt = gimple_build_assign_with_ops (code1, vec_dest, vec_oprnd0,
                                                   vec_oprnd1);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
    }

  if (vec_oprnds0)
    VEC_free (tree, heap, vec_oprnds0);

  return true;
}
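
/* For example, "vf = (float) vi" in a loop reaches here as a FLOAT_EXPR;
   with V4SI input and V4SF output the subpart counts match (modifier NONE),
   and the NONE path above emits the target's builtin_conversion call once
   per copy.  */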

/* Function vectorizable_assignment.

   Check if STMT performs an assignment (copy) that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
1816 static bool
1817 vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
1818 gimple *vec_stmt, slp_tree slp_node)
1820 tree vec_dest;
1821 tree scalar_dest;
1822 tree op;
1823 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1824 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1825 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1826 tree new_temp;
1827 tree def;
1828 gimple def_stmt;
1829 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1830 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
1831 int ncopies;
1832 int i, j;
1833 VEC(tree,heap) *vec_oprnds = NULL;
1834 tree vop;
1835 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1836 gimple new_stmt = NULL;
1837 stmt_vec_info prev_stmt_info = NULL;
1839 /* Multiple types in SLP are handled by creating the appropriate number of
1840 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1841 case of SLP. */
1842 if (slp_node)
1843 ncopies = 1;
1844 else
1845 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
1847 gcc_assert (ncopies >= 1);
1849 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
1850 return false;
1852 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1853 return false;
1855 /* Is vectorizable assignment? */
1856 if (!is_gimple_assign (stmt))
1857 return false;
1859 scalar_dest = gimple_assign_lhs (stmt);
1860 if (TREE_CODE (scalar_dest) != SSA_NAME)
1861 return false;
1863 if (gimple_assign_single_p (stmt)
1864 || gimple_assign_rhs_code (stmt) == PAREN_EXPR)
1865 op = gimple_assign_rhs1 (stmt);
1866 else
1867 return false;
1869 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt[0]))
1871 if (vect_print_dump_info (REPORT_DETAILS))
1872 fprintf (vect_dump, "use not simple.");
1873 return false;
1876 if (!vec_stmt) /* transformation not required. */
1878 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
1879 if (vect_print_dump_info (REPORT_DETAILS))
1880 fprintf (vect_dump, "=== vectorizable_assignment ===");
1881 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1882 return true;
1885 /** Transform. **/
1886 if (vect_print_dump_info (REPORT_DETAILS))
1887 fprintf (vect_dump, "transform assignment.");
1889 /* Handle def. */
1890 vec_dest = vect_create_destination_var (scalar_dest, vectype);
1892 /* Handle use. */
1893 for (j = 0; j < ncopies; j++)
1895 /* Handle uses. */
1896 if (j == 0)
1897 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
1898 else
1899 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
1901 /* Arguments are ready. Create the new vector stmt. */
1902 for (i = 0; VEC_iterate (tree, vec_oprnds, i, vop); i++)
1904 new_stmt = gimple_build_assign (vec_dest, vop);
1905 new_temp = make_ssa_name (vec_dest, new_stmt);
1906 gimple_assign_set_lhs (new_stmt, new_temp);
1907 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1908 if (slp_node)
1909 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
1912 if (slp_node)
1913 continue;
1915 if (j == 0)
1916 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1917 else
1918 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1920 prev_stmt_info = vinfo_for_stmt (new_stmt);
1923 VEC_free (tree, heap, vec_oprnds);
1924 return true;
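/* Illustration of the transform above (a sketch under assumptions,
   not vectorizer output). Assuming 16-byte vectors via GCC's
   vector_size attribute and suitably aligned arrays (v4si is an
   illustrative stand-in), a vectorizable assignment is a plain copy:

     typedef int v4si __attribute__ ((vector_size (16)));

     void
     copy (const int *a, int *b, int n)
     {
       int i;
       for (i = 0; i < n; i += 4)
         *(v4si *) &b[i] = *(const v4si *) &a[i];
     }

   Each vector copy corresponds to one gimple_build_assign (vec_dest,
   vop) emitted in the loop above. */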
1927 /* Function vectorizable_operation.
1929 Check if STMT performs a binary or unary operation that can be vectorized.
1930 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1931 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1932 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1934 static bool
1935 vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
1936 gimple *vec_stmt, slp_tree slp_node)
1938 tree vec_dest;
1939 tree scalar_dest;
1940 tree op0, op1 = NULL;
1941 tree vec_oprnd1 = NULL_TREE;
1942 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1943 tree vectype;
1944 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1945 enum tree_code code;
1946 enum machine_mode vec_mode;
1947 tree new_temp;
1948 int op_type;
1949 optab optab;
1950 int icode;
1951 enum machine_mode optab_op2_mode;
1952 tree def;
1953 gimple def_stmt;
1954 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1955 gimple new_stmt = NULL;
1956 stmt_vec_info prev_stmt_info;
1957 int nunits_in;
1958 int nunits_out;
1959 tree vectype_out;
1960 int ncopies;
1961 int j, i;
1962 VEC(tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
1963 tree vop0, vop1;
1964 unsigned int k;
1965 bool scalar_shift_arg = false;
1966 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1967 int vf;
1969 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
1970 return false;
1972 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1973 return false;
1975 /* Is STMT a vectorizable binary/unary operation? */
1976 if (!is_gimple_assign (stmt))
1977 return false;
1979 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
1980 return false;
1982 code = gimple_assign_rhs_code (stmt);
1984 /* For pointer addition, we should use the normal plus for
1985 the vector addition. */
1986 if (code == POINTER_PLUS_EXPR)
1987 code = PLUS_EXPR;
1989 /* Support only unary or binary operations. */
1990 op_type = TREE_CODE_LENGTH (code);
1991 if (op_type != unary_op && op_type != binary_op)
1993 if (vect_print_dump_info (REPORT_DETAILS))
1994 fprintf (vect_dump, "num. args = %d (not unary/binary op).", op_type);
1995 return false;
1998 scalar_dest = gimple_assign_lhs (stmt);
1999 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2001 op0 = gimple_assign_rhs1 (stmt);
2002 if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
2003 &def_stmt, &def, &dt[0], &vectype))
2005 if (vect_print_dump_info (REPORT_DETAILS))
2006 fprintf (vect_dump, "use not simple.");
2007 return false;
2009 /* If op0 is an external or constant def use a vector type with
2010 the same size as the output vector type. */
2011 if (!vectype)
2012 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2013 if (vec_stmt)
2014 gcc_assert (vectype);
2015 if (!vectype)
2017 if (vect_print_dump_info (REPORT_DETAILS))
2019 fprintf (vect_dump, "no vectype for scalar type ");
2020 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2023 return false;
2026 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2027 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
2028 if (nunits_out != nunits_in)
2029 return false;
2031 if (op_type == binary_op)
2033 op1 = gimple_assign_rhs2 (stmt);
2034 if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def,
2035 &dt[1]))
2037 if (vect_print_dump_info (REPORT_DETAILS))
2038 fprintf (vect_dump, "use not simple.");
2039 return false;
2043 if (loop_vinfo)
2044 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2045 else
2046 vf = 1;
2048 /* Multiple types in SLP are handled by creating the appropriate number of
2049 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2050 case of SLP. */
2051 if (slp_node)
2052 ncopies = 1;
2053 else
2054 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2056 gcc_assert (ncopies >= 1);
2058 /* If this is a shift/rotate, determine whether the shift amount is a vector,
2059 or scalar. If the shift/rotate amount is a vector, use the vector/vector
2060 shift optabs. */
2061 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
2062 || code == RROTATE_EXPR)
2064 /* vector shifted by vector */
2065 if (dt[1] == vect_internal_def)
2067 optab = optab_for_tree_code (code, vectype, optab_vector);
2068 if (vect_print_dump_info (REPORT_DETAILS))
2069 fprintf (vect_dump, "vector/vector shift/rotate found.");
2072 /* See if the machine has a vector shifted by scalar insn, and if not
2073 then see if it has a vector shifted by vector insn. */
2074 else if (dt[1] == vect_constant_def || dt[1] == vect_external_def)
2076 optab = optab_for_tree_code (code, vectype, optab_scalar);
2077 if (optab
2078 && (optab_handler (optab, TYPE_MODE (vectype))->insn_code
2079 != CODE_FOR_nothing))
2081 scalar_shift_arg = true;
2082 if (vect_print_dump_info (REPORT_DETAILS))
2083 fprintf (vect_dump, "vector/scalar shift/rotate found.");
2085 else
2087 optab = optab_for_tree_code (code, vectype, optab_vector);
2088 if (optab
2089 && (optab_handler (optab, TYPE_MODE (vectype))->insn_code
2090 != CODE_FOR_nothing))
2092 if (vect_print_dump_info (REPORT_DETAILS))
2093 fprintf (vect_dump, "vector/vector shift/rotate found.");
2095 /* Unlike the other binary operators, shifts/rotates have
2096 the rhs being int, instead of the same type as the lhs,
2097 so make sure the scalar is the right type if we are
2098 dealing with vectors of short/char. */
2099 if (dt[1] == vect_constant_def)
2100 op1 = fold_convert (TREE_TYPE (vectype), op1);
2105 else
2107 if (vect_print_dump_info (REPORT_DETAILS))
2108 fprintf (vect_dump, "operand mode requires invariant argument.");
2109 return false;
2112 else
2113 optab = optab_for_tree_code (code, vectype, optab_default);
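/* For example (a sketch, not vectorizer output):

     for (i = 0; i < n; i++)
       a[i] >>= 3;

   has a loop-invariant shift amount, so the vector/scalar optab
   (optab_scalar) applies, whereas

     for (i = 0; i < n; i++)
       a[i] >>= b[i];

   shifts every element by a different amount and requires the
   vector/vector optab (optab_vector). */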
2115 /* Supportable by target? */
2116 if (!optab)
2118 if (vect_print_dump_info (REPORT_DETAILS))
2119 fprintf (vect_dump, "no optab.");
2120 return false;
2122 vec_mode = TYPE_MODE (vectype);
2123 icode = (int) optab_handler (optab, vec_mode)->insn_code;
2124 if (icode == CODE_FOR_nothing)
2126 if (vect_print_dump_info (REPORT_DETAILS))
2127 fprintf (vect_dump, "op not supported by target.");
2128 /* Check only during analysis. */
2129 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
2130 || (vf < vect_min_worthwhile_factor (code)
2131 && !vec_stmt))
2132 return false;
2133 if (vect_print_dump_info (REPORT_DETAILS))
2134 fprintf (vect_dump, "proceeding using word mode.");
2137 /* Worthwhile without SIMD support? Check only during analysis. */
2138 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2139 && vf < vect_min_worthwhile_factor (code)
2140 && !vec_stmt)
2142 if (vect_print_dump_info (REPORT_DETAILS))
2143 fprintf (vect_dump, "not worthwhile without SIMD support.");
2144 return false;
2147 if (!vec_stmt) /* transformation not required. */
2149 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
2150 if (vect_print_dump_info (REPORT_DETAILS))
2151 fprintf (vect_dump, "=== vectorizable_operation ===");
2152 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2153 return true;
2156 /** Transform. **/
2158 if (vect_print_dump_info (REPORT_DETAILS))
2159 fprintf (vect_dump, "transform binary/unary operation.");
2161 /* Handle def. */
2162 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2164 /* Allocate VECs for vector operands. In case of SLP, vector operands are
2165 created in the previous stages of the recursion, so no allocation is
2166 needed, except for the case of shift with scalar shift argument. In that
2167 case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
2168 be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
2169 In case of loop-based vectorization we allocate VECs of size 1. We
2170 allocate VEC_OPRNDS1 only in case of binary operation. */
2171 if (!slp_node)
2173 vec_oprnds0 = VEC_alloc (tree, heap, 1);
2174 if (op_type == binary_op)
2175 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2177 else if (scalar_shift_arg)
2178 vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);
2180 /* In case the vectorization factor (VF) is bigger than the number
2181 of elements that we can fit in a vectype (nunits), we have to generate
2182 more than one vector stmt - i.e - we need to "unroll" the
2183 vector stmt by a factor VF/nunits. In doing so, we record a pointer
2184 from one copy of the vector stmt to the next, in the field
2185 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
2186 stages to find the correct vector defs to be used when vectorizing
2187 stmts that use the defs of the current stmt. The example below illustrates
2188 the vectorization process when VF=16 and nunits=4 (i.e - we need to create
2189 4 vectorized stmts):
2191 before vectorization:
2192 RELATED_STMT VEC_STMT
2193 S1: x = memref - -
2194 S2: z = x + 1 - -
2196 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
2197 there):
2198 RELATED_STMT VEC_STMT
2199 VS1_0: vx0 = memref0 VS1_1 -
2200 VS1_1: vx1 = memref1 VS1_2 -
2201 VS1_2: vx2 = memref2 VS1_3 -
2202 VS1_3: vx3 = memref3 - -
2203 S1: x = load - VS1_0
2204 S2: z = x + 1 - -
2206 step2: vectorize stmt S2 (done here):
2207 To vectorize stmt S2 we first need to find the relevant vector
2208 def for the first operand 'x'. This is, as usual, obtained from
2209 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
2210 that defines 'x' (S1). This way we find the stmt VS1_0, and the
2211 relevant vector def 'vx0'. Having found 'vx0' we can generate
2212 the vector stmt VS2_0, and as usual, record it in the
2213 STMT_VINFO_VEC_STMT of stmt S2.
2214 When creating the second copy (VS2_1), we obtain the relevant vector
2215 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
2216 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
2217 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
2218 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
2219 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
2220 chain of stmts and pointers:
2221 RELATED_STMT VEC_STMT
2222 VS1_0: vx0 = memref0 VS1_1 -
2223 VS1_1: vx1 = memref1 VS1_2 -
2224 VS1_2: vx2 = memref2 VS1_3 -
2225 VS1_3: vx3 = memref3 - -
2226 S1: x = load - VS1_0
2227 VS2_0: vz0 = vx0 + v1 VS2_1 -
2228 VS2_1: vz1 = vx1 + v1 VS2_2 -
2229 VS2_2: vz2 = vx2 + v1 VS2_3 -
2230 VS2_3: vz3 = vx3 + v1 - -
2231 S2: z = x + 1 - VS2_0 */
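/* As a concrete sketch of the chain above (assuming 16-byte vectors
   and GCC's vector_size attribute; v4si is illustrative only), for
   VF=16 and nunits=4 one iteration of the vector loop performs:

     typedef int v4si __attribute__ ((vector_size (16)));
     v4si vone = { 1, 1, 1, 1 };
     v4si vx0 = *(v4si *) &x[i];            vx1..vx3 likewise
     *(v4si *) &z[i]      = vx0 + vone;     VS2_0
     *(v4si *) &z[i + 4]  = vx1 + vone;     VS2_1
     *(v4si *) &z[i + 8]  = vx2 + vone;     VS2_2
     *(v4si *) &z[i + 12] = vx3 + vone;     VS2_3

   with VS2_0..VS2_3 chained through STMT_VINFO_RELATED_STMT exactly
   as in the table above. */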
2233 prev_stmt_info = NULL;
2234 for (j = 0; j < ncopies; j++)
2236 /* Handle uses. */
2237 if (j == 0)
2239 if (op_type == binary_op && scalar_shift_arg)
2241 /* Vector shl and shr insn patterns can be defined with scalar
2242 operand 2 (shift operand). In this case, use constant or loop
2243 invariant op1 directly, without extending it to vector mode
2244 first. */
2245 optab_op2_mode = insn_data[icode].operand[2].mode;
2246 if (!VECTOR_MODE_P (optab_op2_mode))
2248 if (vect_print_dump_info (REPORT_DETAILS))
2249 fprintf (vect_dump, "operand 1 using scalar mode.");
2250 vec_oprnd1 = op1;
2251 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2252 if (slp_node)
2254 /* Store vec_oprnd1 for every vector stmt to be created
2255 for SLP_NODE. We check during the analysis that all the
2256 shift arguments are the same.
2257 TODO: Allow different constants for different vector
2258 stmts generated for an SLP instance. */
2259 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
2260 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2265 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
2266 (a special case for certain kinds of vector shifts); otherwise,
2267 operand 1 should be of a vector type (the usual case). */
2268 if (op_type == binary_op && !vec_oprnd1)
2269 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
2270 slp_node);
2271 else
2272 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
2273 slp_node);
2275 else
2276 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
2278 /* Arguments are ready. Create the new vector stmt. */
2279 for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vop0); i++)
2281 vop1 = ((op_type == binary_op)
2282 ? VEC_index (tree, vec_oprnds1, i) : NULL);
2283 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2284 new_temp = make_ssa_name (vec_dest, new_stmt);
2285 gimple_assign_set_lhs (new_stmt, new_temp);
2286 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2287 if (slp_node)
2288 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2291 if (slp_node)
2292 continue;
2294 if (j == 0)
2295 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2296 else
2297 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2298 prev_stmt_info = vinfo_for_stmt (new_stmt);
2301 VEC_free (tree, heap, vec_oprnds0);
2302 if (vec_oprnds1)
2303 VEC_free (tree, heap, vec_oprnds1);
2305 return true;
2309 /* Get vectorized definitions for loop-based vectorization. For the first
2310 operand we call vect_get_vec_def_for_operand() (with OPRND containing
2311 the scalar operand), and for the rest we get a copy with
2312 vect_get_vec_def_for_stmt_copy() using the previous vector definition
2313 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
2314 The vectors are collected into VEC_OPRNDS. */
2316 static void
2317 vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
2318 VEC (tree, heap) **vec_oprnds, int multi_step_cvt)
2320 tree vec_oprnd;
2322 /* Get first vector operand. */
2323 /* All the vector operands except the very first one (which is the
2324 scalar oprnd) are stmt copies. */
2325 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
2326 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
2327 else
2328 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
2330 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2332 /* Get second vector operand. */
2333 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
2334 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2336 *oprnd = vec_oprnd;
2338 /* For conversion in multiple steps, continue to get operands
2339 recursively. */
2340 if (multi_step_cvt)
2341 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
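/* For example, for a conversion needing four input vectors the caller
   passes MULTI_STEP_CVT == 1: the first invocation pushes the def of
   the scalar OPRND plus one stmt copy, and the recursive invocation
   pushes two more stmt copies, leaving VEC_OPRNDS == {v0, v1, v2, v3}. */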
2345 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
2346 For multi-step conversions store the resulting vectors and call the function
2347 recursively. */
2349 static void
2350 vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
2351 int multi_step_cvt, gimple stmt,
2352 VEC (tree, heap) *vec_dsts,
2353 gimple_stmt_iterator *gsi,
2354 slp_tree slp_node, enum tree_code code,
2355 stmt_vec_info *prev_stmt_info)
2357 unsigned int i;
2358 tree vop0, vop1, new_tmp, vec_dest;
2359 gimple new_stmt;
2360 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2362 vec_dest = VEC_pop (tree, vec_dsts);
2364 for (i = 0; i < VEC_length (tree, *vec_oprnds); i += 2)
2366 /* Create demotion operation. */
2367 vop0 = VEC_index (tree, *vec_oprnds, i);
2368 vop1 = VEC_index (tree, *vec_oprnds, i + 1);
2369 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2370 new_tmp = make_ssa_name (vec_dest, new_stmt);
2371 gimple_assign_set_lhs (new_stmt, new_tmp);
2372 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2374 if (multi_step_cvt)
2375 /* Store the resulting vector for next recursive call. */
2376 VEC_replace (tree, *vec_oprnds, i/2, new_tmp);
2377 else
2379 /* This is the last step of the conversion sequence. Store the
2380 vectors in SLP_NODE or in the vector info of the scalar statement
2381 (or in the STMT_VINFO_RELATED_STMT chain). */
2382 if (slp_node)
2383 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2384 else
2386 if (!*prev_stmt_info)
2387 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2388 else
2389 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
2391 *prev_stmt_info = vinfo_for_stmt (new_stmt);
2396 /* For multi-step demotion operations we first generate demotion operations
2397 from the source type to the intermediate types, and then combine the
2398 results (stored in VEC_OPRNDS) in a demotion operation to the destination
2399 type. */
2400 if (multi_step_cvt)
2402 /* At each level of recursion we have half of the operands we had at the
2403 previous level. */
2404 VEC_truncate (tree, *vec_oprnds, (i+1)/2);
2405 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
2406 stmt, vec_dsts, gsi, slp_node,
2407 code, prev_stmt_info);
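/* Illustration (a sketch under assumptions, not vectorizer output):
   with 16-byte vectors, demoting int to char takes two steps. Four
   V4SI inputs are first packed pairwise into two V8HI vectors, which
   the recursive call then packs into one V16QI result:

     {v4si, v4si, v4si, v4si}  ->  {v8hi, v8hi}  ->  {v16qi}

   Each arrow is one level of the recursion, halving the number of
   operands (hence the VEC_truncate to (i+1)/2 above); CODE is
   typically VEC_PACK_TRUNC_EXPR here. */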
2412 /* Function vectorizable_type_demotion
2414 Check if STMT performs a binary or unary operation that involves
2415 type demotion, and if it can be vectorized.
2416 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2417 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2418 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2420 static bool
2421 vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
2422 gimple *vec_stmt, slp_tree slp_node)
2424 tree vec_dest;
2425 tree scalar_dest;
2426 tree op0;
2427 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2428 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2429 enum tree_code code, code1 = ERROR_MARK;
2430 tree def;
2431 gimple def_stmt;
2432 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2433 stmt_vec_info prev_stmt_info;
2434 int nunits_in;
2435 int nunits_out;
2436 tree vectype_out;
2437 int ncopies;
2438 int j, i;
2439 tree vectype_in;
2440 int multi_step_cvt = 0;
2441 VEC (tree, heap) *vec_oprnds0 = NULL;
2442 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2443 tree last_oprnd, intermediate_type;
2445 /* FORNOW: not supported by basic block SLP vectorization. */
2446 gcc_assert (loop_vinfo);
2448 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2449 return false;
2451 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2452 return false;
2454 /* Is STMT a vectorizable type-demotion operation? */
2455 if (!is_gimple_assign (stmt))
2456 return false;
2458 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2459 return false;
2461 code = gimple_assign_rhs_code (stmt);
2462 if (!CONVERT_EXPR_CODE_P (code))
2463 return false;
2465 scalar_dest = gimple_assign_lhs (stmt);
2466 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2468 /* Check the operands of the operation. */
2469 op0 = gimple_assign_rhs1 (stmt);
2470 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2471 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2472 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2473 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2474 && CONVERT_EXPR_CODE_P (code))))
2475 return false;
2476 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
2477 &def_stmt, &def, &dt[0], &vectype_in))
2479 if (vect_print_dump_info (REPORT_DETAILS))
2480 fprintf (vect_dump, "use not simple.");
2481 return false;
2483 /* If op0 is an external def use a vector type with the
2484 same size as the output vector type if possible. */
2485 if (!vectype_in)
2486 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2487 if (vec_stmt)
2488 gcc_assert (vectype_in);
2489 if (!vectype_in)
2491 if (vect_print_dump_info (REPORT_DETAILS))
2493 fprintf (vect_dump, "no vectype for scalar type ");
2494 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2497 return false;
2500 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2501 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2502 if (nunits_in >= nunits_out)
2503 return false;
2505 /* Multiple types in SLP are handled by creating the appropriate number of
2506 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2507 case of SLP. */
2508 if (slp_node)
2509 ncopies = 1;
2510 else
2511 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2512 gcc_assert (ncopies >= 1);
2514 /* Supportable by target? */
2515 if (!supportable_narrowing_operation (code, vectype_out, vectype_in,
2516 &code1, &multi_step_cvt, &interm_types))
2517 return false;
2519 if (!vec_stmt) /* transformation not required. */
2521 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
2522 if (vect_print_dump_info (REPORT_DETAILS))
2523 fprintf (vect_dump, "=== vectorizable_demotion ===");
2524 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2525 return true;
2528 /** Transform. **/
2529 if (vect_print_dump_info (REPORT_DETAILS))
2530 fprintf (vect_dump, "transform type demotion operation. ncopies = %d.",
2531 ncopies);
2533 /* In case of multi-step demotion, we first generate demotion operations to
2534 the intermediate types, and then from those types to the final one.
2535 We create vector destinations for the intermediate type (TYPES) received
2536 from supportable_narrowing_operation, and store them in the correct order
2537 for future use in vect_create_vectorized_demotion_stmts(). */
2538 if (multi_step_cvt)
2539 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
2540 else
2541 vec_dsts = VEC_alloc (tree, heap, 1);
2543 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2544 VEC_quick_push (tree, vec_dsts, vec_dest);
2546 if (multi_step_cvt)
2548 for (i = VEC_length (tree, interm_types) - 1;
2549 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
2551 vec_dest = vect_create_destination_var (scalar_dest,
2552 intermediate_type);
2553 VEC_quick_push (tree, vec_dsts, vec_dest);
2557 /* In case the vectorization factor (VF) is bigger than the number
2558 of elements that we can fit in a vectype (nunits), we have to generate
2559 more than one vector stmt - i.e - we need to "unroll" the
2560 vector stmt by a factor VF/nunits. */
2561 last_oprnd = op0;
2562 prev_stmt_info = NULL;
2563 for (j = 0; j < ncopies; j++)
2565 /* Handle uses. */
2566 if (slp_node)
2567 vect_get_slp_defs (slp_node, &vec_oprnds0, NULL, -1);
2568 else
2570 VEC_free (tree, heap, vec_oprnds0);
2571 vec_oprnds0 = VEC_alloc (tree, heap,
2572 (multi_step_cvt ? vect_pow2 (multi_step_cvt) * 2 : 2));
2573 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
2574 vect_pow2 (multi_step_cvt) - 1);
2577 /* Arguments are ready. Create the new vector stmts. */
2578 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
2579 vect_create_vectorized_demotion_stmts (&vec_oprnds0,
2580 multi_step_cvt, stmt, tmp_vec_dsts,
2581 gsi, slp_node, code1,
2582 &prev_stmt_info);
2585 VEC_free (tree, heap, vec_oprnds0);
2586 VEC_free (tree, heap, vec_dsts);
2587 VEC_free (tree, heap, tmp_vec_dsts);
2588 VEC_free (tree, heap, interm_types);
2590 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2591 return true;
2595 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
2596 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
2597 the resulting vectors and call the function recursively. */
2599 static void
2600 vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
2601 VEC (tree, heap) **vec_oprnds1,
2602 int multi_step_cvt, gimple stmt,
2603 VEC (tree, heap) *vec_dsts,
2604 gimple_stmt_iterator *gsi,
2605 slp_tree slp_node, enum tree_code code1,
2606 enum tree_code code2, tree decl1,
2607 tree decl2, int op_type,
2608 stmt_vec_info *prev_stmt_info)
2610 int i;
2611 tree vop0, vop1, new_tmp1, new_tmp2, vec_dest;
2612 gimple new_stmt1, new_stmt2;
2613 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2614 VEC (tree, heap) *vec_tmp;
2616 vec_dest = VEC_pop (tree, vec_dsts);
2617 vec_tmp = VEC_alloc (tree, heap, VEC_length (tree, *vec_oprnds0) * 2);
2619 for (i = 0; VEC_iterate (tree, *vec_oprnds0, i, vop0); i++)
2621 if (op_type == binary_op)
2622 vop1 = VEC_index (tree, *vec_oprnds1, i);
2623 else
2624 vop1 = NULL_TREE;
2626 /* Generate the two halves of promotion operation. */
2627 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
2628 op_type, vec_dest, gsi, stmt);
2629 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
2630 op_type, vec_dest, gsi, stmt);
2631 if (is_gimple_call (new_stmt1))
2633 new_tmp1 = gimple_call_lhs (new_stmt1);
2634 new_tmp2 = gimple_call_lhs (new_stmt2);
2636 else
2638 new_tmp1 = gimple_assign_lhs (new_stmt1);
2639 new_tmp2 = gimple_assign_lhs (new_stmt2);
2642 if (multi_step_cvt)
2644 /* Store the results for the recursive call. */
2645 VEC_quick_push (tree, vec_tmp, new_tmp1);
2646 VEC_quick_push (tree, vec_tmp, new_tmp2);
2648 else
2650 /* Last step of the promotion sequence - store the results. */
2651 if (slp_node)
2653 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt1);
2654 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt2);
2656 else
2658 if (!*prev_stmt_info)
2659 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt1;
2660 else
2661 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt1;
2663 *prev_stmt_info = vinfo_for_stmt (new_stmt1);
2664 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt2;
2665 *prev_stmt_info = vinfo_for_stmt (new_stmt2);
2670 if (multi_step_cvt)
2672 /* For a multi-step promotion operation we call the function
2673 recursively for every stage. We start from the input type,
2674 create promotion operations to the intermediate types, and then
2675 create promotions to the output type. */
2676 *vec_oprnds0 = VEC_copy (tree, heap, vec_tmp);
2677 VEC_free (tree, heap, vec_tmp);
2678 vect_create_vectorized_promotion_stmts (vec_oprnds0, vec_oprnds1,
2679 multi_step_cvt - 1, stmt,
2680 vec_dsts, gsi, slp_node, code1,
2681 code2, decl1, decl2, op_type,
2682 prev_stmt_info);
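/* Illustration (a sketch under assumptions, not vectorizer output):
   with 16-byte vectors, promoting char to int is the mirror image of
   demotion. One V16QI input is split into a low and a high half by
   the two vect_gen_widened_results_half calls (typically
   VEC_UNPACK_LO_EXPR / VEC_UNPACK_HI_EXPR), giving two V8HI vectors;
   the recursive call then widens those into four V4SI results:

     {v16qi}  ->  {v8hi, v8hi}  ->  {v4si, v4si, v4si, v4si}  */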
2687 /* Function vectorizable_type_promotion
2689 Check if STMT performs a binary or unary operation that involves
2690 type promotion, and if it can be vectorized.
2691 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2692 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2693 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2695 static bool
2696 vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
2697 gimple *vec_stmt, slp_tree slp_node)
2699 tree vec_dest;
2700 tree scalar_dest;
2701 tree op0, op1 = NULL;
2702 tree vec_oprnd0 = NULL, vec_oprnd1 = NULL;
2703 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2704 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2705 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
2706 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
2707 int op_type;
2708 tree def;
2709 gimple def_stmt;
2710 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2711 stmt_vec_info prev_stmt_info;
2712 int nunits_in;
2713 int nunits_out;
2714 tree vectype_out;
2715 int ncopies;
2716 int j, i;
2717 tree vectype_in;
2718 tree intermediate_type = NULL_TREE;
2719 int multi_step_cvt = 0;
2720 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
2721 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2723 /* FORNOW: not supported by basic block SLP vectorization. */
2724 gcc_assert (loop_vinfo);
2726 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2727 return false;
2729 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2730 return false;
2732 /* Is STMT a vectorizable type-promotion operation? */
2733 if (!is_gimple_assign (stmt))
2734 return false;
2736 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2737 return false;
2739 code = gimple_assign_rhs_code (stmt);
2740 if (!CONVERT_EXPR_CODE_P (code)
2741 && code != WIDEN_MULT_EXPR)
2742 return false;
2744 scalar_dest = gimple_assign_lhs (stmt);
2745 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2747 /* Check the operands of the operation. */
2748 op0 = gimple_assign_rhs1 (stmt);
2749 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2750 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2751 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2752 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2753 && CONVERT_EXPR_CODE_P (code))))
2754 return false;
2755 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
2756 &def_stmt, &def, &dt[0], &vectype_in))
2758 if (vect_print_dump_info (REPORT_DETAILS))
2759 fprintf (vect_dump, "use not simple.");
2760 return false;
2762 /* If op0 is an external or constant def use a vector type with
2763 the same size as the output vector type. */
2764 if (!vectype_in)
2765 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2766 if (vec_stmt)
2767 gcc_assert (vectype_in);
2768 if (!vectype_in)
2770 if (vect_print_dump_info (REPORT_DETAILS))
2772 fprintf (vect_dump, "no vectype for scalar type ");
2773 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2776 return false;
2779 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2780 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2781 if (nunits_in <= nunits_out)
2782 return false;
2784 /* Multiple types in SLP are handled by creating the appropriate number of
2785 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2786 case of SLP. */
2787 if (slp_node)
2788 ncopies = 1;
2789 else
2790 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2792 gcc_assert (ncopies >= 1);
2794 op_type = TREE_CODE_LENGTH (code);
2795 if (op_type == binary_op)
2797 op1 = gimple_assign_rhs2 (stmt);
2798 if (!vect_is_simple_use (op1, loop_vinfo, NULL, &def_stmt, &def, &dt[1]))
2800 if (vect_print_dump_info (REPORT_DETAILS))
2801 fprintf (vect_dump, "use not simple.");
2802 return false;
2806 /* Supportable by target? */
2807 if (!supportable_widening_operation (code, stmt, vectype_out, vectype_in,
2808 &decl1, &decl2, &code1, &code2,
2809 &multi_step_cvt, &interm_types))
2810 return false;
2812 /* A binary widening operation can only be supported directly by the
2813 architecture. */
2814 gcc_assert (!(multi_step_cvt && op_type == binary_op));
2816 if (!vec_stmt) /* transformation not required. */
2818 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
2819 if (vect_print_dump_info (REPORT_DETAILS))
2820 fprintf (vect_dump, "=== vectorizable_promotion ===");
2821 vect_model_simple_cost (stmt_info, 2*ncopies, dt, NULL);
2822 return true;
2825 /** Transform. **/
2827 if (vect_print_dump_info (REPORT_DETAILS))
2828 fprintf (vect_dump, "transform type promotion operation. ncopies = %d.",
2829 ncopies);
2831 /* Handle def. */
2832 /* In case of multi-step promotion, we first generate promotion operations
2833 to the intermediate types, and then from those types to the final one.
2834 We store vector destination in VEC_DSTS in the correct order for
2835 recursive creation of promotion operations in
2836 vect_create_vectorized_promotion_stmts(). Vector destinations are created
2837 according to TYPES received from supportable_widening_operation(). */
2838 if (multi_step_cvt)
2839 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
2840 else
2841 vec_dsts = VEC_alloc (tree, heap, 1);
2843 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2844 VEC_quick_push (tree, vec_dsts, vec_dest);
2846 if (multi_step_cvt)
2848 for (i = VEC_length (tree, interm_types) - 1;
2849 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
2851 vec_dest = vect_create_destination_var (scalar_dest,
2852 intermediate_type);
2853 VEC_quick_push (tree, vec_dsts, vec_dest);
2857 if (!slp_node)
2859 vec_oprnds0 = VEC_alloc (tree, heap,
2860 (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
2861 if (op_type == binary_op)
2862 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2865 /* In case the vectorization factor (VF) is bigger than the number
2866 of elements that we can fit in a vectype (nunits), we have to generate
2867 more than one vector stmt - i.e - we need to "unroll" the
2868 vector stmt by a factor VF/nunits. */
2870 prev_stmt_info = NULL;
2871 for (j = 0; j < ncopies; j++)
2873 /* Handle uses. */
2874 if (j == 0)
2876 if (slp_node)
2877 vect_get_slp_defs (slp_node, &vec_oprnds0, &vec_oprnds1, -1);
2878 else
2880 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
2881 VEC_quick_push (tree, vec_oprnds0, vec_oprnd0);
2882 if (op_type == binary_op)
2884 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL);
2885 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2889 else
2891 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
2892 VEC_replace (tree, vec_oprnds0, 0, vec_oprnd0);
2893 if (op_type == binary_op)
2895 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1);
2896 VEC_replace (tree, vec_oprnds1, 0, vec_oprnd1);
2900 /* Arguments are ready. Create the new vector stmts. */
2901 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
2902 vect_create_vectorized_promotion_stmts (&vec_oprnds0, &vec_oprnds1,
2903 multi_step_cvt, stmt,
2904 tmp_vec_dsts,
2905 gsi, slp_node, code1, code2,
2906 decl1, decl2, op_type,
2907 &prev_stmt_info);
2910 VEC_free (tree, heap, vec_dsts);
2911 VEC_free (tree, heap, tmp_vec_dsts);
2912 VEC_free (tree, heap, interm_types);
2913 VEC_free (tree, heap, vec_oprnds0);
2914 VEC_free (tree, heap, vec_oprnds1);
2916 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2917 return true;
2921 /* Function vectorizable_store.
2923 Check if STMT defines a non-scalar data-ref (array/pointer/structure) that
2924 can be vectorized.
2925 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2926 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2927 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2929 static bool
2930 vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
2931 slp_tree slp_node)
2933 tree scalar_dest;
2934 tree data_ref;
2935 tree op;
2936 tree vec_oprnd = NULL_TREE;
2937 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2938 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
2939 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2940 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2941 struct loop *loop = NULL;
2942 enum machine_mode vec_mode;
2943 tree dummy;
2944 enum dr_alignment_support alignment_support_scheme;
2945 tree def;
2946 gimple def_stmt;
2947 enum vect_def_type dt;
2948 stmt_vec_info prev_stmt_info = NULL;
2949 tree dataref_ptr = NULL_TREE;
2950 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
2951 int ncopies;
2952 int j;
2953 gimple next_stmt, first_stmt = NULL;
2954 bool strided_store = false;
2955 unsigned int group_size, i;
2956 VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
2957 bool inv_p;
2958 VEC(tree,heap) *vec_oprnds = NULL;
2959 bool slp = (slp_node != NULL);
2960 unsigned int vec_num;
2961 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2963 if (loop_vinfo)
2964 loop = LOOP_VINFO_LOOP (loop_vinfo);
2966 /* Multiple types in SLP are handled by creating the appropriate number of
2967 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2968 case of SLP. */
2969 if (slp)
2970 ncopies = 1;
2971 else
2972 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
2974 gcc_assert (ncopies >= 1);
2976 /* FORNOW. This restriction should be relaxed. */
2977 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
2979 if (vect_print_dump_info (REPORT_DETAILS))
2980 fprintf (vect_dump, "multiple types in nested loop.");
2981 return false;
2984 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2985 return false;
2987 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2988 return false;
2990 /* Is vectorizable store? */
2992 if (!is_gimple_assign (stmt))
2993 return false;
2995 scalar_dest = gimple_assign_lhs (stmt);
2996 if (TREE_CODE (scalar_dest) != ARRAY_REF
2997 && TREE_CODE (scalar_dest) != INDIRECT_REF
2998 && TREE_CODE (scalar_dest) != COMPONENT_REF
2999 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
3000 && TREE_CODE (scalar_dest) != REALPART_EXPR)
3001 return false;
3003 gcc_assert (gimple_assign_single_p (stmt));
3004 op = gimple_assign_rhs1 (stmt);
3005 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt))
3007 if (vect_print_dump_info (REPORT_DETAILS))
3008 fprintf (vect_dump, "use not simple.");
3009 return false;
3012 /* The scalar rhs type needs to be trivially convertible to the vector
3013 component type. This should always be the case. */
3014 if (!useless_type_conversion_p (TREE_TYPE (vectype), TREE_TYPE (op)))
3016 if (vect_print_dump_info (REPORT_DETAILS))
3017 fprintf (vect_dump, "??? operands of different types");
3018 return false;
3021 vec_mode = TYPE_MODE (vectype);
3022 /* FORNOW. In some cases we can vectorize even if the data-type is not
3023 supported (e.g. - array initialization with 0). */
3024 if (optab_handler (mov_optab, (int)vec_mode)->insn_code == CODE_FOR_nothing)
3025 return false;
3027 if (!STMT_VINFO_DATA_REF (stmt_info))
3028 return false;
3030 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3032 strided_store = true;
3033 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
3034 if (!vect_strided_store_supported (vectype)
3035 && !PURE_SLP_STMT (stmt_info) && !slp)
3036 return false;
3038 if (first_stmt == stmt)
3040 /* STMT is the leader of the group. Check the operands of all the
3041 stmts of the group. */
3042 next_stmt = DR_GROUP_NEXT_DR (stmt_info);
3043 while (next_stmt)
3045 gcc_assert (gimple_assign_single_p (next_stmt));
3046 op = gimple_assign_rhs1 (next_stmt);
3047 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt,
3048 &def, &dt))
3050 if (vect_print_dump_info (REPORT_DETAILS))
3051 fprintf (vect_dump, "use not simple.");
3052 return false;
3054 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3059 if (!vec_stmt) /* transformation not required. */
3061 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
3062 vect_model_store_cost (stmt_info, ncopies, dt, NULL);
3063 return true;
3066 /** Transform. **/
3068 if (strided_store)
3070 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3071 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3073 DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
3075 /* FORNOW */
3076 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
3078 /* We vectorize all the stmts of the interleaving group when we
3079 reach the last stmt in the group. */
3080 if (DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
3081 < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))
3082 && !slp)
3084 *vec_stmt = NULL;
3085 return true;
3088 if (slp)
3090 strided_store = false;
3091 /* VEC_NUM is the number of vect stmts to be created for this
3092 group. */
3093 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3094 first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
3095 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3097 else
3098 /* VEC_NUM is the number of vect stmts to be created for this
3099 group. */
3100 vec_num = group_size;
3102 else
3104 first_stmt = stmt;
3105 first_dr = dr;
3106 group_size = vec_num = 1;
3109 if (vect_print_dump_info (REPORT_DETAILS))
3110 fprintf (vect_dump, "transform store. ncopies = %d",ncopies);
3112 dr_chain = VEC_alloc (tree, heap, group_size);
3113 oprnds = VEC_alloc (tree, heap, group_size);
3115 alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
3116 gcc_assert (alignment_support_scheme);
3118 /* In case the vectorization factor (VF) is bigger than the number
3119 of elements that we can fit in a vectype (nunits), we have to generate
3120 more than one vector stmt - i.e - we need to "unroll" the
3121 vector stmt by a factor VF/nunits. For more details see documentation in
3122 vect_get_vec_def_for_copy_stmt. */
3124 /* In case of interleaving (non-unit strided access):
3126 S1: &base + 2 = x2
3127 S2: &base = x0
3128 S3: &base + 1 = x1
3129 S4: &base + 3 = x3
3131 We create vectorized stores starting from base address (the access of the
3132 first stmt in the chain (S2 in the above example), when the last store stmt
3133 of the chain (S4) is reached:
3135 VS1: &base = vx2
3136 VS2: &base + vec_size*1 = vx0
3137 VS3: &base + vec_size*2 = vx1
3138 VS4: &base + vec_size*3 = vx3
3140 Then permutation statements are generated:
3142 VS5: vx5 = VEC_INTERLEAVE_HIGH_EXPR < vx0, vx3 >
3143 VS6: vx6 = VEC_INTERLEAVE_LOW_EXPR < vx0, vx3 >
3146 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3147 (the order of the data-refs in the output of vect_permute_store_chain
3148 corresponds to the order of scalar stmts in the interleaving chain - see
3149 the documentation of vect_permute_store_chain()).
3151 In case of both multiple types and interleaving, above vector stores and
3152 permutation stmts are created for every copy. The result vector stmts are
3153 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
3154 STMT_VINFO_RELATED_STMT for the next copies. */
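/* Illustration (a sketch under assumptions, not vectorizer output):
   an interleaved store typically comes from code such as

     for (i = 0; i < n; i++)
       {
         out[2*i]     = a[i];
         out[2*i + 1] = b[i];
       }

   The vectorized defs of a[] and b[] are combined by
   vect_permute_store_chain using VEC_INTERLEAVE_HIGH_EXPR /
   VEC_INTERLEAVE_LOW_EXPR so that consecutive memory slots receive
   alternating elements, and the permuted vectors are written back
   with wide unit-stride stores. */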
3157 prev_stmt_info = NULL;
3158 for (j = 0; j < ncopies; j++)
3160 gimple new_stmt;
3161 gimple ptr_incr;
3163 if (j == 0)
3165 if (slp)
3167 /* Get vectorized arguments for SLP_NODE. */
3168 vect_get_slp_defs (slp_node, &vec_oprnds, NULL, -1);
3170 vec_oprnd = VEC_index (tree, vec_oprnds, 0);
3172 else
3174 /* For interleaved stores we collect vectorized defs for all the
3175 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
3176 used as an input to vect_permute_store_chain(), and OPRNDS as
3177 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
3179 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3180 OPRNDS are of size 1. */
3181 next_stmt = first_stmt;
3182 for (i = 0; i < group_size; i++)
3184 /* Since gaps are not supported for interleaved stores,
3185 GROUP_SIZE is the exact number of stmts in the chain.
3186 Therefore, NEXT_STMT can't be NULL_TREE. In case that
3187 there is no interleaving, GROUP_SIZE is 1, and only one
3188 iteration of the loop will be executed. */
3189 gcc_assert (next_stmt
3190 && gimple_assign_single_p (next_stmt));
3191 op = gimple_assign_rhs1 (next_stmt);
3193 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
3194 NULL);
3195 VEC_quick_push (tree, dr_chain, vec_oprnd);
3196 VEC_quick_push (tree, oprnds, vec_oprnd);
3197 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3201 /* We should have caught mismatched types earlier. */
3202 gcc_assert (useless_type_conversion_p (vectype,
3203 TREE_TYPE (vec_oprnd)));
3204 dataref_ptr = vect_create_data_ref_ptr (first_stmt, NULL, NULL_TREE,
3205 &dummy, &ptr_incr, false,
3206 &inv_p);
3207 gcc_assert (bb_vinfo || !inv_p);
3209 else
3211 /* For interleaved stores we created vectorized defs for all the
3212 defs stored in OPRNDS in the previous iteration (previous copy).
3213 DR_CHAIN is then used as an input to vect_permute_store_chain(),
3214 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
3215 next copy.
3216 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3217 OPRNDS are of size 1. */
3218 for (i = 0; i < group_size; i++)
3220 op = VEC_index (tree, oprnds, i);
3221 vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def,
3222 &dt);
3223 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
3224 VEC_replace (tree, dr_chain, i, vec_oprnd);
3225 VEC_replace (tree, oprnds, i, vec_oprnd);
3227 dataref_ptr =
3228 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
3231 if (strided_store)
3233 result_chain = VEC_alloc (tree, heap, group_size);
3234 /* Permute. */
3235 if (!vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
3236 &result_chain))
3237 return false;
3240 next_stmt = first_stmt;
3241 for (i = 0; i < vec_num; i++)
3243 if (i > 0)
3244 /* Bump the vector pointer. */
3245 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3246 NULL_TREE);
3248 if (slp)
3249 vec_oprnd = VEC_index (tree, vec_oprnds, i);
3250 else if (strided_store)
3251 /* For strided stores vectorized defs are interleaved in
3252 vect_permute_store_chain(). */
3253 vec_oprnd = VEC_index (tree, result_chain, i);
3255 if (aligned_access_p (first_dr))
3256 data_ref = build_fold_indirect_ref (dataref_ptr);
3257 else
3259 int mis = DR_MISALIGNMENT (first_dr);
3260 tree tmis = (mis == -1 ? size_zero_node : size_int (mis));
3261 tmis = size_binop (MULT_EXPR, tmis, size_int (BITS_PER_UNIT));
3262 data_ref = build2 (MISALIGNED_INDIRECT_REF, vectype, dataref_ptr, tmis);
3265 /* If accesses through a pointer to vectype do not alias the original
3266 memory reference we have a problem. This should never happen. */
3267 gcc_assert (alias_sets_conflict_p (get_alias_set (data_ref),
3268 get_alias_set (gimple_assign_lhs (stmt))));
3270 /* Arguments are ready. Create the new vector stmt. */
3271 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
3272 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3273 mark_symbols_for_renaming (new_stmt);
3275 if (slp)
3276 continue;
3278 if (j == 0)
3279 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3280 else
3281 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3283 prev_stmt_info = vinfo_for_stmt (new_stmt);
3284 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3285 if (!next_stmt)
3286 break;
3290 VEC_free (tree, heap, dr_chain);
3291 VEC_free (tree, heap, oprnds);
3292 if (result_chain)
3293 VEC_free (tree, heap, result_chain);
3295 return true;
3298 /* vectorizable_load.
3300 Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
3301 can be vectorized.
3302 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3303 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3304 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3306 static bool
3307 vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
3308 slp_tree slp_node, slp_instance slp_node_instance)
3310 tree scalar_dest;
3311 tree vec_dest = NULL;
3312 tree data_ref = NULL;
3313 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3314 stmt_vec_info prev_stmt_info;
3315 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3316 struct loop *loop = NULL;
3317 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
3318 bool nested_in_vect_loop = false;
3319 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
3320 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3321 tree new_temp;
3322 int mode;
3323 gimple new_stmt = NULL;
3324 tree dummy;
3325 enum dr_alignment_support alignment_support_scheme;
3326 tree dataref_ptr = NULL_TREE;
3327 gimple ptr_incr;
3328 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3329 int ncopies;
3330 int i, j, group_size;
3331 tree msq = NULL_TREE, lsq;
3332 tree offset = NULL_TREE;
3333 tree realignment_token = NULL_TREE;
3334 gimple phi = NULL;
3335 VEC(tree,heap) *dr_chain = NULL;
3336 bool strided_load = false;
3337 gimple first_stmt;
3338 tree scalar_type;
3339 bool inv_p;
3340 bool compute_in_loop = false;
3341 struct loop *at_loop;
3342 int vec_num;
3343 bool slp = (slp_node != NULL);
3344 bool slp_perm = false;
3345 enum tree_code code;
3346 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3347 int vf;
3349 if (loop_vinfo)
3351 loop = LOOP_VINFO_LOOP (loop_vinfo);
3352 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
3353 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3355 else
3356 vf = 1;
3358 /* Multiple types in SLP are handled by creating the appropriate number of
3359 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3360 case of SLP. */
3361 if (slp)
3362 ncopies = 1;
3363 else
3364 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3366 gcc_assert (ncopies >= 1);
3368 /* FORNOW. This restriction should be relaxed. */
3369 if (nested_in_vect_loop && ncopies > 1)
3371 if (vect_print_dump_info (REPORT_DETAILS))
3372 fprintf (vect_dump, "multiple types in nested loop.");
3373 return false;
3376 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3377 return false;
3379 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3380 return false;
3382 /* Is vectorizable load? */
3383 if (!is_gimple_assign (stmt))
3384 return false;
3386 scalar_dest = gimple_assign_lhs (stmt);
3387 if (TREE_CODE (scalar_dest) != SSA_NAME)
3388 return false;
3390 code = gimple_assign_rhs_code (stmt);
3391 if (code != ARRAY_REF
3392 && code != INDIRECT_REF
3393 && code != COMPONENT_REF
3394 && code != IMAGPART_EXPR
3395 && code != REALPART_EXPR)
3396 return false;
3398 if (!STMT_VINFO_DATA_REF (stmt_info))
3399 return false;
3401 scalar_type = TREE_TYPE (DR_REF (dr));
3402 mode = (int) TYPE_MODE (vectype);
3404 /* FORNOW. In some cases we can vectorize even if the data-type is not
3405 supported (e.g. - data copies). */
3406 if (optab_handler (mov_optab, mode)->insn_code == CODE_FOR_nothing)
3408 if (vect_print_dump_info (REPORT_DETAILS))
3409 fprintf (vect_dump, "Aligned load, but unsupported type.");
3410 return false;
3413 /* The vector component type needs to be trivially convertible to the
3414 scalar lhs. This should always be the case. */
3415 if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), TREE_TYPE (vectype)))
3417 if (vect_print_dump_info (REPORT_DETAILS))
3418 fprintf (vect_dump, "??? operands of different types");
3419 return false;
3422 /* Check if the load is a part of an interleaving chain. */
3423 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3425 strided_load = true;
3426 /* FORNOW */
3427 gcc_assert (! nested_in_vect_loop);
3429 /* Check if interleaving is supported. */
3430 if (!vect_strided_load_supported (vectype)
3431 && !PURE_SLP_STMT (stmt_info) && !slp)
3432 return false;
3435 if (!vec_stmt) /* transformation not required. */
3437 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
3438 vect_model_load_cost (stmt_info, ncopies, NULL);
3439 return true;
3442 if (vect_print_dump_info (REPORT_DETAILS))
3443 fprintf (vect_dump, "transform load.");
3445 /** Transform. **/
3447 if (strided_load)
3449 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
3450 /* Check if the chain of loads is already vectorized. */
3451 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt)))
3453 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3454 return true;
3456 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3457 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3459 /* VEC_NUM is the number of vect stmts to be created for this group. */
3460 if (slp)
3462 strided_load = false;
3463 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3464 if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance))
3465 slp_perm = true;
3467 else
3468 vec_num = group_size;
3470 dr_chain = VEC_alloc (tree, heap, vec_num);
3472 else
3474 first_stmt = stmt;
3475 first_dr = dr;
3476 group_size = vec_num = 1;
3479 alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
3480 gcc_assert (alignment_support_scheme);
3482 /* In case the vectorization factor (VF) is bigger than the number
3483 of elements that we can fit in a vectype (nunits), we have to generate
3484 more than one vector stmt - i.e - we need to "unroll" the
3485 vector stmt by a factor VF/nunits. In doing so, we record a pointer
3486 from one copy of the vector stmt to the next, in the field
3487 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
3488 stages to find the correct vector defs to be used when vectorizing
3489 stmts that use the defs of the current stmt. The example below illustrates
3490 the vectorization process when VF=16 and nunits=4 (i.e - we need to create
3491 4 vectorized stmts):
3493 before vectorization:
3494 RELATED_STMT VEC_STMT
3495 S1: x = memref - -
3496 S2: z = x + 1 - -
3498 step 1: vectorize stmt S1:
3499 We first create the vector stmt VS1_0, and, as usual, record a
3500 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
3501 Next, we create the vector stmt VS1_1, and record a pointer to
3502 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
3503 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
3504 stmts and pointers:
3505 RELATED_STMT VEC_STMT
3506 VS1_0: vx0 = memref0 VS1_1 -
3507 VS1_1: vx1 = memref1 VS1_2 -
3508 VS1_2: vx2 = memref2 VS1_3 -
3509 VS1_3: vx3 = memref3 - -
3510 S1: x = load - VS1_0
3511 S2: z = x + 1 - -
3513 See in documentation in vect_get_vec_def_for_stmt_copy for how the
3514 information we recorded in RELATED_STMT field is used to vectorize
3515 stmt S2. */
3517 /* In case of interleaving (non-unit strided access):
3519 S1: x2 = &base + 2
3520 S2: x0 = &base
3521 S3: x1 = &base + 1
3522 S4: x3 = &base + 3
3524 Vectorized loads are created in the order of memory accesses
3525 starting from the access of the first stmt of the chain:
3527 VS1: vx0 = &base
3528 VS2: vx1 = &base + vec_size*1
3529 VS3: vx2 = &base + vec_size*2
3530 VS4: vx3 = &base + vec_size*3
3532 Then permutation statements are generated:
3534 VS5: vx5 = VEC_EXTRACT_EVEN_EXPR < vx0, vx1 >
3535 VS6: vx6 = VEC_EXTRACT_ODD_EXPR < vx0, vx1 >
3538 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3539 (the order of the data-refs in the output of vect_permute_load_chain
3540 corresponds to the order of scalar stmts in the interleaving chain - see
3541 the documentation of vect_permute_load_chain()).
3542 The generation of permutation stmts and recording them in
3543 STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().
3545 In case of both multiple types and interleaving, the vector loads and
3546 permutation stmts above are created for every copy. The result vector stmts
3547 are put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
3548 STMT_VINFO_RELATED_STMT for the next copies. */
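/* Illustration (a sketch under assumptions, not vectorizer output):
   an interleaved load typically comes from code such as

     for (i = 0; i < n; i++)
       {
         x[i] = in[2*i];
         y[i] = in[2*i + 1];
       }

   Wide unit-stride loads fetch in[0..2*nunits-1]; VEC_EXTRACT_EVEN_EXPR
   then gathers in[0], in[2], ... for x and VEC_EXTRACT_ODD_EXPR gathers
   in[1], in[3], ... for y, matching the VS5/VS6 stmts sketched above. */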
3550 /* If the data reference is aligned (dr_aligned) or potentially unaligned
3551 on a target that supports unaligned accesses (dr_unaligned_supported)
3552 we generate the following code:
3553 p = initial_addr;
3554 indx = 0;
3555 loop {
3556 p = p + indx * vectype_size;
3557 vec_dest = *(p);
3558 indx = indx + 1;
3559 }
3561 Otherwise, the data reference is potentially unaligned on a target that
3562 does not support unaligned accesses (dr_explicit_realign_optimized) -
3563 then generate the following code, in which the data in each iteration is
3564 obtained by two vector loads, one from the previous iteration, and one
3565 from the current iteration:
3566 p1 = initial_addr;
3567 msq_init = *(floor(p1))
3568 p2 = initial_addr + VS - 1;
3569 realignment_token = call target_builtin;
3570 indx = 0;
3571 loop {
3572 p2 = p2 + indx * vectype_size
3573 lsq = *(floor(p2))
3574 vec_dest = realign_load (msq, lsq, realignment_token)
3575 indx = indx + 1;
3576 msq = lsq;
3577 } */
3579 /* If the misalignment remains the same throughout the execution of the
3580 loop, we can create the init_addr and permutation mask at the loop
3581 preheader. Otherwise, it needs to be created inside the loop.
3582 This can only occur when vectorizing memory accesses in the inner-loop
3583 nested within an outer-loop that is being vectorized. */
3585 if (loop && nested_in_vect_loop_p (loop, stmt)
3586 && (TREE_INT_CST_LOW (DR_STEP (dr))
3587 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
3589 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
3590 compute_in_loop = true;
3593 if ((alignment_support_scheme == dr_explicit_realign_optimized
3594 || alignment_support_scheme == dr_explicit_realign)
3595 && !compute_in_loop)
3597 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
3598 alignment_support_scheme, NULL_TREE,
3599 &at_loop);
3600 if (alignment_support_scheme == dr_explicit_realign_optimized)
3602 phi = SSA_NAME_DEF_STMT (msq);
3603 offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
3606 else
3607 at_loop = loop;
3609 prev_stmt_info = NULL;
3610 for (j = 0; j < ncopies; j++)
3612 /* 1. Create the vector pointer update chain. */
3613 if (j == 0)
3614 dataref_ptr = vect_create_data_ref_ptr (first_stmt,
3615 at_loop, offset,
3616 &dummy, &ptr_incr, false,
3617 &inv_p);
3618 else
3619 dataref_ptr =
3620 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
3622 for (i = 0; i < vec_num; i++)
3624 if (i > 0)
3625 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3626 NULL_TREE);
3628 /* 2. Create the vector-load in the loop. */
3629 switch (alignment_support_scheme)
3631 case dr_aligned:
3632 gcc_assert (aligned_access_p (first_dr));
3633 data_ref = build_fold_indirect_ref (dataref_ptr);
3634 break;
3635 case dr_unaligned_supported:
3637 int mis = DR_MISALIGNMENT (first_dr);
3638 tree tmis = (mis == -1 ? size_zero_node : size_int (mis));
3640 tmis = size_binop (MULT_EXPR, tmis, size_int (BITS_PER_UNIT));
3641 data_ref =
3642 build2 (MISALIGNED_INDIRECT_REF, vectype, dataref_ptr, tmis);
3643 break;
3645 case dr_explicit_realign:
3647 tree ptr, bump;
3648 tree vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
3650 if (compute_in_loop)
3651 msq = vect_setup_realignment (first_stmt, gsi,
3652 &realignment_token,
3653 dr_explicit_realign,
3654 dataref_ptr, NULL);
3656 data_ref = build1 (ALIGN_INDIRECT_REF, vectype, dataref_ptr);
3657 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3658 new_stmt = gimple_build_assign (vec_dest, data_ref);
3659 new_temp = make_ssa_name (vec_dest, new_stmt);
3660 gimple_assign_set_lhs (new_stmt, new_temp);
3661 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
3662 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
3663 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3664 msq = new_temp;
3666 bump = size_binop (MULT_EXPR, vs_minus_1,
3667 TYPE_SIZE_UNIT (scalar_type));
3668 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
3669 data_ref = build1 (ALIGN_INDIRECT_REF, vectype, ptr);
3670 break;
3672 case dr_explicit_realign_optimized:
3673 data_ref = build1 (ALIGN_INDIRECT_REF, vectype, dataref_ptr);
3674 break;
3675 default:
3676 gcc_unreachable ();
3678 /* If accesses through a pointer to vectype do not alias the original
3679 memory reference we have a problem. This should never happen. */
3680 gcc_assert (alias_sets_conflict_p (get_alias_set (data_ref),
3681 get_alias_set (gimple_assign_rhs1 (stmt))));
3682 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3683 new_stmt = gimple_build_assign (vec_dest, data_ref);
3684 new_temp = make_ssa_name (vec_dest, new_stmt);
3685 gimple_assign_set_lhs (new_stmt, new_temp);
3686 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3687 mark_symbols_for_renaming (new_stmt);
3689 /* 3. Handle explicit realignment if necessary/supported. Create in
3690 loop: vec_dest = realign_load (msq, lsq, realignment_token) */
3691 if (alignment_support_scheme == dr_explicit_realign_optimized
3692 || alignment_support_scheme == dr_explicit_realign)
3694 tree tmp;
3696 lsq = gimple_assign_lhs (new_stmt);
3697 if (!realignment_token)
3698 realignment_token = dataref_ptr;
3699 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3700 tmp = build3 (REALIGN_LOAD_EXPR, vectype, msq, lsq,
3701 realignment_token);
3702 new_stmt = gimple_build_assign (vec_dest, tmp);
3703 new_temp = make_ssa_name (vec_dest, new_stmt);
3704 gimple_assign_set_lhs (new_stmt, new_temp);
3705 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3707 if (alignment_support_scheme == dr_explicit_realign_optimized)
3709 gcc_assert (phi);
3710 if (i == vec_num - 1 && j == ncopies - 1)
3711 add_phi_arg (phi, lsq, loop_latch_edge (containing_loop),
3712 UNKNOWN_LOCATION);
3713 msq = lsq;
3717 /* 4. Handle invariant-load. */
3718 if (inv_p && !bb_vinfo)
3720 gcc_assert (!strided_load);
3721 gcc_assert (nested_in_vect_loop_p (loop, stmt));
3722 if (j == 0)
3724 int k;
3725 tree t = NULL_TREE;
3726 tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);
3728 /* CHECKME: bitpos depends on endianness? */
3729 bitpos = bitsize_zero_node;
3730 vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
3731 bitsize, bitpos);
3732 vec_dest =
3733 vect_create_destination_var (scalar_dest, NULL_TREE);
3734 new_stmt = gimple_build_assign (vec_dest, vec_inv);
3735 new_temp = make_ssa_name (vec_dest, new_stmt);
3736 gimple_assign_set_lhs (new_stmt, new_temp);
3737 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3739 for (k = nunits - 1; k >= 0; --k)
3740 t = tree_cons (NULL_TREE, new_temp, t);
3741 /* FIXME: use build_constructor directly. */
3742 vec_inv = build_constructor_from_list (vectype, t);
3743 new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
3744 new_stmt = SSA_NAME_DEF_STMT (new_temp);
3746 else
3747 gcc_unreachable (); /* FORNOW. */
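/* In other words (illustrative, assuming nunits == 4), the invariant
   load is performed once and the scalar result is replicated into all
   vector lanes by the constructor built above:

     s = *p;                   <-- single (loop-invariant) scalar load
     vec_inv = {s, s, s, s};   <-- CONSTRUCTOR splatting it  */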
3750 /* Collect vector loads and later create their permutation in
3751 vect_transform_strided_load (). */
3752 if (strided_load || slp_perm)
3753 VEC_quick_push (tree, dr_chain, new_temp);
3755 /* Store vector loads in the corresponding SLP_NODE. */
3756 if (slp && !slp_perm)
3757 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
3760 if (slp && !slp_perm)
3761 continue;
3763 if (slp_perm)
3765 if (!vect_transform_slp_perm_load (stmt, dr_chain, gsi, vf,
3766 slp_node_instance, false))
3768 VEC_free (tree, heap, dr_chain);
3769 return false;
3772 else
3774 if (strided_load)
3776 if (!vect_transform_strided_load (stmt, dr_chain, group_size, gsi))
3777 return false;
3779 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3780 VEC_free (tree, heap, dr_chain);
3781 dr_chain = VEC_alloc (tree, heap, group_size);
3783 else
3785 if (j == 0)
3786 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3787 else
3788 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3789 prev_stmt_info = vinfo_for_stmt (new_stmt);
3794 if (dr_chain)
3795 VEC_free (tree, heap, dr_chain);
3797 return true;
3800 /* Function vect_is_simple_cond.
3802 Input:
3803 LOOP - the loop that is being vectorized.
3804 COND - Condition that is checked for simple use.
3806 Returns whether a COND can be vectorized. Checks whether
3807 condition operands are supportable using vect_is_simple_use. */
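/* For example (illustrative): conditions such as

     a_5 > b_7      <-- two SSA names with supportable defs
     a_5 != 0       <-- SSA name compared against a constant

   are accepted, whereas a bare SSA name such as 'flag_3' is rejected
   because it is not COMPARISON_CLASS_P.  */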
3809 static bool
3810 vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo)
3812 tree lhs, rhs;
3813 tree def;
3814 enum vect_def_type dt;
3816 if (!COMPARISON_CLASS_P (cond))
3817 return false;
3819 lhs = TREE_OPERAND (cond, 0);
3820 rhs = TREE_OPERAND (cond, 1);
3822 if (TREE_CODE (lhs) == SSA_NAME)
3824 gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
3825 if (!vect_is_simple_use (lhs, loop_vinfo, NULL, &lhs_def_stmt, &def,
3826 &dt))
3827 return false;
3829 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
3830 && TREE_CODE (lhs) != FIXED_CST)
3831 return false;
3833 if (TREE_CODE (rhs) == SSA_NAME)
3835 gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
3836 if (!vect_is_simple_use (rhs, loop_vinfo, NULL, &rhs_def_stmt, &def,
3837 &dt))
3838 return false;
3840 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
3841 && TREE_CODE (rhs) != FIXED_CST)
3842 return false;
3844 return true;
3847 /* vectorizable_condition.
3849 Check if STMT is a conditional modify expression that can be vectorized.
3850 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3851 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
3852 at GSI.
3854 When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
3855 to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in the
3856 else clause if it is 2).
3858 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
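/* As a sketch of the transformation (illustrative, with va/vb/vc/vd the
   vectorized operands):

     scalar:      x = a < b ? c : d;

     vectorized:  vec_compare   = va < vb;
                  vec_cond_expr = VEC_COND_EXPR <vec_compare, vc, vd>;

   i.e. an element-wise select: lane k of the result is taken from vc
   when the comparison holds in lane k, and from vd otherwise.  */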
3860 bool
3861 vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
3862 gimple *vec_stmt, tree reduc_def, int reduc_index)
3864 tree scalar_dest = NULL_TREE;
3865 tree vec_dest = NULL_TREE;
3866 tree op = NULL_TREE;
3867 tree cond_expr, then_clause, else_clause;
3868 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3869 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3870 tree vec_cond_lhs, vec_cond_rhs, vec_then_clause, vec_else_clause;
3871 tree vec_compare, vec_cond_expr;
3872 tree new_temp;
3873 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3874 enum machine_mode vec_mode;
3875 tree def;
3876 enum vect_def_type dt;
3877 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3878 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3879 enum tree_code code;
3881 /* FORNOW: unsupported in basic block SLP. */
3882 gcc_assert (loop_vinfo);
3884 gcc_assert (ncopies >= 1);
3885 if (ncopies > 1)
3886 return false; /* FORNOW */
3888 if (!STMT_VINFO_RELEVANT_P (stmt_info))
3889 return false;
3891 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
3892 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
3893 && reduc_def))
3894 return false;
3896 /* FORNOW: SLP not supported. */
3897 if (STMT_SLP_TYPE (stmt_info))
3898 return false;
3900 /* FORNOW: not yet supported. */
3901 if (STMT_VINFO_LIVE_P (stmt_info))
3903 if (vect_print_dump_info (REPORT_DETAILS))
3904 fprintf (vect_dump, "value used after loop.");
3905 return false;
3908 /* Is vectorizable conditional operation? */
3909 if (!is_gimple_assign (stmt))
3910 return false;
3912 code = gimple_assign_rhs_code (stmt);
3914 if (code != COND_EXPR)
3915 return false;
3917 gcc_assert (gimple_assign_single_p (stmt));
3918 op = gimple_assign_rhs1 (stmt);
3919 cond_expr = TREE_OPERAND (op, 0);
3920 then_clause = TREE_OPERAND (op, 1);
3921 else_clause = TREE_OPERAND (op, 2);
3923 if (!vect_is_simple_cond (cond_expr, loop_vinfo))
3924 return false;
3926 /* We do not handle two different vector types for the condition
3927 and the values. */
3928 if (!types_compatible_p (TREE_TYPE (TREE_OPERAND (cond_expr, 0)),
3929 TREE_TYPE (vectype)))
3930 return false;
3932 if (TREE_CODE (then_clause) == SSA_NAME)
3934 gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
3935 if (!vect_is_simple_use (then_clause, loop_vinfo, NULL,
3936 &then_def_stmt, &def, &dt))
3937 return false;
3939 else if (TREE_CODE (then_clause) != INTEGER_CST
3940 && TREE_CODE (then_clause) != REAL_CST
3941 && TREE_CODE (then_clause) != FIXED_CST)
3942 return false;
3944 if (TREE_CODE (else_clause) == SSA_NAME)
3946 gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
3947 if (!vect_is_simple_use (else_clause, loop_vinfo, NULL,
3948 &else_def_stmt, &def, &dt))
3949 return false;
3951 else if (TREE_CODE (else_clause) != INTEGER_CST
3952 && TREE_CODE (else_clause) != REAL_CST
3953 && TREE_CODE (else_clause) != FIXED_CST)
3954 return false;
3957 vec_mode = TYPE_MODE (vectype);
3959 if (!vec_stmt)
3961 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
3962 return expand_vec_cond_expr_p (TREE_TYPE (op), vec_mode);
3965 /* Transform */
3967 /* Handle def. */
3968 scalar_dest = gimple_assign_lhs (stmt);
3969 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3971 /* Handle cond expr. */
3972 vec_cond_lhs =
3973 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0), stmt, NULL);
3974 vec_cond_rhs =
3975 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1), stmt, NULL);
3976 if (reduc_index == 1)
3977 vec_then_clause = reduc_def;
3978 else
3979 vec_then_clause = vect_get_vec_def_for_operand (then_clause, stmt, NULL);
3980 if (reduc_index == 2)
3981 vec_else_clause = reduc_def;
3982 else
3983 vec_else_clause = vect_get_vec_def_for_operand (else_clause, stmt, NULL);
3985 /* Arguments are ready. Create the new vector stmt. */
3986 vec_compare = build2 (TREE_CODE (cond_expr), vectype,
3987 vec_cond_lhs, vec_cond_rhs);
3988 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
3989 vec_compare, vec_then_clause, vec_else_clause);
3991 *vec_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
3992 new_temp = make_ssa_name (vec_dest, *vec_stmt);
3993 gimple_assign_set_lhs (*vec_stmt, new_temp);
3994 vect_finish_stmt_generation (stmt, *vec_stmt, gsi);
3996 return true;
4000 /* Make sure the statement is vectorizable. */
4002 bool
4003 vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
4005 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4006 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4007 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
4008 bool ok;
4009 tree scalar_type, vectype;
4011 if (vect_print_dump_info (REPORT_DETAILS))
4013 fprintf (vect_dump, "==> examining statement: ");
4014 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4017 if (gimple_has_volatile_ops (stmt))
4019 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4020 fprintf (vect_dump, "not vectorized: stmt has volatile operands");
4022 return false;
4025 /* Skip stmts that do not need to be vectorized. In loops this is expected
4026 to include:
4027 - the COND_EXPR which is the loop exit condition
4028 - any LABEL_EXPRs in the loop
4029 - computations that are used only for array indexing or loop control.
4030 In basic blocks we only analyze statements that are a part of some SLP
4031 instance, therefore, all the statements are relevant. */
4033 if (!STMT_VINFO_RELEVANT_P (stmt_info)
4034 && !STMT_VINFO_LIVE_P (stmt_info))
4036 if (vect_print_dump_info (REPORT_DETAILS))
4037 fprintf (vect_dump, "irrelevant.");
4039 return true;
4042 switch (STMT_VINFO_DEF_TYPE (stmt_info))
4044 case vect_internal_def:
4045 break;
4047 case vect_reduction_def:
4048 case vect_nested_cycle:
4049 gcc_assert (!bb_vinfo && (relevance == vect_used_in_outer
4050 || relevance == vect_used_in_outer_by_reduction
4051 || relevance == vect_unused_in_scope));
4052 break;
4054 case vect_induction_def:
4055 case vect_constant_def:
4056 case vect_external_def:
4057 case vect_unknown_def_type:
4058 default:
4059 gcc_unreachable ();
4062 if (bb_vinfo)
4064 gcc_assert (PURE_SLP_STMT (stmt_info));
4066 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
4067 if (vect_print_dump_info (REPORT_DETAILS))
4069 fprintf (vect_dump, "get vectype for scalar type: ");
4070 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4073 vectype = get_vectype_for_scalar_type (scalar_type);
4074 if (!vectype)
4076 if (vect_print_dump_info (REPORT_DETAILS))
4078 fprintf (vect_dump, "not SLPed: unsupported data-type ");
4079 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4081 return false;
4084 if (vect_print_dump_info (REPORT_DETAILS))
4086 fprintf (vect_dump, "vectype: ");
4087 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4090 STMT_VINFO_VECTYPE (stmt_info) = vectype;
4093 if (STMT_VINFO_RELEVANT_P (stmt_info))
4095 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
4096 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
4097 *need_to_vectorize = true;
4100 ok = true;
4101 if (!bb_vinfo
4102 && (STMT_VINFO_RELEVANT_P (stmt_info)
4103 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
4104 ok = (vectorizable_type_promotion (stmt, NULL, NULL, NULL)
4105 || vectorizable_type_demotion (stmt, NULL, NULL, NULL)
4106 || vectorizable_conversion (stmt, NULL, NULL, NULL)
4107 || vectorizable_operation (stmt, NULL, NULL, NULL)
4108 || vectorizable_assignment (stmt, NULL, NULL, NULL)
4109 || vectorizable_load (stmt, NULL, NULL, NULL, NULL)
4110 || vectorizable_call (stmt, NULL, NULL)
4111 || vectorizable_store (stmt, NULL, NULL, NULL)
4112 || vectorizable_reduction (stmt, NULL, NULL, NULL)
4113 || vectorizable_condition (stmt, NULL, NULL, NULL, 0));
4114 else
4116 if (bb_vinfo)
4117 ok = (vectorizable_operation (stmt, NULL, NULL, node)
4118 || vectorizable_assignment (stmt, NULL, NULL, node)
4119 || vectorizable_load (stmt, NULL, NULL, node, NULL)
4120 || vectorizable_store (stmt, NULL, NULL, node));
4123 if (!ok)
4125 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4127 fprintf (vect_dump, "not vectorized: relevant stmt not ");
4128 fprintf (vect_dump, "supported: ");
4129 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4132 return false;
4135 if (bb_vinfo)
4136 return true;
4138 /* Stmts that are (also) "live" (i.e., used outside the loop) need extra
4139 handling, except for vectorizable reductions. */
4140 if (STMT_VINFO_LIVE_P (stmt_info)
4141 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4142 ok = vectorizable_live_operation (stmt, NULL, NULL);
4144 if (!ok)
4146 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4148 fprintf (vect_dump, "not vectorized: live stmt not ");
4149 fprintf (vect_dump, "supported: ");
4150 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4153 return false;
4156 if (!PURE_SLP_STMT (stmt_info))
4158 /* Groups of strided accesses whose size is not a power of 2 cannot yet
4159 be vectorized using loop-vectorization. Therefore, if this stmt
4160 feeds non-SLP-able stmts (i.e., this stmt has to be both SLPed and
4161 loop-based vectorized), the loop cannot be vectorized. */
4162 if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
4163 && exact_log2 (DR_GROUP_SIZE (vinfo_for_stmt (
4164 DR_GROUP_FIRST_DR (stmt_info)))) == -1)
4166 if (vect_print_dump_info (REPORT_DETAILS))
4168 fprintf (vect_dump, "not vectorized: the size of the group "
4169 "of strided accesses is not a power of 2");
4170 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4173 return false;
4177 return true;
4181 /* Function vect_transform_stmt.
4183 Create a vectorized stmt to replace STMT, and insert it at BSI. */
4185 bool
4186 vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
4187 bool *strided_store, slp_tree slp_node,
4188 slp_instance slp_node_instance)
4190 bool is_store = false;
4191 gimple vec_stmt = NULL;
4192 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4193 gimple orig_stmt_in_pattern;
4194 bool done;
4196 switch (STMT_VINFO_TYPE (stmt_info))
4198 case type_demotion_vec_info_type:
4199 done = vectorizable_type_demotion (stmt, gsi, &vec_stmt, slp_node);
4200 gcc_assert (done);
4201 break;
4203 case type_promotion_vec_info_type:
4204 done = vectorizable_type_promotion (stmt, gsi, &vec_stmt, slp_node);
4205 gcc_assert (done);
4206 break;
4208 case type_conversion_vec_info_type:
4209 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
4210 gcc_assert (done);
4211 break;
4213 case induc_vec_info_type:
4214 gcc_assert (!slp_node);
4215 done = vectorizable_induction (stmt, gsi, &vec_stmt);
4216 gcc_assert (done);
4217 break;
4219 case op_vec_info_type:
4220 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
4221 gcc_assert (done);
4222 break;
4224 case assignment_vec_info_type:
4225 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
4226 gcc_assert (done);
4227 break;
4229 case load_vec_info_type:
4230 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
4231 slp_node_instance);
4232 gcc_assert (done);
4233 break;
4235 case store_vec_info_type:
4236 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
4237 gcc_assert (done);
4238 if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && !slp_node)
4240 /* In case of interleaving, the whole chain is vectorized when the
4241 last store in the chain is reached. Store stmts before the last
4242 one are skipped, and their stmt_vec_info shouldn't be freed
4243 meanwhile. */
4244 *strided_store = true;
4245 if (STMT_VINFO_VEC_STMT (stmt_info))
4246 is_store = true;
4248 else
4249 is_store = true;
4250 break;
4252 case condition_vec_info_type:
4253 gcc_assert (!slp_node);
4254 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0);
4255 gcc_assert (done);
4256 break;
4258 case call_vec_info_type:
4259 gcc_assert (!slp_node);
4260 done = vectorizable_call (stmt, gsi, &vec_stmt);
4261 break;
4263 case reduc_vec_info_type:
4264 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
4265 gcc_assert (done);
4266 break;
4268 default:
4269 if (!STMT_VINFO_LIVE_P (stmt_info))
4271 if (vect_print_dump_info (REPORT_DETAILS))
4272 fprintf (vect_dump, "stmt not supported.");
4273 gcc_unreachable ();
4277 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
4278 is being vectorized, but outside the immediately enclosing loop. */
4279 if (vec_stmt
4280 && STMT_VINFO_LOOP_VINFO (stmt_info)
4281 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
4282 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
4283 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
4284 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
4285 || STMT_VINFO_RELEVANT (stmt_info) ==
4286 vect_used_in_outer_by_reduction))
4288 struct loop *innerloop = LOOP_VINFO_LOOP (
4289 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
4290 imm_use_iterator imm_iter;
4291 use_operand_p use_p;
4292 tree scalar_dest;
4293 gimple exit_phi;
4295 if (vect_print_dump_info (REPORT_DETAILS))
4296 fprintf (vect_dump, "Record the vdef for outer-loop vectorization.");
4298 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
4299 (to be used when vectorizing outer-loop stmts that use the DEF of
4300 STMT). */
4301 if (gimple_code (stmt) == GIMPLE_PHI)
4302 scalar_dest = PHI_RESULT (stmt);
4303 else
4304 scalar_dest = gimple_assign_lhs (stmt);
4306 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4308 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
4310 exit_phi = USE_STMT (use_p);
4311 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
4316 /* Handle stmts whose DEF is used outside the loop-nest that is
4317 being vectorized. */
4318 if (STMT_VINFO_LIVE_P (stmt_info)
4319 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4321 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
4322 gcc_assert (done);
4325 if (vec_stmt)
4327 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
4328 orig_stmt_in_pattern = STMT_VINFO_RELATED_STMT (stmt_info);
4329 if (orig_stmt_in_pattern)
4331 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt_in_pattern);
4332 /* STMT was inserted by the vectorizer to replace a computation idiom.
4333 ORIG_STMT_IN_PATTERN is a stmt in the original sequence that
4334 computed this idiom. We need to record a pointer to VEC_STMT in
4335 the stmt_info of ORIG_STMT_IN_PATTERN. See more details in the
4336 documentation of vect_pattern_recog. */
4337 if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
4339 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
4340 STMT_VINFO_VEC_STMT (stmt_vinfo) = vec_stmt;
4345 return is_store;
4349 /* Remove a group of stores (for SLP or interleaving) and free their
4350 stmt_vec_info. */
4352 void
4353 vect_remove_stores (gimple first_stmt)
4355 gimple next = first_stmt;
4356 gimple tmp;
4357 gimple_stmt_iterator next_si;
4359 while (next)
4361 /* Free the attached stmt_vec_info and remove the stmt. */
4362 next_si = gsi_for_stmt (next);
4363 gsi_remove (&next_si, true);
4364 tmp = DR_GROUP_NEXT_DR (vinfo_for_stmt (next));
4365 free_stmt_vec_info (next);
4366 next = tmp;
4371 /* Function new_stmt_vec_info.
4373 Create and initialize a new stmt_vec_info struct for STMT. */
4375 stmt_vec_info
4376 new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
4377 bb_vec_info bb_vinfo)
4379 stmt_vec_info res;
4380 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
4382 STMT_VINFO_TYPE (res) = undef_vec_info_type;
4383 STMT_VINFO_STMT (res) = stmt;
4384 STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
4385 STMT_VINFO_BB_VINFO (res) = bb_vinfo;
4386 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
4387 STMT_VINFO_LIVE_P (res) = false;
4388 STMT_VINFO_VECTYPE (res) = NULL;
4389 STMT_VINFO_VEC_STMT (res) = NULL;
4390 STMT_VINFO_VECTORIZABLE (res) = true;
4391 STMT_VINFO_IN_PATTERN_P (res) = false;
4392 STMT_VINFO_RELATED_STMT (res) = NULL;
4393 STMT_VINFO_DATA_REF (res) = NULL;
4395 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
4396 STMT_VINFO_DR_OFFSET (res) = NULL;
4397 STMT_VINFO_DR_INIT (res) = NULL;
4398 STMT_VINFO_DR_STEP (res) = NULL;
4399 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
4401 if (gimple_code (stmt) == GIMPLE_PHI
4402 && is_loop_header_bb_p (gimple_bb (stmt)))
4403 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
4404 else
4405 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
4407 STMT_VINFO_SAME_ALIGN_REFS (res) = VEC_alloc (dr_p, heap, 5);
4408 STMT_VINFO_INSIDE_OF_LOOP_COST (res) = 0;
4409 STMT_VINFO_OUTSIDE_OF_LOOP_COST (res) = 0;
4410 STMT_SLP_TYPE (res) = loop_vect;
4411 DR_GROUP_FIRST_DR (res) = NULL;
4412 DR_GROUP_NEXT_DR (res) = NULL;
4413 DR_GROUP_SIZE (res) = 0;
4414 DR_GROUP_STORE_COUNT (res) = 0;
4415 DR_GROUP_GAP (res) = 0;
4416 DR_GROUP_SAME_DR_STMT (res) = NULL;
4417 DR_GROUP_READ_WRITE_DEPENDENCE (res) = false;
4419 return res;
4423 /* Create a vector for stmt_vec_info. */
4425 void
4426 init_stmt_vec_info_vec (void)
4428 gcc_assert (!stmt_vec_info_vec);
4429 stmt_vec_info_vec = VEC_alloc (vec_void_p, heap, 50);
4433 /* Free the vector of stmt_vec_info. */
4435 void
4436 free_stmt_vec_info_vec (void)
4438 gcc_assert (stmt_vec_info_vec);
4439 VEC_free (vec_void_p, heap, stmt_vec_info_vec);
4443 /* Free stmt vectorization related info. */
4445 void
4446 free_stmt_vec_info (gimple stmt)
4448 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4450 if (!stmt_info)
4451 return;
4453 VEC_free (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
4454 set_vinfo_for_stmt (stmt, NULL);
4455 free (stmt_info);
4459 /* Function get_vectype_for_scalar_type.
4461 Returns the vector type corresponding to SCALAR_TYPE as supported
4462 by the target. */
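/* Example (illustrative): on a target whose SIMD word is 16 bytes, a
   4-byte 'int' gives nunits = 16 / 4 = 4, i.e. a V4SI-like vector type,
   while a 1-byte 'char' gives 16 units.  An integral type whose
   TYPE_PRECISION does not match its mode (e.g. a bool carried in a
   wider mode) is rejected by the precision check below.  */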
4464 tree
4465 get_vectype_for_scalar_type (tree scalar_type)
4467 enum machine_mode inner_mode = TYPE_MODE (scalar_type);
4468 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
4469 int nunits;
4470 tree vectype;
4472 if (nbytes == 0 || nbytes >= UNITS_PER_SIMD_WORD (inner_mode))
4473 return NULL_TREE;
4475 /* We can't build a vector type of elements with alignment bigger than
4476 their size. */
4477 if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
4478 return NULL_TREE;
4480 /* If we'd build a vector type of elements whose mode precision doesn't
4481 match their type's precision we'll get mismatched types on vector
4482 extracts via BIT_FIELD_REFs. This effectively means we disable
4483 vectorization of bool and/or enum types in some languages. */
4484 if (INTEGRAL_TYPE_P (scalar_type)
4485 && GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type))
4486 return NULL_TREE;
4488 /* FORNOW: Only a single vector size per mode (UNITS_PER_SIMD_WORD)
4489 is expected. */
4490 nunits = UNITS_PER_SIMD_WORD (inner_mode) / nbytes;
4492 vectype = build_vector_type (scalar_type, nunits);
4493 if (vect_print_dump_info (REPORT_DETAILS))
4495 fprintf (vect_dump, "get vectype with %d units of type ", nunits);
4496 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4499 if (!vectype)
4500 return NULL_TREE;
4502 if (vect_print_dump_info (REPORT_DETAILS))
4504 fprintf (vect_dump, "vectype: ");
4505 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4508 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
4509 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
4511 if (vect_print_dump_info (REPORT_DETAILS))
4512 fprintf (vect_dump, "mode not supported by target.");
4513 return NULL_TREE;
4516 return vectype;
4519 /* Function get_same_sized_vectype
4521 Returns a vector type for SCALAR_TYPE with the same size as
4522 VECTOR_TYPE, if supported by the target. */
4524 tree
4525 get_same_sized_vectype (tree scalar_type, tree vector_type ATTRIBUTE_UNUSED)
4527 return get_vectype_for_scalar_type (scalar_type);
4530 /* Function vect_is_simple_use.
4532 Input:
4533 LOOP_VINFO - the vect info of the loop that is being vectorized.
4534 BB_VINFO - the vect info of the basic block that is being vectorized.
4535 OPERAND - operand of a stmt in the loop or bb.
4536 DEF - the defining stmt in case OPERAND is an SSA_NAME.
4538 Returns whether a stmt with OPERAND can be vectorized.
4539 For loops, supportable operands are constants, loop invariants, and operands
4540 that are defined by the current iteration of the loop. Unsupportable
4541 operands are those that are defined by a previous iteration of the loop (as
4542 is the case in reduction/induction computations).
4543 For basic blocks, supportable operands are constants and bb invariants.
4544 For now, operands defined outside the basic block are not supported. */
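/* For example (illustrative loop):

     c = ...;             <-- defined before the loop
     loop {
       t_1 = a[i];        <-- vect_internal_def (current iteration)
       s_2 = t_1 + c;     <-- 'c' is loop-invariant: vect_external_def
       b[i] = s_2 + 5;    <-- '5' is vect_constant_def
     }

   whereas an operand carried over from a previous iteration of the loop
   (a reduction/induction cycle) is not a simple use in this sense.  */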
4546 bool
4547 vect_is_simple_use (tree operand, loop_vec_info loop_vinfo,
4548 bb_vec_info bb_vinfo, gimple *def_stmt,
4549 tree *def, enum vect_def_type *dt)
4551 basic_block bb;
4552 stmt_vec_info stmt_vinfo;
4553 struct loop *loop = NULL;
4555 if (loop_vinfo)
4556 loop = LOOP_VINFO_LOOP (loop_vinfo);
4558 *def_stmt = NULL;
4559 *def = NULL_TREE;
4561 if (vect_print_dump_info (REPORT_DETAILS))
4563 fprintf (vect_dump, "vect_is_simple_use: operand ");
4564 print_generic_expr (vect_dump, operand, TDF_SLIM);
4567 if (TREE_CODE (operand) == INTEGER_CST || TREE_CODE (operand) == REAL_CST)
4569 *dt = vect_constant_def;
4570 return true;
4573 if (is_gimple_min_invariant (operand))
4575 *def = operand;
4576 *dt = vect_external_def;
4577 return true;
4580 if (TREE_CODE (operand) == PAREN_EXPR)
4582 if (vect_print_dump_info (REPORT_DETAILS))
4583 fprintf (vect_dump, "non-associatable copy.");
4584 operand = TREE_OPERAND (operand, 0);
4587 if (TREE_CODE (operand) != SSA_NAME)
4589 if (vect_print_dump_info (REPORT_DETAILS))
4590 fprintf (vect_dump, "not ssa-name.");
4591 return false;
4594 *def_stmt = SSA_NAME_DEF_STMT (operand);
4595 if (*def_stmt == NULL)
4597 if (vect_print_dump_info (REPORT_DETAILS))
4598 fprintf (vect_dump, "no def_stmt.");
4599 return false;
4602 if (vect_print_dump_info (REPORT_DETAILS))
4604 fprintf (vect_dump, "def_stmt: ");
4605 print_gimple_stmt (vect_dump, *def_stmt, 0, TDF_SLIM);
4608 /* An empty stmt is expected only in the case of a function argument
4609 (otherwise we expect a phi_node or a GIMPLE_ASSIGN). */
4610 if (gimple_nop_p (*def_stmt))
4612 *def = operand;
4613 *dt = vect_external_def;
4614 return true;
4617 bb = gimple_bb (*def_stmt);
4619 if ((loop && !flow_bb_inside_loop_p (loop, bb))
4620 || (!loop && bb != BB_VINFO_BB (bb_vinfo))
4621 || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
4622 *dt = vect_external_def;
4623 else
4625 stmt_vinfo = vinfo_for_stmt (*def_stmt);
4626 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
4629 if (*dt == vect_unknown_def_type)
4631 if (vect_print_dump_info (REPORT_DETAILS))
4632 fprintf (vect_dump, "Unsupported pattern.");
4633 return false;
4636 if (vect_print_dump_info (REPORT_DETAILS))
4637 fprintf (vect_dump, "type of def: %d.", *dt);
4639 switch (gimple_code (*def_stmt))
4641 case GIMPLE_PHI:
4642 *def = gimple_phi_result (*def_stmt);
4643 break;
4645 case GIMPLE_ASSIGN:
4646 *def = gimple_assign_lhs (*def_stmt);
4647 break;
4649 case GIMPLE_CALL:
4650 *def = gimple_call_lhs (*def_stmt);
4651 if (*def != NULL)
4652 break;
4653 /* FALLTHRU */
4654 default:
4655 if (vect_print_dump_info (REPORT_DETAILS))
4656 fprintf (vect_dump, "unsupported defining stmt: ");
4657 return false;
4660 return true;
4663 /* Function vect_is_simple_use_1.
4665 Same as vect_is_simple_use but also determines the vector operand
4666 type of OPERAND and stores it to *VECTYPE. If the definition of
4667 OPERAND is vect_uninitialized_def, vect_constant_def or
4668 vect_external_def *VECTYPE will be set to NULL_TREE and the caller
4669 is responsible for computing the best suited vector type for the
4670 scalar operand. */
4672 bool
4673 vect_is_simple_use_1 (tree operand, loop_vec_info loop_vinfo,
4674 bb_vec_info bb_vinfo, gimple *def_stmt,
4675 tree *def, enum vect_def_type *dt, tree *vectype)
4677 if (!vect_is_simple_use (operand, loop_vinfo, bb_vinfo, def_stmt, def, dt))
4678 return false;
4680 /* Now get a vector type if the def is internal, otherwise supply
4681 NULL_TREE and leave it up to the caller to figure out a proper
4682 type for the use stmt. */
4683 if (*dt == vect_internal_def
4684 || *dt == vect_induction_def
4685 || *dt == vect_reduction_def
4686 || *dt == vect_double_reduction_def
4687 || *dt == vect_nested_cycle)
4689 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
4690 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
4691 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
4692 *vectype = STMT_VINFO_VECTYPE (stmt_info);
4693 gcc_assert (*vectype != NULL_TREE);
4695 else if (*dt == vect_uninitialized_def
4696 || *dt == vect_constant_def
4697 || *dt == vect_external_def)
4698 *vectype = NULL_TREE;
4699 else
4700 gcc_unreachable ();
4702 return true;
4706 /* Function supportable_widening_operation
4708 Check whether an operation represented by the code CODE is a
4709 widening operation that is supported by the target platform in
4710 vector form (i.e., when operating on arguments of type VECTYPE_IN
4711 producing a result of type VECTYPE_OUT).
4713 Widening operations we currently support are NOP (CONVERT), FLOAT
4714 and WIDEN_MULT. This function checks if these operations are supported
4715 by the target platform either directly (via vector tree-codes), or via
4716 target builtins.
4718 Output:
4719 - CODE1 and CODE2 are codes of vector operations to be used when
4720 vectorizing the operation, if available.
4721 - DECL1 and DECL2 are decls of target builtin functions to be used
4722 when vectorizing the operation, if available. In this case,
4723 CODE1 and CODE2 are CALL_EXPR.
4724 - MULTI_STEP_CVT determines the number of required intermediate steps in
4725 case of multi-step conversion (like char->short->int - in that case
4726 MULTI_STEP_CVT will be 1).
4727 - INTERM_TYPES contains the intermediate type required to perform the
4728 widening operation (short in the above example). */
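/* Example (illustrative): a char -> int conversion on a target that only
   provides char <-> short and short <-> int unpacking is handled as a
   two-step promotion, so on success *MULTI_STEP_CVT is 1 and
   *INTERM_TYPES holds the single intermediate type (short):

     vshort_lo = VEC_UNPACK_LO_EXPR <vchar>;
     vshort_hi = VEC_UNPACK_HI_EXPR <vchar>;
     vint_0    = VEC_UNPACK_LO_EXPR <vshort_lo>;
     vint_1    = VEC_UNPACK_HI_EXPR <vshort_lo>;
     ...and likewise for vshort_hi.  */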
4730 bool
4731 supportable_widening_operation (enum tree_code code, gimple stmt,
4732 tree vectype_out, tree vectype_in,
4733 tree *decl1, tree *decl2,
4734 enum tree_code *code1, enum tree_code *code2,
4735 int *multi_step_cvt,
4736 VEC (tree, heap) **interm_types)
4738 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4739 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
4740 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
4741 bool ordered_p;
4742 enum machine_mode vec_mode;
4743 enum insn_code icode1, icode2;
4744 optab optab1, optab2;
4745 tree vectype = vectype_in;
4746 tree wide_vectype = vectype_out;
4747 enum tree_code c1, c2;
4749 /* The result of a vectorized widening operation usually requires two vectors
4750 (because the widened results do not fit in one vector). The generated
4751 vector results would normally be expected to be generated in the same
4752 order as in the original scalar computation, i.e. if 8 results are
4753 generated in each vector iteration, they are to be organized as follows:
4754 vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].
4756 However, in the special case that the result of the widening operation is
4757 used in a reduction computation only, the order doesn't matter (because
4758 when vectorizing a reduction we change the order of the computation).
4759 Some targets can take advantage of this and generate more efficient code.
4760 For example, targets like Altivec, that support widen_mult using a sequence
4761 of {mult_even,mult_odd} generate the following vectors:
4762 vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8].
4764 When vectorizing outer-loops, we execute the inner-loop sequentially
4765 (each vectorized inner-loop iteration contributes to VF outer-loop
4766 iterations in parallel). We therefore do not allow changing the order
4767 of the computation in the inner-loop during outer-loop vectorization. */
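/* E.g. (illustrative) for a sum reduction the two layouts are
   interchangeable, since both pairs of vectors add up to the same value,
   just grouped differently:

     ordered:   (res1+res2+res3+res4) + (res5+res6+res7+res8)
     even/odd:  (res1+res3+res5+res7) + (res2+res4+res6+res8)  */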
4769 if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
4770 && !nested_in_vect_loop_p (vect_loop, stmt))
4771 ordered_p = false;
4772 else
4773 ordered_p = true;
4775 if (!ordered_p
4776 && code == WIDEN_MULT_EXPR
4777 && targetm.vectorize.builtin_mul_widen_even
4778 && targetm.vectorize.builtin_mul_widen_even (vectype)
4779 && targetm.vectorize.builtin_mul_widen_odd
4780 && targetm.vectorize.builtin_mul_widen_odd (vectype))
4782 if (vect_print_dump_info (REPORT_DETAILS))
4783 fprintf (vect_dump, "Unordered widening operation detected.");
4785 *code1 = *code2 = CALL_EXPR;
4786 *decl1 = targetm.vectorize.builtin_mul_widen_even (vectype);
4787 *decl2 = targetm.vectorize.builtin_mul_widen_odd (vectype);
4788 return true;
4791 switch (code)
4793 case WIDEN_MULT_EXPR:
4794 if (BYTES_BIG_ENDIAN)
4796 c1 = VEC_WIDEN_MULT_HI_EXPR;
4797 c2 = VEC_WIDEN_MULT_LO_EXPR;
4799 else
4801 c2 = VEC_WIDEN_MULT_HI_EXPR;
4802 c1 = VEC_WIDEN_MULT_LO_EXPR;
4804 break;
4806 CASE_CONVERT:
4807 if (BYTES_BIG_ENDIAN)
4809 c1 = VEC_UNPACK_HI_EXPR;
4810 c2 = VEC_UNPACK_LO_EXPR;
4812 else
4814 c2 = VEC_UNPACK_HI_EXPR;
4815 c1 = VEC_UNPACK_LO_EXPR;
4817 break;
4819 case FLOAT_EXPR:
4820 if (BYTES_BIG_ENDIAN)
4822 c1 = VEC_UNPACK_FLOAT_HI_EXPR;
4823 c2 = VEC_UNPACK_FLOAT_LO_EXPR;
4825 else
4827 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
4828 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
4830 break;
4832 case FIX_TRUNC_EXPR:
4833 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
4834 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
4835 computing the operation. */
4836 return false;
4838 default:
4839 gcc_unreachable ();
4842 if (code == FIX_TRUNC_EXPR)
4844 /* The signedness is determined from the output operand. */
4845 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
4846 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
4848 else
4850 optab1 = optab_for_tree_code (c1, vectype, optab_default);
4851 optab2 = optab_for_tree_code (c2, vectype, optab_default);
4854 if (!optab1 || !optab2)
4855 return false;
4857 vec_mode = TYPE_MODE (vectype);
4858 if ((icode1 = optab_handler (optab1, vec_mode)->insn_code) == CODE_FOR_nothing
4859 || (icode2 = optab_handler (optab2, vec_mode)->insn_code)
4860 == CODE_FOR_nothing)
4861 return false;
4863 /* Check if it's a multi-step conversion that can be done using intermediate
4864 types. */
4865 if (insn_data[icode1].operand[0].mode != TYPE_MODE (wide_vectype)
4866 || insn_data[icode2].operand[0].mode != TYPE_MODE (wide_vectype))
4868 int i;
4869 tree prev_type = vectype, intermediate_type;
4870 enum machine_mode intermediate_mode, prev_mode = vec_mode;
4871 optab optab3, optab4;
4873 if (!CONVERT_EXPR_CODE_P (code))
4874 return false;
4876 *code1 = c1;
4877 *code2 = c2;
4879 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
4880 intermediate steps in the promotion sequence. We try MAX_INTERM_CVT_STEPS
4881 to get to WIDE_VECTYPE, and fail if we do not. */
4882 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
4883 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
4885 intermediate_mode = insn_data[icode1].operand[0].mode;
4886 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
4887 TYPE_UNSIGNED (prev_type));
4888 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
4889 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
4891 if (!optab3 || !optab4
4892 || (icode1 = optab1->handlers[(int) prev_mode].insn_code)
4893 == CODE_FOR_nothing
4894 || insn_data[icode1].operand[0].mode != intermediate_mode
4895 || (icode2 = optab2->handlers[(int) prev_mode].insn_code)
4896 == CODE_FOR_nothing
4897 || insn_data[icode2].operand[0].mode != intermediate_mode
4898 || (icode1 = optab3->handlers[(int) intermediate_mode].insn_code)
4899 == CODE_FOR_nothing
4900 || (icode2 = optab4->handlers[(int) intermediate_mode].insn_code)
4901 == CODE_FOR_nothing)
4902 return false;
4904 VEC_quick_push (tree, *interm_types, intermediate_type);
4905 (*multi_step_cvt)++;
4907 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
4908 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
4909 return true;
4911 prev_type = intermediate_type;
4912 prev_mode = intermediate_mode;
4915 return false;
4918 *code1 = c1;
4919 *code2 = c2;
4920 return true;
4924 /* Function supportable_narrowing_operation
4926 Check whether an operation represented by the code CODE is a
4927 narrowing operation that is supported by the target platform in
4928 vector form (i.e., when operating on arguments of type VECTYPE_IN
4929 and producing a result of type VECTYPE_OUT).
4931 Narrowing operations we currently support are NOP (CONVERT) and
4932 FIX_TRUNC. This function checks if these operations are supported by
4933 the target platform directly via vector tree-codes.
4935 Output:
4936 - CODE1 is the code of a vector operation to be used when
4937 vectorizing the operation, if available.
4938 - MULTI_STEP_CVT determines the number of required intermediate steps in
4939 case of multi-step conversion (like int->short->char - in that case
4940 MULTI_STEP_CVT will be 1).
4941 - INTERM_TYPES contains the intermediate type required to perform the
4942 narrowing operation (short in the above example). */
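/* Example (illustrative): an int -> char conversion on a target that only
   provides element-pair packing is handled as a two-step demotion, so on
   success *MULTI_STEP_CVT is 1 and *INTERM_TYPES holds the single
   intermediate type (short):

     vshort_0 = VEC_PACK_TRUNC_EXPR <vint_0, vint_1>;
     vshort_1 = VEC_PACK_TRUNC_EXPR <vint_2, vint_3>;
     vchar    = VEC_PACK_TRUNC_EXPR <vshort_0, vshort_1>;  */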
4944 bool
4945 supportable_narrowing_operation (enum tree_code code,
4946 tree vectype_out, tree vectype_in,
4947 enum tree_code *code1, int *multi_step_cvt,
4948 VEC (tree, heap) **interm_types)
4950 enum machine_mode vec_mode;
4951 enum insn_code icode1;
4952 optab optab1, interm_optab;
4953 tree vectype = vectype_in;
4954 tree narrow_vectype = vectype_out;
4955 enum tree_code c1;
4956 tree intermediate_type, prev_type;
4957 int i;
4959 switch (code)
4961 CASE_CONVERT:
4962 c1 = VEC_PACK_TRUNC_EXPR;
4963 break;
4965 case FIX_TRUNC_EXPR:
4966 c1 = VEC_PACK_FIX_TRUNC_EXPR;
4967 break;
4969 case FLOAT_EXPR:
4970 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
4971 tree code and optabs used for computing the operation. */
4972 return false;
4974 default:
4975 gcc_unreachable ();
4978 if (code == FIX_TRUNC_EXPR)
4979 /* The signedness is determined from the output operand. */
4980 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
4981 else
4982 optab1 = optab_for_tree_code (c1, vectype, optab_default);
4984 if (!optab1)
4985 return false;
4987 vec_mode = TYPE_MODE (vectype);
4988 if ((icode1 = optab_handler (optab1, vec_mode)->insn_code)
4989 == CODE_FOR_nothing)
4990 return false;
4992 /* Check if it's a multi-step conversion that can be done using intermediate
4993 types. */
4994 if (insn_data[icode1].operand[0].mode != TYPE_MODE (narrow_vectype))
4996 enum machine_mode intermediate_mode, prev_mode = vec_mode;
4998 *code1 = c1;
4999 prev_type = vectype;
5000 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
5001 intermediate steps in the demotion sequence. We try MAX_INTERM_CVT_STEPS
5002 to get to NARROW_VECTYPE, and fail if we do not. */
5003 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
5004 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
5006 intermediate_mode = insn_data[icode1].operand[0].mode;
5007 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
5008 TYPE_UNSIGNED (prev_type));
5009 interm_optab = optab_for_tree_code (c1, intermediate_type,
5010 optab_default);
5011 if (!interm_optab
5012 || (icode1 = optab1->handlers[(int) prev_mode].insn_code)
5013 == CODE_FOR_nothing
5014 || insn_data[icode1].operand[0].mode != intermediate_mode
5015 || (icode1
5016 = interm_optab->handlers[(int) intermediate_mode].insn_code)
5017 == CODE_FOR_nothing)
5018 return false;
5020 VEC_quick_push (tree, *interm_types, intermediate_type);
5021 (*multi_step_cvt)++;
5023 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
5024 return true;
5026 prev_type = intermediate_type;
5027 prev_mode = intermediate_mode;
5030 return false;
5033 *code1 = c1;
5034 return true;