gcc/tree-vect-stmts.c
1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software
3 Foundation, Inc.
4 Contributed by Dorit Naishlos <dorit@il.ibm.com>
5 and Ira Rosen <irar@il.ibm.com>
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "ggc.h"
28 #include "tree.h"
29 #include "target.h"
30 #include "basic-block.h"
31 #include "diagnostic.h"
32 #include "tree-flow.h"
33 #include "tree-dump.h"
34 #include "cfgloop.h"
35 #include "cfglayout.h"
36 #include "expr.h"
37 #include "recog.h"
38 #include "optabs.h"
39 #include "toplev.h"
40 #include "tree-vectorizer.h"
41 #include "langhooks.h"
44 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
46 /* Function vect_mark_relevant.
48 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
50 static void
51 vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt,
52 enum vect_relevant relevant, bool live_p)
54 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
55 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
56 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
58 if (vect_print_dump_info (REPORT_DETAILS))
59 fprintf (vect_dump, "mark relevant %d, live %d.", relevant, live_p);
61 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
63 gimple pattern_stmt;
65 /* This is the last stmt in a sequence that was detected as a
66 pattern that can potentially be vectorized. Don't mark the stmt
67 as relevant/live because it's not going to be vectorized.
68 Instead mark the pattern-stmt that replaces it. */
70 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
72 if (vect_print_dump_info (REPORT_DETAILS))
73 fprintf (vect_dump, "last stmt in pattern. don't mark relevant/live.");
74 stmt_info = vinfo_for_stmt (pattern_stmt);
75 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
76 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
77 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
78 stmt = pattern_stmt;
81 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
82 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
83 STMT_VINFO_RELEVANT (stmt_info) = relevant;
85 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
86 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
88 if (vect_print_dump_info (REPORT_DETAILS))
89 fprintf (vect_dump, "already marked relevant/live.");
90 return;
93 VEC_safe_push (gimple, heap, *worklist, stmt);
97 /* Function vect_stmt_relevant_p.
99 Return true if STMT in loop that is represented by LOOP_VINFO is
100 "relevant for vectorization".
102 A stmt is considered "relevant for vectorization" if:
103 - it has uses outside the loop.
104 - it has vdefs (it alters memory).
105 - control stmts in the loop (except for the exit condition).
107 CHECKME: what other side effects would the vectorizer allow? */
109 static bool
110 vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
111 enum vect_relevant *relevant, bool *live_p)
113 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
114 ssa_op_iter op_iter;
115 imm_use_iterator imm_iter;
116 use_operand_p use_p;
117 def_operand_p def_p;
119 *relevant = vect_unused_in_scope;
120 *live_p = false;
122 /* cond stmt other than loop exit cond. */
123 if (is_ctrl_stmt (stmt)
124 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
125 != loop_exit_ctrl_vec_info_type)
126 *relevant = vect_used_in_scope;
128 /* changing memory. */
129 if (gimple_code (stmt) != GIMPLE_PHI)
130 if (gimple_vdef (stmt))
132 if (vect_print_dump_info (REPORT_DETAILS))
133 fprintf (vect_dump, "vec_stmt_relevant_p: stmt has vdefs.");
134 *relevant = vect_used_in_scope;
137 /* uses outside the loop. */
138 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
140 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
142 basic_block bb = gimple_bb (USE_STMT (use_p));
143 if (!flow_bb_inside_loop_p (loop, bb))
145 if (vect_print_dump_info (REPORT_DETAILS))
146 fprintf (vect_dump, "vec_stmt_relevant_p: used out of loop.");
148 if (is_gimple_debug (USE_STMT (use_p)))
149 continue;
151 /* We expect all such uses to be in the loop exit phis
152 (because of loop-closed SSA form). */
153 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
154 gcc_assert (bb == single_exit (loop)->dest);
156 *live_p = true;
161 return (*live_p || *relevant);
165 /* Function exist_non_indexing_operands_for_use_p
167 USE is one of the uses attached to STMT. Check if USE is
168 used in STMT for anything other than indexing an array. */
170 static bool
171 exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
173 tree operand;
174 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
176 /* USE corresponds to some operand in STMT. If there is no data
177 reference in STMT, then any operand that corresponds to USE
178 is not indexing an array. */
179 if (!STMT_VINFO_DATA_REF (stmt_info))
180 return true;
182 /* STMT has a data_ref. FORNOW this means that it is of one of
183 the following forms:
184 -1- ARRAY_REF = var
185 -2- var = ARRAY_REF
186 (This should have been verified in analyze_data_refs).
188 'var' in the second case corresponds to a def, not a use,
189 so USE cannot correspond to any operands that are not used
190 for array indexing.
192 Therefore, all we need to check is if STMT falls into the
193 first case, and whether var corresponds to USE. */
195 if (!gimple_assign_copy_p (stmt))
196 return false;
197 if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
198 return false;
199 operand = gimple_assign_rhs1 (stmt);
200 if (TREE_CODE (operand) != SSA_NAME)
201 return false;
203 if (operand == use)
204 return true;
206 return false;
211 /* Function process_use.
213 Inputs:
214 - a USE in STMT in a loop represented by LOOP_VINFO
215 - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
216 that defined USE. This is done by calling mark_relevant and passing it
217 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
219 Outputs:
220 Generally, LIVE_P and RELEVANT are used to define the liveness and
221 relevance info of the DEF_STMT of this USE:
222 STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
223 STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
224 Exceptions:
225 - case 1: If USE is used only for address computations (e.g. array indexing),
226 which does not need to be directly vectorized, then the liveness/relevance
227 of the respective DEF_STMT is left unchanged.
228 - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
229 skip DEF_STMT because it has already been processed.
230 - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
231 be modified accordingly.
233 Return true if everything is as expected. Return false otherwise. */
235 static bool
236 process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
237 enum vect_relevant relevant, VEC(gimple,heap) **worklist)
239 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
240 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
241 stmt_vec_info dstmt_vinfo;
242 basic_block bb, def_bb;
243 tree def;
244 gimple def_stmt;
245 enum vect_def_type dt;
247 /* case 1: we are only interested in uses that need to be vectorized. Uses
248 that are used for address computation are not considered relevant. */
249 if (!exist_non_indexing_operands_for_use_p (use, stmt))
250 return true;
252 if (!vect_is_simple_use (use, loop_vinfo, NULL, &def_stmt, &def, &dt))
254 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
255 fprintf (vect_dump, "not vectorized: unsupported use in stmt.");
256 return false;
259 if (!def_stmt || gimple_nop_p (def_stmt))
260 return true;
262 def_bb = gimple_bb (def_stmt);
263 if (!flow_bb_inside_loop_p (loop, def_bb))
265 if (vect_print_dump_info (REPORT_DETAILS))
266 fprintf (vect_dump, "def_stmt is out of loop.");
267 return true;
270 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
271 DEF_STMT must have already been processed, because this should be the
272 only way that STMT, which is a reduction-phi, was put in the worklist,
273 as there should be no other uses for DEF_STMT in the loop. So we just
274 check that everything is as expected, and we are done. */
275 dstmt_vinfo = vinfo_for_stmt (def_stmt);
276 bb = gimple_bb (stmt);
277 if (gimple_code (stmt) == GIMPLE_PHI
278 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
279 && gimple_code (def_stmt) != GIMPLE_PHI
280 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
281 && bb->loop_father == def_bb->loop_father)
283 if (vect_print_dump_info (REPORT_DETAILS))
284 fprintf (vect_dump, "reduc-stmt defining reduc-phi in the same nest.");
285 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
286 dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
287 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
288 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
289 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
290 return true;
293 /* case 3a: outer-loop stmt defining an inner-loop stmt:
294 outer-loop-header-bb:
295 d = def_stmt
296 inner-loop:
297 stmt # use (d)
298 outer-loop-tail-bb:
299 ... */
300 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
302 if (vect_print_dump_info (REPORT_DETAILS))
303 fprintf (vect_dump, "outer-loop def-stmt defining inner-loop stmt.");
305 switch (relevant)
307 case vect_unused_in_scope:
308 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
309 vect_used_in_scope : vect_unused_in_scope;
310 break;
312 case vect_used_in_outer_by_reduction:
313 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
314 relevant = vect_used_by_reduction;
315 break;
317 case vect_used_in_outer:
318 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
319 relevant = vect_used_in_scope;
320 break;
322 case vect_used_in_scope:
323 break;
325 default:
326 gcc_unreachable ();
330 /* case 3b: inner-loop stmt defining an outer-loop stmt:
331 outer-loop-header-bb:
333 inner-loop:
334 d = def_stmt
335 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
336 stmt # use (d) */
337 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
339 if (vect_print_dump_info (REPORT_DETAILS))
340 fprintf (vect_dump, "inner-loop def-stmt defining outer-loop stmt.");
342 switch (relevant)
344 case vect_unused_in_scope:
345 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
346 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
347 vect_used_in_outer_by_reduction : vect_unused_in_scope;
348 break;
350 case vect_used_by_reduction:
351 relevant = vect_used_in_outer_by_reduction;
352 break;
354 case vect_used_in_scope:
355 relevant = vect_used_in_outer;
356 break;
358 default:
359 gcc_unreachable ();
363 vect_mark_relevant (worklist, def_stmt, relevant, live_p);
364 return true;
368 /* Function vect_mark_stmts_to_be_vectorized.
370 Not all stmts in the loop need to be vectorized. For example:
372 for i...
373   for j...
374     1. T0 = i + j
375     2. T1 = a[T0]
377     3. j = j + 1
379 Stmts 1 and 3 do not need to be vectorized, because loop control and
380 addressing of vectorized data-refs are handled differently.
382 This pass detects such stmts. */
384 bool
385 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
387 VEC(gimple,heap) *worklist;
388 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
389 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
390 unsigned int nbbs = loop->num_nodes;
391 gimple_stmt_iterator si;
392 gimple stmt;
393 unsigned int i;
394 stmt_vec_info stmt_vinfo;
395 basic_block bb;
396 gimple phi;
397 bool live_p;
398 enum vect_relevant relevant, tmp_relevant;
399 enum vect_def_type def_type;
401 if (vect_print_dump_info (REPORT_DETAILS))
402 fprintf (vect_dump, "=== vect_mark_stmts_to_be_vectorized ===");
404 worklist = VEC_alloc (gimple, heap, 64);
406 /* 1. Init worklist. */
407 for (i = 0; i < nbbs; i++)
409 bb = bbs[i];
410 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
412 phi = gsi_stmt (si);
413 if (vect_print_dump_info (REPORT_DETAILS))
415 fprintf (vect_dump, "init: phi relevant? ");
416 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
419 if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
420 vect_mark_relevant (&worklist, phi, relevant, live_p);
422 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
424 stmt = gsi_stmt (si);
425 if (vect_print_dump_info (REPORT_DETAILS))
427 fprintf (vect_dump, "init: stmt relevant? ");
428 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
431 if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
432 vect_mark_relevant (&worklist, stmt, relevant, live_p);
436 /* 2. Process_worklist */
437 while (VEC_length (gimple, worklist) > 0)
439 use_operand_p use_p;
440 ssa_op_iter iter;
442 stmt = VEC_pop (gimple, worklist);
443 if (vect_print_dump_info (REPORT_DETAILS))
445 fprintf (vect_dump, "worklist: examine stmt: ");
446 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
449 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
450 (DEF_STMT) as relevant/irrelevant and live/dead according to the
451 liveness and relevance properties of STMT. */
452 stmt_vinfo = vinfo_for_stmt (stmt);
453 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
454 live_p = STMT_VINFO_LIVE_P (stmt_vinfo);
456 /* Generally, the liveness and relevance properties of STMT are
457 propagated as is to the DEF_STMTs of its USEs:
458 live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
459 relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)
461 One exception is when STMT has been identified as defining a reduction
462 variable; in this case we set the liveness/relevance as follows:
463 live_p = false
464 relevant = vect_used_by_reduction
465 This is because we distinguish between two kinds of relevant stmts -
466 those that are used by a reduction computation, and those that are
467 (also) used by a regular computation. This allows us later on to
468 identify stmts that are used solely by a reduction, and therefore the
469 order of the results that they produce does not have to be kept. */
471 def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
472 tmp_relevant = relevant;
473 switch (def_type)
475 case vect_reduction_def:
476 switch (tmp_relevant)
478 case vect_unused_in_scope:
479 relevant = vect_used_by_reduction;
480 break;
482 case vect_used_by_reduction:
483 if (gimple_code (stmt) == GIMPLE_PHI)
484 break;
485 /* fall through */
487 default:
488 if (vect_print_dump_info (REPORT_DETAILS))
489 fprintf (vect_dump, "unsupported use of reduction.");
491 VEC_free (gimple, heap, worklist);
492 return false;
495 live_p = false;
496 break;
498 case vect_nested_cycle:
499 if (tmp_relevant != vect_unused_in_scope
500 && tmp_relevant != vect_used_in_outer_by_reduction
501 && tmp_relevant != vect_used_in_outer)
503 if (vect_print_dump_info (REPORT_DETAILS))
504 fprintf (vect_dump, "unsupported use of nested cycle.");
506 VEC_free (gimple, heap, worklist);
507 return false;
510 live_p = false;
511 break;
513 case vect_double_reduction_def:
514 if (tmp_relevant != vect_unused_in_scope
515 && tmp_relevant != vect_used_by_reduction)
517 if (vect_print_dump_info (REPORT_DETAILS))
518 fprintf (vect_dump, "unsupported use of double reduction.");
520 VEC_free (gimple, heap, worklist);
521 return false;
524 live_p = false;
525 break;
527 default:
528 break;
531 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
533 tree op = USE_FROM_PTR (use_p);
534 if (!process_use (stmt, op, loop_vinfo, live_p, relevant, &worklist))
536 VEC_free (gimple, heap, worklist);
537 return false;
540 } /* while worklist */
542 VEC_free (gimple, heap, worklist);
543 return true;
547 int
548 cost_for_stmt (gimple stmt)
550 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
552 switch (STMT_VINFO_TYPE (stmt_info))
554 case load_vec_info_type:
555 return TARG_SCALAR_LOAD_COST;
556 case store_vec_info_type:
557 return TARG_SCALAR_STORE_COST;
558 case op_vec_info_type:
559 case condition_vec_info_type:
560 case assignment_vec_info_type:
561 case reduc_vec_info_type:
562 case induc_vec_info_type:
563 case type_promotion_vec_info_type:
564 case type_demotion_vec_info_type:
565 case type_conversion_vec_info_type:
566 case call_vec_info_type:
567 return TARG_SCALAR_STMT_COST;
568 case undef_vec_info_type:
569 default:
570 gcc_unreachable ();
574 /* Function vect_model_simple_cost.
576 Models cost for simple operations, i.e. those that only emit ncopies of a
577 single op. Right now, this does not account for multiple insns that could
578 be generated for the single vector op. We will handle that shortly. */
580 void
581 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
582 enum vect_def_type *dt, slp_tree slp_node)
584 int i;
585 int inside_cost = 0, outside_cost = 0;
587 /* The SLP costs were already calculated during SLP tree build. */
588 if (PURE_SLP_STMT (stmt_info))
589 return;
591 inside_cost = ncopies * TARG_VEC_STMT_COST;
593 /* FORNOW: Assuming maximum 2 args per stmt. */
594 for (i = 0; i < 2; i++)
596 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
597 outside_cost += TARG_SCALAR_TO_VEC_COST;
600 if (vect_print_dump_info (REPORT_COST))
601 fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, "
602 "outside_cost = %d .", inside_cost, outside_cost);
604 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
605 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
606 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
610 /* Function vect_cost_strided_group_size
612 For strided load or store, return the group_size only if it is the first
613 load or store of a group, else return 1. This ensures that group size is
614 only returned once per group. */
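/* For example, for an interleaved group of four stores, the first store
   in the group returns 4 and the other three return 1, so the permute
   overhead added by the cost functions below is charged to the group
   exactly once.  */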
616 static int
617 vect_cost_strided_group_size (stmt_vec_info stmt_info)
619 gimple first_stmt = DR_GROUP_FIRST_DR (stmt_info);
621 if (first_stmt == STMT_VINFO_STMT (stmt_info))
622 return DR_GROUP_SIZE (stmt_info);
624 return 1;
628 /* Function vect_model_store_cost
630 Models cost for stores. In the case of strided accesses, one access
631 has the overhead of the strided access attributed to it. */
633 void
634 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
635 enum vect_def_type dt, slp_tree slp_node)
637 int group_size;
638 int inside_cost = 0, outside_cost = 0;
640 /* The SLP costs were already calculated during SLP tree build. */
641 if (PURE_SLP_STMT (stmt_info))
642 return;
644 if (dt == vect_constant_def || dt == vect_external_def)
645 outside_cost = TARG_SCALAR_TO_VEC_COST;
647 /* Strided access? */
648 if (DR_GROUP_FIRST_DR (stmt_info) && !slp_node)
649 group_size = vect_cost_strided_group_size (stmt_info);
650 /* Not a strided access. */
651 else
652 group_size = 1;
654 /* Is this an access in a group of stores, which provide strided access?
655 If so, add in the cost of the permutes. */
656 if (group_size > 1)
658 /* Uses a high and low interleave operation for each needed permute. */
659 inside_cost = ncopies * exact_log2(group_size) * group_size
660 * TARG_VEC_STMT_COST;
662 if (vect_print_dump_info (REPORT_COST))
663 fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .",
664 group_size);
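/* For example, with group_size == 4 and ncopies == 1 the permutes above
   cost 1 * log2(4) * 4 = 8 * TARG_VEC_STMT_COST: the interleaving network
   has log2(4) = 2 stages, each applying a high/low interleave to all
   four vectors.  */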
668 /* Costs of the stores. */
669 inside_cost += ncopies * TARG_VEC_STORE_COST;
671 if (vect_print_dump_info (REPORT_COST))
672 fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, "
673 "outside_cost = %d .", inside_cost, outside_cost);
675 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
676 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
677 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
681 /* Function vect_model_load_cost
683 Models cost for loads. In the case of strided accesses, one access in the group
684 has the overhead of the strided access attributed to it. Since unaligned
685 accesses are supported for loads, we also account for the costs of the
686 access scheme chosen. */
688 void
689 vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)
692 int group_size;
693 int alignment_support_scheme;
694 gimple first_stmt;
695 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
696 int inside_cost = 0, outside_cost = 0;
698 /* The SLP costs were already calculated during SLP tree build. */
699 if (PURE_SLP_STMT (stmt_info))
700 return;
702 /* Strided accesses? */
703 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
704 if (first_stmt && !slp_node)
706 group_size = vect_cost_strided_group_size (stmt_info);
707 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
709 /* Not a strided access. */
710 else
712 group_size = 1;
713 first_dr = dr;
716 alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
718 /* Is this an access in a group of loads providing strided access?
719 If so, add in the cost of the permutes. */
720 if (group_size > 1)
722 /* Uses even and odd extract operations for each needed permute. */
723 inside_cost = ncopies * exact_log2(group_size) * group_size
724 * TARG_VEC_STMT_COST;
726 if (vect_print_dump_info (REPORT_COST))
727 fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .",
728 group_size);
732 /* The loads themselves. */
733 switch (alignment_support_scheme)
735 case dr_aligned:
737 inside_cost += ncopies * TARG_VEC_LOAD_COST;
739 if (vect_print_dump_info (REPORT_COST))
740 fprintf (vect_dump, "vect_model_load_cost: aligned.");
742 break;
744 case dr_unaligned_supported:
746 /* Here, we assign an additional cost for the unaligned load. */
747 inside_cost += ncopies * TARG_VEC_UNALIGNED_LOAD_COST;
749 if (vect_print_dump_info (REPORT_COST))
750 fprintf (vect_dump, "vect_model_load_cost: unaligned supported by "
751 "hardware.");
753 break;
755 case dr_explicit_realign:
757 inside_cost += ncopies * (2*TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST);
759 /* FIXME: If the misalignment remains fixed across the iterations of
760 the containing loop, the following cost should be added to the
761 outside costs. */
762 if (targetm.vectorize.builtin_mask_for_load)
763 inside_cost += TARG_VEC_STMT_COST;
765 break;
767 case dr_explicit_realign_optimized:
769 if (vect_print_dump_info (REPORT_COST))
770 fprintf (vect_dump, "vect_model_load_cost: unaligned software "
771 "pipelined.");
773 /* Unaligned software pipeline has a load of an address, an initial
774 load, and possibly a mask operation to "prime" the loop. However,
775 if this is an access in a group of loads, which provide strided
776 access, then the above cost should only be considered for one
777 access in the group. Inside the loop, there is a load op
778 and a realignment op. */
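/* For example, with ncopies == 2 on a target that provides
   builtin_mask_for_load, the one-time setup below adds
   3 * TARG_VEC_STMT_COST to the outside cost (load of the address,
   initial load, mask "priming"), and the loop body adds
   2 * (TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST) for the per-copy load
   and realignment operations.  */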
780 if ((!DR_GROUP_FIRST_DR (stmt_info)) || group_size > 1 || slp_node)
782 outside_cost = 2*TARG_VEC_STMT_COST;
783 if (targetm.vectorize.builtin_mask_for_load)
784 outside_cost += TARG_VEC_STMT_COST;
787 inside_cost += ncopies * (TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST);
789 break;
792 default:
793 gcc_unreachable ();
796 if (vect_print_dump_info (REPORT_COST))
797 fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
798 "outside_cost = %d .", inside_cost, outside_cost);
800 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
801 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
802 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
806 /* Function vect_init_vector.
808 Insert a new stmt (INIT_STMT) that initializes a new vector variable with
809 the vector elements of VECTOR_VAR. Place the initialization at BSI if it
810 is not NULL. Otherwise, place the initialization at the loop preheader.
811 Return the DEF of INIT_STMT.
812 It will be used in the vectorization of STMT. */
814 tree
815 vect_init_vector (gimple stmt, tree vector_var, tree vector_type,
816 gimple_stmt_iterator *gsi)
818 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
819 tree new_var;
820 gimple init_stmt;
821 tree vec_oprnd;
822 edge pe;
823 tree new_temp;
824 basic_block new_bb;
826 new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
827 add_referenced_var (new_var);
828 init_stmt = gimple_build_assign (new_var, vector_var);
829 new_temp = make_ssa_name (new_var, init_stmt);
830 gimple_assign_set_lhs (init_stmt, new_temp);
832 if (gsi)
833 vect_finish_stmt_generation (stmt, init_stmt, gsi);
834 else
836 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
838 if (loop_vinfo)
840 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
842 if (nested_in_vect_loop_p (loop, stmt))
843 loop = loop->inner;
845 pe = loop_preheader_edge (loop);
846 new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
847 gcc_assert (!new_bb);
849 else
851 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
852 basic_block bb;
853 gimple_stmt_iterator gsi_bb_start;
855 gcc_assert (bb_vinfo);
856 bb = BB_VINFO_BB (bb_vinfo);
857 gsi_bb_start = gsi_after_labels (bb);
858 gsi_insert_before (&gsi_bb_start, init_stmt, GSI_SAME_STMT);
862 if (vect_print_dump_info (REPORT_DETAILS))
864 fprintf (vect_dump, "created new init_stmt: ");
865 print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
868 vec_oprnd = gimple_assign_lhs (init_stmt);
869 return vec_oprnd;
873 /* Function vect_get_vec_def_for_operand.
875 OP is an operand in STMT. This function returns a (vector) def that will be
876 used in the vectorized stmt for STMT.
878 In the case that OP is an SSA_NAME which is defined in the loop, then
879 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
881 In case OP is an invariant or constant, a new stmt that creates a vector def
882 needs to be introduced. */
884 tree
885 vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
887 tree vec_oprnd;
888 gimple vec_stmt;
889 gimple def_stmt;
890 stmt_vec_info def_stmt_info = NULL;
891 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
892 tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
893 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
894 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
895 tree vec_inv;
896 tree vec_cst;
897 tree t = NULL_TREE;
898 tree def;
899 int i;
900 enum vect_def_type dt;
901 bool is_simple_use;
902 tree vector_type;
904 if (vect_print_dump_info (REPORT_DETAILS))
906 fprintf (vect_dump, "vect_get_vec_def_for_operand: ");
907 print_generic_expr (vect_dump, op, TDF_SLIM);
910 is_simple_use = vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def,
911 &dt);
912 gcc_assert (is_simple_use);
913 if (vect_print_dump_info (REPORT_DETAILS))
915 if (def)
917 fprintf (vect_dump, "def = ");
918 print_generic_expr (vect_dump, def, TDF_SLIM);
920 if (def_stmt)
922 fprintf (vect_dump, " def_stmt = ");
923 print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
927 switch (dt)
929 /* Case 1: operand is a constant. */
930 case vect_constant_def:
932 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
933 gcc_assert (vector_type);
935 if (scalar_def)
936 *scalar_def = op;
938 /* Create 'vect_cst_ = {cst,cst,...,cst}' */
939 if (vect_print_dump_info (REPORT_DETAILS))
940 fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits);
942 for (i = nunits - 1; i >= 0; --i)
944 t = tree_cons (NULL_TREE, op, t);
946 vec_cst = build_vector (vector_type, t);
947 return vect_init_vector (stmt, vec_cst, vector_type, NULL);
950 /* Case 2: operand is defined outside the loop - loop invariant. */
951 case vect_external_def:
953 vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
954 gcc_assert (vector_type);
955 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
957 if (scalar_def)
958 *scalar_def = def;
960 /* Create 'vec_inv = {inv,inv,..,inv}' */
961 if (vect_print_dump_info (REPORT_DETAILS))
962 fprintf (vect_dump, "Create vector_inv.");
964 for (i = nunits - 1; i >= 0; --i)
966 t = tree_cons (NULL_TREE, def, t);
969 /* FIXME: use build_constructor directly. */
970 vec_inv = build_constructor_from_list (vector_type, t);
971 return vect_init_vector (stmt, vec_inv, vector_type, NULL);
974 /* Case 3: operand is defined inside the loop. */
975 case vect_internal_def:
977 if (scalar_def)
978 *scalar_def = NULL/* FIXME tuples: def_stmt*/;
980 /* Get the def from the vectorized stmt. */
981 def_stmt_info = vinfo_for_stmt (def_stmt);
982 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
983 gcc_assert (vec_stmt);
984 if (gimple_code (vec_stmt) == GIMPLE_PHI)
985 vec_oprnd = PHI_RESULT (vec_stmt);
986 else if (is_gimple_call (vec_stmt))
987 vec_oprnd = gimple_call_lhs (vec_stmt);
988 else
989 vec_oprnd = gimple_assign_lhs (vec_stmt);
990 return vec_oprnd;
993 /* Case 4: operand is defined by a loop header phi - reduction */
994 case vect_reduction_def:
995 case vect_double_reduction_def:
996 case vect_nested_cycle:
998 struct loop *loop;
1000 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1001 loop = (gimple_bb (def_stmt))->loop_father;
1003 /* Get the def before the loop */
1004 op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
1005 return get_initial_def_for_reduction (stmt, op, scalar_def);
1008 /* Case 5: operand is defined by loop-header phi - induction. */
1009 case vect_induction_def:
1011 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1013 /* Get the def from the vectorized stmt. */
1014 def_stmt_info = vinfo_for_stmt (def_stmt);
1015 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1016 gcc_assert (vec_stmt && gimple_code (vec_stmt) == GIMPLE_PHI);
1017 vec_oprnd = PHI_RESULT (vec_stmt);
1018 return vec_oprnd;
1021 default:
1022 gcc_unreachable ();
1027 /* Function vect_get_vec_def_for_stmt_copy
1029 Return a vector-def for an operand. This function is used when the
1030 vectorized stmt to be created (by the caller to this function) is a "copy"
1031 created in case the vectorized result cannot fit in one vector, and several
1032 copies of the vector-stmt are required. In this case the vector-def is
1033 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1034 of the stmt that defines VEC_OPRND.
1035 DT is the type of the vector def VEC_OPRND.
1037 Context:
1038 In case the vectorization factor (VF) is bigger than the number
1039 of elements that can fit in a vectype (nunits), we have to generate
1040 more than one vector stmt to vectorize the scalar stmt. This situation
1041 arises when there are multiple data-types operated upon in the loop; the
1042 smallest data-type determines the VF, and as a result, when vectorizing
1043 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1044 vector stmt (each computing a vector of 'nunits' results, and together
1045 computing 'VF' results in each iteration). This function is called when
1046 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1047 which VF=16 and nunits=4, so the number of copies required is 4):
1049 scalar stmt:            vectorized into:               STMT_VINFO_RELATED_STMT
1051 S1: x = load            VS1.0: vx.0 = memref0          VS1.1
1052                         VS1.1: vx.1 = memref1          VS1.2
1053                         VS1.2: vx.2 = memref2          VS1.3
1054                         VS1.3: vx.3 = memref3
1056 S2: z = x + ...         VSnew.0: vz0 = vx.0 + ...      VSnew.1
1057                         VSnew.1: vz1 = vx.1 + ...      VSnew.2
1058                         VSnew.2: vz2 = vx.2 + ...      VSnew.3
1059                         VSnew.3: vz3 = vx.3 + ...
1061 The vectorization of S1 is explained in vectorizable_load.
1062 The vectorization of S2:
1063 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1064 the function 'vect_get_vec_def_for_operand' is called to
1065 get the relevant vector-def for each operand of S2. For operand x it
1066 returns the vector-def 'vx.0'.
1068 To create the remaining copies of the vector-stmt (VSnew.j), this
1069 function is called to get the relevant vector-def for each operand. It is
1070 obtained from the respective VS1.j stmt, which is recorded in the
1071 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1073 For example, to obtain the vector-def 'vx.1' in order to create the
1074 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1075 Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
1076 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1077 and return its def ('vx.1').
1078 Overall, to create the above sequence this function will be called 3 times:
1079 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1080 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1081 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1083 tree
1084 vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1086 gimple vec_stmt_for_operand;
1087 stmt_vec_info def_stmt_info;
1089 /* Do nothing; can reuse same def. */
1090 if (dt == vect_external_def || dt == vect_constant_def )
1091 return vec_oprnd;
1093 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1094 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1095 gcc_assert (def_stmt_info);
1096 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1097 gcc_assert (vec_stmt_for_operand);
1098 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1099 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1100 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1101 else
1102 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1103 return vec_oprnd;
1107 /* Get vectorized definitions for the operands to create a copy of an original
1108 stmt. See vect_get_vec_def_for_stmt_copy() for details. */
1110 static void
1111 vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
1112 VEC(tree,heap) **vec_oprnds0,
1113 VEC(tree,heap) **vec_oprnds1)
1115 tree vec_oprnd = VEC_pop (tree, *vec_oprnds0);
1117 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
1118 VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
1120 if (vec_oprnds1 && *vec_oprnds1)
1122 vec_oprnd = VEC_pop (tree, *vec_oprnds1);
1123 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
1124 VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
1129 /* Get vectorized definitions for OP0 and OP1, or from SLP_NODE if it is not NULL. */
1131 static void
1132 vect_get_vec_defs (tree op0, tree op1, gimple stmt,
1133 VEC(tree,heap) **vec_oprnds0, VEC(tree,heap) **vec_oprnds1,
1134 slp_tree slp_node)
1136 if (slp_node)
1137 vect_get_slp_defs (slp_node, vec_oprnds0, vec_oprnds1);
1138 else
1140 tree vec_oprnd;
1142 *vec_oprnds0 = VEC_alloc (tree, heap, 1);
1143 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
1144 VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
1146 if (op1)
1148 *vec_oprnds1 = VEC_alloc (tree, heap, 1);
1149 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
1150 VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
1156 /* Function vect_finish_stmt_generation.
1158 Insert a new stmt. */
1160 void
1161 vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
1162 gimple_stmt_iterator *gsi)
1164 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1165 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1166 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1168 gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
1170 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1172 set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
1173 bb_vinfo));
1175 if (vect_print_dump_info (REPORT_DETAILS))
1177 fprintf (vect_dump, "add new stmt: ");
1178 print_gimple_stmt (vect_dump, vec_stmt, 0, TDF_SLIM);
1181 gimple_set_location (vec_stmt, gimple_location (gsi_stmt (*gsi)));
1184 /* Checks if CALL can be vectorized in type VECTYPE. Returns
1185 a function declaration if the target has a vectorized version
1186 of the function, or NULL_TREE if the function cannot be vectorized. */
1188 tree
1189 vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
1191 tree fndecl = gimple_call_fndecl (call);
1192 enum built_in_function code;
1194 /* We only handle functions that do not read or clobber memory -- i.e.
1195 const or novops ones. */
1196 if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
1197 return NULL_TREE;
1199 if (!fndecl
1200 || TREE_CODE (fndecl) != FUNCTION_DECL
1201 || !DECL_BUILT_IN (fndecl))
1202 return NULL_TREE;
1204 code = DECL_FUNCTION_CODE (fndecl);
1205 return targetm.vectorize.builtin_vectorized_function (code, vectype_out,
1206 vectype_in);
1209 /* Function vectorizable_call.
1211 Check if STMT performs a function call that can be vectorized.
1212 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1213 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1214 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1216 static bool
1217 vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
1219 tree vec_dest;
1220 tree scalar_dest;
1221 tree op, type;
1222 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1223 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
1224 tree vectype_out, vectype_in;
1225 int nunits_in;
1226 int nunits_out;
1227 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1228 tree fndecl, new_temp, def, rhs_type, lhs_type;
1229 gimple def_stmt;
1230 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1231 gimple new_stmt = NULL;
1232 int ncopies, j;
1233 VEC(tree, heap) *vargs = NULL;
1234 enum { NARROW, NONE, WIDEN } modifier;
1235 size_t i, nargs;
1237 /* FORNOW: unsupported in basic block SLP. */
1238 gcc_assert (loop_vinfo);
1240 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1241 return false;
1243 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1244 return false;
1246 /* FORNOW: SLP not supported. */
1247 if (STMT_SLP_TYPE (stmt_info))
1248 return false;
1250 /* Is STMT a vectorizable call? */
1251 if (!is_gimple_call (stmt))
1252 return false;
1254 if (TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
1255 return false;
1257 /* Process function arguments. */
1258 rhs_type = NULL_TREE;
1259 nargs = gimple_call_num_args (stmt);
1261 /* Bail out if the function has more than two arguments; we
1262 do not have interesting builtin functions to vectorize with
1263 more than two arguments. A call with no arguments is not interesting either. */
1264 if (nargs == 0 || nargs > 2)
1265 return false;
1267 for (i = 0; i < nargs; i++)
1269 op = gimple_call_arg (stmt, i);
1271 /* We can only handle calls with arguments of the same type. */
1272 if (rhs_type
1273 && rhs_type != TREE_TYPE (op))
1275 if (vect_print_dump_info (REPORT_DETAILS))
1276 fprintf (vect_dump, "argument types differ.");
1277 return false;
1279 rhs_type = TREE_TYPE (op);
1281 if (!vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def, &dt[i]))
1283 if (vect_print_dump_info (REPORT_DETAILS))
1284 fprintf (vect_dump, "use not simple.");
1285 return false;
1289 vectype_in = get_vectype_for_scalar_type (rhs_type);
1290 if (!vectype_in)
1291 return false;
1292 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1294 lhs_type = TREE_TYPE (gimple_call_lhs (stmt));
1295 vectype_out = get_vectype_for_scalar_type (lhs_type);
1296 if (!vectype_out)
1297 return false;
1298 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1300 /* FORNOW */
1301 if (nunits_in == nunits_out / 2)
1302 modifier = NARROW;
1303 else if (nunits_out == nunits_in)
1304 modifier = NONE;
1305 else if (nunits_out == nunits_in / 2)
1306 modifier = WIDEN;
1307 else
1308 return false;
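/* For example, on a target with 128-bit vectors, a call taking a float
   argument (V4SF, nunits_in == 4) and producing a double result
   (V2DF, nunits_out == 2) is a WIDEN case, while the opposite direction
   (double argument, float result) is a NARROW case.  */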
1310 /* For now, we only vectorize functions if a target specific builtin
1311 is available. TODO -- in some cases, it might be profitable to
1312 insert the calls for pieces of the vector, in order to be able
1313 to vectorize other operations in the loop. */
1314 fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
1315 if (fndecl == NULL_TREE)
1317 if (vect_print_dump_info (REPORT_DETAILS))
1318 fprintf (vect_dump, "function is not vectorizable.");
1320 return false;
1323 gcc_assert (!gimple_vuse (stmt));
1325 if (modifier == NARROW)
1326 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1327 else
1328 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1330 /* Sanity check: make sure that at least one copy of the vectorized stmt
1331 needs to be generated. */
1332 gcc_assert (ncopies >= 1);
1334 if (!vec_stmt) /* transformation not required. */
1336 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
1337 if (vect_print_dump_info (REPORT_DETAILS))
1338 fprintf (vect_dump, "=== vectorizable_call ===");
1339 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1340 return true;
1343 /** Transform. **/
1345 if (vect_print_dump_info (REPORT_DETAILS))
1346 fprintf (vect_dump, "transform operation.");
1348 /* Handle def. */
1349 scalar_dest = gimple_call_lhs (stmt);
1350 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1352 prev_stmt_info = NULL;
1353 switch (modifier)
1355 case NONE:
1356 for (j = 0; j < ncopies; ++j)
1358 /* Build argument list for the vectorized call. */
1359 if (j == 0)
1360 vargs = VEC_alloc (tree, heap, nargs);
1361 else
1362 VEC_truncate (tree, vargs, 0);
1364 for (i = 0; i < nargs; i++)
1366 op = gimple_call_arg (stmt, i);
1367 if (j == 0)
1368 vec_oprnd0
1369 = vect_get_vec_def_for_operand (op, stmt, NULL);
1370 else
1372 vec_oprnd0 = gimple_call_arg (new_stmt, i);
1373 vec_oprnd0
1374 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1377 VEC_quick_push (tree, vargs, vec_oprnd0);
1380 new_stmt = gimple_build_call_vec (fndecl, vargs);
1381 new_temp = make_ssa_name (vec_dest, new_stmt);
1382 gimple_call_set_lhs (new_stmt, new_temp);
1384 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1385 mark_symbols_for_renaming (new_stmt);
1387 if (j == 0)
1388 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1389 else
1390 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1392 prev_stmt_info = vinfo_for_stmt (new_stmt);
1395 break;
1397 case NARROW:
1398 for (j = 0; j < ncopies; ++j)
1400 /* Build argument list for the vectorized call. */
1401 if (j == 0)
1402 vargs = VEC_alloc (tree, heap, nargs * 2);
1403 else
1404 VEC_truncate (tree, vargs, 0);
1406 for (i = 0; i < nargs; i++)
1408 op = gimple_call_arg (stmt, i);
1409 if (j == 0)
1411 vec_oprnd0
1412 = vect_get_vec_def_for_operand (op, stmt, NULL);
1413 vec_oprnd1
1414 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1416 else
1418 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i);
1419 vec_oprnd0
1420 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
1421 vec_oprnd1
1422 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1425 VEC_quick_push (tree, vargs, vec_oprnd0);
1426 VEC_quick_push (tree, vargs, vec_oprnd1);
1429 new_stmt = gimple_build_call_vec (fndecl, vargs);
1430 new_temp = make_ssa_name (vec_dest, new_stmt);
1431 gimple_call_set_lhs (new_stmt, new_temp);
1433 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1434 mark_symbols_for_renaming (new_stmt);
1436 if (j == 0)
1437 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1438 else
1439 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1441 prev_stmt_info = vinfo_for_stmt (new_stmt);
1444 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1446 break;
1448 case WIDEN:
1449 /* No current target implements this case. */
1450 return false;
1453 VEC_free (tree, heap, vargs);
1455 /* Update the exception handling table with the vector stmt if necessary. */
1456 if (maybe_clean_or_replace_eh_stmt (stmt, *vec_stmt))
1457 gimple_purge_dead_eh_edges (gimple_bb (stmt));
1459 /* The call in STMT might prevent it from being removed in dce.
1460 We however cannot remove it here, due to the way the ssa name
1461 it defines is mapped to the new definition. So just replace
1462 the rhs of the statement with something harmless. */
1464 type = TREE_TYPE (scalar_dest);
1465 new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
1466 fold_convert (type, integer_zero_node));
1467 set_vinfo_for_stmt (new_stmt, stmt_info);
1468 set_vinfo_for_stmt (stmt, NULL);
1469 STMT_VINFO_STMT (stmt_info) = new_stmt;
1470 gsi_replace (gsi, new_stmt, false);
1471 SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
1473 return true;
1477 /* Function vect_gen_widened_results_half
1479 Create a vector stmt whose code, number of arguments, and result
1480 variable are CODE, OP_TYPE, and VEC_DEST, and whose arguments are
1481 VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at BSI.
1482 In the case that CODE is a CALL_EXPR, this means that a call to DECL
1483 needs to be created (DECL is a function-decl of a target-builtin).
1484 STMT is the original scalar stmt that we are vectorizing. */
1486 static gimple
1487 vect_gen_widened_results_half (enum tree_code code,
1488 tree decl,
1489 tree vec_oprnd0, tree vec_oprnd1, int op_type,
1490 tree vec_dest, gimple_stmt_iterator *gsi,
1491 gimple stmt)
1493 gimple new_stmt;
1494 tree new_temp;
1496 /* Generate half of the widened result: */
1497 if (code == CALL_EXPR)
1499 /* Target specific support */
1500 if (op_type == binary_op)
1501 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
1502 else
1503 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
1504 new_temp = make_ssa_name (vec_dest, new_stmt);
1505 gimple_call_set_lhs (new_stmt, new_temp);
1507 else
1509 /* Generic support */
1510 gcc_assert (op_type == TREE_CODE_LENGTH (code));
1511 if (op_type != binary_op)
1512 vec_oprnd1 = NULL;
1513 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
1514 vec_oprnd1);
1515 new_temp = make_ssa_name (vec_dest, new_stmt);
1516 gimple_assign_set_lhs (new_stmt, new_temp);
1518 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1520 return new_stmt;
1524 /* Check if STMT performs a conversion operation that can be vectorized.
1525 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1526 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1527 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1529 static bool
1530 vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
1531 gimple *vec_stmt, slp_tree slp_node)
1533 tree vec_dest;
1534 tree scalar_dest;
1535 tree op0;
1536 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1537 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1538 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1539 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
1540 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
1541 tree new_temp;
1542 tree def;
1543 gimple def_stmt;
1544 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1545 gimple new_stmt = NULL;
1546 stmt_vec_info prev_stmt_info;
1547 int nunits_in;
1548 int nunits_out;
1549 tree vectype_out, vectype_in;
1550 int ncopies, j;
1551 tree rhs_type, lhs_type;
1552 tree builtin_decl;
1553 enum { NARROW, NONE, WIDEN } modifier;
1554 int i;
1555 VEC(tree,heap) *vec_oprnds0 = NULL;
1556 tree vop0;
1557 tree integral_type;
1558 VEC(tree,heap) *dummy = NULL;
1559 int dummy_int;
1561 /* Is STMT a vectorizable conversion? */
1563 /* FORNOW: unsupported in basic block SLP. */
1564 gcc_assert (loop_vinfo);
1566 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1567 return false;
1569 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1570 return false;
1572 if (!is_gimple_assign (stmt))
1573 return false;
1575 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
1576 return false;
1578 code = gimple_assign_rhs_code (stmt);
1579 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
1580 return false;
1582 /* Check types of lhs and rhs. */
1583 op0 = gimple_assign_rhs1 (stmt);
1584 rhs_type = TREE_TYPE (op0);
1585 vectype_in = get_vectype_for_scalar_type (rhs_type);
1586 if (!vectype_in)
1587 return false;
1588 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1590 scalar_dest = gimple_assign_lhs (stmt);
1591 lhs_type = TREE_TYPE (scalar_dest);
1592 vectype_out = get_vectype_for_scalar_type (lhs_type);
1593 if (!vectype_out)
1594 return false;
1595 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1597 /* FORNOW */
1598 if (nunits_in == nunits_out / 2)
1599 modifier = NARROW;
1600 else if (nunits_out == nunits_in)
1601 modifier = NONE;
1602 else if (nunits_out == nunits_in / 2)
1603 modifier = WIDEN;
1604 else
1605 return false;
1607 if (modifier == NONE)
1608 gcc_assert (STMT_VINFO_VECTYPE (stmt_info) == vectype_out);
1610 /* Bail out if both types are integral or both are non-integral. */
1611 if ((INTEGRAL_TYPE_P (rhs_type) && INTEGRAL_TYPE_P (lhs_type))
1612 || (!INTEGRAL_TYPE_P (rhs_type) && !INTEGRAL_TYPE_P (lhs_type)))
1613 return false;
1615 integral_type = INTEGRAL_TYPE_P (rhs_type) ? vectype_in : vectype_out;
1617 if (modifier == NARROW)
1618 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1619 else
1620 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1622 /* FORNOW: SLP with multiple types is not supported. The SLP analysis verifies
1623 this, so we can safely override NCOPIES with 1 here. */
1624 if (slp_node)
1625 ncopies = 1;
1627 /* Sanity check: make sure that at least one copy of the vectorized stmt
1628 needs to be generated. */
1629 gcc_assert (ncopies >= 1);
1631 /* Check the operands of the operation. */
1632 if (!vect_is_simple_use (op0, loop_vinfo, NULL, &def_stmt, &def, &dt[0]))
1634 if (vect_print_dump_info (REPORT_DETAILS))
1635 fprintf (vect_dump, "use not simple.");
1636 return false;
1639 /* Supportable by target? */
1640 if ((modifier == NONE
1641 && !targetm.vectorize.builtin_conversion (code, integral_type))
1642 || (modifier == WIDEN
1643 && !supportable_widening_operation (code, stmt, vectype_in,
1644 &decl1, &decl2,
1645 &code1, &code2,
1646 &dummy_int, &dummy))
1647 || (modifier == NARROW
1648 && !supportable_narrowing_operation (code, stmt, vectype_in,
1649 &code1, &dummy_int, &dummy)))
1651 if (vect_print_dump_info (REPORT_DETAILS))
1652 fprintf (vect_dump, "conversion not supported by target.");
1653 return false;
1656 if (modifier != NONE)
1658 STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
1659 /* FORNOW: SLP not supported. */
1660 if (STMT_SLP_TYPE (stmt_info))
1661 return false;
1664 if (!vec_stmt) /* transformation not required. */
1666 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
1667 return true;
1670 /** Transform. **/
1671 if (vect_print_dump_info (REPORT_DETAILS))
1672 fprintf (vect_dump, "transform conversion.");
1674 /* Handle def. */
1675 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1677 if (modifier == NONE && !slp_node)
1678 vec_oprnds0 = VEC_alloc (tree, heap, 1);
1680 prev_stmt_info = NULL;
1681 switch (modifier)
1683 case NONE:
1684 for (j = 0; j < ncopies; j++)
1686 if (j == 0)
1687 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
1688 else
1689 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
1691 builtin_decl =
1692 targetm.vectorize.builtin_conversion (code, integral_type);
1693 for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vop0); i++)
1695 /* Arguments are ready. Create the new vector stmt. */
1696 new_stmt = gimple_build_call (builtin_decl, 1, vop0);
1697 new_temp = make_ssa_name (vec_dest, new_stmt);
1698 gimple_call_set_lhs (new_stmt, new_temp);
1699 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1700 if (slp_node)
1701 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
1704 if (j == 0)
1705 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1706 else
1707 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1708 prev_stmt_info = vinfo_for_stmt (new_stmt);
1710 break;
1712 case WIDEN:
1713 /* In case the vectorization factor (VF) is bigger than the number
1714 of elements that we can fit in a vectype (nunits), we have to
1715 generate more than one vector stmt - i.e., we need to "unroll"
1716 the vector stmt by a factor VF/nunits. */
1717 for (j = 0; j < ncopies; j++)
1719 if (j == 0)
1720 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1721 else
1722 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1724 STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
1726 /* Generate first half of the widened result: */
1727 new_stmt
1728 = vect_gen_widened_results_half (code1, decl1,
1729 vec_oprnd0, vec_oprnd1,
1730 unary_op, vec_dest, gsi, stmt);
1731 if (j == 0)
1732 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1733 else
1734 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1735 prev_stmt_info = vinfo_for_stmt (new_stmt);
1737 /* Generate second half of the widened result: */
1738 new_stmt
1739 = vect_gen_widened_results_half (code2, decl2,
1740 vec_oprnd0, vec_oprnd1,
1741 unary_op, vec_dest, gsi, stmt);
1742 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1743 prev_stmt_info = vinfo_for_stmt (new_stmt);
1745 break;
1747 case NARROW:
1748 /* In case the vectorization factor (VF) is bigger than the number
1749 of elements that we can fit in a vectype (nunits), we have to
1750 generate more than one vector stmt - i.e., we need to "unroll"
1751 the vector stmt by a factor VF/nunits. */
1752 for (j = 0; j < ncopies; j++)
1754 /* Handle uses. */
1755 if (j == 0)
1757 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1758 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1760 else
1762 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1);
1763 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1766 /* Arguments are ready. Create the new vector stmt. */
1767 new_stmt = gimple_build_assign_with_ops (code1, vec_dest, vec_oprnd0,
1768 vec_oprnd1);
1769 new_temp = make_ssa_name (vec_dest, new_stmt);
1770 gimple_assign_set_lhs (new_stmt, new_temp);
1771 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1773 if (j == 0)
1774 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1775 else
1776 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1778 prev_stmt_info = vinfo_for_stmt (new_stmt);
1781 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1784 if (vec_oprnds0)
1785 VEC_free (tree, heap, vec_oprnds0);
1787 return true;
1789 /* Function vectorizable_assignment.
1791 Check if STMT performs an assignment (copy) that can be vectorized.
1792 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1793 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1794 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1796 static bool
1797 vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
1798 gimple *vec_stmt, slp_tree slp_node)
1800 tree vec_dest;
1801 tree scalar_dest;
1802 tree op;
1803 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1804 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1805 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1806 tree new_temp;
1807 tree def;
1808 gimple def_stmt;
1809 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1810 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
1811 int ncopies;
1812 int i, j;
1813 VEC(tree,heap) *vec_oprnds = NULL;
1814 tree vop;
1815 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1816 gimple new_stmt = NULL;
1817 stmt_vec_info prev_stmt_info = NULL;
1819 /* Multiple types in SLP are handled by creating the appropriate number of
1820 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1821 case of SLP. */
1822 if (slp_node)
1823 ncopies = 1;
1824 else
1825 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
1827 gcc_assert (ncopies >= 1);
1829 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
1830 return false;
1832 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1833 return false;
1835 /* Is vectorizable assignment? */
1836 if (!is_gimple_assign (stmt))
1837 return false;
1839 scalar_dest = gimple_assign_lhs (stmt);
1840 if (TREE_CODE (scalar_dest) != SSA_NAME)
1841 return false;
1843 if (gimple_assign_single_p (stmt)
1844 || gimple_assign_rhs_code (stmt) == PAREN_EXPR)
1845 op = gimple_assign_rhs1 (stmt);
1846 else
1847 return false;
1849 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt[0]))
1851 if (vect_print_dump_info (REPORT_DETAILS))
1852 fprintf (vect_dump, "use not simple.");
1853 return false;
1856 if (!vec_stmt) /* transformation not required. */
1858 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
1859 if (vect_print_dump_info (REPORT_DETAILS))
1860 fprintf (vect_dump, "=== vectorizable_assignment ===");
1861 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1862 return true;
1865 /** Transform. **/
1866 if (vect_print_dump_info (REPORT_DETAILS))
1867 fprintf (vect_dump, "transform assignment.");
1869 /* Handle def. */
1870 vec_dest = vect_create_destination_var (scalar_dest, vectype);
1872 /* Handle use. */
1873 for (j = 0; j < ncopies; j++)
1875 /* Handle uses. */
1876 if (j == 0)
1877 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
1878 else
1879 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
1881 /* Arguments are ready. Create the new vector stmt. */
1882 for (i = 0; VEC_iterate (tree, vec_oprnds, i, vop); i++)
1884 new_stmt = gimple_build_assign (vec_dest, vop);
1885 new_temp = make_ssa_name (vec_dest, new_stmt);
1886 gimple_assign_set_lhs (new_stmt, new_temp);
1887 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1888 if (slp_node)
1889 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
1892 if (slp_node)
1893 continue;
1895 if (j == 0)
1896 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1897 else
1898 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1900 prev_stmt_info = vinfo_for_stmt (new_stmt);
1903 VEC_free (tree, heap, vec_oprnds);
1904 return true;
1907 /* Function vectorizable_operation.
1909 Check if STMT performs a binary or unary operation that can be vectorized.
1910 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1911 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1912 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1914 static bool
1915 vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
1916 gimple *vec_stmt, slp_tree slp_node)
1918 tree vec_dest;
1919 tree scalar_dest;
1920 tree op0, op1 = NULL;
1921 tree vec_oprnd1 = NULL_TREE;
1922 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1923 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1924 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1925 enum tree_code code;
1926 enum machine_mode vec_mode;
1927 tree new_temp;
1928 int op_type;
1929 optab optab;
1930 int icode;
1931 enum machine_mode optab_op2_mode;
1932 tree def;
1933 gimple def_stmt;
1934 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1935 gimple new_stmt = NULL;
1936 stmt_vec_info prev_stmt_info;
1937 int nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
1938 int nunits_out;
1939 tree vectype_out;
1940 int ncopies;
1941 int j, i;
1942 VEC(tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
1943 tree vop0, vop1;
1944 unsigned int k;
1945 bool scalar_shift_arg = false;
1946 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1947 int vf;
1949 if (loop_vinfo)
1950 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1951 else
1952 vf = 1;
1954 /* Multiple types in SLP are handled by creating the appropriate number of
1955 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1956 case of SLP. */
1957 if (slp_node)
1958 ncopies = 1;
1959 else
1960 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1962 gcc_assert (ncopies >= 1);
1964 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
1965 return false;
1967 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1968 return false;
1970 /* Is STMT a vectorizable binary/unary operation? */
1971 if (!is_gimple_assign (stmt))
1972 return false;
1974 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
1975 return false;
1977 scalar_dest = gimple_assign_lhs (stmt);
1978 vectype_out = get_vectype_for_scalar_type (TREE_TYPE (scalar_dest));
1979 if (!vectype_out)
1980 return false;
1981 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1982 if (nunits_out != nunits_in)
1983 return false;
1985 code = gimple_assign_rhs_code (stmt);
1987 /* For pointer addition, we should use the normal plus for
1988 the vector addition. */
1989 if (code == POINTER_PLUS_EXPR)
1990 code = PLUS_EXPR;
1992 /* Support only unary or binary operations. */
1993 op_type = TREE_CODE_LENGTH (code);
1994 if (op_type != unary_op && op_type != binary_op)
1996 if (vect_print_dump_info (REPORT_DETAILS))
1997 fprintf (vect_dump, "num. args = %d (not unary/binary op).", op_type);
1998 return false;
2001 op0 = gimple_assign_rhs1 (stmt);
2002 if (!vect_is_simple_use (op0, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt[0]))
2004 if (vect_print_dump_info (REPORT_DETAILS))
2005 fprintf (vect_dump, "use not simple.");
2006 return false;
2009 if (op_type == binary_op)
2011 op1 = gimple_assign_rhs2 (stmt);
2012 if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def,
2013 &dt[1]))
2015 if (vect_print_dump_info (REPORT_DETAILS))
2016 fprintf (vect_dump, "use not simple.");
2017 return false;
2021 /* If this is a shift/rotate, determine whether the shift amount is a vector,
2022 or scalar. If the shift/rotate amount is a vector, use the vector/vector
2023 shift optabs. */
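/* Assumed example, for illustration only: in  a[i] = b[i] << c[i]  the
   shift amount is a loop-varying (internal) def, so the vector/vector
   shift optab is chosen, whereas in  a[i] = b[i] << 3  the amount is a
   constant, so the vector/scalar optab is tried first and the
   vector/vector optab is used only as a fallback.  */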
2024 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
2025 || code == RROTATE_EXPR)
2027 /* vector shifted by vector */
2028 if (dt[1] == vect_internal_def)
2030 optab = optab_for_tree_code (code, vectype, optab_vector);
2031 if (vect_print_dump_info (REPORT_DETAILS))
2032 fprintf (vect_dump, "vector/vector shift/rotate found.");
2035 /* See if the machine has a vector shifted by scalar insn, and if not
2036 then see if it has a vector shifted by vector insn. */
2037 else if (dt[1] == vect_constant_def || dt[1] == vect_external_def)
2039 optab = optab_for_tree_code (code, vectype, optab_scalar);
2040 if (optab
2041 && (optab_handler (optab, TYPE_MODE (vectype))->insn_code
2042 != CODE_FOR_nothing))
2044 scalar_shift_arg = true;
2045 if (vect_print_dump_info (REPORT_DETAILS))
2046 fprintf (vect_dump, "vector/scalar shift/rotate found.");
2048 else
2050 optab = optab_for_tree_code (code, vectype, optab_vector);
2051 if (optab
2052 && (optab_handler (optab, TYPE_MODE (vectype))->insn_code
2053 != CODE_FOR_nothing))
2055 if (vect_print_dump_info (REPORT_DETAILS))
2056 fprintf (vect_dump, "vector/vector shift/rotate found.");
2058 /* Unlike the other binary operators, shifts/rotates take an int rhs
2059 rather than an rhs of the same type as the lhs, so make sure the
2060 scalar is of the right type if we are dealing with vectors of
2061 short/char. */
2062 if (dt[1] == vect_constant_def)
2063 op1 = fold_convert (TREE_TYPE (vectype), op1);
2068 else
2070 if (vect_print_dump_info (REPORT_DETAILS))
2071 fprintf (vect_dump, "operand mode requires invariant argument.");
2072 return false;
2075 else
2076 optab = optab_for_tree_code (code, vectype, optab_default);
2078 /* Supportable by target? */
2079 if (!optab)
2081 if (vect_print_dump_info (REPORT_DETAILS))
2082 fprintf (vect_dump, "no optab.");
2083 return false;
2085 vec_mode = TYPE_MODE (vectype);
2086 icode = (int) optab_handler (optab, vec_mode)->insn_code;
2087 if (icode == CODE_FOR_nothing)
2089 if (vect_print_dump_info (REPORT_DETAILS))
2090 fprintf (vect_dump, "op not supported by target.");
2091 /* Check only during analysis. */
2092 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
2093 || (vf < vect_min_worthwhile_factor (code)
2094 && !vec_stmt))
2095 return false;
2096 if (vect_print_dump_info (REPORT_DETAILS))
2097 fprintf (vect_dump, "proceeding using word mode.");
2100 /* Worthwhile without SIMD support? Check only during analysis. */
2101 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2102 && vf < vect_min_worthwhile_factor (code)
2103 && !vec_stmt)
2105 if (vect_print_dump_info (REPORT_DETAILS))
2106 fprintf (vect_dump, "not worthwhile without SIMD support.");
2107 return false;
2110 if (!vec_stmt) /* transformation not required. */
2112 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
2113 if (vect_print_dump_info (REPORT_DETAILS))
2114 fprintf (vect_dump, "=== vectorizable_operation ===");
2115 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2116 return true;
2119 /** Transform. **/
2121 if (vect_print_dump_info (REPORT_DETAILS))
2122 fprintf (vect_dump, "transform binary/unary operation.");
2124 /* Handle def. */
2125 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2127 /* Allocate VECs for vector operands. In case of SLP, vector operands are
2128 created in the previous stages of the recursion, so no allocation is
2129 needed, except for the case of shift with scalar shift argument. In that
2130 case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
2131 be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
2132 In case of loop-based vectorization we allocate VECs of size 1. We
2133 allocate VEC_OPRNDS1 only in case of binary operation. */
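/* Hypothetical example: for an SLP group in which every scalar stmt is
   a[i] = b[i] >> 2, the single scalar amount 2 is pushed into VEC_OPRNDS1
   once per vector stmt of the group (SLP_NODE->VEC_STMTS_SIZE times),
   which is valid because the analysis checked that all the scalar shift
   arguments are the same.  */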
2134 if (!slp_node)
2136 vec_oprnds0 = VEC_alloc (tree, heap, 1);
2137 if (op_type == binary_op)
2138 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2140 else if (scalar_shift_arg)
2141 vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);
2143 /* In case the vectorization factor (VF) is bigger than the number
2144 of elements that we can fit in a vectype (nunits), we have to generate
2145 more than one vector stmt - i.e - we need to "unroll" the
2146 vector stmt by a factor VF/nunits. In doing so, we record a pointer
2147 from one copy of the vector stmt to the next, in the field
2148 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
2149 stages to find the correct vector defs to be used when vectorizing
2150 stmts that use the defs of the current stmt. The example below illustrates
2151 the vectorization process when VF=16 and nunits=4 (i.e - we need to create
2152 4 vectorized stmts):
2154 before vectorization:
2155 RELATED_STMT VEC_STMT
2156 S1: x = memref - -
2157 S2: z = x + 1 - -
2159 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
2160 there):
2161 RELATED_STMT VEC_STMT
2162 VS1_0: vx0 = memref0 VS1_1 -
2163 VS1_1: vx1 = memref1 VS1_2 -
2164 VS1_2: vx2 = memref2 VS1_3 -
2165 VS1_3: vx3 = memref3 - -
2166 S1: x = load - VS1_0
2167 S2: z = x + 1 - -
2169 step2: vectorize stmt S2 (done here):
2170 To vectorize stmt S2 we first need to find the relevant vector
2171 def for the first operand 'x'. This is, as usual, obtained from
2172 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
2173 that defines 'x' (S1). This way we find the stmt VS1_0, and the
2174 relevant vector def 'vx0'. Having found 'vx0' we can generate
2175 the vector stmt VS2_0, and as usual, record it in the
2176 STMT_VINFO_VEC_STMT of stmt S2.
2177 When creating the second copy (VS2_1), we obtain the relevant vector
2178 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
2179 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
2180 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
2181 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
2182 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
2183 chain of stmts and pointers:
2184 RELATED_STMT VEC_STMT
2185 VS1_0: vx0 = memref0 VS1_1 -
2186 VS1_1: vx1 = memref1 VS1_2 -
2187 VS1_2: vx2 = memref2 VS1_3 -
2188 VS1_3: vx3 = memref3 - -
2189 S1: x = load - VS1_0
2190 VS2_0: vz0 = vx0 + v1 VS2_1 -
2191 VS2_1: vz1 = vx1 + v1 VS2_2 -
2192 VS2_2: vz2 = vx2 + v1 VS2_3 -
2193 VS2_3: vz3 = vx3 + v1 - -
2194 S2: z = x + 1 - VS2_0 */
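/* A minimal sketch of how a later stage walks such a chain (illustrative
   only; use_vector_def () is a hypothetical consumer, not a function in
   this file):

     gimple vs = STMT_VINFO_VEC_STMT (vinfo_for_stmt (scalar_stmt));
     while (vs)
       {
         use_vector_def (gimple_assign_lhs (vs));
         vs = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (vs));
       }
*/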
2196 prev_stmt_info = NULL;
2197 for (j = 0; j < ncopies; j++)
2199 /* Handle uses. */
2200 if (j == 0)
2202 if (op_type == binary_op && scalar_shift_arg)
2204 /* Vector shl and shr insn patterns can be defined with scalar
2205 operand 2 (shift operand). In this case, use constant or loop
2206 invariant op1 directly, without extending it to vector mode
2207 first. */
2208 optab_op2_mode = insn_data[icode].operand[2].mode;
2209 if (!VECTOR_MODE_P (optab_op2_mode))
2211 if (vect_print_dump_info (REPORT_DETAILS))
2212 fprintf (vect_dump, "operand 1 using scalar mode.");
2213 vec_oprnd1 = op1;
2214 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2215 if (slp_node)
2217 /* Store vec_oprnd1 for every vector stmt to be created
2218 for SLP_NODE. We check during the analysis that all the
2219 shift arguments are the same.
2220 TODO: Allow different constants for different vector
2221 stmts generated for an SLP instance. */
2222 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
2223 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2228 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
2229 (a special case for certain kinds of vector shifts); otherwise,
2230 operand 1 should be of a vector type (the usual case). */
2231 if (op_type == binary_op && !vec_oprnd1)
2232 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
2233 slp_node);
2234 else
2235 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
2236 slp_node);
2238 else
2239 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
2241 /* Arguments are ready. Create the new vector stmt. */
2242 for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vop0); i++)
2244 vop1 = ((op_type == binary_op)
2245 ? VEC_index (tree, vec_oprnds1, i) : NULL);
2246 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2247 new_temp = make_ssa_name (vec_dest, new_stmt);
2248 gimple_assign_set_lhs (new_stmt, new_temp);
2249 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2250 if (slp_node)
2251 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2254 if (slp_node)
2255 continue;
2257 if (j == 0)
2258 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2259 else
2260 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2261 prev_stmt_info = vinfo_for_stmt (new_stmt);
2264 VEC_free (tree, heap, vec_oprnds0);
2265 if (vec_oprnds1)
2266 VEC_free (tree, heap, vec_oprnds1);
2268 return true;
2272 /* Get vectorized definitions for loop-based vectorization. For the first
2273 operand we call vect_get_vec_def_for_operand() (with OPRND containing
2274 scalar operand), and for the rest we get a copy with
2275 vect_get_vec_def_for_stmt_copy() using the previous vector definition
2276 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
2277 The vectors are collected into VEC_OPRNDS. */
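/* Illustrative contents (an assumed two-step narrowing, i.e. the caller
   passes MULTI_STEP_CVT == 1): four defs d0..d3 end up in *VEC_OPRNDS,
   where d0 comes from vect_get_vec_def_for_operand and d1, d2, d3 are
   successive vect_get_vec_def_for_stmt_copy results; the demotion code
   then consumes them in pairs.  */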
2279 static void
2280 vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
2281 VEC (tree, heap) **vec_oprnds, int multi_step_cvt)
2283 tree vec_oprnd;
2285 /* Get first vector operand. */
2286 /* All the vector operands except the very first one (which is the scalar operand)
2287 are stmt copies. */
2288 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
2289 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
2290 else
2291 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
2293 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2295 /* Get second vector operand. */
2296 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
2297 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2299 *oprnd = vec_oprnd;
2301 /* For conversion in multiple steps, continue to get operands
2302 recursively. */
2303 if (multi_step_cvt)
2304 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
2308 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
2309 For multi-step conversions store the resulting vectors and call the function
2310 recursively. */
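/* E.g., assuming the narrowing code chosen by the target is
   VEC_PACK_TRUNC_EXPR, each iteration of the loop below combines two
   adjacent input vectors into one narrower vector:
     vtmp = VEC_PACK_TRUNC_EXPR <vop0, vop1>;
   so two V4SI operands yield one V8HI result.  */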
2312 static void
2313 vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
2314 int multi_step_cvt, gimple stmt,
2315 VEC (tree, heap) *vec_dsts,
2316 gimple_stmt_iterator *gsi,
2317 slp_tree slp_node, enum tree_code code,
2318 stmt_vec_info *prev_stmt_info)
2320 unsigned int i;
2321 tree vop0, vop1, new_tmp, vec_dest;
2322 gimple new_stmt;
2323 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2325 vec_dest = VEC_pop (tree, vec_dsts);
2327 for (i = 0; i < VEC_length (tree, *vec_oprnds); i += 2)
2329 /* Create demotion operation. */
2330 vop0 = VEC_index (tree, *vec_oprnds, i);
2331 vop1 = VEC_index (tree, *vec_oprnds, i + 1);
2332 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2333 new_tmp = make_ssa_name (vec_dest, new_stmt);
2334 gimple_assign_set_lhs (new_stmt, new_tmp);
2335 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2337 if (multi_step_cvt)
2338 /* Store the resulting vector for next recursive call. */
2339 VEC_replace (tree, *vec_oprnds, i/2, new_tmp);
2340 else
2342 /* This is the last step of the conversion sequence. Store the
2343 vectors in SLP_NODE or in vector info of the scalar statement
2344 (or in STMT_VINFO_RELATED_STMT chain). */
2345 if (slp_node)
2346 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2347 else
2349 if (!*prev_stmt_info)
2350 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2351 else
2352 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
2354 *prev_stmt_info = vinfo_for_stmt (new_stmt);
2359 /* For multi-step demotion operations we first generate demotion operations
2360 from the source type to the intermediate types, and then combine the
2361 results (stored in VEC_OPRNDS) in a demotion operation to the destination
2362 type. */
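/* Assumed example: narrowing int to char (V4SI -> V16QI) goes through
   V8HI; four V4SI defs are first packed pairwise into two V8HI vectors,
   and the recursive call then packs those into one V16QI vector.  */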
2363 if (multi_step_cvt)
2365 /* At each level of recursion we have half of the operands we had at the
2366 previous level. */
2367 VEC_truncate (tree, *vec_oprnds, (i+1)/2);
2368 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
2369 stmt, vec_dsts, gsi, slp_node,
2370 code, prev_stmt_info);
2375 /* Function vectorizable_type_demotion
2377 Check if STMT performs a binary or unary operation that involves
2378 type demotion, and if it can be vectorized.
2379 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2380 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2381 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
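/* A typical scalar stmt handled here (illustrative): s = (short) i;
   where the source vectype (e.g. V4SI) has fewer elements than the
   destination vectype (e.g. V8HI).  */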
2383 static bool
2384 vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
2385 gimple *vec_stmt, slp_tree slp_node)
2387 tree vec_dest;
2388 tree scalar_dest;
2389 tree op0;
2390 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2391 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2392 enum tree_code code, code1 = ERROR_MARK;
2393 tree def;
2394 gimple def_stmt;
2395 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2396 stmt_vec_info prev_stmt_info;
2397 int nunits_in;
2398 int nunits_out;
2399 tree vectype_out;
2400 int ncopies;
2401 int j, i;
2402 tree vectype_in;
2403 int multi_step_cvt = 0;
2404 VEC (tree, heap) *vec_oprnds0 = NULL;
2405 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2406 tree last_oprnd, intermediate_type;
2408 /* FORNOW: not supported by basic block SLP vectorization. */
2409 gcc_assert (loop_vinfo);
2411 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2412 return false;
2414 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2415 return false;
2417 /* Is STMT a vectorizable type-demotion operation? */
2418 if (!is_gimple_assign (stmt))
2419 return false;
2421 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2422 return false;
2424 code = gimple_assign_rhs_code (stmt);
2425 if (!CONVERT_EXPR_CODE_P (code))
2426 return false;
2428 op0 = gimple_assign_rhs1 (stmt);
2429 vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op0));
2430 if (!vectype_in)
2431 return false;
2432 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2434 scalar_dest = gimple_assign_lhs (stmt);
2435 vectype_out = get_vectype_for_scalar_type (TREE_TYPE (scalar_dest));
2436 if (!vectype_out)
2437 return false;
2438 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2439 if (nunits_in >= nunits_out)
2440 return false;
2442 /* Multiple types in SLP are handled by creating the appropriate number of
2443 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2444 case of SLP. */
2445 if (slp_node)
2446 ncopies = 1;
2447 else
2448 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2449 gcc_assert (ncopies >= 1);
2451 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2452 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2453 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2454 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2455 && CONVERT_EXPR_CODE_P (code))))
2456 return false;
2458 /* Check the operands of the operation. */
2459 if (!vect_is_simple_use (op0, loop_vinfo, NULL, &def_stmt, &def, &dt[0]))
2461 if (vect_print_dump_info (REPORT_DETAILS))
2462 fprintf (vect_dump, "use not simple.");
2463 return false;
2466 /* Supportable by target? */
2467 if (!supportable_narrowing_operation (code, stmt, vectype_in, &code1,
2468 &multi_step_cvt, &interm_types))
2469 return false;
2471 STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
2473 if (!vec_stmt) /* transformation not required. */
2475 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
2476 if (vect_print_dump_info (REPORT_DETAILS))
2477 fprintf (vect_dump, "=== vectorizable_demotion ===");
2478 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2479 return true;
2482 /** Transform. **/
2483 if (vect_print_dump_info (REPORT_DETAILS))
2484 fprintf (vect_dump, "transform type demotion operation. ncopies = %d.",
2485 ncopies);
2487 /* In case of multi-step demotion, we first generate demotion operations to
2488 the intermediate types, and then from those types to the final one.
2489 We create vector destinations for the intermediate type (TYPES) received
2490 from supportable_narrowing_operation, and store them in the correct order
2491 for future use in vect_create_vectorized_demotion_stmts(). */
2492 if (multi_step_cvt)
2493 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
2494 else
2495 vec_dsts = VEC_alloc (tree, heap, 1);
2497 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2498 VEC_quick_push (tree, vec_dsts, vec_dest);
2500 if (multi_step_cvt)
2502 for (i = VEC_length (tree, interm_types) - 1;
2503 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
2505 vec_dest = vect_create_destination_var (scalar_dest,
2506 intermediate_type);
2507 VEC_quick_push (tree, vec_dsts, vec_dest);
2511 /* In case the vectorization factor (VF) is bigger than the number
2512 of elements that we can fit in a vectype (nunits), we have to generate
2513 more than one vector stmt - i.e - we need to "unroll" the
2514 vector stmt by a factor VF/nunits. */
2515 last_oprnd = op0;
2516 prev_stmt_info = NULL;
2517 for (j = 0; j < ncopies; j++)
2519 /* Handle uses. */
2520 if (slp_node)
2521 vect_get_slp_defs (slp_node, &vec_oprnds0, NULL);
2522 else
2524 VEC_free (tree, heap, vec_oprnds0);
2525 vec_oprnds0 = VEC_alloc (tree, heap,
2526 (multi_step_cvt ? vect_pow2 (multi_step_cvt) * 2 : 2));
2527 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
2528 vect_pow2 (multi_step_cvt) - 1);
2531 /* Arguments are ready. Create the new vector stmts. */
2532 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
2533 vect_create_vectorized_demotion_stmts (&vec_oprnds0,
2534 multi_step_cvt, stmt, tmp_vec_dsts,
2535 gsi, slp_node, code1,
2536 &prev_stmt_info);
2539 VEC_free (tree, heap, vec_oprnds0);
2540 VEC_free (tree, heap, vec_dsts);
2541 VEC_free (tree, heap, tmp_vec_dsts);
2542 VEC_free (tree, heap, interm_types);
2544 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2545 return true;
2549 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
2550 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
2551 the resulting vectors and call the function recursively. */
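/* E.g., assuming the target widens with VEC_UNPACK_LO_EXPR and
   VEC_UNPACK_HI_EXPR rather than with builtin decls, each input vector
   vop0 (say V8HI) produces two wider results (V4SI):
     new_tmp1 = VEC_UNPACK_LO_EXPR <vop0>;
     new_tmp2 = VEC_UNPACK_HI_EXPR <vop0>;
   generated by the two vect_gen_widened_results_half calls below.  */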
2553 static void
2554 vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
2555 VEC (tree, heap) **vec_oprnds1,
2556 int multi_step_cvt, gimple stmt,
2557 VEC (tree, heap) *vec_dsts,
2558 gimple_stmt_iterator *gsi,
2559 slp_tree slp_node, enum tree_code code1,
2560 enum tree_code code2, tree decl1,
2561 tree decl2, int op_type,
2562 stmt_vec_info *prev_stmt_info)
2564 int i;
2565 tree vop0, vop1, new_tmp1, new_tmp2, vec_dest;
2566 gimple new_stmt1, new_stmt2;
2567 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2568 VEC (tree, heap) *vec_tmp;
2570 vec_dest = VEC_pop (tree, vec_dsts);
2571 vec_tmp = VEC_alloc (tree, heap, VEC_length (tree, *vec_oprnds0) * 2);
2573 for (i = 0; VEC_iterate (tree, *vec_oprnds0, i, vop0); i++)
2575 if (op_type == binary_op)
2576 vop1 = VEC_index (tree, *vec_oprnds1, i);
2577 else
2578 vop1 = NULL_TREE;
2580 /* Generate the two halves of promotion operation. */
2581 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
2582 op_type, vec_dest, gsi, stmt);
2583 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
2584 op_type, vec_dest, gsi, stmt);
2585 if (is_gimple_call (new_stmt1))
2587 new_tmp1 = gimple_call_lhs (new_stmt1);
2588 new_tmp2 = gimple_call_lhs (new_stmt2);
2590 else
2592 new_tmp1 = gimple_assign_lhs (new_stmt1);
2593 new_tmp2 = gimple_assign_lhs (new_stmt2);
2596 if (multi_step_cvt)
2598 /* Store the results for the recursive call. */
2599 VEC_quick_push (tree, vec_tmp, new_tmp1);
2600 VEC_quick_push (tree, vec_tmp, new_tmp2);
2602 else
2604 /* Last step of promotion sequence - store the results. */
2605 if (slp_node)
2607 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt1);
2608 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt2);
2610 else
2612 if (!*prev_stmt_info)
2613 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt1;
2614 else
2615 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt1;
2617 *prev_stmt_info = vinfo_for_stmt (new_stmt1);
2618 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt2;
2619 *prev_stmt_info = vinfo_for_stmt (new_stmt2);
2624 if (multi_step_cvt)
2626 /* For a multi-step promotion operation we call the function
2627 recursively for every stage. We start from the input type,
2628 create promotion operations to the intermediate types, and then
2629 create promotions to the output type. */
2630 *vec_oprnds0 = VEC_copy (tree, heap, vec_tmp);
2631 VEC_free (tree, heap, vec_tmp);
2632 vect_create_vectorized_promotion_stmts (vec_oprnds0, vec_oprnds1,
2633 multi_step_cvt - 1, stmt,
2634 vec_dsts, gsi, slp_node, code1,
2635 code2, decl1, decl2, op_type,
2636 prev_stmt_info);
2641 /* Function vectorizable_type_promotion
2643 Check if STMT performs a binary or unary operation that involves
2644 type promotion, and if it can be vectorized.
2645 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2646 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2647 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
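/* Typical scalar stmts handled here (illustrative):
     i = (int) s;        (a conversion)
     i = s_a w* s_b;     (WIDEN_MULT_EXPR, in gimple dump notation)
   where the source vectype (e.g. V8HI) has more elements than the
   destination vectype (e.g. V4SI).  */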
2649 static bool
2650 vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
2651 gimple *vec_stmt, slp_tree slp_node)
2653 tree vec_dest;
2654 tree scalar_dest;
2655 tree op0, op1 = NULL;
2656 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
2657 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2658 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2659 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
2660 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
2661 int op_type;
2662 tree def;
2663 gimple def_stmt;
2664 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2665 stmt_vec_info prev_stmt_info;
2666 int nunits_in;
2667 int nunits_out;
2668 tree vectype_out;
2669 int ncopies;
2670 int j, i;
2671 tree vectype_in;
2672 tree intermediate_type = NULL_TREE;
2673 int multi_step_cvt = 0;
2674 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
2675 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2677 /* FORNOW: not supported by basic block SLP vectorization. */
2678 gcc_assert (loop_vinfo);
2680 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2681 return false;
2683 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2684 return false;
2686 /* Is STMT a vectorizable type-promotion operation? */
2687 if (!is_gimple_assign (stmt))
2688 return false;
2690 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2691 return false;
2693 code = gimple_assign_rhs_code (stmt);
2694 if (!CONVERT_EXPR_CODE_P (code)
2695 && code != WIDEN_MULT_EXPR)
2696 return false;
2698 op0 = gimple_assign_rhs1 (stmt);
2699 vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op0));
2700 if (!vectype_in)
2701 return false;
2702 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2704 scalar_dest = gimple_assign_lhs (stmt);
2705 vectype_out = get_vectype_for_scalar_type (TREE_TYPE (scalar_dest));
2706 if (!vectype_out)
2707 return false;
2708 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2709 if (nunits_in <= nunits_out)
2710 return false;
2712 /* Multiple types in SLP are handled by creating the appropriate number of
2713 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2714 case of SLP. */
2715 if (slp_node)
2716 ncopies = 1;
2717 else
2718 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2720 gcc_assert (ncopies >= 1);
2722 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2723 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2724 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2725 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2726 && CONVERT_EXPR_CODE_P (code))))
2727 return false;
2729 /* Check the operands of the operation. */
2730 if (!vect_is_simple_use (op0, loop_vinfo, NULL, &def_stmt, &def, &dt[0]))
2732 if (vect_print_dump_info (REPORT_DETAILS))
2733 fprintf (vect_dump, "use not simple.");
2734 return false;
2737 op_type = TREE_CODE_LENGTH (code);
2738 if (op_type == binary_op)
2740 op1 = gimple_assign_rhs2 (stmt);
2741 if (!vect_is_simple_use (op1, loop_vinfo, NULL, &def_stmt, &def, &dt[1]))
2743 if (vect_print_dump_info (REPORT_DETAILS))
2744 fprintf (vect_dump, "use not simple.");
2745 return false;
2749 /* Supportable by target? */
2750 if (!supportable_widening_operation (code, stmt, vectype_in,
2751 &decl1, &decl2, &code1, &code2,
2752 &multi_step_cvt, &interm_types))
2753 return false;
2755 /* Binary widening operation can only be supported directly by the
2756 architecture. */
2757 gcc_assert (!(multi_step_cvt && op_type == binary_op));
2759 STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
2761 if (!vec_stmt) /* transformation not required. */
2763 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
2764 if (vect_print_dump_info (REPORT_DETAILS))
2765 fprintf (vect_dump, "=== vectorizable_promotion ===");
2766 vect_model_simple_cost (stmt_info, 2*ncopies, dt, NULL);
2767 return true;
2770 /** Transform. **/
2772 if (vect_print_dump_info (REPORT_DETAILS))
2773 fprintf (vect_dump, "transform type promotion operation. ncopies = %d.",
2774 ncopies);
2776 /* Handle def. */
2777 /* In case of multi-step promotion, we first generate promotion operations
2778 to the intermediate types, and then from those types to the final one.
2779 We store the vector destinations in VEC_DSTS in the correct order for
2780 recursive creation of promotion operations in
2781 vect_create_vectorized_promotion_stmts(). Vector destinations are created
2782 according to TYPES received from supportable_widening_operation(). */
2783 if (multi_step_cvt)
2784 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
2785 else
2786 vec_dsts = VEC_alloc (tree, heap, 1);
2788 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2789 VEC_quick_push (tree, vec_dsts, vec_dest);
2791 if (multi_step_cvt)
2793 for (i = VEC_length (tree, interm_types) - 1;
2794 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
2796 vec_dest = vect_create_destination_var (scalar_dest,
2797 intermediate_type);
2798 VEC_quick_push (tree, vec_dsts, vec_dest);
2802 if (!slp_node)
2804 vec_oprnds0 = VEC_alloc (tree, heap,
2805 (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
2806 if (op_type == binary_op)
2807 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2810 /* In case the vectorization factor (VF) is bigger than the number
2811 of elements that we can fit in a vectype (nunits), we have to generate
2812 more than one vector stmt - i.e - we need to "unroll" the
2813 vector stmt by a factor VF/nunits. */
2815 prev_stmt_info = NULL;
2816 for (j = 0; j < ncopies; j++)
2818 /* Handle uses. */
2819 if (j == 0)
2821 if (slp_node)
2822 vect_get_slp_defs (slp_node, &vec_oprnds0, &vec_oprnds1);
2823 else
2825 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
2826 VEC_quick_push (tree, vec_oprnds0, vec_oprnd0);
2827 if (op_type == binary_op)
2829 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL);
2830 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2834 else
2836 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
2837 VEC_replace (tree, vec_oprnds0, 0, vec_oprnd0);
2838 if (op_type == binary_op)
2840 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1);
2841 VEC_replace (tree, vec_oprnds1, 0, vec_oprnd1);
2845 /* Arguments are ready. Create the new vector stmts. */
2846 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
2847 vect_create_vectorized_promotion_stmts (&vec_oprnds0, &vec_oprnds1,
2848 multi_step_cvt, stmt,
2849 tmp_vec_dsts,
2850 gsi, slp_node, code1, code2,
2851 decl1, decl2, op_type,
2852 &prev_stmt_info);
2855 VEC_free (tree, heap, vec_dsts);
2856 VEC_free (tree, heap, tmp_vec_dsts);
2857 VEC_free (tree, heap, interm_types);
2858 VEC_free (tree, heap, vec_oprnds0);
2859 VEC_free (tree, heap, vec_oprnds1);
2861 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2862 return true;
2866 /* Function vectorizable_store.
2868 Check if STMT defines a non-scalar data-ref (array/pointer/structure) that
2869 can be vectorized.
2870 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2871 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2872 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2874 static bool
2875 vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
2876 slp_tree slp_node)
2878 tree scalar_dest;
2879 tree data_ref;
2880 tree op;
2881 tree vec_oprnd = NULL_TREE;
2882 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2883 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
2884 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2885 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2886 struct loop *loop = NULL;
2887 enum machine_mode vec_mode;
2888 tree dummy;
2889 enum dr_alignment_support alignment_support_scheme;
2890 tree def;
2891 gimple def_stmt;
2892 enum vect_def_type dt;
2893 stmt_vec_info prev_stmt_info = NULL;
2894 tree dataref_ptr = NULL_TREE;
2895 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
2896 int ncopies;
2897 int j;
2898 gimple next_stmt, first_stmt = NULL;
2899 bool strided_store = false;
2900 unsigned int group_size, i;
2901 VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
2902 bool inv_p;
2903 VEC(tree,heap) *vec_oprnds = NULL;
2904 bool slp = (slp_node != NULL);
2905 unsigned int vec_num;
2906 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2908 if (loop_vinfo)
2909 loop = LOOP_VINFO_LOOP (loop_vinfo);
2911 /* Multiple types in SLP are handled by creating the appropriate number of
2912 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2913 case of SLP. */
2914 if (slp)
2915 ncopies = 1;
2916 else
2917 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
2919 gcc_assert (ncopies >= 1);
2921 /* FORNOW. This restriction should be relaxed. */
2922 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
2924 if (vect_print_dump_info (REPORT_DETAILS))
2925 fprintf (vect_dump, "multiple types in nested loop.");
2926 return false;
2929 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2930 return false;
2932 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2933 return false;
2935 /* Is vectorizable store? */
2937 if (!is_gimple_assign (stmt))
2938 return false;
2940 scalar_dest = gimple_assign_lhs (stmt);
2941 if (TREE_CODE (scalar_dest) != ARRAY_REF
2942 && TREE_CODE (scalar_dest) != INDIRECT_REF
2943 && TREE_CODE (scalar_dest) != COMPONENT_REF
2944 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
2945 && TREE_CODE (scalar_dest) != REALPART_EXPR)
2946 return false;
2948 gcc_assert (gimple_assign_single_p (stmt));
2949 op = gimple_assign_rhs1 (stmt);
2950 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt))
2952 if (vect_print_dump_info (REPORT_DETAILS))
2953 fprintf (vect_dump, "use not simple.");
2954 return false;
2957 /* The scalar rhs type needs to be trivially convertible to the vector
2958 component type. This should always be the case. */
2959 if (!useless_type_conversion_p (TREE_TYPE (vectype), TREE_TYPE (op)))
2961 if (vect_print_dump_info (REPORT_DETAILS))
2962 fprintf (vect_dump, "??? operands of different types");
2963 return false;
2966 vec_mode = TYPE_MODE (vectype);
2967 /* FORNOW. In some cases can vectorize even if data-type not supported
2968 (e.g. - array initialization with 0). */
2969 if (optab_handler (mov_optab, (int)vec_mode)->insn_code == CODE_FOR_nothing)
2970 return false;
2972 if (!STMT_VINFO_DATA_REF (stmt_info))
2973 return false;
2975 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
2977 strided_store = true;
2978 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
2979 if (!vect_strided_store_supported (vectype)
2980 && !PURE_SLP_STMT (stmt_info) && !slp)
2981 return false;
2983 if (first_stmt == stmt)
2985 /* STMT is the leader of the group. Check the operands of all the
2986 stmts of the group. */
2987 next_stmt = DR_GROUP_NEXT_DR (stmt_info);
2988 while (next_stmt)
2990 gcc_assert (gimple_assign_single_p (next_stmt));
2991 op = gimple_assign_rhs1 (next_stmt);
2992 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt,
2993 &def, &dt))
2995 if (vect_print_dump_info (REPORT_DETAILS))
2996 fprintf (vect_dump, "use not simple.");
2997 return false;
2999 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3004 if (!vec_stmt) /* transformation not required. */
3006 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
3007 vect_model_store_cost (stmt_info, ncopies, dt, NULL);
3008 return true;
3011 /** Transform. **/
3013 if (strided_store)
3015 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3016 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3018 DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
3020 /* FORNOW */
3021 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
3023 /* We vectorize all the stmts of the interleaving group when we
3024 reach the last stmt in the group. */
3025 if (DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
3026 < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))
3027 && !slp)
3029 *vec_stmt = NULL;
3030 return true;
3033 if (slp)
3034 strided_store = false;
3036 /* VEC_NUM is the number of vect stmts to be created for this group. */
3037 if (slp)
3038 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3039 else
3040 vec_num = group_size;
3042 else
3044 first_stmt = stmt;
3045 first_dr = dr;
3046 group_size = vec_num = 1;
3049 if (vect_print_dump_info (REPORT_DETAILS))
3050 fprintf (vect_dump, "transform store. ncopies = %d",ncopies);
3052 dr_chain = VEC_alloc (tree, heap, group_size);
3053 oprnds = VEC_alloc (tree, heap, group_size);
3055 alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
3056 gcc_assert (alignment_support_scheme);
3058 /* In case the vectorization factor (VF) is bigger than the number
3059 of elements that we can fit in a vectype (nunits), we have to generate
3060 more than one vector stmt - i.e - we need to "unroll" the
3061 vector stmt by a factor VF/nunits. For more details see documentation in
3062 vect_get_vec_def_for_copy_stmt. */
3064 /* In case of interleaving (non-unit strided access):
3066 S1: &base + 2 = x2
3067 S2: &base = x0
3068 S3: &base + 1 = x1
3069 S4: &base + 3 = x3
3071 We create vectorized stores starting from base address (the access of the
3072 first stmt in the chain (S2 in the above example), when the last store stmt
3073 of the chain (S4) is reached:
3075 VS1: &base = vx2
3076 VS2: &base + vec_size*1 = vx0
3077 VS3: &base + vec_size*2 = vx1
3078 VS4: &base + vec_size*3 = vx3
3080 Then permutation statements are generated:
3082 VS5: vx5 = VEC_INTERLEAVE_HIGH_EXPR < vx0, vx3 >
3083 VS6: vx6 = VEC_INTERLEAVE_LOW_EXPR < vx0, vx3 >
3086 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3087 (the order of the data-refs in the output of vect_permute_store_chain
3088 corresponds to the order of scalar stmts in the interleaving chain - see
3089 the documentation of vect_permute_store_chain()).
3091 In case of both multiple types and interleaving, above vector stores and
3092 permutation stmts are created for every copy. The result vector stmts are
3093 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
3094 STMT_VINFO_RELATED_STMT for the next copies.
3097 prev_stmt_info = NULL;
3098 for (j = 0; j < ncopies; j++)
3100 gimple new_stmt;
3101 gimple ptr_incr;
3103 if (j == 0)
3105 if (slp)
3107 /* Get vectorized arguments for SLP_NODE. */
3108 vect_get_slp_defs (slp_node, &vec_oprnds, NULL);
3110 vec_oprnd = VEC_index (tree, vec_oprnds, 0);
3112 else
3114 /* For interleaved stores we collect vectorized defs for all the
3115 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
3116 used as an input to vect_permute_store_chain(), and OPRNDS as
3117 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
3119 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3120 OPRNDS are of size 1. */
3121 next_stmt = first_stmt;
3122 for (i = 0; i < group_size; i++)
3124 /* Since gaps are not supported for interleaved stores,
3125 GROUP_SIZE is the exact number of stmts in the chain.
3126 Therefore, NEXT_STMT can't be NULL_TREE. In case
3127 there is no interleaving, GROUP_SIZE is 1, and only one
3128 iteration of the loop will be executed. */
3129 gcc_assert (next_stmt
3130 && gimple_assign_single_p (next_stmt));
3131 op = gimple_assign_rhs1 (next_stmt);
3133 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
3134 NULL);
3135 VEC_quick_push(tree, dr_chain, vec_oprnd);
3136 VEC_quick_push(tree, oprnds, vec_oprnd);
3137 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3141 /* We should have caught mismatched types earlier. */
3142 gcc_assert (useless_type_conversion_p (vectype,
3143 TREE_TYPE (vec_oprnd)));
3144 dataref_ptr = vect_create_data_ref_ptr (first_stmt, NULL, NULL_TREE,
3145 &dummy, &ptr_incr, false,
3146 &inv_p);
3147 gcc_assert (bb_vinfo || !inv_p);
3149 else
3151 /* For interleaved stores we created vectorized defs for all the
3152 defs stored in OPRNDS in the previous iteration (previous copy).
3153 DR_CHAIN is then used as an input to vect_permute_store_chain(),
3154 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
3155 next copy.
3156 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3157 OPRNDS are of size 1. */
3158 for (i = 0; i < group_size; i++)
3160 op = VEC_index (tree, oprnds, i);
3161 vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def,
3162 &dt);
3163 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
3164 VEC_replace(tree, dr_chain, i, vec_oprnd);
3165 VEC_replace(tree, oprnds, i, vec_oprnd);
3167 dataref_ptr =
3168 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
3171 if (strided_store)
3173 result_chain = VEC_alloc (tree, heap, group_size);
3174 /* Permute. */
3175 if (!vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
3176 &result_chain))
3177 return false;
3180 next_stmt = first_stmt;
3181 for (i = 0; i < vec_num; i++)
3183 if (i > 0)
3184 /* Bump the vector pointer. */
3185 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3186 NULL_TREE);
3188 if (slp)
3189 vec_oprnd = VEC_index (tree, vec_oprnds, i);
3190 else if (strided_store)
3191 /* For strided stores vectorized defs are interleaved in
3192 vect_permute_store_chain(). */
3193 vec_oprnd = VEC_index (tree, result_chain, i);
3195 if (aligned_access_p (first_dr))
3196 data_ref = build_fold_indirect_ref (dataref_ptr);
3197 else
3199 int mis = DR_MISALIGNMENT (first_dr);
3200 tree tmis = (mis == -1 ? size_zero_node : size_int (mis));
3201 tmis = size_binop (MULT_EXPR, tmis, size_int (BITS_PER_UNIT));
3202 data_ref = build2 (MISALIGNED_INDIRECT_REF, vectype, dataref_ptr, tmis);
3205 /* If accesses through a pointer to vectype do not alias the original
3206 memory reference we have a problem. This should never happen. */
3207 gcc_assert (alias_sets_conflict_p (get_alias_set (data_ref),
3208 get_alias_set (gimple_assign_lhs (stmt))));
3210 /* Arguments are ready. Create the new vector stmt. */
3211 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
3212 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3213 mark_symbols_for_renaming (new_stmt);
3215 if (slp)
3216 continue;
3218 if (j == 0)
3219 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3220 else
3221 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3223 prev_stmt_info = vinfo_for_stmt (new_stmt);
3224 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3225 if (!next_stmt)
3226 break;
3230 VEC_free (tree, heap, dr_chain);
3231 VEC_free (tree, heap, oprnds);
3232 if (result_chain)
3233 VEC_free (tree, heap, result_chain);
3235 return true;
3238 /* vectorizable_load.
3240 Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
3241 can be vectorized.
3242 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3243 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3244 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3246 static bool
3247 vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
3248 slp_tree slp_node, slp_instance slp_node_instance)
3250 tree scalar_dest;
3251 tree vec_dest = NULL;
3252 tree data_ref = NULL;
3253 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3254 stmt_vec_info prev_stmt_info;
3255 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3256 struct loop *loop = NULL;
3257 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
3258 bool nested_in_vect_loop = false;
3259 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
3260 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3261 tree new_temp;
3262 int mode;
3263 gimple new_stmt = NULL;
3264 tree dummy;
3265 enum dr_alignment_support alignment_support_scheme;
3266 tree dataref_ptr = NULL_TREE;
3267 gimple ptr_incr;
3268 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3269 int ncopies;
3270 int i, j, group_size;
3271 tree msq = NULL_TREE, lsq;
3272 tree offset = NULL_TREE;
3273 tree realignment_token = NULL_TREE;
3274 gimple phi = NULL;
3275 VEC(tree,heap) *dr_chain = NULL;
3276 bool strided_load = false;
3277 gimple first_stmt;
3278 tree scalar_type;
3279 bool inv_p;
3280 bool compute_in_loop = false;
3281 struct loop *at_loop;
3282 int vec_num;
3283 bool slp = (slp_node != NULL);
3284 bool slp_perm = false;
3285 enum tree_code code;
3286 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3287 int vf;
3289 if (loop_vinfo)
3291 loop = LOOP_VINFO_LOOP (loop_vinfo);
3292 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
3293 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3295 else
3296 vf = 1;
3298 /* Multiple types in SLP are handled by creating the appropriate number of
3299 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3300 case of SLP. */
3301 if (slp)
3302 ncopies = 1;
3303 else
3304 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3306 gcc_assert (ncopies >= 1);
3308 /* FORNOW. This restriction should be relaxed. */
3309 if (nested_in_vect_loop && ncopies > 1)
3311 if (vect_print_dump_info (REPORT_DETAILS))
3312 fprintf (vect_dump, "multiple types in nested loop.");
3313 return false;
3316 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3317 return false;
3319 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3320 return false;
3322 /* Is vectorizable load? */
3323 if (!is_gimple_assign (stmt))
3324 return false;
3326 scalar_dest = gimple_assign_lhs (stmt);
3327 if (TREE_CODE (scalar_dest) != SSA_NAME)
3328 return false;
3330 code = gimple_assign_rhs_code (stmt);
3331 if (code != ARRAY_REF
3332 && code != INDIRECT_REF
3333 && code != COMPONENT_REF
3334 && code != IMAGPART_EXPR
3335 && code != REALPART_EXPR)
3336 return false;
3338 if (!STMT_VINFO_DATA_REF (stmt_info))
3339 return false;
3341 scalar_type = TREE_TYPE (DR_REF (dr));
3342 mode = (int) TYPE_MODE (vectype);
3344 /* FORNOW. In some cases can vectorize even if data-type not supported
3345 (e.g. - data copies). */
3346 if (optab_handler (mov_optab, mode)->insn_code == CODE_FOR_nothing)
3348 if (vect_print_dump_info (REPORT_DETAILS))
3349 fprintf (vect_dump, "Aligned load, but unsupported type.");
3350 return false;
3353 /* The vector component type needs to be trivially convertible to the
3354 scalar lhs. This should always be the case. */
3355 if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), TREE_TYPE (vectype)))
3357 if (vect_print_dump_info (REPORT_DETAILS))
3358 fprintf (vect_dump, "??? operands of different types");
3359 return false;
3362 /* Check if the load is a part of an interleaving chain. */
3363 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3365 strided_load = true;
3366 /* FORNOW */
3367 gcc_assert (! nested_in_vect_loop);
3369 /* Check if interleaving is supported. */
3370 if (!vect_strided_load_supported (vectype)
3371 && !PURE_SLP_STMT (stmt_info) && !slp)
3372 return false;
3375 if (!vec_stmt) /* transformation not required. */
3377 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
3378 vect_model_load_cost (stmt_info, ncopies, NULL);
3379 return true;
3382 if (vect_print_dump_info (REPORT_DETAILS))
3383 fprintf (vect_dump, "transform load.");
3385 /** Transform. **/
3387 if (strided_load)
3389 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
3390 /* Check if the chain of loads is already vectorized. */
3391 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt)))
3393 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3394 return true;
3396 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3397 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3399 /* VEC_NUM is the number of vect stmts to be created for this group. */
3400 if (slp)
3402 strided_load = false;
3403 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3404 if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance))
3405 slp_perm = true;
3407 else
3408 vec_num = group_size;
3410 dr_chain = VEC_alloc (tree, heap, vec_num);
3412 else
3414 first_stmt = stmt;
3415 first_dr = dr;
3416 group_size = vec_num = 1;
3419 alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
3420 gcc_assert (alignment_support_scheme);
3422 /* In case the vectorization factor (VF) is bigger than the number
3423 of elements that we can fit in a vectype (nunits), we have to generate
3424 more than one vector stmt - i.e - we need to "unroll" the
3425 vector stmt by a factor VF/nunits. In doing so, we record a pointer
3426 from one copy of the vector stmt to the next, in the field
3427 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
3428 stages to find the correct vector defs to be used when vectorizing
3429 stmts that use the defs of the current stmt. The example below illustrates
3430 the vectorization process when VF=16 and nunits=4 (i.e - we need to create
3431 4 vectorized stmts):
3433 before vectorization:
3434 RELATED_STMT VEC_STMT
3435 S1: x = memref - -
3436 S2: z = x + 1 - -
3438 step 1: vectorize stmt S1:
3439 We first create the vector stmt VS1_0, and, as usual, record a
3440 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
3441 Next, we create the vector stmt VS1_1, and record a pointer to
3442 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
3443 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
3444 stmts and pointers:
3445 RELATED_STMT VEC_STMT
3446 VS1_0: vx0 = memref0 VS1_1 -
3447 VS1_1: vx1 = memref1 VS1_2 -
3448 VS1_2: vx2 = memref2 VS1_3 -
3449 VS1_3: vx3 = memref3 - -
3450 S1: x = load - VS1_0
3451 S2: z = x + 1 - -
3453 See in documentation in vect_get_vec_def_for_stmt_copy for how the
3454 information we recorded in RELATED_STMT field is used to vectorize
3455 stmt S2. */
3457 /* In case of interleaving (non-unit strided access):
3459 S1: x2 = &base + 2
3460 S2: x0 = &base
3461 S3: x1 = &base + 1
3462 S4: x3 = &base + 3
3464 Vectorized loads are created in the order of memory accesses
3465 starting from the access of the first stmt of the chain:
3467 VS1: vx0 = &base
3468 VS2: vx1 = &base + vec_size*1
3469 VS3: vx2 = &base + vec_size*2
3470 VS4: vx3 = &base + vec_size*3
3472 Then permutation statements are generated:
3474 VS5: vx5 = VEC_EXTRACT_EVEN_EXPR < vx0, vx1 >
3475 VS6: vx6 = VEC_EXTRACT_ODD_EXPR < vx0, vx1 >
3478 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3479 (the order of the data-refs in the output of vect_permute_load_chain
3480 corresponds to the order of scalar stmts in the interleaving chain - see
3481 the documentation of vect_permute_load_chain()).
3482 The generation of permutation stmts and recording them in
3483 STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().
3485 In case of both multiple types and interleaving, the vector loads and
3486 permutation stmts above are created for every copy. The result vector stmts
3487 are put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
3488 STMT_VINFO_RELATED_STMT for the next copies. */
3490 /* If the data reference is aligned (dr_aligned) or potentially unaligned
3491 on a target that supports unaligned accesses (dr_unaligned_supported)
3492 we generate the following code:
3493 p = initial_addr;
3494 indx = 0;
3495 loop {
3496 p = p + indx * vectype_size;
3497 vec_dest = *(p);
3498 indx = indx + 1;
3501 Otherwise, the data reference is potentially unaligned on a target that
3502 does not support unaligned accesses (dr_explicit_realign_optimized) -
3503 then generate the following code, in which the data in each iteration is
3504 obtained by two vector loads, one from the previous iteration, and one
3505 from the current iteration:
3506 p1 = initial_addr;
3507 msq_init = *(floor(p1))
3508 p2 = initial_addr + VS - 1;
3509 realignment_token = call target_builtin;
3510 indx = 0;
3511 loop {
3512 p2 = p2 + indx * vectype_size
3513 lsq = *(floor(p2))
3514 vec_dest = realign_load (msq, lsq, realignment_token)
3515 indx = indx + 1;
3516 msq = lsq;
3517 } */
3519 /* If the misalignment remains the same throughout the execution of the
3520 loop, we can create the init_addr and permutation mask at the loop
3521 preheader. Otherwise, it needs to be created inside the loop.
3522 This can only occur when vectorizing memory accesses in the inner-loop
3523 nested within an outer-loop that is being vectorized. */
3525 if (loop && nested_in_vect_loop_p (loop, stmt)
3526 && (TREE_INT_CST_LOW (DR_STEP (dr))
3527 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
3529 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
3530 compute_in_loop = true;
3533 if ((alignment_support_scheme == dr_explicit_realign_optimized
3534 || alignment_support_scheme == dr_explicit_realign)
3535 && !compute_in_loop)
3537 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
3538 alignment_support_scheme, NULL_TREE,
3539 &at_loop);
3540 if (alignment_support_scheme == dr_explicit_realign_optimized)
3542 phi = SSA_NAME_DEF_STMT (msq);
3543 offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
3546 else
3547 at_loop = loop;
3549 prev_stmt_info = NULL;
3550 for (j = 0; j < ncopies; j++)
3552 /* 1. Create the vector pointer update chain. */
3553 if (j == 0)
3554 dataref_ptr = vect_create_data_ref_ptr (first_stmt,
3555 at_loop, offset,
3556 &dummy, &ptr_incr, false,
3557 &inv_p);
3558 else
3559 dataref_ptr =
3560 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
3562 for (i = 0; i < vec_num; i++)
3564 if (i > 0)
3565 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3566 NULL_TREE);
3568 /* 2. Create the vector-load in the loop. */
3569 switch (alignment_support_scheme)
3571 case dr_aligned:
3572 gcc_assert (aligned_access_p (first_dr));
3573 data_ref = build_fold_indirect_ref (dataref_ptr);
3574 break;
3575 case dr_unaligned_supported:
3577 int mis = DR_MISALIGNMENT (first_dr);
3578 tree tmis = (mis == -1 ? size_zero_node : size_int (mis));
3580 tmis = size_binop (MULT_EXPR, tmis, size_int(BITS_PER_UNIT));
3581 data_ref =
3582 build2 (MISALIGNED_INDIRECT_REF, vectype, dataref_ptr, tmis);
3583 break;
3585 case dr_explicit_realign:
3587 tree ptr, bump;
3588 tree vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
3590 if (compute_in_loop)
3591 msq = vect_setup_realignment (first_stmt, gsi,
3592 &realignment_token,
3593 dr_explicit_realign,
3594 dataref_ptr, NULL);
3596 data_ref = build1 (ALIGN_INDIRECT_REF, vectype, dataref_ptr);
3597 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3598 new_stmt = gimple_build_assign (vec_dest, data_ref);
3599 new_temp = make_ssa_name (vec_dest, new_stmt);
3600 gimple_assign_set_lhs (new_stmt, new_temp);
3601 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
3602 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
3603 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3604 msq = new_temp;
3606 bump = size_binop (MULT_EXPR, vs_minus_1,
3607 TYPE_SIZE_UNIT (scalar_type));
3608 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
3609 data_ref = build1 (ALIGN_INDIRECT_REF, vectype, ptr);
3610 break;
3612 case dr_explicit_realign_optimized:
3613 data_ref = build1 (ALIGN_INDIRECT_REF, vectype, dataref_ptr);
3614 break;
3615 default:
3616 gcc_unreachable ();
3618 /* If accesses through a pointer to vectype do not alias the original
3619 memory reference we have a problem. This should never happen. */
3620 gcc_assert (alias_sets_conflict_p (get_alias_set (data_ref),
3621 get_alias_set (gimple_assign_rhs1 (stmt))));
3622 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3623 new_stmt = gimple_build_assign (vec_dest, data_ref);
3624 new_temp = make_ssa_name (vec_dest, new_stmt);
3625 gimple_assign_set_lhs (new_stmt, new_temp);
3626 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3627 mark_symbols_for_renaming (new_stmt);
3629 /* 3. Handle explicit realignment if necessary/supported. Create in
3630 loop: vec_dest = realign_load (msq, lsq, realignment_token) */
3631 if (alignment_support_scheme == dr_explicit_realign_optimized
3632 || alignment_support_scheme == dr_explicit_realign)
3634 tree tmp;
3636 lsq = gimple_assign_lhs (new_stmt);
3637 if (!realignment_token)
3638 realignment_token = dataref_ptr;
3639 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3640 tmp = build3 (REALIGN_LOAD_EXPR, vectype, msq, lsq,
3641 realignment_token);
3642 new_stmt = gimple_build_assign (vec_dest, tmp);
3643 new_temp = make_ssa_name (vec_dest, new_stmt);
3644 gimple_assign_set_lhs (new_stmt, new_temp);
3645 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3647 if (alignment_support_scheme == dr_explicit_realign_optimized)
3649 gcc_assert (phi);
3650 if (i == vec_num - 1 && j == ncopies - 1)
3651 add_phi_arg (phi, lsq, loop_latch_edge (containing_loop),
3652 UNKNOWN_LOCATION);
3653 msq = lsq;
3657 /* 4. Handle invariant-load. */
3658 if (inv_p && !bb_vinfo)
3660 gcc_assert (!strided_load);
3661 gcc_assert (nested_in_vect_loop_p (loop, stmt));
3662 if (j == 0)
3664 int k;
3665 tree t = NULL_TREE;
3666 tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);
3668 /* CHECKME: bitpos depends on endianness? */
3669 bitpos = bitsize_zero_node;
3670 vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
3671 bitsize, bitpos);
3672 vec_dest =
3673 vect_create_destination_var (scalar_dest, NULL_TREE);
3674 new_stmt = gimple_build_assign (vec_dest, vec_inv);
3675 new_temp = make_ssa_name (vec_dest, new_stmt);
3676 gimple_assign_set_lhs (new_stmt, new_temp);
3677 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3679 for (k = nunits - 1; k >= 0; --k)
3680 t = tree_cons (NULL_TREE, new_temp, t);
3681 /* FIXME: use build_constructor directly. */
3682 vec_inv = build_constructor_from_list (vectype, t);
3683 new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
3684 new_stmt = SSA_NAME_DEF_STMT (new_temp);
3686 else
3687 gcc_unreachable (); /* FORNOW. */
3690 /* Collect vector loads and later create their permutation in
3691 vect_transform_strided_load (). */
3692 if (strided_load || slp_perm)
3693 VEC_quick_push (tree, dr_chain, new_temp);
3695 /* Store vector loads in the corresponding SLP_NODE. */
3696 if (slp && !slp_perm)
3697 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
3700 if (slp && !slp_perm)
3701 continue;
3703 if (slp_perm)
3705 if (!vect_transform_slp_perm_load (stmt, dr_chain, gsi, vf,
3706 slp_node_instance, false))
3708 VEC_free (tree, heap, dr_chain);
3709 return false;
3712 else
3714 if (strided_load)
3716 if (!vect_transform_strided_load (stmt, dr_chain, group_size, gsi))
3717 return false;
3719 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3720 VEC_free (tree, heap, dr_chain);
3721 dr_chain = VEC_alloc (tree, heap, group_size);
3723 else
3725 if (j == 0)
3726 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3727 else
3728 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3729 prev_stmt_info = vinfo_for_stmt (new_stmt);
3734 if (dr_chain)
3735 VEC_free (tree, heap, dr_chain);
3737 return true;
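/* For illustration only (a standalone sketch, not part of this file): the
   "invariant load" handling in step 4 above -- the value does not change
   inside the vectorized loop, so it is loaded once and replicated into
   every lane, mirroring the CONSTRUCTOR of nunits copies of new_temp.
   vec4i and splat4 are hypothetical helpers, not GCC APIs.  */

#include <stdio.h>

typedef struct { int e[4]; } vec4i;

/* Build a vector whose four lanes all hold X.  */
static vec4i
splat4 (int x)
{
  vec4i v = { { x, x, x, x } };
  return v;
}

int
main (void)
{
  int inv = 7;                        /* loop-invariant scalar             */
  vec4i vec_inv = splat4 (inv);       /* one scalar load, one vector splat */

  for (int i = 0; i < 4; i++)
    printf ("%d ", vec_inv.e[i]);     /* prints: 7 7 7 7 */
  printf ("\n");
  return 0;
}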
3740 /* Function vect_is_simple_cond.
3742 Input:
3743 LOOP - the loop that is being vectorized.
3744 COND - Condition that is checked for simple use.
3746 Returns whether COND can be vectorized. Checks whether the
3747 condition's operands are supportable using vect_is_simple_use. */
3749 static bool
3750 vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo)
3752 tree lhs, rhs;
3753 tree def;
3754 enum vect_def_type dt;
3756 if (!COMPARISON_CLASS_P (cond))
3757 return false;
3759 lhs = TREE_OPERAND (cond, 0);
3760 rhs = TREE_OPERAND (cond, 1);
3762 if (TREE_CODE (lhs) == SSA_NAME)
3764 gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
3765 if (!vect_is_simple_use (lhs, loop_vinfo, NULL, &lhs_def_stmt, &def,
3766 &dt))
3767 return false;
3769 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
3770 && TREE_CODE (lhs) != FIXED_CST)
3771 return false;
3773 if (TREE_CODE (rhs) == SSA_NAME)
3775 gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
3776 if (!vect_is_simple_use (rhs, loop_vinfo, NULL, &rhs_def_stmt, &def,
3777 &dt))
3778 return false;
3780 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
3781 && TREE_CODE (rhs) != FIXED_CST)
3782 return false;
3784 return true;
3787 /* vectorizable_condition.
3789 Check if STMT is a conditional modify expression that can be vectorized.
3790 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3791 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
3792 at GSI.
3794 When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
3795 to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
3796 the else clause if it is 2).
3798 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3800 bool
3801 vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
3802 gimple *vec_stmt, tree reduc_def, int reduc_index)
3804 tree scalar_dest = NULL_TREE;
3805 tree vec_dest = NULL_TREE;
3806 tree op = NULL_TREE;
3807 tree cond_expr, then_clause, else_clause;
3808 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3809 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3810 tree vec_cond_lhs, vec_cond_rhs, vec_then_clause, vec_else_clause;
3811 tree vec_compare, vec_cond_expr;
3812 tree new_temp;
3813 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3814 enum machine_mode vec_mode;
3815 tree def;
3816 enum vect_def_type dt;
3817 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3818 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3819 enum tree_code code;
3821 /* FORNOW: unsupported in basic block SLP. */
3822 gcc_assert (loop_vinfo);
3824 gcc_assert (ncopies >= 1);
3825 if (ncopies > 1)
3826 return false; /* FORNOW */
3828 if (!STMT_VINFO_RELEVANT_P (stmt_info))
3829 return false;
3831 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
3832 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
3833 && reduc_def))
3834 return false;
3836 /* FORNOW: SLP not supported. */
3837 if (STMT_SLP_TYPE (stmt_info))
3838 return false;
3840 /* FORNOW: not yet supported. */
3841 if (STMT_VINFO_LIVE_P (stmt_info))
3843 if (vect_print_dump_info (REPORT_DETAILS))
3844 fprintf (vect_dump, "value used after loop.");
3845 return false;
3848 /* Is this a vectorizable conditional operation?
3849 if (!is_gimple_assign (stmt))
3850 return false;
3852 code = gimple_assign_rhs_code (stmt);
3854 if (code != COND_EXPR)
3855 return false;
3857 gcc_assert (gimple_assign_single_p (stmt));
3858 op = gimple_assign_rhs1 (stmt);
3859 cond_expr = TREE_OPERAND (op, 0);
3860 then_clause = TREE_OPERAND (op, 1);
3861 else_clause = TREE_OPERAND (op, 2);
3863 if (!vect_is_simple_cond (cond_expr, loop_vinfo))
3864 return false;
3866 /* We do not handle two different vector types for the condition
3867 and the values. */
3868 if (TREE_TYPE (TREE_OPERAND (cond_expr, 0)) != TREE_TYPE (vectype))
3869 return false;
3871 if (TREE_CODE (then_clause) == SSA_NAME)
3873 gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
3874 if (!vect_is_simple_use (then_clause, loop_vinfo, NULL,
3875 &then_def_stmt, &def, &dt))
3876 return false;
3878 else if (TREE_CODE (then_clause) != INTEGER_CST
3879 && TREE_CODE (then_clause) != REAL_CST
3880 && TREE_CODE (then_clause) != FIXED_CST)
3881 return false;
3883 if (TREE_CODE (else_clause) == SSA_NAME)
3885 gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
3886 if (!vect_is_simple_use (else_clause, loop_vinfo, NULL,
3887 &else_def_stmt, &def, &dt))
3888 return false;
3890 else if (TREE_CODE (else_clause) != INTEGER_CST
3891 && TREE_CODE (else_clause) != REAL_CST
3892 && TREE_CODE (else_clause) != FIXED_CST)
3893 return false;
3896 vec_mode = TYPE_MODE (vectype);
3898 if (!vec_stmt)
3900 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
3901 return expand_vec_cond_expr_p (TREE_TYPE (op), vec_mode);
3904 /* Transform */
3906 /* Handle def. */
3907 scalar_dest = gimple_assign_lhs (stmt);
3908 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3910 /* Handle cond expr. */
3911 vec_cond_lhs =
3912 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0), stmt, NULL);
3913 vec_cond_rhs =
3914 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1), stmt, NULL);
3915 if (reduc_index == 1)
3916 vec_then_clause = reduc_def;
3917 else
3918 vec_then_clause = vect_get_vec_def_for_operand (then_clause, stmt, NULL);
3919 if (reduc_index == 2)
3920 vec_else_clause = reduc_def;
3921 else
3922 vec_else_clause = vect_get_vec_def_for_operand (else_clause, stmt, NULL);
3924 /* Arguments are ready. Create the new vector stmt. */
3925 vec_compare = build2 (TREE_CODE (cond_expr), vectype,
3926 vec_cond_lhs, vec_cond_rhs);
3927 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
3928 vec_compare, vec_then_clause, vec_else_clause);
3930 *vec_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
3931 new_temp = make_ssa_name (vec_dest, *vec_stmt);
3932 gimple_assign_set_lhs (*vec_stmt, new_temp);
3933 vect_finish_stmt_generation (stmt, *vec_stmt, gsi);
3935 return true;
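/* For illustration only (a standalone sketch, not part of this file): the
   effect of vectorizable_condition in scalar C -- an element-wise compare
   feeding an element-wise select, i.e. the VEC_COND_EXPR built above.
   Four-element "vectors" are assumed; vec_cond_lt is a hypothetical helper.  */

#include <stdio.h>

#define N 4

/* dest[i] = (a[i] < b[i]) ? c[i] : d[i] for every lane, using a mask the
   way a vector compare + select would.  */
static void
vec_cond_lt (int *dest, const int *a, const int *b,
             const int *c, const int *d)
{
  for (int i = 0; i < N; i++)
    {
      int mask = -(a[i] < b[i]);                  /* all-ones or all-zeros */
      dest[i] = (mask & c[i]) | (~mask & d[i]);   /* per-lane select       */
    }
}

int
main (void)
{
  int a[N] = { 1, 5, 2, 9 }, b[N] = { 3, 3, 3, 3 };
  int c[N] = { 10, 20, 30, 40 }, d[N] = { -1, -2, -3, -4 };
  int r[N];

  vec_cond_lt (r, a, b, c, d);
  for (int i = 0; i < N; i++)
    printf ("%d ", r[i]);                         /* prints: 10 -2 30 -4 */
  printf ("\n");
  return 0;
}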
3939 /* Make sure the statement is vectorizable. */
3941 bool
3942 vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
3944 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3945 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3946 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
3947 bool ok;
3948 HOST_WIDE_INT dummy;
3949 tree scalar_type, vectype;
3951 if (vect_print_dump_info (REPORT_DETAILS))
3953 fprintf (vect_dump, "==> examining statement: ");
3954 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
3957 if (gimple_has_volatile_ops (stmt))
3959 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
3960 fprintf (vect_dump, "not vectorized: stmt has volatile operands");
3962 return false;
3965 /* Skip stmts that do not need to be vectorized. In loops this is expected
3966 to include:
3967 - the COND_EXPR which is the loop exit condition
3968 - any LABEL_EXPRs in the loop
3969 - computations that are used only for array indexing or loop control.
3970 In basic blocks we only analyze statements that are a part of some SLP
3971 instance, therefore, all the statements are relevant. */
3973 if (!STMT_VINFO_RELEVANT_P (stmt_info)
3974 && !STMT_VINFO_LIVE_P (stmt_info))
3976 if (vect_print_dump_info (REPORT_DETAILS))
3977 fprintf (vect_dump, "irrelevant.");
3979 return true;
3982 switch (STMT_VINFO_DEF_TYPE (stmt_info))
3984 case vect_internal_def:
3985 break;
3987 case vect_reduction_def:
3988 case vect_nested_cycle:
3989 gcc_assert (!bb_vinfo && (relevance == vect_used_in_outer
3990 || relevance == vect_used_in_outer_by_reduction
3991 || relevance == vect_unused_in_scope));
3992 break;
3994 case vect_induction_def:
3995 case vect_constant_def:
3996 case vect_external_def:
3997 case vect_unknown_def_type:
3998 default:
3999 gcc_unreachable ();
4002 if (bb_vinfo)
4004 gcc_assert (PURE_SLP_STMT (stmt_info));
4006 scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy);
4007 if (vect_print_dump_info (REPORT_DETAILS))
4009 fprintf (vect_dump, "get vectype for scalar type: ");
4010 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4013 vectype = get_vectype_for_scalar_type (scalar_type);
4014 if (!vectype)
4016 if (vect_print_dump_info (REPORT_DETAILS))
4018 fprintf (vect_dump, "not SLPed: unsupported data-type ");
4019 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4021 return false;
4024 if (vect_print_dump_info (REPORT_DETAILS))
4026 fprintf (vect_dump, "vectype: ");
4027 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4030 STMT_VINFO_VECTYPE (stmt_info) = vectype;
4033 if (STMT_VINFO_RELEVANT_P (stmt_info))
4035 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
4036 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
4037 *need_to_vectorize = true;
4040 ok = true;
4041 if (!bb_vinfo
4042 && (STMT_VINFO_RELEVANT_P (stmt_info)
4043 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
4044 ok = (vectorizable_type_promotion (stmt, NULL, NULL, NULL)
4045 || vectorizable_type_demotion (stmt, NULL, NULL, NULL)
4046 || vectorizable_conversion (stmt, NULL, NULL, NULL)
4047 || vectorizable_operation (stmt, NULL, NULL, NULL)
4048 || vectorizable_assignment (stmt, NULL, NULL, NULL)
4049 || vectorizable_load (stmt, NULL, NULL, NULL, NULL)
4050 || vectorizable_call (stmt, NULL, NULL)
4051 || vectorizable_store (stmt, NULL, NULL, NULL)
4052 || vectorizable_reduction (stmt, NULL, NULL)
4053 || vectorizable_condition (stmt, NULL, NULL, NULL, 0));
4054 else
4056 if (bb_vinfo)
4057 ok = (vectorizable_operation (stmt, NULL, NULL, node)
4058 || vectorizable_assignment (stmt, NULL, NULL, node)
4059 || vectorizable_load (stmt, NULL, NULL, node, NULL)
4060 || vectorizable_store (stmt, NULL, NULL, node));
4063 if (!ok)
4065 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4067 fprintf (vect_dump, "not vectorized: relevant stmt not ");
4068 fprintf (vect_dump, "supported: ");
4069 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4072 return false;
4075 if (bb_vinfo)
4076 return true;
4078 /* Stmts that are (also) "live" (i.e., used outside the loop)
4079 need extra handling, except for vectorizable reductions. */
4080 if (STMT_VINFO_LIVE_P (stmt_info)
4081 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4082 ok = vectorizable_live_operation (stmt, NULL, NULL);
4084 if (!ok)
4086 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4088 fprintf (vect_dump, "not vectorized: live stmt not ");
4089 fprintf (vect_dump, "supported: ");
4090 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4093 return false;
4096 if (!PURE_SLP_STMT (stmt_info))
4098 /* Groups of strided accesses whose size is not a power of 2 are not
4099 yet vectorizable using loop vectorization. Therefore, if this stmt
4100 feeds non-SLP-able stmts (i.e., this stmt has to be both SLPed and
4101 loop-based vectorized), the loop cannot be vectorized. */
4102 if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
4103 && exact_log2 (DR_GROUP_SIZE (vinfo_for_stmt (
4104 DR_GROUP_FIRST_DR (stmt_info)))) == -1)
4106 if (vect_print_dump_info (REPORT_DETAILS))
4108 fprintf (vect_dump, "not vectorized: the size of group "
4109 "of strided accesses is not a power of 2");
4110 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4113 return false;
4117 return true;
4121 /* Function vect_transform_stmt.
4123 Create a vectorized stmt to replace STMT, and insert it at BSI. */
4125 bool
4126 vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
4127 bool *strided_store, slp_tree slp_node,
4128 slp_instance slp_node_instance)
4130 bool is_store = false;
4131 gimple vec_stmt = NULL;
4132 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4133 gimple orig_stmt_in_pattern;
4134 bool done;
4136 switch (STMT_VINFO_TYPE (stmt_info))
4138 case type_demotion_vec_info_type:
4139 done = vectorizable_type_demotion (stmt, gsi, &vec_stmt, slp_node);
4140 gcc_assert (done);
4141 break;
4143 case type_promotion_vec_info_type:
4144 done = vectorizable_type_promotion (stmt, gsi, &vec_stmt, slp_node);
4145 gcc_assert (done);
4146 break;
4148 case type_conversion_vec_info_type:
4149 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
4150 gcc_assert (done);
4151 break;
4153 case induc_vec_info_type:
4154 gcc_assert (!slp_node);
4155 done = vectorizable_induction (stmt, gsi, &vec_stmt);
4156 gcc_assert (done);
4157 break;
4159 case op_vec_info_type:
4160 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
4161 gcc_assert (done);
4162 break;
4164 case assignment_vec_info_type:
4165 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
4166 gcc_assert (done);
4167 break;
4169 case load_vec_info_type:
4170 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
4171 slp_node_instance);
4172 gcc_assert (done);
4173 break;
4175 case store_vec_info_type:
4176 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
4177 gcc_assert (done);
4178 if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && !slp_node)
4180 /* In case of interleaving, the whole chain is vectorized when the
4181 last store in the chain is reached. Store stmts before the last
4182 one are skipped, and their vec_stmt_info shouldn't be freed
4183 meanwhile. */
4184 *strided_store = true;
4185 if (STMT_VINFO_VEC_STMT (stmt_info))
4186 is_store = true;
4188 else
4189 is_store = true;
4190 break;
4192 case condition_vec_info_type:
4193 gcc_assert (!slp_node);
4194 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0);
4195 gcc_assert (done);
4196 break;
4198 case call_vec_info_type:
4199 gcc_assert (!slp_node);
4200 done = vectorizable_call (stmt, gsi, &vec_stmt);
4201 break;
4203 case reduc_vec_info_type:
4204 gcc_assert (!slp_node);
4205 done = vectorizable_reduction (stmt, gsi, &vec_stmt);
4206 gcc_assert (done);
4207 break;
4209 default:
4210 if (!STMT_VINFO_LIVE_P (stmt_info))
4212 if (vect_print_dump_info (REPORT_DETAILS))
4213 fprintf (vect_dump, "stmt not supported.");
4214 gcc_unreachable ();
4218 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
4219 is being vectorized, but outside the immediately enclosing loop. */
4220 if (vec_stmt
4221 && STMT_VINFO_LOOP_VINFO (stmt_info)
4222 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
4223 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
4224 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
4225 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
4226 || STMT_VINFO_RELEVANT (stmt_info) ==
4227 vect_used_in_outer_by_reduction))
4229 struct loop *innerloop = LOOP_VINFO_LOOP (
4230 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
4231 imm_use_iterator imm_iter;
4232 use_operand_p use_p;
4233 tree scalar_dest;
4234 gimple exit_phi;
4236 if (vect_print_dump_info (REPORT_DETAILS))
4237 fprintf (vect_dump, "Record the vdef for outer-loop vectorization.");
4239 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
4240 (to be used when vectorizing outer-loop stmts that use the DEF of
4241 STMT). */
4242 if (gimple_code (stmt) == GIMPLE_PHI)
4243 scalar_dest = PHI_RESULT (stmt);
4244 else
4245 scalar_dest = gimple_assign_lhs (stmt);
4247 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4249 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
4251 exit_phi = USE_STMT (use_p);
4252 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
4257 /* Handle stmts whose DEF is used outside the loop-nest that is
4258 being vectorized. */
4259 if (STMT_VINFO_LIVE_P (stmt_info)
4260 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4262 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
4263 gcc_assert (done);
4266 if (vec_stmt)
4268 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
4269 orig_stmt_in_pattern = STMT_VINFO_RELATED_STMT (stmt_info);
4270 if (orig_stmt_in_pattern)
4272 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt_in_pattern);
4273 /* STMT was inserted by the vectorizer to replace a computation idiom.
4274 ORIG_STMT_IN_PATTERN is a stmt in the original sequence that
4275 computed this idiom. We need to record a pointer to VEC_STMT in
4276 the stmt_info of ORIG_STMT_IN_PATTERN. See more details in the
4277 documentation of vect_pattern_recog. */
4278 if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
4280 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
4281 STMT_VINFO_VEC_STMT (stmt_vinfo) = vec_stmt;
4286 return is_store;
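/* For illustration only (a standalone sketch, not part of this file): a
   loop nest with an inner-loop def used in the outer loop, the situation
   handled above by recording the vec_stmt at the inner-loop exit PHI
   during outer-loop vectorization.  */

#include <stdio.h>

int
main (void)
{
  int a[4][8], out[4];

  for (int i = 0; i < 4; i++)
    for (int j = 0; j < 8; j++)
      a[i][j] = i + j;

  for (int i = 0; i < 4; i++)        /* outer loop (the one vectorized)   */
    {
      int s = 0;
      for (int j = 0; j < 8; j++)    /* inner loop, executed sequentially */
        s += a[i][j];
      out[i] = s;                    /* def of s leaves the inner loop
                                        through its exit PHI              */
    }

  for (int i = 0; i < 4; i++)
    printf ("%d ", out[i]);          /* prints: 28 36 44 52 */
  printf ("\n");
  return 0;
}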
4290 /* Remove a group of stores (for SLP or interleaving) and free their
4291 stmt_vec_info. */
4293 void
4294 vect_remove_stores (gimple first_stmt)
4296 gimple next = first_stmt;
4297 gimple tmp;
4298 gimple_stmt_iterator next_si;
4300 while (next)
4302 /* Free the attached stmt_vec_info and remove the stmt. */
4303 next_si = gsi_for_stmt (next);
4304 gsi_remove (&next_si, true);
4305 tmp = DR_GROUP_NEXT_DR (vinfo_for_stmt (next));
4306 free_stmt_vec_info (next);
4307 next = tmp;
4312 /* Function new_stmt_vec_info.
4314 Create and initialize a new stmt_vec_info struct for STMT. */
4316 stmt_vec_info
4317 new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
4318 bb_vec_info bb_vinfo)
4320 stmt_vec_info res;
4321 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
4323 STMT_VINFO_TYPE (res) = undef_vec_info_type;
4324 STMT_VINFO_STMT (res) = stmt;
4325 STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
4326 STMT_VINFO_BB_VINFO (res) = bb_vinfo;
4327 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
4328 STMT_VINFO_LIVE_P (res) = false;
4329 STMT_VINFO_VECTYPE (res) = NULL;
4330 STMT_VINFO_VEC_STMT (res) = NULL;
4331 STMT_VINFO_IN_PATTERN_P (res) = false;
4332 STMT_VINFO_RELATED_STMT (res) = NULL;
4333 STMT_VINFO_DATA_REF (res) = NULL;
4335 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
4336 STMT_VINFO_DR_OFFSET (res) = NULL;
4337 STMT_VINFO_DR_INIT (res) = NULL;
4338 STMT_VINFO_DR_STEP (res) = NULL;
4339 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
4341 if (gimple_code (stmt) == GIMPLE_PHI
4342 && is_loop_header_bb_p (gimple_bb (stmt)))
4343 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
4344 else
4345 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
4347 STMT_VINFO_SAME_ALIGN_REFS (res) = VEC_alloc (dr_p, heap, 5);
4348 STMT_VINFO_INSIDE_OF_LOOP_COST (res) = 0;
4349 STMT_VINFO_OUTSIDE_OF_LOOP_COST (res) = 0;
4350 STMT_SLP_TYPE (res) = loop_vect;
4351 DR_GROUP_FIRST_DR (res) = NULL;
4352 DR_GROUP_NEXT_DR (res) = NULL;
4353 DR_GROUP_SIZE (res) = 0;
4354 DR_GROUP_STORE_COUNT (res) = 0;
4355 DR_GROUP_GAP (res) = 0;
4356 DR_GROUP_SAME_DR_STMT (res) = NULL;
4357 DR_GROUP_READ_WRITE_DEPENDENCE (res) = false;
4359 return res;
4363 /* Create the vector that holds the stmt_vec_info structs. */
4365 void
4366 init_stmt_vec_info_vec (void)
4368 gcc_assert (!stmt_vec_info_vec);
4369 stmt_vec_info_vec = VEC_alloc (vec_void_p, heap, 50);
4373 /* Free the vector that holds the stmt_vec_info structs. */
4375 void
4376 free_stmt_vec_info_vec (void)
4378 gcc_assert (stmt_vec_info_vec);
4379 VEC_free (vec_void_p, heap, stmt_vec_info_vec);
4383 /* Free stmt vectorization related info. */
4385 void
4386 free_stmt_vec_info (gimple stmt)
4388 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4390 if (!stmt_info)
4391 return;
4393 VEC_free (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
4394 set_vinfo_for_stmt (stmt, NULL);
4395 free (stmt_info);
4399 /* Function get_vectype_for_scalar_type.
4401 Returns the vector type corresponding to SCALAR_TYPE as supported
4402 by the target. */
4404 tree
4405 get_vectype_for_scalar_type (tree scalar_type)
4407 enum machine_mode inner_mode = TYPE_MODE (scalar_type);
4408 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
4409 int nunits;
4410 tree vectype;
4412 if (nbytes == 0 || nbytes >= UNITS_PER_SIMD_WORD (inner_mode))
4413 return NULL_TREE;
4415 /* We can't build a vector type of elements with alignment bigger than
4416 their size. */
4417 if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
4418 return NULL_TREE;
4420 /* FORNOW: Only a single vector size per mode (UNITS_PER_SIMD_WORD)
4421 is expected. */
4422 nunits = UNITS_PER_SIMD_WORD (inner_mode) / nbytes;
4424 vectype = build_vector_type (scalar_type, nunits);
4425 if (vect_print_dump_info (REPORT_DETAILS))
4427 fprintf (vect_dump, "get vectype with %d units of type ", nunits);
4428 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4431 if (!vectype)
4432 return NULL_TREE;
4434 if (vect_print_dump_info (REPORT_DETAILS))
4436 fprintf (vect_dump, "vectype: ");
4437 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4440 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
4441 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
4443 if (vect_print_dump_info (REPORT_DETAILS))
4444 fprintf (vect_dump, "mode not supported by target.");
4445 return NULL_TREE;
4448 return vectype;
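/* For illustration only (a standalone sketch, not part of this file): the
   unit count computed above, worked through for a few scalar sizes under
   the assumption of a 16-byte SIMD word (the real value comes from
   UNITS_PER_SIMD_WORD and is target-dependent).  */

#include <stdio.h>

int
main (void)
{
  unsigned int simd_word_bytes = 16;              /* assumed vector width      */
  unsigned int scalar_bytes[] = { 1, 2, 4, 8 };   /* char, short, int, double  */

  for (unsigned int i = 0; i < 4; i++)
    printf ("%u-byte scalar -> %u units per vector\n",
            scalar_bytes[i], simd_word_bytes / scalar_bytes[i]);
  /* 1 -> 16 (e.g. V16QI), 2 -> 8 (V8HI), 4 -> 4 (V4SI), 8 -> 2 (V2DF).  */
  return 0;
}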
4451 /* Function vect_is_simple_use.
4453 Input:
4454 LOOP_VINFO - the vect info of the loop that is being vectorized.
4455 BB_VINFO - the vect info of the basic block that is being vectorized.
4456 OPERAND - operand of a stmt in the loop or bb.
4457 DEF - the defining stmt in case OPERAND is an SSA_NAME.
4459 Returns whether a stmt with OPERAND can be vectorized.
4460 For loops, supportable operands are constants, loop invariants, and operands
4461 that are defined by the current iteration of the loop. Unsupportable
4462 operands are those that are defined by a previous iteration of the loop (as
4463 is the case in reduction/induction computations).
4464 For basic blocks, supportable operands are constants and bb invariants.
4465 For now, operands defined outside the basic block are not supported. */
4467 bool
4468 vect_is_simple_use (tree operand, loop_vec_info loop_vinfo,
4469 bb_vec_info bb_vinfo, gimple *def_stmt,
4470 tree *def, enum vect_def_type *dt)
4472 basic_block bb;
4473 stmt_vec_info stmt_vinfo;
4474 struct loop *loop = NULL;
4476 if (loop_vinfo)
4477 loop = LOOP_VINFO_LOOP (loop_vinfo);
4479 *def_stmt = NULL;
4480 *def = NULL_TREE;
4482 if (vect_print_dump_info (REPORT_DETAILS))
4484 fprintf (vect_dump, "vect_is_simple_use: operand ");
4485 print_generic_expr (vect_dump, operand, TDF_SLIM);
4488 if (TREE_CODE (operand) == INTEGER_CST || TREE_CODE (operand) == REAL_CST)
4490 *dt = vect_constant_def;
4491 return true;
4494 if (is_gimple_min_invariant (operand))
4496 *def = operand;
4497 *dt = vect_external_def;
4498 return true;
4501 if (TREE_CODE (operand) == PAREN_EXPR)
4503 if (vect_print_dump_info (REPORT_DETAILS))
4504 fprintf (vect_dump, "non-associatable copy.");
4505 operand = TREE_OPERAND (operand, 0);
4508 if (TREE_CODE (operand) != SSA_NAME)
4510 if (vect_print_dump_info (REPORT_DETAILS))
4511 fprintf (vect_dump, "not ssa-name.");
4512 return false;
4515 *def_stmt = SSA_NAME_DEF_STMT (operand);
4516 if (*def_stmt == NULL)
4518 if (vect_print_dump_info (REPORT_DETAILS))
4519 fprintf (vect_dump, "no def_stmt.");
4520 return false;
4523 if (vect_print_dump_info (REPORT_DETAILS))
4525 fprintf (vect_dump, "def_stmt: ");
4526 print_gimple_stmt (vect_dump, *def_stmt, 0, TDF_SLIM);
4529 /* An empty stmt is expected only in the case of a function argument
4530 (otherwise we expect a PHI node or a GIMPLE_ASSIGN). */
4531 if (gimple_nop_p (*def_stmt))
4533 *def = operand;
4534 *dt = vect_external_def;
4535 return true;
4538 bb = gimple_bb (*def_stmt);
4540 if ((loop && !flow_bb_inside_loop_p (loop, bb))
4541 || (!loop && bb != BB_VINFO_BB (bb_vinfo))
4542 || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
4543 *dt = vect_external_def;
4544 else
4546 stmt_vinfo = vinfo_for_stmt (*def_stmt);
4547 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
4550 if (*dt == vect_unknown_def_type)
4552 if (vect_print_dump_info (REPORT_DETAILS))
4553 fprintf (vect_dump, "Unsupported pattern.");
4554 return false;
4557 if (vect_print_dump_info (REPORT_DETAILS))
4558 fprintf (vect_dump, "type of def: %d.",*dt);
4560 switch (gimple_code (*def_stmt))
4562 case GIMPLE_PHI:
4563 *def = gimple_phi_result (*def_stmt);
4564 break;
4566 case GIMPLE_ASSIGN:
4567 *def = gimple_assign_lhs (*def_stmt);
4568 break;
4570 case GIMPLE_CALL:
4571 *def = gimple_call_lhs (*def_stmt);
4572 if (*def != NULL)
4573 break;
4574 /* FALLTHRU */
4575 default:
4576 if (vect_print_dump_info (REPORT_DETAILS))
4577 fprintf (vect_dump, "unsupported defining stmt: ");
4578 return false;
4581 return true;
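/* For illustration only (a compile-only sketch, not part of this file):
   the operand classes vect_is_simple_use accepts for a loop, annotated on
   a small C loop.  The def-type names in the comments are the
   vect_def_type values assigned above.  */

void
simple_use_example (int *a, int n, int k)
{
  int inv = k * 2;                   /* defined before the loop            */
  for (int i = 0; i < n; i++)
    a[i] = a[i] * 4                  /* 4:    vect_constant_def            */
           + inv                     /* inv:  vect_external_def            */
           + (a[i] >> 1);            /* shift result: vect_internal_def,
                                        defined in the current iteration   */
  /* By contrast, an operand carried in from a previous iteration (a
     reduction or induction PHI) is not a plain internal def and is
     handled by the reduction/induction code instead.  */
}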
4585 /* Function supportable_widening_operation
4587 Check whether an operation represented by the code CODE is a
4588 widening operation that is supported by the target platform in
4589 vector form (i.e., when operating on arguments of type VECTYPE).
4591 Widening operations we currently support are NOP (CONVERT), FLOAT
4592 and WIDEN_MULT. This function checks if these operations are supported
4593 by the target platform either directly (via vector tree-codes), or via
4594 target builtins.
4596 Output:
4597 - CODE1 and CODE2 are codes of vector operations to be used when
4598 vectorizing the operation, if available.
4599 - DECL1 and DECL2 are decls of target builtin functions to be used
4600 when vectorizing the operation, if available. In this case,
4601 CODE1 and CODE2 are CALL_EXPR.
4602 - MULTI_STEP_CVT determines the number of required intermediate steps in
4603 case of multi-step conversion (like char->short->int - in that case
4604 MULTI_STEP_CVT will be 1).
4605 - INTERM_TYPES contains the intermediate type required to perform the
4606 widening operation (short in the above example). */
4608 bool
4609 supportable_widening_operation (enum tree_code code, gimple stmt, tree vectype,
4610 tree *decl1, tree *decl2,
4611 enum tree_code *code1, enum tree_code *code2,
4612 int *multi_step_cvt,
4613 VEC (tree, heap) **interm_types)
4615 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4616 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
4617 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
4618 bool ordered_p;
4619 enum machine_mode vec_mode;
4620 enum insn_code icode1, icode2;
4621 optab optab1, optab2;
4622 tree type = gimple_expr_type (stmt);
4623 tree wide_vectype = get_vectype_for_scalar_type (type);
4624 enum tree_code c1, c2;
4626 /* The result of a vectorized widening operation usually requires two vectors
4627 (because the widened results do not fit in one vector). The
4628 vector results would normally be expected to be generated in the same
4629 order as in the original scalar computation, i.e. if 8 results are
4630 generated in each vector iteration, they are to be organized as follows:
4631 vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].
4633 However, in the special case that the result of the widening operation is
4634 used in a reduction computation only, the order doesn't matter (because
4635 when vectorizing a reduction we change the order of the computation).
4636 Some targets can take advantage of this and generate more efficient code.
4637 For example, targets like Altivec, that support widen_mult using a sequence
4638 of {mult_even,mult_odd} generate the following vectors:
4639 vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8].
4641 When vectorizing outer-loops, we execute the inner-loop sequentially
4642 (each vectorized inner-loop iteration contributes to VF outer-loop
4643 iterations in parallel). We therefore don't allow changing the order
4644 of the computation in the inner-loop during outer-loop vectorization (both result orderings are sketched after this function). */
4646 if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
4647 && !nested_in_vect_loop_p (vect_loop, stmt))
4648 ordered_p = false;
4649 else
4650 ordered_p = true;
4652 if (!ordered_p
4653 && code == WIDEN_MULT_EXPR
4654 && targetm.vectorize.builtin_mul_widen_even
4655 && targetm.vectorize.builtin_mul_widen_even (vectype)
4656 && targetm.vectorize.builtin_mul_widen_odd
4657 && targetm.vectorize.builtin_mul_widen_odd (vectype))
4659 if (vect_print_dump_info (REPORT_DETAILS))
4660 fprintf (vect_dump, "Unordered widening operation detected.");
4662 *code1 = *code2 = CALL_EXPR;
4663 *decl1 = targetm.vectorize.builtin_mul_widen_even (vectype);
4664 *decl2 = targetm.vectorize.builtin_mul_widen_odd (vectype);
4665 return true;
4668 switch (code)
4670 case WIDEN_MULT_EXPR:
4671 if (BYTES_BIG_ENDIAN)
4673 c1 = VEC_WIDEN_MULT_HI_EXPR;
4674 c2 = VEC_WIDEN_MULT_LO_EXPR;
4676 else
4678 c2 = VEC_WIDEN_MULT_HI_EXPR;
4679 c1 = VEC_WIDEN_MULT_LO_EXPR;
4681 break;
4683 CASE_CONVERT:
4684 if (BYTES_BIG_ENDIAN)
4686 c1 = VEC_UNPACK_HI_EXPR;
4687 c2 = VEC_UNPACK_LO_EXPR;
4689 else
4691 c2 = VEC_UNPACK_HI_EXPR;
4692 c1 = VEC_UNPACK_LO_EXPR;
4694 break;
4696 case FLOAT_EXPR:
4697 if (BYTES_BIG_ENDIAN)
4699 c1 = VEC_UNPACK_FLOAT_HI_EXPR;
4700 c2 = VEC_UNPACK_FLOAT_LO_EXPR;
4702 else
4704 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
4705 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
4707 break;
4709 case FIX_TRUNC_EXPR:
4710 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
4711 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
4712 computing the operation. */
4713 return false;
4715 default:
4716 gcc_unreachable ();
4719 if (code == FIX_TRUNC_EXPR)
4721 /* The signedness is determined from the output operand. */
4722 optab1 = optab_for_tree_code (c1, type, optab_default);
4723 optab2 = optab_for_tree_code (c2, type, optab_default);
4725 else
4727 optab1 = optab_for_tree_code (c1, vectype, optab_default);
4728 optab2 = optab_for_tree_code (c2, vectype, optab_default);
4731 if (!optab1 || !optab2)
4732 return false;
4734 vec_mode = TYPE_MODE (vectype);
4735 if ((icode1 = optab_handler (optab1, vec_mode)->insn_code) == CODE_FOR_nothing
4736 || (icode2 = optab_handler (optab2, vec_mode)->insn_code)
4737 == CODE_FOR_nothing)
4738 return false;
4740 /* Check if it's a multi-step conversion that can be done using intermediate
4741 types. */
4742 if (insn_data[icode1].operand[0].mode != TYPE_MODE (wide_vectype)
4743 || insn_data[icode2].operand[0].mode != TYPE_MODE (wide_vectype))
4745 int i;
4746 tree prev_type = vectype, intermediate_type;
4747 enum machine_mode intermediate_mode, prev_mode = vec_mode;
4748 optab optab3, optab4;
4750 if (!CONVERT_EXPR_CODE_P (code))
4751 return false;
4753 *code1 = c1;
4754 *code2 = c2;
4756 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
4757 intermediate steps in the promotion sequence. We try MAX_INTERM_CVT_STEPS
4758 to get to WIDE_VECTYPE, and fail if we do not. */
4759 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
4760 for (i = 0; i < 3; i++)
4762 intermediate_mode = insn_data[icode1].operand[0].mode;
4763 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
4764 TYPE_UNSIGNED (prev_type));
4765 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
4766 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
4768 if (!optab3 || !optab4
4769 || (icode1 = optab1->handlers[(int) prev_mode].insn_code)
4770 == CODE_FOR_nothing
4771 || insn_data[icode1].operand[0].mode != intermediate_mode
4772 || (icode2 = optab2->handlers[(int) prev_mode].insn_code)
4773 == CODE_FOR_nothing
4774 || insn_data[icode2].operand[0].mode != intermediate_mode
4775 || (icode1 = optab3->handlers[(int) intermediate_mode].insn_code)
4776 == CODE_FOR_nothing
4777 || (icode2 = optab4->handlers[(int) intermediate_mode].insn_code)
4778 == CODE_FOR_nothing)
4779 return false;
4781 VEC_quick_push (tree, *interm_types, intermediate_type);
4782 (*multi_step_cvt)++;
4784 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
4785 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
4786 return true;
4788 prev_type = intermediate_type;
4789 prev_mode = intermediate_mode;
4792 return false;
4795 *code1 = c1;
4796 *code2 = c2;
4797 return true;
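/* For illustration only (a standalone sketch, not part of this file): the
   two result orderings discussed above for a widening multiply of eight
   16-bit elements into two vectors of four 32-bit results.  Plain C
   arrays stand in for the vectors.  */

#include <stdio.h>

int
main (void)
{
  short a[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
  short b[8] = { 2, 2, 2, 2, 2, 2, 2, 2 };
  int hi_lo[2][4], even_odd[2][4];

  /* VEC_WIDEN_MULT_LO/HI keep the original order:
     vect1 = [res1,res2,res3,res4], vect2 = [res5,res6,res7,res8].  */
  for (int i = 0; i < 4; i++)
    {
      hi_lo[0][i] = (int) a[i] * b[i];
      hi_lo[1][i] = (int) a[i + 4] * b[i + 4];
    }

  /* mult_even/mult_odd builtins interleave the results:
     vect1 = [res1,res3,res5,res7], vect2 = [res2,res4,res6,res8],
     which is acceptable only when the results feed a reduction.  */
  for (int i = 0; i < 4; i++)
    {
      even_odd[0][i] = (int) a[2 * i] * b[2 * i];
      even_odd[1][i] = (int) a[2 * i + 1] * b[2 * i + 1];
    }

  for (int v = 0; v < 2; v++)
    {
      for (int i = 0; i < 4; i++)
        printf ("%d ", hi_lo[v][i]);
      printf (" |  ");
      for (int i = 0; i < 4; i++)
        printf ("%d ", even_odd[v][i]);
      printf ("\n");
    }
  return 0;
}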
4801 /* Function supportable_narrowing_operation
4803 Check whether an operation represented by the code CODE is a
4804 narrowing operation that is supported by the target platform in
4805 vector form (i.e., when operating on arguments of type VECTYPE).
4807 Narrowing operations we currently support are NOP (CONVERT) and
4808 FIX_TRUNC. This function checks if these operations are supported by
4809 the target platform directly via vector tree-codes.
4811 Output:
4812 - CODE1 is the code of a vector operation to be used when
4813 vectorizing the operation, if available.
4814 - MULTI_STEP_CVT determines the number of required intermediate steps in
4815 case of multi-step conversion (like int->short->char - in that case
4816 MULTI_STEP_CVT will be 1).
4817 - INTERM_TYPES contains the intermediate type required to perform the
4818 narrowing operation (short in the above example). */
4820 bool
4821 supportable_narrowing_operation (enum tree_code code,
4822 const_gimple stmt, tree vectype,
4823 enum tree_code *code1, int *multi_step_cvt,
4824 VEC (tree, heap) **interm_types)
4826 enum machine_mode vec_mode;
4827 enum insn_code icode1;
4828 optab optab1, interm_optab;
4829 tree type = gimple_expr_type (stmt);
4830 tree narrow_vectype = get_vectype_for_scalar_type (type);
4831 enum tree_code c1;
4832 tree intermediate_type, prev_type;
4833 int i;
4835 switch (code)
4837 CASE_CONVERT:
4838 c1 = VEC_PACK_TRUNC_EXPR;
4839 break;
4841 case FIX_TRUNC_EXPR:
4842 c1 = VEC_PACK_FIX_TRUNC_EXPR;
4843 break;
4845 case FLOAT_EXPR:
4846 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
4847 tree code and optabs used for computing the operation. */
4848 return false;
4850 default:
4851 gcc_unreachable ();
4854 if (code == FIX_TRUNC_EXPR)
4855 /* The signedness is determined from the output operand. */
4856 optab1 = optab_for_tree_code (c1, type, optab_default);
4857 else
4858 optab1 = optab_for_tree_code (c1, vectype, optab_default);
4860 if (!optab1)
4861 return false;
4863 vec_mode = TYPE_MODE (vectype);
4864 if ((icode1 = optab_handler (optab1, vec_mode)->insn_code)
4865 == CODE_FOR_nothing)
4866 return false;
4868 /* Check if it's a multi-step conversion that can be done using intermediate
4869 types. */
4870 if (insn_data[icode1].operand[0].mode != TYPE_MODE (narrow_vectype))
4872 enum machine_mode intermediate_mode, prev_mode = vec_mode;
4874 *code1 = c1;
4875 prev_type = vectype;
4876 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
4877 intermediate steps in the narrowing sequence. We try MAX_INTERM_CVT_STEPS
4878 to get to NARROW_VECTYPE, and fail if we do not. */
4879 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
4880 for (i = 0; i < 3; i++)
4882 intermediate_mode = insn_data[icode1].operand[0].mode;
4883 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
4884 TYPE_UNSIGNED (prev_type));
4885 interm_optab = optab_for_tree_code (c1, intermediate_type,
4886 optab_default);
4887 if (!interm_optab
4888 || (icode1 = optab1->handlers[(int) prev_mode].insn_code)
4889 == CODE_FOR_nothing
4890 || insn_data[icode1].operand[0].mode != intermediate_mode
4891 || (icode1
4892 = interm_optab->handlers[(int) intermediate_mode].insn_code)
4893 == CODE_FOR_nothing)
4894 return false;
4896 VEC_quick_push (tree, *interm_types, intermediate_type);
4897 (*multi_step_cvt)++;
4899 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
4900 return true;
4902 prev_type = intermediate_type;
4903 prev_mode = intermediate_mode;
4906 return false;
4909 *code1 = c1;
4910 return true;