/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "target.h"
#include "basic-block.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "cfgloop.h"
#include "cfglayout.h"
#include "expr.h"
#include "recog.h"
#include "optabs.h"
#include "toplev.h"
#include "tree-vectorizer.h"
#include "langhooks.h"

/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt,
                    enum vect_relevant relevant, bool live_p)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "mark relevant %d, live %d.", relevant, live_p);

  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      gimple pattern_stmt;

      /* This is the last stmt in a sequence that was detected as a
         pattern that can potentially be vectorized.  Don't mark the stmt
         as relevant/live because it's not going to be vectorized.
         Instead mark the pattern-stmt that replaces it.  */

      pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "last stmt in pattern. don't mark relevant/live.");
      stmt_info = vinfo_for_stmt (pattern_stmt);
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
      save_relevant = STMT_VINFO_RELEVANT (stmt_info);
      save_live_p = STMT_VINFO_LIVE_P (stmt_info);
      stmt = pattern_stmt;
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "already marked relevant/live.");
      return;
    }

  VEC_safe_push (gimple, heap, *worklist, stmt);
}

/* Function vect_stmt_relevant_p.

   Return true if STMT in loop that is represented by LOOP_VINFO is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - control stmts in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */
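
/* For example, in

     for (i = 0; i < N; i++)
       {
         sum += a[i];      <-- the final value of SUM is used after the loop
         b[i] = c[i];      <-- alters memory (has a vdef)
       }

   both stmts are relevant, while the increment of the loop counter is not:
   loop control and addressing are handled separately (see
   vect_mark_stmts_to_be_vectorized below).  */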

static bool
vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
                      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
         != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt))
      {
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "vec_stmt_relevant_p: stmt has vdefs.");
        *relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
        {
          basic_block bb = gimple_bb (USE_STMT (use_p));
          if (!flow_bb_inside_loop_p (loop, bb))
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "vec_stmt_relevant_p: used out of loop.");

              if (is_gimple_debug (USE_STMT (use_p)))
                continue;

              /* We expect all such uses to be in the loop exit phis
                 (because of loop closed form)  */
              gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
              gcc_assert (bb == single_exit (loop)->dest);

              *live_p = true;
            }
        }
    }

  return (*live_p || *relevant);
}

/* Function exist_non_indexing_operands_for_use_p.

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref.  FORNOW this means that it is of one of
     the following forms:
     -1- ARRAY_REF = var
     -2- var = ARRAY_REF
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */
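
  /* E.g., for the store "a[i_1] = x_2" this returns true for USE == x_2
     (the var being stored) and false for USE == i_1, which only feeds the
     address computation of the ARRAY_REF.  */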

  if (!gimple_assign_copy_p (stmt))
    return false;
  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}

/* Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
       STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array
     indexing), which does not need to be directly vectorized, then the
     liveness/relevance of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
     skip DEF_STMT because it has already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
     be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */

static bool
process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
             enum vect_relevant relevant, VEC(gimple,heap) **worklist)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, loop_vinfo, NULL, &def_stmt, &def, &dt))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: unsupported use in stmt.");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "def_stmt is out of loop.");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "reduc-stmt defining reduc-phi in the same nest.");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
        dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
                  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
        outer-loop-header-bb:
                d = def_stmt
        inner-loop:
                stmt # use (d)
        outer-loop-tail-bb:
                ...               */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "outer-loop def-stmt defining inner-loop stmt.");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
                      vect_used_in_scope : vect_unused_in_scope;
          break;

        case vect_used_in_outer_by_reduction:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_by_reduction;
          break;

        case vect_used_in_outer:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_in_scope;
          break;

        case vect_used_in_scope:
          break;

        default:
          gcc_unreachable ();
        }
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
        outer-loop-header-bb:
                ...
        inner-loop:
                d = def_stmt
        outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
                stmt # use (d)          */
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "inner-loop def-stmt defining outer-loop stmt.");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
            || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
                      vect_used_in_outer_by_reduction : vect_unused_in_scope;
          break;

        case vect_used_by_reduction:
          relevant = vect_used_in_outer_by_reduction;
          break;

        case vect_used_in_scope:
          relevant = vect_used_in_outer;
          break;

        default:
          gcc_unreachable ();
        }
    }

  vect_mark_relevant (worklist, def_stmt, relevant, live_p);
  return true;
}

/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.    T1 = a[T0]

   3.    j = j + 1

   Stmts 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */

bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  VEC(gimple,heap) *worklist;
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple stmt;
  unsigned int i;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple phi;
  bool live_p;
  enum vect_relevant relevant, tmp_relevant;
  enum vect_def_type def_type;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_mark_stmts_to_be_vectorized ===");

  worklist = VEC_alloc (gimple, heap, 64);

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "init: phi relevant? ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }

          if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, phi, relevant, live_p);
        }
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          stmt = gsi_stmt (si);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "init: stmt relevant? ");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }

          if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, stmt, relevant, live_p);
        }
    }

  /* 2. Process_worklist  */
  while (VEC_length (gimple, worklist) > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = VEC_pop (gimple, worklist);
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "worklist: examine stmt: ");
          print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
        }

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines
         it (DEF_STMT) as relevant/irrelevant and live/dead according to the
         liveness and relevance properties of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
      live_p = STMT_VINFO_LIVE_P (stmt_vinfo);

      /* Generally, the liveness and relevance properties of STMT are
         propagated as is to the DEF_STMTs of its USEs:
          live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
          relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)

         One exception is when STMT has been identified as defining a
         reduction variable; in this case we set the liveness/relevance
         as follows:
           live_p = false
           relevant = vect_used_by_reduction
         This is because we distinguish between two kinds of relevant stmts -
         those that are used by a reduction computation, and those that are
         (also) used by a regular computation.  This allows us later on to
         identify stmts that are used solely by a reduction, and therefore the
         order of the results that they produce does not have to be kept.  */

      def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
      tmp_relevant = relevant;
      switch (def_type)
        {
        case vect_reduction_def:
          switch (tmp_relevant)
            {
            case vect_unused_in_scope:
              relevant = vect_used_by_reduction;
              break;

            case vect_used_by_reduction:
              if (gimple_code (stmt) == GIMPLE_PHI)
                break;
              /* fall through */

            default:
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "unsupported use of reduction.");

              VEC_free (gimple, heap, worklist);
              return false;
            }

          live_p = false;
          break;

        case vect_nested_cycle:
          if (tmp_relevant != vect_unused_in_scope
              && tmp_relevant != vect_used_in_outer_by_reduction
              && tmp_relevant != vect_used_in_outer)
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "unsupported use of nested cycle.");

              VEC_free (gimple, heap, worklist);
              return false;
            }

          live_p = false;
          break;

        case vect_double_reduction_def:
          if (tmp_relevant != vect_unused_in_scope
              && tmp_relevant != vect_used_by_reduction)
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "unsupported use of double reduction.");

              VEC_free (gimple, heap, worklist);
              return false;
            }

          live_p = false;
          break;

        default:
          break;
        }

      FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
        {
          tree op = USE_FROM_PTR (use_p);
          if (!process_use (stmt, op, loop_vinfo, live_p, relevant, &worklist))
            {
              VEC_free (gimple, heap, worklist);
              return false;
            }
        }
    } /* while worklist */

  VEC_free (gimple, heap, worklist);
  return true;
}

int
cost_for_stmt (gimple stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (STMT_VINFO_TYPE (stmt_info))
    {
    case load_vec_info_type:
      return TARG_SCALAR_LOAD_COST;
    case store_vec_info_type:
      return TARG_SCALAR_STORE_COST;
    case op_vec_info_type:
    case condition_vec_info_type:
    case assignment_vec_info_type:
    case reduc_vec_info_type:
    case induc_vec_info_type:
    case type_promotion_vec_info_type:
    case type_demotion_vec_info_type:
    case type_conversion_vec_info_type:
    case call_vec_info_type:
      return TARG_SCALAR_STMT_COST;
    case undef_vec_info_type:
    default:
      gcc_unreachable ();
    }
}

/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
                        enum vect_def_type *dt, slp_tree slp_node)
{
  int i;
  int inside_cost = 0, outside_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  inside_cost = ncopies * TARG_VEC_STMT_COST;

  /* FORNOW: Assuming maximum 2 args per stmt.  */
  for (i = 0; i < 2; i++)
    {
      if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
        outside_cost += TARG_SCALAR_TO_VEC_COST;
    }

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, "
             "outside_cost = %d .", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}

/* Function vect_cost_strided_group_size

   For strided load or store, return the group_size only if it is the first
   load or store of a group, else return 1.  This ensures that group size is
   only returned once per group.  */
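
/* E.g., in an interleaved group of three stores S1, S2, S3 in which S1 is
   the DR_GROUP_FIRST_DR, this returns 3 for S1 and 1 for S2 and S3, so the
   cost of the strided access is attributed to the group exactly once.  */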

static int
vect_cost_strided_group_size (stmt_vec_info stmt_info)
{
  gimple first_stmt = DR_GROUP_FIRST_DR (stmt_info);

  if (first_stmt == STMT_VINFO_STMT (stmt_info))
    return DR_GROUP_SIZE (stmt_info);

  return 1;
}

/* Function vect_model_store_cost

   Models cost for stores.  In the case of strided accesses, one access
   has the overhead of the strided access attributed to it.  */

void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
                       enum vect_def_type dt, slp_tree slp_node)
{
  int group_size;
  int inside_cost = 0, outside_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (dt == vect_constant_def || dt == vect_external_def)
    outside_cost = TARG_SCALAR_TO_VEC_COST;

  /* Strided access?  */
  if (DR_GROUP_FIRST_DR (stmt_info) && !slp_node)
    group_size = vect_cost_strided_group_size (stmt_info);
  /* Not a strided access.  */
  else
    group_size = 1;

  /* Is this an access in a group of stores, which provide strided access?
     If so, add in the cost of the permutes.  */
  if (group_size > 1)
    {
      /* Uses a high and low interleave operation for each needed permute.  */
      inside_cost = ncopies * exact_log2 (group_size) * group_size
        * TARG_VEC_STMT_COST;
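
      /* E.g., a group of 4 stores needs exact_log2 (4) * 4 = 8 interleave
         stmts per copy: two stages of high/low interleaving, each stage
         producing GROUP_SIZE vectors.  */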

      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .",
                 group_size);
    }

  /* Costs of the stores.  */
  inside_cost += ncopies * TARG_VEC_STORE_COST;

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, "
             "outside_cost = %d .", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}

/* Function vect_model_load_cost

   Models cost for loads.  In the case of strided accesses, the last access
   has the overhead of the strided access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)
{
  int group_size;
  int alignment_support_scheme;
  gimple first_stmt;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
  int inside_cost = 0, outside_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* Strided accesses?  */
  first_stmt = DR_GROUP_FIRST_DR (stmt_info);
  if (first_stmt && !slp_node)
    {
      group_size = vect_cost_strided_group_size (stmt_info);
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a strided access.  */
  else
    {
      group_size = 1;
      first_dr = dr;
    }

  alignment_support_scheme = vect_supportable_dr_alignment (first_dr);

  /* Is this an access in a group of loads providing strided access?
     If so, add in the cost of the permutes.  */
  if (group_size > 1)
    {
      /* Uses an even and odd extract operation for each needed permute.  */
      inside_cost = ncopies * exact_log2 (group_size) * group_size
        * TARG_VEC_STMT_COST;

      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .",
                 group_size);
    }

  /* The loads themselves.  */
  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
        inside_cost += ncopies * TARG_VEC_LOAD_COST;

        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_load_cost: aligned.");

        break;
      }
    case dr_unaligned_supported:
      {
        /* Here, we assign an additional cost for the unaligned load.  */
        inside_cost += ncopies * TARG_VEC_UNALIGNED_LOAD_COST;

        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_load_cost: unaligned supported by "
                   "hardware.");

        break;
      }
    case dr_explicit_realign:
      {
        inside_cost += ncopies * (2 * TARG_VEC_LOAD_COST
                                  + TARG_VEC_STMT_COST);

        /* FIXME: If the misalignment remains fixed across the iterations of
           the containing loop, the following cost should be added to the
           outside costs.  */
        if (targetm.vectorize.builtin_mask_for_load)
          inside_cost += TARG_VEC_STMT_COST;

        break;
      }
    case dr_explicit_realign_optimized:
      {
        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_load_cost: unaligned software "
                   "pipelined.");

        /* Unaligned software pipeline has a load of an address, an initial
           load, and possibly a mask operation to "prime" the loop.  However,
           if this is an access in a group of loads, which provide strided
           access, then the above cost should only be considered for one
           access in the group.  Inside the loop, there is a load op
           and a realignment op.  */

        if ((!DR_GROUP_FIRST_DR (stmt_info)) || group_size > 1 || slp_node)
          {
            outside_cost = 2 * TARG_VEC_STMT_COST;
            if (targetm.vectorize.builtin_mask_for_load)
              outside_cost += TARG_VEC_STMT_COST;
          }

        inside_cost += ncopies * (TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST);

        break;
      }

    default:
      gcc_unreachable ();
    }

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
             "outside_cost = %d .", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}

/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new vector variable with
   the vector elements of VECTOR_VAR.  Place the initialization at BSI if it
   is not NULL.  Otherwise, place the initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */

tree
vect_init_vector (gimple stmt, tree vector_var, tree vector_type,
                  gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  tree new_var;
  gimple init_stmt;
  tree vec_oprnd;
  edge pe;
  tree new_temp;
  basic_block new_bb;

  new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
  add_referenced_var (new_var);
  init_stmt = gimple_build_assign (new_var, vector_var);
  new_temp = make_ssa_name (new_var, init_stmt);
  gimple_assign_set_lhs (init_stmt, new_temp);

  if (gsi)
    vect_finish_stmt_generation (stmt, init_stmt, gsi);
  else
    {
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
        {
          struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

          if (nested_in_vect_loop_p (loop, stmt))
            loop = loop->inner;

          pe = loop_preheader_edge (loop);
          new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
          gcc_assert (!new_bb);
        }
      else
        {
          bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
          basic_block bb;
          gimple_stmt_iterator gsi_bb_start;

          gcc_assert (bb_vinfo);
          bb = BB_VINFO_BB (bb_vinfo);
          gsi_bb_start = gsi_after_labels (bb);
          gsi_insert_before (&gsi_bb_start, init_stmt, GSI_SAME_STMT);
        }
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "created new init_stmt: ");
      print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
    }

  vec_oprnd = gimple_assign_lhs (init_stmt);
  return vec_oprnd;
}

/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT.  This function returns a (vector) def that will
   be used in the vectorized stmt for STMT.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector
   def needs to be introduced.  */

tree
vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
{
  tree vec_oprnd;
  gimple vec_stmt;
  gimple def_stmt;
  stmt_vec_info def_stmt_info = NULL;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
  unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  tree vec_inv;
  tree vec_cst;
  tree t = NULL_TREE;
  tree def;
  int i;
  enum vect_def_type dt;
  bool is_simple_use;
  tree vector_type;

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vect_get_vec_def_for_operand: ");
      print_generic_expr (vect_dump, op, TDF_SLIM);
    }

  is_simple_use = vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def,
                                      &dt);
  gcc_assert (is_simple_use);
  if (vect_print_dump_info (REPORT_DETAILS))
    {
      if (def)
        {
          fprintf (vect_dump, "def = ");
          print_generic_expr (vect_dump, def, TDF_SLIM);
        }
      if (def_stmt)
        {
          fprintf (vect_dump, " def_stmt = ");
          print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
        }
    }

  switch (dt)
    {
    /* Case 1: operand is a constant.  */
    case vect_constant_def:
      {
        vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
        gcc_assert (vector_type);

        if (scalar_def)
          *scalar_def = op;

        /* Create 'vect_cst_ = {cst,cst,...,cst}'  */
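        /* E.g., for a V4SI vectype and op == 3 this builds {3,3,3,3}.  */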
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits);

        for (i = nunits - 1; i >= 0; --i)
          {
            t = tree_cons (NULL_TREE, op, t);
          }
        vec_cst = build_vector (vector_type, t);
        return vect_init_vector (stmt, vec_cst, vector_type, NULL);
      }

    /* Case 2: operand is defined outside the loop - loop invariant.  */
    case vect_external_def:
      {
        vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
        gcc_assert (vector_type);
        nunits = TYPE_VECTOR_SUBPARTS (vector_type);

        if (scalar_def)
          *scalar_def = def;

        /* Create 'vec_inv = {inv,inv,..,inv}'  */
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "Create vector_inv.");

        for (i = nunits - 1; i >= 0; --i)
          {
            t = tree_cons (NULL_TREE, def, t);
          }

        /* FIXME: use build_constructor directly.  */
        vec_inv = build_constructor_from_list (vector_type, t);
        return vect_init_vector (stmt, vec_inv, vector_type, NULL);
      }

    /* Case 3: operand is defined inside the loop.  */
    case vect_internal_def:
      {
        if (scalar_def)
          *scalar_def = NULL/* FIXME tuples: def_stmt*/;

        /* Get the def from the vectorized stmt.  */
        def_stmt_info = vinfo_for_stmt (def_stmt);
        vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
        gcc_assert (vec_stmt);
        if (gimple_code (vec_stmt) == GIMPLE_PHI)
          vec_oprnd = PHI_RESULT (vec_stmt);
        else if (is_gimple_call (vec_stmt))
          vec_oprnd = gimple_call_lhs (vec_stmt);
        else
          vec_oprnd = gimple_assign_lhs (vec_stmt);
        return vec_oprnd;
      }

    /* Case 4: operand is defined by a loop header phi - reduction  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
      {
        struct loop *loop;

        gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
        loop = (gimple_bb (def_stmt))->loop_father;

        /* Get the def before the loop  */
        op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
        return get_initial_def_for_reduction (stmt, op, scalar_def);
      }

    /* Case 5: operand is defined by loop-header phi - induction.  */
    case vect_induction_def:
      {
        gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);

        /* Get the def from the vectorized stmt.  */
        def_stmt_info = vinfo_for_stmt (def_stmt);
        vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
        gcc_assert (vec_stmt && gimple_code (vec_stmt) == GIMPLE_PHI);
        vec_oprnd = PHI_RESULT (vec_stmt);
        return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}

/* Function vect_get_vec_def_for_stmt_copy

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT
   field of the stmt that defines VEC_OPRND.
   DT is the type of the vector def VEC_OPRND.

   Context:
        In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of
   the vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:         vectorized into:        STMT_VINFO_RELATED_STMT

   S1: x = load         VS1.0:  vx.0 = memref0      VS1.1
                        VS1.1:  vx.1 = memref1      VS1.2
                        VS1.2:  vx.2 = memref2      VS1.3
                        VS1.3:  vx.3 = memref3

   S2: z = x + ...      VSnew.0:  vz0 = vx.0 + ...  VSnew.1
                        VSnew.1:  vz1 = vx.1 + ...  VSnew.2
                        VSnew.2:  vz2 = vx.2 + ...  VSnew.3
                        VSnew.3:  vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
        To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

        To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

        For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
        vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
        vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
        vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2);  */

tree
vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
{
  gimple vec_stmt_for_operand;
  stmt_vec_info def_stmt_info;

  /* Do nothing; can reuse same def.  */
  if (dt == vect_external_def || dt == vect_constant_def)
    return vec_oprnd;

  vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
  def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
  gcc_assert (def_stmt_info);
  vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (vec_stmt_for_operand);
  if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
    vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
  else
    vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  return vec_oprnd;
}

/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy() for details.  */

static void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
                                 VEC(tree,heap) **vec_oprnds0,
                                 VEC(tree,heap) **vec_oprnds1)
{
  tree vec_oprnd = VEC_pop (tree, *vec_oprnds0);

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);

  if (vec_oprnds1 && *vec_oprnds1)
    {
      vec_oprnd = VEC_pop (tree, *vec_oprnds1);
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
    }
}

/* Get vectorized definitions for OP0 and OP1, or SLP_NODE if it is not
   NULL.  */

static void
vect_get_vec_defs (tree op0, tree op1, gimple stmt,
                   VEC(tree,heap) **vec_oprnds0, VEC(tree,heap) **vec_oprnds1,
                   slp_tree slp_node)
{
  if (slp_node)
    vect_get_slp_defs (slp_node, vec_oprnds0, vec_oprnds1, -1);
  else
    {
      tree vec_oprnd;

      *vec_oprnds0 = VEC_alloc (tree, heap, 1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
      VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);

      if (op1)
        {
          *vec_oprnds1 = VEC_alloc (tree, heap, 1);
          vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
          VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
        }
    }
}

/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

void
vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
                             gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);

  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
                                                   bb_vinfo));

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "add new stmt: ");
      print_gimple_stmt (vect_dump, vec_stmt, 0, TDF_SLIM);
    }

  gimple_set_location (vec_stmt, gimple_location (gsi_stmt (*gsi)));
}

/* Checks if CALL can be vectorized in type VECTYPE.  Returns
   a function declaration if the target has a vectorized version
   of the function, or NULL_TREE if the function cannot be vectorized.  */

tree
vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
{
  tree fndecl = gimple_call_fndecl (call);

  /* We only handle functions that do not read or clobber memory -- i.e.
     const or novops ones.  */
  if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
    return NULL_TREE;

  if (!fndecl
      || TREE_CODE (fndecl) != FUNCTION_DECL
      || !DECL_BUILT_IN (fndecl))
    return NULL_TREE;

  return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
                                                        vectype_in);
}

/* Function vectorizable_call.

   Check if STMT performs a function call that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
{
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
  tree vectype_out, vectype_in;
  int nunits_in;
  int nunits_out;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree fndecl, new_temp, def, rhs_type;
  gimple def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple new_stmt = NULL;
  int ncopies, j;
  VEC(tree, heap) *vargs = NULL;
  enum { NARROW, NONE, WIDEN } modifier;
  size_t i, nargs;

  /* FORNOW: unsupported in basic block SLP.  */
  gcc_assert (loop_vinfo);

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* FORNOW: SLP not supported.  */
  if (STMT_SLP_TYPE (stmt_info))
    return false;

  /* Is STMT a vectorizable call?  */
  if (!is_gimple_call (stmt))
    return false;

  if (TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Process function arguments.  */
  rhs_type = NULL_TREE;
  vectype_in = NULL_TREE;
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has more than two arguments, we
     do not have interesting builtin functions to vectorize with
     more than two arguments.  No arguments is also not good.  */
  if (nargs == 0 || nargs > 2)
    return false;

  for (i = 0; i < nargs; i++)
    {
      tree opvectype;

      op = gimple_call_arg (stmt, i);

      /* We can only handle calls with arguments of the same type.  */
      if (rhs_type
          && !types_compatible_p (rhs_type, TREE_TYPE (op)))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "argument types differ.");
          return false;
        }
      if (!rhs_type)
        rhs_type = TREE_TYPE (op);

      if (!vect_is_simple_use_1 (op, loop_vinfo, NULL,
                                 &def_stmt, &def, &dt[i], &opvectype))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "use not simple.");
          return false;
        }

      if (!vectype_in)
        vectype_in = opvectype;
      else if (opvectype
               && opvectype != vectype_in)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "argument vector types differ.");
          return false;
        }
    }
  /* If all arguments are external or constant defs, use a vector type with
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);

  /* FORNOW */
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in == nunits_out / 2)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else if (nunits_out == nunits_in / 2)
    modifier = WIDEN;
  else
    return false;
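
  /* E.g., a call taking V2DF arguments and producing a V4SI result
     (nunits_in == nunits_out / 2) is a NARROW case: each vectorized call
     consumes two vector defs per argument.  V4SI arguments with a V2DF
     result would be a WIDEN case, which no target currently implements
     (see below).  */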

  /* For now, we only vectorize functions if a target specific builtin
     is available.  TODO -- in some cases, it might be profitable to
     insert the calls for pieces of the vector, in order to be able
     to vectorize other operations in the loop.  */
  fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
  if (fndecl == NULL_TREE)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "function is not vectorizable.");

      return false;
    }

  gcc_assert (!gimple_vuse (stmt));

  if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "=== vectorizable_call ===");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
      return true;
    }

  /** Transform.  **/

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform operation.");

  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; ++j)
        {
          /* Build argument list for the vectorized call.  */
          if (j == 0)
            vargs = VEC_alloc (tree, heap, nargs);
          else
            VEC_truncate (tree, vargs, 0);

          for (i = 0; i < nargs; i++)
            {
              op = gimple_call_arg (stmt, i);
              if (j == 0)
                vec_oprnd0
                  = vect_get_vec_def_for_operand (op, stmt, NULL);
              else
                {
                  vec_oprnd0 = gimple_call_arg (new_stmt, i);
                  vec_oprnd0
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }

              VEC_quick_push (tree, vargs, vec_oprnd0);
            }

          new_stmt = gimple_build_call_vec (fndecl, vargs);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_call_set_lhs (new_stmt, new_temp);

          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          mark_symbols_for_renaming (new_stmt);

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      break;

    case NARROW:
      for (j = 0; j < ncopies; ++j)
        {
          /* Build argument list for the vectorized call.  */
          if (j == 0)
            vargs = VEC_alloc (tree, heap, nargs * 2);
          else
            VEC_truncate (tree, vargs, 0);

          for (i = 0; i < nargs; i++)
            {
              op = gimple_call_arg (stmt, i);
              if (j == 0)
                {
                  vec_oprnd0
                    = vect_get_vec_def_for_operand (op, stmt, NULL);
                  vec_oprnd1
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }
              else
                {
                  vec_oprnd1 = gimple_call_arg (new_stmt, 2*i);
                  vec_oprnd0
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
                  vec_oprnd1
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }

              VEC_quick_push (tree, vargs, vec_oprnd0);
              VEC_quick_push (tree, vargs, vec_oprnd1);
            }

          new_stmt = gimple_build_call_vec (fndecl, vargs);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_call_set_lhs (new_stmt, new_temp);

          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          mark_symbols_for_renaming (new_stmt);

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

      break;

    case WIDEN:
      /* No current target implements this case.  */
      return false;
    }

  VEC_free (tree, heap, vargs);

  /* Update the exception handling table with the vector stmt if necessary.  */
  if (maybe_clean_or_replace_eh_stmt (stmt, *vec_stmt))
    gimple_purge_dead_eh_edges (gimple_bb (stmt));

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  type = TREE_TYPE (scalar_dest);
  new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
                                  fold_convert (type, integer_zero_node));
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, false);
  SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;

  return true;
}

/* Function vect_gen_widened_results_half

   Create a vector stmt whose code, number of arguments, and result
   variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
   VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at BSI.
   In the case that CODE is a CALL_EXPR, this means that a call to DECL
   needs to be created (DECL is a function-decl of a target-builtin).
   STMT is the original scalar stmt that we are vectorizing.  */

static gimple
vect_gen_widened_results_half (enum tree_code code,
                               tree decl,
                               tree vec_oprnd0, tree vec_oprnd1, int op_type,
                               tree vec_dest, gimple_stmt_iterator *gsi,
                               gimple stmt)
{
  gimple new_stmt;
  tree new_temp;

  /* Generate half of the widened result:  */
  if (code == CALL_EXPR)
    {
      /* Target specific support  */
      if (op_type == binary_op)
        new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
      else
        new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_call_set_lhs (new_stmt, new_temp);
    }
  else
    {
      /* Generic support */
      gcc_assert (op_type == TREE_CODE_LENGTH (code));
      if (op_type != binary_op)
        vec_oprnd1 = NULL;
      new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
                                               vec_oprnd1);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
    }
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return new_stmt;
}

/* Check if STMT performs a conversion operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
                         gimple *vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
  tree decl1 = NULL_TREE, decl2 = NULL_TREE;
  tree new_temp;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out, vectype_in;
  int ncopies, j;
  tree rhs_type;
  tree builtin_decl;
  enum { NARROW, NONE, WIDEN } modifier;
  int i;
  VEC(tree,heap) *vec_oprnds0 = NULL;
  tree vop0;
  VEC(tree,heap) *dummy = NULL;
  int dummy_int;

  /* Is STMT a vectorizable conversion?  */

  /* FORNOW: unsupported in basic block SLP.  */
  gcc_assert (loop_vinfo);

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
    return false;

  /* Check types of lhs and rhs.  */
  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  op0 = gimple_assign_rhs1 (stmt);
  rhs_type = TREE_TYPE (op0);
  /* Check the operands of the operation.  */
  if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
                             &def_stmt, &def, &dt[0], &vectype_in))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "use not simple.");
      return false;
    }
  /* If op0 is an external or constant def, use a vector type of
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);

  /* FORNOW */
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in == nunits_out / 2)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else if (nunits_out == nunits_in / 2)
    modifier = WIDEN;
  else
    return false;

  if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* FORNOW: SLP with multiple types is not supported.  The SLP analysis
     verifies this, so we can safely override NCOPIES with 1 here.  */
  if (slp_node)
    ncopies = 1;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  /* Supportable by target?  */
  if ((modifier == NONE
       && !targetm.vectorize.builtin_conversion (code, vectype_out,
                                                 vectype_in))
      || (modifier == WIDEN
          && !supportable_widening_operation (code, stmt,
                                              vectype_out, vectype_in,
                                              &decl1, &decl2,
                                              &code1, &code2,
                                              &dummy_int, &dummy))
      || (modifier == NARROW
          && !supportable_narrowing_operation (code, vectype_out, vectype_in,
                                               &code1, &dummy_int, &dummy)))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "conversion not supported by target.");
      return false;
    }

  if (modifier != NONE)
    {
      /* FORNOW: SLP not supported.  */
      if (STMT_SLP_TYPE (stmt_info))
        return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
      return true;
    }

  /** Transform.  **/
  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform conversion.");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  if (modifier == NONE && !slp_node)
    vec_oprnds0 = VEC_alloc (tree, heap, 1);

  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; j++)
        {
          if (j == 0)
            vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
          else
            vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);

          builtin_decl =
            targetm.vectorize.builtin_conversion (code,
                                                  vectype_out, vectype_in);
          for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vop0); i++)
            {
              /* Arguments are ready.  Create the new vector stmt.  */
              new_stmt = gimple_build_call (builtin_decl, 1, vop0);
              new_temp = make_ssa_name (vec_dest, new_stmt);
              gimple_call_set_lhs (new_stmt, new_temp);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              if (slp_node)
                VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node),
                                new_stmt);
            }

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
      break;

    case WIDEN:
      /* In case the vectorization factor (VF) is bigger than the number
         of elements that we can fit in a vectype (nunits), we have to
         generate more than one vector stmt - i.e - we need to "unroll"
         the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
        {
          if (j == 0)
            vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
          else
            vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);

          /* Generate first half of the widened result:  */
          new_stmt
            = vect_gen_widened_results_half (code1, decl1,
                                             vec_oprnd0, vec_oprnd1,
                                             unary_op, vec_dest, gsi, stmt);
          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);

          /* Generate second half of the widened result:  */
          new_stmt
            = vect_gen_widened_results_half (code2, decl2,
                                             vec_oprnd0, vec_oprnd1,
                                             unary_op, vec_dest, gsi, stmt);
          STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
      break;

    case NARROW:
      /* In case the vectorization factor (VF) is bigger than the number
         of elements that we can fit in a vectype (nunits), we have to
         generate more than one vector stmt - i.e - we need to "unroll"
         the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
        {
          /* Handle uses.  */
          if (j == 0)
            {
              vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
              vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
            }
          else
            {
              vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1);
              vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
            }

          /* Arguments are ready.  Create the new vector stmt.  */
          new_stmt = gimple_build_assign_with_ops (code1, vec_dest, vec_oprnd0,
                                                   vec_oprnd1);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
    }

  if (vec_oprnds0)
    VEC_free (tree, heap, vec_oprnds0);

  return true;
}

/* Function vectorizable_assignment.

   Check if STMT performs an assignment (copy) that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
                         gimple *vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree new_temp;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies;
  int i, j;
  VEC(tree,heap) *vec_oprnds = NULL;
  tree vop;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  gimple new_stmt = NULL;
  stmt_vec_info prev_stmt_info = NULL;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
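
  /* E.g., with a vectorization factor of 8 and V4SI vectors (nunits == 4),
     ncopies is 2: each scalar stmt is vectorized into two vector stmts.  */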
1823 gcc_assert (ncopies >= 1);
1825 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
1826 return false;
1828 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1829 return false;
1831 /* Is vectorizable assignment? */
1832 if (!is_gimple_assign (stmt))
1833 return false;
1835 scalar_dest = gimple_assign_lhs (stmt);
1836 if (TREE_CODE (scalar_dest) != SSA_NAME)
1837 return false;
1839 if (gimple_assign_single_p (stmt)
1840 || gimple_assign_rhs_code (stmt) == PAREN_EXPR)
1841 op = gimple_assign_rhs1 (stmt);
1842 else
1843 return false;
1845 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt[0]))
1847 if (vect_print_dump_info (REPORT_DETAILS))
1848 fprintf (vect_dump, "use not simple.");
1849 return false;
1852 if (!vec_stmt) /* transformation not required. */
1854 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
1855 if (vect_print_dump_info (REPORT_DETAILS))
1856 fprintf (vect_dump, "=== vectorizable_assignment ===");
1857 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1858 return true;
1861 /** Transform. **/
1862 if (vect_print_dump_info (REPORT_DETAILS))
1863 fprintf (vect_dump, "transform assignment.");
1865 /* Handle def. */
1866 vec_dest = vect_create_destination_var (scalar_dest, vectype);
1868 /* Handle use. */
1869 for (j = 0; j < ncopies; j++)
1871 /* Handle uses. */
1872 if (j == 0)
1873 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
1874 else
1875 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
1877 /* Arguments are ready. Create the new vector stmt. */
1878 for (i = 0; VEC_iterate (tree, vec_oprnds, i, vop); i++)
1880 new_stmt = gimple_build_assign (vec_dest, vop);
1881 new_temp = make_ssa_name (vec_dest, new_stmt);
1882 gimple_assign_set_lhs (new_stmt, new_temp);
1883 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1884 if (slp_node)
1885 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
1888 if (slp_node)
1889 continue;
1891 if (j == 0)
1892 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1893 else
1894 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1896 prev_stmt_info = vinfo_for_stmt (new_stmt);
1899 VEC_free (tree, heap, vec_oprnds);
1900 return true;
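/* For illustration, a scalar SSA copy

     S1: x_1 = y_2

   in a loop with VF == 8 and V4SI vectors is replaced by the two
   vector copies

     VS1_0: vx0 = vy0
     VS1_1: vx1 = vy1

   chained through STMT_VINFO_RELATED_STMT, as illustrated at length
   before the transformation loop of vectorizable_operation below.  */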
1903 /* Function vectorizable_operation.
1905 Check if STMT performs a binary or unary operation that can be vectorized.
1906 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1907 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1908 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1910 static bool
1911 vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
1912 gimple *vec_stmt, slp_tree slp_node)
1914 tree vec_dest;
1915 tree scalar_dest;
1916 tree op0, op1 = NULL;
1917 tree vec_oprnd1 = NULL_TREE;
1918 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1919 tree vectype;
1920 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1921 enum tree_code code;
1922 enum machine_mode vec_mode;
1923 tree new_temp;
1924 int op_type;
1925 optab optab;
1926 int icode;
1927 enum machine_mode optab_op2_mode;
1928 tree def;
1929 gimple def_stmt;
1930 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1931 gimple new_stmt = NULL;
1932 stmt_vec_info prev_stmt_info;
1933 int nunits_in;
1934 int nunits_out;
1935 tree vectype_out;
1936 int ncopies;
1937 int j, i;
1938 VEC(tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
1939 tree vop0, vop1;
1940 unsigned int k;
1941 bool scalar_shift_arg = false;
1942 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1943 int vf;
1945 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
1946 return false;
1948 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1949 return false;
1951 /* Is STMT a vectorizable binary/unary operation? */
1952 if (!is_gimple_assign (stmt))
1953 return false;
1955 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
1956 return false;
1958 code = gimple_assign_rhs_code (stmt);
1960 /* For pointer addition, we should use the normal plus for
1961 the vector addition. */
1962 if (code == POINTER_PLUS_EXPR)
1963 code = PLUS_EXPR;
1965 /* Support only unary or binary operations. */
1966 op_type = TREE_CODE_LENGTH (code);
1967 if (op_type != unary_op && op_type != binary_op)
1969 if (vect_print_dump_info (REPORT_DETAILS))
1970 fprintf (vect_dump, "num. args = %d (not unary/binary op).", op_type);
1971 return false;
1974 scalar_dest = gimple_assign_lhs (stmt);
1975 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
1977 op0 = gimple_assign_rhs1 (stmt);
1978 if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
1979 &def_stmt, &def, &dt[0], &vectype))
1981 if (vect_print_dump_info (REPORT_DETAILS))
1982 fprintf (vect_dump, "use not simple.");
1983 return false;
1985 /* If op0 is an external or constant def, use a vector type with
1986 the same size as the output vector type. */
1987 if (!vectype)
1988 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
1989 gcc_assert (vectype);
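/* E.g., in

     S1: x_1 = 5 - y_2

   op0 is a constant with no def stmt to take a vector type from, so a
   vector type of the same size as VECTYPE_OUT (say V4SI for an int
   result on a target with 128-bit vectors) is picked here.  */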
1991 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1992 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
1993 if (nunits_out != nunits_in)
1994 return false;
1996 if (op_type == binary_op)
1998 op1 = gimple_assign_rhs2 (stmt);
1999 if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def,
2000 &dt[1]))
2002 if (vect_print_dump_info (REPORT_DETAILS))
2003 fprintf (vect_dump, "use not simple.");
2004 return false;
2008 if (loop_vinfo)
2009 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2010 else
2011 vf = 1;
2013 /* Multiple types in SLP are handled by creating the appropriate number of
2014 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2015 case of SLP. */
2016 if (slp_node)
2017 ncopies = 1;
2018 else
2019 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2021 gcc_assert (ncopies >= 1);
2023 /* If this is a shift/rotate, determine whether the shift amount is a vector
2024 or scalar. If the shift/rotate amount is a vector, use the vector/vector
2025 shift optabs. */
2026 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
2027 || code == RROTATE_EXPR)
2029 /* Vector shifted by vector. */
2030 if (dt[1] == vect_internal_def)
2032 optab = optab_for_tree_code (code, vectype, optab_vector);
2033 if (vect_print_dump_info (REPORT_DETAILS))
2034 fprintf (vect_dump, "vector/vector shift/rotate found.");
2037 /* See if the machine has a vector shifted by scalar insn, and if not
2038 then see if it has a vector shifted by vector insn. */
2039 else if (dt[1] == vect_constant_def || dt[1] == vect_external_def)
2041 optab = optab_for_tree_code (code, vectype, optab_scalar);
2042 if (optab
2043 && (optab_handler (optab, TYPE_MODE (vectype))->insn_code
2044 != CODE_FOR_nothing))
2046 scalar_shift_arg = true;
2047 if (vect_print_dump_info (REPORT_DETAILS))
2048 fprintf (vect_dump, "vector/scalar shift/rotate found.");
2050 else
2052 optab = optab_for_tree_code (code, vectype, optab_vector);
2053 if (optab
2054 && (optab_handler (optab, TYPE_MODE (vectype))->insn_code
2055 != CODE_FOR_nothing))
2057 if (vect_print_dump_info (REPORT_DETAILS))
2058 fprintf (vect_dump, "vector/vector shift/rotate found.");
2060 /* Unlike the other binary operators, shifts/rotates have
2061 the rhs being int, instead of the same type as the lhs,
2062 so make sure the scalar is the right type if we are
2063 dealing with vectors of short/char. */
2064 if (dt[1] == vect_constant_def)
2065 op1 = fold_convert (TREE_TYPE (vectype), op1);
2070 else
2072 if (vect_print_dump_info (REPORT_DETAILS))
2073 fprintf (vect_dump, "operand mode requires invariant argument.");
2074 return false;
2077 else
2078 optab = optab_for_tree_code (code, vectype, optab_default);
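/* To illustrate the optab choices above: a shift amount defined inside
   the loop, as in

     a[i] = b[i] << c[i]

   requires the vector/vector shift optab, whereas an invariant or
   constant amount, as in

     a[i] = b[i] << s

   prefers the vector/scalar optab when the target provides it and
   otherwise falls back to the vector/vector form with S broadcast into
   a vector.  All other unary/binary codes use optab_default.  */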
2080 /* Supportable by target? */
2081 if (!optab)
2083 if (vect_print_dump_info (REPORT_DETAILS))
2084 fprintf (vect_dump, "no optab.");
2085 return false;
2087 vec_mode = TYPE_MODE (vectype);
2088 icode = (int) optab_handler (optab, vec_mode)->insn_code;
2089 if (icode == CODE_FOR_nothing)
2091 if (vect_print_dump_info (REPORT_DETAILS))
2092 fprintf (vect_dump, "op not supported by target.");
2093 /* Check only during analysis. */
2094 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
2095 || (vf < vect_min_worthwhile_factor (code)
2096 && !vec_stmt))
2097 return false;
2098 if (vect_print_dump_info (REPORT_DETAILS))
2099 fprintf (vect_dump, "proceeding using word mode.");
2102 /* Worthwhile without SIMD support? Check only during analysis. */
2103 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2104 && vf < vect_min_worthwhile_factor (code)
2105 && !vec_stmt)
2107 if (vect_print_dump_info (REPORT_DETAILS))
2108 fprintf (vect_dump, "not worthwhile without SIMD support.");
2109 return false;
2112 if (!vec_stmt) /* transformation not required. */
2114 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
2115 if (vect_print_dump_info (REPORT_DETAILS))
2116 fprintf (vect_dump, "=== vectorizable_operation ===");
2117 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2118 return true;
2121 /** Transform. **/
2123 if (vect_print_dump_info (REPORT_DETAILS))
2124 fprintf (vect_dump, "transform binary/unary operation.");
2126 /* Handle def. */
2127 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2129 /* Allocate VECs for vector operands. In case of SLP, vector operands are
2130 created in the previous stages of the recursion, so no allocation is
2131 needed, except for the case of shift with scalar shift argument. In that
2132 case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
2133 be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
2134 In case of loop-based vectorization we allocate VECs of size 1. We
2135 allocate VEC_OPRNDS1 only in case of binary operation. */
2136 if (!slp_node)
2138 vec_oprnds0 = VEC_alloc (tree, heap, 1);
2139 if (op_type == binary_op)
2140 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2142 else if (scalar_shift_arg)
2143 vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);
2145 /* In case the vectorization factor (VF) is bigger than the number
2146 of elements that we can fit in a vectype (nunits), we have to generate
2147 more than one vector stmt - i.e., we need to "unroll" the
2148 vector stmt by a factor VF/nunits. In doing so, we record a pointer
2149 from one copy of the vector stmt to the next, in the field
2150 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
2151 stages to find the correct vector defs to be used when vectorizing
2152 stmts that use the defs of the current stmt. The example below illustrates
2153 the vectorization process when VF=16 and nunits=4 (i.e., we need to create
2154 4 vectorized stmts):
2156 before vectorization:
2157 RELATED_STMT VEC_STMT
2158 S1: x = memref - -
2159 S2: z = x + 1 - -
2161 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
2162 there):
2163 RELATED_STMT VEC_STMT
2164 VS1_0: vx0 = memref0 VS1_1 -
2165 VS1_1: vx1 = memref1 VS1_2 -
2166 VS1_2: vx2 = memref2 VS1_3 -
2167 VS1_3: vx3 = memref3 - -
2168 S1: x = load - VS1_0
2169 S2: z = x + 1 - -
2171 step 2: vectorize stmt S2 (done here):
2172 To vectorize stmt S2 we first need to find the relevant vector
2173 def for the first operand 'x'. This is, as usual, obtained from
2174 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
2175 that defines 'x' (S1). This way we find the stmt VS1_0, and the
2176 relevant vector def 'vx0'. Having found 'vx0' we can generate
2177 the vector stmt VS2_0, and as usual, record it in the
2178 STMT_VINFO_VEC_STMT of stmt S2.
2179 When creating the second copy (VS2_1), we obtain the relevant vector
2180 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
2181 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
2182 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
2183 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
2184 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
2185 chain of stmts and pointers:
2186 RELATED_STMT VEC_STMT
2187 VS1_0: vx0 = memref0 VS1_1 -
2188 VS1_1: vx1 = memref1 VS1_2 -
2189 VS1_2: vx2 = memref2 VS1_3 -
2190 VS1_3: vx3 = memref3 - -
2191 S1: x = load - VS1_0
2192 VS2_0: vz0 = vx0 + v1 VS2_1 -
2193 VS2_1: vz1 = vx1 + v1 VS2_2 -
2194 VS2_2: vz2 = vx2 + v1 VS2_3 -
2195 VS2_3: vz3 = vx3 + v1 - -
2196 S2: z = x + 1 - VS2_0 */
2198 prev_stmt_info = NULL;
2199 for (j = 0; j < ncopies; j++)
2201 /* Handle uses. */
2202 if (j == 0)
2204 if (op_type == binary_op && scalar_shift_arg)
2206 /* Vector shl and shr insn patterns can be defined with scalar
2207 operand 2 (shift operand). In this case, use constant or loop
2208 invariant op1 directly, without extending it to vector mode
2209 first. */
2210 optab_op2_mode = insn_data[icode].operand[2].mode;
2211 if (!VECTOR_MODE_P (optab_op2_mode))
2213 if (vect_print_dump_info (REPORT_DETAILS))
2214 fprintf (vect_dump, "operand 1 using scalar mode.");
2215 vec_oprnd1 = op1;
2216 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2217 if (slp_node)
2219 /* Store vec_oprnd1 for every vector stmt to be created
2220 for SLP_NODE. We check during the analysis that all the
2221 shift arguments are the same.
2222 TODO: Allow different constants for different vector
2223 stmts generated for an SLP instance. */
2224 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
2225 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2230 /* vec_oprnd1 is available if operand 1 should be of a scalar type
2231 (a special case for certain kinds of vector shifts); otherwise,
2232 operand 1 should be of a vector type (the usual case). */
2233 if (op_type == binary_op && !vec_oprnd1)
2234 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
2235 slp_node);
2236 else
2237 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
2238 slp_node);
2240 else
2241 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
2243 /* Arguments are ready. Create the new vector stmt. */
2244 for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vop0); i++)
2246 vop1 = ((op_type == binary_op)
2247 ? VEC_index (tree, vec_oprnds1, i) : NULL);
2248 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2249 new_temp = make_ssa_name (vec_dest, new_stmt);
2250 gimple_assign_set_lhs (new_stmt, new_temp);
2251 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2252 if (slp_node)
2253 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2256 if (slp_node)
2257 continue;
2259 if (j == 0)
2260 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2261 else
2262 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2263 prev_stmt_info = vinfo_for_stmt (new_stmt);
2266 VEC_free (tree, heap, vec_oprnds0);
2267 if (vec_oprnds1)
2268 VEC_free (tree, heap, vec_oprnds1);
2270 return true;
2274 /* Get vectorized definitions for loop-based vectorization. For the first
2275 operand we call vect_get_vec_def_for_operand() (with OPRND containing
2276 scalar operand), and for the rest we get a copy with
2277 vect_get_vec_def_for_stmt_copy() using the previous vector definition
2278 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
2279 The vectors are collected into VEC_OPRNDS. */
2281 static void
2282 vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
2283 VEC (tree, heap) **vec_oprnds, int multi_step_cvt)
2285 tree vec_oprnd;
2287 /* Get first vector operand. */
2288 /* All the vector operands except the very first one (that is scalar oprnd)
2289 are stmt copies. */
2290 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
2291 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
2292 else
2293 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
2295 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2297 /* Get second vector operand. */
2298 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
2299 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2301 *oprnd = vec_oprnd;
2303 /* For conversion in multiple steps, continue to get operands
2304 recursively. */
2305 if (multi_step_cvt)
2306 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
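/* E.g., called with MULTI_STEP_CVT == 1 this function pushes two defs,
   recurses once and pushes two more, collecting four vector defs in
   VEC_OPRNDS - matching the vect_pow2 (multi_step_cvt) * 2 elements
   that vectorizable_type_demotion allocates for a two-step
   conversion.  */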
2310 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
2311 For multi-step conversions store the resulting vectors and call the function
2312 recursively. */
2314 static void
2315 vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
2316 int multi_step_cvt, gimple stmt,
2317 VEC (tree, heap) *vec_dsts,
2318 gimple_stmt_iterator *gsi,
2319 slp_tree slp_node, enum tree_code code,
2320 stmt_vec_info *prev_stmt_info)
2322 unsigned int i;
2323 tree vop0, vop1, new_tmp, vec_dest;
2324 gimple new_stmt;
2325 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2327 vec_dest = VEC_pop (tree, vec_dsts);
2329 for (i = 0; i < VEC_length (tree, *vec_oprnds); i += 2)
2331 /* Create demotion operation. */
2332 vop0 = VEC_index (tree, *vec_oprnds, i);
2333 vop1 = VEC_index (tree, *vec_oprnds, i + 1);
2334 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2335 new_tmp = make_ssa_name (vec_dest, new_stmt);
2336 gimple_assign_set_lhs (new_stmt, new_tmp);
2337 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2339 if (multi_step_cvt)
2340 /* Store the resulting vector for next recursive call. */
2341 VEC_replace (tree, *vec_oprnds, i/2, new_tmp);
2342 else
2344 /* This is the last step of the conversion sequence. Store the
2345 vectors in SLP_NODE or in the vector info of the scalar statement
2346 (or in the STMT_VINFO_RELATED_STMT chain). */
2347 if (slp_node)
2348 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2349 else
2351 if (!*prev_stmt_info)
2352 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2353 else
2354 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
2356 *prev_stmt_info = vinfo_for_stmt (new_stmt);
2361 /* For multi-step demotion operations we first generate demotion operations
2362 from the source type to the intermediate types, and then combine the
2363 results (stored in VEC_OPRNDS) in a demotion operation to the destination
2364 type. */
2365 if (multi_step_cvt)
2367 /* At each level of recursion we have half of the operands we had at the
2368 previous level. */
2369 VEC_truncate (tree, *vec_oprnds, (i+1)/2);
2370 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
2371 stmt, vec_dsts, gsi, slp_node,
2372 code, prev_stmt_info);
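/* E.g., narrowing int to char on a target with 128-bit vectors is a
   two-step demotion: four V4SI operands are first packed pairwise into
   two V8HI vectors by the loop above, and the recursive call then
   packs those into the single V16QI result.  */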
2377 /* Function vectorizable_type_demotion
2379 Check if STMT performs a binary or unary operation that involves
2380 type demotion, and if it can be vectorized.
2381 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2382 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2383 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2385 static bool
2386 vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
2387 gimple *vec_stmt, slp_tree slp_node)
2389 tree vec_dest;
2390 tree scalar_dest;
2391 tree op0;
2392 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2393 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2394 enum tree_code code, code1 = ERROR_MARK;
2395 tree def;
2396 gimple def_stmt;
2397 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2398 stmt_vec_info prev_stmt_info;
2399 int nunits_in;
2400 int nunits_out;
2401 tree vectype_out;
2402 int ncopies;
2403 int j, i;
2404 tree vectype_in;
2405 int multi_step_cvt = 0;
2406 VEC (tree, heap) *vec_oprnds0 = NULL;
2407 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2408 tree last_oprnd, intermediate_type;
2410 /* FORNOW: not supported by basic block SLP vectorization. */
2411 gcc_assert (loop_vinfo);
2413 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2414 return false;
2416 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2417 return false;
2419 /* Is STMT a vectorizable type-demotion operation? */
2420 if (!is_gimple_assign (stmt))
2421 return false;
2423 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2424 return false;
2426 code = gimple_assign_rhs_code (stmt);
2427 if (!CONVERT_EXPR_CODE_P (code))
2428 return false;
2430 scalar_dest = gimple_assign_lhs (stmt);
2431 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2433 /* Check the operands of the operation. */
2434 op0 = gimple_assign_rhs1 (stmt);
2435 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2436 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2437 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2438 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2439 && CONVERT_EXPR_CODE_P (code))))
2440 return false;
2441 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
2442 &def_stmt, &def, &dt[0], &vectype_in))
2444 if (vect_print_dump_info (REPORT_DETAILS))
2445 fprintf (vect_dump, "use not simple.");
2446 return false;
2448 /* If op0 is an external def, use a vector type with the
2449 same size as the output vector type if possible. */
2450 if (!vectype_in)
2451 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2452 if (!vectype_in)
2453 return false;
2455 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2456 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2457 if (nunits_in >= nunits_out)
2458 return false;
2460 /* Multiple types in SLP are handled by creating the appropriate number of
2461 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2462 case of SLP. */
2463 if (slp_node)
2464 ncopies = 1;
2465 else
2466 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2467 gcc_assert (ncopies >= 1);
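/* Note that NCOPIES is computed from NUNITS_OUT: each narrowing stmt
   consumes two input vectors per output vector, so, e.g., with VF == 8,
   V4SI inputs and V8HI outputs, ncopies == 8 / 8 == 1 and that single
   copy reads two V4SI defs.  */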
2469 /* Supportable by target? */
2470 if (!supportable_narrowing_operation (code, vectype_out, vectype_in,
2471 &code1, &multi_step_cvt, &interm_types))
2472 return false;
2474 if (!vec_stmt) /* transformation not required. */
2476 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
2477 if (vect_print_dump_info (REPORT_DETAILS))
2478 fprintf (vect_dump, "=== vectorizable_demotion ===");
2479 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2480 return true;
2483 /** Transform. **/
2484 if (vect_print_dump_info (REPORT_DETAILS))
2485 fprintf (vect_dump, "transform type demotion operation. ncopies = %d.",
2486 ncopies);
2488 /* In case of multi-step demotion, we first generate demotion operations to
2489 the intermediate types, and then from those types to the final one.
2490 We create vector destinations for the intermediate types (TYPES) received
2491 from supportable_narrowing_operation, and store them in the correct order
2492 for future use in vect_create_vectorized_demotion_stmts(). */
2493 if (multi_step_cvt)
2494 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
2495 else
2496 vec_dsts = VEC_alloc (tree, heap, 1);
2498 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2499 VEC_quick_push (tree, vec_dsts, vec_dest);
2501 if (multi_step_cvt)
2503 for (i = VEC_length (tree, interm_types) - 1;
2504 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
2506 vec_dest = vect_create_destination_var (scalar_dest,
2507 intermediate_type);
2508 VEC_quick_push (tree, vec_dsts, vec_dest);
2512 /* In case the vectorization factor (VF) is bigger than the number
2513 of elements that we can fit in a vectype (nunits), we have to generate
2514 more than one vector stmt - i.e., we need to "unroll" the
2515 vector stmt by a factor VF/nunits. */
2516 last_oprnd = op0;
2517 prev_stmt_info = NULL;
2518 for (j = 0; j < ncopies; j++)
2520 /* Handle uses. */
2521 if (slp_node)
2522 vect_get_slp_defs (slp_node, &vec_oprnds0, NULL, -1);
2523 else
2525 VEC_free (tree, heap, vec_oprnds0);
2526 vec_oprnds0 = VEC_alloc (tree, heap,
2527 (multi_step_cvt ? vect_pow2 (multi_step_cvt) * 2 : 2));
2528 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
2529 vect_pow2 (multi_step_cvt) - 1);
2532 /* Arguments are ready. Create the new vector stmts. */
2533 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
2534 vect_create_vectorized_demotion_stmts (&vec_oprnds0,
2535 multi_step_cvt, stmt, tmp_vec_dsts,
2536 gsi, slp_node, code1,
2537 &prev_stmt_info);
2540 VEC_free (tree, heap, vec_oprnds0);
2541 VEC_free (tree, heap, vec_dsts);
2542 VEC_free (tree, heap, tmp_vec_dsts);
2543 VEC_free (tree, heap, interm_types);
2545 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2546 return true;
2550 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
2551 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
2552 the resulting vectors and call the function recursively. */
2554 static void
2555 vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
2556 VEC (tree, heap) **vec_oprnds1,
2557 int multi_step_cvt, gimple stmt,
2558 VEC (tree, heap) *vec_dsts,
2559 gimple_stmt_iterator *gsi,
2560 slp_tree slp_node, enum tree_code code1,
2561 enum tree_code code2, tree decl1,
2562 tree decl2, int op_type,
2563 stmt_vec_info *prev_stmt_info)
2565 int i;
2566 tree vop0, vop1, new_tmp1, new_tmp2, vec_dest;
2567 gimple new_stmt1, new_stmt2;
2568 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2569 VEC (tree, heap) *vec_tmp;
2571 vec_dest = VEC_pop (tree, vec_dsts);
2572 vec_tmp = VEC_alloc (tree, heap, VEC_length (tree, *vec_oprnds0) * 2);
2574 for (i = 0; VEC_iterate (tree, *vec_oprnds0, i, vop0); i++)
2576 if (op_type == binary_op)
2577 vop1 = VEC_index (tree, *vec_oprnds1, i);
2578 else
2579 vop1 = NULL_TREE;
2581 /* Generate the two halves of promotion operation. */
2582 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
2583 op_type, vec_dest, gsi, stmt);
2584 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
2585 op_type, vec_dest, gsi, stmt);
2586 if (is_gimple_call (new_stmt1))
2588 new_tmp1 = gimple_call_lhs (new_stmt1);
2589 new_tmp2 = gimple_call_lhs (new_stmt2);
2591 else
2593 new_tmp1 = gimple_assign_lhs (new_stmt1);
2594 new_tmp2 = gimple_assign_lhs (new_stmt2);
2597 if (multi_step_cvt)
2599 /* Store the results for the recursive call. */
2600 VEC_quick_push (tree, vec_tmp, new_tmp1);
2601 VEC_quick_push (tree, vec_tmp, new_tmp2);
2603 else
2605 /* Last step of the promotion sequence - store the results. */
2606 if (slp_node)
2608 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt1);
2609 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt2);
2611 else
2613 if (!*prev_stmt_info)
2614 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt1;
2615 else
2616 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt1;
2618 *prev_stmt_info = vinfo_for_stmt (new_stmt1);
2619 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt2;
2620 *prev_stmt_info = vinfo_for_stmt (new_stmt2);
2625 if (multi_step_cvt)
2627 /* For a multi-step promotion operation we call the function
2628 recursively for every stage: we start from the input type,
2629 create promotion operations to the intermediate types, and then
2630 create promotions to the output type. */
2631 *vec_oprnds0 = VEC_copy (tree, heap, vec_tmp);
2632 VEC_free (tree, heap, vec_tmp);
2633 vect_create_vectorized_promotion_stmts (vec_oprnds0, vec_oprnds1,
2634 multi_step_cvt - 1, stmt,
2635 vec_dsts, gsi, slp_node, code1,
2636 code2, decl1, decl2, op_type,
2637 prev_stmt_info);
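/* E.g., widening char to int on a target with 128-bit vectors is a
   two-step promotion: each V16QI operand is first unpacked by the two
   half-operations above into a pair of V8HI vectors, and the recursive
   call then widens those into four V4SI results.  */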
2642 /* Function vectorizable_type_promotion
2644 Check if STMT performs a binary or unary operation that involves
2645 type promotion, and if it can be vectorized.
2646 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2647 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2648 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2650 static bool
2651 vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
2652 gimple *vec_stmt, slp_tree slp_node)
2654 tree vec_dest;
2655 tree scalar_dest;
2656 tree op0, op1 = NULL;
2657 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
2658 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2659 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2660 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
2661 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
2662 int op_type;
2663 tree def;
2664 gimple def_stmt;
2665 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2666 stmt_vec_info prev_stmt_info;
2667 int nunits_in;
2668 int nunits_out;
2669 tree vectype_out;
2670 int ncopies;
2671 int j, i;
2672 tree vectype_in;
2673 tree intermediate_type = NULL_TREE;
2674 int multi_step_cvt = 0;
2675 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
2676 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2678 /* FORNOW: not supported by basic block SLP vectorization. */
2679 gcc_assert (loop_vinfo);
2681 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2682 return false;
2684 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2685 return false;
2687 /* Is STMT a vectorizable type-promotion operation? */
2688 if (!is_gimple_assign (stmt))
2689 return false;
2691 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2692 return false;
2694 code = gimple_assign_rhs_code (stmt);
2695 if (!CONVERT_EXPR_CODE_P (code)
2696 && code != WIDEN_MULT_EXPR)
2697 return false;
2699 scalar_dest = gimple_assign_lhs (stmt);
2700 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2702 /* Check the operands of the operation. */
2703 op0 = gimple_assign_rhs1 (stmt);
2704 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2705 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2706 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2707 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2708 && CONVERT_EXPR_CODE_P (code))))
2709 return false;
2710 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
2711 &def_stmt, &def, &dt[0], &vectype_in))
2713 if (vect_print_dump_info (REPORT_DETAILS))
2714 fprintf (vect_dump, "use not simple.");
2715 return false;
2717 /* If op0 is an external or constant def, use a vector type with
2718 the same size as the output vector type. */
2719 if (!vectype_in)
2720 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2721 if (!vectype_in)
2722 return false;
2724 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2725 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2726 if (nunits_in <= nunits_out)
2727 return false;
2729 /* Multiple types in SLP are handled by creating the appropriate number of
2730 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2731 case of SLP. */
2732 if (slp_node)
2733 ncopies = 1;
2734 else
2735 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2737 gcc_assert (ncopies >= 1);
2739 op_type = TREE_CODE_LENGTH (code);
2740 if (op_type == binary_op)
2742 op1 = gimple_assign_rhs2 (stmt);
2743 if (!vect_is_simple_use (op1, loop_vinfo, NULL, &def_stmt, &def, &dt[1]))
2745 if (vect_print_dump_info (REPORT_DETAILS))
2746 fprintf (vect_dump, "use not simple.");
2747 return false;
2751 /* Supportable by target? */
2752 if (!supportable_widening_operation (code, stmt, vectype_out, vectype_in,
2753 &decl1, &decl2, &code1, &code2,
2754 &multi_step_cvt, &interm_types))
2755 return false;
2757 /* A binary widening operation can only be supported directly by the
2758 architecture. */
2759 gcc_assert (!(multi_step_cvt && op_type == binary_op));
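/* E.g., the widening multiply

     S1: prod_1 = sc1_2 w* sc2_3

   (WIDEN_MULT_EXPR with short operands and an int result) is such a
   binary widening operation; it is vectorizable only if the target
   supports it in a single step (multi_step_cvt == 0).  */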
2761 if (!vec_stmt) /* transformation not required. */
2763 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
2764 if (vect_print_dump_info (REPORT_DETAILS))
2765 fprintf (vect_dump, "=== vectorizable_promotion ===");
2766 vect_model_simple_cost (stmt_info, 2 * ncopies, dt, NULL);
2767 return true;
2770 /** Transform. **/
2772 if (vect_print_dump_info (REPORT_DETAILS))
2773 fprintf (vect_dump, "transform type promotion operation. ncopies = %d.",
2774 ncopies);
2776 /* Handle def. */
2777 /* In case of multi-step promotion, we first generate promotion operations
2778 to the intermediate types, and then from those types to the final one.
2779 We store the vector destinations in VEC_DSTS in the correct order for
2780 recursive creation of promotion operations in
2781 vect_create_vectorized_promotion_stmts(). Vector destinations are created
2782 according to TYPES received from supportable_widening_operation(). */
2783 if (multi_step_cvt)
2784 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
2785 else
2786 vec_dsts = VEC_alloc (tree, heap, 1);
2788 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2789 VEC_quick_push (tree, vec_dsts, vec_dest);
2791 if (multi_step_cvt)
2793 for (i = VEC_length (tree, interm_types) - 1;
2794 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
2796 vec_dest = vect_create_destination_var (scalar_dest,
2797 intermediate_type);
2798 VEC_quick_push (tree, vec_dsts, vec_dest);
2802 if (!slp_node)
2804 vec_oprnds0 = VEC_alloc (tree, heap,
2805 (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
2806 if (op_type == binary_op)
2807 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2810 /* In case the vectorization factor (VF) is bigger than the number
2811 of elements that we can fit in a vectype (nunits), we have to generate
2812 more than one vector stmt - i.e., we need to "unroll" the
2813 vector stmt by a factor VF/nunits. */
2815 prev_stmt_info = NULL;
2816 for (j = 0; j < ncopies; j++)
2818 /* Handle uses. */
2819 if (j == 0)
2821 if (slp_node)
2822 vect_get_slp_defs (slp_node, &vec_oprnds0, &vec_oprnds1, -1);
2823 else
2825 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
2826 VEC_quick_push (tree, vec_oprnds0, vec_oprnd0);
2827 if (op_type == binary_op)
2829 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL);
2830 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2834 else
2836 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
2837 VEC_replace (tree, vec_oprnds0, 0, vec_oprnd0);
2838 if (op_type == binary_op)
2840 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1);
2841 VEC_replace (tree, vec_oprnds1, 0, vec_oprnd1);
2845 /* Arguments are ready. Create the new vector stmts. */
2846 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
2847 vect_create_vectorized_promotion_stmts (&vec_oprnds0, &vec_oprnds1,
2848 multi_step_cvt, stmt,
2849 tmp_vec_dsts,
2850 gsi, slp_node, code1, code2,
2851 decl1, decl2, op_type,
2852 &prev_stmt_info);
2855 VEC_free (tree, heap, vec_dsts);
2856 VEC_free (tree, heap, tmp_vec_dsts);
2857 VEC_free (tree, heap, interm_types);
2858 VEC_free (tree, heap, vec_oprnds0);
2859 VEC_free (tree, heap, vec_oprnds1);
2861 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2862 return true;
2866 /* Function vectorizable_store.
2868 Check if STMT defines a non-scalar data-ref (array/pointer/structure) that
2869 can be vectorized.
2870 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2871 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2872 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2874 static bool
2875 vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
2876 slp_tree slp_node)
2878 tree scalar_dest;
2879 tree data_ref;
2880 tree op;
2881 tree vec_oprnd = NULL_TREE;
2882 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2883 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
2884 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2885 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2886 struct loop *loop = NULL;
2887 enum machine_mode vec_mode;
2888 tree dummy;
2889 enum dr_alignment_support alignment_support_scheme;
2890 tree def;
2891 gimple def_stmt;
2892 enum vect_def_type dt;
2893 stmt_vec_info prev_stmt_info = NULL;
2894 tree dataref_ptr = NULL_TREE;
2895 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
2896 int ncopies;
2897 int j;
2898 gimple next_stmt, first_stmt = NULL;
2899 bool strided_store = false;
2900 unsigned int group_size, i;
2901 VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
2902 bool inv_p;
2903 VEC(tree,heap) *vec_oprnds = NULL;
2904 bool slp = (slp_node != NULL);
2905 unsigned int vec_num;
2906 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2908 if (loop_vinfo)
2909 loop = LOOP_VINFO_LOOP (loop_vinfo);
2911 /* Multiple types in SLP are handled by creating the appropriate number of
2912 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2913 case of SLP. */
2914 if (slp)
2915 ncopies = 1;
2916 else
2917 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
2919 gcc_assert (ncopies >= 1);
2921 /* FORNOW. This restriction should be relaxed. */
2922 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
2924 if (vect_print_dump_info (REPORT_DETAILS))
2925 fprintf (vect_dump, "multiple types in nested loop.");
2926 return false;
2929 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2930 return false;
2932 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2933 return false;
2935 /* Is STMT a vectorizable store? */
2937 if (!is_gimple_assign (stmt))
2938 return false;
2940 scalar_dest = gimple_assign_lhs (stmt);
2941 if (TREE_CODE (scalar_dest) != ARRAY_REF
2942 && TREE_CODE (scalar_dest) != INDIRECT_REF
2943 && TREE_CODE (scalar_dest) != COMPONENT_REF
2944 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
2945 && TREE_CODE (scalar_dest) != REALPART_EXPR)
2946 return false;
2948 gcc_assert (gimple_assign_single_p (stmt));
2949 op = gimple_assign_rhs1 (stmt);
2950 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt))
2952 if (vect_print_dump_info (REPORT_DETAILS))
2953 fprintf (vect_dump, "use not simple.");
2954 return false;
2957 /* The scalar rhs type needs to be trivially convertible to the vector
2958 component type. This should always be the case. */
2959 if (!useless_type_conversion_p (TREE_TYPE (vectype), TREE_TYPE (op)))
2961 if (vect_print_dump_info (REPORT_DETAILS))
2962 fprintf (vect_dump, "??? operands of different types");
2963 return false;
2966 vec_mode = TYPE_MODE (vectype);
2967 /* FORNOW. In some cases we can vectorize even if the data type is not
2968 supported (e.g., array initialization with 0). */
2969 if (optab_handler (mov_optab, (int)vec_mode)->insn_code == CODE_FOR_nothing)
2970 return false;
2972 if (!STMT_VINFO_DATA_REF (stmt_info))
2973 return false;
2975 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
2977 strided_store = true;
2978 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
2979 if (!vect_strided_store_supported (vectype)
2980 && !PURE_SLP_STMT (stmt_info) && !slp)
2981 return false;
2983 if (first_stmt == stmt)
2985 /* STMT is the leader of the group. Check the operands of all the
2986 stmts of the group. */
2987 next_stmt = DR_GROUP_NEXT_DR (stmt_info);
2988 while (next_stmt)
2990 gcc_assert (gimple_assign_single_p (next_stmt));
2991 op = gimple_assign_rhs1 (next_stmt);
2992 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt,
2993 &def, &dt))
2995 if (vect_print_dump_info (REPORT_DETAILS))
2996 fprintf (vect_dump, "use not simple.");
2997 return false;
2999 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3004 if (!vec_stmt) /* transformation not required. */
3006 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
3007 vect_model_store_cost (stmt_info, ncopies, dt, NULL);
3008 return true;
3011 /** Transform. **/
3013 if (strided_store)
3015 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3016 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3018 DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
3020 /* FORNOW */
3021 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
3023 /* We vectorize all the stmts of the interleaving group when we
3024 reach the last stmt in the group. */
3025 if (DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
3026 < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))
3027 && !slp)
3029 *vec_stmt = NULL;
3030 return true;
3033 if (slp)
3035 strided_store = false;
3036 /* VEC_NUM is the number of vect stmts to be created for this
3037 group. */
3038 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3039 first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
3040 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3042 else
3043 /* VEC_NUM is the number of vect stmts to be created for this
3044 group. */
3045 vec_num = group_size;
3047 else
3049 first_stmt = stmt;
3050 first_dr = dr;
3051 group_size = vec_num = 1;
3054 if (vect_print_dump_info (REPORT_DETAILS))
3055 fprintf (vect_dump, "transform store. ncopies = %d",ncopies);
3057 dr_chain = VEC_alloc (tree, heap, group_size);
3058 oprnds = VEC_alloc (tree, heap, group_size);
3060 alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
3061 gcc_assert (alignment_support_scheme);
3063 /* In case the vectorization factor (VF) is bigger than the number
3064 of elements that we can fit in a vectype (nunits), we have to generate
3065 more than one vector stmt - i.e., we need to "unroll" the
3066 vector stmt by a factor VF/nunits. For more details see documentation in
3067 vect_get_vec_def_for_copy_stmt. */
3069 /* In case of interleaving (non-unit strided access):
3071 S1: &base + 2 = x2
3072 S2: &base = x0
3073 S3: &base + 1 = x1
3074 S4: &base + 3 = x3
3076 We create vectorized stores starting from the base address (the access of
3077 the first stmt in the chain - S2 in the above example) when the last store
3078 stmt of the chain (S4) is reached:
3080 VS1: &base = vx2
3081 VS2: &base + vec_size*1 = vx0
3082 VS3: &base + vec_size*2 = vx1
3083 VS4: &base + vec_size*3 = vx3
3085 Then permutation statements are generated:
3087 VS5: vx5 = VEC_INTERLEAVE_HIGH_EXPR < vx0, vx3 >
3088 VS6: vx6 = VEC_INTERLEAVE_LOW_EXPR < vx0, vx3 >
3091 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3092 (the order of the data-refs in the output of vect_permute_store_chain
3093 corresponds to the order of scalar stmts in the interleaving chain - see
3094 the documentation of vect_permute_store_chain()).
3096 In case of both multiple types and interleaving, the above vector stores and
3097 permutation stmts are created for every copy. The result vector stmts are
3098 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
3099 STMT_VINFO_RELATED_STMT for the next copies. */
3102 prev_stmt_info = NULL;
3103 for (j = 0; j < ncopies; j++)
3105 gimple new_stmt;
3106 gimple ptr_incr;
3108 if (j == 0)
3110 if (slp)
3112 /* Get vectorized arguments for SLP_NODE. */
3113 vect_get_slp_defs (slp_node, &vec_oprnds, NULL, -1);
3115 vec_oprnd = VEC_index (tree, vec_oprnds, 0);
3117 else
3119 /* For interleaved stores we collect vectorized defs for all the
3120 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
3121 used as an input to vect_permute_store_chain(), and OPRNDS as
3122 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
3124 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3125 OPRNDS are of size 1. */
3126 next_stmt = first_stmt;
3127 for (i = 0; i < group_size; i++)
3129 /* Since gaps are not supported for interleaved stores,
3130 GROUP_SIZE is the exact number of stmts in the chain.
3131 Therefore, NEXT_STMT can't be NULL_TREE. In case
3132 there is no interleaving, GROUP_SIZE is 1, and only one
3133 iteration of the loop will be executed. */
3134 gcc_assert (next_stmt
3135 && gimple_assign_single_p (next_stmt));
3136 op = gimple_assign_rhs1 (next_stmt);
3138 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
3139 NULL);
3140 VEC_quick_push(tree, dr_chain, vec_oprnd);
3141 VEC_quick_push(tree, oprnds, vec_oprnd);
3142 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3146 /* We should have caught mismatched types earlier. */
3147 gcc_assert (useless_type_conversion_p (vectype,
3148 TREE_TYPE (vec_oprnd)));
3149 dataref_ptr = vect_create_data_ref_ptr (first_stmt, NULL, NULL_TREE,
3150 &dummy, &ptr_incr, false,
3151 &inv_p);
3152 gcc_assert (bb_vinfo || !inv_p);
3154 else
3156 /* For interleaved stores we created vectorized defs for all the
3157 defs stored in OPRNDS in the previous iteration (previous copy).
3158 DR_CHAIN is then used as an input to vect_permute_store_chain(),
3159 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
3160 next copy.
3161 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3162 OPRNDS are of size 1. */
3163 for (i = 0; i < group_size; i++)
3165 op = VEC_index (tree, oprnds, i);
3166 vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def,
3167 &dt);
3168 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
3169 VEC_replace(tree, dr_chain, i, vec_oprnd);
3170 VEC_replace(tree, oprnds, i, vec_oprnd);
3172 dataref_ptr =
3173 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
3176 if (strided_store)
3178 result_chain = VEC_alloc (tree, heap, group_size);
3179 /* Permute. */
3180 if (!vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
3181 &result_chain))
3182 return false;
3185 next_stmt = first_stmt;
3186 for (i = 0; i < vec_num; i++)
3188 if (i > 0)
3189 /* Bump the vector pointer. */
3190 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3191 NULL_TREE);
3193 if (slp)
3194 vec_oprnd = VEC_index (tree, vec_oprnds, i);
3195 else if (strided_store)
3196 /* For strided stores vectorized defs are interleaved in
3197 vect_permute_store_chain(). */
3198 vec_oprnd = VEC_index (tree, result_chain, i);
3200 if (aligned_access_p (first_dr))
3201 data_ref = build_fold_indirect_ref (dataref_ptr);
3202 else
3204 int mis = DR_MISALIGNMENT (first_dr);
3205 tree tmis = (mis == -1 ? size_zero_node : size_int (mis));
3206 tmis = size_binop (MULT_EXPR, tmis, size_int (BITS_PER_UNIT));
3207 data_ref = build2 (MISALIGNED_INDIRECT_REF, vectype, dataref_ptr, tmis);
3210 /* If accesses through a pointer to vectype do not alias the original
3211 memory reference we have a problem. This should never happen. */
3212 gcc_assert (alias_sets_conflict_p (get_alias_set (data_ref),
3213 get_alias_set (gimple_assign_lhs (stmt))));
3215 /* Arguments are ready. Create the new vector stmt. */
3216 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
3217 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3218 mark_symbols_for_renaming (new_stmt);
3220 if (slp)
3221 continue;
3223 if (j == 0)
3224 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3225 else
3226 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3228 prev_stmt_info = vinfo_for_stmt (new_stmt);
3229 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3230 if (!next_stmt)
3231 break;
3235 VEC_free (tree, heap, dr_chain);
3236 VEC_free (tree, heap, oprnds);
3237 if (result_chain)
3238 VEC_free (tree, heap, result_chain);
3240 return true;
3243 /* Function vectorizable_load.
3245 Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
3246 can be vectorized.
3247 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3248 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3249 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3251 static bool
3252 vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
3253 slp_tree slp_node, slp_instance slp_node_instance)
3255 tree scalar_dest;
3256 tree vec_dest = NULL;
3257 tree data_ref = NULL;
3258 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3259 stmt_vec_info prev_stmt_info;
3260 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3261 struct loop *loop = NULL;
3262 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
3263 bool nested_in_vect_loop = false;
3264 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
3265 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3266 tree new_temp;
3267 int mode;
3268 gimple new_stmt = NULL;
3269 tree dummy;
3270 enum dr_alignment_support alignment_support_scheme;
3271 tree dataref_ptr = NULL_TREE;
3272 gimple ptr_incr;
3273 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3274 int ncopies;
3275 int i, j, group_size;
3276 tree msq = NULL_TREE, lsq;
3277 tree offset = NULL_TREE;
3278 tree realignment_token = NULL_TREE;
3279 gimple phi = NULL;
3280 VEC(tree,heap) *dr_chain = NULL;
3281 bool strided_load = false;
3282 gimple first_stmt;
3283 tree scalar_type;
3284 bool inv_p;
3285 bool compute_in_loop = false;
3286 struct loop *at_loop;
3287 int vec_num;
3288 bool slp = (slp_node != NULL);
3289 bool slp_perm = false;
3290 enum tree_code code;
3291 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3292 int vf;
3294 if (loop_vinfo)
3296 loop = LOOP_VINFO_LOOP (loop_vinfo);
3297 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
3298 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3300 else
3301 vf = 1;
3303 /* Multiple types in SLP are handled by creating the appropriate number of
3304 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3305 case of SLP. */
3306 if (slp)
3307 ncopies = 1;
3308 else
3309 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3311 gcc_assert (ncopies >= 1);
3313 /* FORNOW. This restriction should be relaxed. */
3314 if (nested_in_vect_loop && ncopies > 1)
3316 if (vect_print_dump_info (REPORT_DETAILS))
3317 fprintf (vect_dump, "multiple types in nested loop.");
3318 return false;
3321 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3322 return false;
3324 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3325 return false;
3327 /* Is STMT a vectorizable load? */
3328 if (!is_gimple_assign (stmt))
3329 return false;
3331 scalar_dest = gimple_assign_lhs (stmt);
3332 if (TREE_CODE (scalar_dest) != SSA_NAME)
3333 return false;
3335 code = gimple_assign_rhs_code (stmt);
3336 if (code != ARRAY_REF
3337 && code != INDIRECT_REF
3338 && code != COMPONENT_REF
3339 && code != IMAGPART_EXPR
3340 && code != REALPART_EXPR)
3341 return false;
3343 if (!STMT_VINFO_DATA_REF (stmt_info))
3344 return false;
3346 scalar_type = TREE_TYPE (DR_REF (dr));
3347 mode = (int) TYPE_MODE (vectype);
3349 /* FORNOW. In some cases we can vectorize even if the data type is not
3350 supported (e.g., data copies). */
3351 if (optab_handler (mov_optab, mode)->insn_code == CODE_FOR_nothing)
3353 if (vect_print_dump_info (REPORT_DETAILS))
3354 fprintf (vect_dump, "Aligned load, but unsupported type.");
3355 return false;
3358 /* The vector component type needs to be trivially convertible to the
3359 scalar lhs. This should always be the case. */
3360 if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), TREE_TYPE (vectype)))
3362 if (vect_print_dump_info (REPORT_DETAILS))
3363 fprintf (vect_dump, "??? operands of different types");
3364 return false;
3367 /* Check if the load is a part of an interleaving chain. */
3368 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3370 strided_load = true;
3371 /* FORNOW */
3372 gcc_assert (! nested_in_vect_loop);
3374 /* Check if interleaving is supported. */
3375 if (!vect_strided_load_supported (vectype)
3376 && !PURE_SLP_STMT (stmt_info) && !slp)
3377 return false;
3380 if (!vec_stmt) /* transformation not required. */
3382 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
3383 vect_model_load_cost (stmt_info, ncopies, NULL);
3384 return true;
3387 if (vect_print_dump_info (REPORT_DETAILS))
3388 fprintf (vect_dump, "transform load.");
3390 /** Transform. **/
3392 if (strided_load)
3394 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
3395 /* Check if the chain of loads is already vectorized. */
3396 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt)))
3398 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3399 return true;
3401 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3402 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3404 /* VEC_NUM is the number of vect stmts to be created for this group. */
3405 if (slp)
3407 strided_load = false;
3408 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3409 if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance))
3410 slp_perm = true;
3412 else
3413 vec_num = group_size;
3415 dr_chain = VEC_alloc (tree, heap, vec_num);
3417 else
3419 first_stmt = stmt;
3420 first_dr = dr;
3421 group_size = vec_num = 1;
3424 alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
3425 gcc_assert (alignment_support_scheme);
3427 /* In case the vectorization factor (VF) is bigger than the number
3428 of elements that we can fit in a vectype (nunits), we have to generate
3429 more than one vector stmt - i.e., we need to "unroll" the
3430 vector stmt by a factor VF/nunits. In doing so, we record a pointer
3431 from one copy of the vector stmt to the next, in the field
3432 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
3433 stages to find the correct vector defs to be used when vectorizing
3434 stmts that use the defs of the current stmt. The example below illustrates
3435 the vectorization process when VF=16 and nunits=4 (i.e., we need to create
3436 4 vectorized stmts):
3438 before vectorization:
3439 RELATED_STMT VEC_STMT
3440 S1: x = memref - -
3441 S2: z = x + 1 - -
3443 step 1: vectorize stmt S1:
3444 We first create the vector stmt VS1_0, and, as usual, record a
3445 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
3446 Next, we create the vector stmt VS1_1, and record a pointer to
3447 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
3448 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
3449 stmts and pointers:
3450 RELATED_STMT VEC_STMT
3451 VS1_0: vx0 = memref0 VS1_1 -
3452 VS1_1: vx1 = memref1 VS1_2 -
3453 VS1_2: vx2 = memref2 VS1_3 -
3454 VS1_3: vx3 = memref3 - -
3455 S1: x = load - VS1_0
3456 S2: z = x + 1 - -
3458 See the documentation of vect_get_vec_def_for_stmt_copy for how the
3459 information we recorded in the RELATED_STMT field is used to vectorize
3460 stmt S2. */
3462 /* In case of interleaving (non-unit strided access):
3464 S1: x2 = &base + 2
3465 S2: x0 = &base
3466 S3: x1 = &base + 1
3467 S4: x3 = &base + 3
3469 Vectorized loads are created in the order of memory accesses
3470 starting from the access of the first stmt of the chain:
3472 VS1: vx0 = &base
3473 VS2: vx1 = &base + vec_size*1
3474 VS3: vx3 = &base + vec_size*2
3475 VS4: vx4 = &base + vec_size*3
3477 Then permutation statements are generated:
3479 VS5: vx5 = VEC_EXTRACT_EVEN_EXPR < vx0, vx1 >
3480 VS6: vx6 = VEC_EXTRACT_ODD_EXPR < vx0, vx1 >
3483 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3484 (the order of the data-refs in the output of vect_permute_load_chain
3485 corresponds to the order of scalar stmts in the interleaving chain - see
3486 the documentation of vect_permute_load_chain()).
3487 The generation of permutation stmts and recording them in
3488 STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().
3490 In case of both multiple types and interleaving, the vector loads and
3491 permutation stmts above are created for every copy. The result vector stmts
3492 are put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
3493 STMT_VINFO_RELATED_STMT for the next copies. */
3495 /* If the data reference is aligned (dr_aligned) or potentially unaligned
3496 on a target that supports unaligned accesses (dr_unaligned_supported)
3497 we generate the following code:
3498 p = initial_addr;
3499 indx = 0;
3500 loop {
3501 p = p + indx * vectype_size;
3502 vec_dest = *(p);
3503 indx = indx + 1;
3506 Otherwise, the data reference is potentially unaligned on a target that
3507 does not support unaligned accesses (dr_explicit_realign_optimized) -
3508 then generate the following code, in which the data in each iteration is
3509 obtained by two vector loads, one from the previous iteration, and one
3510 from the current iteration:
3511 p1 = initial_addr;
3512 msq_init = *(floor(p1))
3513 p2 = initial_addr + VS - 1;
3514 realignment_token = call target_builtin;
3515 indx = 0;
3516 loop {
3517 p2 = p2 + indx * vectype_size
3518 lsq = *(floor(p2))
3519 vec_dest = realign_load (msq, lsq, realignment_token)
3520 indx = indx + 1;
3521 msq = lsq;
3522 } */
3524 /* If the misalignment remains the same throughout the execution of the
3525 loop, we can create the init_addr and permutation mask at the loop
3526 preheader. Otherwise, it needs to be created inside the loop.
3527 This can only occur when vectorizing memory accesses in the inner-loop
3528 nested within an outer-loop that is being vectorized. */
3530 if (loop && nested_in_vect_loop_p (loop, stmt)
3531 && (TREE_INT_CST_LOW (DR_STEP (dr))
3532 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
3534 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
3535 compute_in_loop = true;
3538 if ((alignment_support_scheme == dr_explicit_realign_optimized
3539 || alignment_support_scheme == dr_explicit_realign)
3540 && !compute_in_loop)
3542 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
3543 alignment_support_scheme, NULL_TREE,
3544 &at_loop);
3545 if (alignment_support_scheme == dr_explicit_realign_optimized)
3547 phi = SSA_NAME_DEF_STMT (msq);
3548 offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
3551 else
3552 at_loop = loop;
3554 prev_stmt_info = NULL;
3555 for (j = 0; j < ncopies; j++)
3557 /* 1. Create the vector pointer update chain. */
3558 if (j == 0)
3559 dataref_ptr = vect_create_data_ref_ptr (first_stmt,
3560 at_loop, offset,
3561 &dummy, &ptr_incr, false,
3562 &inv_p);
3563 else
3564 dataref_ptr =
3565 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
3567 for (i = 0; i < vec_num; i++)
3569 if (i > 0)
3570 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3571 NULL_TREE);
3573 /* 2. Create the vector-load in the loop. */
3574 switch (alignment_support_scheme)
3576 case dr_aligned:
3577 gcc_assert (aligned_access_p (first_dr));
3578 data_ref = build_fold_indirect_ref (dataref_ptr);
3579 break;
3580 case dr_unaligned_supported:
3582 int mis = DR_MISALIGNMENT (first_dr);
3583 tree tmis = (mis == -1 ? size_zero_node : size_int (mis));
3585 tmis = size_binop (MULT_EXPR, tmis, size_int(BITS_PER_UNIT));
3586 data_ref =
3587 build2 (MISALIGNED_INDIRECT_REF, vectype, dataref_ptr, tmis);
3588 break;
3590 case dr_explicit_realign:
3592 tree ptr, bump;
3593 tree vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
3595 if (compute_in_loop)
3596 msq = vect_setup_realignment (first_stmt, gsi,
3597 &realignment_token,
3598 dr_explicit_realign,
3599 dataref_ptr, NULL);
3601 data_ref = build1 (ALIGN_INDIRECT_REF, vectype, dataref_ptr);
3602 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3603 new_stmt = gimple_build_assign (vec_dest, data_ref);
3604 new_temp = make_ssa_name (vec_dest, new_stmt);
3605 gimple_assign_set_lhs (new_stmt, new_temp);
3606 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
3607 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
3608 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3609 msq = new_temp;
3611 bump = size_binop (MULT_EXPR, vs_minus_1,
3612 TYPE_SIZE_UNIT (scalar_type));
3613 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
3614 data_ref = build1 (ALIGN_INDIRECT_REF, vectype, ptr);
3615 break;
3617 case dr_explicit_realign_optimized:
3618 data_ref = build1 (ALIGN_INDIRECT_REF, vectype, dataref_ptr);
3619 break;
3620 default:
3621 gcc_unreachable ();
3623 /* If accesses through a pointer to vectype do not alias the original
3624 memory reference we have a problem. This should never happen. */
3625 gcc_assert (alias_sets_conflict_p (get_alias_set (data_ref),
3626 get_alias_set (gimple_assign_rhs1 (stmt))));
3627 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3628 new_stmt = gimple_build_assign (vec_dest, data_ref);
3629 new_temp = make_ssa_name (vec_dest, new_stmt);
3630 gimple_assign_set_lhs (new_stmt, new_temp);
3631 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3632 mark_symbols_for_renaming (new_stmt);
3634 /* 3. Handle explicit realignment if necessary/supported. Create in
3635 loop: vec_dest = realign_load (msq, lsq, realignment_token) */
3636 if (alignment_support_scheme == dr_explicit_realign_optimized
3637 || alignment_support_scheme == dr_explicit_realign)
3639 tree tmp;
3641 lsq = gimple_assign_lhs (new_stmt);
3642 if (!realignment_token)
3643 realignment_token = dataref_ptr;
3644 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3645 tmp = build3 (REALIGN_LOAD_EXPR, vectype, msq, lsq,
3646 realignment_token);
3647 new_stmt = gimple_build_assign (vec_dest, tmp);
3648 new_temp = make_ssa_name (vec_dest, new_stmt);
3649 gimple_assign_set_lhs (new_stmt, new_temp);
3650 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3652 if (alignment_support_scheme == dr_explicit_realign_optimized)
3654 gcc_assert (phi);
3655 if (i == vec_num - 1 && j == ncopies - 1)
3656 add_phi_arg (phi, lsq, loop_latch_edge (containing_loop),
3657 UNKNOWN_LOCATION);
3658 msq = lsq;
3662 /* 4. Handle invariant-load. */
3663 if (inv_p && !bb_vinfo)
3665 gcc_assert (!strided_load);
3666 gcc_assert (nested_in_vect_loop_p (loop, stmt));
3667 if (j == 0)
3669 int k;
3670 tree t = NULL_TREE;
3671 tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);
3673 /* CHECKME: does bitpos depend on endianness? */
3674 bitpos = bitsize_zero_node;
3675 vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
3676 bitsize, bitpos);
3677 vec_dest =
3678 vect_create_destination_var (scalar_dest, NULL_TREE);
3679 new_stmt = gimple_build_assign (vec_dest, vec_inv);
3680 new_temp = make_ssa_name (vec_dest, new_stmt);
3681 gimple_assign_set_lhs (new_stmt, new_temp);
3682 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3684 for (k = nunits - 1; k >= 0; --k)
3685 t = tree_cons (NULL_TREE, new_temp, t);
3686 /* FIXME: use build_constructor directly. */
3687 vec_inv = build_constructor_from_list (vectype, t);
3688 new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
3689 new_stmt = SSA_NAME_DEF_STMT (new_temp);
3691 else
3692 gcc_unreachable (); /* FORNOW. */
3695 /* Collect vector loads and later create their permutation in
3696 vect_transform_strided_load (). */
3697 if (strided_load || slp_perm)
3698 VEC_quick_push (tree, dr_chain, new_temp);
3700 /* Store vector loads in the corresponding SLP_NODE. */
3701 if (slp && !slp_perm)
3702 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
3705 if (slp && !slp_perm)
3706 continue;
3708 if (slp_perm)
3710 if (!vect_transform_slp_perm_load (stmt, dr_chain, gsi, vf,
3711 slp_node_instance, false))
3713 VEC_free (tree, heap, dr_chain);
3714 return false;
3717 else
3719 if (strided_load)
3721 if (!vect_transform_strided_load (stmt, dr_chain, group_size, gsi))
3722 return false;
3724 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3725 VEC_free (tree, heap, dr_chain);
3726 dr_chain = VEC_alloc (tree, heap, group_size);
3728 else
3730 if (j == 0)
3731 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3732 else
3733 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3734 prev_stmt_info = vinfo_for_stmt (new_stmt);
3739 if (dr_chain)
3740 VEC_free (tree, heap, dr_chain);
3742 return true;
3745 /* Function vect_is_simple_cond.
3747 Input:
3748 LOOP - the loop that is being vectorized.
3749 COND - Condition that is checked for simple use.
3751 Returns whether COND can be vectorized. Checks whether the
3752 condition operands are supportable using vect_is_simple_use. */
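/* For example (hypothetical GIMPLE), "a_1 < b_2" is a simple cond when
   both SSA names have supportable defs, and "x_3 != 0" with an
   INTEGER_CST operand also qualifies; an operand that is neither an SSA
   name nor an integer/real/fixed constant makes the cond
   unvectorizable. */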
3754 static bool
3755 vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo)
3757 tree lhs, rhs;
3758 tree def;
3759 enum vect_def_type dt;
3761 if (!COMPARISON_CLASS_P (cond))
3762 return false;
3764 lhs = TREE_OPERAND (cond, 0);
3765 rhs = TREE_OPERAND (cond, 1);
3767 if (TREE_CODE (lhs) == SSA_NAME)
3769 gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
3770 if (!vect_is_simple_use (lhs, loop_vinfo, NULL, &lhs_def_stmt, &def,
3771 &dt))
3772 return false;
3774 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
3775 && TREE_CODE (lhs) != FIXED_CST)
3776 return false;
3778 if (TREE_CODE (rhs) == SSA_NAME)
3780 gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
3781 if (!vect_is_simple_use (rhs, loop_vinfo, NULL, &rhs_def_stmt, &def,
3782 &dt))
3783 return false;
3785 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
3786 && TREE_CODE (rhs) != FIXED_CST)
3787 return false;
3789 return true;
3792 /* vectorizable_condition.
3794 Check if STMT is a conditional modify expression that can be vectorized.
3795 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3796 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
3797 at GSI.
3799 When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
3800 to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
3801 the else clause if it is 2).
3803 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
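/* Sketch of the transformation (hypothetical GIMPLE names): the scalar
   stmt
   x_1 = a_2 < b_3 ? c_4 : d_5;
   becomes, for a suitable vectype,
   vx_1 = VEC_COND_EXPR <va_2 < vb_3, vc_4, vd_5>;
   where va_2, vb_3, vc_4 and vd_5 are the vector defs of the
   corresponding scalar operands. */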
3805 bool
3806 vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
3807 gimple *vec_stmt, tree reduc_def, int reduc_index)
3809 tree scalar_dest = NULL_TREE;
3810 tree vec_dest = NULL_TREE;
3811 tree op = NULL_TREE;
3812 tree cond_expr, then_clause, else_clause;
3813 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3814 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3815 tree vec_cond_lhs, vec_cond_rhs, vec_then_clause, vec_else_clause;
3816 tree vec_compare, vec_cond_expr;
3817 tree new_temp;
3818 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3819 enum machine_mode vec_mode;
3820 tree def;
3821 enum vect_def_type dt;
3822 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3823 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3824 enum tree_code code;
3826 /* FORNOW: unsupported in basic block SLP. */
3827 gcc_assert (loop_vinfo);
3829 gcc_assert (ncopies >= 1);
3830 if (ncopies > 1)
3831 return false; /* FORNOW */
3833 if (!STMT_VINFO_RELEVANT_P (stmt_info))
3834 return false;
3836 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
3837 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
3838 && reduc_def))
3839 return false;
3841 /* FORNOW: SLP not supported. */
3842 if (STMT_SLP_TYPE (stmt_info))
3843 return false;
3845 /* FORNOW: not yet supported. */
3846 if (STMT_VINFO_LIVE_P (stmt_info))
3848 if (vect_print_dump_info (REPORT_DETAILS))
3849 fprintf (vect_dump, "value used after loop.");
3850 return false;
3853 /* Is this a vectorizable conditional operation? */
3854 if (!is_gimple_assign (stmt))
3855 return false;
3857 code = gimple_assign_rhs_code (stmt);
3859 if (code != COND_EXPR)
3860 return false;
3862 gcc_assert (gimple_assign_single_p (stmt));
3863 op = gimple_assign_rhs1 (stmt);
3864 cond_expr = TREE_OPERAND (op, 0);
3865 then_clause = TREE_OPERAND (op, 1);
3866 else_clause = TREE_OPERAND (op, 2);
3868 if (!vect_is_simple_cond (cond_expr, loop_vinfo))
3869 return false;
3871 /* We do not handle two different vector types for the condition
3872 and the values. */
3873 if (!types_compatible_p (TREE_TYPE (TREE_OPERAND (cond_expr, 0)),
3874 TREE_TYPE (vectype)))
3875 return false;
3877 if (TREE_CODE (then_clause) == SSA_NAME)
3879 gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
3880 if (!vect_is_simple_use (then_clause, loop_vinfo, NULL,
3881 &then_def_stmt, &def, &dt))
3882 return false;
3884 else if (TREE_CODE (then_clause) != INTEGER_CST
3885 && TREE_CODE (then_clause) != REAL_CST
3886 && TREE_CODE (then_clause) != FIXED_CST)
3887 return false;
3889 if (TREE_CODE (else_clause) == SSA_NAME)
3891 gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
3892 if (!vect_is_simple_use (else_clause, loop_vinfo, NULL,
3893 &else_def_stmt, &def, &dt))
3894 return false;
3896 else if (TREE_CODE (else_clause) != INTEGER_CST
3897 && TREE_CODE (else_clause) != REAL_CST
3898 && TREE_CODE (else_clause) != FIXED_CST)
3899 return false;
3902 vec_mode = TYPE_MODE (vectype);
3904 if (!vec_stmt)
3906 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
3907 return expand_vec_cond_expr_p (TREE_TYPE (op), vec_mode);
3910 /* Transform */
3912 /* Handle def. */
3913 scalar_dest = gimple_assign_lhs (stmt);
3914 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3916 /* Handle cond expr. */
3917 vec_cond_lhs =
3918 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0), stmt, NULL);
3919 vec_cond_rhs =
3920 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1), stmt, NULL);
3921 if (reduc_index == 1)
3922 vec_then_clause = reduc_def;
3923 else
3924 vec_then_clause = vect_get_vec_def_for_operand (then_clause, stmt, NULL);
3925 if (reduc_index == 2)
3926 vec_else_clause = reduc_def;
3927 else
3928 vec_else_clause = vect_get_vec_def_for_operand (else_clause, stmt, NULL);
3930 /* Arguments are ready. Create the new vector stmt. */
3931 vec_compare = build2 (TREE_CODE (cond_expr), vectype,
3932 vec_cond_lhs, vec_cond_rhs);
3933 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
3934 vec_compare, vec_then_clause, vec_else_clause);
3936 *vec_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
3937 new_temp = make_ssa_name (vec_dest, *vec_stmt);
3938 gimple_assign_set_lhs (*vec_stmt, new_temp);
3939 vect_finish_stmt_generation (stmt, *vec_stmt, gsi);
3941 return true;
3945 /* Make sure the statement is vectorizable. */
3947 bool
3948 vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
3950 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3951 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3952 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
3953 bool ok;
3954 tree scalar_type, vectype;
3956 if (vect_print_dump_info (REPORT_DETAILS))
3958 fprintf (vect_dump, "==> examining statement: ");
3959 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
3962 if (gimple_has_volatile_ops (stmt))
3964 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
3965 fprintf (vect_dump, "not vectorized: stmt has volatile operands");
3967 return false;
3970 /* Skip stmts that do not need to be vectorized. In loops this is expected
3971 to include:
3972 - the COND_EXPR which is the loop exit condition
3973 - any LABEL_EXPRs in the loop
3974 - computations that are used only for array indexing or loop control.
3975 In basic blocks we only analyze statements that are a part of some SLP
3976 instance; therefore, all the statements are relevant. */
3978 if (!STMT_VINFO_RELEVANT_P (stmt_info)
3979 && !STMT_VINFO_LIVE_P (stmt_info))
3981 if (vect_print_dump_info (REPORT_DETAILS))
3982 fprintf (vect_dump, "irrelevant.");
3984 return true;
3987 switch (STMT_VINFO_DEF_TYPE (stmt_info))
3989 case vect_internal_def:
3990 break;
3992 case vect_reduction_def:
3993 case vect_nested_cycle:
3994 gcc_assert (!bb_vinfo && (relevance == vect_used_in_outer
3995 || relevance == vect_used_in_outer_by_reduction
3996 || relevance == vect_unused_in_scope));
3997 break;
3999 case vect_induction_def:
4000 case vect_constant_def:
4001 case vect_external_def:
4002 case vect_unknown_def_type:
4003 default:
4004 gcc_unreachable ();
4007 if (bb_vinfo)
4009 gcc_assert (PURE_SLP_STMT (stmt_info));
4011 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
4012 if (vect_print_dump_info (REPORT_DETAILS))
4014 fprintf (vect_dump, "get vectype for scalar type: ");
4015 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4018 vectype = get_vectype_for_scalar_type (scalar_type);
4019 if (!vectype)
4021 if (vect_print_dump_info (REPORT_DETAILS))
4023 fprintf (vect_dump, "not SLPed: unsupported data-type ");
4024 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4026 return false;
4029 if (vect_print_dump_info (REPORT_DETAILS))
4031 fprintf (vect_dump, "vectype: ");
4032 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4035 STMT_VINFO_VECTYPE (stmt_info) = vectype;
4038 if (STMT_VINFO_RELEVANT_P (stmt_info))
4040 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
4041 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
4042 *need_to_vectorize = true;
4045 ok = true;
4046 if (!bb_vinfo
4047 && (STMT_VINFO_RELEVANT_P (stmt_info)
4048 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
4049 ok = (vectorizable_type_promotion (stmt, NULL, NULL, NULL)
4050 || vectorizable_type_demotion (stmt, NULL, NULL, NULL)
4051 || vectorizable_conversion (stmt, NULL, NULL, NULL)
4052 || vectorizable_operation (stmt, NULL, NULL, NULL)
4053 || vectorizable_assignment (stmt, NULL, NULL, NULL)
4054 || vectorizable_load (stmt, NULL, NULL, NULL, NULL)
4055 || vectorizable_call (stmt, NULL, NULL)
4056 || vectorizable_store (stmt, NULL, NULL, NULL)
4057 || vectorizable_reduction (stmt, NULL, NULL, NULL)
4058 || vectorizable_condition (stmt, NULL, NULL, NULL, 0));
4059 else
4061 if (bb_vinfo)
4062 ok = (vectorizable_operation (stmt, NULL, NULL, node)
4063 || vectorizable_assignment (stmt, NULL, NULL, node)
4064 || vectorizable_load (stmt, NULL, NULL, node, NULL)
4065 || vectorizable_store (stmt, NULL, NULL, node));
4068 if (!ok)
4070 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4072 fprintf (vect_dump, "not vectorized: relevant stmt not ");
4073 fprintf (vect_dump, "supported: ");
4074 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4077 return false;
4080 if (bb_vinfo)
4081 return true;
4083 /* Stmts that are (also) "live" (i.e., used outside the loop)
4084 need extra handling, except for vectorizable reductions. */
4085 if (STMT_VINFO_LIVE_P (stmt_info)
4086 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4087 ok = vectorizable_live_operation (stmt, NULL, NULL);
4089 if (!ok)
4091 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4093 fprintf (vect_dump, "not vectorized: live stmt not ");
4094 fprintf (vect_dump, "supported: ");
4095 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4098 return false;
4101 if (!PURE_SLP_STMT (stmt_info))
4103 /* Groups of strided accesses whose size is not a power of 2 are not
4104 vectorizable yet using loop-vectorization. Therefore, if this stmt
4105 feeds non-SLP-able stmts (i.e., this stmt has to be both SLPed and
4106 loop-based vectorized), the loop cannot be vectorized. */
4107 if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
4108 && exact_log2 (DR_GROUP_SIZE (vinfo_for_stmt (
4109 DR_GROUP_FIRST_DR (stmt_info)))) == -1)
4111 if (vect_print_dump_info (REPORT_DETAILS))
4113 fprintf (vect_dump, "not vectorized: the size of group "
4114 "of strided accesses is not a power of 2");
4115 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4118 return false;
4122 return true;
4126 /* Function vect_transform_stmt.
4128 Create a vectorized stmt to replace STMT, and insert it at BSI. */
4130 bool
4131 vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
4132 bool *strided_store, slp_tree slp_node,
4133 slp_instance slp_node_instance)
4135 bool is_store = false;
4136 gimple vec_stmt = NULL;
4137 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4138 gimple orig_stmt_in_pattern;
4139 bool done;
4141 switch (STMT_VINFO_TYPE (stmt_info))
4143 case type_demotion_vec_info_type:
4144 done = vectorizable_type_demotion (stmt, gsi, &vec_stmt, slp_node);
4145 gcc_assert (done);
4146 break;
4148 case type_promotion_vec_info_type:
4149 done = vectorizable_type_promotion (stmt, gsi, &vec_stmt, slp_node);
4150 gcc_assert (done);
4151 break;
4153 case type_conversion_vec_info_type:
4154 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
4155 gcc_assert (done);
4156 break;
4158 case induc_vec_info_type:
4159 gcc_assert (!slp_node);
4160 done = vectorizable_induction (stmt, gsi, &vec_stmt);
4161 gcc_assert (done);
4162 break;
4164 case op_vec_info_type:
4165 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
4166 gcc_assert (done);
4167 break;
4169 case assignment_vec_info_type:
4170 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
4171 gcc_assert (done);
4172 break;
4174 case load_vec_info_type:
4175 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
4176 slp_node_instance);
4177 gcc_assert (done);
4178 break;
4180 case store_vec_info_type:
4181 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
4182 gcc_assert (done);
4183 if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && !slp_node)
4185 /* In case of interleaving, the whole chain is vectorized when the
4186 last store in the chain is reached. Store stmts before the last
4187 one are skipped, and their vec_stmt_info shouldn't be freed
4188 meanwhile. */
4189 *strided_store = true;
4190 if (STMT_VINFO_VEC_STMT (stmt_info))
4191 is_store = true;
4193 else
4194 is_store = true;
4195 break;
4197 case condition_vec_info_type:
4198 gcc_assert (!slp_node);
4199 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0);
4200 gcc_assert (done);
4201 break;
4203 case call_vec_info_type:
4204 gcc_assert (!slp_node);
4205 done = vectorizable_call (stmt, gsi, &vec_stmt);
4206 break;
4208 case reduc_vec_info_type:
4209 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
4210 gcc_assert (done);
4211 break;
4213 default:
4214 if (!STMT_VINFO_LIVE_P (stmt_info))
4216 if (vect_print_dump_info (REPORT_DETAILS))
4217 fprintf (vect_dump, "stmt not supported.");
4218 gcc_unreachable ();
4222 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
4223 is being vectorized, but outside the immediately enclosing loop. */
4224 if (vec_stmt
4225 && STMT_VINFO_LOOP_VINFO (stmt_info)
4226 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
4227 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
4228 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
4229 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
4230 || STMT_VINFO_RELEVANT (stmt_info) ==
4231 vect_used_in_outer_by_reduction))
4233 struct loop *innerloop = LOOP_VINFO_LOOP (
4234 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
4235 imm_use_iterator imm_iter;
4236 use_operand_p use_p;
4237 tree scalar_dest;
4238 gimple exit_phi;
4240 if (vect_print_dump_info (REPORT_DETAILS))
4241 fprintf (vect_dump, "Record the vdef for outer-loop vectorization.");
4243 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
4244 (to be used when vectorizing outer-loop stmts that use the DEF of
4245 STMT). */
4246 if (gimple_code (stmt) == GIMPLE_PHI)
4247 scalar_dest = PHI_RESULT (stmt);
4248 else
4249 scalar_dest = gimple_assign_lhs (stmt);
4251 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4253 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
4255 exit_phi = USE_STMT (use_p);
4256 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
4261 /* Handle stmts whose DEF is used outside the loop-nest that is
4262 being vectorized. */
4263 if (STMT_VINFO_LIVE_P (stmt_info)
4264 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4266 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
4267 gcc_assert (done);
4270 if (vec_stmt)
4272 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
4273 orig_stmt_in_pattern = STMT_VINFO_RELATED_STMT (stmt_info);
4274 if (orig_stmt_in_pattern)
4276 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt_in_pattern);
4277 /* STMT was inserted by the vectorizer to replace a computation idiom.
4278 ORIG_STMT_IN_PATTERN is a stmt in the original sequence that
4279 computed this idiom. We need to record a pointer to VEC_STMT in
4280 the stmt_info of ORIG_STMT_IN_PATTERN. See more details in the
4281 documentation of vect_pattern_recog. */
4282 if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
4284 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
4285 STMT_VINFO_VEC_STMT (stmt_vinfo) = vec_stmt;
4290 return is_store;
4294 /* Remove a group of stores (for SLP or interleaving) and free their
4295 stmt_vec_info. */
4297 void
4298 vect_remove_stores (gimple first_stmt)
4300 gimple next = first_stmt;
4301 gimple tmp;
4302 gimple_stmt_iterator next_si;
4304 while (next)
4306 /* Free the attached stmt_vec_info and remove the stmt. */
4307 next_si = gsi_for_stmt (next);
4308 gsi_remove (&next_si, true);
4309 tmp = DR_GROUP_NEXT_DR (vinfo_for_stmt (next));
4310 free_stmt_vec_info (next);
4311 next = tmp;
4316 /* Function new_stmt_vec_info.
4318 Create and initialize a new stmt_vec_info struct for STMT. */
4320 stmt_vec_info
4321 new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
4322 bb_vec_info bb_vinfo)
4324 stmt_vec_info res;
4325 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
4327 STMT_VINFO_TYPE (res) = undef_vec_info_type;
4328 STMT_VINFO_STMT (res) = stmt;
4329 STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
4330 STMT_VINFO_BB_VINFO (res) = bb_vinfo;
4331 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
4332 STMT_VINFO_LIVE_P (res) = false;
4333 STMT_VINFO_VECTYPE (res) = NULL;
4334 STMT_VINFO_VEC_STMT (res) = NULL;
4335 STMT_VINFO_VECTORIZABLE (res) = true;
4336 STMT_VINFO_IN_PATTERN_P (res) = false;
4337 STMT_VINFO_RELATED_STMT (res) = NULL;
4338 STMT_VINFO_DATA_REF (res) = NULL;
4340 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
4341 STMT_VINFO_DR_OFFSET (res) = NULL;
4342 STMT_VINFO_DR_INIT (res) = NULL;
4343 STMT_VINFO_DR_STEP (res) = NULL;
4344 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
4346 if (gimple_code (stmt) == GIMPLE_PHI
4347 && is_loop_header_bb_p (gimple_bb (stmt)))
4348 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
4349 else
4350 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
4352 STMT_VINFO_SAME_ALIGN_REFS (res) = VEC_alloc (dr_p, heap, 5);
4353 STMT_VINFO_INSIDE_OF_LOOP_COST (res) = 0;
4354 STMT_VINFO_OUTSIDE_OF_LOOP_COST (res) = 0;
4355 STMT_SLP_TYPE (res) = loop_vect;
4356 DR_GROUP_FIRST_DR (res) = NULL;
4357 DR_GROUP_NEXT_DR (res) = NULL;
4358 DR_GROUP_SIZE (res) = 0;
4359 DR_GROUP_STORE_COUNT (res) = 0;
4360 DR_GROUP_GAP (res) = 0;
4361 DR_GROUP_SAME_DR_STMT (res) = NULL;
4362 DR_GROUP_READ_WRITE_DEPENDENCE (res) = false;
4364 return res;
4368 /* Create the vector that holds the stmt_vec_info structs. */
4370 void
4371 init_stmt_vec_info_vec (void)
4373 gcc_assert (!stmt_vec_info_vec);
4374 stmt_vec_info_vec = VEC_alloc (vec_void_p, heap, 50);
4378 /* Free the stmt_vec_info vector. */
4380 void
4381 free_stmt_vec_info_vec (void)
4383 gcc_assert (stmt_vec_info_vec);
4384 VEC_free (vec_void_p, heap, stmt_vec_info_vec);
4388 /* Free stmt vectorization related info. */
4390 void
4391 free_stmt_vec_info (gimple stmt)
4393 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4395 if (!stmt_info)
4396 return;
4398 VEC_free (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
4399 set_vinfo_for_stmt (stmt, NULL);
4400 free (stmt_info);
4404 /* Function get_vectype_for_scalar_type.
4406 Returns the vector type corresponding to SCALAR_TYPE as supported
4407 by the target. */
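/* For example, on a target whose SIMD word is 16 bytes, a 4-byte "int"
   maps to a 4-unit vector type (V4SImode), while a scalar type at least
   as wide as the SIMD word yields NULL_TREE. (Illustrative; the actual
   mapping is target-dependent.) */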
4409 tree
4410 get_vectype_for_scalar_type (tree scalar_type)
4412 enum machine_mode inner_mode = TYPE_MODE (scalar_type);
4413 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
4414 int nunits;
4415 tree vectype;
4417 if (nbytes == 0 || nbytes >= UNITS_PER_SIMD_WORD (inner_mode))
4418 return NULL_TREE;
4420 /* We can't build a vector type of elements with alignment bigger than
4421 their size. */
4422 if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
4423 return NULL_TREE;
4425 /* If we'd build a vector type of elements whose mode precision doesn't
4426 match their type's precision, we'll get mismatched types on vector
4427 extracts via BIT_FIELD_REFs. This effectively means we disable
4428 vectorization of bool and/or enum types in some languages. */
4429 if (INTEGRAL_TYPE_P (scalar_type)
4430 && GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type))
4431 return NULL_TREE;
4433 /* FORNOW: Only a single vector size per mode (UNITS_PER_SIMD_WORD)
4434 is expected. */
4435 nunits = UNITS_PER_SIMD_WORD (inner_mode) / nbytes;
4437 vectype = build_vector_type (scalar_type, nunits);
4438 if (vect_print_dump_info (REPORT_DETAILS))
4440 fprintf (vect_dump, "get vectype with %d units of type ", nunits);
4441 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4444 if (!vectype)
4445 return NULL_TREE;
4447 if (vect_print_dump_info (REPORT_DETAILS))
4449 fprintf (vect_dump, "vectype: ");
4450 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4453 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
4454 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
4456 if (vect_print_dump_info (REPORT_DETAILS))
4457 fprintf (vect_dump, "mode not supported by target.");
4458 return NULL_TREE;
4461 return vectype;
4464 /* Function get_same_sized_vectype
4466 Returns a vector type corresponding to SCALAR_TYPE with the same
4467 size as VECTOR_TYPE, if supported by the target. */
4469 tree
4470 get_same_sized_vectype (tree scalar_type, tree vector_type ATTRIBUTE_UNUSED)
4472 return get_vectype_for_scalar_type (scalar_type);
4475 /* Function vect_is_simple_use.
4477 Input:
4478 LOOP_VINFO - the vect info of the loop that is being vectorized.
4479 BB_VINFO - the vect info of the basic block that is being vectorized.
4480 OPERAND - operand of a stmt in the loop or bb.
4481 DEF - the defining stmt in case OPERAND is an SSA_NAME.
4483 Returns whether a stmt with OPERAND can be vectorized.
4484 For loops, supportable operands are constants, loop invariants, and operands
4485 that are defined by the current iteration of the loop. Unsupportable
4486 operands are those that are defined by a previous iteration of the loop (as
4487 is the case in reduction/induction computations).
4488 For basic blocks, supportable operands are constants and bb invariants.
4489 For now, operands defined outside the basic block are not supported. */
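/* For example (hypothetical loop), in
   for (i = 0; i < n; i++)
   a[i] = b[i] + c;
   the operand "c" is a loop invariant (vect_external_def) and the load
   of "b[i]" is defined in the current iteration (vect_internal_def);
   an operand carried over from a previous iteration, as in
   "s = s + b[i]", is defined by the reduction cycle and is not
   supportable here. */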
4491 bool
4492 vect_is_simple_use (tree operand, loop_vec_info loop_vinfo,
4493 bb_vec_info bb_vinfo, gimple *def_stmt,
4494 tree *def, enum vect_def_type *dt)
4496 basic_block bb;
4497 stmt_vec_info stmt_vinfo;
4498 struct loop *loop = NULL;
4500 if (loop_vinfo)
4501 loop = LOOP_VINFO_LOOP (loop_vinfo);
4503 *def_stmt = NULL;
4504 *def = NULL_TREE;
4506 if (vect_print_dump_info (REPORT_DETAILS))
4508 fprintf (vect_dump, "vect_is_simple_use: operand ");
4509 print_generic_expr (vect_dump, operand, TDF_SLIM);
4512 if (TREE_CODE (operand) == INTEGER_CST || TREE_CODE (operand) == REAL_CST)
4514 *dt = vect_constant_def;
4515 return true;
4518 if (is_gimple_min_invariant (operand))
4520 *def = operand;
4521 *dt = vect_external_def;
4522 return true;
4525 if (TREE_CODE (operand) == PAREN_EXPR)
4527 if (vect_print_dump_info (REPORT_DETAILS))
4528 fprintf (vect_dump, "non-associatable copy.");
4529 operand = TREE_OPERAND (operand, 0);
4532 if (TREE_CODE (operand) != SSA_NAME)
4534 if (vect_print_dump_info (REPORT_DETAILS))
4535 fprintf (vect_dump, "not ssa-name.");
4536 return false;
4539 *def_stmt = SSA_NAME_DEF_STMT (operand);
4540 if (*def_stmt == NULL)
4542 if (vect_print_dump_info (REPORT_DETAILS))
4543 fprintf (vect_dump, "no def_stmt.");
4544 return false;
4547 if (vect_print_dump_info (REPORT_DETAILS))
4549 fprintf (vect_dump, "def_stmt: ");
4550 print_gimple_stmt (vect_dump, *def_stmt, 0, TDF_SLIM);
4553 /* An empty stmt is expected only in the case of a function argument
4554 (otherwise we expect a PHI node or a GIMPLE_ASSIGN). */
4555 if (gimple_nop_p (*def_stmt))
4557 *def = operand;
4558 *dt = vect_external_def;
4559 return true;
4562 bb = gimple_bb (*def_stmt);
4564 if ((loop && !flow_bb_inside_loop_p (loop, bb))
4565 || (!loop && bb != BB_VINFO_BB (bb_vinfo))
4566 || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
4567 *dt = vect_external_def;
4568 else
4570 stmt_vinfo = vinfo_for_stmt (*def_stmt);
4571 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
4574 if (*dt == vect_unknown_def_type)
4576 if (vect_print_dump_info (REPORT_DETAILS))
4577 fprintf (vect_dump, "Unsupported pattern.");
4578 return false;
4581 if (vect_print_dump_info (REPORT_DETAILS))
4582 fprintf (vect_dump, "type of def: %d.", *dt);
4584 switch (gimple_code (*def_stmt))
4586 case GIMPLE_PHI:
4587 *def = gimple_phi_result (*def_stmt);
4588 break;
4590 case GIMPLE_ASSIGN:
4591 *def = gimple_assign_lhs (*def_stmt);
4592 break;
4594 case GIMPLE_CALL:
4595 *def = gimple_call_lhs (*def_stmt);
4596 if (*def != NULL)
4597 break;
4598 /* FALLTHRU */
4599 default:
4600 if (vect_print_dump_info (REPORT_DETAILS))
4601 fprintf (vect_dump, "unsupported defining stmt: ");
4602 return false;
4605 return true;
4608 /* Function vect_is_simple_use_1.
4610 Same as vect_is_simple_use but also determines the vector operand
4611 type of OPERAND and stores it to *VECTYPE. If the definition of
4612 OPERAND is vect_uninitialized_def, vect_constant_def or
4613 vect_external_def, *VECTYPE will be set to NULL_TREE and the caller
4614 is responsible for computing the best-suited vector type for the
4615 scalar operand. */
4617 bool
4618 vect_is_simple_use_1 (tree operand, loop_vec_info loop_vinfo,
4619 bb_vec_info bb_vinfo, gimple *def_stmt,
4620 tree *def, enum vect_def_type *dt, tree *vectype)
4622 if (!vect_is_simple_use (operand, loop_vinfo, bb_vinfo, def_stmt, def, dt))
4623 return false;
4625 /* Now get a vector type if the def is internal, otherwise supply
4626 NULL_TREE and leave it up to the caller to figure out a proper
4627 type for the use stmt. */
4628 if (*dt == vect_internal_def
4629 || *dt == vect_induction_def
4630 || *dt == vect_reduction_def
4631 || *dt == vect_double_reduction_def
4632 || *dt == vect_nested_cycle)
4634 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
4635 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
4636 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
4637 *vectype = STMT_VINFO_VECTYPE (stmt_info);
4638 gcc_assert (*vectype != NULL_TREE);
4640 else if (*dt == vect_uninitialized_def
4641 || *dt == vect_constant_def
4642 || *dt == vect_external_def)
4643 *vectype = NULL_TREE;
4644 else
4645 gcc_unreachable ();
4647 return true;
4651 /* Function supportable_widening_operation
4653 Check whether an operation represented by the code CODE is a
4654 widening operation that is supported by the target platform in
4655 vector form (i.e., when operating on arguments of type VECTYPE_IN
4656 and producing a result of type VECTYPE_OUT).
4658 Widening operations we currently support are NOP (CONVERT), FLOAT
4659 and WIDEN_MULT. This function checks if these operations are supported
4660 by the target platform either directly (via vector tree-codes), or via
4661 target builtins.
4663 Output:
4664 - CODE1 and CODE2 are codes of vector operations to be used when
4665 vectorizing the operation, if available.
4666 - DECL1 and DECL2 are decls of target builtin functions to be used
4667 when vectorizing the operation, if available. In this case,
4668 CODE1 and CODE2 are CALL_EXPR.
4669 - MULTI_STEP_CVT determines the number of required intermediate steps in
4670 case of multi-step conversion (like char->short->int - in that case
4671 MULTI_STEP_CVT will be 1).
4672 - INTERM_TYPES contains the intermediate type required to perform the
4673 widening operation (short in the above example). */
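/* For example (hypothetical target), widening chars to ints when the
   target only unpacks one step at a time is done as char->short->int:
   CODE1/CODE2 are VEC_UNPACK_LO_EXPR/VEC_UNPACK_HI_EXPR,
   MULTI_STEP_CVT is 1, and INTERM_TYPES holds the intermediate "short"
   vector type. */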
4675 bool
4676 supportable_widening_operation (enum tree_code code, gimple stmt,
4677 tree vectype_out, tree vectype_in,
4678 tree *decl1, tree *decl2,
4679 enum tree_code *code1, enum tree_code *code2,
4680 int *multi_step_cvt,
4681 VEC (tree, heap) **interm_types)
4683 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4684 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
4685 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
4686 bool ordered_p;
4687 enum machine_mode vec_mode;
4688 enum insn_code icode1, icode2;
4689 optab optab1, optab2;
4690 tree vectype = vectype_in;
4691 tree wide_vectype = vectype_out;
4692 enum tree_code c1, c2;
4694 /* The result of a vectorized widening operation usually requires two vectors
4695 (because the widened results do not fit in one vector). The generated
4696 vector results would normally be expected to be generated in the same
4697 order as in the original scalar computation, i.e. if 8 results are
4698 generated in each vector iteration, they are to be organized as follows:
4699 vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].
4701 However, in the special case that the result of the widening operation is
4702 used in a reduction computation only, the order doesn't matter (because
4703 when vectorizing a reduction we change the order of the computation).
4704 Some targets can take advantage of this and generate more efficient code.
4705 For example, targets like Altivec, that support widen_mult using a sequence
4706 of {mult_even,mult_odd} generate the following vectors:
4707 vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8].
4709 When vectorizing outer-loops, we execute the inner-loop sequentially
4710 (each vectorized inner-loop iteration contributes to VF outer-loop
4711 iterations in parallel). We therefore don't allow changing the order
4712 of the computation in the inner-loop during outer-loop vectorization. */
4714 if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
4715 && !nested_in_vect_loop_p (vect_loop, stmt))
4716 ordered_p = false;
4717 else
4718 ordered_p = true;
4720 if (!ordered_p
4721 && code == WIDEN_MULT_EXPR
4722 && targetm.vectorize.builtin_mul_widen_even
4723 && targetm.vectorize.builtin_mul_widen_even (vectype)
4724 && targetm.vectorize.builtin_mul_widen_odd
4725 && targetm.vectorize.builtin_mul_widen_odd (vectype))
4727 if (vect_print_dump_info (REPORT_DETAILS))
4728 fprintf (vect_dump, "Unordered widening operation detected.");
4730 *code1 = *code2 = CALL_EXPR;
4731 *decl1 = targetm.vectorize.builtin_mul_widen_even (vectype);
4732 *decl2 = targetm.vectorize.builtin_mul_widen_odd (vectype);
4733 return true;
4736 switch (code)
4738 case WIDEN_MULT_EXPR:
4739 if (BYTES_BIG_ENDIAN)
4741 c1 = VEC_WIDEN_MULT_HI_EXPR;
4742 c2 = VEC_WIDEN_MULT_LO_EXPR;
4744 else
4746 c2 = VEC_WIDEN_MULT_HI_EXPR;
4747 c1 = VEC_WIDEN_MULT_LO_EXPR;
4749 break;
4751 CASE_CONVERT:
4752 if (BYTES_BIG_ENDIAN)
4754 c1 = VEC_UNPACK_HI_EXPR;
4755 c2 = VEC_UNPACK_LO_EXPR;
4757 else
4759 c2 = VEC_UNPACK_HI_EXPR;
4760 c1 = VEC_UNPACK_LO_EXPR;
4762 break;
4764 case FLOAT_EXPR:
4765 if (BYTES_BIG_ENDIAN)
4767 c1 = VEC_UNPACK_FLOAT_HI_EXPR;
4768 c2 = VEC_UNPACK_FLOAT_LO_EXPR;
4770 else
4772 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
4773 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
4775 break;
4777 case FIX_TRUNC_EXPR:
4778 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
4779 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
4780 computing the operation. */
4781 return false;
4783 default:
4784 gcc_unreachable ();
4787 if (code == FIX_TRUNC_EXPR)
4789 /* The signedness is determined from output operand. */
4790 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
4791 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
4793 else
4795 optab1 = optab_for_tree_code (c1, vectype, optab_default);
4796 optab2 = optab_for_tree_code (c2, vectype, optab_default);
4799 if (!optab1 || !optab2)
4800 return false;
4802 vec_mode = TYPE_MODE (vectype);
4803 if ((icode1 = optab_handler (optab1, vec_mode)->insn_code) == CODE_FOR_nothing
4804 || (icode2 = optab_handler (optab2, vec_mode)->insn_code)
4805 == CODE_FOR_nothing)
4806 return false;
4808 /* Check if it's a multi-step conversion that can be done using intermediate
4809 types. */
4810 if (insn_data[icode1].operand[0].mode != TYPE_MODE (wide_vectype)
4811 || insn_data[icode2].operand[0].mode != TYPE_MODE (wide_vectype))
4813 int i;
4814 tree prev_type = vectype, intermediate_type;
4815 enum machine_mode intermediate_mode, prev_mode = vec_mode;
4816 optab optab3, optab4;
4818 if (!CONVERT_EXPR_CODE_P (code))
4819 return false;
4821 *code1 = c1;
4822 *code2 = c2;
4824 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
4825 intermediate steps in the promotion sequence. We try MAX_INTERM_CVT_STEPS
4826 to get to WIDE_VECTYPE, and fail if we do not. */
4827 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
4828 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
4830 intermediate_mode = insn_data[icode1].operand[0].mode;
4831 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
4832 TYPE_UNSIGNED (prev_type));
4833 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
4834 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
4836 if (!optab3 || !optab4
4837 || (icode1 = optab1->handlers[(int) prev_mode].insn_code)
4838 == CODE_FOR_nothing
4839 || insn_data[icode1].operand[0].mode != intermediate_mode
4840 || (icode2 = optab2->handlers[(int) prev_mode].insn_code)
4841 == CODE_FOR_nothing
4842 || insn_data[icode2].operand[0].mode != intermediate_mode
4843 || (icode1 = optab3->handlers[(int) intermediate_mode].insn_code)
4844 == CODE_FOR_nothing
4845 || (icode2 = optab4->handlers[(int) intermediate_mode].insn_code)
4846 == CODE_FOR_nothing)
4847 return false;
4849 VEC_quick_push (tree, *interm_types, intermediate_type);
4850 (*multi_step_cvt)++;
4852 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
4853 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
4854 return true;
4856 prev_type = intermediate_type;
4857 prev_mode = intermediate_mode;
4860 return false;
4863 *code1 = c1;
4864 *code2 = c2;
4865 return true;
4869 /* Function supportable_narrowing_operation
4871 Check whether an operation represented by the code CODE is a
4872 narrowing operation that is supported by the target platform in
4873 vector form (i.e., when operating on arguments of type VECTYPE_IN
4874 and producing a result of type VECTYPE_OUT).
4876 Narrowing operations we currently support are NOP (CONVERT) and
4877 FIX_TRUNC. This function checks if these operations are supported by
4878 the target platform directly via vector tree-codes.
4880 Output:
4881 - CODE1 is the code of a vector operation to be used when
4882 vectorizing the operation, if available.
4883 - MULTI_STEP_CVT determines the number of required intermediate steps in
4884 case of multi-step conversion (like int->short->char - in that case
4885 MULTI_STEP_CVT will be 1).
4886 - INTERM_TYPES contains the intermediate type required to perform the
4887 narrowing operation (short in the above example). */
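/* For example (hypothetical target), narrowing ints to chars when the
   target only packs one step at a time is done as int->short->char:
   CODE1 is VEC_PACK_TRUNC_EXPR, MULTI_STEP_CVT is 1, and INTERM_TYPES
   holds the intermediate "short" vector type. */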
4889 bool
4890 supportable_narrowing_operation (enum tree_code code,
4891 tree vectype_out, tree vectype_in,
4892 enum tree_code *code1, int *multi_step_cvt,
4893 VEC (tree, heap) **interm_types)
4895 enum machine_mode vec_mode;
4896 enum insn_code icode1;
4897 optab optab1, interm_optab;
4898 tree vectype = vectype_in;
4899 tree narrow_vectype = vectype_out;
4900 enum tree_code c1;
4901 tree intermediate_type, prev_type;
4902 int i;
4904 switch (code)
4906 CASE_CONVERT:
4907 c1 = VEC_PACK_TRUNC_EXPR;
4908 break;
4910 case FIX_TRUNC_EXPR:
4911 c1 = VEC_PACK_FIX_TRUNC_EXPR;
4912 break;
4914 case FLOAT_EXPR:
4915 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
4916 tree code and optabs used for computing the operation. */
4917 return false;
4919 default:
4920 gcc_unreachable ();
4923 if (code == FIX_TRUNC_EXPR)
4924 /* The signedness is determined from output operand. */
4925 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
4926 else
4927 optab1 = optab_for_tree_code (c1, vectype, optab_default);
4929 if (!optab1)
4930 return false;
4932 vec_mode = TYPE_MODE (vectype);
4933 if ((icode1 = optab_handler (optab1, vec_mode)->insn_code)
4934 == CODE_FOR_nothing)
4935 return false;
4937 /* Check if it's a multi-step conversion that can be done using intermediate
4938 types. */
4939 if (insn_data[icode1].operand[0].mode != TYPE_MODE (narrow_vectype))
4941 enum machine_mode intermediate_mode, prev_mode = vec_mode;
4943 *code1 = c1;
4944 prev_type = vectype;
4945 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
4946 intermediate steps in the demotion sequence. We try MAX_INTERM_CVT_STEPS
4947 to get to NARROW_VECTYPE, and fail if we do not. */
4948 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
4949 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
4951 intermediate_mode = insn_data[icode1].operand[0].mode;
4952 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
4953 TYPE_UNSIGNED (prev_type));
4954 interm_optab = optab_for_tree_code (c1, intermediate_type,
4955 optab_default);
4956 if (!interm_optab
4957 || (icode1 = optab1->handlers[(int) prev_mode].insn_code)
4958 == CODE_FOR_nothing
4959 || insn_data[icode1].operand[0].mode != intermediate_mode
4960 || (icode1
4961 = interm_optab->handlers[(int) intermediate_mode].insn_code)
4962 == CODE_FOR_nothing)
4963 return false;
4965 VEC_quick_push (tree, *interm_types, intermediate_type);
4966 (*multi_step_cvt)++;
4968 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
4969 return true;
4971 prev_type = intermediate_type;
4972 prev_mode = intermediate_mode;
4975 return false;
4978 *code1 = c1;
4979 return true;