gcc/tree-vect-stmts.c
1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software
3 Foundation, Inc.
4 Contributed by Dorit Naishlos <dorit@il.ibm.com>
5 and Ira Rosen <irar@il.ibm.com>
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "ggc.h"
28 #include "tree.h"
29 #include "target.h"
30 #include "basic-block.h"
31 #include "diagnostic.h"
32 #include "tree-flow.h"
33 #include "tree-dump.h"
34 #include "cfgloop.h"
35 #include "cfglayout.h"
36 #include "expr.h"
37 #include "recog.h"
38 #include "optabs.h"
39 #include "toplev.h"
40 #include "tree-vectorizer.h"
41 #include "langhooks.h"
44 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
46 /* Function vect_mark_relevant.
48 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
50 static void
51 vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt,
52 enum vect_relevant relevant, bool live_p)
54 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
55 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
56 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
58 if (vect_print_dump_info (REPORT_DETAILS))
59 fprintf (vect_dump, "mark relevant %d, live %d.", relevant, live_p);
61 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
63 gimple pattern_stmt;
65 /* This is the last stmt in a sequence that was detected as a
66 pattern that can potentially be vectorized. Don't mark the stmt
67 as relevant/live because it's not going to be vectorized.
68 Instead mark the pattern-stmt that replaces it. */
70 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
72 if (vect_print_dump_info (REPORT_DETAILS))
73 fprintf (vect_dump, "last stmt in pattern. don't mark relevant/live.");
74 stmt_info = vinfo_for_stmt (pattern_stmt);
75 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
76 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
77 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
78 stmt = pattern_stmt;
81 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
82 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
83 STMT_VINFO_RELEVANT (stmt_info) = relevant;
85 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
86 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
88 if (vect_print_dump_info (REPORT_DETAILS))
89 fprintf (vect_dump, "already marked relevant/live.");
90 return;
93 VEC_safe_push (gimple, heap, *worklist, stmt);
97 /* Function vect_stmt_relevant_p.
99 Return true if STMT in loop that is represented by LOOP_VINFO is
100 "relevant for vectorization".
102 A stmt is considered "relevant for vectorization" if:
103 - it has uses outside the loop.
104 - it has vdefs (it alters memory).
105 - control stmts in the loop (except for the exit condition).
107 CHECKME: what other side effects would the vectorizer allow? */
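/* Illustrative example (not part of the original source): in

     for (i = 0; i < N; i++)
       s = a[i];
     ... = s;   <-- use of s after the loop

   the load into 's' is "relevant" because its def has a use outside the
   loop (so *live_p is set), while a store 'a[i] = x' inside the loop is
   relevant because it has a vdef.  */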
109 static bool
110 vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
111 enum vect_relevant *relevant, bool *live_p)
113 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
114 ssa_op_iter op_iter;
115 imm_use_iterator imm_iter;
116 use_operand_p use_p;
117 def_operand_p def_p;
119 *relevant = vect_unused_in_scope;
120 *live_p = false;
122 /* cond stmt other than loop exit cond. */
123 if (is_ctrl_stmt (stmt)
124 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
125 != loop_exit_ctrl_vec_info_type)
126 *relevant = vect_used_in_scope;
128 /* changing memory. */
129 if (gimple_code (stmt) != GIMPLE_PHI)
130 if (gimple_vdef (stmt))
132 if (vect_print_dump_info (REPORT_DETAILS))
133 fprintf (vect_dump, "vec_stmt_relevant_p: stmt has vdefs.");
134 *relevant = vect_used_in_scope;
137 /* uses outside the loop. */
138 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
140 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
142 basic_block bb = gimple_bb (USE_STMT (use_p));
143 if (!flow_bb_inside_loop_p (loop, bb))
145 if (vect_print_dump_info (REPORT_DETAILS))
146 fprintf (vect_dump, "vec_stmt_relevant_p: used out of loop.");
148 if (is_gimple_debug (USE_STMT (use_p)))
149 continue;
151 /* We expect all such uses to be in the loop exit phis
152 (because of loop closed form) */
153 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
154 gcc_assert (bb == single_exit (loop)->dest);
156 *live_p = true;
161 return (*live_p || *relevant);
165 /* Function exist_non_indexing_operands_for_use_p
167 USE is one of the uses attached to STMT. Check if USE is
168 used in STMT for anything other than indexing an array. */
170 static bool
171 exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
173 tree operand;
174 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
176 /* USE corresponds to some operand in STMT. If there is no data
177 reference in STMT, then any operand that corresponds to USE
178 is not indexing an array. */
179 if (!STMT_VINFO_DATA_REF (stmt_info))
180 return true;
 182 /* STMT has a data_ref. FORNOW this means that it's of one of
183 the following forms:
184 -1- ARRAY_REF = var
185 -2- var = ARRAY_REF
186 (This should have been verified in analyze_data_refs).
188 'var' in the second case corresponds to a def, not a use,
189 so USE cannot correspond to any operands that are not used
190 for array indexing.
192 Therefore, all we need to check is if STMT falls into the
193 first case, and whether var corresponds to USE. */
195 if (!gimple_assign_copy_p (stmt))
196 return false;
197 if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
198 return false;
199 operand = gimple_assign_rhs1 (stmt);
200 if (TREE_CODE (operand) != SSA_NAME)
201 return false;
203 if (operand == use)
204 return true;
206 return false;
 210 /*
 211 Function process_use.
213 Inputs:
214 - a USE in STMT in a loop represented by LOOP_VINFO
215 - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
216 that defined USE. This is done by calling mark_relevant and passing it
217 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
219 Outputs:
220 Generally, LIVE_P and RELEVANT are used to define the liveness and
221 relevance info of the DEF_STMT of this USE:
222 STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
223 STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
224 Exceptions:
225 - case 1: If USE is used only for address computations (e.g. array indexing),
226 which does not need to be directly vectorized, then the liveness/relevance
227 of the respective DEF_STMT is left unchanged.
228 - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
 229 skip DEF_STMT because it had already been processed.
230 - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
231 be modified accordingly.
233 Return true if everything is as expected. Return false otherwise. */
235 static bool
236 process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
237 enum vect_relevant relevant, VEC(gimple,heap) **worklist)
239 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
240 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
241 stmt_vec_info dstmt_vinfo;
242 basic_block bb, def_bb;
243 tree def;
244 gimple def_stmt;
245 enum vect_def_type dt;
247 /* case 1: we are only interested in uses that need to be vectorized. Uses
248 that are used for address computation are not considered relevant. */
249 if (!exist_non_indexing_operands_for_use_p (use, stmt))
250 return true;
252 if (!vect_is_simple_use (use, loop_vinfo, NULL, &def_stmt, &def, &dt))
254 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
255 fprintf (vect_dump, "not vectorized: unsupported use in stmt.");
256 return false;
259 if (!def_stmt || gimple_nop_p (def_stmt))
260 return true;
262 def_bb = gimple_bb (def_stmt);
263 if (!flow_bb_inside_loop_p (loop, def_bb))
265 if (vect_print_dump_info (REPORT_DETAILS))
266 fprintf (vect_dump, "def_stmt is out of loop.");
267 return true;
270 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
271 DEF_STMT must have already been processed, because this should be the
272 only way that STMT, which is a reduction-phi, was put in the worklist,
273 as there should be no other uses for DEF_STMT in the loop. So we just
274 check that everything is as expected, and we are done. */
275 dstmt_vinfo = vinfo_for_stmt (def_stmt);
276 bb = gimple_bb (stmt);
277 if (gimple_code (stmt) == GIMPLE_PHI
278 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
279 && gimple_code (def_stmt) != GIMPLE_PHI
280 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
281 && bb->loop_father == def_bb->loop_father)
283 if (vect_print_dump_info (REPORT_DETAILS))
284 fprintf (vect_dump, "reduc-stmt defining reduc-phi in the same nest.");
285 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
286 dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
287 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
288 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
289 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
290 return true;
293 /* case 3a: outer-loop stmt defining an inner-loop stmt:
294 outer-loop-header-bb:
295 d = def_stmt
296 inner-loop:
297 stmt # use (d)
298 outer-loop-tail-bb:
299 ... */
300 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
302 if (vect_print_dump_info (REPORT_DETAILS))
303 fprintf (vect_dump, "outer-loop def-stmt defining inner-loop stmt.");
305 switch (relevant)
307 case vect_unused_in_scope:
308 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
309 vect_used_in_scope : vect_unused_in_scope;
310 break;
312 case vect_used_in_outer_by_reduction:
313 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
314 relevant = vect_used_by_reduction;
315 break;
317 case vect_used_in_outer:
318 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
319 relevant = vect_used_in_scope;
320 break;
322 case vect_used_in_scope:
323 break;
325 default:
326 gcc_unreachable ();
330 /* case 3b: inner-loop stmt defining an outer-loop stmt:
331 outer-loop-header-bb:
333 inner-loop:
334 d = def_stmt
335 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
336 stmt # use (d) */
337 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
339 if (vect_print_dump_info (REPORT_DETAILS))
340 fprintf (vect_dump, "inner-loop def-stmt defining outer-loop stmt.");
342 switch (relevant)
344 case vect_unused_in_scope:
345 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
346 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
347 vect_used_in_outer_by_reduction : vect_unused_in_scope;
348 break;
350 case vect_used_by_reduction:
351 relevant = vect_used_in_outer_by_reduction;
352 break;
354 case vect_used_in_scope:
355 relevant = vect_used_in_outer;
356 break;
358 default:
359 gcc_unreachable ();
363 vect_mark_relevant (worklist, def_stmt, relevant, live_p);
364 return true;
368 /* Function vect_mark_stmts_to_be_vectorized.
370 Not all stmts in the loop need to be vectorized. For example:
372 for i...
373 for j...
374 1. T0 = i + j
375 2. T1 = a[T0]
377 3. j = j + 1
379 Stmt 1 and 3 do not need to be vectorized, because loop control and
380 addressing of vectorized data-refs are handled differently.
382 This pass detects such stmts. */
384 bool
385 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
387 VEC(gimple,heap) *worklist;
388 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
389 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
390 unsigned int nbbs = loop->num_nodes;
391 gimple_stmt_iterator si;
392 gimple stmt;
393 unsigned int i;
394 stmt_vec_info stmt_vinfo;
395 basic_block bb;
396 gimple phi;
397 bool live_p;
398 enum vect_relevant relevant, tmp_relevant;
399 enum vect_def_type def_type;
401 if (vect_print_dump_info (REPORT_DETAILS))
402 fprintf (vect_dump, "=== vect_mark_stmts_to_be_vectorized ===");
404 worklist = VEC_alloc (gimple, heap, 64);
406 /* 1. Init worklist. */
407 for (i = 0; i < nbbs; i++)
409 bb = bbs[i];
410 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
412 phi = gsi_stmt (si);
413 if (vect_print_dump_info (REPORT_DETAILS))
415 fprintf (vect_dump, "init: phi relevant? ");
416 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
419 if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
420 vect_mark_relevant (&worklist, phi, relevant, live_p);
422 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
424 stmt = gsi_stmt (si);
425 if (vect_print_dump_info (REPORT_DETAILS))
427 fprintf (vect_dump, "init: stmt relevant? ");
428 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
431 if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
432 vect_mark_relevant (&worklist, stmt, relevant, live_p);
436 /* 2. Process_worklist */
437 while (VEC_length (gimple, worklist) > 0)
439 use_operand_p use_p;
440 ssa_op_iter iter;
442 stmt = VEC_pop (gimple, worklist);
443 if (vect_print_dump_info (REPORT_DETAILS))
445 fprintf (vect_dump, "worklist: examine stmt: ");
446 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
449 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
450 (DEF_STMT) as relevant/irrelevant and live/dead according to the
451 liveness and relevance properties of STMT. */
452 stmt_vinfo = vinfo_for_stmt (stmt);
453 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
454 live_p = STMT_VINFO_LIVE_P (stmt_vinfo);
456 /* Generally, the liveness and relevance properties of STMT are
457 propagated as is to the DEF_STMTs of its USEs:
458 live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
459 relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)
461 One exception is when STMT has been identified as defining a reduction
462 variable; in this case we set the liveness/relevance as follows:
463 live_p = false
464 relevant = vect_used_by_reduction
465 This is because we distinguish between two kinds of relevant stmts -
466 those that are used by a reduction computation, and those that are
467 (also) used by a regular computation. This allows us later on to
468 identify stmts that are used solely by a reduction, and therefore the
469 order of the results that they produce does not have to be kept. */
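/* Illustrative example (not from the original source): for the reduction
   'sum = sum + a[i]', the stmts computing 'a[i]' are reached from the
   reduction stmt with relevant = vect_used_by_reduction and live_p = false,
   so only the final reduced value matters and the per-lane order of those
   intermediate results need not be preserved.  */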
471 def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
472 tmp_relevant = relevant;
473 switch (def_type)
475 case vect_reduction_def:
476 switch (tmp_relevant)
478 case vect_unused_in_scope:
479 relevant = vect_used_by_reduction;
480 break;
482 case vect_used_by_reduction:
483 if (gimple_code (stmt) == GIMPLE_PHI)
484 break;
485 /* fall through */
487 default:
488 if (vect_print_dump_info (REPORT_DETAILS))
489 fprintf (vect_dump, "unsupported use of reduction.");
491 VEC_free (gimple, heap, worklist);
492 return false;
495 live_p = false;
496 break;
498 case vect_nested_cycle:
499 if (tmp_relevant != vect_unused_in_scope
500 && tmp_relevant != vect_used_in_outer_by_reduction
501 && tmp_relevant != vect_used_in_outer)
503 if (vect_print_dump_info (REPORT_DETAILS))
504 fprintf (vect_dump, "unsupported use of nested cycle.");
506 VEC_free (gimple, heap, worklist);
507 return false;
510 live_p = false;
511 break;
513 case vect_double_reduction_def:
514 if (tmp_relevant != vect_unused_in_scope
515 && tmp_relevant != vect_used_by_reduction)
517 if (vect_print_dump_info (REPORT_DETAILS))
518 fprintf (vect_dump, "unsupported use of double reduction.");
520 VEC_free (gimple, heap, worklist);
521 return false;
524 live_p = false;
525 break;
527 default:
528 break;
531 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
533 tree op = USE_FROM_PTR (use_p);
534 if (!process_use (stmt, op, loop_vinfo, live_p, relevant, &worklist))
536 VEC_free (gimple, heap, worklist);
537 return false;
540 } /* while worklist */
542 VEC_free (gimple, heap, worklist);
543 return true;
 547 int
 548 cost_for_stmt (gimple stmt)
550 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
552 switch (STMT_VINFO_TYPE (stmt_info))
554 case load_vec_info_type:
555 return TARG_SCALAR_LOAD_COST;
556 case store_vec_info_type:
557 return TARG_SCALAR_STORE_COST;
558 case op_vec_info_type:
559 case condition_vec_info_type:
560 case assignment_vec_info_type:
561 case reduc_vec_info_type:
562 case induc_vec_info_type:
563 case type_promotion_vec_info_type:
564 case type_demotion_vec_info_type:
565 case type_conversion_vec_info_type:
566 case call_vec_info_type:
567 return TARG_SCALAR_STMT_COST;
568 case undef_vec_info_type:
569 default:
570 gcc_unreachable ();
574 /* Function vect_model_simple_cost.
576 Models cost for simple operations, i.e. those that only emit ncopies of a
577 single op. Right now, this does not account for multiple insns that could
578 be generated for the single vector op. We will handle that shortly. */
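/* Worked example (illustrative): with ncopies = 2 and one constant operand,
   the code below computes
     inside_cost  = 2 * TARG_VEC_STMT_COST
     outside_cost = TARG_SCALAR_TO_VEC_COST
   i.e. the constant is broadcast into a vector once, outside the loop.  */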
580 void
581 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
582 enum vect_def_type *dt, slp_tree slp_node)
584 int i;
585 int inside_cost = 0, outside_cost = 0;
587 /* The SLP costs were already calculated during SLP tree build. */
588 if (PURE_SLP_STMT (stmt_info))
589 return;
591 inside_cost = ncopies * TARG_VEC_STMT_COST;
 593 /* FORNOW: Assuming maximum 2 args per stmt. */
594 for (i = 0; i < 2; i++)
596 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
597 outside_cost += TARG_SCALAR_TO_VEC_COST;
600 if (vect_print_dump_info (REPORT_COST))
601 fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, "
602 "outside_cost = %d .", inside_cost, outside_cost);
604 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
605 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
606 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
610 /* Function vect_cost_strided_group_size
612 For strided load or store, return the group_size only if it is the first
613 load or store of a group, else return 1. This ensures that group size is
614 only returned once per group. */
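/* Example (illustrative): for an interleaved group of four stores
   a[4*i], a[4*i+1], a[4*i+2], a[4*i+3], this returns 4 when called on the
   first store of the group and 1 for the other three, so the strided
   overhead is charged exactly once per group.  */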
616 static int
617 vect_cost_strided_group_size (stmt_vec_info stmt_info)
619 gimple first_stmt = DR_GROUP_FIRST_DR (stmt_info);
621 if (first_stmt == STMT_VINFO_STMT (stmt_info))
622 return DR_GROUP_SIZE (stmt_info);
624 return 1;
628 /* Function vect_model_store_cost
630 Models cost for stores. In the case of strided accesses, one access
631 has the overhead of the strided access attributed to it. */
633 void
634 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
635 enum vect_def_type dt, slp_tree slp_node)
637 int group_size;
638 int inside_cost = 0, outside_cost = 0;
640 /* The SLP costs were already calculated during SLP tree build. */
641 if (PURE_SLP_STMT (stmt_info))
642 return;
644 if (dt == vect_constant_def || dt == vect_external_def)
645 outside_cost = TARG_SCALAR_TO_VEC_COST;
647 /* Strided access? */
648 if (DR_GROUP_FIRST_DR (stmt_info) && !slp_node)
649 group_size = vect_cost_strided_group_size (stmt_info);
650 /* Not a strided access. */
651 else
652 group_size = 1;
654 /* Is this an access in a group of stores, which provide strided access?
655 If so, add in the cost of the permutes. */
656 if (group_size > 1)
 658 /* Uses high and low interleave operations for each needed permute. */
659 inside_cost = ncopies * exact_log2(group_size) * group_size
660 * TARG_VEC_STMT_COST;
662 if (vect_print_dump_info (REPORT_COST))
663 fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .",
664 group_size);
668 /* Costs of the stores. */
669 inside_cost += ncopies * TARG_VEC_STORE_COST;
671 if (vect_print_dump_info (REPORT_COST))
672 fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, "
673 "outside_cost = %d .", inside_cost, outside_cost);
675 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
676 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
677 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
681 /* Function vect_model_load_cost
683 Models cost for loads. In the case of strided accesses, the last access
684 has the overhead of the strided access attributed to it. Since unaligned
685 accesses are supported for loads, we also account for the costs of the
686 access scheme chosen. */
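/* Worked example (illustrative): a strided load group of size 4 with
   ncopies = 1 adds 1 * exact_log2(4) * 4 = 8 permute stmts (each costing
   TARG_VEC_STMT_COST) on top of the per-copy load cost that depends on the
   alignment scheme chosen below.  */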
688 void
689 vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)
692 int group_size;
 693 int alignment_support_scheme;
694 gimple first_stmt;
695 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
696 int inside_cost = 0, outside_cost = 0;
698 /* The SLP costs were already calculated during SLP tree build. */
699 if (PURE_SLP_STMT (stmt_info))
700 return;
702 /* Strided accesses? */
703 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
704 if (first_stmt && !slp_node)
706 group_size = vect_cost_strided_group_size (stmt_info);
707 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
709 /* Not a strided access. */
710 else
712 group_size = 1;
713 first_dr = dr;
 716 alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
718 /* Is this an access in a group of loads providing strided access?
719 If so, add in the cost of the permutes. */
720 if (group_size > 1)
 722 /* Uses even and odd extract operations for each needed permute. */
723 inside_cost = ncopies * exact_log2(group_size) * group_size
724 * TARG_VEC_STMT_COST;
726 if (vect_print_dump_info (REPORT_COST))
727 fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .",
728 group_size);
732 /* The loads themselves. */
 733 switch (alignment_support_scheme)
735 case dr_aligned:
737 inside_cost += ncopies * TARG_VEC_LOAD_COST;
739 if (vect_print_dump_info (REPORT_COST))
740 fprintf (vect_dump, "vect_model_load_cost: aligned.");
742 break;
744 case dr_unaligned_supported:
746 /* Here, we assign an additional cost for the unaligned load. */
747 inside_cost += ncopies * TARG_VEC_UNALIGNED_LOAD_COST;
749 if (vect_print_dump_info (REPORT_COST))
750 fprintf (vect_dump, "vect_model_load_cost: unaligned supported by "
751 "hardware.");
753 break;
755 case dr_explicit_realign:
757 inside_cost += ncopies * (2*TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST);
759 /* FIXME: If the misalignment remains fixed across the iterations of
760 the containing loop, the following cost should be added to the
761 outside costs. */
762 if (targetm.vectorize.builtin_mask_for_load)
763 inside_cost += TARG_VEC_STMT_COST;
765 break;
767 case dr_explicit_realign_optimized:
769 if (vect_print_dump_info (REPORT_COST))
770 fprintf (vect_dump, "vect_model_load_cost: unaligned software "
771 "pipelined.");
773 /* Unaligned software pipeline has a load of an address, an initial
774 load, and possibly a mask operation to "prime" the loop. However,
775 if this is an access in a group of loads, which provide strided
776 access, then the above cost should only be considered for one
777 access in the group. Inside the loop, there is a load op
778 and a realignment op. */
780 if ((!DR_GROUP_FIRST_DR (stmt_info)) || group_size > 1 || slp_node)
782 outside_cost = 2*TARG_VEC_STMT_COST;
783 if (targetm.vectorize.builtin_mask_for_load)
784 outside_cost += TARG_VEC_STMT_COST;
787 inside_cost += ncopies * (TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST);
789 break;
792 default:
793 gcc_unreachable ();
796 if (vect_print_dump_info (REPORT_COST))
797 fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
798 "outside_cost = %d .", inside_cost, outside_cost);
800 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
801 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
802 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
806 /* Function vect_init_vector.
808 Insert a new stmt (INIT_STMT) that initializes a new vector variable with
809 the vector elements of VECTOR_VAR. Place the initialization at BSI if it
810 is not NULL. Otherwise, place the initialization at the loop preheader.
811 Return the DEF of INIT_STMT.
812 It will be used in the vectorization of STMT. */
814 tree
815 vect_init_vector (gimple stmt, tree vector_var, tree vector_type,
816 gimple_stmt_iterator *gsi)
818 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
819 tree new_var;
820 gimple init_stmt;
821 tree vec_oprnd;
822 edge pe;
823 tree new_temp;
824 basic_block new_bb;
826 new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
827 add_referenced_var (new_var);
828 init_stmt = gimple_build_assign (new_var, vector_var);
829 new_temp = make_ssa_name (new_var, init_stmt);
830 gimple_assign_set_lhs (init_stmt, new_temp);
832 if (gsi)
833 vect_finish_stmt_generation (stmt, init_stmt, gsi);
834 else
836 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
838 if (loop_vinfo)
840 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
842 if (nested_in_vect_loop_p (loop, stmt))
843 loop = loop->inner;
845 pe = loop_preheader_edge (loop);
846 new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
847 gcc_assert (!new_bb);
849 else
851 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
852 basic_block bb;
853 gimple_stmt_iterator gsi_bb_start;
855 gcc_assert (bb_vinfo);
856 bb = BB_VINFO_BB (bb_vinfo);
857 gsi_bb_start = gsi_after_labels (bb);
858 gsi_insert_before (&gsi_bb_start, init_stmt, GSI_SAME_STMT);
862 if (vect_print_dump_info (REPORT_DETAILS))
864 fprintf (vect_dump, "created new init_stmt: ");
865 print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
868 vec_oprnd = gimple_assign_lhs (init_stmt);
869 return vec_oprnd;
873 /* Function vect_get_vec_def_for_operand.
875 OP is an operand in STMT. This function returns a (vector) def that will be
876 used in the vectorized stmt for STMT.
878 In the case that OP is an SSA_NAME which is defined in the loop, then
879 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
881 In case OP is an invariant or constant, a new stmt that creates a vector def
882 needs to be introduced. */
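/* Example (illustrative): for the scalar stmt 'x = y + 5' with a
   four-element vectype, the constant operand 5 produces a new def
   'vect_cst_ = {5,5,5,5}' (case 1 below); an operand defined by an
   already-vectorized stmt inside the loop simply returns the lhs of that
   stmt's vectorized counterpart (case 3 below).  */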
884 tree
885 vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
887 tree vec_oprnd;
888 gimple vec_stmt;
889 gimple def_stmt;
890 stmt_vec_info def_stmt_info = NULL;
891 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
892 tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
893 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
894 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
895 tree vec_inv;
896 tree vec_cst;
897 tree t = NULL_TREE;
898 tree def;
899 int i;
900 enum vect_def_type dt;
901 bool is_simple_use;
902 tree vector_type;
904 if (vect_print_dump_info (REPORT_DETAILS))
906 fprintf (vect_dump, "vect_get_vec_def_for_operand: ");
907 print_generic_expr (vect_dump, op, TDF_SLIM);
910 is_simple_use = vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def,
911 &dt);
912 gcc_assert (is_simple_use);
913 if (vect_print_dump_info (REPORT_DETAILS))
915 if (def)
917 fprintf (vect_dump, "def = ");
918 print_generic_expr (vect_dump, def, TDF_SLIM);
920 if (def_stmt)
922 fprintf (vect_dump, " def_stmt = ");
923 print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
927 switch (dt)
929 /* Case 1: operand is a constant. */
930 case vect_constant_def:
932 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
933 gcc_assert (vector_type);
935 if (scalar_def)
936 *scalar_def = op;
938 /* Create 'vect_cst_ = {cst,cst,...,cst}' */
939 if (vect_print_dump_info (REPORT_DETAILS))
940 fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits);
942 for (i = nunits - 1; i >= 0; --i)
944 t = tree_cons (NULL_TREE, op, t);
946 vec_cst = build_vector (vector_type, t);
947 return vect_init_vector (stmt, vec_cst, vector_type, NULL);
950 /* Case 2: operand is defined outside the loop - loop invariant. */
951 case vect_external_def:
953 vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
954 gcc_assert (vector_type);
955 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
957 if (scalar_def)
958 *scalar_def = def;
960 /* Create 'vec_inv = {inv,inv,..,inv}' */
961 if (vect_print_dump_info (REPORT_DETAILS))
962 fprintf (vect_dump, "Create vector_inv.");
964 for (i = nunits - 1; i >= 0; --i)
966 t = tree_cons (NULL_TREE, def, t);
969 /* FIXME: use build_constructor directly. */
970 vec_inv = build_constructor_from_list (vector_type, t);
971 return vect_init_vector (stmt, vec_inv, vector_type, NULL);
974 /* Case 3: operand is defined inside the loop. */
975 case vect_internal_def:
977 if (scalar_def)
978 *scalar_def = NULL/* FIXME tuples: def_stmt*/;
980 /* Get the def from the vectorized stmt. */
981 def_stmt_info = vinfo_for_stmt (def_stmt);
982 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
983 gcc_assert (vec_stmt);
984 if (gimple_code (vec_stmt) == GIMPLE_PHI)
985 vec_oprnd = PHI_RESULT (vec_stmt);
986 else if (is_gimple_call (vec_stmt))
987 vec_oprnd = gimple_call_lhs (vec_stmt);
988 else
989 vec_oprnd = gimple_assign_lhs (vec_stmt);
990 return vec_oprnd;
993 /* Case 4: operand is defined by a loop header phi - reduction */
994 case vect_reduction_def:
995 case vect_double_reduction_def:
996 case vect_nested_cycle:
998 struct loop *loop;
1000 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1001 loop = (gimple_bb (def_stmt))->loop_father;
1003 /* Get the def before the loop */
1004 op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
1005 return get_initial_def_for_reduction (stmt, op, scalar_def);
1008 /* Case 5: operand is defined by loop-header phi - induction. */
1009 case vect_induction_def:
1011 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1013 /* Get the def from the vectorized stmt. */
1014 def_stmt_info = vinfo_for_stmt (def_stmt);
1015 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1016 gcc_assert (vec_stmt && gimple_code (vec_stmt) == GIMPLE_PHI);
1017 vec_oprnd = PHI_RESULT (vec_stmt);
1018 return vec_oprnd;
1021 default:
1022 gcc_unreachable ();
1027 /* Function vect_get_vec_def_for_stmt_copy
1029 Return a vector-def for an operand. This function is used when the
1030 vectorized stmt to be created (by the caller to this function) is a "copy"
1031 created in case the vectorized result cannot fit in one vector, and several
1032 copies of the vector-stmt are required. In this case the vector-def is
1033 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1034 of the stmt that defines VEC_OPRND.
1035 DT is the type of the vector def VEC_OPRND.
1037 Context:
1038 In case the vectorization factor (VF) is bigger than the number
1039 of elements that can fit in a vectype (nunits), we have to generate
1040 more than one vector stmt to vectorize the scalar stmt. This situation
1041 arises when there are multiple data-types operated upon in the loop; the
1042 smallest data-type determines the VF, and as a result, when vectorizing
1043 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1044 vector stmt (each computing a vector of 'nunits' results, and together
1045 computing 'VF' results in each iteration). This function is called when
1046 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1047 which VF=16 and nunits=4, so the number of copies required is 4):
1049 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1051 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1052 VS1.1: vx.1 = memref1 VS1.2
1053 VS1.2: vx.2 = memref2 VS1.3
1054 VS1.3: vx.3 = memref3
1056 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1057 VSnew.1: vz1 = vx.1 + ... VSnew.2
1058 VSnew.2: vz2 = vx.2 + ... VSnew.3
1059 VSnew.3: vz3 = vx.3 + ...
1061 The vectorization of S1 is explained in vectorizable_load.
1062 The vectorization of S2:
1063 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1064 the function 'vect_get_vec_def_for_operand' is called to
1065 get the relevant vector-def for each operand of S2. For operand x it
1066 returns the vector-def 'vx.0'.
1068 To create the remaining copies of the vector-stmt (VSnew.j), this
1069 function is called to get the relevant vector-def for each operand. It is
1070 obtained from the respective VS1.j stmt, which is recorded in the
1071 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1073 For example, to obtain the vector-def 'vx.1' in order to create the
1074 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1075 Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
1076 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1077 and return its def ('vx.1').
1078 Overall, to create the above sequence this function will be called 3 times:
1079 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1080 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1081 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1083 tree
1084 vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1086 gimple vec_stmt_for_operand;
1087 stmt_vec_info def_stmt_info;
1089 /* Do nothing; can reuse same def. */
1090 if (dt == vect_external_def || dt == vect_constant_def )
1091 return vec_oprnd;
1093 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1094 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1095 gcc_assert (def_stmt_info);
1096 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1097 gcc_assert (vec_stmt_for_operand);
1098 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1099 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1100 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1101 else
1102 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1103 return vec_oprnd;
1107 /* Get vectorized definitions for the operands to create a copy of an original
1108 stmt. See vect_get_vec_def_for_stmt_copy() for details. */
1110 static void
1111 vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
1112 VEC(tree,heap) **vec_oprnds0,
1113 VEC(tree,heap) **vec_oprnds1)
1115 tree vec_oprnd = VEC_pop (tree, *vec_oprnds0);
1117 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
1118 VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
1120 if (vec_oprnds1 && *vec_oprnds1)
1122 vec_oprnd = VEC_pop (tree, *vec_oprnds1);
1123 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
1124 VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
1129 /* Get vectorized definitions for OP0 and OP1, or SLP_NODE if it is not NULL. */
1131 static void
1132 vect_get_vec_defs (tree op0, tree op1, gimple stmt,
1133 VEC(tree,heap) **vec_oprnds0, VEC(tree,heap) **vec_oprnds1,
1134 slp_tree slp_node)
1136 if (slp_node)
1137 vect_get_slp_defs (slp_node, vec_oprnds0, vec_oprnds1);
1138 else
1140 tree vec_oprnd;
1142 *vec_oprnds0 = VEC_alloc (tree, heap, 1);
1143 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
1144 VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
1146 if (op1)
1148 *vec_oprnds1 = VEC_alloc (tree, heap, 1);
1149 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
1150 VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
1156 /* Function vect_finish_stmt_generation.
1158 Insert a new stmt. */
1160 void
1161 vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
1162 gimple_stmt_iterator *gsi)
1164 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1165 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1166 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1168 gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
1170 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1172 set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
1173 bb_vinfo));
1175 if (vect_print_dump_info (REPORT_DETAILS))
1177 fprintf (vect_dump, "add new stmt: ");
1178 print_gimple_stmt (vect_dump, vec_stmt, 0, TDF_SLIM);
1181 gimple_set_location (vec_stmt, gimple_location (gsi_stmt (*gsi)));
1184 /* Checks if CALL can be vectorized in type VECTYPE. Returns
1185 a function declaration if the target has a vectorized version
1186 of the function, or NULL_TREE if the function cannot be vectorized. */
1188 tree
1189 vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
1191 tree fndecl = gimple_call_fndecl (call);
1192 enum built_in_function code;
1194 /* We only handle functions that do not read or clobber memory -- i.e.
1195 const or novops ones. */
1196 if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
1197 return NULL_TREE;
1199 if (!fndecl
1200 || TREE_CODE (fndecl) != FUNCTION_DECL
1201 || !DECL_BUILT_IN (fndecl))
1202 return NULL_TREE;
1204 code = DECL_FUNCTION_CODE (fndecl);
1205 return targetm.vectorize.builtin_vectorized_function (code, vectype_out,
1206 vectype_in);
1209 /* Function vectorizable_call.
1211 Check if STMT performs a function call that can be vectorized.
1212 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1213 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1214 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
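/* Example (illustrative, not from the original source): a call such as
   'y = sinf (x)' is vectorized here only if
   targetm.vectorize.builtin_vectorized_function returns a vector variant;
   with equal element counts in and out the modifier is NONE, whereas a
   builtin whose result vector holds twice as many (narrower) elements as
   each argument vector takes the NARROW path and consumes two argument
   vectors per call.  */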
1216 static bool
1217 vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
1219 tree vec_dest;
1220 tree scalar_dest;
1221 tree op, type;
1222 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1223 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
1224 tree vectype_out, vectype_in;
1225 int nunits_in;
1226 int nunits_out;
1227 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1228 tree fndecl, new_temp, def, rhs_type, lhs_type;
1229 gimple def_stmt;
1230 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1231 gimple new_stmt = NULL;
1232 int ncopies, j;
1233 VEC(tree, heap) *vargs = NULL;
1234 enum { NARROW, NONE, WIDEN } modifier;
1235 size_t i, nargs;
1237 /* FORNOW: unsupported in basic block SLP. */
1238 gcc_assert (loop_vinfo);
1240 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1241 return false;
1243 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1244 return false;
1246 /* FORNOW: SLP not supported. */
1247 if (STMT_SLP_TYPE (stmt_info))
1248 return false;
1250 /* Is STMT a vectorizable call? */
1251 if (!is_gimple_call (stmt))
1252 return false;
1254 if (TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
1255 return false;
1257 /* Process function arguments. */
1258 rhs_type = NULL_TREE;
1259 nargs = gimple_call_num_args (stmt);
 1261 /* Bail out if the function has more than two arguments; we
1262 do not have interesting builtin functions to vectorize with
1263 more than two arguments. No arguments is also not good. */
1264 if (nargs == 0 || nargs > 2)
1265 return false;
1267 for (i = 0; i < nargs; i++)
1269 op = gimple_call_arg (stmt, i);
1271 /* We can only handle calls with arguments of the same type. */
1272 if (rhs_type
1273 && rhs_type != TREE_TYPE (op))
1275 if (vect_print_dump_info (REPORT_DETAILS))
1276 fprintf (vect_dump, "argument types differ.");
1277 return false;
1279 rhs_type = TREE_TYPE (op);
1281 if (!vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def, &dt[i]))
1283 if (vect_print_dump_info (REPORT_DETAILS))
1284 fprintf (vect_dump, "use not simple.");
1285 return false;
1289 vectype_in = get_vectype_for_scalar_type (rhs_type);
1290 if (!vectype_in)
1291 return false;
1292 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1294 lhs_type = TREE_TYPE (gimple_call_lhs (stmt));
1295 vectype_out = get_vectype_for_scalar_type (lhs_type);
1296 if (!vectype_out)
1297 return false;
1298 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1300 /* FORNOW */
1301 if (nunits_in == nunits_out / 2)
1302 modifier = NARROW;
1303 else if (nunits_out == nunits_in)
1304 modifier = NONE;
1305 else if (nunits_out == nunits_in / 2)
1306 modifier = WIDEN;
1307 else
1308 return false;
1310 /* For now, we only vectorize functions if a target specific builtin
1311 is available. TODO -- in some cases, it might be profitable to
1312 insert the calls for pieces of the vector, in order to be able
1313 to vectorize other operations in the loop. */
1314 fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
1315 if (fndecl == NULL_TREE)
1317 if (vect_print_dump_info (REPORT_DETAILS))
1318 fprintf (vect_dump, "function is not vectorizable.");
1320 return false;
1323 gcc_assert (!gimple_vuse (stmt));
1325 if (modifier == NARROW)
1326 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1327 else
1328 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1330 /* Sanity check: make sure that at least one copy of the vectorized stmt
1331 needs to be generated. */
1332 gcc_assert (ncopies >= 1);
1334 if (!vec_stmt) /* transformation not required. */
1336 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
1337 if (vect_print_dump_info (REPORT_DETAILS))
1338 fprintf (vect_dump, "=== vectorizable_call ===");
1339 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1340 return true;
1343 /** Transform. **/
1345 if (vect_print_dump_info (REPORT_DETAILS))
1346 fprintf (vect_dump, "transform operation.");
1348 /* Handle def. */
1349 scalar_dest = gimple_call_lhs (stmt);
1350 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1352 prev_stmt_info = NULL;
1353 switch (modifier)
1355 case NONE:
1356 for (j = 0; j < ncopies; ++j)
1358 /* Build argument list for the vectorized call. */
1359 if (j == 0)
1360 vargs = VEC_alloc (tree, heap, nargs);
1361 else
1362 VEC_truncate (tree, vargs, 0);
1364 for (i = 0; i < nargs; i++)
1366 op = gimple_call_arg (stmt, i);
1367 if (j == 0)
1368 vec_oprnd0
1369 = vect_get_vec_def_for_operand (op, stmt, NULL);
1370 else
1372 vec_oprnd0 = gimple_call_arg (new_stmt, i);
1373 vec_oprnd0
1374 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1377 VEC_quick_push (tree, vargs, vec_oprnd0);
1380 new_stmt = gimple_build_call_vec (fndecl, vargs);
1381 new_temp = make_ssa_name (vec_dest, new_stmt);
1382 gimple_call_set_lhs (new_stmt, new_temp);
1384 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1385 mark_symbols_for_renaming (new_stmt);
1387 if (j == 0)
1388 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1389 else
1390 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1392 prev_stmt_info = vinfo_for_stmt (new_stmt);
1395 break;
1397 case NARROW:
1398 for (j = 0; j < ncopies; ++j)
1400 /* Build argument list for the vectorized call. */
1401 if (j == 0)
1402 vargs = VEC_alloc (tree, heap, nargs * 2);
1403 else
1404 VEC_truncate (tree, vargs, 0);
1406 for (i = 0; i < nargs; i++)
1408 op = gimple_call_arg (stmt, i);
1409 if (j == 0)
1411 vec_oprnd0
1412 = vect_get_vec_def_for_operand (op, stmt, NULL);
1413 vec_oprnd1
1414 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1416 else
1418 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i);
1419 vec_oprnd0
1420 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
1421 vec_oprnd1
1422 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1425 VEC_quick_push (tree, vargs, vec_oprnd0);
1426 VEC_quick_push (tree, vargs, vec_oprnd1);
1429 new_stmt = gimple_build_call_vec (fndecl, vargs);
1430 new_temp = make_ssa_name (vec_dest, new_stmt);
1431 gimple_call_set_lhs (new_stmt, new_temp);
1433 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1434 mark_symbols_for_renaming (new_stmt);
1436 if (j == 0)
1437 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1438 else
1439 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1441 prev_stmt_info = vinfo_for_stmt (new_stmt);
1444 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1446 break;
1448 case WIDEN:
1449 /* No current target implements this case. */
1450 return false;
1453 VEC_free (tree, heap, vargs);
1455 /* Update the exception handling table with the vector stmt if necessary. */
1456 if (maybe_clean_or_replace_eh_stmt (stmt, *vec_stmt))
1457 gimple_purge_dead_eh_edges (gimple_bb (stmt));
1459 /* The call in STMT might prevent it from being removed in dce.
1460 We however cannot remove it here, due to the way the ssa name
1461 it defines is mapped to the new definition. So just replace
1462 rhs of the statement with something harmless. */
1464 type = TREE_TYPE (scalar_dest);
1465 new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
1466 fold_convert (type, integer_zero_node));
1467 set_vinfo_for_stmt (new_stmt, stmt_info);
1468 set_vinfo_for_stmt (stmt, NULL);
1469 STMT_VINFO_STMT (stmt_info) = new_stmt;
1470 gsi_replace (gsi, new_stmt, false);
1471 SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
1473 return true;
1477 /* Function vect_gen_widened_results_half
1479 Create a vector stmt whose code, type, number of arguments, and result
1480 variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
1481 VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at BSI.
1482 In the case that CODE is a CALL_EXPR, this means that a call to DECL
1483 needs to be created (DECL is a function-decl of a target-builtin).
1484 STMT is the original scalar stmt that we are vectorizing. */
1486 static gimple
1487 vect_gen_widened_results_half (enum tree_code code,
1488 tree decl,
1489 tree vec_oprnd0, tree vec_oprnd1, int op_type,
1490 tree vec_dest, gimple_stmt_iterator *gsi,
1491 gimple stmt)
1493 gimple new_stmt;
1494 tree new_temp;
1496 /* Generate half of the widened result: */
1497 if (code == CALL_EXPR)
1499 /* Target specific support */
1500 if (op_type == binary_op)
1501 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
1502 else
1503 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
1504 new_temp = make_ssa_name (vec_dest, new_stmt);
1505 gimple_call_set_lhs (new_stmt, new_temp);
1507 else
1509 /* Generic support */
1510 gcc_assert (op_type == TREE_CODE_LENGTH (code));
1511 if (op_type != binary_op)
1512 vec_oprnd1 = NULL;
1513 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
1514 vec_oprnd1);
1515 new_temp = make_ssa_name (vec_dest, new_stmt);
1516 gimple_assign_set_lhs (new_stmt, new_temp);
1518 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1520 return new_stmt;
1524 /* Check if STMT performs a conversion operation, that can be vectorized.
1525 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1526 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1527 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
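/* Example (illustrative): 'f = (float) i' with V4SI input and V4SF output
   takes the NONE path through the target's builtin_conversion hook, while
   'i = (int) d' with V2DF input and V4SI output is a NARROW conversion that
   combines two input vectors into each result vector.  */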
1529 static bool
1530 vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
1531 gimple *vec_stmt, slp_tree slp_node)
1533 tree vec_dest;
1534 tree scalar_dest;
1535 tree op0;
1536 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1537 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1538 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1539 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
1540 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
1541 tree new_temp;
1542 tree def;
1543 gimple def_stmt;
1544 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1545 gimple new_stmt = NULL;
1546 stmt_vec_info prev_stmt_info;
1547 int nunits_in;
1548 int nunits_out;
1549 tree vectype_out, vectype_in;
1550 int ncopies, j;
1551 tree expr;
1552 tree rhs_type, lhs_type;
1553 tree builtin_decl;
1554 enum { NARROW, NONE, WIDEN } modifier;
1555 int i;
1556 VEC(tree,heap) *vec_oprnds0 = NULL;
1557 tree vop0;
1558 tree integral_type;
1559 VEC(tree,heap) *dummy = NULL;
1560 int dummy_int;
1562 /* Is STMT a vectorizable conversion? */
1564 /* FORNOW: unsupported in basic block SLP. */
1565 gcc_assert (loop_vinfo);
1567 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1568 return false;
1570 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1571 return false;
1573 if (!is_gimple_assign (stmt))
1574 return false;
1576 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
1577 return false;
1579 code = gimple_assign_rhs_code (stmt);
1580 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
1581 return false;
1583 /* Check types of lhs and rhs. */
1584 op0 = gimple_assign_rhs1 (stmt);
1585 rhs_type = TREE_TYPE (op0);
1586 vectype_in = get_vectype_for_scalar_type (rhs_type);
1587 if (!vectype_in)
1588 return false;
1589 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1591 scalar_dest = gimple_assign_lhs (stmt);
1592 lhs_type = TREE_TYPE (scalar_dest);
1593 vectype_out = get_vectype_for_scalar_type (lhs_type);
1594 if (!vectype_out)
1595 return false;
1596 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1598 /* FORNOW */
1599 if (nunits_in == nunits_out / 2)
1600 modifier = NARROW;
1601 else if (nunits_out == nunits_in)
1602 modifier = NONE;
1603 else if (nunits_out == nunits_in / 2)
1604 modifier = WIDEN;
1605 else
1606 return false;
1608 if (modifier == NONE)
1609 gcc_assert (STMT_VINFO_VECTYPE (stmt_info) == vectype_out);
1611 /* Bail out if the types are both integral or non-integral. */
1612 if ((INTEGRAL_TYPE_P (rhs_type) && INTEGRAL_TYPE_P (lhs_type))
1613 || (!INTEGRAL_TYPE_P (rhs_type) && !INTEGRAL_TYPE_P (lhs_type)))
1614 return false;
1616 integral_type = INTEGRAL_TYPE_P (rhs_type) ? vectype_in : vectype_out;
1618 if (modifier == NARROW)
1619 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1620 else
1621 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1623 /* FORNOW: SLP with multiple types is not supported. The SLP analysis verifies
1624 this, so we can safely override NCOPIES with 1 here. */
1625 if (slp_node)
1626 ncopies = 1;
1628 /* Sanity check: make sure that at least one copy of the vectorized stmt
1629 needs to be generated. */
1630 gcc_assert (ncopies >= 1);
1632 /* Check the operands of the operation. */
1633 if (!vect_is_simple_use (op0, loop_vinfo, NULL, &def_stmt, &def, &dt[0]))
1635 if (vect_print_dump_info (REPORT_DETAILS))
1636 fprintf (vect_dump, "use not simple.");
1637 return false;
1640 /* Supportable by target? */
1641 if ((modifier == NONE
1642 && !targetm.vectorize.builtin_conversion (code, integral_type))
1643 || (modifier == WIDEN
1644 && !supportable_widening_operation (code, stmt, vectype_in,
1645 &decl1, &decl2,
1646 &code1, &code2,
1647 &dummy_int, &dummy))
1648 || (modifier == NARROW
1649 && !supportable_narrowing_operation (code, stmt, vectype_in,
1650 &code1, &dummy_int, &dummy)))
1652 if (vect_print_dump_info (REPORT_DETAILS))
1653 fprintf (vect_dump, "conversion not supported by target.");
1654 return false;
1657 if (modifier != NONE)
1659 STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
1660 /* FORNOW: SLP not supported. */
1661 if (STMT_SLP_TYPE (stmt_info))
1662 return false;
1665 if (!vec_stmt) /* transformation not required. */
1667 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
1668 return true;
1671 /** Transform. **/
1672 if (vect_print_dump_info (REPORT_DETAILS))
1673 fprintf (vect_dump, "transform conversion.");
1675 /* Handle def. */
1676 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1678 if (modifier == NONE && !slp_node)
1679 vec_oprnds0 = VEC_alloc (tree, heap, 1);
1681 prev_stmt_info = NULL;
1682 switch (modifier)
1684 case NONE:
1685 for (j = 0; j < ncopies; j++)
1687 if (j == 0)
1688 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
1689 else
1690 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
1692 builtin_decl =
1693 targetm.vectorize.builtin_conversion (code, integral_type);
1694 for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vop0); i++)
 1696 /* Arguments are ready. Create the new vector stmt. */
1697 new_stmt = gimple_build_call (builtin_decl, 1, vop0);
1698 new_temp = make_ssa_name (vec_dest, new_stmt);
1699 gimple_call_set_lhs (new_stmt, new_temp);
1700 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1701 if (slp_node)
1702 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
1705 if (j == 0)
1706 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1707 else
1708 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1709 prev_stmt_info = vinfo_for_stmt (new_stmt);
1711 break;
1713 case WIDEN:
1714 /* In case the vectorization factor (VF) is bigger than the number
1715 of elements that we can fit in a vectype (nunits), we have to
 1716 generate more than one vector stmt - i.e., we need to "unroll"
1717 the vector stmt by a factor VF/nunits. */
1718 for (j = 0; j < ncopies; j++)
1720 if (j == 0)
1721 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1722 else
1723 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1725 STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
1727 /* Generate first half of the widened result: */
1728 new_stmt
1729 = vect_gen_widened_results_half (code1, decl1,
1730 vec_oprnd0, vec_oprnd1,
1731 unary_op, vec_dest, gsi, stmt);
1732 if (j == 0)
1733 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1734 else
1735 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1736 prev_stmt_info = vinfo_for_stmt (new_stmt);
1738 /* Generate second half of the widened result: */
1739 new_stmt
1740 = vect_gen_widened_results_half (code2, decl2,
1741 vec_oprnd0, vec_oprnd1,
1742 unary_op, vec_dest, gsi, stmt);
1743 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1744 prev_stmt_info = vinfo_for_stmt (new_stmt);
1746 break;
1748 case NARROW:
1749 /* In case the vectorization factor (VF) is bigger than the number
1750 of elements that we can fit in a vectype (nunits), we have to
 1751 generate more than one vector stmt - i.e., we need to "unroll"
1752 the vector stmt by a factor VF/nunits. */
1753 for (j = 0; j < ncopies; j++)
1755 /* Handle uses. */
1756 if (j == 0)
1758 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1759 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1761 else
1763 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1);
1764 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1767 /* Arguments are ready. Create the new vector stmt. */
1768 expr = build2 (code1, vectype_out, vec_oprnd0, vec_oprnd1);
1769 new_stmt = gimple_build_assign_with_ops (code1, vec_dest, vec_oprnd0,
1770 vec_oprnd1);
1771 new_temp = make_ssa_name (vec_dest, new_stmt);
1772 gimple_assign_set_lhs (new_stmt, new_temp);
1773 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1775 if (j == 0)
1776 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1777 else
1778 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1780 prev_stmt_info = vinfo_for_stmt (new_stmt);
1783 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1786 if (vec_oprnds0)
1787 VEC_free (tree, heap, vec_oprnds0);
1789 return true;
1791 /* Function vectorizable_assignment.
1793 Check if STMT performs an assignment (copy) that can be vectorized.
1794 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1795 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1796 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1798 static bool
1799 vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
1800 gimple *vec_stmt, slp_tree slp_node)
1802 tree vec_dest;
1803 tree scalar_dest;
1804 tree op;
1805 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1806 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1807 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1808 tree new_temp;
1809 tree def;
1810 gimple def_stmt;
1811 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1812 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
1813 int ncopies;
1814 int i;
1815 VEC(tree,heap) *vec_oprnds = NULL;
1816 tree vop;
1817 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1819 /* Multiple types in SLP are handled by creating the appropriate number of
1820 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1821 case of SLP. */
1822 if (slp_node)
1823 ncopies = 1;
1824 else
1825 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
1827 gcc_assert (ncopies >= 1);
1828 if (ncopies > 1)
1829 return false; /* FORNOW */
1831 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
1832 return false;
1834 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1835 return false;
1837 /* Is vectorizable assignment? */
1838 if (!is_gimple_assign (stmt))
1839 return false;
1841 scalar_dest = gimple_assign_lhs (stmt);
1842 if (TREE_CODE (scalar_dest) != SSA_NAME)
1843 return false;
1845 if (gimple_assign_single_p (stmt)
1846 || gimple_assign_rhs_code (stmt) == PAREN_EXPR)
1847 op = gimple_assign_rhs1 (stmt);
1848 else
1849 return false;
1851 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt[0]))
1853 if (vect_print_dump_info (REPORT_DETAILS))
1854 fprintf (vect_dump, "use not simple.");
1855 return false;
1858 if (!vec_stmt) /* transformation not required. */
1860 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
1861 if (vect_print_dump_info (REPORT_DETAILS))
1862 fprintf (vect_dump, "=== vectorizable_assignment ===");
1863 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1864 return true;
1867 /** Transform. **/
1868 if (vect_print_dump_info (REPORT_DETAILS))
1869 fprintf (vect_dump, "transform assignment.");
1871 /* Handle def. */
1872 vec_dest = vect_create_destination_var (scalar_dest, vectype);
1874 /* Handle use. */
1875 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
1877 /* Arguments are ready. Create the new vector stmt. */
1878 for (i = 0; VEC_iterate (tree, vec_oprnds, i, vop); i++)
1880 *vec_stmt = gimple_build_assign (vec_dest, vop);
1881 new_temp = make_ssa_name (vec_dest, *vec_stmt);
1882 gimple_assign_set_lhs (*vec_stmt, new_temp);
1883 vect_finish_stmt_generation (stmt, *vec_stmt, gsi);
1884 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt;
1886 if (slp_node)
1887 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), *vec_stmt);
1890 VEC_free (tree, heap, vec_oprnds);
1891 return true;
1894 /* Function vectorizable_operation.
1896 Check if STMT performs a binary or unary operation that can be vectorized.
1897 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1898 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1899 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1901 static bool
1902 vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
1903 gimple *vec_stmt, slp_tree slp_node)
1905 tree vec_dest;
1906 tree scalar_dest;
1907 tree op0, op1 = NULL;
1908 tree vec_oprnd1 = NULL_TREE;
1909 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1910 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1911 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1912 enum tree_code code;
1913 enum machine_mode vec_mode;
1914 tree new_temp;
1915 int op_type;
1916 optab optab;
1917 int icode;
1918 enum machine_mode optab_op2_mode;
1919 tree def;
1920 gimple def_stmt;
1921 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1922 gimple new_stmt = NULL;
1923 stmt_vec_info prev_stmt_info;
1924 int nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
1925 int nunits_out;
1926 tree vectype_out;
1927 int ncopies;
1928 int j, i;
1929 VEC(tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
1930 tree vop0, vop1;
1931 unsigned int k;
1932 bool shift_p = false;
1933 bool scalar_shift_arg = false;
1934 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1935 int vf;
1937 if (loop_vinfo)
1938 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1939 else
1940 /* FORNOW: multiple types are not supported in basic block SLP. */
1941 vf = nunits_in;
1943 /* Multiple types in SLP are handled by creating the appropriate number of
1944 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1945 case of SLP. */
1946 if (slp_node)
1947 ncopies = 1;
1948 else
1949 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1951 gcc_assert (ncopies >= 1);
1953 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
1954 return false;
1956 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1957 return false;
1959 /* Is STMT a vectorizable binary/unary operation? */
1960 if (!is_gimple_assign (stmt))
1961 return false;
1963 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
1964 return false;
1966 scalar_dest = gimple_assign_lhs (stmt);
1967 vectype_out = get_vectype_for_scalar_type (TREE_TYPE (scalar_dest));
1968 if (!vectype_out)
1969 return false;
1970 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1971 if (nunits_out != nunits_in)
1972 return false;
1974 code = gimple_assign_rhs_code (stmt);
1976 /* For pointer addition, we should use the normal plus for
1977 the vector addition. */
1978 if (code == POINTER_PLUS_EXPR)
1979 code = PLUS_EXPR;
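     /* E.g., a gimple stmt like q_2 = p_1 + off_3 (POINTER_PLUS_EXPR) is then
        vectorized as an ordinary vector PLUS_EXPR, since there is no vector
        form of POINTER_PLUS_EXPR.  */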
1981 /* Support only unary or binary operations. */
1982 op_type = TREE_CODE_LENGTH (code);
1983 if (op_type != unary_op && op_type != binary_op)
1985 if (vect_print_dump_info (REPORT_DETAILS))
1986 fprintf (vect_dump, "num. args = %d (not unary/binary op).", op_type);
1987 return false;
1990 op0 = gimple_assign_rhs1 (stmt);
1991 if (!vect_is_simple_use (op0, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt[0]))
1993 if (vect_print_dump_info (REPORT_DETAILS))
1994 fprintf (vect_dump, "use not simple.");
1995 return false;
1998 if (op_type == binary_op)
2000 op1 = gimple_assign_rhs2 (stmt);
2001 if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def,
2002 &dt[1]))
2004 if (vect_print_dump_info (REPORT_DETAILS))
2005 fprintf (vect_dump, "use not simple.");
2006 return false;
2010 /* If this is a shift/rotate, determine whether the shift amount is a vector,
2011 or scalar. If the shift/rotate amount is a vector, use the vector/vector
2012 shift optabs. */
2013 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
2014 || code == RROTATE_EXPR)
2016 shift_p = true;
2018 /* Vector shifted by vector. */
2019 if (dt[1] == vect_internal_def)
2021 optab = optab_for_tree_code (code, vectype, optab_vector);
2022 if (vect_print_dump_info (REPORT_DETAILS))
2023 fprintf (vect_dump, "vector/vector shift/rotate found.");
2026 /* See if the machine has a vector shifted by scalar insn and if not
2027 then see if it has a vector shifted by vector insn. */
2028 else if (dt[1] == vect_constant_def || dt[1] == vect_external_def)
2030 optab = optab_for_tree_code (code, vectype, optab_scalar);
2031 if (optab
2032 && (optab_handler (optab, TYPE_MODE (vectype))->insn_code
2033 != CODE_FOR_nothing))
2035 scalar_shift_arg = true;
2036 if (vect_print_dump_info (REPORT_DETAILS))
2037 fprintf (vect_dump, "vector/scalar shift/rotate found.");
2039 else
2041 optab = optab_for_tree_code (code, vectype, optab_vector);
2042 if (optab
2043 && (optab_handler (optab, TYPE_MODE (vectype))->insn_code
2044 != CODE_FOR_nothing))
2046 if (vect_print_dump_info (REPORT_DETAILS))
2047 fprintf (vect_dump, "vector/vector shift/rotate found.");
2049 /* Unlike the other binary operators, shifts/rotates have
2050 the rhs being int, instead of the same type as the lhs,
2051 so make sure the scalar is the right type if we are
2052 dealing with vectors of short/char. */
2053 if (dt[1] == vect_constant_def)
2054 op1 = fold_convert (TREE_TYPE (vectype), op1);
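          /* For instance, when shifting a vector of shorts by the integer
             constant 3, the int constant is converted here to (short) 3 so
             that its type matches the vector element type expected by the
             vector/vector shift.  */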
2059 else
2061 if (vect_print_dump_info (REPORT_DETAILS))
2062 fprintf (vect_dump, "operand mode requires invariant argument.");
2063 return false;
2066 else
2067 optab = optab_for_tree_code (code, vectype, optab_default);
2069 /* Supportable by target? */
2070 if (!optab)
2072 if (vect_print_dump_info (REPORT_DETAILS))
2073 fprintf (vect_dump, "no optab.");
2074 return false;
2076 vec_mode = TYPE_MODE (vectype);
2077 icode = (int) optab_handler (optab, vec_mode)->insn_code;
2078 if (icode == CODE_FOR_nothing)
2080 if (vect_print_dump_info (REPORT_DETAILS))
2081 fprintf (vect_dump, "op not supported by target.");
2082 /* Check only during analysis. */
2083 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
2084 || (vf < vect_min_worthwhile_factor (code)
2085 && !vec_stmt))
2086 return false;
2087 if (vect_print_dump_info (REPORT_DETAILS))
2088 fprintf (vect_dump, "proceeding using word mode.");
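      /* Word-mode fallback: when the vector occupies exactly one machine
         word, the operation may still be worthwhile -- e.g. a bitwise AND of
         a V8QI vector can be carried out as a single word-mode AND on a
         64-bit target -- hence the UNITS_PER_WORD check above.  */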
2091 /* Worthwhile without SIMD support? Check only during analysis. */
2092 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2093 && vf < vect_min_worthwhile_factor (code)
2094 && !vec_stmt)
2096 if (vect_print_dump_info (REPORT_DETAILS))
2097 fprintf (vect_dump, "not worthwhile without SIMD support.");
2098 return false;
2101 if (!vec_stmt) /* transformation not required. */
2103 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
2104 if (vect_print_dump_info (REPORT_DETAILS))
2105 fprintf (vect_dump, "=== vectorizable_operation ===");
2106 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2107 return true;
2110 /** Transform. **/
2112 if (vect_print_dump_info (REPORT_DETAILS))
2113 fprintf (vect_dump, "transform binary/unary operation.");
2115 /* Handle def. */
2116 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2118 /* Allocate VECs for vector operands. In case of SLP, vector operands are
2119 created in the previous stages of the recursion, so no allocation is
2120 needed, except for the case of shift with scalar shift argument. In that
2121 case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
2122 be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
2123 In case of loop-based vectorization we allocate VECs of size 1. We
2124 allocate VEC_OPRNDS1 only in case of binary operation. */
2125 if (!slp_node)
2127 vec_oprnds0 = VEC_alloc (tree, heap, 1);
2128 if (op_type == binary_op)
2129 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2131 else if (scalar_shift_arg)
2132 vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);
2134 /* In case the vectorization factor (VF) is bigger than the number
2135 of elements that we can fit in a vectype (nunits), we have to generate
2136 more than one vector stmt - i.e - we need to "unroll" the
2137 vector stmt by a factor VF/nunits. In doing so, we record a pointer
2138 from one copy of the vector stmt to the next, in the field
2139 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
2140 stages to find the correct vector defs to be used when vectorizing
2141 stmts that use the defs of the current stmt. The example below illustrates
2142 the vectorization process when VF=16 and nunits=4 (i.e - we need to create
2143 4 vectorized stmts):
2145 before vectorization:
2146 RELATED_STMT VEC_STMT
2147 S1: x = memref - -
2148 S2: z = x + 1 - -
2150 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
2151 there):
2152 RELATED_STMT VEC_STMT
2153 VS1_0: vx0 = memref0 VS1_1 -
2154 VS1_1: vx1 = memref1 VS1_2 -
2155 VS1_2: vx2 = memref2 VS1_3 -
2156 VS1_3: vx3 = memref3 - -
2157 S1: x = load - VS1_0
2158 S2: z = x + 1 - -
2160 step2: vectorize stmt S2 (done here):
2161 To vectorize stmt S2 we first need to find the relevant vector
2162 def for the first operand 'x'. This is, as usual, obtained from
2163 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
2164 that defines 'x' (S1). This way we find the stmt VS1_0, and the
2165 relevant vector def 'vx0'. Having found 'vx0' we can generate
2166 the vector stmt VS2_0, and as usual, record it in the
2167 STMT_VINFO_VEC_STMT of stmt S2.
2168 When creating the second copy (VS2_1), we obtain the relevant vector
2169 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
2170 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
2171 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
2172 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
2173 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
2174 chain of stmts and pointers:
2175 RELATED_STMT VEC_STMT
2176 VS1_0: vx0 = memref0 VS1_1 -
2177 VS1_1: vx1 = memref1 VS1_2 -
2178 VS1_2: vx2 = memref2 VS1_3 -
2179 VS1_3: vx3 = memref3 - -
2180 S1: x = load - VS1_0
2181 VS2_0: vz0 = vx0 + v1 VS2_1 -
2182 VS2_1: vz1 = vx1 + v1 VS2_2 -
2183 VS2_2: vz2 = vx2 + v1 VS2_3 -
2184 VS2_3: vz3 = vx3 + v1 - -
2185 S2: z = x + 1 - VS2_0 */
2187 prev_stmt_info = NULL;
2188 for (j = 0; j < ncopies; j++)
2190 /* Handle uses. */
2191 if (j == 0)
2193 if (op_type == binary_op && scalar_shift_arg)
2195 /* Vector shl and shr insn patterns can be defined with scalar
2196 operand 2 (shift operand). In this case, use constant or loop
2197 invariant op1 directly, without extending it to vector mode
2198 first. */
2199 optab_op2_mode = insn_data[icode].operand[2].mode;
2200 if (!VECTOR_MODE_P (optab_op2_mode))
2202 if (vect_print_dump_info (REPORT_DETAILS))
2203 fprintf (vect_dump, "operand 1 using scalar mode.");
2204 vec_oprnd1 = op1;
2205 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2206 if (slp_node)
2208 /* Store vec_oprnd1 for every vector stmt to be created
2209 for SLP_NODE. We check during the analysis that all the
2210 shift arguments are the same.
2211 TODO: Allow different constants for different vector
2212 stmts generated for an SLP instance. */
2213 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
2214 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2219 /* vec_oprnd1 is available if operand 1 should be of a scalar type
2220 (a special case for certain kinds of vector shifts); otherwise,
2221 operand 1 should be of a vector type (the usual case). */
2222 if (op_type == binary_op && !vec_oprnd1)
2223 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
2224 slp_node);
2225 else
2226 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
2227 slp_node);
2229 else
2230 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
2232 /* Arguments are ready. Create the new vector stmt. */
2233 for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vop0); i++)
2235 vop1 = ((op_type == binary_op)
2236 ? VEC_index (tree, vec_oprnds1, i) : NULL);
2237 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2238 new_temp = make_ssa_name (vec_dest, new_stmt);
2239 gimple_assign_set_lhs (new_stmt, new_temp);
2240 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2241 if (slp_node)
2242 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2245 if (slp_node)
2246 continue;
2248 if (j == 0)
2249 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2250 else
2251 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2252 prev_stmt_info = vinfo_for_stmt (new_stmt);
2255 VEC_free (tree, heap, vec_oprnds0);
2256 if (vec_oprnds1)
2257 VEC_free (tree, heap, vec_oprnds1);
2259 return true;
2263 /* Get vectorized definitions for loop-based vectorization. For the first
2264 operand we call vect_get_vec_def_for_operand() (with OPRND containing
2265 scalar operand), and for the rest we get a copy with
2266 vect_get_vec_def_for_stmt_copy() using the previous vector definition
2267 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
2268 The vectors are collected into VEC_OPRNDS. */
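   /* Note: each invocation pushes two vector defs and then recurses while
      MULTI_STEP_CVT is non-zero, so a single call collects
      2 * (MULTI_STEP_CVT + 1) vector defs in total (e.g. two defs for a
      single-step conversion).  */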
2270 static void
2271 vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
2272 VEC (tree, heap) **vec_oprnds, int multi_step_cvt)
2274 tree vec_oprnd;
2276 /* Get first vector operand. */
2277 /* All the vector operands except the very first one (that is scalar oprnd)
2278 are stmt copies. */
2279 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
2280 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
2281 else
2282 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
2284 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2286 /* Get second vector operand. */
2287 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
2288 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2290 *oprnd = vec_oprnd;
2292 /* For conversion in multiple steps, continue to get operands
2293 recursively. */
2294 if (multi_step_cvt)
2295 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
2299 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
2300 For multi-step conversions store the resulting vectors and call the function
2301 recursively. */
2303 static void
2304 vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
2305 int multi_step_cvt, gimple stmt,
2306 VEC (tree, heap) *vec_dsts,
2307 gimple_stmt_iterator *gsi,
2308 slp_tree slp_node, enum tree_code code,
2309 stmt_vec_info *prev_stmt_info)
2311 unsigned int i;
2312 tree vop0, vop1, new_tmp, vec_dest;
2313 gimple new_stmt;
2314 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2316 vec_dest = VEC_pop (tree, vec_dsts);
2318 for (i = 0; i < VEC_length (tree, *vec_oprnds); i += 2)
2320 /* Create demotion operation. */
2321 vop0 = VEC_index (tree, *vec_oprnds, i);
2322 vop1 = VEC_index (tree, *vec_oprnds, i + 1);
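      /* Each pair of input vectors is combined into a single vector of the
         narrower type (typically a VEC_PACK_TRUNC_EXPR), which is why the
         loop advances by two.  */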
2323 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2324 new_tmp = make_ssa_name (vec_dest, new_stmt);
2325 gimple_assign_set_lhs (new_stmt, new_tmp);
2326 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2328 if (multi_step_cvt)
2329 /* Store the resulting vector for next recursive call. */
2330 VEC_replace (tree, *vec_oprnds, i/2, new_tmp);
2331 else
2333 /* This is the last step of the conversion sequence. Store the
2334 vectors in SLP_NODE or in vector info of the scalar statement
2335 (or in STMT_VINFO_RELATED_STMT chain). */
2336 if (slp_node)
2337 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2338 else
2340 if (!*prev_stmt_info)
2341 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2342 else
2343 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
2345 *prev_stmt_info = vinfo_for_stmt (new_stmt);
2350 /* For multi-step demotion operations we first generate demotion operations
2351 from the source type to the intermediate types, and then combine the
2352 results (stored in VEC_OPRNDS) in demotion operation to the destination
2353 type. */
2354 if (multi_step_cvt)
2356 /* At each level of recursion we have half of the operands we had at the
2357 previous level. */
2358 VEC_truncate (tree, *vec_oprnds, (i+1)/2);
2359 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
2360 stmt, vec_dsts, gsi, slp_node,
2361 code, prev_stmt_info);
2366 /* Function vectorizable_type_demotion
2368 Check if STMT performs a binary or unary operation that involves
2369 type demotion, and if it can be vectorized.
2370 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2371 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2372 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2374 static bool
2375 vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
2376 gimple *vec_stmt, slp_tree slp_node)
2378 tree vec_dest;
2379 tree scalar_dest;
2380 tree op0;
2381 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2382 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2383 enum tree_code code, code1 = ERROR_MARK;
2384 tree def;
2385 gimple def_stmt;
2386 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2387 stmt_vec_info prev_stmt_info;
2388 int nunits_in;
2389 int nunits_out;
2390 tree vectype_out;
2391 int ncopies;
2392 int j, i;
2393 tree vectype_in;
2394 int multi_step_cvt = 0;
2395 VEC (tree, heap) *vec_oprnds0 = NULL;
2396 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2397 tree last_oprnd, intermediate_type;
2399 /* FORNOW: not supported by basic block SLP vectorization. */
2400 gcc_assert (loop_vinfo);
2402 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2403 return false;
2405 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2406 return false;
2408 /* Is STMT a vectorizable type-demotion operation? */
2409 if (!is_gimple_assign (stmt))
2410 return false;
2412 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2413 return false;
2415 code = gimple_assign_rhs_code (stmt);
2416 if (!CONVERT_EXPR_CODE_P (code))
2417 return false;
2419 op0 = gimple_assign_rhs1 (stmt);
2420 vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op0));
2421 if (!vectype_in)
2422 return false;
2423 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2425 scalar_dest = gimple_assign_lhs (stmt);
2426 vectype_out = get_vectype_for_scalar_type (TREE_TYPE (scalar_dest));
2427 if (!vectype_out)
2428 return false;
2429 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2430 if (nunits_in >= nunits_out)
2431 return false;
2433 /* Multiple types in SLP are handled by creating the appropriate number of
2434 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2435 case of SLP. */
2436 if (slp_node)
2437 ncopies = 1;
2438 else
2439 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2440 gcc_assert (ncopies >= 1);
2442 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2443 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2444 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2445 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2446 && CONVERT_EXPR_CODE_P (code))))
2447 return false;
2449 /* Check the operands of the operation. */
2450 if (!vect_is_simple_use (op0, loop_vinfo, NULL, &def_stmt, &def, &dt[0]))
2452 if (vect_print_dump_info (REPORT_DETAILS))
2453 fprintf (vect_dump, "use not simple.");
2454 return false;
2457 /* Supportable by target? */
2458 if (!supportable_narrowing_operation (code, stmt, vectype_in, &code1,
2459 &multi_step_cvt, &interm_types))
2460 return false;
2462 STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
2464 if (!vec_stmt) /* transformation not required. */
2466 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
2467 if (vect_print_dump_info (REPORT_DETAILS))
2468 fprintf (vect_dump, "=== vectorizable_demotion ===");
2469 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2470 return true;
2473 /** Transform. **/
2474 if (vect_print_dump_info (REPORT_DETAILS))
2475 fprintf (vect_dump, "transform type demotion operation. ncopies = %d.",
2476 ncopies);
2478 /* In case of multi-step demotion, we first generate demotion operations to
2479 the intermediate types, and then from those types to the final one.
2480 We create vector destinations for the intermediate types (TYPES) received
2481 from supportable_narrowing_operation, and store them in the correct order
2482 for future use in vect_create_vectorized_demotion_stmts(). */
2483 if (multi_step_cvt)
2484 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
2485 else
2486 vec_dsts = VEC_alloc (tree, heap, 1);
2488 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2489 VEC_quick_push (tree, vec_dsts, vec_dest);
2491 if (multi_step_cvt)
2493 for (i = VEC_length (tree, interm_types) - 1;
2494 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
2496 vec_dest = vect_create_destination_var (scalar_dest,
2497 intermediate_type);
2498 VEC_quick_push (tree, vec_dsts, vec_dest);
2502 /* In case the vectorization factor (VF) is bigger than the number
2503 of elements that we can fit in a vectype (nunits), we have to generate
2504 more than one vector stmt - i.e - we need to "unroll" the
2505 vector stmt by a factor VF/nunits. */
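   /* Illustrative example (assuming V4SI/V8HI vector modes): demoting int
      to short gives nunits_out == 8, so with VF == 8 a single copy is
      created, but each narrowing stmt consumes two V4SI defs, which
      vect_get_loop_based_defs collects below.  */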
2506 last_oprnd = op0;
2507 prev_stmt_info = NULL;
2508 for (j = 0; j < ncopies; j++)
2510 /* Handle uses. */
2511 if (slp_node)
2512 vect_get_slp_defs (slp_node, &vec_oprnds0, NULL);
2513 else
2515 VEC_free (tree, heap, vec_oprnds0);
2516 vec_oprnds0 = VEC_alloc (tree, heap,
2517 (multi_step_cvt ? vect_pow2 (multi_step_cvt) * 2 : 2));
2518 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
2519 vect_pow2 (multi_step_cvt) - 1);
2522 /* Arguments are ready. Create the new vector stmts. */
2523 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
2524 vect_create_vectorized_demotion_stmts (&vec_oprnds0,
2525 multi_step_cvt, stmt, tmp_vec_dsts,
2526 gsi, slp_node, code1,
2527 &prev_stmt_info);
2530 VEC_free (tree, heap, vec_oprnds0);
2531 VEC_free (tree, heap, vec_dsts);
2532 VEC_free (tree, heap, tmp_vec_dsts);
2533 VEC_free (tree, heap, interm_types);
2535 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2536 return true;
2540 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
2541 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
2542 the resulting vectors and call the function recursively. */
2544 static void
2545 vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
2546 VEC (tree, heap) **vec_oprnds1,
2547 int multi_step_cvt, gimple stmt,
2548 VEC (tree, heap) *vec_dsts,
2549 gimple_stmt_iterator *gsi,
2550 slp_tree slp_node, enum tree_code code1,
2551 enum tree_code code2, tree decl1,
2552 tree decl2, int op_type,
2553 stmt_vec_info *prev_stmt_info)
2555 int i;
2556 tree vop0, vop1, new_tmp1, new_tmp2, vec_dest;
2557 gimple new_stmt1, new_stmt2;
2558 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2559 VEC (tree, heap) *vec_tmp;
2561 vec_dest = VEC_pop (tree, vec_dsts);
2562 vec_tmp = VEC_alloc (tree, heap, VEC_length (tree, *vec_oprnds0) * 2);
2564 for (i = 0; VEC_iterate (tree, *vec_oprnds0, i, vop0); i++)
2566 if (op_type == binary_op)
2567 vop1 = VEC_index (tree, *vec_oprnds1, i);
2568 else
2569 vop1 = NULL_TREE;
2571 /* Generate the two halves of promotion operation. */
2572 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
2573 op_type, vec_dest, gsi, stmt);
2574 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
2575 op_type, vec_dest, gsi, stmt);
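      /* E.g. for a char -> short widening, CODE1/CODE2 are typically
         VEC_UNPACK_LO_EXPR/VEC_UNPACK_HI_EXPR (or target builtins via
         DECL1/DECL2), each half producing the widened form of half of the
         input vector's elements.  */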
2576 if (is_gimple_call (new_stmt1))
2578 new_tmp1 = gimple_call_lhs (new_stmt1);
2579 new_tmp2 = gimple_call_lhs (new_stmt2);
2581 else
2583 new_tmp1 = gimple_assign_lhs (new_stmt1);
2584 new_tmp2 = gimple_assign_lhs (new_stmt2);
2587 if (multi_step_cvt)
2589 /* Store the results for the recursive call. */
2590 VEC_quick_push (tree, vec_tmp, new_tmp1);
2591 VEC_quick_push (tree, vec_tmp, new_tmp2);
2593 else
2595 /* Last step of the promotion sequence - store the results. */
2596 if (slp_node)
2598 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt1);
2599 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt2);
2601 else
2603 if (!*prev_stmt_info)
2604 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt1;
2605 else
2606 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt1;
2608 *prev_stmt_info = vinfo_for_stmt (new_stmt1);
2609 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt2;
2610 *prev_stmt_info = vinfo_for_stmt (new_stmt2);
2615 if (multi_step_cvt)
2617 /* For a multi-step promotion operation we call the function
2618 recursively for every stage. We start from the input type,
2619 create promotion operations to the intermediate types, and then
2620 create promotions to the output type. */
2621 *vec_oprnds0 = VEC_copy (tree, heap, vec_tmp);
2622 VEC_free (tree, heap, vec_tmp);
2623 vect_create_vectorized_promotion_stmts (vec_oprnds0, vec_oprnds1,
2624 multi_step_cvt - 1, stmt,
2625 vec_dsts, gsi, slp_node, code1,
2626 code2, decl2, decl2, op_type,
2627 prev_stmt_info);
2632 /* Function vectorizable_type_promotion
2634 Check if STMT performs a binary or unary operation that involves
2635 type promotion, and if it can be vectorized.
2636 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2637 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2638 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2640 static bool
2641 vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
2642 gimple *vec_stmt, slp_tree slp_node)
2644 tree vec_dest;
2645 tree scalar_dest;
2646 tree op0, op1 = NULL;
2647 tree vec_oprnd0=NULL, vec_oprnd1=NULL;
2648 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2649 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2650 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
2651 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
2652 int op_type;
2653 tree def;
2654 gimple def_stmt;
2655 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2656 stmt_vec_info prev_stmt_info;
2657 int nunits_in;
2658 int nunits_out;
2659 tree vectype_out;
2660 int ncopies;
2661 int j, i;
2662 tree vectype_in;
2663 tree intermediate_type = NULL_TREE;
2664 int multi_step_cvt = 0;
2665 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
2666 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2668 /* FORNOW: not supported by basic block SLP vectorization. */
2669 gcc_assert (loop_vinfo);
2671 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2672 return false;
2674 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2675 return false;
2677 /* Is STMT a vectorizable type-promotion operation? */
2678 if (!is_gimple_assign (stmt))
2679 return false;
2681 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2682 return false;
2684 code = gimple_assign_rhs_code (stmt);
2685 if (!CONVERT_EXPR_CODE_P (code)
2686 && code != WIDEN_MULT_EXPR)
2687 return false;
2689 op0 = gimple_assign_rhs1 (stmt);
2690 vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op0));
2691 if (!vectype_in)
2692 return false;
2693 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2695 scalar_dest = gimple_assign_lhs (stmt);
2696 vectype_out = get_vectype_for_scalar_type (TREE_TYPE (scalar_dest));
2697 if (!vectype_out)
2698 return false;
2699 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2700 if (nunits_in <= nunits_out)
2701 return false;
2703 /* Multiple types in SLP are handled by creating the appropriate number of
2704 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2705 case of SLP. */
2706 if (slp_node)
2707 ncopies = 1;
2708 else
2709 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2711 gcc_assert (ncopies >= 1);
2713 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2714 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2715 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2716 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2717 && CONVERT_EXPR_CODE_P (code))))
2718 return false;
2720 /* Check the operands of the operation. */
2721 if (!vect_is_simple_use (op0, loop_vinfo, NULL, &def_stmt, &def, &dt[0]))
2723 if (vect_print_dump_info (REPORT_DETAILS))
2724 fprintf (vect_dump, "use not simple.");
2725 return false;
2728 op_type = TREE_CODE_LENGTH (code);
2729 if (op_type == binary_op)
2731 op1 = gimple_assign_rhs2 (stmt);
2732 if (!vect_is_simple_use (op1, loop_vinfo, NULL, &def_stmt, &def, &dt[1]))
2734 if (vect_print_dump_info (REPORT_DETAILS))
2735 fprintf (vect_dump, "use not simple.");
2736 return false;
2740 /* Supportable by target? */
2741 if (!supportable_widening_operation (code, stmt, vectype_in,
2742 &decl1, &decl2, &code1, &code2,
2743 &multi_step_cvt, &interm_types))
2744 return false;
2746 /* Binary widening operation can only be supported directly by the
2747 architecture. */
2748 gcc_assert (!(multi_step_cvt && op_type == binary_op));
2750 STMT_VINFO_VECTYPE (stmt_info) = vectype_in;
2752 if (!vec_stmt) /* transformation not required. */
2754 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
2755 if (vect_print_dump_info (REPORT_DETAILS))
2756 fprintf (vect_dump, "=== vectorizable_promotion ===");
2757 vect_model_simple_cost (stmt_info, 2*ncopies, dt, NULL);
2758 return true;
2761 /** Transform. **/
2763 if (vect_print_dump_info (REPORT_DETAILS))
2764 fprintf (vect_dump, "transform type promotion operation. ncopies = %d.",
2765 ncopies);
2767 /* Handle def. */
2768 /* In case of multi-step promotion, we first generate promotion operations
2769 to the intermediate types, and then from those types to the final one.
2770 We store the vector destinations in VEC_DSTS in the correct order for
2771 recursive creation of promotion operations in
2772 vect_create_vectorized_promotion_stmts(). Vector destinations are created
2773 according to TYPES received from supportable_widening_operation(). */
2774 if (multi_step_cvt)
2775 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
2776 else
2777 vec_dsts = VEC_alloc (tree, heap, 1);
2779 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2780 VEC_quick_push (tree, vec_dsts, vec_dest);
2782 if (multi_step_cvt)
2784 for (i = VEC_length (tree, interm_types) - 1;
2785 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
2787 vec_dest = vect_create_destination_var (scalar_dest,
2788 intermediate_type);
2789 VEC_quick_push (tree, vec_dsts, vec_dest);
2793 if (!slp_node)
2795 vec_oprnds0 = VEC_alloc (tree, heap,
2796 (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
2797 if (op_type == binary_op)
2798 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2801 /* In case the vectorization factor (VF) is bigger than the number
2802 of elements that we can fit in a vectype (nunits), we have to generate
2803 more than one vector stmt - i.e - we need to "unroll" the
2804 vector stmt by a factor VF/nunits. */
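   /* Illustrative example (assuming V8HI/V4SI vector modes): widening short
      to int unpacks one V8HI operand into two V4SI results per copy; with
      VF == 8 and nunits_in == 8 a single copy of the scalar stmt is
      processed.  */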
2806 prev_stmt_info = NULL;
2807 for (j = 0; j < ncopies; j++)
2809 /* Handle uses. */
2810 if (j == 0)
2812 if (slp_node)
2813 vect_get_slp_defs (slp_node, &vec_oprnds0, &vec_oprnds1);
2814 else
2816 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
2817 VEC_quick_push (tree, vec_oprnds0, vec_oprnd0);
2818 if (op_type == binary_op)
2820 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL);
2821 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2825 else
2827 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
2828 VEC_replace (tree, vec_oprnds0, 0, vec_oprnd0);
2829 if (op_type == binary_op)
2831 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1);
2832 VEC_replace (tree, vec_oprnds1, 0, vec_oprnd1);
2836 /* Arguments are ready. Create the new vector stmts. */
2837 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
2838 vect_create_vectorized_promotion_stmts (&vec_oprnds0, &vec_oprnds1,
2839 multi_step_cvt, stmt,
2840 tmp_vec_dsts,
2841 gsi, slp_node, code1, code2,
2842 decl1, decl2, op_type,
2843 &prev_stmt_info);
2846 VEC_free (tree, heap, vec_dsts);
2847 VEC_free (tree, heap, tmp_vec_dsts);
2848 VEC_free (tree, heap, interm_types);
2849 VEC_free (tree, heap, vec_oprnds0);
2850 VEC_free (tree, heap, vec_oprnds1);
2852 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2853 return true;
2857 /* Function vectorizable_store.
2859 Check if STMT defines a non scalar data-ref (array/pointer/structure) that
2860 can be vectorized.
2861 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2862 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2863 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2865 static bool
2866 vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
2867 slp_tree slp_node)
2869 tree scalar_dest;
2870 tree data_ref;
2871 tree op;
2872 tree vec_oprnd = NULL_TREE;
2873 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2874 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
2875 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2876 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2877 struct loop *loop = NULL;
2878 enum machine_mode vec_mode;
2879 tree dummy;
2880 enum dr_alignment_support alignment_support_scheme;
2881 tree def;
2882 gimple def_stmt;
2883 enum vect_def_type dt;
2884 stmt_vec_info prev_stmt_info = NULL;
2885 tree dataref_ptr = NULL_TREE;
2886 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
2887 int ncopies;
2888 int j;
2889 gimple next_stmt, first_stmt = NULL;
2890 bool strided_store = false;
2891 unsigned int group_size, i;
2892 VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
2893 bool inv_p;
2894 VEC(tree,heap) *vec_oprnds = NULL;
2895 bool slp = (slp_node != NULL);
2896 stmt_vec_info first_stmt_vinfo;
2897 unsigned int vec_num;
2898 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2900 if (loop_vinfo)
2901 loop = LOOP_VINFO_LOOP (loop_vinfo);
2903 /* Multiple types in SLP are handled by creating the appropriate number of
2904 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2905 case of SLP. */
2906 if (slp)
2907 ncopies = 1;
2908 else
2909 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
2911 gcc_assert (ncopies >= 1);
2913 /* FORNOW. This restriction should be relaxed. */
2914 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
2916 if (vect_print_dump_info (REPORT_DETAILS))
2917 fprintf (vect_dump, "multiple types in nested loop.");
2918 return false;
2921 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2922 return false;
2924 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2925 return false;
2927 /* Is vectorizable store? */
2929 if (!is_gimple_assign (stmt))
2930 return false;
2932 scalar_dest = gimple_assign_lhs (stmt);
2933 if (TREE_CODE (scalar_dest) != ARRAY_REF
2934 && TREE_CODE (scalar_dest) != INDIRECT_REF
2935 && TREE_CODE (scalar_dest) != COMPONENT_REF
2936 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
2937 && TREE_CODE (scalar_dest) != REALPART_EXPR)
2938 return false;
2940 gcc_assert (gimple_assign_single_p (stmt));
2941 op = gimple_assign_rhs1 (stmt);
2942 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt))
2944 if (vect_print_dump_info (REPORT_DETAILS))
2945 fprintf (vect_dump, "use not simple.");
2946 return false;
2949 /* The scalar rhs type needs to be trivially convertible to the vector
2950 component type. This should always be the case. */
2951 if (!useless_type_conversion_p (TREE_TYPE (vectype), TREE_TYPE (op)))
2953 if (vect_print_dump_info (REPORT_DETAILS))
2954 fprintf (vect_dump, "??? operands of different types");
2955 return false;
2958 vec_mode = TYPE_MODE (vectype);
2959 /* FORNOW. In some cases can vectorize even if data-type not supported
2960 (e.g. - array initialization with 0). */
2961 if (optab_handler (mov_optab, (int)vec_mode)->insn_code == CODE_FOR_nothing)
2962 return false;
2964 if (!STMT_VINFO_DATA_REF (stmt_info))
2965 return false;
2967 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
2969 strided_store = true;
2970 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
2971 if (!vect_strided_store_supported (vectype)
2972 && !PURE_SLP_STMT (stmt_info) && !slp)
2973 return false;
2975 if (first_stmt == stmt)
2977 /* STMT is the leader of the group. Check the operands of all the
2978 stmts of the group. */
2979 next_stmt = DR_GROUP_NEXT_DR (stmt_info);
2980 while (next_stmt)
2982 gcc_assert (gimple_assign_single_p (next_stmt));
2983 op = gimple_assign_rhs1 (next_stmt);
2984 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt,
2985 &def, &dt))
2987 if (vect_print_dump_info (REPORT_DETAILS))
2988 fprintf (vect_dump, "use not simple.");
2989 return false;
2991 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
2996 if (!vec_stmt) /* transformation not required. */
2998 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
2999 vect_model_store_cost (stmt_info, ncopies, dt, NULL);
3000 return true;
3003 /** Transform. **/
3005 if (strided_store)
3007 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3008 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3010 DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
3012 /* FORNOW */
3013 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
3015 /* We vectorize all the stmts of the interleaving group when we
3016 reach the last stmt in the group. */
3017 if (DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
3018 < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))
3019 && !slp)
3021 *vec_stmt = NULL;
3022 return true;
3025 if (slp)
3026 strided_store = false;
3028 /* VEC_NUM is the number of vect stmts to be created for this group. */
3029 if (slp)
3030 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3031 else
3032 vec_num = group_size;
3034 else
3036 first_stmt = stmt;
3037 first_dr = dr;
3038 group_size = vec_num = 1;
3039 first_stmt_vinfo = stmt_info;
3042 if (vect_print_dump_info (REPORT_DETAILS))
3043 fprintf (vect_dump, "transform store. ncopies = %d",ncopies);
3045 dr_chain = VEC_alloc (tree, heap, group_size);
3046 oprnds = VEC_alloc (tree, heap, group_size);
3048 alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
3049 gcc_assert (alignment_support_scheme);
3051 /* In case the vectorization factor (VF) is bigger than the number
3052 of elements that we can fit in a vectype (nunits), we have to generate
3053 more than one vector stmt - i.e - we need to "unroll" the
3054 vector stmt by a factor VF/nunits. For more details see documentation in
3055 vect_get_vec_def_for_copy_stmt. */
3057 /* In case of interleaving (non-unit strided access):
3059 S1: &base + 2 = x2
3060 S2: &base = x0
3061 S3: &base + 1 = x1
3062 S4: &base + 3 = x3
3064 We create vectorized stores starting from base address (the access of the
3065 first stmt in the chain (S2 in the above example), when the last store stmt
3066 of the chain (S4) is reached:
3068 VS1: &base = vx2
3069 VS2: &base + vec_size*1 = vx0
3070 VS3: &base + vec_size*2 = vx1
3071 VS4: &base + vec_size*3 = vx3
3073 Then permutation statements are generated:
3075 VS5: vx5 = VEC_INTERLEAVE_HIGH_EXPR < vx0, vx3 >
3076 VS6: vx6 = VEC_INTERLEAVE_LOW_EXPR < vx0, vx3 >
3079 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3080 (the order of the data-refs in the output of vect_permute_store_chain
3081 corresponds to the order of scalar stmts in the interleaving chain - see
3082 the documentation of vect_permute_store_chain()).
3084 In case of both multiple types and interleaving, above vector stores and
3085 permutation stmts are created for every copy. The result vector stmts are
3086 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
3087 STMT_VINFO_RELATED_STMT for the next copies.
3090 prev_stmt_info = NULL;
3091 for (j = 0; j < ncopies; j++)
3093 gimple new_stmt;
3094 gimple ptr_incr;
3096 if (j == 0)
3098 if (slp)
3100 /* Get vectorized arguments for SLP_NODE. */
3101 vect_get_slp_defs (slp_node, &vec_oprnds, NULL);
3103 vec_oprnd = VEC_index (tree, vec_oprnds, 0);
3105 else
3107 /* For interleaved stores we collect vectorized defs for all the
3108 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
3109 used as an input to vect_permute_store_chain(), and OPRNDS as
3110 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
3112 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3113 OPRNDS are of size 1. */
3114 next_stmt = first_stmt;
3115 for (i = 0; i < group_size; i++)
3117 /* Since gaps are not supported for interleaved stores,
3118 GROUP_SIZE is the exact number of stmts in the chain.
3119 Therefore, NEXT_STMT can't be NULL_TREE. In case that
3120 there is no interleaving, GROUP_SIZE is 1, and only one
3121 iteration of the loop will be executed. */
3122 gcc_assert (next_stmt
3123 && gimple_assign_single_p (next_stmt));
3124 op = gimple_assign_rhs1 (next_stmt);
3126 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
3127 NULL);
3128 VEC_quick_push(tree, dr_chain, vec_oprnd);
3129 VEC_quick_push(tree, oprnds, vec_oprnd);
3130 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3134 /* We should have caught mismatched types earlier. */
3135 gcc_assert (useless_type_conversion_p (vectype,
3136 TREE_TYPE (vec_oprnd)));
3137 dataref_ptr = vect_create_data_ref_ptr (first_stmt, NULL, NULL_TREE,
3138 &dummy, &ptr_incr, false,
3139 &inv_p);
3140 gcc_assert (bb_vinfo || !inv_p);
3142 else
3144 /* For interleaved stores we created vectorized defs for all the
3145 defs stored in OPRNDS in the previous iteration (previous copy).
3146 DR_CHAIN is then used as an input to vect_permute_store_chain(),
3147 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
3148 next copy.
3149 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3150 OPRNDS are of size 1. */
3151 for (i = 0; i < group_size; i++)
3153 op = VEC_index (tree, oprnds, i);
3154 vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def,
3155 &dt);
3156 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
3157 VEC_replace(tree, dr_chain, i, vec_oprnd);
3158 VEC_replace(tree, oprnds, i, vec_oprnd);
3160 dataref_ptr =
3161 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
3164 if (strided_store)
3166 result_chain = VEC_alloc (tree, heap, group_size);
3167 /* Permute. */
3168 if (!vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
3169 &result_chain))
3170 return false;
3173 next_stmt = first_stmt;
3174 for (i = 0; i < vec_num; i++)
3176 if (i > 0)
3177 /* Bump the vector pointer. */
3178 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3179 NULL_TREE);
3181 if (slp)
3182 vec_oprnd = VEC_index (tree, vec_oprnds, i);
3183 else if (strided_store)
3184 /* For strided stores vectorized defs are interleaved in
3185 vect_permute_store_chain(). */
3186 vec_oprnd = VEC_index (tree, result_chain, i);
3188 if (aligned_access_p (first_dr))
3189 data_ref = build_fold_indirect_ref (dataref_ptr);
3190 else
3192 int mis = DR_MISALIGNMENT (first_dr);
3193 tree tmis = (mis == -1 ? size_zero_node : size_int (mis));
3194 tmis = size_binop (MULT_EXPR, tmis, size_int (BITS_PER_UNIT));
3195 data_ref = build2 (MISALIGNED_INDIRECT_REF, vectype, dataref_ptr, tmis);
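          /* The second operand of the MISALIGNED_INDIRECT_REF carries the
             known misalignment in bits (zero when DR_MISALIGNMENT is -1,
             i.e. unknown), so later expansion treats the access as a
             possibly unaligned store.  */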
3198 /* If accesses through a pointer to vectype do not alias the original
3199 memory reference we have a problem. This should never happen. */
3200 gcc_assert (alias_sets_conflict_p (get_alias_set (data_ref),
3201 get_alias_set (gimple_assign_lhs (stmt))));
3203 /* Arguments are ready. Create the new vector stmt. */
3204 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
3205 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3206 mark_symbols_for_renaming (new_stmt);
3208 if (slp)
3209 continue;
3211 if (j == 0)
3212 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3213 else
3214 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3216 prev_stmt_info = vinfo_for_stmt (new_stmt);
3217 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3218 if (!next_stmt)
3219 break;
3223 VEC_free (tree, heap, dr_chain);
3224 VEC_free (tree, heap, oprnds);
3225 if (result_chain)
3226 VEC_free (tree, heap, result_chain);
3228 return true;
3231 /* vectorizable_load.
3233 Check if STMT reads a non scalar data-ref (array/pointer/structure) that
3234 can be vectorized.
3235 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3236 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3237 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3239 static bool
3240 vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
3241 slp_tree slp_node, slp_instance slp_node_instance)
3243 tree scalar_dest;
3244 tree vec_dest = NULL;
3245 tree data_ref = NULL;
3246 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3247 stmt_vec_info prev_stmt_info;
3248 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3249 struct loop *loop = NULL;
3250 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
3251 bool nested_in_vect_loop = false;
3252 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
3253 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3254 tree new_temp;
3255 int mode;
3256 gimple new_stmt = NULL;
3257 tree dummy;
3258 enum dr_alignment_support alignment_support_scheme;
3259 tree dataref_ptr = NULL_TREE;
3260 gimple ptr_incr;
3261 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3262 int ncopies;
3263 int i, j, group_size;
3264 tree msq = NULL_TREE, lsq;
3265 tree offset = NULL_TREE;
3266 tree realignment_token = NULL_TREE;
3267 gimple phi = NULL;
3268 VEC(tree,heap) *dr_chain = NULL;
3269 bool strided_load = false;
3270 gimple first_stmt;
3271 tree scalar_type;
3272 bool inv_p;
3273 bool compute_in_loop = false;
3274 struct loop *at_loop;
3275 int vec_num;
3276 bool slp = (slp_node != NULL);
3277 bool slp_perm = false;
3278 enum tree_code code;
3279 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3280 int vf;
3282 if (loop_vinfo)
3284 loop = LOOP_VINFO_LOOP (loop_vinfo);
3285 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
3286 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3288 else
3289 /* FORNOW: multiple types are not supported in basic block SLP. */
3290 vf = nunits;
3292 /* Multiple types in SLP are handled by creating the appropriate number of
3293 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3294 case of SLP. */
3295 if (slp)
3296 ncopies = 1;
3297 else
3298 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3300 gcc_assert (ncopies >= 1);
3302 /* FORNOW. This restriction should be relaxed. */
3303 if (nested_in_vect_loop && ncopies > 1)
3305 if (vect_print_dump_info (REPORT_DETAILS))
3306 fprintf (vect_dump, "multiple types in nested loop.");
3307 return false;
3310 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3311 return false;
3313 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3314 return false;
3316 /* Is vectorizable load? */
3317 if (!is_gimple_assign (stmt))
3318 return false;
3320 scalar_dest = gimple_assign_lhs (stmt);
3321 if (TREE_CODE (scalar_dest) != SSA_NAME)
3322 return false;
3324 code = gimple_assign_rhs_code (stmt);
3325 if (code != ARRAY_REF
3326 && code != INDIRECT_REF
3327 && code != COMPONENT_REF
3328 && code != IMAGPART_EXPR
3329 && code != REALPART_EXPR)
3330 return false;
3332 if (!STMT_VINFO_DATA_REF (stmt_info))
3333 return false;
3335 scalar_type = TREE_TYPE (DR_REF (dr));
3336 mode = (int) TYPE_MODE (vectype);
3338 /* FORNOW. In some cases can vectorize even if data-type not supported
3339 (e.g. - data copies). */
3340 if (optab_handler (mov_optab, mode)->insn_code == CODE_FOR_nothing)
3342 if (vect_print_dump_info (REPORT_DETAILS))
3343 fprintf (vect_dump, "Aligned load, but unsupported type.");
3344 return false;
3347 /* The vector component type needs to be trivially convertible to the
3348 scalar lhs. This should always be the case. */
3349 if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), TREE_TYPE (vectype)))
3351 if (vect_print_dump_info (REPORT_DETAILS))
3352 fprintf (vect_dump, "??? operands of different types");
3353 return false;
3356 /* Check if the load is a part of an interleaving chain. */
3357 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3359 strided_load = true;
3360 /* FORNOW */
3361 gcc_assert (! nested_in_vect_loop);
3363 /* Check if interleaving is supported. */
3364 if (!vect_strided_load_supported (vectype)
3365 && !PURE_SLP_STMT (stmt_info) && !slp)
3366 return false;
3369 if (!vec_stmt) /* transformation not required. */
3371 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
3372 vect_model_load_cost (stmt_info, ncopies, NULL);
3373 return true;
3376 if (vect_print_dump_info (REPORT_DETAILS))
3377 fprintf (vect_dump, "transform load.");
3379 /** Transform. **/
3381 if (strided_load)
3383 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
3384 /* Check if the chain of loads is already vectorized. */
3385 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt)))
3387 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3388 return true;
3390 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3391 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3393 /* VEC_NUM is the number of vect stmts to be created for this group. */
3394 if (slp)
3396 strided_load = false;
3397 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3398 if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance))
3399 slp_perm = true;
3401 else
3402 vec_num = group_size;
3404 dr_chain = VEC_alloc (tree, heap, vec_num);
3406 else
3408 first_stmt = stmt;
3409 first_dr = dr;
3410 group_size = vec_num = 1;
3413 alignment_support_scheme = vect_supportable_dr_alignment (first_dr);
3414 gcc_assert (alignment_support_scheme);
3416 /* In case the vectorization factor (VF) is bigger than the number
3417 of elements that we can fit in a vectype (nunits), we have to generate
3418 more than one vector stmt - i.e - we need to "unroll" the
3419 vector stmt by a factor VF/nunits. In doing so, we record a pointer
3420 from one copy of the vector stmt to the next, in the field
3421 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
3422 stages to find the correct vector defs to be used when vectorizing
3423 stmts that use the defs of the current stmt. The example below illustrates
3424 the vectorization process when VF=16 and nunits=4 (i.e - we need to create
3425 4 vectorized stmts):
3427 before vectorization:
3428 RELATED_STMT VEC_STMT
3429 S1: x = memref - -
3430 S2: z = x + 1 - -
3432 step 1: vectorize stmt S1:
3433 We first create the vector stmt VS1_0, and, as usual, record a
3434 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
3435 Next, we create the vector stmt VS1_1, and record a pointer to
3436 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
3437 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
3438 stmts and pointers:
3439 RELATED_STMT VEC_STMT
3440 VS1_0: vx0 = memref0 VS1_1 -
3441 VS1_1: vx1 = memref1 VS1_2 -
3442 VS1_2: vx2 = memref2 VS1_3 -
3443 VS1_3: vx3 = memref3 - -
3444 S1: x = load - VS1_0
3445 S2: z = x + 1 - -
3447 See in documentation in vect_get_vec_def_for_stmt_copy for how the
3448 information we recorded in RELATED_STMT field is used to vectorize
3449 stmt S2. */
3451 /* In case of interleaving (non-unit strided access):
3453 S1: x2 = &base + 2
3454 S2: x0 = &base
3455 S3: x1 = &base + 1
3456 S4: x3 = &base + 3
3458 Vectorized loads are created in the order of memory accesses
3459 starting from the access of the first stmt of the chain:
3461 VS1: vx0 = &base
3462 VS2: vx1 = &base + vec_size*1
3463 VS3: vx3 = &base + vec_size*2
3464 VS4: vx4 = &base + vec_size*3
3466 Then permutation statements are generated:
3468 VS5: vx5 = VEC_EXTRACT_EVEN_EXPR < vx0, vx1 >
3469 VS6: vx6 = VEC_EXTRACT_ODD_EXPR < vx0, vx1 >
3472 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3473 (the order of the data-refs in the output of vect_permute_load_chain
3474 corresponds to the order of scalar stmts in the interleaving chain - see
3475 the documentation of vect_permute_load_chain()).
3476 The generation of permutation stmts and recording them in
3477 STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().
3479 In case of both multiple types and interleaving, the vector loads and
3480 permutation stmts above are created for every copy. The result vector stmts
3481 are put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
3482 STMT_VINFO_RELATED_STMT for the next copies. */
3484 /* If the data reference is aligned (dr_aligned) or potentially unaligned
3485 on a target that supports unaligned accesses (dr_unaligned_supported)
3486 we generate the following code:
3487 p = initial_addr;
3488 indx = 0;
3489 loop {
3490 p = p + indx * vectype_size;
3491 vec_dest = *(p);
3492 indx = indx + 1;
3495 Otherwise, the data reference is potentially unaligned on a target that
3496 does not support unaligned accesses (dr_explicit_realign_optimized) -
3497 then generate the following code, in which the data in each iteration is
3498 obtained by two vector loads, one from the previous iteration, and one
3499 from the current iteration:
3500 p1 = initial_addr;
3501 msq_init = *(floor(p1))
3502 p2 = initial_addr + VS - 1;
3503 realignment_token = call target_builtin;
3504 indx = 0;
3505 loop {
3506 p2 = p2 + indx * vectype_size
3507 lsq = *(floor(p2))
3508 vec_dest = realign_load (msq, lsq, realignment_token)
3509 indx = indx + 1;
3510 msq = lsq;
3511 } */
3513 /* If the misalignment remains the same throughout the execution of the
3514 loop, we can create the init_addr and permutation mask at the loop
3515 preheader. Otherwise, it needs to be created inside the loop.
3516 This can only occur when vectorizing memory accesses in the inner-loop
3517 nested within an outer-loop that is being vectorized. */
3519 if (loop && nested_in_vect_loop_p (loop, stmt)
3520 && (TREE_INT_CST_LOW (DR_STEP (dr))
3521 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
3523 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
3524 compute_in_loop = true;
3527 if ((alignment_support_scheme == dr_explicit_realign_optimized
3528 || alignment_support_scheme == dr_explicit_realign)
3529 && !compute_in_loop)
3531 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
3532 alignment_support_scheme, NULL_TREE,
3533 &at_loop);
3534 if (alignment_support_scheme == dr_explicit_realign_optimized)
3536 phi = SSA_NAME_DEF_STMT (msq);
3537 offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
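      /* The OFFSET of VS - 1 elements is applied to the data-ref pointer
         created in the copy loop below, so that its floor-aligned load
         yields LSQ (cf. p2 = initial_addr + VS - 1 in the pseudocode
         above).  */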
3540 else
3541 at_loop = loop;
3543 prev_stmt_info = NULL;
3544 for (j = 0; j < ncopies; j++)
3546 /* 1. Create the vector pointer update chain. */
3547 if (j == 0)
3548 dataref_ptr = vect_create_data_ref_ptr (first_stmt,
3549 at_loop, offset,
3550 &dummy, &ptr_incr, false,
3551 &inv_p);
3552 else
3553 dataref_ptr =
3554 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
3556 for (i = 0; i < vec_num; i++)
3558 if (i > 0)
3559 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3560 NULL_TREE);
3562 /* 2. Create the vector-load in the loop. */
3563 switch (alignment_support_scheme)
3565 case dr_aligned:
3566 gcc_assert (aligned_access_p (first_dr));
3567 data_ref = build_fold_indirect_ref (dataref_ptr);
3568 break;
3569 case dr_unaligned_supported:
3571 int mis = DR_MISALIGNMENT (first_dr);
3572 tree tmis = (mis == -1 ? size_zero_node : size_int (mis));
3574 tmis = size_binop (MULT_EXPR, tmis, size_int(BITS_PER_UNIT));
3575 data_ref =
3576 build2 (MISALIGNED_INDIRECT_REF, vectype, dataref_ptr, tmis);
3577 break;
3579 case dr_explicit_realign:
3581 tree ptr, bump;
3582 tree vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
3584 if (compute_in_loop)
3585 msq = vect_setup_realignment (first_stmt, gsi,
3586 &realignment_token,
3587 dr_explicit_realign,
3588 dataref_ptr, NULL);
3590 data_ref = build1 (ALIGN_INDIRECT_REF, vectype, dataref_ptr);
3591 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3592 new_stmt = gimple_build_assign (vec_dest, data_ref);
3593 new_temp = make_ssa_name (vec_dest, new_stmt);
3594 gimple_assign_set_lhs (new_stmt, new_temp);
3595 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
3596 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
3597 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3598 msq = new_temp;
3600 bump = size_binop (MULT_EXPR, vs_minus_1,
3601 TYPE_SIZE_UNIT (scalar_type));
3602 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
3603 data_ref = build1 (ALIGN_INDIRECT_REF, vectype, ptr);
3604 break;
3606 case dr_explicit_realign_optimized:
3607 data_ref = build1 (ALIGN_INDIRECT_REF, vectype, dataref_ptr);
3608 break;
3609 default:
3610 gcc_unreachable ();
3612 /* If accesses through a pointer to vectype do not alias the original
3613 memory reference, we have a problem. This should never happen. */
3614 gcc_assert (alias_sets_conflict_p (get_alias_set (data_ref),
3615 get_alias_set (gimple_assign_rhs1 (stmt))));
3616 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3617 new_stmt = gimple_build_assign (vec_dest, data_ref);
3618 new_temp = make_ssa_name (vec_dest, new_stmt);
3619 gimple_assign_set_lhs (new_stmt, new_temp);
3620 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3621 mark_symbols_for_renaming (new_stmt);
3623 /* 3. Handle explicit realignment if necessary/supported. Create in
3624 loop: vec_dest = realign_load (msq, lsq, realignment_token) */
3625 if (alignment_support_scheme == dr_explicit_realign_optimized
3626 || alignment_support_scheme == dr_explicit_realign)
3628 tree tmp;
3630 lsq = gimple_assign_lhs (new_stmt);
3631 if (!realignment_token)
3632 realignment_token = dataref_ptr;
3633 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3634 tmp = build3 (REALIGN_LOAD_EXPR, vectype, msq, lsq,
3635 realignment_token);
3636 new_stmt = gimple_build_assign (vec_dest, tmp);
3637 new_temp = make_ssa_name (vec_dest, new_stmt);
3638 gimple_assign_set_lhs (new_stmt, new_temp);
3639 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3641 if (alignment_support_scheme == dr_explicit_realign_optimized)
3643 gcc_assert (phi);
3644 if (i == vec_num - 1 && j == ncopies - 1)
3645 add_phi_arg (phi, lsq, loop_latch_edge (containing_loop),
3646 UNKNOWN_LOCATION);
3647 msq = lsq;
3651 /* 4. Handle invariant-load. */
3652 if (inv_p && !bb_vinfo)
3654 gcc_assert (!strided_load);
3655 gcc_assert (nested_in_vect_loop_p (loop, stmt));
3656 if (j == 0)
3658 int k;
3659 tree t = NULL_TREE;
3660 tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);
3662 /* CHECKME: bitpos depends on endianness? */
3663 bitpos = bitsize_zero_node;
3664 vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
3665 bitsize, bitpos);
3666 vec_dest =
3667 vect_create_destination_var (scalar_dest, NULL_TREE);
3668 new_stmt = gimple_build_assign (vec_dest, vec_inv);
3669 new_temp = make_ssa_name (vec_dest, new_stmt);
3670 gimple_assign_set_lhs (new_stmt, new_temp);
3671 vect_finish_stmt_generation (stmt, new_stmt, gsi);
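/* The list built below replicates the loaded invariant scalar NUNITS times;
   the CONSTRUCTOR created from it is the vector splat {inv, inv, ..., inv}
   that vect_init_vector then materializes.  */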
3673 for (k = nunits - 1; k >= 0; --k)
3674 t = tree_cons (NULL_TREE, new_temp, t);
3675 /* FIXME: use build_constructor directly. */
3676 vec_inv = build_constructor_from_list (vectype, t);
3677 new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
3678 new_stmt = SSA_NAME_DEF_STMT (new_temp);
3680 else
3681 gcc_unreachable (); /* FORNOW. */
3684 /* Collect vector loads and later create their permutation in
3685 vect_transform_strided_load (). */
3686 if (strided_load || slp_perm)
3687 VEC_quick_push (tree, dr_chain, new_temp);
3689 /* Store vector loads in the corresponding SLP_NODE. */
3690 if (slp && !slp_perm)
3691 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
3694 if (slp && !slp_perm)
3695 continue;
3697 if (slp_perm)
3699 if (!vect_transform_slp_perm_load (stmt, dr_chain, gsi, vf,
3700 slp_node_instance, false))
3702 VEC_free (tree, heap, dr_chain);
3703 return false;
3706 else
3708 if (strided_load)
3710 if (!vect_transform_strided_load (stmt, dr_chain, group_size, gsi))
3711 return false;
3713 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3714 VEC_free (tree, heap, dr_chain);
3715 dr_chain = VEC_alloc (tree, heap, group_size);
3717 else
3719 if (j == 0)
3720 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3721 else
3722 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3723 prev_stmt_info = vinfo_for_stmt (new_stmt);
3728 if (dr_chain)
3729 VEC_free (tree, heap, dr_chain);
3731 return true;
3734 /* Function vect_is_simple_cond.
3736 Input:
3737 LOOP - the loop that is being vectorized.
3738 COND - Condition that is checked for simple use.
3740 Returns whether a COND can be vectorized. Checks whether
3741 condition operands are supportable using vect_is_simple_use. */
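/* For instance (an illustrative sketch): a condition such as  x_1 <= y_2,
   where x_1 and y_2 are SSA names whose definitions vect_is_simple_use
   accepts (or integer/real/fixed-point constants), is considered simple;
   a condition whose operand is neither an SSA name nor such a constant is
   rejected.  */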
3743 static bool
3744 vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo)
3746 tree lhs, rhs;
3747 tree def;
3748 enum vect_def_type dt;
3750 if (!COMPARISON_CLASS_P (cond))
3751 return false;
3753 lhs = TREE_OPERAND (cond, 0);
3754 rhs = TREE_OPERAND (cond, 1);
3756 if (TREE_CODE (lhs) == SSA_NAME)
3758 gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
3759 if (!vect_is_simple_use (lhs, loop_vinfo, NULL, &lhs_def_stmt, &def,
3760 &dt))
3761 return false;
3763 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
3764 && TREE_CODE (lhs) != FIXED_CST)
3765 return false;
3767 if (TREE_CODE (rhs) == SSA_NAME)
3769 gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
3770 if (!vect_is_simple_use (rhs, loop_vinfo, NULL, &rhs_def_stmt, &def,
3771 &dt))
3772 return false;
3774 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
3775 && TREE_CODE (rhs) != FIXED_CST)
3776 return false;
3778 return true;
3781 /* vectorizable_condition.
3783 Check if STMT is a conditional modify expression that can be vectorized.
3784 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3785 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
3786 at GSI.
3788 When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
3789 to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
3790 the else clause if it is 2).
3792 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
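/* A sketch of the transformation (with illustrative names only):

     scalar:  x = (a < b) ? c : d;
     vector:  vx = VEC_COND_EXPR <va < vb, vc, vd>;

   In the nested-cycle case, REDUC_DEF takes the place of vc (if REDUC_INDEX
   is 1) or vd (if REDUC_INDEX is 2).  */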
3794 bool
3795 vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
3796 gimple *vec_stmt, tree reduc_def, int reduc_index)
3798 tree scalar_dest = NULL_TREE;
3799 tree vec_dest = NULL_TREE;
3800 tree op = NULL_TREE;
3801 tree cond_expr, then_clause, else_clause;
3802 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3803 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3804 tree vec_cond_lhs, vec_cond_rhs, vec_then_clause, vec_else_clause;
3805 tree vec_compare, vec_cond_expr;
3806 tree new_temp;
3807 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3808 enum machine_mode vec_mode;
3809 tree def;
3810 enum vect_def_type dt;
3811 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3812 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3813 enum tree_code code;
3815 /* FORNOW: unsupported in basic block SLP. */
3816 gcc_assert (loop_vinfo);
3818 gcc_assert (ncopies >= 1);
3819 if (ncopies > 1)
3820 return false; /* FORNOW */
3822 if (!STMT_VINFO_RELEVANT_P (stmt_info))
3823 return false;
3825 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
3826 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
3827 && reduc_def))
3828 return false;
3830 /* FORNOW: SLP not supported. */
3831 if (STMT_SLP_TYPE (stmt_info))
3832 return false;
3834 /* FORNOW: not yet supported. */
3835 if (STMT_VINFO_LIVE_P (stmt_info))
3837 if (vect_print_dump_info (REPORT_DETAILS))
3838 fprintf (vect_dump, "value used after loop.");
3839 return false;
3842 /* Is this a vectorizable conditional operation? */
3843 if (!is_gimple_assign (stmt))
3844 return false;
3846 code = gimple_assign_rhs_code (stmt);
3848 if (code != COND_EXPR)
3849 return false;
3851 gcc_assert (gimple_assign_single_p (stmt));
3852 op = gimple_assign_rhs1 (stmt);
3853 cond_expr = TREE_OPERAND (op, 0);
3854 then_clause = TREE_OPERAND (op, 1);
3855 else_clause = TREE_OPERAND (op, 2);
3857 if (!vect_is_simple_cond (cond_expr, loop_vinfo))
3858 return false;
3860 /* We do not handle two different vector types for the condition
3861 and the values. */
3862 if (TREE_TYPE (TREE_OPERAND (cond_expr, 0)) != TREE_TYPE (vectype))
3863 return false;
3865 if (TREE_CODE (then_clause) == SSA_NAME)
3867 gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
3868 if (!vect_is_simple_use (then_clause, loop_vinfo, NULL,
3869 &then_def_stmt, &def, &dt))
3870 return false;
3872 else if (TREE_CODE (then_clause) != INTEGER_CST
3873 && TREE_CODE (then_clause) != REAL_CST
3874 && TREE_CODE (then_clause) != FIXED_CST)
3875 return false;
3877 if (TREE_CODE (else_clause) == SSA_NAME)
3879 gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
3880 if (!vect_is_simple_use (else_clause, loop_vinfo, NULL,
3881 &else_def_stmt, &def, &dt))
3882 return false;
3884 else if (TREE_CODE (else_clause) != INTEGER_CST
3885 && TREE_CODE (else_clause) != REAL_CST
3886 && TREE_CODE (else_clause) != FIXED_CST)
3887 return false;
3890 vec_mode = TYPE_MODE (vectype);
3892 if (!vec_stmt)
3894 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
3895 return expand_vec_cond_expr_p (TREE_TYPE (op), vec_mode);
3898 /* Transform */
3900 /* Handle def. */
3901 scalar_dest = gimple_assign_lhs (stmt);
3902 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3904 /* Handle cond expr. */
3905 vec_cond_lhs =
3906 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0), stmt, NULL);
3907 vec_cond_rhs =
3908 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1), stmt, NULL);
3909 if (reduc_index == 1)
3910 vec_then_clause = reduc_def;
3911 else
3912 vec_then_clause = vect_get_vec_def_for_operand (then_clause, stmt, NULL);
3913 if (reduc_index == 2)
3914 vec_else_clause = reduc_def;
3915 else
3916 vec_else_clause = vect_get_vec_def_for_operand (else_clause, stmt, NULL);
3918 /* Arguments are ready. Create the new vector stmt. */
3919 vec_compare = build2 (TREE_CODE (cond_expr), vectype,
3920 vec_cond_lhs, vec_cond_rhs);
3921 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
3922 vec_compare, vec_then_clause, vec_else_clause);
3924 *vec_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
3925 new_temp = make_ssa_name (vec_dest, *vec_stmt);
3926 gimple_assign_set_lhs (*vec_stmt, new_temp);
3927 vect_finish_stmt_generation (stmt, *vec_stmt, gsi);
3929 return true;
3933 /* Make sure the statement is vectorizable. */
3935 bool
3936 vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
3938 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3939 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3940 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
3941 bool ok;
3942 HOST_WIDE_INT dummy;
3943 tree scalar_type, vectype;
3945 if (vect_print_dump_info (REPORT_DETAILS))
3947 fprintf (vect_dump, "==> examining statement: ");
3948 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
3951 if (gimple_has_volatile_ops (stmt))
3953 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
3954 fprintf (vect_dump, "not vectorized: stmt has volatile operands");
3956 return false;
3959 /* Skip stmts that do not need to be vectorized. In loops this is expected
3960 to include:
3961 - the COND_EXPR which is the loop exit condition
3962 - any LABEL_EXPRs in the loop
3963 - computations that are used only for array indexing or loop control.
3964 In basic blocks we only analyze statements that are a part of some SLP
3965 instance; therefore, all the statements are relevant.
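/* For example (illustrative): in
     for (i = 0; i < n; i++)
       a[i] = b[i] + 1;
   the increment of i and the address computations for a[i] and b[i] are
   used only for loop control and indexing, so they are neither relevant
   nor live and are skipped here.  */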
3967 if (!STMT_VINFO_RELEVANT_P (stmt_info)
3968 && !STMT_VINFO_LIVE_P (stmt_info))
3970 if (vect_print_dump_info (REPORT_DETAILS))
3971 fprintf (vect_dump, "irrelevant.");
3973 return true;
3976 switch (STMT_VINFO_DEF_TYPE (stmt_info))
3978 case vect_internal_def:
3979 break;
3981 case vect_reduction_def:
3982 case vect_nested_cycle:
3983 gcc_assert (!bb_vinfo && (relevance == vect_used_in_outer
3984 || relevance == vect_used_in_outer_by_reduction
3985 || relevance == vect_unused_in_scope));
3986 break;
3988 case vect_induction_def:
3989 case vect_constant_def:
3990 case vect_external_def:
3991 case vect_unknown_def_type:
3992 default:
3993 gcc_unreachable ();
3996 if (bb_vinfo)
3998 gcc_assert (PURE_SLP_STMT (stmt_info));
4000 scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy);
4001 if (vect_print_dump_info (REPORT_DETAILS))
4003 fprintf (vect_dump, "get vectype for scalar type: ");
4004 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4007 vectype = get_vectype_for_scalar_type (scalar_type);
4008 if (!vectype)
4010 if (vect_print_dump_info (REPORT_DETAILS))
4012 fprintf (vect_dump, "not SLPed: unsupported data-type ");
4013 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4015 return false;
4018 if (vect_print_dump_info (REPORT_DETAILS))
4020 fprintf (vect_dump, "vectype: ");
4021 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4024 STMT_VINFO_VECTYPE (stmt_info) = vectype;
4027 if (STMT_VINFO_RELEVANT_P (stmt_info))
4029 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
4030 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
4031 *need_to_vectorize = true;
4034 ok = true;
4035 if (!bb_vinfo
4036 && (STMT_VINFO_RELEVANT_P (stmt_info)
4037 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
4038 ok = (vectorizable_type_promotion (stmt, NULL, NULL, NULL)
4039 || vectorizable_type_demotion (stmt, NULL, NULL, NULL)
4040 || vectorizable_conversion (stmt, NULL, NULL, NULL)
4041 || vectorizable_operation (stmt, NULL, NULL, NULL)
4042 || vectorizable_assignment (stmt, NULL, NULL, NULL)
4043 || vectorizable_load (stmt, NULL, NULL, NULL, NULL)
4044 || vectorizable_call (stmt, NULL, NULL)
4045 || vectorizable_store (stmt, NULL, NULL, NULL)
4046 || vectorizable_reduction (stmt, NULL, NULL)
4047 || vectorizable_condition (stmt, NULL, NULL, NULL, 0));
4048 else
4050 if (bb_vinfo)
4051 ok = (vectorizable_operation (stmt, NULL, NULL, node)
4052 || vectorizable_assignment (stmt, NULL, NULL, node)
4053 || vectorizable_load (stmt, NULL, NULL, node, NULL)
4054 || vectorizable_store (stmt, NULL, NULL, node));
4057 if (!ok)
4059 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4061 fprintf (vect_dump, "not vectorized: relevant stmt not ");
4062 fprintf (vect_dump, "supported: ");
4063 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4066 return false;
4069 if (bb_vinfo)
4070 return true;
4072 /* Stmts that are (also) "live" (i.e., used out of the loop)
4073 need extra handling, except for vectorizable reductions. */
4074 if (STMT_VINFO_LIVE_P (stmt_info)
4075 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4076 ok = vectorizable_live_operation (stmt, NULL, NULL);
4078 if (!ok)
4080 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4082 fprintf (vect_dump, "not vectorized: live stmt not ");
4083 fprintf (vect_dump, "supported: ");
4084 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4087 return false;
4090 if (!PURE_SLP_STMT (stmt_info))
4092 /* Groups of strided accesses whose size is not a power of 2 are not
4093 yet vectorizable using loop-vectorization. Therefore, if this stmt
4094 feeds non-SLP-able stmts (i.e., this stmt has to be both SLPed and
4095 loop-based vectorized), the loop cannot be vectorized. */
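/* For example (illustrative): a group of three interleaved loads such as
     x = a[3*i];  y = a[3*i+1];  z = a[3*i+2];
   has a group size of 3, which is not a power of 2, so it cannot be handled
   by the loop-based interleaving scheme.  */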
4096 if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
4097 && exact_log2 (DR_GROUP_SIZE (vinfo_for_stmt (
4098 DR_GROUP_FIRST_DR (stmt_info)))) == -1)
4100 if (vect_print_dump_info (REPORT_DETAILS))
4102 fprintf (vect_dump, "not vectorized: the size of group "
4103 "of strided accesses is not a power of 2");
4104 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4107 return false;
4111 return true;
4115 /* Function vect_transform_stmt.
4117 Create a vectorized stmt to replace STMT, and insert it at BSI. */
4119 bool
4120 vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
4121 bool *strided_store, slp_tree slp_node,
4122 slp_instance slp_node_instance)
4124 bool is_store = false;
4125 gimple vec_stmt = NULL;
4126 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4127 gimple orig_stmt_in_pattern;
4128 bool done;
4130 switch (STMT_VINFO_TYPE (stmt_info))
4132 case type_demotion_vec_info_type:
4133 done = vectorizable_type_demotion (stmt, gsi, &vec_stmt, slp_node);
4134 gcc_assert (done);
4135 break;
4137 case type_promotion_vec_info_type:
4138 done = vectorizable_type_promotion (stmt, gsi, &vec_stmt, slp_node);
4139 gcc_assert (done);
4140 break;
4142 case type_conversion_vec_info_type:
4143 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
4144 gcc_assert (done);
4145 break;
4147 case induc_vec_info_type:
4148 gcc_assert (!slp_node);
4149 done = vectorizable_induction (stmt, gsi, &vec_stmt);
4150 gcc_assert (done);
4151 break;
4153 case op_vec_info_type:
4154 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
4155 gcc_assert (done);
4156 break;
4158 case assignment_vec_info_type:
4159 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
4160 gcc_assert (done);
4161 break;
4163 case load_vec_info_type:
4164 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
4165 slp_node_instance);
4166 gcc_assert (done);
4167 break;
4169 case store_vec_info_type:
4170 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
4171 gcc_assert (done);
4172 if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && !slp_node)
4174 /* In case of interleaving, the whole chain is vectorized when the
4175 last store in the chain is reached. Store stmts before the last
4176 one are skipped, and their vec_stmt_info shouldn't be freed
4177 meanwhile. */
4178 *strided_store = true;
4179 if (STMT_VINFO_VEC_STMT (stmt_info))
4180 is_store = true;
4182 else
4183 is_store = true;
4184 break;
4186 case condition_vec_info_type:
4187 gcc_assert (!slp_node);
4188 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0);
4189 gcc_assert (done);
4190 break;
4192 case call_vec_info_type:
4193 gcc_assert (!slp_node);
4194 done = vectorizable_call (stmt, gsi, &vec_stmt);
4195 break;
4197 case reduc_vec_info_type:
4198 gcc_assert (!slp_node);
4199 done = vectorizable_reduction (stmt, gsi, &vec_stmt);
4200 gcc_assert (done);
4201 break;
4203 default:
4204 if (!STMT_VINFO_LIVE_P (stmt_info))
4206 if (vect_print_dump_info (REPORT_DETAILS))
4207 fprintf (vect_dump, "stmt not supported.");
4208 gcc_unreachable ();
4212 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
4213 is being vectorized, but outside the immediately enclosing loop. */
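/* For example (an illustrative sketch): when the outer loop below is the
   one being vectorized,
     for (i = 0; i < n; i++)
       {
         for (j = 0; j < m; j++)
           t = a[i] * c[j];      // inner-loop def of t
         b[i] = t;               // use of t in the outer loop
       }
   the vectorized def of t must be recorded on the inner-loop exit phi so
   that the vectorized outer-loop use can find it.  */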
4214 if (vec_stmt
4215 && STMT_VINFO_LOOP_VINFO (stmt_info)
4216 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
4217 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
4218 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
4219 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
4220 || STMT_VINFO_RELEVANT (stmt_info) ==
4221 vect_used_in_outer_by_reduction))
4223 struct loop *innerloop = LOOP_VINFO_LOOP (
4224 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
4225 imm_use_iterator imm_iter;
4226 use_operand_p use_p;
4227 tree scalar_dest;
4228 gimple exit_phi;
4230 if (vect_print_dump_info (REPORT_DETAILS))
4231 fprintf (vect_dump, "Record the vdef for outer-loop vectorization.");
4233 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
4234 (to be used when vectorizing outer-loop stmts that use the DEF of
4235 STMT). */
4236 if (gimple_code (stmt) == GIMPLE_PHI)
4237 scalar_dest = PHI_RESULT (stmt);
4238 else
4239 scalar_dest = gimple_assign_lhs (stmt);
4241 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4243 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
4245 exit_phi = USE_STMT (use_p);
4246 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
4251 /* Handle stmts whose DEF is used outside the loop-nest that is
4252 being vectorized. */
4253 if (STMT_VINFO_LIVE_P (stmt_info)
4254 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4256 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
4257 gcc_assert (done);
4260 if (vec_stmt)
4262 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
4263 orig_stmt_in_pattern = STMT_VINFO_RELATED_STMT (stmt_info);
4264 if (orig_stmt_in_pattern)
4266 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt_in_pattern);
4267 /* STMT was inserted by the vectorizer to replace a computation idiom.
4268 ORIG_STMT_IN_PATTERN is a stmt in the original sequence that
4269 computed this idiom. We need to record a pointer to VEC_STMT in
4270 the stmt_info of ORIG_STMT_IN_PATTERN. See more details in the
4271 documentation of vect_pattern_recog. */
4272 if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
4274 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
4275 STMT_VINFO_VEC_STMT (stmt_vinfo) = vec_stmt;
4280 return is_store;
4284 /* Remove a group of stores (for SLP or interleaving), free their
4285 stmt_vec_info. */
4287 void
4288 vect_remove_stores (gimple first_stmt)
4290 gimple next = first_stmt;
4291 gimple tmp;
4292 gimple_stmt_iterator next_si;
4294 while (next)
4296 /* Free the attached stmt_vec_info and remove the stmt. */
4297 next_si = gsi_for_stmt (next);
4298 gsi_remove (&next_si, true);
4299 tmp = DR_GROUP_NEXT_DR (vinfo_for_stmt (next));
4300 free_stmt_vec_info (next);
4301 next = tmp;
4306 /* Function new_stmt_vec_info.
4308 Create and initialize a new stmt_vec_info struct for STMT. */
4310 stmt_vec_info
4311 new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
4312 bb_vec_info bb_vinfo)
4314 stmt_vec_info res;
4315 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
4317 STMT_VINFO_TYPE (res) = undef_vec_info_type;
4318 STMT_VINFO_STMT (res) = stmt;
4319 STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
4320 STMT_VINFO_BB_VINFO (res) = bb_vinfo;
4321 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
4322 STMT_VINFO_LIVE_P (res) = false;
4323 STMT_VINFO_VECTYPE (res) = NULL;
4324 STMT_VINFO_VEC_STMT (res) = NULL;
4325 STMT_VINFO_IN_PATTERN_P (res) = false;
4326 STMT_VINFO_RELATED_STMT (res) = NULL;
4327 STMT_VINFO_DATA_REF (res) = NULL;
4329 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
4330 STMT_VINFO_DR_OFFSET (res) = NULL;
4331 STMT_VINFO_DR_INIT (res) = NULL;
4332 STMT_VINFO_DR_STEP (res) = NULL;
4333 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
4335 if (gimple_code (stmt) == GIMPLE_PHI
4336 && is_loop_header_bb_p (gimple_bb (stmt)))
4337 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
4338 else
4339 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
4341 STMT_VINFO_SAME_ALIGN_REFS (res) = VEC_alloc (dr_p, heap, 5);
4342 STMT_VINFO_INSIDE_OF_LOOP_COST (res) = 0;
4343 STMT_VINFO_OUTSIDE_OF_LOOP_COST (res) = 0;
4344 STMT_SLP_TYPE (res) = loop_vect;
4345 DR_GROUP_FIRST_DR (res) = NULL;
4346 DR_GROUP_NEXT_DR (res) = NULL;
4347 DR_GROUP_SIZE (res) = 0;
4348 DR_GROUP_STORE_COUNT (res) = 0;
4349 DR_GROUP_GAP (res) = 0;
4350 DR_GROUP_SAME_DR_STMT (res) = NULL;
4351 DR_GROUP_READ_WRITE_DEPENDENCE (res) = false;
4353 return res;
4357 /* Create the vector that holds the stmt_vec_infos. */
4359 void
4360 init_stmt_vec_info_vec (void)
4362 gcc_assert (!stmt_vec_info_vec);
4363 stmt_vec_info_vec = VEC_alloc (vec_void_p, heap, 50);
4367 /* Free the stmt_vec_info vector. */
4369 void
4370 free_stmt_vec_info_vec (void)
4372 gcc_assert (stmt_vec_info_vec);
4373 VEC_free (vec_void_p, heap, stmt_vec_info_vec);
4377 /* Free stmt vectorization related info. */
4379 void
4380 free_stmt_vec_info (gimple stmt)
4382 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4384 if (!stmt_info)
4385 return;
4387 VEC_free (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
4388 set_vinfo_for_stmt (stmt, NULL);
4389 free (stmt_info);
4393 /* Function get_vectype_for_scalar_type.
4395 Returns the vector type corresponding to SCALAR_TYPE as supported
4396 by the target. */
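/* For example (illustrative): on a target whose SIMD word is 16 bytes, a
   4-byte 'int' scalar type gives nunits = 16 / 4 = 4, and the function
   returns the type 'vector(4) int' (V4SImode), provided the target supports
   that vector mode.  */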
4398 tree
4399 get_vectype_for_scalar_type (tree scalar_type)
4401 enum machine_mode inner_mode = TYPE_MODE (scalar_type);
4402 int nbytes = GET_MODE_SIZE (inner_mode);
4403 int nunits;
4404 tree vectype;
4406 if (nbytes == 0 || nbytes >= UNITS_PER_SIMD_WORD (inner_mode))
4407 return NULL_TREE;
4409 /* FORNOW: Only a single vector size per mode (UNITS_PER_SIMD_WORD)
4410 is expected. */
4411 nunits = UNITS_PER_SIMD_WORD (inner_mode) / nbytes;
4413 vectype = build_vector_type (scalar_type, nunits);
4414 if (vect_print_dump_info (REPORT_DETAILS))
4416 fprintf (vect_dump, "get vectype with %d units of type ", nunits);
4417 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4420 if (!vectype)
4421 return NULL_TREE;
4423 if (vect_print_dump_info (REPORT_DETAILS))
4425 fprintf (vect_dump, "vectype: ");
4426 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4429 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
4430 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
4432 if (vect_print_dump_info (REPORT_DETAILS))
4433 fprintf (vect_dump, "mode not supported by target.");
4434 return NULL_TREE;
4437 return vectype;
4440 /* Function vect_is_simple_use.
4442 Input:
4443 LOOP_VINFO - the vect info of the loop that is being vectorized.
4444 BB_VINFO - the vect info of the basic block that is being vectorized.
4445 OPERAND - operand of a stmt in the loop or bb.
4446 DEF - the defining stmt in case OPERAND is an SSA_NAME.
4448 Returns whether a stmt with OPERAND can be vectorized.
4449 For loops, supportable operands are constants, loop invariants, and operands
4450 that are defined by the current iteration of the loop. Unsupportable
4451 operands are those that are defined by a previous iteration of the loop (as
4452 is the case in reduction/induction computations).
4453 For basic blocks, supportable operands are constants and bb invariants.
4454 For now, operands defined outside the basic block are not supported. */
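/* For example (illustrative): in
     for (i = 0; i < n; i++)
       a[i] = x_3 + b[i];
   an operand defined by a statement in the current loop iteration (e.g. the
   load of b[i]) classifies as vect_internal_def, the loop-invariant x_3 as
   vect_external_def, and a literal constant as vect_constant_def.  */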
4456 bool
4457 vect_is_simple_use (tree operand, loop_vec_info loop_vinfo,
4458 bb_vec_info bb_vinfo, gimple *def_stmt,
4459 tree *def, enum vect_def_type *dt)
4461 basic_block bb;
4462 stmt_vec_info stmt_vinfo;
4463 struct loop *loop = NULL;
4465 if (loop_vinfo)
4466 loop = LOOP_VINFO_LOOP (loop_vinfo);
4468 *def_stmt = NULL;
4469 *def = NULL_TREE;
4471 if (vect_print_dump_info (REPORT_DETAILS))
4473 fprintf (vect_dump, "vect_is_simple_use: operand ");
4474 print_generic_expr (vect_dump, operand, TDF_SLIM);
4477 if (TREE_CODE (operand) == INTEGER_CST || TREE_CODE (operand) == REAL_CST)
4479 *dt = vect_constant_def;
4480 return true;
4483 if (is_gimple_min_invariant (operand))
4485 *def = operand;
4486 *dt = vect_external_def;
4487 return true;
4490 if (TREE_CODE (operand) == PAREN_EXPR)
4492 if (vect_print_dump_info (REPORT_DETAILS))
4493 fprintf (vect_dump, "non-associatable copy.");
4494 operand = TREE_OPERAND (operand, 0);
4497 if (TREE_CODE (operand) != SSA_NAME)
4499 if (vect_print_dump_info (REPORT_DETAILS))
4500 fprintf (vect_dump, "not ssa-name.");
4501 return false;
4504 *def_stmt = SSA_NAME_DEF_STMT (operand);
4505 if (*def_stmt == NULL)
4507 if (vect_print_dump_info (REPORT_DETAILS))
4508 fprintf (vect_dump, "no def_stmt.");
4509 return false;
4512 if (vect_print_dump_info (REPORT_DETAILS))
4514 fprintf (vect_dump, "def_stmt: ");
4515 print_gimple_stmt (vect_dump, *def_stmt, 0, TDF_SLIM);
4518 /* Empty stmt is expected only in case of a function argument.
4519 (Otherwise we expect a PHI node or a GIMPLE_ASSIGN). */
4520 if (gimple_nop_p (*def_stmt))
4522 *def = operand;
4523 *dt = vect_external_def;
4524 return true;
4527 bb = gimple_bb (*def_stmt);
4529 if ((loop && !flow_bb_inside_loop_p (loop, bb))
4530 || (!loop && bb != BB_VINFO_BB (bb_vinfo))
4531 || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
4532 *dt = vect_external_def;
4533 else
4535 stmt_vinfo = vinfo_for_stmt (*def_stmt);
4536 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
4539 if (*dt == vect_unknown_def_type)
4541 if (vect_print_dump_info (REPORT_DETAILS))
4542 fprintf (vect_dump, "Unsupported pattern.");
4543 return false;
4546 if (vect_print_dump_info (REPORT_DETAILS))
4547 fprintf (vect_dump, "type of def: %d.",*dt);
4549 switch (gimple_code (*def_stmt))
4551 case GIMPLE_PHI:
4552 *def = gimple_phi_result (*def_stmt);
4553 break;
4555 case GIMPLE_ASSIGN:
4556 *def = gimple_assign_lhs (*def_stmt);
4557 break;
4559 case GIMPLE_CALL:
4560 *def = gimple_call_lhs (*def_stmt);
4561 if (*def != NULL)
4562 break;
4563 /* FALLTHRU */
4564 default:
4565 if (vect_print_dump_info (REPORT_DETAILS))
4566 fprintf (vect_dump, "unsupported defining stmt: ");
4567 return false;
4570 return true;
4574 /* Function supportable_widening_operation
4576 Check whether an operation represented by the code CODE is a
4577 widening operation that is supported by the target platform in
4578 vector form (i.e., when operating on arguments of type VECTYPE).
4580 Widening operations we currently support are NOP (CONVERT), FLOAT
4581 and WIDEN_MULT. This function checks if these operations are supported
4582 by the target platform either directly (via vector tree-codes), or via
4583 target builtins.
4585 Output:
4586 - CODE1 and CODE2 are codes of vector operations to be used when
4587 vectorizing the operation, if available.
4588 - DECL1 and DECL2 are decls of target builtin functions to be used
4589 when vectorizing the operation, if available. In this case,
4590 CODE1 and CODE2 are CALL_EXPR.
4591 - MULTI_STEP_CVT determines the number of required intermediate steps in
4592 case of multi-step conversion (like char->short->int - in that case
4593 MULTI_STEP_CVT will be 1).
4594 - INTERM_TYPES contains the intermediate type required to perform the
4595 widening operation (short in the above example). */
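/* For example (illustrative): widening 'char' elements all the way to 'int'
   goes through an intermediate 'short' step, so on success MULTI_STEP_CVT is
   1, INTERM_TYPES holds the intermediate short-vector type, and CODE1/CODE2
   are the VEC_UNPACK_LO_EXPR/VEC_UNPACK_HI_EXPR codes applied at each
   step.  */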
4597 bool
4598 supportable_widening_operation (enum tree_code code, gimple stmt, tree vectype,
4599 tree *decl1, tree *decl2,
4600 enum tree_code *code1, enum tree_code *code2,
4601 int *multi_step_cvt,
4602 VEC (tree, heap) **interm_types)
4604 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4605 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
4606 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
4607 bool ordered_p;
4608 enum machine_mode vec_mode;
4609 enum insn_code icode1, icode2;
4610 optab optab1, optab2;
4611 tree type = gimple_expr_type (stmt);
4612 tree wide_vectype = get_vectype_for_scalar_type (type);
4613 enum tree_code c1, c2;
4615 /* The result of a vectorized widening operation usually requires two vectors
4616 (because the widened results do not fit in one vector). The generated
4617 vector results would normally be expected to be generated in the same
4618 order as in the original scalar computation, i.e. if 8 results are
4619 generated in each vector iteration, they are to be organized as follows:
4620 vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].
4622 However, in the special case that the result of the widening operation is
4623 used in a reduction computation only, the order doesn't matter (because
4624 when vectorizing a reduction we change the order of the computation).
4625 Some targets can take advantage of this and generate more efficient code.
4626 For example, targets like Altivec, that support widen_mult using a sequence
4627 of {mult_even,mult_odd} generate the following vectors:
4628 vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8].
4630 When vectorizing outer-loops, we execute the inner-loop sequentially
4631 (each vectorized inner-loop iteration contributes to VF outer-loop
4632 iterations in parallel). We therefore don't allow changing the order
4633 of the computation in the inner-loop during outer-loop vectorization. */
4635 if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
4636 && !nested_in_vect_loop_p (vect_loop, stmt))
4637 ordered_p = false;
4638 else
4639 ordered_p = true;
4641 if (!ordered_p
4642 && code == WIDEN_MULT_EXPR
4643 && targetm.vectorize.builtin_mul_widen_even
4644 && targetm.vectorize.builtin_mul_widen_even (vectype)
4645 && targetm.vectorize.builtin_mul_widen_odd
4646 && targetm.vectorize.builtin_mul_widen_odd (vectype))
4648 if (vect_print_dump_info (REPORT_DETAILS))
4649 fprintf (vect_dump, "Unordered widening operation detected.");
4651 *code1 = *code2 = CALL_EXPR;
4652 *decl1 = targetm.vectorize.builtin_mul_widen_even (vectype);
4653 *decl2 = targetm.vectorize.builtin_mul_widen_odd (vectype);
4654 return true;
4657 switch (code)
4659 case WIDEN_MULT_EXPR:
4660 if (BYTES_BIG_ENDIAN)
4662 c1 = VEC_WIDEN_MULT_HI_EXPR;
4663 c2 = VEC_WIDEN_MULT_LO_EXPR;
4665 else
4667 c2 = VEC_WIDEN_MULT_HI_EXPR;
4668 c1 = VEC_WIDEN_MULT_LO_EXPR;
4670 break;
4672 CASE_CONVERT:
4673 if (BYTES_BIG_ENDIAN)
4675 c1 = VEC_UNPACK_HI_EXPR;
4676 c2 = VEC_UNPACK_LO_EXPR;
4678 else
4680 c2 = VEC_UNPACK_HI_EXPR;
4681 c1 = VEC_UNPACK_LO_EXPR;
4683 break;
4685 case FLOAT_EXPR:
4686 if (BYTES_BIG_ENDIAN)
4688 c1 = VEC_UNPACK_FLOAT_HI_EXPR;
4689 c2 = VEC_UNPACK_FLOAT_LO_EXPR;
4691 else
4693 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
4694 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
4696 break;
4698 case FIX_TRUNC_EXPR:
4699 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
4700 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
4701 computing the operation. */
4702 return false;
4704 default:
4705 gcc_unreachable ();
4708 if (code == FIX_TRUNC_EXPR)
4710 /* The signedness is determined from output operand. */
4711 optab1 = optab_for_tree_code (c1, type, optab_default);
4712 optab2 = optab_for_tree_code (c2, type, optab_default);
4714 else
4716 optab1 = optab_for_tree_code (c1, vectype, optab_default);
4717 optab2 = optab_for_tree_code (c2, vectype, optab_default);
4720 if (!optab1 || !optab2)
4721 return false;
4723 vec_mode = TYPE_MODE (vectype);
4724 if ((icode1 = optab_handler (optab1, vec_mode)->insn_code) == CODE_FOR_nothing
4725 || (icode2 = optab_handler (optab2, vec_mode)->insn_code)
4726 == CODE_FOR_nothing)
4727 return false;
4729 /* Check if it's a multi-step conversion that can be done using intermediate
4730 types. */
4731 if (insn_data[icode1].operand[0].mode != TYPE_MODE (wide_vectype)
4732 || insn_data[icode2].operand[0].mode != TYPE_MODE (wide_vectype))
4734 int i;
4735 tree prev_type = vectype, intermediate_type;
4736 enum machine_mode intermediate_mode, prev_mode = vec_mode;
4737 optab optab3, optab4;
4739 if (!CONVERT_EXPR_CODE_P (code))
4740 return false;
4742 *code1 = c1;
4743 *code2 = c2;
4745 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
4746 intermediate steps in the promotion sequence. We try MAX_INTERM_CVT_STEPS
4747 times to get to WIDE_VECTYPE, and fail if we do not. */
4748 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
4749 for (i = 0; i < 3; i++)
4751 intermediate_mode = insn_data[icode1].operand[0].mode;
4752 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
4753 TYPE_UNSIGNED (prev_type));
4754 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
4755 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
4757 if (!optab3 || !optab4
4758 || (icode1 = optab1->handlers[(int) prev_mode].insn_code)
4759 == CODE_FOR_nothing
4760 || insn_data[icode1].operand[0].mode != intermediate_mode
4761 || (icode2 = optab2->handlers[(int) prev_mode].insn_code)
4762 == CODE_FOR_nothing
4763 || insn_data[icode2].operand[0].mode != intermediate_mode
4764 || (icode1 = optab3->handlers[(int) intermediate_mode].insn_code)
4765 == CODE_FOR_nothing
4766 || (icode2 = optab4->handlers[(int) intermediate_mode].insn_code)
4767 == CODE_FOR_nothing)
4768 return false;
4770 VEC_quick_push (tree, *interm_types, intermediate_type);
4771 (*multi_step_cvt)++;
4773 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
4774 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
4775 return true;
4777 prev_type = intermediate_type;
4778 prev_mode = intermediate_mode;
4781 return false;
4784 *code1 = c1;
4785 *code2 = c2;
4786 return true;
4790 /* Function supportable_narrowing_operation
4792 Check whether an operation represented by the code CODE is a
4793 narrowing operation that is supported by the target platform in
4794 vector form (i.e., when operating on arguments of type VECTYPE).
4796 Narrowing operations we currently support are NOP (CONVERT) and
4797 FIX_TRUNC. This function checks if these operations are supported by
4798 the target platform directly via vector tree-codes.
4800 Output:
4801 - CODE1 is the code of a vector operation to be used when
4802 vectorizing the operation, if available.
4803 - MULTI_STEP_CVT determines the number of required intermediate steps in
4804 case of multi-step conversion (like int->short->char - in that case
4805 MULTI_STEP_CVT will be 1).
4806 - INTERM_TYPES contains the intermediate type required to perform the
4807 narrowing operation (short in the above example). */
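/* For example (illustrative): narrowing 'int' elements down to 'char' goes
   through an intermediate 'short' step, so on success CODE1 is
   VEC_PACK_TRUNC_EXPR, MULTI_STEP_CVT is 1, and INTERM_TYPES holds the
   intermediate short-vector type.  */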
4809 bool
4810 supportable_narrowing_operation (enum tree_code code,
4811 const_gimple stmt, tree vectype,
4812 enum tree_code *code1, int *multi_step_cvt,
4813 VEC (tree, heap) **interm_types)
4815 enum machine_mode vec_mode;
4816 enum insn_code icode1;
4817 optab optab1, interm_optab;
4818 tree type = gimple_expr_type (stmt);
4819 tree narrow_vectype = get_vectype_for_scalar_type (type);
4820 enum tree_code c1;
4821 tree intermediate_type, prev_type;
4822 int i;
4824 switch (code)
4826 CASE_CONVERT:
4827 c1 = VEC_PACK_TRUNC_EXPR;
4828 break;
4830 case FIX_TRUNC_EXPR:
4831 c1 = VEC_PACK_FIX_TRUNC_EXPR;
4832 break;
4834 case FLOAT_EXPR:
4835 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
4836 tree code and optabs used for computing the operation. */
4837 return false;
4839 default:
4840 gcc_unreachable ();
4843 if (code == FIX_TRUNC_EXPR)
4844 /* The signedness is determined from output operand. */
4845 optab1 = optab_for_tree_code (c1, type, optab_default);
4846 else
4847 optab1 = optab_for_tree_code (c1, vectype, optab_default);
4849 if (!optab1)
4850 return false;
4852 vec_mode = TYPE_MODE (vectype);
4853 if ((icode1 = optab_handler (optab1, vec_mode)->insn_code)
4854 == CODE_FOR_nothing)
4855 return false;
4857 /* Check if it's a multi-step conversion that can be done using intermediate
4858 types. */
4859 if (insn_data[icode1].operand[0].mode != TYPE_MODE (narrow_vectype))
4861 enum machine_mode intermediate_mode, prev_mode = vec_mode;
4863 *code1 = c1;
4864 prev_type = vectype;
4865 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
4866 intermediate steps in the narrowing sequence. We try MAX_INTERM_CVT_STEPS
4867 times to get to NARROW_VECTYPE, and fail if we do not. */
4868 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
4869 for (i = 0; i < 3; i++)
4871 intermediate_mode = insn_data[icode1].operand[0].mode;
4872 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
4873 TYPE_UNSIGNED (prev_type));
4874 interm_optab = optab_for_tree_code (c1, intermediate_type,
4875 optab_default);
4876 if (!interm_optab
4877 || (icode1 = optab1->handlers[(int) prev_mode].insn_code)
4878 == CODE_FOR_nothing
4879 || insn_data[icode1].operand[0].mode != intermediate_mode
4880 || (icode1
4881 = interm_optab->handlers[(int) intermediate_mode].insn_code)
4882 == CODE_FOR_nothing)
4883 return false;
4885 VEC_quick_push (tree, *interm_types, intermediate_type);
4886 (*multi_step_cvt)++;
4888 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
4889 return true;
4891 prev_type = intermediate_type;
4892 prev_mode = intermediate_mode;
4895 return false;
4898 *code1 = c1;
4899 return true;