gcc/tree-vect-stmts.c
1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
3 Free Software Foundation, Inc.
4 Contributed by Dorit Naishlos <dorit@il.ibm.com>
5 and Ira Rosen <irar@il.ibm.com>
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "ggc.h"
28 #include "tree.h"
29 #include "target.h"
30 #include "basic-block.h"
31 #include "tree-pretty-print.h"
32 #include "gimple-pretty-print.h"
33 #include "tree-flow.h"
34 #include "tree-dump.h"
35 #include "cfgloop.h"
36 #include "cfglayout.h"
37 #include "expr.h"
38 #include "recog.h"
39 #include "optabs.h"
40 #include "toplev.h"
41 #include "tree-vectorizer.h"
42 #include "langhooks.h"
45 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
47 /* Function vect_mark_relevant.
49 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
51 static void
52 vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt,
53 enum vect_relevant relevant, bool live_p)
55 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
56 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
57 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
59 if (vect_print_dump_info (REPORT_DETAILS))
60 fprintf (vect_dump, "mark relevant %d, live %d.", relevant, live_p);
62 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
64 gimple pattern_stmt;
66 /* This is the last stmt in a sequence that was detected as a
67 pattern that can potentially be vectorized. Don't mark the stmt
68 as relevant/live because it's not going to be vectorized.
69 Instead mark the pattern-stmt that replaces it. */
71 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
73 if (vect_print_dump_info (REPORT_DETAILS))
74 fprintf (vect_dump, "last stmt in pattern. don't mark relevant/live.");
75 stmt_info = vinfo_for_stmt (pattern_stmt);
76 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
77 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
78 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
79 stmt = pattern_stmt;
82 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
83 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
84 STMT_VINFO_RELEVANT (stmt_info) = relevant;
86 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
87 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
89 if (vect_print_dump_info (REPORT_DETAILS))
90 fprintf (vect_dump, "already marked relevant/live.");
91 return;
94 VEC_safe_push (gimple, heap, *worklist, stmt);
98 /* Function vect_stmt_relevant_p.
100 Return true if STMT in loop that is represented by LOOP_VINFO is
101 "relevant for vectorization".
103 A stmt is considered "relevant for vectorization" if:
104 - it has uses outside the loop.
105 - it has vdefs (it alters memory).
106 - it is a control stmt in the loop (other than the exit condition).
108 CHECKME: what other side effects would the vectorizer allow? */
110 static bool
111 vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
112 enum vect_relevant *relevant, bool *live_p)
114 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
115 ssa_op_iter op_iter;
116 imm_use_iterator imm_iter;
117 use_operand_p use_p;
118 def_operand_p def_p;
120 *relevant = vect_unused_in_scope;
121 *live_p = false;
123 /* cond stmt other than loop exit cond. */
124 if (is_ctrl_stmt (stmt)
125 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
126 != loop_exit_ctrl_vec_info_type)
127 *relevant = vect_used_in_scope;
129 /* changing memory. */
130 if (gimple_code (stmt) != GIMPLE_PHI)
131 if (gimple_vdef (stmt))
133 if (vect_print_dump_info (REPORT_DETAILS))
134 fprintf (vect_dump, "vec_stmt_relevant_p: stmt has vdefs.");
135 *relevant = vect_used_in_scope;
138 /* uses outside the loop. */
139 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
141 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
143 basic_block bb = gimple_bb (USE_STMT (use_p));
144 if (!flow_bb_inside_loop_p (loop, bb))
146 if (vect_print_dump_info (REPORT_DETAILS))
147 fprintf (vect_dump, "vec_stmt_relevant_p: used out of loop.");
149 if (is_gimple_debug (USE_STMT (use_p)))
150 continue;
152 /* We expect all such uses to be in the loop exit phis
153 (because of loop closed form) */
154 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
155 gcc_assert (bb == single_exit (loop)->dest);
157 *live_p = true;
162 return (*live_p || *relevant);
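/* Illustrative sketch (not from the original source): in a loop such as

     for (i = 0; i < n; i++)
       {
         a[i] = b[i] + c;    <-- has a vdef (alters memory): *relevant
         s = s + b[i];       <-- its def is used after the loop: *live_p
       }
     ... = s;

   the store is marked relevant because it changes memory, and the
   definition of 's' is marked live because of its use in the loop
   exit phi.  */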
166 /* Function exist_non_indexing_operands_for_use_p
168 USE is one of the uses attached to STMT. Check if USE is
169 used in STMT for anything other than indexing an array. */
171 static bool
172 exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
174 tree operand;
175 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
177 /* USE corresponds to some operand in STMT. If there is no data
178 reference in STMT, then any operand that corresponds to USE
179 is not indexing an array. */
180 if (!STMT_VINFO_DATA_REF (stmt_info))
181 return true;
183 /* STMT has a data_ref. FORNOW this means that it is of one of
184 the following forms:
185 -1- ARRAY_REF = var
186 -2- var = ARRAY_REF
187 (This should have been verified in analyze_data_refs).
189 'var' in the second case corresponds to a def, not a use,
190 so USE cannot correspond to any operands that are not used
191 for array indexing.
193 Therefore, all we need to check is if STMT falls into the
194 first case, and whether var corresponds to USE. */
196 if (!gimple_assign_copy_p (stmt))
197 return false;
198 if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
199 return false;
200 operand = gimple_assign_rhs1 (stmt);
201 if (TREE_CODE (operand) != SSA_NAME)
202 return false;
204 if (operand == use)
205 return true;
207 return false;
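/* Example (illustrative only): for the store "a[i_1] = x_2" the use
   'x_2' is the stored value, so it needs a vector def and this
   function returns true for it; the use 'i_1' appears only in the
   array index, so the function returns false for it and process_use
   leaves the relevance of its def-stmt unchanged.  */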
212 Function process_use.
214 Inputs:
215 - a USE in STMT in a loop represented by LOOP_VINFO
216 - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
217 that defined USE. This is done by calling mark_relevant and passing it
218 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
220 Outputs:
221 Generally, LIVE_P and RELEVANT are used to define the liveness and
222 relevance info of the DEF_STMT of this USE:
223 STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
224 STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
225 Exceptions:
226 - case 1: If USE is used only for address computations (e.g. array indexing),
227 which does not need to be directly vectorized, then the liveness/relevance
228 of the respective DEF_STMT is left unchanged.
229 - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
230 skip DEF_STMT because it had already been processed.
231 - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
232 be modified accordingly.
234 Return true if everything is as expected. Return false otherwise. */
236 static bool
237 process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
238 enum vect_relevant relevant, VEC(gimple,heap) **worklist)
240 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
241 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
242 stmt_vec_info dstmt_vinfo;
243 basic_block bb, def_bb;
244 tree def;
245 gimple def_stmt;
246 enum vect_def_type dt;
248 /* case 1: we are only interested in uses that need to be vectorized. Uses
249 that are used for address computation are not considered relevant. */
250 if (!exist_non_indexing_operands_for_use_p (use, stmt))
251 return true;
253 if (!vect_is_simple_use (use, loop_vinfo, NULL, &def_stmt, &def, &dt))
255 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
256 fprintf (vect_dump, "not vectorized: unsupported use in stmt.");
257 return false;
260 if (!def_stmt || gimple_nop_p (def_stmt))
261 return true;
263 def_bb = gimple_bb (def_stmt);
264 if (!flow_bb_inside_loop_p (loop, def_bb))
266 if (vect_print_dump_info (REPORT_DETAILS))
267 fprintf (vect_dump, "def_stmt is out of loop.");
268 return true;
271 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
272 DEF_STMT must have already been processed, because this should be the
273 only way that STMT, which is a reduction-phi, was put in the worklist,
274 as there should be no other uses for DEF_STMT in the loop. So we just
275 check that everything is as expected, and we are done. */
276 dstmt_vinfo = vinfo_for_stmt (def_stmt);
277 bb = gimple_bb (stmt);
278 if (gimple_code (stmt) == GIMPLE_PHI
279 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
280 && gimple_code (def_stmt) != GIMPLE_PHI
281 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
282 && bb->loop_father == def_bb->loop_father)
284 if (vect_print_dump_info (REPORT_DETAILS))
285 fprintf (vect_dump, "reduc-stmt defining reduc-phi in the same nest.");
286 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
287 dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
288 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
289 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
290 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
291 return true;
294 /* case 3a: outer-loop stmt defining an inner-loop stmt:
295 outer-loop-header-bb:
296 d = def_stmt
297 inner-loop:
298 stmt # use (d)
299 outer-loop-tail-bb:
300 ... */
301 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
303 if (vect_print_dump_info (REPORT_DETAILS))
304 fprintf (vect_dump, "outer-loop def-stmt defining inner-loop stmt.");
306 switch (relevant)
308 case vect_unused_in_scope:
309 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
310 vect_used_in_scope : vect_unused_in_scope;
311 break;
313 case vect_used_in_outer_by_reduction:
314 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
315 relevant = vect_used_by_reduction;
316 break;
318 case vect_used_in_outer:
319 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
320 relevant = vect_used_in_scope;
321 break;
323 case vect_used_in_scope:
324 break;
326 default:
327 gcc_unreachable ();
331 /* case 3b: inner-loop stmt defining an outer-loop stmt:
332 outer-loop-header-bb:
334 inner-loop:
335 d = def_stmt
336 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
337 stmt # use (d) */
338 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
340 if (vect_print_dump_info (REPORT_DETAILS))
341 fprintf (vect_dump, "inner-loop def-stmt defining outer-loop stmt.");
343 switch (relevant)
345 case vect_unused_in_scope:
346 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
347 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
348 vect_used_in_outer_by_reduction : vect_unused_in_scope;
349 break;
351 case vect_used_by_reduction:
352 relevant = vect_used_in_outer_by_reduction;
353 break;
355 case vect_used_in_scope:
356 relevant = vect_used_in_outer;
357 break;
359 default:
360 gcc_unreachable ();
364 vect_mark_relevant (worklist, def_stmt, relevant, live_p);
365 return true;
369 /* Function vect_mark_stmts_to_be_vectorized.
371 Not all stmts in the loop need to be vectorized. For example:
373 for i...
374 for j...
375 1. T0 = i + j
376 2. T1 = a[T0]
378 3. j = j + 1
380 Stmt 1 and 3 do not need to be vectorized, because loop control and
381 addressing of vectorized data-refs are handled differently.
383 This pass detects such stmts. */
385 bool
386 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
388 VEC(gimple,heap) *worklist;
389 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
390 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
391 unsigned int nbbs = loop->num_nodes;
392 gimple_stmt_iterator si;
393 gimple stmt;
394 unsigned int i;
395 stmt_vec_info stmt_vinfo;
396 basic_block bb;
397 gimple phi;
398 bool live_p;
399 enum vect_relevant relevant, tmp_relevant;
400 enum vect_def_type def_type;
402 if (vect_print_dump_info (REPORT_DETAILS))
403 fprintf (vect_dump, "=== vect_mark_stmts_to_be_vectorized ===");
405 worklist = VEC_alloc (gimple, heap, 64);
407 /* 1. Init worklist. */
408 for (i = 0; i < nbbs; i++)
410 bb = bbs[i];
411 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
413 phi = gsi_stmt (si);
414 if (vect_print_dump_info (REPORT_DETAILS))
416 fprintf (vect_dump, "init: phi relevant? ");
417 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
420 if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
421 vect_mark_relevant (&worklist, phi, relevant, live_p);
423 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
425 stmt = gsi_stmt (si);
426 if (vect_print_dump_info (REPORT_DETAILS))
428 fprintf (vect_dump, "init: stmt relevant? ");
429 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
432 if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
433 vect_mark_relevant (&worklist, stmt, relevant, live_p);
437 /* 2. Process_worklist */
438 while (VEC_length (gimple, worklist) > 0)
440 use_operand_p use_p;
441 ssa_op_iter iter;
443 stmt = VEC_pop (gimple, worklist);
444 if (vect_print_dump_info (REPORT_DETAILS))
446 fprintf (vect_dump, "worklist: examine stmt: ");
447 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
450 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
451 (DEF_STMT) as relevant/irrelevant and live/dead according to the
452 liveness and relevance properties of STMT. */
453 stmt_vinfo = vinfo_for_stmt (stmt);
454 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
455 live_p = STMT_VINFO_LIVE_P (stmt_vinfo);
457 /* Generally, the liveness and relevance properties of STMT are
458 propagated as is to the DEF_STMTs of its USEs:
459 live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
460 relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)
462 One exception is when STMT has been identified as defining a reduction
463 variable; in this case we set the liveness/relevance as follows:
464 live_p = false
465 relevant = vect_used_by_reduction
466 This is because we distinguish between two kinds of relevant stmts -
467 those that are used by a reduction computation, and those that are
468 (also) used by a regular computation. This allows us later on to
469 identify stmts that are used solely by a reduction, and therefore the
470 order of the results that they produce does not have to be kept. */
472 def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
473 tmp_relevant = relevant;
474 switch (def_type)
476 case vect_reduction_def:
477 switch (tmp_relevant)
479 case vect_unused_in_scope:
480 relevant = vect_used_by_reduction;
481 break;
483 case vect_used_by_reduction:
484 if (gimple_code (stmt) == GIMPLE_PHI)
485 break;
486 /* fall through */
488 default:
489 if (vect_print_dump_info (REPORT_DETAILS))
490 fprintf (vect_dump, "unsupported use of reduction.");
492 VEC_free (gimple, heap, worklist);
493 return false;
496 live_p = false;
497 break;
499 case vect_nested_cycle:
500 if (tmp_relevant != vect_unused_in_scope
501 && tmp_relevant != vect_used_in_outer_by_reduction
502 && tmp_relevant != vect_used_in_outer)
504 if (vect_print_dump_info (REPORT_DETAILS))
505 fprintf (vect_dump, "unsupported use of nested cycle.");
507 VEC_free (gimple, heap, worklist);
508 return false;
511 live_p = false;
512 break;
514 case vect_double_reduction_def:
515 if (tmp_relevant != vect_unused_in_scope
516 && tmp_relevant != vect_used_by_reduction)
518 if (vect_print_dump_info (REPORT_DETAILS))
519 fprintf (vect_dump, "unsupported use of double reduction.");
521 VEC_free (gimple, heap, worklist);
522 return false;
525 live_p = false;
526 break;
528 default:
529 break;
532 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
534 tree op = USE_FROM_PTR (use_p);
535 if (!process_use (stmt, op, loop_vinfo, live_p, relevant, &worklist))
537 VEC_free (gimple, heap, worklist);
538 return false;
541 } /* while worklist */
543 VEC_free (gimple, heap, worklist);
544 return true;
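/* Worked example (illustrative, not from the source): for a reduction

     for (i = 0; i < n; i++)
       sum = sum + a[i];
     ... = sum;

   the summation stmt is initially marked live because its result is
   used after the loop.  Since its def-type is vect_reduction_def, the
   worklist processing above clears live_p and propagates
   relevant = vect_used_by_reduction, so the load of a[i] feeding it is
   marked vect_used_by_reduction, while the index increment remains
   unvectorized (it is used only for addressing and loop control).  */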
548 /* Get cost by calling cost target builtin. */
550 static inline
551 int vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
553 tree dummy_type = NULL;
554 int dummy = 0;
556 return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
557 dummy_type, dummy);
561 cost_for_stmt (gimple stmt)
563 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
565 switch (STMT_VINFO_TYPE (stmt_info))
567 case load_vec_info_type:
568 return vect_get_stmt_cost (scalar_load);
569 case store_vec_info_type:
570 return vect_get_stmt_cost (scalar_store);
571 case op_vec_info_type:
572 case condition_vec_info_type:
573 case assignment_vec_info_type:
574 case reduc_vec_info_type:
575 case induc_vec_info_type:
576 case type_promotion_vec_info_type:
577 case type_demotion_vec_info_type:
578 case type_conversion_vec_info_type:
579 case call_vec_info_type:
580 return vect_get_stmt_cost (scalar_stmt);
581 case undef_vec_info_type:
582 default:
583 gcc_unreachable ();
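/* For illustration only (the numbers are target-dependent): if the
   target's builtin_vectorization_cost hook returns 1 for scalar_load
   and scalar_stmt, then cost_for_stmt gives 1 for a scalar load and 1
   for a scalar addition; a target may instead return larger values,
   e.g. for unaligned accesses, to steer the profitability check.  */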
587 /* Function vect_model_simple_cost.
589 Models cost for simple operations, i.e. those that only emit ncopies of a
590 single op. Right now, this does not account for multiple insns that could
591 be generated for the single vector op. We will handle that shortly. */
593 void
594 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
595 enum vect_def_type *dt, slp_tree slp_node)
597 int i;
598 int inside_cost = 0, outside_cost = 0;
600 /* The SLP costs were already calculated during SLP tree build. */
601 if (PURE_SLP_STMT (stmt_info))
602 return;
604 inside_cost = ncopies * vect_get_stmt_cost (vector_stmt);
606 /* FORNOW: Assuming maximum 2 args per stmts. */
607 for (i = 0; i < 2; i++)
609 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
610 outside_cost += vect_get_stmt_cost (vector_stmt);
613 if (vect_print_dump_info (REPORT_COST))
614 fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, "
615 "outside_cost = %d .", inside_cost, outside_cost);
617 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
618 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
619 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
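/* Worked example (illustrative): vectorizing "x_1 = y_2 + z_3" with
   ncopies == 2, where y_2 is defined inside the loop and z_3 is loop
   invariant (vect_external_def), gives
     inside_cost  = 2 * vect_get_stmt_cost (vector_stmt)
     outside_cost = 1 * vect_get_stmt_cost (vector_stmt)
   i.e. two vector adds inside the loop plus one statement outside the
   loop that builds the invariant vector.  */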
623 /* Function vect_cost_strided_group_size
625 For strided load or store, return the group_size only if it is the first
626 load or store of a group, else return 1. This ensures that group size is
627 only returned once per group. */
629 static int
630 vect_cost_strided_group_size (stmt_vec_info stmt_info)
632 gimple first_stmt = DR_GROUP_FIRST_DR (stmt_info);
634 if (first_stmt == STMT_VINFO_STMT (stmt_info))
635 return DR_GROUP_SIZE (stmt_info);
637 return 1;
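/* Example (illustrative): for an interleaved group of four stores
   a[4*i], a[4*i+1], a[4*i+2], a[4*i+3], this returns 4 for the first
   store of the group and 1 for the other three, so the group overhead
   is accounted for exactly once.  */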
641 /* Function vect_model_store_cost
643 Models cost for stores. In the case of strided accesses, one access
644 has the overhead of the strided access attributed to it. */
646 void
647 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
648 enum vect_def_type dt, slp_tree slp_node)
650 int group_size;
651 unsigned int inside_cost = 0, outside_cost = 0;
652 struct data_reference *first_dr;
653 gimple first_stmt;
655 /* The SLP costs were already calculated during SLP tree build. */
656 if (PURE_SLP_STMT (stmt_info))
657 return;
659 if (dt == vect_constant_def || dt == vect_external_def)
660 outside_cost = vect_get_stmt_cost (scalar_to_vec);
662 /* Strided access? */
663 if (DR_GROUP_FIRST_DR (stmt_info))
665 if (slp_node)
667 first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
668 group_size = 1;
670 else
672 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
673 group_size = vect_cost_strided_group_size (stmt_info);
676 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
678 /* Not a strided access. */
679 else
681 group_size = 1;
682 first_dr = STMT_VINFO_DATA_REF (stmt_info);
685 /* Is this an access in a group of stores, which provide strided access?
686 If so, add in the cost of the permutes. */
687 if (group_size > 1)
689 /* Uses a high and low interleave operation for each needed permute. */
690 inside_cost = ncopies * exact_log2(group_size) * group_size
691 * vect_get_stmt_cost (vector_stmt);
693 if (vect_print_dump_info (REPORT_COST))
694 fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .",
695 group_size);
699 /* Costs of the stores. */
700 vect_get_store_cost (first_dr, ncopies, &inside_cost);
702 if (vect_print_dump_info (REPORT_COST))
703 fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, "
704 "outside_cost = %d .", inside_cost, outside_cost);
706 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
707 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
708 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
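/* Worked example (illustrative, with unit costs): for the first store
   of an interleaved group of 4, with ncopies == 1 and an aligned
   data-ref, the permute cost is 1 * log2(4) * 4 = 8 vector_stmt
   operations and vect_get_store_cost adds one vector_store, so
   inside_cost == 9.  */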
712 /* Calculate cost of DR's memory access. */
713 void
714 vect_get_store_cost (struct data_reference *dr, int ncopies,
715 unsigned int *inside_cost)
717 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
719 switch (alignment_support_scheme)
721 case dr_aligned:
723 *inside_cost += ncopies * vect_get_stmt_cost (vector_store);
725 if (vect_print_dump_info (REPORT_COST))
726 fprintf (vect_dump, "vect_model_store_cost: aligned.");
728 break;
731 case dr_unaligned_supported:
733 gimple stmt = DR_STMT (dr);
734 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
735 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
737 /* Here, we assign an additional cost for the unaligned store. */
738 *inside_cost += ncopies
739 * targetm.vectorize.builtin_vectorization_cost (unaligned_store,
740 vectype, DR_MISALIGNMENT (dr));
742 if (vect_print_dump_info (REPORT_COST))
743 fprintf (vect_dump, "vect_model_store_cost: unaligned supported by "
744 "hardware.");
746 break;
749 default:
750 gcc_unreachable ();
755 /* Function vect_model_load_cost
757 Models cost for loads. In the case of strided accesses, the last access
758 has the overhead of the strided access attributed to it. Since unaligned
759 accesses are supported for loads, we also account for the costs of the
760 access scheme chosen. */
762 void
763 vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)
766 int group_size;
767 gimple first_stmt;
768 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
769 unsigned int inside_cost = 0, outside_cost = 0;
771 /* The SLP costs were already calculated during SLP tree build. */
772 if (PURE_SLP_STMT (stmt_info))
773 return;
775 /* Strided accesses? */
776 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
777 if (first_stmt && !slp_node)
779 group_size = vect_cost_strided_group_size (stmt_info);
780 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
782 /* Not a strided access. */
783 else
785 group_size = 1;
786 first_dr = dr;
789 /* Is this an access in a group of loads providing strided access?
790 If so, add in the cost of the permutes. */
791 if (group_size > 1)
793 /* Uses even and odd extract operations for each needed permute. */
794 inside_cost = ncopies * exact_log2(group_size) * group_size
795 * vect_get_stmt_cost (vector_stmt);
797 if (vect_print_dump_info (REPORT_COST))
798 fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .",
799 group_size);
802 /* The loads themselves. */
803 vect_get_load_cost (first_dr, ncopies,
804 ((!DR_GROUP_FIRST_DR (stmt_info)) || group_size > 1 || slp_node),
805 &inside_cost, &outside_cost);
807 if (vect_print_dump_info (REPORT_COST))
808 fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
809 "outside_cost = %d .", inside_cost, outside_cost);
811 /* Set the costs either in STMT_INFO or SLP_NODE (if exists). */
812 stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
813 stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
817 /* Calculate cost of DR's memory access. */
818 void
819 vect_get_load_cost (struct data_reference *dr, int ncopies,
820 bool add_realign_cost, unsigned int *inside_cost,
821 unsigned int *outside_cost)
823 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
825 switch (alignment_support_scheme)
827 case dr_aligned:
829 *inside_cost += ncopies * vect_get_stmt_cost (vector_load);
831 if (vect_print_dump_info (REPORT_COST))
832 fprintf (vect_dump, "vect_model_load_cost: aligned.");
834 break;
836 case dr_unaligned_supported:
838 gimple stmt = DR_STMT (dr);
839 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
840 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
842 /* Here, we assign an additional cost for the unaligned load. */
843 *inside_cost += ncopies
844 * targetm.vectorize.builtin_vectorization_cost (unaligned_load,
845 vectype, DR_MISALIGNMENT (dr));
846 if (vect_print_dump_info (REPORT_COST))
847 fprintf (vect_dump, "vect_model_load_cost: unaligned supported by "
848 "hardware.");
850 break;
852 case dr_explicit_realign:
854 *inside_cost += ncopies * (2 * vect_get_stmt_cost (vector_load)
855 + vect_get_stmt_cost (vector_stmt));
857 /* FIXME: If the misalignment remains fixed across the iterations of
858 the containing loop, the following cost should be added to the
859 outside costs. */
860 if (targetm.vectorize.builtin_mask_for_load)
861 *inside_cost += vect_get_stmt_cost (vector_stmt);
863 break;
865 case dr_explicit_realign_optimized:
867 if (vect_print_dump_info (REPORT_COST))
868 fprintf (vect_dump, "vect_model_load_cost: unaligned software "
869 "pipelined.");
871 /* Unaligned software pipeline has a load of an address, an initial
872 load, and possibly a mask operation to "prime" the loop. However,
873 if this is an access in a group of loads, which provide strided
874 access, then the above cost should only be considered for one
875 access in the group. Inside the loop, there is a load op
876 and a realignment op. */
878 if (add_realign_cost)
880 *outside_cost = 2 * vect_get_stmt_cost (vector_stmt);
881 if (targetm.vectorize.builtin_mask_for_load)
882 *outside_cost += vect_get_stmt_cost (vector_stmt);
885 *inside_cost += ncopies * (vect_get_stmt_cost (vector_load)
886 + vect_get_stmt_cost (vector_stmt));
887 break;
890 default:
891 gcc_unreachable ();
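/* Worked example (illustrative, with unit costs): an aligned load
   costs ncopies * vector_load.  For dr_explicit_realign_optimized with
   add_realign_cost set, the code outside the loop pays 2 vector_stmt
   (the address load and the initial load), plus one more if the target
   provides builtin_mask_for_load, while each copy inside the loop pays
   one vector_load plus one vector_stmt for the realignment.  */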
896 /* Function vect_init_vector.
898 Insert a new stmt (INIT_STMT) that initializes a new vector variable with
899 the vector elements of VECTOR_VAR. Place the initialization at BSI if it
900 is not NULL. Otherwise, place the initialization at the loop preheader.
901 Return the DEF of INIT_STMT.
902 It will be used in the vectorization of STMT. */
904 tree
905 vect_init_vector (gimple stmt, tree vector_var, tree vector_type,
906 gimple_stmt_iterator *gsi)
908 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
909 tree new_var;
910 gimple init_stmt;
911 tree vec_oprnd;
912 edge pe;
913 tree new_temp;
914 basic_block new_bb;
916 new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
917 add_referenced_var (new_var);
918 init_stmt = gimple_build_assign (new_var, vector_var);
919 new_temp = make_ssa_name (new_var, init_stmt);
920 gimple_assign_set_lhs (init_stmt, new_temp);
922 if (gsi)
923 vect_finish_stmt_generation (stmt, init_stmt, gsi);
924 else
926 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
928 if (loop_vinfo)
930 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
932 if (nested_in_vect_loop_p (loop, stmt))
933 loop = loop->inner;
935 pe = loop_preheader_edge (loop);
936 new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
937 gcc_assert (!new_bb);
939 else
941 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
942 basic_block bb;
943 gimple_stmt_iterator gsi_bb_start;
945 gcc_assert (bb_vinfo);
946 bb = BB_VINFO_BB (bb_vinfo);
947 gsi_bb_start = gsi_after_labels (bb);
948 gsi_insert_before (&gsi_bb_start, init_stmt, GSI_SAME_STMT);
952 if (vect_print_dump_info (REPORT_DETAILS))
954 fprintf (vect_dump, "created new init_stmt: ");
955 print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
958 vec_oprnd = gimple_assign_lhs (init_stmt);
959 return vec_oprnd;
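/* Usage sketch (illustrative): when vectorizing "x_1 = y_2 + 3" with a
   V4SI vectype, the constant operand is turned into the vector
   {3, 3, 3, 3} and passed here as VECTOR_VAR with GSI == NULL; the
   function emits "cst_N = {3, 3, 3, 3};" in the loop preheader and
   returns the new SSA name, which then becomes the vector operand of
   the vectorized addition (see case vect_constant_def below).  */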
963 /* Function vect_get_vec_def_for_operand.
965 OP is an operand in STMT. This function returns a (vector) def that will be
966 used in the vectorized stmt for STMT.
968 In the case that OP is an SSA_NAME which is defined in the loop, then
969 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
971 In case OP is an invariant or constant, a new stmt that creates a vector def
972 needs to be introduced. */
974 tree
975 vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
977 tree vec_oprnd;
978 gimple vec_stmt;
979 gimple def_stmt;
980 stmt_vec_info def_stmt_info = NULL;
981 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
982 tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
983 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
984 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
985 tree vec_inv;
986 tree vec_cst;
987 tree t = NULL_TREE;
988 tree def;
989 int i;
990 enum vect_def_type dt;
991 bool is_simple_use;
992 tree vector_type;
994 if (vect_print_dump_info (REPORT_DETAILS))
996 fprintf (vect_dump, "vect_get_vec_def_for_operand: ");
997 print_generic_expr (vect_dump, op, TDF_SLIM);
1000 is_simple_use = vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def,
1001 &dt);
1002 gcc_assert (is_simple_use);
1003 if (vect_print_dump_info (REPORT_DETAILS))
1005 if (def)
1007 fprintf (vect_dump, "def = ");
1008 print_generic_expr (vect_dump, def, TDF_SLIM);
1010 if (def_stmt)
1012 fprintf (vect_dump, " def_stmt = ");
1013 print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
1017 switch (dt)
1019 /* Case 1: operand is a constant. */
1020 case vect_constant_def:
1022 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
1023 gcc_assert (vector_type);
1025 if (scalar_def)
1026 *scalar_def = op;
1028 /* Create 'vect_cst_ = {cst,cst,...,cst}' */
1029 if (vect_print_dump_info (REPORT_DETAILS))
1030 fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits);
1032 for (i = nunits - 1; i >= 0; --i)
1034 t = tree_cons (NULL_TREE, op, t);
1036 vec_cst = build_vector (vector_type, t);
1037 return vect_init_vector (stmt, vec_cst, vector_type, NULL);
1040 /* Case 2: operand is defined outside the loop - loop invariant. */
1041 case vect_external_def:
1043 vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
1044 gcc_assert (vector_type);
1045 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
1047 if (scalar_def)
1048 *scalar_def = def;
1050 /* Create 'vec_inv = {inv,inv,..,inv}' */
1051 if (vect_print_dump_info (REPORT_DETAILS))
1052 fprintf (vect_dump, "Create vector_inv.");
1054 for (i = nunits - 1; i >= 0; --i)
1056 t = tree_cons (NULL_TREE, def, t);
1059 /* FIXME: use build_constructor directly. */
1060 vec_inv = build_constructor_from_list (vector_type, t);
1061 return vect_init_vector (stmt, vec_inv, vector_type, NULL);
1064 /* Case 3: operand is defined inside the loop. */
1065 case vect_internal_def:
1067 if (scalar_def)
1068 *scalar_def = NULL/* FIXME tuples: def_stmt*/;
1070 /* Get the def from the vectorized stmt. */
1071 def_stmt_info = vinfo_for_stmt (def_stmt);
1072 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1073 gcc_assert (vec_stmt);
1074 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1075 vec_oprnd = PHI_RESULT (vec_stmt);
1076 else if (is_gimple_call (vec_stmt))
1077 vec_oprnd = gimple_call_lhs (vec_stmt);
1078 else
1079 vec_oprnd = gimple_assign_lhs (vec_stmt);
1080 return vec_oprnd;
1083 /* Case 4: operand is defined by a loop header phi - reduction */
1084 case vect_reduction_def:
1085 case vect_double_reduction_def:
1086 case vect_nested_cycle:
1088 struct loop *loop;
1090 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1091 loop = (gimple_bb (def_stmt))->loop_father;
1093 /* Get the def before the loop */
1094 op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
1095 return get_initial_def_for_reduction (stmt, op, scalar_def);
1098 /* Case 5: operand is defined by loop-header phi - induction. */
1099 case vect_induction_def:
1101 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1103 /* Get the def from the vectorized stmt. */
1104 def_stmt_info = vinfo_for_stmt (def_stmt);
1105 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1106 gcc_assert (vec_stmt && gimple_code (vec_stmt) == GIMPLE_PHI);
1107 vec_oprnd = PHI_RESULT (vec_stmt);
1108 return vec_oprnd;
1111 default:
1112 gcc_unreachable ();
1117 /* Function vect_get_vec_def_for_stmt_copy
1119 Return a vector-def for an operand. This function is used when the
1120 vectorized stmt to be created (by the caller to this function) is a "copy"
1121 created in case the vectorized result cannot fit in one vector, and several
1122 copies of the vector-stmt are required. In this case the vector-def is
1123 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1124 of the stmt that defines VEC_OPRND.
1125 DT is the type of the vector def VEC_OPRND.
1127 Context:
1128 In case the vectorization factor (VF) is bigger than the number
1129 of elements that can fit in a vectype (nunits), we have to generate
1130 more than one vector stmt to vectorize the scalar stmt. This situation
1131 arises when there are multiple data-types operated upon in the loop; the
1132 smallest data-type determines the VF, and as a result, when vectorizing
1133 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1134 vector stmt (each computing a vector of 'nunits' results, and together
1135 computing 'VF' results in each iteration). This function is called when
1136 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1137 which VF=16 and nunits=4, so the number of copies required is 4):
1139 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1141 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1142 VS1.1: vx.1 = memref1 VS1.2
1143 VS1.2: vx.2 = memref2 VS1.3
1144 VS1.3: vx.3 = memref3
1146 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1147 VSnew.1: vz1 = vx.1 + ... VSnew.2
1148 VSnew.2: vz2 = vx.2 + ... VSnew.3
1149 VSnew.3: vz3 = vx.3 + ...
1151 The vectorization of S1 is explained in vectorizable_load.
1152 The vectorization of S2:
1153 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1154 the function 'vect_get_vec_def_for_operand' is called to
1155 get the relevant vector-def for each operand of S2. For operand x it
1156 returns the vector-def 'vx.0'.
1158 To create the remaining copies of the vector-stmt (VSnew.j), this
1159 function is called to get the relevant vector-def for each operand. It is
1160 obtained from the respective VS1.j stmt, which is recorded in the
1161 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1163 For example, to obtain the vector-def 'vx.1' in order to create the
1164 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1165 Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
1166 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1167 and return its def ('vx.1').
1168 Overall, to create the above sequence this function will be called 3 times:
1169 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1170 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1171 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1173 tree
1174 vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1176 gimple vec_stmt_for_operand;
1177 stmt_vec_info def_stmt_info;
1179 /* Do nothing; can reuse same def. */
1180 if (dt == vect_external_def || dt == vect_constant_def )
1181 return vec_oprnd;
1183 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1184 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1185 gcc_assert (def_stmt_info);
1186 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1187 gcc_assert (vec_stmt_for_operand);
1188 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1189 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1190 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1191 else
1192 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1193 return vec_oprnd;
1197 /* Get vectorized definitions for the operands to create a copy of an original
1198 stmt. See vect_get_vec_def_for_stmt_copy() for details. */
1200 static void
1201 vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
1202 VEC(tree,heap) **vec_oprnds0,
1203 VEC(tree,heap) **vec_oprnds1)
1205 tree vec_oprnd = VEC_pop (tree, *vec_oprnds0);
1207 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
1208 VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
1210 if (vec_oprnds1 && *vec_oprnds1)
1212 vec_oprnd = VEC_pop (tree, *vec_oprnds1);
1213 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
1214 VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
1219 /* Get vectorized definitions for OP0 and OP1, or SLP_NODE if it is not NULL. */
1221 static void
1222 vect_get_vec_defs (tree op0, tree op1, gimple stmt,
1223 VEC(tree,heap) **vec_oprnds0, VEC(tree,heap) **vec_oprnds1,
1224 slp_tree slp_node)
1226 if (slp_node)
1227 vect_get_slp_defs (slp_node, vec_oprnds0, vec_oprnds1, -1);
1228 else
1230 tree vec_oprnd;
1232 *vec_oprnds0 = VEC_alloc (tree, heap, 1);
1233 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
1234 VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);
1236 if (op1)
1238 *vec_oprnds1 = VEC_alloc (tree, heap, 1);
1239 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
1240 VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
1246 /* Function vect_finish_stmt_generation.
1248 Insert a new stmt. */
1250 void
1251 vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
1252 gimple_stmt_iterator *gsi)
1254 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1255 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1256 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1258 gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
1260 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1262 set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
1263 bb_vinfo));
1265 if (vect_print_dump_info (REPORT_DETAILS))
1267 fprintf (vect_dump, "add new stmt: ");
1268 print_gimple_stmt (vect_dump, vec_stmt, 0, TDF_SLIM);
1271 gimple_set_location (vec_stmt, gimple_location (gsi_stmt (*gsi)));
1274 /* Checks if CALL can be vectorized in type VECTYPE. Returns
1275 a function declaration if the target has a vectorized version
1276 of the function, or NULL_TREE if the function cannot be vectorized. */
1278 tree
1279 vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
1281 tree fndecl = gimple_call_fndecl (call);
1283 /* We only handle functions that do not read or clobber memory -- i.e.
1284 const or novops ones. */
1285 if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
1286 return NULL_TREE;
1288 if (!fndecl
1289 || TREE_CODE (fndecl) != FUNCTION_DECL
1290 || !DECL_BUILT_IN (fndecl))
1291 return NULL_TREE;
1293 return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
1294 vectype_in);
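/* Example (illustrative; the exact builtin is target-specific): for a
   const call such as "y_1 = sqrtf (x_2)" in a loop vectorized with
   V4SF, this asks the target hook for a vector variant of sqrtf with
   vectype_out == vectype_in == V4SF.  A target that has one returns
   its decl and vectorizable_call below emits one call per copy; if the
   hook returns NULL_TREE the call cannot be vectorized.  */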
1297 /* Function vectorizable_call.
1299 Check if STMT performs a function call that can be vectorized.
1300 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1301 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1302 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1304 static bool
1305 vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
1307 tree vec_dest;
1308 tree scalar_dest;
1309 tree op, type;
1310 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1311 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
1312 tree vectype_out, vectype_in;
1313 int nunits_in;
1314 int nunits_out;
1315 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1316 tree fndecl, new_temp, def, rhs_type;
1317 gimple def_stmt;
1318 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1319 gimple new_stmt = NULL;
1320 int ncopies, j;
1321 VEC(tree, heap) *vargs = NULL;
1322 enum { NARROW, NONE, WIDEN } modifier;
1323 size_t i, nargs;
1325 /* FORNOW: unsupported in basic block SLP. */
1326 gcc_assert (loop_vinfo);
1328 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1329 return false;
1331 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1332 return false;
1334 /* FORNOW: SLP not supported. */
1335 if (STMT_SLP_TYPE (stmt_info))
1336 return false;
1338 /* Is STMT a vectorizable call? */
1339 if (!is_gimple_call (stmt))
1340 return false;
1342 if (TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
1343 return false;
1345 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
1347 /* Process function arguments. */
1348 rhs_type = NULL_TREE;
1349 vectype_in = NULL_TREE;
1350 nargs = gimple_call_num_args (stmt);
1352 /* Bail out if the function has more than two arguments; we
1353 do not have interesting builtin functions to vectorize with
1354 more than two arguments. No arguments is also not good. */
1355 if (nargs == 0 || nargs > 2)
1356 return false;
1358 for (i = 0; i < nargs; i++)
1360 tree opvectype;
1362 op = gimple_call_arg (stmt, i);
1364 /* We can only handle calls with arguments of the same type. */
1365 if (rhs_type
1366 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
1368 if (vect_print_dump_info (REPORT_DETAILS))
1369 fprintf (vect_dump, "argument types differ.");
1370 return false;
1372 if (!rhs_type)
1373 rhs_type = TREE_TYPE (op);
1375 if (!vect_is_simple_use_1 (op, loop_vinfo, NULL,
1376 &def_stmt, &def, &dt[i], &opvectype))
1378 if (vect_print_dump_info (REPORT_DETAILS))
1379 fprintf (vect_dump, "use not simple.");
1380 return false;
1383 if (!vectype_in)
1384 vectype_in = opvectype;
1385 else if (opvectype
1386 && opvectype != vectype_in)
1388 if (vect_print_dump_info (REPORT_DETAILS))
1389 fprintf (vect_dump, "argument vector types differ.");
1390 return false;
1393 /* If all arguments are external or constant defs use a vector type with
1394 the same size as the output vector type. */
1395 if (!vectype_in)
1396 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
1397 if (vec_stmt)
1398 gcc_assert (vectype_in);
1399 if (!vectype_in)
1401 if (vect_print_dump_info (REPORT_DETAILS))
1403 fprintf (vect_dump, "no vectype for scalar type ");
1404 print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
1407 return false;
1410 /* FORNOW */
1411 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1412 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1413 if (nunits_in == nunits_out / 2)
1414 modifier = NARROW;
1415 else if (nunits_out == nunits_in)
1416 modifier = NONE;
1417 else if (nunits_out == nunits_in / 2)
1418 modifier = WIDEN;
1419 else
1420 return false;
1422 /* For now, we only vectorize functions if a target specific builtin
1423 is available. TODO -- in some cases, it might be profitable to
1424 insert the calls for pieces of the vector, in order to be able
1425 to vectorize other operations in the loop. */
1426 fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
1427 if (fndecl == NULL_TREE)
1429 if (vect_print_dump_info (REPORT_DETAILS))
1430 fprintf (vect_dump, "function is not vectorizable.");
1432 return false;
1435 gcc_assert (!gimple_vuse (stmt));
1437 if (modifier == NARROW)
1438 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1439 else
1440 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1442 /* Sanity check: make sure that at least one copy of the vectorized stmt
1443 needs to be generated. */
1444 gcc_assert (ncopies >= 1);
1446 if (!vec_stmt) /* transformation not required. */
1448 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
1449 if (vect_print_dump_info (REPORT_DETAILS))
1450 fprintf (vect_dump, "=== vectorizable_call ===");
1451 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1452 return true;
1455 /** Transform. **/
1457 if (vect_print_dump_info (REPORT_DETAILS))
1458 fprintf (vect_dump, "transform operation.");
1460 /* Handle def. */
1461 scalar_dest = gimple_call_lhs (stmt);
1462 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1464 prev_stmt_info = NULL;
1465 switch (modifier)
1467 case NONE:
1468 for (j = 0; j < ncopies; ++j)
1470 /* Build argument list for the vectorized call. */
1471 if (j == 0)
1472 vargs = VEC_alloc (tree, heap, nargs);
1473 else
1474 VEC_truncate (tree, vargs, 0);
1476 for (i = 0; i < nargs; i++)
1478 op = gimple_call_arg (stmt, i);
1479 if (j == 0)
1480 vec_oprnd0
1481 = vect_get_vec_def_for_operand (op, stmt, NULL);
1482 else
1484 vec_oprnd0 = gimple_call_arg (new_stmt, i);
1485 vec_oprnd0
1486 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1489 VEC_quick_push (tree, vargs, vec_oprnd0);
1492 new_stmt = gimple_build_call_vec (fndecl, vargs);
1493 new_temp = make_ssa_name (vec_dest, new_stmt);
1494 gimple_call_set_lhs (new_stmt, new_temp);
1496 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1497 mark_symbols_for_renaming (new_stmt);
1499 if (j == 0)
1500 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1501 else
1502 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1504 prev_stmt_info = vinfo_for_stmt (new_stmt);
1507 break;
1509 case NARROW:
1510 for (j = 0; j < ncopies; ++j)
1512 /* Build argument list for the vectorized call. */
1513 if (j == 0)
1514 vargs = VEC_alloc (tree, heap, nargs * 2);
1515 else
1516 VEC_truncate (tree, vargs, 0);
1518 for (i = 0; i < nargs; i++)
1520 op = gimple_call_arg (stmt, i);
1521 if (j == 0)
1523 vec_oprnd0
1524 = vect_get_vec_def_for_operand (op, stmt, NULL);
1525 vec_oprnd1
1526 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1528 else
1530 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i);
1531 vec_oprnd0
1532 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
1533 vec_oprnd1
1534 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
1537 VEC_quick_push (tree, vargs, vec_oprnd0);
1538 VEC_quick_push (tree, vargs, vec_oprnd1);
1541 new_stmt = gimple_build_call_vec (fndecl, vargs);
1542 new_temp = make_ssa_name (vec_dest, new_stmt);
1543 gimple_call_set_lhs (new_stmt, new_temp);
1545 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1546 mark_symbols_for_renaming (new_stmt);
1548 if (j == 0)
1549 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1550 else
1551 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1553 prev_stmt_info = vinfo_for_stmt (new_stmt);
1556 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1558 break;
1560 case WIDEN:
1561 /* No current target implements this case. */
1562 return false;
1565 VEC_free (tree, heap, vargs);
1567 /* Update the exception handling table with the vector stmt if necessary. */
1568 if (maybe_clean_or_replace_eh_stmt (stmt, *vec_stmt))
1569 gimple_purge_dead_eh_edges (gimple_bb (stmt));
1571 /* The call in STMT might prevent it from being removed in dce.
1572 We however cannot remove it here, due to the way the ssa name
1573 it defines is mapped to the new definition. So just replace
1574 rhs of the statement with something harmless. */
1576 type = TREE_TYPE (scalar_dest);
1577 new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
1578 fold_convert (type, integer_zero_node));
1579 set_vinfo_for_stmt (new_stmt, stmt_info);
1580 set_vinfo_for_stmt (stmt, NULL);
1581 STMT_VINFO_STMT (stmt_info) = new_stmt;
1582 gsi_replace (gsi, new_stmt, false);
1583 SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
1585 return true;
1589 /* Function vect_gen_widened_results_half
1591 Create a vector stmt whose code, type, number of arguments, and result
1592 variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
1593 VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at BSI.
1594 In the case that CODE is a CALL_EXPR, this means that a call to DECL
1595 needs to be created (DECL is a function-decl of a target-builtin).
1596 STMT is the original scalar stmt that we are vectorizing. */
1598 static gimple
1599 vect_gen_widened_results_half (enum tree_code code,
1600 tree decl,
1601 tree vec_oprnd0, tree vec_oprnd1, int op_type,
1602 tree vec_dest, gimple_stmt_iterator *gsi,
1603 gimple stmt)
1605 gimple new_stmt;
1606 tree new_temp;
1608 /* Generate half of the widened result: */
1609 if (code == CALL_EXPR)
1611 /* Target specific support */
1612 if (op_type == binary_op)
1613 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
1614 else
1615 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
1616 new_temp = make_ssa_name (vec_dest, new_stmt);
1617 gimple_call_set_lhs (new_stmt, new_temp);
1619 else
1621 /* Generic support */
1622 gcc_assert (op_type == TREE_CODE_LENGTH (code));
1623 if (op_type != binary_op)
1624 vec_oprnd1 = NULL;
1625 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
1626 vec_oprnd1);
1627 new_temp = make_ssa_name (vec_dest, new_stmt);
1628 gimple_assign_set_lhs (new_stmt, new_temp);
1630 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1632 return new_stmt;
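/* Example (illustrative): widening a V8HI operand into V4SI results
   uses two halves; with generic support the caller passes e.g.
   code1 = VEC_UNPACK_LO_EXPR and code2 = VEC_UNPACK_HI_EXPR and this
   function builds one assignment per half, while targets that need a
   builtin pass code == CALL_EXPR together with the decl instead.  */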
1636 /* Check if STMT performs a conversion operation, that can be vectorized.
1637 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1638 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1639 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1641 static bool
1642 vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
1643 gimple *vec_stmt, slp_tree slp_node)
1645 tree vec_dest;
1646 tree scalar_dest;
1647 tree op0;
1648 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1649 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1650 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1651 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
1652 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
1653 tree new_temp;
1654 tree def;
1655 gimple def_stmt;
1656 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1657 gimple new_stmt = NULL;
1658 stmt_vec_info prev_stmt_info;
1659 int nunits_in;
1660 int nunits_out;
1661 tree vectype_out, vectype_in;
1662 int ncopies, j;
1663 tree rhs_type;
1664 tree builtin_decl;
1665 enum { NARROW, NONE, WIDEN } modifier;
1666 int i;
1667 VEC(tree,heap) *vec_oprnds0 = NULL;
1668 tree vop0;
1669 VEC(tree,heap) *dummy = NULL;
1670 int dummy_int;
1672 /* Is STMT a vectorizable conversion? */
1674 /* FORNOW: unsupported in basic block SLP. */
1675 gcc_assert (loop_vinfo);
1677 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1678 return false;
1680 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1681 return false;
1683 if (!is_gimple_assign (stmt))
1684 return false;
1686 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
1687 return false;
1689 code = gimple_assign_rhs_code (stmt);
1690 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
1691 return false;
1693 /* Check types of lhs and rhs. */
1694 scalar_dest = gimple_assign_lhs (stmt);
1695 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
1697 op0 = gimple_assign_rhs1 (stmt);
1698 rhs_type = TREE_TYPE (op0);
1699 /* Check the operands of the operation. */
1700 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
1701 &def_stmt, &def, &dt[0], &vectype_in))
1703 if (vect_print_dump_info (REPORT_DETAILS))
1704 fprintf (vect_dump, "use not simple.");
1705 return false;
1707 /* If op0 is an external or constant defs use a vector type of
1708 the same size as the output vector type. */
1709 if (!vectype_in)
1710 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
1711 if (vec_stmt)
1712 gcc_assert (vectype_in);
1713 if (!vectype_in)
1715 if (vect_print_dump_info (REPORT_DETAILS))
1717 fprintf (vect_dump, "no vectype for scalar type ");
1718 print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
1721 return false;
1724 /* FORNOW */
1725 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1726 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1727 if (nunits_in == nunits_out / 2)
1728 modifier = NARROW;
1729 else if (nunits_out == nunits_in)
1730 modifier = NONE;
1731 else if (nunits_out == nunits_in / 2)
1732 modifier = WIDEN;
1733 else
1734 return false;
1736 if (modifier == NARROW)
1737 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1738 else
1739 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1741 /* FORNOW: SLP with multiple types is not supported. The SLP analysis verifies
1742 this, so we can safely override NCOPIES with 1 here. */
1743 if (slp_node)
1744 ncopies = 1;
1746 /* Sanity check: make sure that at least one copy of the vectorized stmt
1747 needs to be generated. */
1748 gcc_assert (ncopies >= 1);
1750 /* Supportable by target? */
1751 if ((modifier == NONE
1752 && !targetm.vectorize.builtin_conversion (code, vectype_out, vectype_in))
1753 || (modifier == WIDEN
1754 && !supportable_widening_operation (code, stmt,
1755 vectype_out, vectype_in,
1756 &decl1, &decl2,
1757 &code1, &code2,
1758 &dummy_int, &dummy))
1759 || (modifier == NARROW
1760 && !supportable_narrowing_operation (code, vectype_out, vectype_in,
1761 &code1, &dummy_int, &dummy)))
1763 if (vect_print_dump_info (REPORT_DETAILS))
1764 fprintf (vect_dump, "conversion not supported by target.");
1765 return false;
1768 if (modifier != NONE)
1770 /* FORNOW: SLP not supported. */
1771 if (STMT_SLP_TYPE (stmt_info))
1772 return false;
1775 if (!vec_stmt) /* transformation not required. */
1777 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
1778 return true;
1781 /** Transform. **/
1782 if (vect_print_dump_info (REPORT_DETAILS))
1783 fprintf (vect_dump, "transform conversion.");
1785 /* Handle def. */
1786 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1788 if (modifier == NONE && !slp_node)
1789 vec_oprnds0 = VEC_alloc (tree, heap, 1);
1791 prev_stmt_info = NULL;
1792 switch (modifier)
1794 case NONE:
1795 for (j = 0; j < ncopies; j++)
1797 if (j == 0)
1798 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
1799 else
1800 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
1802 builtin_decl =
1803 targetm.vectorize.builtin_conversion (code,
1804 vectype_out, vectype_in);
1805 for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vop0); i++)
1807 /* Arguments are ready. Create the new vector stmt. */
1808 new_stmt = gimple_build_call (builtin_decl, 1, vop0);
1809 new_temp = make_ssa_name (vec_dest, new_stmt);
1810 gimple_call_set_lhs (new_stmt, new_temp);
1811 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1812 if (slp_node)
1813 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
1816 if (j == 0)
1817 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1818 else
1819 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1820 prev_stmt_info = vinfo_for_stmt (new_stmt);
1822 break;
1824 case WIDEN:
1825 /* In case the vectorization factor (VF) is bigger than the number
1826 of elements that we can fit in a vectype (nunits), we have to
1827 generate more than one vector stmt - i.e - we need to "unroll"
1828 the vector stmt by a factor VF/nunits. */
1829 for (j = 0; j < ncopies; j++)
1831 if (j == 0)
1832 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1833 else
1834 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1836 /* Generate first half of the widened result: */
1837 new_stmt
1838 = vect_gen_widened_results_half (code1, decl1,
1839 vec_oprnd0, vec_oprnd1,
1840 unary_op, vec_dest, gsi, stmt);
1841 if (j == 0)
1842 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1843 else
1844 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1845 prev_stmt_info = vinfo_for_stmt (new_stmt);
1847 /* Generate second half of the widened result: */
1848 new_stmt
1849 = vect_gen_widened_results_half (code2, decl2,
1850 vec_oprnd0, vec_oprnd1,
1851 unary_op, vec_dest, gsi, stmt);
1852 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1853 prev_stmt_info = vinfo_for_stmt (new_stmt);
1855 break;
1857 case NARROW:
1858 /* In case the vectorization factor (VF) is bigger than the number
1859 of elements that we can fit in a vectype (nunits), we have to
1860 generate more than one vector stmt - i.e - we need to "unroll"
1861 the vector stmt by a factor VF/nunits. */
1862 for (j = 0; j < ncopies; j++)
1864 /* Handle uses. */
1865 if (j == 0)
1867 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1868 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1870 else
1872 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1);
1873 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1876 /* Arguments are ready. Create the new vector stmt. */
1877 new_stmt = gimple_build_assign_with_ops (code1, vec_dest, vec_oprnd0,
1878 vec_oprnd1);
1879 new_temp = make_ssa_name (vec_dest, new_stmt);
1880 gimple_assign_set_lhs (new_stmt, new_temp);
1881 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1883 if (j == 0)
1884 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1885 else
1886 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1888 prev_stmt_info = vinfo_for_stmt (new_stmt);
1891 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1894 if (vec_oprnds0)
1895 VEC_free (tree, heap, vec_oprnds0);
1897 return true;
1899 /* Function vectorizable_assignment.
1901 Check if STMT performs an assignment (copy) that can be vectorized.
1902 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1903 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1904 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
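
/* For illustration (the vector modes named here are only an example):
   typical stmts handled by this function are a plain copy, or a
   conversion that changes neither the number of vector elements nor
   the vector size, e.g. with a four-element integer vectype (V4SI):

     S1: a_1 = b_2;                    copy
     S2: u_3 = (unsigned int) i_4;     same-size conversion

   S2 is vectorized by applying a VIEW_CONVERT_EXPR to the vector
   operand, as done in the transformation code below.  */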
1906 static bool
1907 vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
1908 gimple *vec_stmt, slp_tree slp_node)
1910 tree vec_dest;
1911 tree scalar_dest;
1912 tree op;
1913 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1914 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1915 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1916 tree new_temp;
1917 tree def;
1918 gimple def_stmt;
1919 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1920 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
1921 int ncopies;
1922 int i, j;
1923 VEC(tree,heap) *vec_oprnds = NULL;
1924 tree vop;
1925 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1926 gimple new_stmt = NULL;
1927 stmt_vec_info prev_stmt_info = NULL;
1928 enum tree_code code;
1929 tree vectype_in;
1931 /* Multiple types in SLP are handled by creating the appropriate number of
1932 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1933 case of SLP. */
1934 if (slp_node)
1935 ncopies = 1;
1936 else
1937 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
1939 gcc_assert (ncopies >= 1);
1941 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
1942 return false;
1944 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1945 return false;
1947 /* Is vectorizable assignment? */
1948 if (!is_gimple_assign (stmt))
1949 return false;
1951 scalar_dest = gimple_assign_lhs (stmt);
1952 if (TREE_CODE (scalar_dest) != SSA_NAME)
1953 return false;
1955 code = gimple_assign_rhs_code (stmt);
1956 if (gimple_assign_single_p (stmt)
1957 || code == PAREN_EXPR
1958 || CONVERT_EXPR_CODE_P (code))
1959 op = gimple_assign_rhs1 (stmt);
1960 else
1961 return false;
1963 if (!vect_is_simple_use_1 (op, loop_vinfo, bb_vinfo,
1964 &def_stmt, &def, &dt[0], &vectype_in))
1966 if (vect_print_dump_info (REPORT_DETAILS))
1967 fprintf (vect_dump, "use not simple.");
1968 return false;
1971 /* We can handle NOP_EXPR conversions that do not change the number
1972 of elements or the vector size. */
1973 if (CONVERT_EXPR_CODE_P (code)
1974 && (!vectype_in
1975 || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
1976 || (GET_MODE_SIZE (TYPE_MODE (vectype))
1977 != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
1978 return false;
1980 if (!vec_stmt) /* transformation not required. */
1982 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
1983 if (vect_print_dump_info (REPORT_DETAILS))
1984 fprintf (vect_dump, "=== vectorizable_assignment ===");
1985 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
1986 return true;
1989 /** Transform. **/
1990 if (vect_print_dump_info (REPORT_DETAILS))
1991 fprintf (vect_dump, "transform assignment.");
1993 /* Handle def. */
1994 vec_dest = vect_create_destination_var (scalar_dest, vectype);
1996 /* Handle use. */
1997 for (j = 0; j < ncopies; j++)
1999 /* Handle uses. */
2000 if (j == 0)
2001 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
2002 else
2003 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
2005       /* Arguments are ready.  Create the new vector stmt.  */
2006 for (i = 0; VEC_iterate (tree, vec_oprnds, i, vop); i++)
2008 if (CONVERT_EXPR_CODE_P (code))
2009 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
2010 new_stmt = gimple_build_assign (vec_dest, vop);
2011 new_temp = make_ssa_name (vec_dest, new_stmt);
2012 gimple_assign_set_lhs (new_stmt, new_temp);
2013 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2014 if (slp_node)
2015 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2018 if (slp_node)
2019 continue;
2021 if (j == 0)
2022 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2023 else
2024 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2026 prev_stmt_info = vinfo_for_stmt (new_stmt);
2029 VEC_free (tree, heap, vec_oprnds);
2030 return true;
2033 /* Function vectorizable_operation.
2035 Check if STMT performs a binary or unary operation that can be vectorized.
2036 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2037 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2038 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
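
/* For illustration: a typical stmt handled by this function is

     S1: z_1 = x_2 + y_3;

   which is vectorized as a single vector PLUS_EXPR on the operands'
   vectype (e.g. V4SI), repeated NCOPIES times when the vectorization
   factor exceeds the number of vector elements.  Shifts and rotates
   are handled as well, including the case where the shift amount is a
   loop-invariant scalar rather than a vector.  */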
2040 static bool
2041 vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
2042 gimple *vec_stmt, slp_tree slp_node)
2044 tree vec_dest;
2045 tree scalar_dest;
2046 tree op0, op1 = NULL;
2047 tree vec_oprnd1 = NULL_TREE;
2048 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2049 tree vectype;
2050 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2051 enum tree_code code;
2052 enum machine_mode vec_mode;
2053 tree new_temp;
2054 int op_type;
2055 optab optab;
2056 int icode;
2057 enum machine_mode optab_op2_mode;
2058 tree def;
2059 gimple def_stmt;
2060 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2061 gimple new_stmt = NULL;
2062 stmt_vec_info prev_stmt_info;
2063 int nunits_in;
2064 int nunits_out;
2065 tree vectype_out;
2066 int ncopies;
2067 int j, i;
2068 VEC(tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
2069 tree vop0, vop1;
2070 unsigned int k;
2071 bool scalar_shift_arg = false;
2072 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2073 int vf;
2075 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2076 return false;
2078 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2079 return false;
2081 /* Is STMT a vectorizable binary/unary operation? */
2082 if (!is_gimple_assign (stmt))
2083 return false;
2085 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2086 return false;
2088 code = gimple_assign_rhs_code (stmt);
2090 /* For pointer addition, we should use the normal plus for
2091 the vector addition. */
2092 if (code == POINTER_PLUS_EXPR)
2093 code = PLUS_EXPR;
2095 /* Support only unary or binary operations. */
2096 op_type = TREE_CODE_LENGTH (code);
2097 if (op_type != unary_op && op_type != binary_op)
2099 if (vect_print_dump_info (REPORT_DETAILS))
2100 fprintf (vect_dump, "num. args = %d (not unary/binary op).", op_type);
2101 return false;
2104 scalar_dest = gimple_assign_lhs (stmt);
2105 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2107 op0 = gimple_assign_rhs1 (stmt);
2108 if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
2109 &def_stmt, &def, &dt[0], &vectype))
2111 if (vect_print_dump_info (REPORT_DETAILS))
2112 fprintf (vect_dump, "use not simple.");
2113 return false;
2115 /* If op0 is an external or constant def use a vector type with
2116 the same size as the output vector type. */
2117 if (!vectype)
2118 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2119 if (vec_stmt)
2120 gcc_assert (vectype);
2121 if (!vectype)
2123 if (vect_print_dump_info (REPORT_DETAILS))
2125 fprintf (vect_dump, "no vectype for scalar type ");
2126 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2129 return false;
2132 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2133 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
2134 if (nunits_out != nunits_in)
2135 return false;
2137 if (op_type == binary_op)
2139 op1 = gimple_assign_rhs2 (stmt);
2140 if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def,
2141 &dt[1]))
2143 if (vect_print_dump_info (REPORT_DETAILS))
2144 fprintf (vect_dump, "use not simple.");
2145 return false;
2149 if (loop_vinfo)
2150 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2151 else
2152 vf = 1;
2154 /* Multiple types in SLP are handled by creating the appropriate number of
2155 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2156 case of SLP. */
2157 if (slp_node)
2158 ncopies = 1;
2159 else
2160 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2162 gcc_assert (ncopies >= 1);
2164   /* If this is a shift/rotate, determine whether the shift amount is a vector
2165      or a scalar.  If the shift/rotate amount is a vector, use the vector/vector
2166      shift optabs.  */
2167 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
2168 || code == RROTATE_EXPR)
2170 /* vector shifted by vector */
2171 if (dt[1] == vect_internal_def)
2173 optab = optab_for_tree_code (code, vectype, optab_vector);
2174 if (vect_print_dump_info (REPORT_DETAILS))
2175 fprintf (vect_dump, "vector/vector shift/rotate found.");
2178       /* See if the machine has a vector-shifted-by-scalar insn, and if not
2179          then see if it has a vector-shifted-by-vector insn.  */
2180 else if (dt[1] == vect_constant_def || dt[1] == vect_external_def)
2182 optab = optab_for_tree_code (code, vectype, optab_scalar);
2183 if (optab
2184 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
2186 scalar_shift_arg = true;
2187 if (vect_print_dump_info (REPORT_DETAILS))
2188 fprintf (vect_dump, "vector/scalar shift/rotate found.");
2190 else
2192 optab = optab_for_tree_code (code, vectype, optab_vector);
2193 if (optab
2194 && (optab_handler (optab, TYPE_MODE (vectype))
2195 != CODE_FOR_nothing))
2197 if (vect_print_dump_info (REPORT_DETAILS))
2198 fprintf (vect_dump, "vector/vector shift/rotate found.");
2200 /* Unlike the other binary operators, shifts/rotates have
2201 the rhs being int, instead of the same type as the lhs,
2202 so make sure the scalar is the right type if we are
2203 dealing with vectors of short/char. */
2204 if (dt[1] == vect_constant_def)
2205 op1 = fold_convert (TREE_TYPE (vectype), op1);
2210 else
2212 if (vect_print_dump_info (REPORT_DETAILS))
2213 fprintf (vect_dump, "operand mode requires invariant argument.");
2214 return false;
2217 else
2218 optab = optab_for_tree_code (code, vectype, optab_default);
2220 /* Supportable by target? */
2221 if (!optab)
2223 if (vect_print_dump_info (REPORT_DETAILS))
2224 fprintf (vect_dump, "no optab.");
2225 return false;
2227 vec_mode = TYPE_MODE (vectype);
2228 icode = (int) optab_handler (optab, vec_mode);
2229 if (icode == CODE_FOR_nothing)
2231 if (vect_print_dump_info (REPORT_DETAILS))
2232 fprintf (vect_dump, "op not supported by target.");
2233 /* Check only during analysis. */
2234 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
2235 || (vf < vect_min_worthwhile_factor (code)
2236 && !vec_stmt))
2237 return false;
2238 if (vect_print_dump_info (REPORT_DETAILS))
2239 fprintf (vect_dump, "proceeding using word mode.");
2242 /* Worthwhile without SIMD support? Check only during analysis. */
2243 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2244 && vf < vect_min_worthwhile_factor (code)
2245 && !vec_stmt)
2247 if (vect_print_dump_info (REPORT_DETAILS))
2248 fprintf (vect_dump, "not worthwhile without SIMD support.");
2249 return false;
2252 if (!vec_stmt) /* transformation not required. */
2254 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
2255 if (vect_print_dump_info (REPORT_DETAILS))
2256 fprintf (vect_dump, "=== vectorizable_operation ===");
2257 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2258 return true;
2261 /** Transform. **/
2263 if (vect_print_dump_info (REPORT_DETAILS))
2264 fprintf (vect_dump, "transform binary/unary operation.");
2266 /* Handle def. */
2267 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2269 /* Allocate VECs for vector operands. In case of SLP, vector operands are
2270 created in the previous stages of the recursion, so no allocation is
2271 needed, except for the case of shift with scalar shift argument. In that
2272 case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
2273 be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
2274 In case of loop-based vectorization we allocate VECs of size 1. We
2275 allocate VEC_OPRNDS1 only in case of binary operation. */
2276 if (!slp_node)
2278 vec_oprnds0 = VEC_alloc (tree, heap, 1);
2279 if (op_type == binary_op)
2280 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2282 else if (scalar_shift_arg)
2283 vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);
2285 /* In case the vectorization factor (VF) is bigger than the number
2286 of elements that we can fit in a vectype (nunits), we have to generate
2287 more than one vector stmt - i.e - we need to "unroll" the
2288 vector stmt by a factor VF/nunits. In doing so, we record a pointer
2289 from one copy of the vector stmt to the next, in the field
2290 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
2291 stages to find the correct vector defs to be used when vectorizing
2292 stmts that use the defs of the current stmt. The example below illustrates
2293 the vectorization process when VF=16 and nunits=4 (i.e - we need to create
2294 4 vectorized stmts):
2296 before vectorization:
2297 RELATED_STMT VEC_STMT
2298 S1: x = memref - -
2299 S2: z = x + 1 - -
2301 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
2302 there):
2303 RELATED_STMT VEC_STMT
2304 VS1_0: vx0 = memref0 VS1_1 -
2305 VS1_1: vx1 = memref1 VS1_2 -
2306 VS1_2: vx2 = memref2 VS1_3 -
2307 VS1_3: vx3 = memref3 - -
2308 S1: x = load - VS1_0
2309 S2: z = x + 1 - -
2311 step2: vectorize stmt S2 (done here):
2312 To vectorize stmt S2 we first need to find the relevant vector
2313 def for the first operand 'x'. This is, as usual, obtained from
2314 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
2315 that defines 'x' (S1). This way we find the stmt VS1_0, and the
2316 relevant vector def 'vx0'. Having found 'vx0' we can generate
2317 the vector stmt VS2_0, and as usual, record it in the
2318 STMT_VINFO_VEC_STMT of stmt S2.
2319 When creating the second copy (VS2_1), we obtain the relevant vector
2320 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
2321 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
2322 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
2323 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
2324 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
2325 chain of stmts and pointers:
2326 RELATED_STMT VEC_STMT
2327 VS1_0: vx0 = memref0 VS1_1 -
2328 VS1_1: vx1 = memref1 VS1_2 -
2329 VS1_2: vx2 = memref2 VS1_3 -
2330 VS1_3: vx3 = memref3 - -
2331 S1: x = load - VS1_0
2332 VS2_0: vz0 = vx0 + v1 VS2_1 -
2333 VS2_1: vz1 = vx1 + v1 VS2_2 -
2334 VS2_2: vz2 = vx2 + v1 VS2_3 -
2335 VS2_3: vz3 = vx3 + v1 - -
2336 S2: z = x + 1 - VS2_0 */
2338 prev_stmt_info = NULL;
2339 for (j = 0; j < ncopies; j++)
2341 /* Handle uses. */
2342 if (j == 0)
2344 if (op_type == binary_op && scalar_shift_arg)
2346 /* Vector shl and shr insn patterns can be defined with scalar
2347 operand 2 (shift operand). In this case, use constant or loop
2348 invariant op1 directly, without extending it to vector mode
2349 first. */
2350 optab_op2_mode = insn_data[icode].operand[2].mode;
2351 if (!VECTOR_MODE_P (optab_op2_mode))
2353 if (vect_print_dump_info (REPORT_DETAILS))
2354 fprintf (vect_dump, "operand 1 using scalar mode.");
2355 vec_oprnd1 = op1;
2356 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2357 if (slp_node)
2359 /* Store vec_oprnd1 for every vector stmt to be created
2360 for SLP_NODE. We check during the analysis that all the
2361 shift arguments are the same.
2362 TODO: Allow different constants for different vector
2363 stmts generated for an SLP instance. */
2364 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
2365 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2370           /* vec_oprnd1 is available if operand 1 should be of a scalar type
2371              (a special case for certain kinds of vector shifts); otherwise,
2372              operand 1 should be of a vector type (the usual case).  */
2373 if (op_type == binary_op && !vec_oprnd1)
2374 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
2375 slp_node);
2376 else
2377 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
2378 slp_node);
2380 else
2381 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
2383 /* Arguments are ready. Create the new vector stmt. */
2384 for (i = 0; VEC_iterate (tree, vec_oprnds0, i, vop0); i++)
2386 vop1 = ((op_type == binary_op)
2387 ? VEC_index (tree, vec_oprnds1, i) : NULL);
2388 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2389 new_temp = make_ssa_name (vec_dest, new_stmt);
2390 gimple_assign_set_lhs (new_stmt, new_temp);
2391 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2392 if (slp_node)
2393 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2396 if (slp_node)
2397 continue;
2399 if (j == 0)
2400 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2401 else
2402 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2403 prev_stmt_info = vinfo_for_stmt (new_stmt);
2406 VEC_free (tree, heap, vec_oprnds0);
2407 if (vec_oprnds1)
2408 VEC_free (tree, heap, vec_oprnds1);
2410 return true;
2414 /* Get vectorized definitions for loop-based vectorization. For the first
2415 operand we call vect_get_vec_def_for_operand() (with OPRND containing
2416    the scalar operand), and for the rest we get a copy with
2417 vect_get_vec_def_for_stmt_copy() using the previous vector definition
2418 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
2419 The vectors are collected into VEC_OPRNDS. */
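
/* For illustration: each invocation pushes two vector defs and then
   recurses MULTI_STEP_CVT more times, so 2 * (MULTI_STEP_CVT + 1) defs
   are collected in total.  E.g., for a two-step narrowing such as
   int -> short -> char (one intermediate type), the demotion code below
   passes vect_pow2 (1) - 1 = 1, and four vector defs are collected for
   every copy of the scalar stmt.  */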
2421 static void
2422 vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
2423 VEC (tree, heap) **vec_oprnds, int multi_step_cvt)
2425 tree vec_oprnd;
2427 /* Get first vector operand. */
2428 /* All the vector operands except the very first one (that is scalar oprnd)
2429 are stmt copies. */
2430 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
2431 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
2432 else
2433 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
2435 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2437 /* Get second vector operand. */
2438 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
2439 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2441 *oprnd = vec_oprnd;
2443 /* For conversion in multiple steps, continue to get operands
2444 recursively. */
2445 if (multi_step_cvt)
2446 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
2450 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
2451 For multi-step conversions store the resulting vectors and call the function
2452 recursively. */
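
/* For illustration: with four input vectors {v0, v1, v2, v3} and one
   intermediate type (MULTI_STEP_CVT == 1), the first level produces

     w0 = CODE < v0, v1 >
     w1 = CODE < v2, v3 >

   and the recursive call then produces the final result

     r0 = CODE < w0, w1 >

   where CODE is the narrowing operation passed by the caller.  */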
2454 static void
2455 vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
2456 int multi_step_cvt, gimple stmt,
2457 VEC (tree, heap) *vec_dsts,
2458 gimple_stmt_iterator *gsi,
2459 slp_tree slp_node, enum tree_code code,
2460 stmt_vec_info *prev_stmt_info)
2462 unsigned int i;
2463 tree vop0, vop1, new_tmp, vec_dest;
2464 gimple new_stmt;
2465 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2467 vec_dest = VEC_pop (tree, vec_dsts);
2469 for (i = 0; i < VEC_length (tree, *vec_oprnds); i += 2)
2471 /* Create demotion operation. */
2472 vop0 = VEC_index (tree, *vec_oprnds, i);
2473 vop1 = VEC_index (tree, *vec_oprnds, i + 1);
2474 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2475 new_tmp = make_ssa_name (vec_dest, new_stmt);
2476 gimple_assign_set_lhs (new_stmt, new_tmp);
2477 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2479 if (multi_step_cvt)
2480 /* Store the resulting vector for next recursive call. */
2481 VEC_replace (tree, *vec_oprnds, i/2, new_tmp);
2482 else
2484 /* This is the last step of the conversion sequence. Store the
2485 vectors in SLP_NODE or in vector info of the scalar statement
2486 (or in STMT_VINFO_RELATED_STMT chain). */
2487 if (slp_node)
2488 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2489 else
2491 if (!*prev_stmt_info)
2492 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2493 else
2494 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
2496 *prev_stmt_info = vinfo_for_stmt (new_stmt);
2501   /* For multi-step demotion operations we first generate demotion operations
2502      from the source type to the intermediate types, and then combine the
2503      results (stored in VEC_OPRNDS) in a demotion operation to the destination
2504      type.  */
2505 if (multi_step_cvt)
2507       /* At each level of recursion we have half of the operands we had at the
2508          previous level.  */
2509 VEC_truncate (tree, *vec_oprnds, (i+1)/2);
2510 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
2511 stmt, vec_dsts, gsi, slp_node,
2512 code, prev_stmt_info);
2517 /* Function vectorizable_type_demotion
2519 Check if STMT performs a binary or unary operation that involves
2520 type demotion, and if it can be vectorized.
2521 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2522 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2523 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
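
/* For illustration: a typical demotion is

     S1: short_x = (short) int_y;

   With, e.g., V4SI as the input vectype and V8HI as the output vectype,
   nunits_in (4) < nunits_out (8), so every vectorized copy packs two
   V4SI operand vectors into one V8HI result.  A conversion such as
   int -> char goes through an intermediate short type, i.e. a
   multi-step demotion.  */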
2525 static bool
2526 vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
2527 gimple *vec_stmt, slp_tree slp_node)
2529 tree vec_dest;
2530 tree scalar_dest;
2531 tree op0;
2532 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2533 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2534 enum tree_code code, code1 = ERROR_MARK;
2535 tree def;
2536 gimple def_stmt;
2537 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2538 stmt_vec_info prev_stmt_info;
2539 int nunits_in;
2540 int nunits_out;
2541 tree vectype_out;
2542 int ncopies;
2543 int j, i;
2544 tree vectype_in;
2545 int multi_step_cvt = 0;
2546 VEC (tree, heap) *vec_oprnds0 = NULL;
2547 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2548 tree last_oprnd, intermediate_type;
2550 /* FORNOW: not supported by basic block SLP vectorization. */
2551 gcc_assert (loop_vinfo);
2553 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2554 return false;
2556 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2557 return false;
2559 /* Is STMT a vectorizable type-demotion operation? */
2560 if (!is_gimple_assign (stmt))
2561 return false;
2563 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2564 return false;
2566 code = gimple_assign_rhs_code (stmt);
2567 if (!CONVERT_EXPR_CODE_P (code))
2568 return false;
2570 scalar_dest = gimple_assign_lhs (stmt);
2571 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2573 /* Check the operands of the operation. */
2574 op0 = gimple_assign_rhs1 (stmt);
2575 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2576 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2577 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2578 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2579 && CONVERT_EXPR_CODE_P (code))))
2580 return false;
2581 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
2582 &def_stmt, &def, &dt[0], &vectype_in))
2584 if (vect_print_dump_info (REPORT_DETAILS))
2585 fprintf (vect_dump, "use not simple.");
2586 return false;
2588 /* If op0 is an external def use a vector type with the
2589 same size as the output vector type if possible. */
2590 if (!vectype_in)
2591 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2592 if (vec_stmt)
2593 gcc_assert (vectype_in);
2594 if (!vectype_in)
2596 if (vect_print_dump_info (REPORT_DETAILS))
2598 fprintf (vect_dump, "no vectype for scalar type ");
2599 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2602 return false;
2605 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2606 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2607 if (nunits_in >= nunits_out)
2608 return false;
2610 /* Multiple types in SLP are handled by creating the appropriate number of
2611 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2612 case of SLP. */
2613 if (slp_node)
2614 ncopies = 1;
2615 else
2616 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2617 gcc_assert (ncopies >= 1);
2619 /* Supportable by target? */
2620 if (!supportable_narrowing_operation (code, vectype_out, vectype_in,
2621 &code1, &multi_step_cvt, &interm_types))
2622 return false;
2624 if (!vec_stmt) /* transformation not required. */
2626 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
2627 if (vect_print_dump_info (REPORT_DETAILS))
2628 fprintf (vect_dump, "=== vectorizable_demotion ===");
2629 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2630 return true;
2633 /** Transform. **/
2634 if (vect_print_dump_info (REPORT_DETAILS))
2635 fprintf (vect_dump, "transform type demotion operation. ncopies = %d.",
2636 ncopies);
2638   /* In case of multi-step demotion, we first generate demotion operations to
2639      the intermediate types, and then from those types to the final one.
2640      We create vector destinations for the intermediate types (TYPES) received
2641      from supportable_narrowing_operation, and store them in the correct order
2642      for future use in vect_create_vectorized_demotion_stmts().  */
2643 if (multi_step_cvt)
2644 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
2645 else
2646 vec_dsts = VEC_alloc (tree, heap, 1);
2648 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2649 VEC_quick_push (tree, vec_dsts, vec_dest);
2651 if (multi_step_cvt)
2653 for (i = VEC_length (tree, interm_types) - 1;
2654 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
2656 vec_dest = vect_create_destination_var (scalar_dest,
2657 intermediate_type);
2658 VEC_quick_push (tree, vec_dsts, vec_dest);
2662 /* In case the vectorization factor (VF) is bigger than the number
2663 of elements that we can fit in a vectype (nunits), we have to generate
2664 more than one vector stmt - i.e - we need to "unroll" the
2665 vector stmt by a factor VF/nunits. */
2666 last_oprnd = op0;
2667 prev_stmt_info = NULL;
2668 for (j = 0; j < ncopies; j++)
2670 /* Handle uses. */
2671 if (slp_node)
2672 vect_get_slp_defs (slp_node, &vec_oprnds0, NULL, -1);
2673 else
2675 VEC_free (tree, heap, vec_oprnds0);
2676 vec_oprnds0 = VEC_alloc (tree, heap,
2677 (multi_step_cvt ? vect_pow2 (multi_step_cvt) * 2 : 2));
2678 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
2679 vect_pow2 (multi_step_cvt) - 1);
2682 /* Arguments are ready. Create the new vector stmts. */
2683 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
2684 vect_create_vectorized_demotion_stmts (&vec_oprnds0,
2685 multi_step_cvt, stmt, tmp_vec_dsts,
2686 gsi, slp_node, code1,
2687 &prev_stmt_info);
2690 VEC_free (tree, heap, vec_oprnds0);
2691 VEC_free (tree, heap, vec_dsts);
2692 VEC_free (tree, heap, tmp_vec_dsts);
2693 VEC_free (tree, heap, interm_types);
2695 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2696 return true;
2700 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
2701 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
2702 the resulting vectors and call the function recursively. */
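
/* For illustration: for every input vector vop0 (and vop1 for binary
   operations) two stmts are generated, one with CODE1 for the first
   half and one with CODE2 for the second half of the widened result.
   E.g., for a short -> int promotion with V8HI/V4SI vectypes this is
   typically

     vi_0 = VEC_UNPACK_LO_EXPR < vs0 >
     vi_1 = VEC_UNPACK_HI_EXPR < vs0 >

   For a multi-step conversion the temporaries are collected in VEC_TMP
   and the function recurses on them.  */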
2704 static void
2705 vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
2706 VEC (tree, heap) **vec_oprnds1,
2707 int multi_step_cvt, gimple stmt,
2708 VEC (tree, heap) *vec_dsts,
2709 gimple_stmt_iterator *gsi,
2710 slp_tree slp_node, enum tree_code code1,
2711 enum tree_code code2, tree decl1,
2712 tree decl2, int op_type,
2713 stmt_vec_info *prev_stmt_info)
2715 int i;
2716 tree vop0, vop1, new_tmp1, new_tmp2, vec_dest;
2717 gimple new_stmt1, new_stmt2;
2718 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2719 VEC (tree, heap) *vec_tmp;
2721 vec_dest = VEC_pop (tree, vec_dsts);
2722 vec_tmp = VEC_alloc (tree, heap, VEC_length (tree, *vec_oprnds0) * 2);
2724 for (i = 0; VEC_iterate (tree, *vec_oprnds0, i, vop0); i++)
2726 if (op_type == binary_op)
2727 vop1 = VEC_index (tree, *vec_oprnds1, i);
2728 else
2729 vop1 = NULL_TREE;
2731 /* Generate the two halves of promotion operation. */
2732 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
2733 op_type, vec_dest, gsi, stmt);
2734 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
2735 op_type, vec_dest, gsi, stmt);
2736 if (is_gimple_call (new_stmt1))
2738 new_tmp1 = gimple_call_lhs (new_stmt1);
2739 new_tmp2 = gimple_call_lhs (new_stmt2);
2741 else
2743 new_tmp1 = gimple_assign_lhs (new_stmt1);
2744 new_tmp2 = gimple_assign_lhs (new_stmt2);
2747 if (multi_step_cvt)
2749 /* Store the results for the recursive call. */
2750 VEC_quick_push (tree, vec_tmp, new_tmp1);
2751 VEC_quick_push (tree, vec_tmp, new_tmp2);
2753 else
2755         /* Last step of the promotion sequence - store the results.  */
2756 if (slp_node)
2758 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt1);
2759 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt2);
2761 else
2763 if (!*prev_stmt_info)
2764 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt1;
2765 else
2766 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt1;
2768 *prev_stmt_info = vinfo_for_stmt (new_stmt1);
2769 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt2;
2770 *prev_stmt_info = vinfo_for_stmt (new_stmt2);
2775 if (multi_step_cvt)
2777       /* For a multi-step promotion operation we call the function
2778          recursively for every stage.  We start from the input type,
2779          create promotion operations to the intermediate types, and then
2780          create promotions to the output type.  */
2781 *vec_oprnds0 = VEC_copy (tree, heap, vec_tmp);
2782 VEC_free (tree, heap, vec_tmp);
2783 vect_create_vectorized_promotion_stmts (vec_oprnds0, vec_oprnds1,
2784 multi_step_cvt - 1, stmt,
2785 vec_dsts, gsi, slp_node, code1,
2786 code2, decl2, decl2, op_type,
2787 prev_stmt_info);
2792 /* Function vectorizable_type_promotion
2794 Check if STMT performs a binary or unary operation that involves
2795 type promotion, and if it can be vectorized.
2796 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2797 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2798 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
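
/* For illustration: typical promotions are

     S1: int_x = (int) short_y;
     S2: int_z = short_a * short_b;      (WIDEN_MULT_EXPR)

   With, e.g., V8HI as the input vectype and V4SI as the output vectype,
   nunits_in (8) > nunits_out (4), so every vectorized copy produces two
   V4SI result vectors from each V8HI operand vector.  */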
2800 static bool
2801 vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
2802 gimple *vec_stmt, slp_tree slp_node)
2804 tree vec_dest;
2805 tree scalar_dest;
2806 tree op0, op1 = NULL;
2807   tree vec_oprnd0 = NULL, vec_oprnd1 = NULL;
2808 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2809 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2810 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
2811 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
2812 int op_type;
2813 tree def;
2814 gimple def_stmt;
2815 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2816 stmt_vec_info prev_stmt_info;
2817 int nunits_in;
2818 int nunits_out;
2819 tree vectype_out;
2820 int ncopies;
2821 int j, i;
2822 tree vectype_in;
2823 tree intermediate_type = NULL_TREE;
2824 int multi_step_cvt = 0;
2825 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
2826 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2828 /* FORNOW: not supported by basic block SLP vectorization. */
2829 gcc_assert (loop_vinfo);
2831 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2832 return false;
2834 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2835 return false;
2837 /* Is STMT a vectorizable type-promotion operation? */
2838 if (!is_gimple_assign (stmt))
2839 return false;
2841 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2842 return false;
2844 code = gimple_assign_rhs_code (stmt);
2845 if (!CONVERT_EXPR_CODE_P (code)
2846 && code != WIDEN_MULT_EXPR)
2847 return false;
2849 scalar_dest = gimple_assign_lhs (stmt);
2850 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2852 /* Check the operands of the operation. */
2853 op0 = gimple_assign_rhs1 (stmt);
2854 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2855 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2856 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2857 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2858 && CONVERT_EXPR_CODE_P (code))))
2859 return false;
2860 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
2861 &def_stmt, &def, &dt[0], &vectype_in))
2863 if (vect_print_dump_info (REPORT_DETAILS))
2864 fprintf (vect_dump, "use not simple.");
2865 return false;
2867 /* If op0 is an external or constant def use a vector type with
2868 the same size as the output vector type. */
2869 if (!vectype_in)
2870 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2871 if (vec_stmt)
2872 gcc_assert (vectype_in);
2873 if (!vectype_in)
2875 if (vect_print_dump_info (REPORT_DETAILS))
2877 fprintf (vect_dump, "no vectype for scalar type ");
2878 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2881 return false;
2884 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2885 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2886 if (nunits_in <= nunits_out)
2887 return false;
2889 /* Multiple types in SLP are handled by creating the appropriate number of
2890 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2891 case of SLP. */
2892 if (slp_node)
2893 ncopies = 1;
2894 else
2895 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2897 gcc_assert (ncopies >= 1);
2899 op_type = TREE_CODE_LENGTH (code);
2900 if (op_type == binary_op)
2902 op1 = gimple_assign_rhs2 (stmt);
2903 if (!vect_is_simple_use (op1, loop_vinfo, NULL, &def_stmt, &def, &dt[1]))
2905 if (vect_print_dump_info (REPORT_DETAILS))
2906 fprintf (vect_dump, "use not simple.");
2907 return false;
2911 /* Supportable by target? */
2912 if (!supportable_widening_operation (code, stmt, vectype_out, vectype_in,
2913 &decl1, &decl2, &code1, &code2,
2914 &multi_step_cvt, &interm_types))
2915 return false;
2917 /* Binary widening operation can only be supported directly by the
2918 architecture. */
2919 gcc_assert (!(multi_step_cvt && op_type == binary_op));
2921 if (!vec_stmt) /* transformation not required. */
2923 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
2924 if (vect_print_dump_info (REPORT_DETAILS))
2925 fprintf (vect_dump, "=== vectorizable_promotion ===");
2926 vect_model_simple_cost (stmt_info, 2*ncopies, dt, NULL);
2927 return true;
2930 /** Transform. **/
2932 if (vect_print_dump_info (REPORT_DETAILS))
2933 fprintf (vect_dump, "transform type promotion operation. ncopies = %d.",
2934 ncopies);
2936 /* Handle def. */
2937   /* In case of multi-step promotion, we first generate promotion operations
2938      to the intermediate types, and then from those types to the final one.
2939      We store the vector destinations in VEC_DSTS in the correct order for
2940      recursive creation of promotion operations in
2941      vect_create_vectorized_promotion_stmts().  Vector destinations are created
2942      according to TYPES received from supportable_widening_operation().  */
2943 if (multi_step_cvt)
2944 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
2945 else
2946 vec_dsts = VEC_alloc (tree, heap, 1);
2948 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2949 VEC_quick_push (tree, vec_dsts, vec_dest);
2951 if (multi_step_cvt)
2953 for (i = VEC_length (tree, interm_types) - 1;
2954 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
2956 vec_dest = vect_create_destination_var (scalar_dest,
2957 intermediate_type);
2958 VEC_quick_push (tree, vec_dsts, vec_dest);
2962 if (!slp_node)
2964 vec_oprnds0 = VEC_alloc (tree, heap,
2965 (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
2966 if (op_type == binary_op)
2967 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2970 /* In case the vectorization factor (VF) is bigger than the number
2971 of elements that we can fit in a vectype (nunits), we have to generate
2972 more than one vector stmt - i.e - we need to "unroll" the
2973 vector stmt by a factor VF/nunits. */
2975 prev_stmt_info = NULL;
2976 for (j = 0; j < ncopies; j++)
2978 /* Handle uses. */
2979 if (j == 0)
2981 if (slp_node)
2982 vect_get_slp_defs (slp_node, &vec_oprnds0, &vec_oprnds1, -1);
2983 else
2985 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
2986 VEC_quick_push (tree, vec_oprnds0, vec_oprnd0);
2987 if (op_type == binary_op)
2989 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL);
2990 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2994 else
2996 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
2997 VEC_replace (tree, vec_oprnds0, 0, vec_oprnd0);
2998 if (op_type == binary_op)
3000 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1);
3001 VEC_replace (tree, vec_oprnds1, 0, vec_oprnd1);
3005 /* Arguments are ready. Create the new vector stmts. */
3006 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
3007 vect_create_vectorized_promotion_stmts (&vec_oprnds0, &vec_oprnds1,
3008 multi_step_cvt, stmt,
3009 tmp_vec_dsts,
3010 gsi, slp_node, code1, code2,
3011 decl1, decl2, op_type,
3012 &prev_stmt_info);
3015 VEC_free (tree, heap, vec_dsts);
3016 VEC_free (tree, heap, tmp_vec_dsts);
3017 VEC_free (tree, heap, interm_types);
3018 VEC_free (tree, heap, vec_oprnds0);
3019 VEC_free (tree, heap, vec_oprnds1);
3021 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3022 return true;
3026 /* Function vectorizable_store.
3028 Check if STMT defines a non scalar data-ref (array/pointer/structure) that
3029 can be vectorized.
3030 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3031 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3032 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3034 static bool
3035 vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
3036 slp_tree slp_node)
3038 tree scalar_dest;
3039 tree data_ref;
3040 tree op;
3041 tree vec_oprnd = NULL_TREE;
3042 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3043 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
3044 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3045 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3046 struct loop *loop = NULL;
3047 enum machine_mode vec_mode;
3048 tree dummy;
3049 enum dr_alignment_support alignment_support_scheme;
3050 tree def;
3051 gimple def_stmt;
3052 enum vect_def_type dt;
3053 stmt_vec_info prev_stmt_info = NULL;
3054 tree dataref_ptr = NULL_TREE;
3055 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3056 int ncopies;
3057 int j;
3058 gimple next_stmt, first_stmt = NULL;
3059 bool strided_store = false;
3060 unsigned int group_size, i;
3061 VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
3062 bool inv_p;
3063 VEC(tree,heap) *vec_oprnds = NULL;
3064 bool slp = (slp_node != NULL);
3065 unsigned int vec_num;
3066 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3068 if (loop_vinfo)
3069 loop = LOOP_VINFO_LOOP (loop_vinfo);
3071 /* Multiple types in SLP are handled by creating the appropriate number of
3072 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3073 case of SLP. */
3074 if (slp)
3075 ncopies = 1;
3076 else
3077 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3079 gcc_assert (ncopies >= 1);
3081 /* FORNOW. This restriction should be relaxed. */
3082 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
3084 if (vect_print_dump_info (REPORT_DETAILS))
3085 fprintf (vect_dump, "multiple types in nested loop.");
3086 return false;
3089 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3090 return false;
3092 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3093 return false;
3095 /* Is vectorizable store? */
3097 if (!is_gimple_assign (stmt))
3098 return false;
3100 scalar_dest = gimple_assign_lhs (stmt);
3101 if (TREE_CODE (scalar_dest) != ARRAY_REF
3102 && TREE_CODE (scalar_dest) != INDIRECT_REF
3103 && TREE_CODE (scalar_dest) != COMPONENT_REF
3104 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
3105 && TREE_CODE (scalar_dest) != REALPART_EXPR
3106 && TREE_CODE (scalar_dest) != MEM_REF)
3107 return false;
3109 gcc_assert (gimple_assign_single_p (stmt));
3110 op = gimple_assign_rhs1 (stmt);
3111 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt))
3113 if (vect_print_dump_info (REPORT_DETAILS))
3114 fprintf (vect_dump, "use not simple.");
3115 return false;
3118 /* The scalar rhs type needs to be trivially convertible to the vector
3119 component type. This should always be the case. */
3120 if (!useless_type_conversion_p (TREE_TYPE (vectype), TREE_TYPE (op)))
3122 if (vect_print_dump_info (REPORT_DETAILS))
3123 fprintf (vect_dump, "??? operands of different types");
3124 return false;
3127 vec_mode = TYPE_MODE (vectype);
3128 /* FORNOW. In some cases can vectorize even if data-type not supported
3129 (e.g. - array initialization with 0). */
3130 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
3131 return false;
3133 if (!STMT_VINFO_DATA_REF (stmt_info))
3134 return false;
3136 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3138 strided_store = true;
3139 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
3140 if (!vect_strided_store_supported (vectype)
3141 && !PURE_SLP_STMT (stmt_info) && !slp)
3142 return false;
3144 if (first_stmt == stmt)
3146 /* STMT is the leader of the group. Check the operands of all the
3147 stmts of the group. */
3148 next_stmt = DR_GROUP_NEXT_DR (stmt_info);
3149 while (next_stmt)
3151 gcc_assert (gimple_assign_single_p (next_stmt));
3152 op = gimple_assign_rhs1 (next_stmt);
3153 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt,
3154 &def, &dt))
3156 if (vect_print_dump_info (REPORT_DETAILS))
3157 fprintf (vect_dump, "use not simple.");
3158 return false;
3160 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3165 if (!vec_stmt) /* transformation not required. */
3167 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
3168 vect_model_store_cost (stmt_info, ncopies, dt, NULL);
3169 return true;
3172 /** Transform. **/
3174 if (strided_store)
3176 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3177 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3179 DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
3181 /* FORNOW */
3182 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
3184 /* We vectorize all the stmts of the interleaving group when we
3185 reach the last stmt in the group. */
3186 if (DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
3187 < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))
3188 && !slp)
3190 *vec_stmt = NULL;
3191 return true;
3194 if (slp)
3196 strided_store = false;
3197 /* VEC_NUM is the number of vect stmts to be created for this
3198 group. */
3199 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3200 first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
3201 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3203 else
3204 /* VEC_NUM is the number of vect stmts to be created for this
3205 group. */
3206 vec_num = group_size;
3208 else
3210 first_stmt = stmt;
3211 first_dr = dr;
3212 group_size = vec_num = 1;
3215 if (vect_print_dump_info (REPORT_DETAILS))
3216 fprintf (vect_dump, "transform store. ncopies = %d",ncopies);
3218 dr_chain = VEC_alloc (tree, heap, group_size);
3219 oprnds = VEC_alloc (tree, heap, group_size);
3221 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
3222 gcc_assert (alignment_support_scheme);
3224 /* In case the vectorization factor (VF) is bigger than the number
3225 of elements that we can fit in a vectype (nunits), we have to generate
3226 more than one vector stmt - i.e - we need to "unroll" the
3227 vector stmt by a factor VF/nunits. For more details see documentation in
3228 vect_get_vec_def_for_copy_stmt. */
3230 /* In case of interleaving (non-unit strided access):
3232 S1: &base + 2 = x2
3233 S2: &base = x0
3234 S3: &base + 1 = x1
3235 S4: &base + 3 = x3
3237 We create vectorized stores starting from base address (the access of the
3238 first stmt in the chain (S2 in the above example), when the last store stmt
3239 of the chain (S4) is reached:
3241 VS1: &base = vx2
3242 VS2: &base + vec_size*1 = vx0
3243 VS3: &base + vec_size*2 = vx1
3244 VS4: &base + vec_size*3 = vx3
3246 Then permutation statements are generated:
3248 VS5: vx5 = VEC_INTERLEAVE_HIGH_EXPR < vx0, vx3 >
3249 VS6: vx6 = VEC_INTERLEAVE_LOW_EXPR < vx0, vx3 >
3252 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3253 (the order of the data-refs in the output of vect_permute_store_chain
3254 corresponds to the order of scalar stmts in the interleaving chain - see
3255 the documentation of vect_permute_store_chain()).
3257 In case of both multiple types and interleaving, above vector stores and
3258 permutation stmts are created for every copy. The result vector stmts are
3259 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
3260 STMT_VINFO_RELATED_STMT for the next copies.
3263 prev_stmt_info = NULL;
3264 for (j = 0; j < ncopies; j++)
3266 gimple new_stmt;
3267 gimple ptr_incr;
3269 if (j == 0)
3271 if (slp)
3273 /* Get vectorized arguments for SLP_NODE. */
3274 vect_get_slp_defs (slp_node, &vec_oprnds, NULL, -1);
3276 vec_oprnd = VEC_index (tree, vec_oprnds, 0);
3278 else
3280 /* For interleaved stores we collect vectorized defs for all the
3281 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
3282 used as an input to vect_permute_store_chain(), and OPRNDS as
3283 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
3285 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3286 OPRNDS are of size 1. */
3287 next_stmt = first_stmt;
3288 for (i = 0; i < group_size; i++)
3290 /* Since gaps are not supported for interleaved stores,
3291 GROUP_SIZE is the exact number of stmts in the chain.
3292 Therefore, NEXT_STMT can't be NULL_TREE. In case that
3293 there is no interleaving, GROUP_SIZE is 1, and only one
3294 iteration of the loop will be executed. */
3295 gcc_assert (next_stmt
3296 && gimple_assign_single_p (next_stmt));
3297 op = gimple_assign_rhs1 (next_stmt);
3299 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
3300 NULL);
3301 	      VEC_quick_push (tree, dr_chain, vec_oprnd);
3302 	      VEC_quick_push (tree, oprnds, vec_oprnd);
3303 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3307 	  /* We should have caught mismatched types earlier.  */
3308 gcc_assert (useless_type_conversion_p (vectype,
3309 TREE_TYPE (vec_oprnd)));
3310 dataref_ptr = vect_create_data_ref_ptr (first_stmt, NULL, NULL_TREE,
3311 &dummy, &ptr_incr, false,
3312 &inv_p);
3313 gcc_assert (bb_vinfo || !inv_p);
3315 else
3317 /* For interleaved stores we created vectorized defs for all the
3318 defs stored in OPRNDS in the previous iteration (previous copy).
3319 DR_CHAIN is then used as an input to vect_permute_store_chain(),
3320 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
3321 next copy.
3322 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3323 OPRNDS are of size 1. */
3324 for (i = 0; i < group_size; i++)
3326 op = VEC_index (tree, oprnds, i);
3327 vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def,
3328 &dt);
3329 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
3330 	      VEC_replace (tree, dr_chain, i, vec_oprnd);
3331 	      VEC_replace (tree, oprnds, i, vec_oprnd);
3333 dataref_ptr =
3334 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
3337 if (strided_store)
3339 result_chain = VEC_alloc (tree, heap, group_size);
3340 /* Permute. */
3341 if (!vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
3342 &result_chain))
3343 return false;
3346 next_stmt = first_stmt;
3347 for (i = 0; i < vec_num; i++)
3349 if (i > 0)
3350 /* Bump the vector pointer. */
3351 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3352 NULL_TREE);
3354 if (slp)
3355 vec_oprnd = VEC_index (tree, vec_oprnds, i);
3356 else if (strided_store)
3357 /* For strided stores vectorized defs are interleaved in
3358 vect_permute_store_chain(). */
3359 vec_oprnd = VEC_index (tree, result_chain, i);
3361 if (aligned_access_p (first_dr))
3362 data_ref = build_simple_mem_ref (dataref_ptr);
3363 else
3365 int mis = DR_MISALIGNMENT (first_dr);
3366 tree tmis = (mis == -1 ? size_zero_node : size_int (mis));
3367 tmis = size_binop (MULT_EXPR, tmis, size_int (BITS_PER_UNIT));
3368 data_ref = build2 (MISALIGNED_INDIRECT_REF, vectype, dataref_ptr, tmis);
3371 /* If accesses through a pointer to vectype do not alias the original
3372 memory reference we have a problem. This should never happen. */
3373 gcc_assert (alias_sets_conflict_p (get_alias_set (data_ref),
3374 get_alias_set (gimple_assign_lhs (stmt))));
3376 /* Arguments are ready. Create the new vector stmt. */
3377 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
3378 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3379 mark_symbols_for_renaming (new_stmt);
3381 if (slp)
3382 continue;
3384 if (j == 0)
3385 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3386 else
3387 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3389 prev_stmt_info = vinfo_for_stmt (new_stmt);
3390 next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));
3391 if (!next_stmt)
3392 break;
3396 VEC_free (tree, heap, dr_chain);
3397 VEC_free (tree, heap, oprnds);
3398 if (result_chain)
3399 VEC_free (tree, heap, result_chain);
3401 return true;
3404 /* vectorizable_load.
3406 Check if STMT reads a non scalar data-ref (array/pointer/structure) that
3407 can be vectorized.
3408 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3409 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3410 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3412 static bool
3413 vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
3414 slp_tree slp_node, slp_instance slp_node_instance)
3416 tree scalar_dest;
3417 tree vec_dest = NULL;
3418 tree data_ref = NULL;
3419 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3420 stmt_vec_info prev_stmt_info;
3421 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3422 struct loop *loop = NULL;
3423 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
3424 bool nested_in_vect_loop = false;
3425 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
3426 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3427 tree new_temp;
3428 enum machine_mode mode;
3429 gimple new_stmt = NULL;
3430 tree dummy;
3431 enum dr_alignment_support alignment_support_scheme;
3432 tree dataref_ptr = NULL_TREE;
3433 gimple ptr_incr;
3434 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3435 int ncopies;
3436 int i, j, group_size;
3437 tree msq = NULL_TREE, lsq;
3438 tree offset = NULL_TREE;
3439 tree realignment_token = NULL_TREE;
3440 gimple phi = NULL;
3441 VEC(tree,heap) *dr_chain = NULL;
3442 bool strided_load = false;
3443 gimple first_stmt;
3444 tree scalar_type;
3445 bool inv_p;
3446 bool compute_in_loop = false;
3447 struct loop *at_loop;
3448 int vec_num;
3449 bool slp = (slp_node != NULL);
3450 bool slp_perm = false;
3451 enum tree_code code;
3452 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3453 int vf;
3455 if (loop_vinfo)
3457 loop = LOOP_VINFO_LOOP (loop_vinfo);
3458 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
3459 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3461 else
3462 vf = 1;
3464 /* Multiple types in SLP are handled by creating the appropriate number of
3465 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3466 case of SLP. */
3467 if (slp)
3468 ncopies = 1;
3469 else
3470 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3472 gcc_assert (ncopies >= 1);
3474 /* FORNOW. This restriction should be relaxed. */
3475 if (nested_in_vect_loop && ncopies > 1)
3477 if (vect_print_dump_info (REPORT_DETAILS))
3478 fprintf (vect_dump, "multiple types in nested loop.");
3479 return false;
3482 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3483 return false;
3485 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3486 return false;
3488 /* Is vectorizable load? */
3489 if (!is_gimple_assign (stmt))
3490 return false;
3492 scalar_dest = gimple_assign_lhs (stmt);
3493 if (TREE_CODE (scalar_dest) != SSA_NAME)
3494 return false;
3496 code = gimple_assign_rhs_code (stmt);
3497 if (code != ARRAY_REF
3498 && code != INDIRECT_REF
3499 && code != COMPONENT_REF
3500 && code != IMAGPART_EXPR
3501 && code != REALPART_EXPR
3502 && code != MEM_REF)
3503 return false;
3505 if (!STMT_VINFO_DATA_REF (stmt_info))
3506 return false;
3508 scalar_type = TREE_TYPE (DR_REF (dr));
3509 mode = TYPE_MODE (vectype);
3511 /* FORNOW. In some cases can vectorize even if data-type not supported
3512 (e.g. - data copies). */
3513 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
3515 if (vect_print_dump_info (REPORT_DETAILS))
3516 fprintf (vect_dump, "Aligned load, but unsupported type.");
3517 return false;
3520 /* The vector component type needs to be trivially convertible to the
3521 scalar lhs. This should always be the case. */
3522 if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), TREE_TYPE (vectype)))
3524 if (vect_print_dump_info (REPORT_DETAILS))
3525 fprintf (vect_dump, "??? operands of different types");
3526 return false;
3529 /* Check if the load is a part of an interleaving chain. */
3530 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3532 strided_load = true;
3533 /* FORNOW */
3534 gcc_assert (! nested_in_vect_loop);
3536 /* Check if interleaving is supported. */
3537 if (!vect_strided_load_supported (vectype)
3538 && !PURE_SLP_STMT (stmt_info) && !slp)
3539 return false;
3542 if (!vec_stmt) /* transformation not required. */
3544 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
3545 vect_model_load_cost (stmt_info, ncopies, NULL);
3546 return true;
3549 if (vect_print_dump_info (REPORT_DETAILS))
3550 fprintf (vect_dump, "transform load.");
3552 /** Transform. **/
3554 if (strided_load)
3556 first_stmt = DR_GROUP_FIRST_DR (stmt_info);
3557 /* Check if the chain of loads is already vectorized. */
3558 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt)))
3560 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3561 return true;
3563 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3564 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
3566 /* VEC_NUM is the number of vect stmts to be created for this group. */
3567 if (slp)
3569 strided_load = false;
3570 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3571 if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance))
3572 slp_perm = true;
3574 else
3575 vec_num = group_size;
3577 dr_chain = VEC_alloc (tree, heap, vec_num);
3579 else
3581 first_stmt = stmt;
3582 first_dr = dr;
3583 group_size = vec_num = 1;
3586 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
3587 gcc_assert (alignment_support_scheme);
3589 /* In case the vectorization factor (VF) is bigger than the number
3590 of elements that we can fit in a vectype (nunits), we have to generate
3591 more than one vector stmt - i.e - we need to "unroll" the
3592 vector stmt by a factor VF/nunits. In doing so, we record a pointer
3593 from one copy of the vector stmt to the next, in the field
3594 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
3595 stages to find the correct vector defs to be used when vectorizing
3596 stmts that use the defs of the current stmt. The example below illustrates
3597 the vectorization process when VF=16 and nunits=4 (i.e - we need to create
3598 4 vectorized stmts):
3600 before vectorization:
3601 RELATED_STMT VEC_STMT
3602 S1: x = memref - -
3603 S2: z = x + 1 - -
3605 step 1: vectorize stmt S1:
3606 We first create the vector stmt VS1_0, and, as usual, record a
3607 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
3608 Next, we create the vector stmt VS1_1, and record a pointer to
3609 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
3610 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
3611 stmts and pointers:
3612 RELATED_STMT VEC_STMT
3613 VS1_0: vx0 = memref0 VS1_1 -
3614 VS1_1: vx1 = memref1 VS1_2 -
3615 VS1_2: vx2 = memref2 VS1_3 -
3616 VS1_3: vx3 = memref3 - -
3617 S1: x = load - VS1_0
3618 S2: z = x + 1 - -
3620 See in documentation in vect_get_vec_def_for_stmt_copy for how the
3621 information we recorded in RELATED_STMT field is used to vectorize
3622 stmt S2. */
3624 /* In case of interleaving (non-unit strided access):
3626 S1: x2 = &base + 2
3627 S2: x0 = &base
3628 S3: x1 = &base + 1
3629 S4: x3 = &base + 3
3631 Vectorized loads are created in the order of memory accesses
3632 starting from the access of the first stmt of the chain:
3634 VS1: vx0 = &base
3635 VS2: vx1 = &base + vec_size*1
3636 VS3: vx3 = &base + vec_size*2
3637 VS4: vx4 = &base + vec_size*3
3639 Then permutation statements are generated:
3641 VS5: vx5 = VEC_EXTRACT_EVEN_EXPR < vx0, vx1 >
3642 VS6: vx6 = VEC_EXTRACT_ODD_EXPR < vx0, vx1 >
3645 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3646 (the order of the data-refs in the output of vect_permute_load_chain
3647 corresponds to the order of scalar stmts in the interleaving chain - see
3648 the documentation of vect_permute_load_chain()).
3649 The generation of permutation stmts and recording them in
3650 STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().
3652 In case of both multiple types and interleaving, the vector loads and
3653 permutation stmts above are created for every copy. The result vector stmts
3654 are put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
3655 STMT_VINFO_RELATED_STMT for the next copies. */
3657 /* If the data reference is aligned (dr_aligned) or potentially unaligned
3658 on a target that supports unaligned accesses (dr_unaligned_supported)
3659 we generate the following code:
3660 p = initial_addr;
3661 indx = 0;
3662 loop {
3663 p = p + indx * vectype_size;
3664 vec_dest = *(p);
3665 indx = indx + 1;
3668 Otherwise, the data reference is potentially unaligned on a target that
3669 does not support unaligned accesses (dr_explicit_realign_optimized) -
3670 then generate the following code, in which the data in each iteration is
3671 obtained by two vector loads, one from the previous iteration, and one
3672 from the current iteration:
3673 p1 = initial_addr;
3674 msq_init = *(floor(p1))
3675 p2 = initial_addr + VS - 1;
3676 realignment_token = call target_builtin;
3677 indx = 0;
3678 loop {
3679 p2 = p2 + indx * vectype_size
3680 lsq = *(floor(p2))
3681 vec_dest = realign_load (msq, lsq, realignment_token)
3682 indx = indx + 1;
3683 msq = lsq;
3684 } */
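 /* Illustrative example (not from the original sources; assumes 16-byte
 vectors of 4 ints and an access misaligned by 8 bytes): if the desired
 elements are x0..x3, then msq = *(floor(p1)) holds {x-2,x-1,x0,x1},
 lsq = *(floor(p2)) holds {x2,x3,x4,x5}, and realign_load combines the
 two according to realignment_token to produce {x0,x1,x2,x3}. */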
3686 /* If the misalignment remains the same throughout the execution of the
3687 loop, we can create the init_addr and permutation mask at the loop
3688 preheader. Otherwise, it needs to be created inside the loop.
3689 This can only occur when vectorizing memory accesses in the inner-loop
3690 nested within an outer-loop that is being vectorized. */
3692 if (loop && nested_in_vect_loop_p (loop, stmt)
3693 && (TREE_INT_CST_LOW (DR_STEP (dr))
3694 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
3696 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
3697 compute_in_loop = true;
3700 if ((alignment_support_scheme == dr_explicit_realign_optimized
3701 || alignment_support_scheme == dr_explicit_realign)
3702 && !compute_in_loop)
3704 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
3705 alignment_support_scheme, NULL_TREE,
3706 &at_loop);
3707 if (alignment_support_scheme == dr_explicit_realign_optimized)
3709 phi = SSA_NAME_DEF_STMT (msq);
3710 offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
3713 else
3714 at_loop = loop;
3716 prev_stmt_info = NULL;
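 /* Main transformation loop: generate NCOPIES copies of the vector load.
 For an interleaved or SLP access each copy consists of VEC_NUM vector
 loads, whose results are collected in DR_CHAIN and permuted/recorded
 after the inner loop below. */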
3717 for (j = 0; j < ncopies; j++)
3719 /* 1. Create the vector pointer update chain. */
3720 if (j == 0)
3721 dataref_ptr = vect_create_data_ref_ptr (first_stmt,
3722 at_loop, offset,
3723 &dummy, &ptr_incr, false,
3724 &inv_p);
3725 else
3726 dataref_ptr =
3727 bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);
3729 for (i = 0; i < vec_num; i++)
3731 if (i > 0)
3732 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3733 NULL_TREE);
3735 /* 2. Create the vector-load in the loop. */
3736 switch (alignment_support_scheme)
3738 case dr_aligned:
3739 gcc_assert (aligned_access_p (first_dr));
3740 data_ref = build_simple_mem_ref (dataref_ptr);
3741 break;
3742 case dr_unaligned_supported:
3744 int mis = DR_MISALIGNMENT (first_dr);
3745 tree tmis = (mis == -1 ? size_zero_node : size_int (mis));
3747 tmis = size_binop (MULT_EXPR, tmis, size_int(BITS_PER_UNIT));
3748 data_ref =
3749 build2 (MISALIGNED_INDIRECT_REF, vectype, dataref_ptr, tmis);
3750 break;
3752 case dr_explicit_realign:
3754 tree ptr, bump;
3755 tree vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
3757 if (compute_in_loop)
3758 msq = vect_setup_realignment (first_stmt, gsi,
3759 &realignment_token,
3760 dr_explicit_realign,
3761 dataref_ptr, NULL);
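 /* Round DATAREF_PTR down to the nearest vector-aligned address
 (floor (p)) by masking off its low bits, and load the first aligned
 vector (msq) from it. */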
3763 new_stmt = gimple_build_assign_with_ops
3764 (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
3765 build_int_cst
3766 (TREE_TYPE (dataref_ptr),
3767 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
3768 ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
3769 gimple_assign_set_lhs (new_stmt, ptr);
3770 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3771 data_ref = build_simple_mem_ref (ptr);
3772 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3773 new_stmt = gimple_build_assign (vec_dest, data_ref);
3774 new_temp = make_ssa_name (vec_dest, new_stmt);
3775 gimple_assign_set_lhs (new_stmt, new_temp);
3776 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
3777 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
3778 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3779 msq = new_temp;
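 /* Advance the pointer by VS-1 scalar elements so that it points into the
 last vector-size chunk touched by the unaligned access, mask it down to
 the aligned address, and build the memory reference for the second
 aligned load (its result becomes lsq in step 3 below). */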
3781 bump = size_binop (MULT_EXPR, vs_minus_1,
3782 TYPE_SIZE_UNIT (scalar_type));
3783 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
3784 new_stmt = gimple_build_assign_with_ops
3785 (BIT_AND_EXPR, NULL_TREE, ptr,
3786 build_int_cst
3787 (TREE_TYPE (ptr),
3788 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
3789 ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
3790 gimple_assign_set_lhs (new_stmt, ptr);
3791 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3792 data_ref = build_simple_mem_ref (ptr);
3793 break;
3795 case dr_explicit_realign_optimized:
3796 new_stmt = gimple_build_assign_with_ops
3797 (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
3798 build_int_cst
3799 (TREE_TYPE (dataref_ptr),
3800 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
3801 new_temp = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
3802 gimple_assign_set_lhs (new_stmt, new_temp);
3803 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3804 data_ref = build_simple_mem_ref (new_temp);
3805 break;
3806 default:
3807 gcc_unreachable ();
3809 /* If accesses through a pointer to vectype do not alias the original
3810 memory reference we have a problem. This should never happen. */
3811 gcc_assert (alias_sets_conflict_p (get_alias_set (data_ref),
3812 get_alias_set (gimple_assign_rhs1 (stmt))));
3813 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3814 new_stmt = gimple_build_assign (vec_dest, data_ref);
3815 new_temp = make_ssa_name (vec_dest, new_stmt);
3816 gimple_assign_set_lhs (new_stmt, new_temp);
3817 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3818 mark_symbols_for_renaming (new_stmt);
3820 /* 3. Handle explicit realignment if necessary/supported. Create in
3821 loop: vec_dest = realign_load (msq, lsq, realignment_token) */
3822 if (alignment_support_scheme == dr_explicit_realign_optimized
3823 || alignment_support_scheme == dr_explicit_realign)
3825 tree tmp;
3827 lsq = gimple_assign_lhs (new_stmt);
3828 if (!realignment_token)
3829 realignment_token = dataref_ptr;
3830 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3831 tmp = build3 (REALIGN_LOAD_EXPR, vectype, msq, lsq,
3832 realignment_token);
3833 new_stmt = gimple_build_assign (vec_dest, tmp);
3834 new_temp = make_ssa_name (vec_dest, new_stmt);
3835 gimple_assign_set_lhs (new_stmt, new_temp);
3836 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3838 if (alignment_support_scheme == dr_explicit_realign_optimized)
3840 gcc_assert (phi);
3841 if (i == vec_num - 1 && j == ncopies - 1)
3842 add_phi_arg (phi, lsq, loop_latch_edge (containing_loop),
3843 UNKNOWN_LOCATION);
3844 msq = lsq;
3848 /* 4. Handle invariant-load. */
3849 if (inv_p && !bb_vinfo)
3851 gcc_assert (!strided_load);
3852 gcc_assert (nested_in_vect_loop_p (loop, stmt));
3853 if (j == 0)
3855 int k;
3856 tree t = NULL_TREE;
3857 tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);
3859 /* CHECKME: bitpos depends on endianness? */
3860 bitpos = bitsize_zero_node;
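 /* Extract the scalar value (a BIT_FIELD_REF at bit position zero of the
 value just loaded) and replicate it NUNITS times in a vector
 CONSTRUCTOR to build the invariant vector. */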
3861 vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
3862 bitsize, bitpos);
3863 vec_dest =
3864 vect_create_destination_var (scalar_dest, NULL_TREE);
3865 new_stmt = gimple_build_assign (vec_dest, vec_inv);
3866 new_temp = make_ssa_name (vec_dest, new_stmt);
3867 gimple_assign_set_lhs (new_stmt, new_temp);
3868 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3870 for (k = nunits - 1; k >= 0; --k)
3871 t = tree_cons (NULL_TREE, new_temp, t);
3872 /* FIXME: use build_constructor directly. */
3873 vec_inv = build_constructor_from_list (vectype, t);
3874 new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
3875 new_stmt = SSA_NAME_DEF_STMT (new_temp);
3877 else
3878 gcc_unreachable (); /* FORNOW. */
3881 /* Collect vector loads and later create their permutation in
3882 vect_transform_strided_load (). */
3883 if (strided_load || slp_perm)
3884 VEC_quick_push (tree, dr_chain, new_temp);
3886 /* Store vector loads in the corresponding SLP_NODE. */
3887 if (slp && !slp_perm)
3888 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
3891 if (slp && !slp_perm)
3892 continue;
3894 if (slp_perm)
3896 if (!vect_transform_slp_perm_load (stmt, dr_chain, gsi, vf,
3897 slp_node_instance, false))
3899 VEC_free (tree, heap, dr_chain);
3900 return false;
3903 else
3905 if (strided_load)
3907 if (!vect_transform_strided_load (stmt, dr_chain, group_size, gsi))
3908 return false;
3910 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3911 VEC_free (tree, heap, dr_chain);
3912 dr_chain = VEC_alloc (tree, heap, group_size);
3914 else
3916 if (j == 0)
3917 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3918 else
3919 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3920 prev_stmt_info = vinfo_for_stmt (new_stmt);
3925 if (dr_chain)
3926 VEC_free (tree, heap, dr_chain);
3928 return true;
3931 /* Function vect_is_simple_cond.
3933 Input:
3934 LOOP - the loop that is being vectorized.
3935 COND - Condition that is checked for simple use.
3937 Returns whether a COND can be vectorized. Checks whether
3938 the condition operands are supportable using vect_is_simple_use. */
3940 static bool
3941 vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo)
3943 tree lhs, rhs;
3944 tree def;
3945 enum vect_def_type dt;
3947 if (!COMPARISON_CLASS_P (cond))
3948 return false;
3950 lhs = TREE_OPERAND (cond, 0);
3951 rhs = TREE_OPERAND (cond, 1);
3953 if (TREE_CODE (lhs) == SSA_NAME)
3955 gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
3956 if (!vect_is_simple_use (lhs, loop_vinfo, NULL, &lhs_def_stmt, &def,
3957 &dt))
3958 return false;
3960 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
3961 && TREE_CODE (lhs) != FIXED_CST)
3962 return false;
3964 if (TREE_CODE (rhs) == SSA_NAME)
3966 gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
3967 if (!vect_is_simple_use (rhs, loop_vinfo, NULL, &rhs_def_stmt, &def,
3968 &dt))
3969 return false;
3971 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
3972 && TREE_CODE (rhs) != FIXED_CST)
3973 return false;
3975 return true;
3978 /* vectorizable_condition.
3980 Check if STMT is a conditional modify expression that can be vectorized.
3981 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3982 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
3983 at GSI.
3985 When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
3986 to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
3987 the else clause if it is 2).
3989 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
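 /* Illustratively, a scalar statement of the form
 x = a < b ? c : d
 is replaced by a single vector statement
 vx = VEC_COND_EXPR <va < vb, vc, vd>
 built from the vectorized condition operands and clauses. */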
3991 bool
3992 vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
3993 gimple *vec_stmt, tree reduc_def, int reduc_index)
3995 tree scalar_dest = NULL_TREE;
3996 tree vec_dest = NULL_TREE;
3997 tree op = NULL_TREE;
3998 tree cond_expr, then_clause, else_clause;
3999 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4000 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4001 tree vec_cond_lhs, vec_cond_rhs, vec_then_clause, vec_else_clause;
4002 tree vec_compare, vec_cond_expr;
4003 tree new_temp;
4004 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4005 enum machine_mode vec_mode;
4006 tree def;
4007 enum vect_def_type dt;
4008 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4009 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4010 enum tree_code code;
4012 /* FORNOW: unsupported in basic block SLP. */
4013 gcc_assert (loop_vinfo);
4015 gcc_assert (ncopies >= 1);
4016 if (ncopies > 1)
4017 return false; /* FORNOW */
4019 if (!STMT_VINFO_RELEVANT_P (stmt_info))
4020 return false;
4022 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4023 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
4024 && reduc_def))
4025 return false;
4027 /* FORNOW: SLP not supported. */
4028 if (STMT_SLP_TYPE (stmt_info))
4029 return false;
4031 /* FORNOW: not yet supported. */
4032 if (STMT_VINFO_LIVE_P (stmt_info))
4034 if (vect_print_dump_info (REPORT_DETAILS))
4035 fprintf (vect_dump, "value used after loop.");
4036 return false;
4039 /* Is this a vectorizable conditional operation? */
4040 if (!is_gimple_assign (stmt))
4041 return false;
4043 code = gimple_assign_rhs_code (stmt);
4045 if (code != COND_EXPR)
4046 return false;
4048 gcc_assert (gimple_assign_single_p (stmt));
4049 op = gimple_assign_rhs1 (stmt);
4050 cond_expr = TREE_OPERAND (op, 0);
4051 then_clause = TREE_OPERAND (op, 1);
4052 else_clause = TREE_OPERAND (op, 2);
4054 if (!vect_is_simple_cond (cond_expr, loop_vinfo))
4055 return false;
4057 /* We do not handle two different vector types for the condition
4058 and the values. */
4059 if (!types_compatible_p (TREE_TYPE (TREE_OPERAND (cond_expr, 0)),
4060 TREE_TYPE (vectype)))
4061 return false;
4063 if (TREE_CODE (then_clause) == SSA_NAME)
4065 gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
4066 if (!vect_is_simple_use (then_clause, loop_vinfo, NULL,
4067 &then_def_stmt, &def, &dt))
4068 return false;
4070 else if (TREE_CODE (then_clause) != INTEGER_CST
4071 && TREE_CODE (then_clause) != REAL_CST
4072 && TREE_CODE (then_clause) != FIXED_CST)
4073 return false;
4075 if (TREE_CODE (else_clause) == SSA_NAME)
4077 gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
4078 if (!vect_is_simple_use (else_clause, loop_vinfo, NULL,
4079 &else_def_stmt, &def, &dt))
4080 return false;
4082 else if (TREE_CODE (else_clause) != INTEGER_CST
4083 && TREE_CODE (else_clause) != REAL_CST
4084 && TREE_CODE (else_clause) != FIXED_CST)
4085 return false;
4088 vec_mode = TYPE_MODE (vectype);
4090 if (!vec_stmt)
4092 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
4093 return expand_vec_cond_expr_p (TREE_TYPE (op), vec_mode);
4096 /* Transform */
4098 /* Handle def. */
4099 scalar_dest = gimple_assign_lhs (stmt);
4100 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4102 /* Handle cond expr. */
4103 vec_cond_lhs =
4104 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0), stmt, NULL);
4105 vec_cond_rhs =
4106 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1), stmt, NULL);
4107 if (reduc_index == 1)
4108 vec_then_clause = reduc_def;
4109 else
4110 vec_then_clause = vect_get_vec_def_for_operand (then_clause, stmt, NULL);
4111 if (reduc_index == 2)
4112 vec_else_clause = reduc_def;
4113 else
4114 vec_else_clause = vect_get_vec_def_for_operand (else_clause, stmt, NULL);
4116 /* Arguments are ready. Create the new vector stmt. */
4117 vec_compare = build2 (TREE_CODE (cond_expr), vectype,
4118 vec_cond_lhs, vec_cond_rhs);
4119 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
4120 vec_compare, vec_then_clause, vec_else_clause);
4122 *vec_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
4123 new_temp = make_ssa_name (vec_dest, *vec_stmt);
4124 gimple_assign_set_lhs (*vec_stmt, new_temp);
4125 vect_finish_stmt_generation (stmt, *vec_stmt, gsi);
4127 return true;
4131 /* Make sure the statement is vectorizable. */
4133 bool
4134 vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
4136 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4137 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4138 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
4139 bool ok;
4140 tree scalar_type, vectype;
4142 if (vect_print_dump_info (REPORT_DETAILS))
4144 fprintf (vect_dump, "==> examining statement: ");
4145 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4148 if (gimple_has_volatile_ops (stmt))
4150 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4151 fprintf (vect_dump, "not vectorized: stmt has volatile operands");
4153 return false;
4156 /* Skip stmts that do not need to be vectorized. In loops this is expected
4157 to include:
4158 - the COND_EXPR which is the loop exit condition
4159 - any LABEL_EXPRs in the loop
4160 - computations that are used only for array indexing or loop control.
4161 In basic blocks we only analyze statements that are a part of some SLP
4162 instance; therefore, all the statements are relevant. */
4164 if (!STMT_VINFO_RELEVANT_P (stmt_info)
4165 && !STMT_VINFO_LIVE_P (stmt_info))
4167 if (vect_print_dump_info (REPORT_DETAILS))
4168 fprintf (vect_dump, "irrelevant.");
4170 return true;
4173 switch (STMT_VINFO_DEF_TYPE (stmt_info))
4175 case vect_internal_def:
4176 break;
4178 case vect_reduction_def:
4179 case vect_nested_cycle:
4180 gcc_assert (!bb_vinfo && (relevance == vect_used_in_outer
4181 || relevance == vect_used_in_outer_by_reduction
4182 || relevance == vect_unused_in_scope));
4183 break;
4185 case vect_induction_def:
4186 case vect_constant_def:
4187 case vect_external_def:
4188 case vect_unknown_def_type:
4189 default:
4190 gcc_unreachable ();
4193 if (bb_vinfo)
4195 gcc_assert (PURE_SLP_STMT (stmt_info));
4197 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
4198 if (vect_print_dump_info (REPORT_DETAILS))
4200 fprintf (vect_dump, "get vectype for scalar type: ");
4201 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4204 vectype = get_vectype_for_scalar_type (scalar_type);
4205 if (!vectype)
4207 if (vect_print_dump_info (REPORT_DETAILS))
4209 fprintf (vect_dump, "not SLPed: unsupported data-type ");
4210 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4212 return false;
4215 if (vect_print_dump_info (REPORT_DETAILS))
4217 fprintf (vect_dump, "vectype: ");
4218 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4221 STMT_VINFO_VECTYPE (stmt_info) = vectype;
4224 if (STMT_VINFO_RELEVANT_P (stmt_info))
4226 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
4227 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
4228 *need_to_vectorize = true;
4231 ok = true;
4232 if (!bb_vinfo
4233 && (STMT_VINFO_RELEVANT_P (stmt_info)
4234 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
4235 ok = (vectorizable_type_promotion (stmt, NULL, NULL, NULL)
4236 || vectorizable_type_demotion (stmt, NULL, NULL, NULL)
4237 || vectorizable_conversion (stmt, NULL, NULL, NULL)
4238 || vectorizable_operation (stmt, NULL, NULL, NULL)
4239 || vectorizable_assignment (stmt, NULL, NULL, NULL)
4240 || vectorizable_load (stmt, NULL, NULL, NULL, NULL)
4241 || vectorizable_call (stmt, NULL, NULL)
4242 || vectorizable_store (stmt, NULL, NULL, NULL)
4243 || vectorizable_reduction (stmt, NULL, NULL, NULL)
4244 || vectorizable_condition (stmt, NULL, NULL, NULL, 0));
4245 else
4247 if (bb_vinfo)
4248 ok = (vectorizable_operation (stmt, NULL, NULL, node)
4249 || vectorizable_assignment (stmt, NULL, NULL, node)
4250 || vectorizable_load (stmt, NULL, NULL, node, NULL)
4251 || vectorizable_store (stmt, NULL, NULL, node));
4254 if (!ok)
4256 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4258 fprintf (vect_dump, "not vectorized: relevant stmt not ");
4259 fprintf (vect_dump, "supported: ");
4260 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4263 return false;
4266 if (bb_vinfo)
4267 return true;
4269 /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
4270 need extra handling, except for vectorizable reductions. */
4271 if (STMT_VINFO_LIVE_P (stmt_info)
4272 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4273 ok = vectorizable_live_operation (stmt, NULL, NULL);
4275 if (!ok)
4277 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4279 fprintf (vect_dump, "not vectorized: live stmt not ");
4280 fprintf (vect_dump, "supported: ");
4281 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4284 return false;
4287 if (!PURE_SLP_STMT (stmt_info))
4289 /* Groups of strided accesses whose size is not a power of 2 are not
4290 vectorizable yet using loop-vectorization. Therefore, if this stmt
4291 feeds non-SLP-able stmts (i.e., this stmt has to be both SLPed and
4292 loop-based vectorized), the loop cannot be vectorized. */
4293 if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
4294 && exact_log2 (DR_GROUP_SIZE (vinfo_for_stmt (
4295 DR_GROUP_FIRST_DR (stmt_info)))) == -1)
4297 if (vect_print_dump_info (REPORT_DETAILS))
4299 fprintf (vect_dump, "not vectorized: the size of group "
4300 "of strided accesses is not a power of 2");
4301 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4304 return false;
4308 return true;
4312 /* Function vect_transform_stmt.
4314 Create a vectorized stmt to replace STMT, and insert it at BSI. */
4316 bool
4317 vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
4318 bool *strided_store, slp_tree slp_node,
4319 slp_instance slp_node_instance)
4321 bool is_store = false;
4322 gimple vec_stmt = NULL;
4323 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4324 gimple orig_stmt_in_pattern;
4325 bool done;
4327 switch (STMT_VINFO_TYPE (stmt_info))
4329 case type_demotion_vec_info_type:
4330 done = vectorizable_type_demotion (stmt, gsi, &vec_stmt, slp_node);
4331 gcc_assert (done);
4332 break;
4334 case type_promotion_vec_info_type:
4335 done = vectorizable_type_promotion (stmt, gsi, &vec_stmt, slp_node);
4336 gcc_assert (done);
4337 break;
4339 case type_conversion_vec_info_type:
4340 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
4341 gcc_assert (done);
4342 break;
4344 case induc_vec_info_type:
4345 gcc_assert (!slp_node);
4346 done = vectorizable_induction (stmt, gsi, &vec_stmt);
4347 gcc_assert (done);
4348 break;
4350 case op_vec_info_type:
4351 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
4352 gcc_assert (done);
4353 break;
4355 case assignment_vec_info_type:
4356 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
4357 gcc_assert (done);
4358 break;
4360 case load_vec_info_type:
4361 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
4362 slp_node_instance);
4363 gcc_assert (done);
4364 break;
4366 case store_vec_info_type:
4367 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
4368 gcc_assert (done);
4369 if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && !slp_node)
4371 /* In case of interleaving, the whole chain is vectorized when the
4372 last store in the chain is reached. Store stmts before the last
4373 one are skipped, and their stmt_vec_info shouldn't be freed
4374 meanwhile. */
4375 *strided_store = true;
4376 if (STMT_VINFO_VEC_STMT (stmt_info))
4377 is_store = true;
4379 else
4380 is_store = true;
4381 break;
4383 case condition_vec_info_type:
4384 gcc_assert (!slp_node);
4385 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0);
4386 gcc_assert (done);
4387 break;
4389 case call_vec_info_type:
4390 gcc_assert (!slp_node);
4391 done = vectorizable_call (stmt, gsi, &vec_stmt);
4392 break;
4394 case reduc_vec_info_type:
4395 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
4396 gcc_assert (done);
4397 break;
4399 default:
4400 if (!STMT_VINFO_LIVE_P (stmt_info))
4402 if (vect_print_dump_info (REPORT_DETAILS))
4403 fprintf (vect_dump, "stmt not supported.");
4404 gcc_unreachable ();
4408 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
4409 is being vectorized, but outside the immediately enclosing loop. */
4410 if (vec_stmt
4411 && STMT_VINFO_LOOP_VINFO (stmt_info)
4412 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
4413 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
4414 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
4415 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
4416 || STMT_VINFO_RELEVANT (stmt_info) ==
4417 vect_used_in_outer_by_reduction))
4419 struct loop *innerloop = LOOP_VINFO_LOOP (
4420 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
4421 imm_use_iterator imm_iter;
4422 use_operand_p use_p;
4423 tree scalar_dest;
4424 gimple exit_phi;
4426 if (vect_print_dump_info (REPORT_DETAILS))
4427 fprintf (vect_dump, "Record the vdef for outer-loop vectorization.");
4429 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
4430 (to be used when vectorizing outer-loop stmts that use the DEF of
4431 STMT). */
4432 if (gimple_code (stmt) == GIMPLE_PHI)
4433 scalar_dest = PHI_RESULT (stmt);
4434 else
4435 scalar_dest = gimple_assign_lhs (stmt);
4437 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
4439 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
4441 exit_phi = USE_STMT (use_p);
4442 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
4447 /* Handle stmts whose DEF is used outside the loop-nest that is
4448 being vectorized. */
4449 if (STMT_VINFO_LIVE_P (stmt_info)
4450 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4452 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
4453 gcc_assert (done);
4456 if (vec_stmt)
4458 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
4459 orig_stmt_in_pattern = STMT_VINFO_RELATED_STMT (stmt_info);
4460 if (orig_stmt_in_pattern)
4462 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt_in_pattern);
4463 /* STMT was inserted by the vectorizer to replace a computation idiom.
4464 ORIG_STMT_IN_PATTERN is a stmt in the original sequence that
4465 computed this idiom. We need to record a pointer to VEC_STMT in
4466 the stmt_info of ORIG_STMT_IN_PATTERN. See more details in the
4467 documentation of vect_pattern_recog. */
4468 if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
4470 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
4471 STMT_VINFO_VEC_STMT (stmt_vinfo) = vec_stmt;
4476 return is_store;
4480 /* Remove a group of stores (for SLP or interleaving), free their
4481 stmt_vec_info. */
4483 void
4484 vect_remove_stores (gimple first_stmt)
4486 gimple next = first_stmt;
4487 gimple tmp;
4488 gimple_stmt_iterator next_si;
4490 while (next)
4492 /* Free the attached stmt_vec_info and remove the stmt. */
4493 next_si = gsi_for_stmt (next);
4494 gsi_remove (&next_si, true);
4495 tmp = DR_GROUP_NEXT_DR (vinfo_for_stmt (next));
4496 free_stmt_vec_info (next);
4497 next = tmp;
4502 /* Function new_stmt_vec_info.
4504 Create and initialize a new stmt_vec_info struct for STMT. */
4506 stmt_vec_info
4507 new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
4508 bb_vec_info bb_vinfo)
4510 stmt_vec_info res;
4511 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
4513 STMT_VINFO_TYPE (res) = undef_vec_info_type;
4514 STMT_VINFO_STMT (res) = stmt;
4515 STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
4516 STMT_VINFO_BB_VINFO (res) = bb_vinfo;
4517 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
4518 STMT_VINFO_LIVE_P (res) = false;
4519 STMT_VINFO_VECTYPE (res) = NULL;
4520 STMT_VINFO_VEC_STMT (res) = NULL;
4521 STMT_VINFO_VECTORIZABLE (res) = true;
4522 STMT_VINFO_IN_PATTERN_P (res) = false;
4523 STMT_VINFO_RELATED_STMT (res) = NULL;
4524 STMT_VINFO_DATA_REF (res) = NULL;
4526 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
4527 STMT_VINFO_DR_OFFSET (res) = NULL;
4528 STMT_VINFO_DR_INIT (res) = NULL;
4529 STMT_VINFO_DR_STEP (res) = NULL;
4530 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
4532 if (gimple_code (stmt) == GIMPLE_PHI
4533 && is_loop_header_bb_p (gimple_bb (stmt)))
4534 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
4535 else
4536 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
4538 STMT_VINFO_SAME_ALIGN_REFS (res) = VEC_alloc (dr_p, heap, 5);
4539 STMT_VINFO_INSIDE_OF_LOOP_COST (res) = 0;
4540 STMT_VINFO_OUTSIDE_OF_LOOP_COST (res) = 0;
4541 STMT_SLP_TYPE (res) = loop_vect;
4542 DR_GROUP_FIRST_DR (res) = NULL;
4543 DR_GROUP_NEXT_DR (res) = NULL;
4544 DR_GROUP_SIZE (res) = 0;
4545 DR_GROUP_STORE_COUNT (res) = 0;
4546 DR_GROUP_GAP (res) = 0;
4547 DR_GROUP_SAME_DR_STMT (res) = NULL;
4548 DR_GROUP_READ_WRITE_DEPENDENCE (res) = false;
4550 return res;
4554 /* Create a vector for holding stmt_vec_infos. */
4556 void
4557 init_stmt_vec_info_vec (void)
4559 gcc_assert (!stmt_vec_info_vec);
4560 stmt_vec_info_vec = VEC_alloc (vec_void_p, heap, 50);
4564 /* Free the stmt_vec_info vector. */
4566 void
4567 free_stmt_vec_info_vec (void)
4569 gcc_assert (stmt_vec_info_vec);
4570 VEC_free (vec_void_p, heap, stmt_vec_info_vec);
4574 /* Free stmt vectorization related info. */
4576 void
4577 free_stmt_vec_info (gimple stmt)
4579 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4581 if (!stmt_info)
4582 return;
4584 VEC_free (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
4585 set_vinfo_for_stmt (stmt, NULL);
4586 free (stmt_info);
4590 /* Function get_vectype_for_scalar_type.
4592 Returns the vector type corresponding to SCALAR_TYPE as supported
4593 by the target. */
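 /* For example (illustrative): on a target whose UNITS_PER_SIMD_WORD is
 16 bytes, a 4-byte 'int' yields a vector type of 4 ints and an 8-byte
 'double' a vector type of 2 doubles; scalar types at least as wide as
 the SIMD word, or whose alignment exceeds their size, yield NULL_TREE. */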
4595 tree
4596 get_vectype_for_scalar_type (tree scalar_type)
4598 enum machine_mode inner_mode = TYPE_MODE (scalar_type);
4599 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
4600 int nunits;
4601 tree vectype;
4603 if (nbytes == 0 || nbytes >= UNITS_PER_SIMD_WORD (inner_mode))
4604 return NULL_TREE;
4606 /* We can't build a vector type of elements with alignment bigger than
4607 their size. */
4608 if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
4609 return NULL_TREE;
4611 /* If we'd build a vector type of elements whose mode precision doesn't
4612 match their type's precision we'll get mismatched types on vector
4613 extracts via BIT_FIELD_REFs. This effectively means we disable
4614 vectorization of bool and/or enum types in some languages. */
4615 if (INTEGRAL_TYPE_P (scalar_type)
4616 && GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type))
4617 return NULL_TREE;
4619 /* FORNOW: Only a single vector size per mode (UNITS_PER_SIMD_WORD)
4620 is expected. */
4621 nunits = UNITS_PER_SIMD_WORD (inner_mode) / nbytes;
4623 vectype = build_vector_type (scalar_type, nunits);
4624 if (vect_print_dump_info (REPORT_DETAILS))
4626 fprintf (vect_dump, "get vectype with %d units of type ", nunits);
4627 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4630 if (!vectype)
4631 return NULL_TREE;
4633 if (vect_print_dump_info (REPORT_DETAILS))
4635 fprintf (vect_dump, "vectype: ");
4636 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4639 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
4640 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
4642 if (vect_print_dump_info (REPORT_DETAILS))
4643 fprintf (vect_dump, "mode not supported by target.");
4644 return NULL_TREE;
4647 return vectype;
4650 /* Function get_same_sized_vectype
4652 Returns a vector type corresponding to SCALAR_TYPE with the same
4653 size as VECTOR_TYPE, if supported by the target. */
4655 tree
4656 get_same_sized_vectype (tree scalar_type, tree vector_type ATTRIBUTE_UNUSED)
4658 return get_vectype_for_scalar_type (scalar_type);
4661 /* Function vect_is_simple_use.
4663 Input:
4664 LOOP_VINFO - the vect info of the loop that is being vectorized.
4665 BB_VINFO - the vect info of the basic block that is being vectorized.
4666 OPERAND - operand of a stmt in the loop or bb.
4667 DEF - the defining stmt in case OPERAND is an SSA_NAME.
4669 Returns whether a stmt with OPERAND can be vectorized.
4670 For loops, supportable operands are constants, loop invariants, and operands
4671 that are defined by the current iteration of the loop. Unsupportable
4672 operands are those that are defined by a previous iteration of the loop (as
4673 is the case in reduction/induction computations).
4674 For basic blocks, supportable operands are constants and bb invariants.
4675 For now, operands defined outside the basic block are not supported. */
4677 bool
4678 vect_is_simple_use (tree operand, loop_vec_info loop_vinfo,
4679 bb_vec_info bb_vinfo, gimple *def_stmt,
4680 tree *def, enum vect_def_type *dt)
4682 basic_block bb;
4683 stmt_vec_info stmt_vinfo;
4684 struct loop *loop = NULL;
4686 if (loop_vinfo)
4687 loop = LOOP_VINFO_LOOP (loop_vinfo);
4689 *def_stmt = NULL;
4690 *def = NULL_TREE;
4692 if (vect_print_dump_info (REPORT_DETAILS))
4694 fprintf (vect_dump, "vect_is_simple_use: operand ");
4695 print_generic_expr (vect_dump, operand, TDF_SLIM);
4698 if (TREE_CODE (operand) == INTEGER_CST || TREE_CODE (operand) == REAL_CST)
4700 *dt = vect_constant_def;
4701 return true;
4704 if (is_gimple_min_invariant (operand))
4706 *def = operand;
4707 *dt = vect_external_def;
4708 return true;
4711 if (TREE_CODE (operand) == PAREN_EXPR)
4713 if (vect_print_dump_info (REPORT_DETAILS))
4714 fprintf (vect_dump, "non-associatable copy.");
4715 operand = TREE_OPERAND (operand, 0);
4718 if (TREE_CODE (operand) != SSA_NAME)
4720 if (vect_print_dump_info (REPORT_DETAILS))
4721 fprintf (vect_dump, "not ssa-name.");
4722 return false;
4725 *def_stmt = SSA_NAME_DEF_STMT (operand);
4726 if (*def_stmt == NULL)
4728 if (vect_print_dump_info (REPORT_DETAILS))
4729 fprintf (vect_dump, "no def_stmt.");
4730 return false;
4733 if (vect_print_dump_info (REPORT_DETAILS))
4735 fprintf (vect_dump, "def_stmt: ");
4736 print_gimple_stmt (vect_dump, *def_stmt, 0, TDF_SLIM);
4739 /* Empty stmt is expected only in case of a function argument.
4740 (Otherwise - we expect a phi_node or a GIMPLE_ASSIGN). */
4741 if (gimple_nop_p (*def_stmt))
4743 *def = operand;
4744 *dt = vect_external_def;
4745 return true;
4748 bb = gimple_bb (*def_stmt);
4750 if ((loop && !flow_bb_inside_loop_p (loop, bb))
4751 || (!loop && bb != BB_VINFO_BB (bb_vinfo))
4752 || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
4753 *dt = vect_external_def;
4754 else
4756 stmt_vinfo = vinfo_for_stmt (*def_stmt);
4757 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
4760 if (*dt == vect_unknown_def_type)
4762 if (vect_print_dump_info (REPORT_DETAILS))
4763 fprintf (vect_dump, "Unsupported pattern.");
4764 return false;
4767 if (vect_print_dump_info (REPORT_DETAILS))
4768 fprintf (vect_dump, "type of def: %d.",*dt);
4770 switch (gimple_code (*def_stmt))
4772 case GIMPLE_PHI:
4773 *def = gimple_phi_result (*def_stmt);
4774 break;
4776 case GIMPLE_ASSIGN:
4777 *def = gimple_assign_lhs (*def_stmt);
4778 break;
4780 case GIMPLE_CALL:
4781 *def = gimple_call_lhs (*def_stmt);
4782 if (*def != NULL)
4783 break;
4784 /* FALLTHRU */
4785 default:
4786 if (vect_print_dump_info (REPORT_DETAILS))
4787 fprintf (vect_dump, "unsupported defining stmt: ");
4788 return false;
4791 return true;
4794 /* Function vect_is_simple_use_1.
4796 Same as vect_is_simple_use but also determines the vector operand
4797 type of OPERAND and stores it to *VECTYPE. If the definition of
4798 OPERAND is vect_uninitialized_def, vect_constant_def or
4799 vect_external_def *VECTYPE will be set to NULL_TREE and the caller
4800 is responsible for computing the best suited vector type for the
4801 scalar operand. */
4803 bool
4804 vect_is_simple_use_1 (tree operand, loop_vec_info loop_vinfo,
4805 bb_vec_info bb_vinfo, gimple *def_stmt,
4806 tree *def, enum vect_def_type *dt, tree *vectype)
4808 if (!vect_is_simple_use (operand, loop_vinfo, bb_vinfo, def_stmt, def, dt))
4809 return false;
4811 /* Now get a vector type if the def is internal, otherwise supply
4812 NULL_TREE and leave it up to the caller to figure out a proper
4813 type for the use stmt. */
4814 if (*dt == vect_internal_def
4815 || *dt == vect_induction_def
4816 || *dt == vect_reduction_def
4817 || *dt == vect_double_reduction_def
4818 || *dt == vect_nested_cycle)
4820 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
4821 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
4822 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
4823 *vectype = STMT_VINFO_VECTYPE (stmt_info);
4824 gcc_assert (*vectype != NULL_TREE);
4826 else if (*dt == vect_uninitialized_def
4827 || *dt == vect_constant_def
4828 || *dt == vect_external_def)
4829 *vectype = NULL_TREE;
4830 else
4831 gcc_unreachable ();
4833 return true;
4837 /* Function supportable_widening_operation
4839 Check whether an operation represented by the code CODE is a
4840 widening operation that is supported by the target platform in
4841 vector form (i.e., when operating on arguments of type VECTYPE_IN
4842 producing a result of type VECTYPE_OUT).
4844 Widening operations we currently support are NOP (CONVERT), FLOAT
4845 and WIDEN_MULT. This function checks if these operations are supported
4846 by the target platform either directly (via vector tree-codes), or via
4847 target builtins.
4849 Output:
4850 - CODE1 and CODE2 are codes of vector operations to be used when
4851 vectorizing the operation, if available.
4852 - DECL1 and DECL2 are decls of target builtin functions to be used
4853 when vectorizing the operation, if available. In this case,
4854 CODE1 and CODE2 are CALL_EXPR.
4855 - MULTI_STEP_CVT determines the number of required intermediate steps in
4856 case of multi-step conversion (like char->short->int - in that case
4857 MULTI_STEP_CVT will be 1).
4858 - INTERM_TYPES contains the intermediate type required to perform the
4859 widening operation (short in the above example). */
4861 bool
4862 supportable_widening_operation (enum tree_code code, gimple stmt,
4863 tree vectype_out, tree vectype_in,
4864 tree *decl1, tree *decl2,
4865 enum tree_code *code1, enum tree_code *code2,
4866 int *multi_step_cvt,
4867 VEC (tree, heap) **interm_types)
4869 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4870 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
4871 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
4872 bool ordered_p;
4873 enum machine_mode vec_mode;
4874 enum insn_code icode1, icode2;
4875 optab optab1, optab2;
4876 tree vectype = vectype_in;
4877 tree wide_vectype = vectype_out;
4878 enum tree_code c1, c2;
4880 /* The result of a vectorized widening operation usually requires two vectors
4881 (because the widened results do not fit in one vector). The generated
4882 vector results would normally be expected to be generated in the same
4883 order as in the original scalar computation, i.e. if 8 results are
4884 generated in each vector iteration, they are to be organized as follows:
4885 vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].
4887 However, in the special case that the result of the widening operation is
4888 used in a reduction computation only, the order doesn't matter (because
4889 when vectorizing a reduction we change the order of the computation).
4890 Some targets can take advantage of this and generate more efficient code.
4891 For example, targets like Altivec, that support widen_mult using a sequence
4892 of {mult_even,mult_odd} generate the following vectors:
4893 vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8].
4895 When vectorizing outer-loops, we execute the inner-loop sequentially
4896 (each vectorized inner-loop iteration contributes to VF outer-loop
4897 iterations in parallel). We therefore don't allow the order of the
4898 computation in the inner-loop to be changed during outer-loop vectorization. */
4900 if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
4901 && !nested_in_vect_loop_p (vect_loop, stmt))
4902 ordered_p = false;
4903 else
4904 ordered_p = true;
4906 if (!ordered_p
4907 && code == WIDEN_MULT_EXPR
4908 && targetm.vectorize.builtin_mul_widen_even
4909 && targetm.vectorize.builtin_mul_widen_even (vectype)
4910 && targetm.vectorize.builtin_mul_widen_odd
4911 && targetm.vectorize.builtin_mul_widen_odd (vectype))
4913 if (vect_print_dump_info (REPORT_DETAILS))
4914 fprintf (vect_dump, "Unordered widening operation detected.");
4916 *code1 = *code2 = CALL_EXPR;
4917 *decl1 = targetm.vectorize.builtin_mul_widen_even (vectype);
4918 *decl2 = targetm.vectorize.builtin_mul_widen_odd (vectype);
4919 return true;
4922 switch (code)
4924 case WIDEN_MULT_EXPR:
4925 if (BYTES_BIG_ENDIAN)
4927 c1 = VEC_WIDEN_MULT_HI_EXPR;
4928 c2 = VEC_WIDEN_MULT_LO_EXPR;
4930 else
4932 c2 = VEC_WIDEN_MULT_HI_EXPR;
4933 c1 = VEC_WIDEN_MULT_LO_EXPR;
4935 break;
4937 CASE_CONVERT:
4938 if (BYTES_BIG_ENDIAN)
4940 c1 = VEC_UNPACK_HI_EXPR;
4941 c2 = VEC_UNPACK_LO_EXPR;
4943 else
4945 c2 = VEC_UNPACK_HI_EXPR;
4946 c1 = VEC_UNPACK_LO_EXPR;
4948 break;
4950 case FLOAT_EXPR:
4951 if (BYTES_BIG_ENDIAN)
4953 c1 = VEC_UNPACK_FLOAT_HI_EXPR;
4954 c2 = VEC_UNPACK_FLOAT_LO_EXPR;
4956 else
4958 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
4959 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
4961 break;
4963 case FIX_TRUNC_EXPR:
4964 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
4965 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
4966 computing the operation. */
4967 return false;
4969 default:
4970 gcc_unreachable ();
4973 if (code == FIX_TRUNC_EXPR)
4975 /* The signedness is determined from output operand. */
4976 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
4977 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
4979 else
4981 optab1 = optab_for_tree_code (c1, vectype, optab_default);
4982 optab2 = optab_for_tree_code (c2, vectype, optab_default);
4985 if (!optab1 || !optab2)
4986 return false;
4988 vec_mode = TYPE_MODE (vectype);
4989 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
4990 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
4991 return false;
4993 /* Check if it's a multi-step conversion that can be done using intermediate
4994 types. */
4995 if (insn_data[icode1].operand[0].mode != TYPE_MODE (wide_vectype)
4996 || insn_data[icode2].operand[0].mode != TYPE_MODE (wide_vectype))
4998 int i;
4999 tree prev_type = vectype, intermediate_type;
5000 enum machine_mode intermediate_mode, prev_mode = vec_mode;
5001 optab optab3, optab4;
5003 if (!CONVERT_EXPR_CODE_P (code))
5004 return false;
5006 *code1 = c1;
5007 *code2 = c2;
5009 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
5010 intermediate steps in the promotion sequence. We try MAX_INTERM_CVT_STEPS
5011 steps to get to WIDE_VECTYPE, and fail if we do not. */
5012 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
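 /* Walk up to MAX_INTERM_CVT_STEPS intermediate types (e.g. short when
 widening char to int), checking at each step that the hi/lo conversion
 optabs are supported for both the previous mode and the intermediate
 mode, until the produced mode matches WIDE_VECTYPE. */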
5013 for (i = 0; i < 3; i++)
5015 intermediate_mode = insn_data[icode1].operand[0].mode;
5016 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
5017 TYPE_UNSIGNED (prev_type));
5018 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
5019 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
5021 if (!optab3 || !optab4
5022 || ((icode1 = optab_handler (optab1, prev_mode))
5023 == CODE_FOR_nothing)
5024 || insn_data[icode1].operand[0].mode != intermediate_mode
5025 || ((icode2 = optab_handler (optab2, prev_mode))
5026 == CODE_FOR_nothing)
5027 || insn_data[icode2].operand[0].mode != intermediate_mode
5028 || ((icode1 = optab_handler (optab3, intermediate_mode))
5029 == CODE_FOR_nothing)
5030 || ((icode2 = optab_handler (optab4, intermediate_mode))
5031 == CODE_FOR_nothing))
5032 return false;
5034 VEC_quick_push (tree, *interm_types, intermediate_type);
5035 (*multi_step_cvt)++;
5037 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
5038 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
5039 return true;
5041 prev_type = intermediate_type;
5042 prev_mode = intermediate_mode;
5045 return false;
5048 *code1 = c1;
5049 *code2 = c2;
5050 return true;
5054 /* Function supportable_narrowing_operation
5056 Check whether an operation represented by the code CODE is a
5057 narrowing operation that is supported by the target platform in
5058 vector form (i.e., when operating on arguments of type VECTYPE_IN
5059 and producing a result of type VECTYPE_OUT).
5061 Narrowing operations we currently support are NOP (CONVERT) and
5062 FIX_TRUNC. This function checks if these operations are supported by
5063 the target platform directly via vector tree-codes.
5065 Output:
5066 - CODE1 is the code of a vector operation to be used when
5067 vectorizing the operation, if available.
5068 - MULTI_STEP_CVT determines the number of required intermediate steps in
5069 case of multi-step conversion (like int->short->char - in that case
5070 MULTI_STEP_CVT will be 1).
5071 - INTERM_TYPES contains the intermediate type required to perform the
5072 narrowing operation (short in the above example). */
5074 bool
5075 supportable_narrowing_operation (enum tree_code code,
5076 tree vectype_out, tree vectype_in,
5077 enum tree_code *code1, int *multi_step_cvt,
5078 VEC (tree, heap) **interm_types)
5080 enum machine_mode vec_mode;
5081 enum insn_code icode1;
5082 optab optab1, interm_optab;
5083 tree vectype = vectype_in;
5084 tree narrow_vectype = vectype_out;
5085 enum tree_code c1;
5086 tree intermediate_type, prev_type;
5087 int i;
5089 switch (code)
5091 CASE_CONVERT:
5092 c1 = VEC_PACK_TRUNC_EXPR;
5093 break;
5095 case FIX_TRUNC_EXPR:
5096 c1 = VEC_PACK_FIX_TRUNC_EXPR;
5097 break;
5099 case FLOAT_EXPR:
5100 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
5101 tree code and optabs used for computing the operation. */
5102 return false;
5104 default:
5105 gcc_unreachable ();
5108 if (code == FIX_TRUNC_EXPR)
5109 /* The signedness is determined from output operand. */
5110 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
5111 else
5112 optab1 = optab_for_tree_code (c1, vectype, optab_default);
5114 if (!optab1)
5115 return false;
5117 vec_mode = TYPE_MODE (vectype);
5118 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
5119 return false;
5121 /* Check if it's a multi-step conversion that can be done using intermediate
5122 types. */
5123 if (insn_data[icode1].operand[0].mode != TYPE_MODE (narrow_vectype))
5125 enum machine_mode intermediate_mode, prev_mode = vec_mode;
5127 *code1 = c1;
5128 prev_type = vectype;
5129 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
5130 intermediate steps in the narrowing sequence. We try MAX_INTERM_CVT_STEPS
5131 steps to get to NARROW_VECTYPE, and fail if we do not. */
5132 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
5133 for (i = 0; i < 3; i++)
5135 intermediate_mode = insn_data[icode1].operand[0].mode;
5136 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
5137 TYPE_UNSIGNED (prev_type));
5138 interm_optab = optab_for_tree_code (c1, intermediate_type,
5139 optab_default);
5140 if (!interm_optab
5141 || ((icode1 = optab_handler (optab1, prev_mode))
5142 == CODE_FOR_nothing)
5143 || insn_data[icode1].operand[0].mode != intermediate_mode
5144 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
5145 == CODE_FOR_nothing))
5146 return false;
5148 VEC_quick_push (tree, *interm_types, intermediate_type);
5149 (*multi_step_cvt)++;
5151 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
5152 return true;
5154 prev_type = intermediate_type;
5155 prev_mode = intermediate_mode;
5158 return false;
5161 *code1 = c1;
5162 return true;