/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "target.h"
#include "basic-block.h"
#include "tree-pretty-print.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "cfgloop.h"
#include "cfglayout.h"
#include "expr.h"
#include "recog.h"
#include "optabs.h"
#include "diagnostic-core.h"
#include "tree-vectorizer.h"
#include "langhooks.h"
/* Return a variable of type ELEM_TYPE[NELEMS].  */

static tree
create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
                         "vect_array");
}
/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT and the vector is associated
   with scalar destination SCALAR_DEST.  */

static tree
read_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
                   tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;
  gimple new_stmt;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
                      build_int_cst (size_type_node, n),
                      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
  mark_symbols_for_renaming (new_stmt);

  return vect_name;
}
/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT.  */

static void
write_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree vect,
                    tree array, unsigned HOST_WIDE_INT n)
{
  tree array_ref;
  gimple new_stmt;

  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
                      build_int_cst (size_type_node, n),
                      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
  mark_symbols_for_renaming (new_stmt);
}
/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

static tree
create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
{
  struct ptr_info_def *pi;
  tree mem_ref, alias_ptr_type;

  alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  pi = get_ptr_info (ptr);
  pi->align = TYPE_ALIGN_UNIT (type);
  pi->misalign = 0;
  return mem_ref;
}
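/* For illustration (names are hypothetical, not from the surrounding code):
   given a pointer P built for a group whose first data-ref is A[I], the
   call create_array_ref (array_type, P, dr_of_A_I) yields a MEM_REF that
   dereferences P at offset 0 but carries the alias type of A[I], so the
   vector access keeps the original reference's aliasing information while
   P is annotated as aligned to the (vector) array type.  */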
/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt,
                    enum vect_relevant relevant, bool live_p)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "mark relevant %d, live %d.", relevant, live_p);

  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      gimple pattern_stmt;

      /* This is the last stmt in a sequence that was detected as a
         pattern that can potentially be vectorized.  Don't mark the stmt
         as relevant/live because it's not going to be vectorized.
         Instead mark the pattern-stmt that replaces it.  */

      pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "last stmt in pattern. don't mark relevant/live.");
      stmt_info = vinfo_for_stmt (pattern_stmt);
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
      save_relevant = STMT_VINFO_RELEVANT (stmt_info);
      save_live_p = STMT_VINFO_LIVE_P (stmt_info);
      stmt = pattern_stmt;
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "already marked relevant/live.");
      return;
    }

  VEC_safe_push (gimple, heap, *worklist, stmt);
}
/* Function vect_stmt_relevant_p.

   Return true if STMT, in a loop that is represented by LOOP_VINFO, is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - it is a control stmt in the loop (other than the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */

static bool
vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
                      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
         != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt))
      {
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "vec_stmt_relevant_p: stmt has vdefs.");
        *relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
        {
          basic_block bb = gimple_bb (USE_STMT (use_p));
          if (!flow_bb_inside_loop_p (loop, bb))
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "vec_stmt_relevant_p: used out of loop.");

              if (is_gimple_debug (USE_STMT (use_p)))
                continue;

              /* We expect all such uses to be in the loop exit phis
                 (because of loop closed form).  */
              gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
              gcc_assert (bb == single_exit (loop)->dest);

              *live_p = true;
            }
        }
    }

  return (*live_p || *relevant);
}
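/* For illustration (hypothetical loop, not from the original sources):

       for (i = 0; i < n; i++)
         { sum = sum + a[i];  a[i] = x; }
       last = sum;

   the reduction stmt is "live" because SUM is used outside the loop (in the
   exit phi, given loop-closed SSA), while the store a[i] = x is "relevant"
   because it has a vdef.  */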
/* Function exist_non_indexing_operands_for_use_p.

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref.  FORNOW this means that it is in one of
     the following forms:
     -1- ARRAY_REF = var
     -2- var = ARRAY_REF
     (This should have been verified in analyze_data_refs.)

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))
    return false;
  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}
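/* Worked example (illustrative SSA names): for the store  a[i_5] = x_7,
   exist_non_indexing_operands_for_use_p (x_7, stmt) returns true, because
   x_7 is the stored value, whereas the same query for i_5 returns false:
   i_5 appears only inside the ARRAY_REF, i.e. it is used purely for
   address computation and need not be vectorized itself.  */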
/* Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
       STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array
     indexing), which does not need to be directly vectorized, then the
     liveness/relevance of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
     skip DEF_STMT because it has already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
     be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */

static bool
process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
             enum vect_relevant relevant, VEC(gimple,heap) **worklist)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, loop_vinfo, NULL, &def_stmt, &def, &dt))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: unsupported use in stmt.");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "def_stmt is out of loop.");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "reduc-stmt defining reduc-phi in the same nest.");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
        dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
                  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
        outer-loop-header-bb:
                d = def_stmt
        inner-loop:
                stmt # use (d)
        outer-loop-tail-bb:
                ...  */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "outer-loop def-stmt defining inner-loop stmt.");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
                     vect_used_in_scope : vect_unused_in_scope;
          break;

        case vect_used_in_outer_by_reduction:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_by_reduction;
          break;

        case vect_used_in_outer:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_in_scope;
          break;

        case vect_used_in_scope:
          break;

        default:
          gcc_unreachable ();
        }
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
        outer-loop-header-bb:
                ...
        inner-loop:
                d = def_stmt
        outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
                stmt # use (d)  */
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "inner-loop def-stmt defining outer-loop stmt.");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
                      || STMT_VINFO_DEF_TYPE (stmt_vinfo)
                         == vect_double_reduction_def) ?
                     vect_used_in_outer_by_reduction : vect_unused_in_scope;
          break;

        case vect_used_by_reduction:
          relevant = vect_used_in_outer_by_reduction;
          break;

        case vect_used_in_scope:
          relevant = vect_used_in_outer;
          break;

        default:
          gcc_unreachable ();
        }
    }

  vect_mark_relevant (worklist, def_stmt, relevant, live_p);
  return true;
}
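/* For illustration (hypothetical nest): with

       outer-loop-header:  d = n * 4
       inner-loop:         s = d + j   # use (d)

   the use of D in the inner-loop stmt hits case 3a: D's def-stmt lives in
   the outer loop, so the relevance value seen in the inner scope is
   translated before D's def-stmt is marked and queued (e.g. a
   vect_used_in_outer use becomes vect_used_in_scope for the outer stmt).  */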
/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.    T1 = a[T0]

   3.    j = j + 1

   Stmts 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */

bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  VEC(gimple,heap) *worklist;
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple stmt;
  unsigned int i;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple phi;
  bool live_p;
  enum vect_relevant relevant, tmp_relevant;
  enum vect_def_type def_type;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_mark_stmts_to_be_vectorized ===");

  worklist = VEC_alloc (gimple, heap, 64);

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "init: phi relevant? ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }

          if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, phi, relevant, live_p);
        }
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          stmt = gsi_stmt (si);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "init: stmt relevant? ");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }

          if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, stmt, relevant, live_p);
        }
    }

  /* 2. Process worklist.  */
  while (VEC_length (gimple, worklist) > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = VEC_pop (gimple, worklist);
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "worklist: examine stmt: ");
          print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
        }

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines
         it (DEF_STMT) as relevant/irrelevant and live/dead according to the
         liveness and relevance properties of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
      live_p = STMT_VINFO_LIVE_P (stmt_vinfo);

      /* Generally, the liveness and relevance properties of STMT are
         propagated as is to the DEF_STMTs of its USEs:
           live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
           relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)

         One exception is when STMT has been identified as defining a
         reduction variable; in this case we set the liveness/relevance
         as follows:
           live_p = false
           relevant = vect_used_by_reduction
         This is because we distinguish between two kinds of relevant stmts -
         those that are used by a reduction computation, and those that are
         (also) used by a regular computation.  This allows us later on to
         identify stmts that are used solely by a reduction, and therefore the
         order of the results that they produce does not have to be kept.  */

      def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
      tmp_relevant = relevant;
      switch (def_type)
        {
        case vect_reduction_def:
          switch (tmp_relevant)
            {
            case vect_unused_in_scope:
              relevant = vect_used_by_reduction;
              break;

            case vect_used_by_reduction:
              if (gimple_code (stmt) == GIMPLE_PHI)
                break;
              /* fall through */

            default:
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "unsupported use of reduction.");

              VEC_free (gimple, heap, worklist);
              return false;
            }

          live_p = false;
          break;

        case vect_nested_cycle:
          if (tmp_relevant != vect_unused_in_scope
              && tmp_relevant != vect_used_in_outer_by_reduction
              && tmp_relevant != vect_used_in_outer)
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "unsupported use of nested cycle.");

              VEC_free (gimple, heap, worklist);
              return false;
            }

          live_p = false;
          break;

        case vect_double_reduction_def:
          if (tmp_relevant != vect_unused_in_scope
              && tmp_relevant != vect_used_by_reduction)
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "unsupported use of double reduction.");

              VEC_free (gimple, heap, worklist);
              return false;
            }

          live_p = false;
          break;

        default:
          break;
        }

      FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
        {
          tree op = USE_FROM_PTR (use_p);
          if (!process_use (stmt, op, loop_vinfo, live_p, relevant, &worklist))
            {
              VEC_free (gimple, heap, worklist);
              return false;
            }
        }
    } /* while worklist */

  VEC_free (gimple, heap, worklist);
  return true;
}
/* Get cost by calling cost target builtin.  */

static inline int
vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
{
  tree dummy_type = NULL;
  int dummy = 0;

  return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
                                                       dummy_type, dummy);
}
/* Get cost for STMT.  */

int
cost_for_stmt (gimple stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (STMT_VINFO_TYPE (stmt_info))
    {
    case load_vec_info_type:
      return vect_get_stmt_cost (scalar_load);
    case store_vec_info_type:
      return vect_get_stmt_cost (scalar_store);
    case op_vec_info_type:
    case condition_vec_info_type:
    case assignment_vec_info_type:
    case reduc_vec_info_type:
    case induc_vec_info_type:
    case type_promotion_vec_info_type:
    case type_demotion_vec_info_type:
    case type_conversion_vec_info_type:
    case call_vec_info_type:
      return vect_get_stmt_cost (scalar_stmt);
    case undef_vec_info_type:
    default:
      gcc_unreachable ();
    }
}
/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
                        enum vect_def_type *dt, slp_tree slp_node)
{
  int i;
  int inside_cost = 0, outside_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  inside_cost = ncopies * vect_get_stmt_cost (vector_stmt);

  /* FORNOW: Assuming a maximum of 2 args per stmt.  */
  for (i = 0; i < 2; i++)
    {
      if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
        outside_cost += vect_get_stmt_cost (vector_stmt);
    }

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, "
             "outside_cost = %d .", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}
/* Function vect_cost_strided_group_size.

   For strided load or store, return the group_size only if it is the first
   load or store of a group, else return 1.  This ensures that group size is
   only returned once per group.  */

static int
vect_cost_strided_group_size (stmt_vec_info stmt_info)
{
  gimple first_stmt = GROUP_FIRST_ELEMENT (stmt_info);

  if (first_stmt == STMT_VINFO_STMT (stmt_info))
    return GROUP_SIZE (stmt_info);

  return 1;
}
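/* Worked example (illustrative): for an interleaved group of four stores
   S0, S1, S2, S3 with GROUP_FIRST_ELEMENT == S0 and GROUP_SIZE == 4, the
   calls return 4, 1, 1, 1 respectively, so summing the results over the
   group charges the group-size overhead exactly once.  */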
/* Function vect_model_store_cost.

   Models cost for stores.  In the case of strided accesses, one access
   has the overhead of the strided access attributed to it.  */

void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
                       bool store_lanes_p, enum vect_def_type dt,
                       slp_tree slp_node)
{
  int group_size;
  unsigned int inside_cost = 0, outside_cost = 0;
  struct data_reference *first_dr;
  gimple first_stmt;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (dt == vect_constant_def || dt == vect_external_def)
    outside_cost = vect_get_stmt_cost (scalar_to_vec);

  /* Strided access?  */
  if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
    {
      if (slp_node)
        {
          first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
          group_size = 1;
        }
      else
        {
          first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
          group_size = vect_cost_strided_group_size (stmt_info);
        }

      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a strided access.  */
  else
    {
      group_size = 1;
      first_dr = STMT_VINFO_DATA_REF (stmt_info);
    }

  /* We assume that the cost of a single store-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate stores.  If a strided
     access is instead being provided by a permute-and-store operation,
     include the cost of the permutes.  */
  if (!store_lanes_p && group_size > 1)
    {
      /* Uses a high and low interleave operation for each needed permute.  */
      inside_cost = ncopies * exact_log2 (group_size) * group_size
                    * vect_get_stmt_cost (vector_stmt);

      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .",
                 group_size);
    }

  /* Costs of the stores.  */
  vect_get_store_cost (first_dr, ncopies, &inside_cost);

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, "
             "outside_cost = %d .", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}
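/* Worked example (illustrative numbers): for group_size = 4, ncopies = 1,
   and a vector_stmt cost of 1, the permute cost above evaluates to
   1 * exact_log2 (4) * 4 * 1 = 8, i.e. log2(group_size) rounds of high/low
   interleaves over the group's vectors, on top of the stores themselves.  */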
/* Calculate cost of DR's memory access.  */

void
vect_get_store_cost (struct data_reference *dr, int ncopies,
                     unsigned int *inside_cost)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
        *inside_cost += ncopies * vect_get_stmt_cost (vector_store);

        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_store_cost: aligned.");

        break;
      }

    case dr_unaligned_supported:
      {
        gimple stmt = DR_STMT (dr);
        stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
        tree vectype = STMT_VINFO_VECTYPE (stmt_info);

        /* Here, we assign an additional cost for the unaligned store.  */
        *inside_cost += ncopies
          * targetm.vectorize.builtin_vectorization_cost (unaligned_store,
                                                          vectype,
                                                          DR_MISALIGNMENT (dr));

        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_store_cost: unaligned supported by "
                   "hardware.");

        break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_model_load_cost.

   Models cost for loads.  In the case of strided accesses, the last access
   has the overhead of the strided access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, bool load_lanes_p,
                      slp_tree slp_node)
{
  int group_size;
  gimple first_stmt;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
  unsigned int inside_cost = 0, outside_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* Strided accesses?  */
  first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
  if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && first_stmt && !slp_node)
    {
      group_size = vect_cost_strided_group_size (stmt_info);
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a strided access.  */
  else
    {
      group_size = 1;
      first_dr = dr;
    }

  /* We assume that the cost of a single load-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate loads.  If a strided
     access is instead being provided by a load-and-permute operation,
     include the cost of the permutes.  */
  if (!load_lanes_p && group_size > 1)
    {
      /* Uses even and odd extract operations for each needed permute.  */
      inside_cost = ncopies * exact_log2 (group_size) * group_size
                    * vect_get_stmt_cost (vector_stmt);

      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .",
                 group_size);
    }

  /* The loads themselves.  */
  vect_get_load_cost (first_dr, ncopies,
                      ((!STMT_VINFO_STRIDED_ACCESS (stmt_info))
                       || group_size > 1 || slp_node),
                      &inside_cost, &outside_cost);

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
             "outside_cost = %d .", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}
/* Calculate cost of DR's memory access.  */

void
vect_get_load_cost (struct data_reference *dr, int ncopies,
                    bool add_realign_cost, unsigned int *inside_cost,
                    unsigned int *outside_cost)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
        *inside_cost += ncopies * vect_get_stmt_cost (vector_load);

        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_load_cost: aligned.");

        break;
      }
    case dr_unaligned_supported:
      {
        gimple stmt = DR_STMT (dr);
        stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
        tree vectype = STMT_VINFO_VECTYPE (stmt_info);

        /* Here, we assign an additional cost for the unaligned load.  */
        *inside_cost += ncopies
          * targetm.vectorize.builtin_vectorization_cost (unaligned_load,
                                                          vectype,
                                                          DR_MISALIGNMENT (dr));
        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_load_cost: unaligned supported by "
                   "hardware.");

        break;
      }
    case dr_explicit_realign:
      {
        *inside_cost += ncopies * (2 * vect_get_stmt_cost (vector_load)
                                   + vect_get_stmt_cost (vector_stmt));

        /* FIXME: If the misalignment remains fixed across the iterations of
           the containing loop, the following cost should be added to the
           outside costs.  */
        if (targetm.vectorize.builtin_mask_for_load)
          *inside_cost += vect_get_stmt_cost (vector_stmt);

        break;
      }
    case dr_explicit_realign_optimized:
      {
        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_load_cost: unaligned software "
                   "pipelined.");

        /* Unaligned software pipeline has a load of an address, an initial
           load, and possibly a mask operation to "prime" the loop.  However,
           if this is an access in a group of loads, which provide strided
           access, then the above cost should only be considered for one
           access in the group.  Inside the loop, there is a load op
           and a realignment op.  */

        if (add_realign_cost)
          {
            *outside_cost = 2 * vect_get_stmt_cost (vector_stmt);
            if (targetm.vectorize.builtin_mask_for_load)
              *outside_cost += vect_get_stmt_cost (vector_stmt);
          }

        *inside_cost += ncopies * (vect_get_stmt_cost (vector_load)
                                   + vect_get_stmt_cost (vector_stmt));
        break;
      }

    default:
      gcc_unreachable ();
    }
}
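/* Worked example (illustrative numbers, assuming every vect_get_stmt_cost
   call returns 1): under dr_explicit_realign, each copy costs
   2 loads + 1 realign stmt = 3 inside the loop, plus one extra vector stmt
   when the target provides builtin_mask_for_load; under the software-
   pipelined dr_explicit_realign_optimized scheme, each copy costs only
   1 load + 1 realign = 2 inside the loop, with the address/priming work
   (2 or 3 stmts) charged once outside the loop when ADD_REALIGN_COST.  */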
/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new vector variable with
   the vector elements of VECTOR_VAR.  Place the initialization at BSI if it
   is not NULL.  Otherwise, place the initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */

tree
vect_init_vector (gimple stmt, tree vector_var, tree vector_type,
                  gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  tree new_var;
  gimple init_stmt;
  tree vec_oprnd;
  edge pe;
  tree new_temp;
  basic_block new_bb;

  new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
  add_referenced_var (new_var);
  init_stmt = gimple_build_assign (new_var, vector_var);
  new_temp = make_ssa_name (new_var, init_stmt);
  gimple_assign_set_lhs (init_stmt, new_temp);

  if (gsi)
    vect_finish_stmt_generation (stmt, init_stmt, gsi);
  else
    {
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
        {
          struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

          if (nested_in_vect_loop_p (loop, stmt))
            loop = loop->inner;

          pe = loop_preheader_edge (loop);
          new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
          gcc_assert (!new_bb);
        }
      else
        {
          bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
          basic_block bb;
          gimple_stmt_iterator gsi_bb_start;

          gcc_assert (bb_vinfo);
          bb = BB_VINFO_BB (bb_vinfo);
          gsi_bb_start = gsi_after_labels (bb);
          gsi_insert_before (&gsi_bb_start, init_stmt, GSI_SAME_STMT);
        }
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "created new init_stmt: ");
      print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
    }

  vec_oprnd = gimple_assign_lhs (init_stmt);
  return vec_oprnd;
}
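/* For illustration: splatting the constant 3 for a V4SI operand produces an
   init stmt along the lines of

       cst__1 = { 3, 3, 3, 3 };

   emitted in the loop preheader (or, for basic-block SLP, before the block's
   first non-label stmt), so the splat is computed once rather than per
   iteration.  */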
/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT.  This function returns a (vector) def that will
   be used in the vectorized stmt for STMT.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector
   def needs to be introduced.  */

tree
vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
{
  tree vec_oprnd;
  gimple vec_stmt;
  gimple def_stmt;
  stmt_vec_info def_stmt_info = NULL;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  unsigned int nunits;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  tree vec_inv;
  tree vec_cst;
  tree t = NULL_TREE;
  tree def;
  int i;
  enum vect_def_type dt;
  bool is_simple_use;
  tree vector_type;

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vect_get_vec_def_for_operand: ");
      print_generic_expr (vect_dump, op, TDF_SLIM);
    }

  is_simple_use = vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def,
                                      &dt);
  gcc_assert (is_simple_use);
  if (vect_print_dump_info (REPORT_DETAILS))
    {
      if (def)
        {
          fprintf (vect_dump, "def = ");
          print_generic_expr (vect_dump, def, TDF_SLIM);
        }
      if (def_stmt)
        {
          fprintf (vect_dump, " def_stmt = ");
          print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
        }
    }

  switch (dt)
    {
    /* Case 1: operand is a constant.  */
    case vect_constant_def:
      {
        vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
        gcc_assert (vector_type);
        nunits = TYPE_VECTOR_SUBPARTS (vector_type);

        if (scalar_def)
          *scalar_def = op;

        /* Create 'vect_cst_ = {cst,cst,...,cst}'  */
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits);

        vec_cst = build_vector_from_val (vector_type, op);
        return vect_init_vector (stmt, vec_cst, vector_type, NULL);
      }

    /* Case 2: operand is defined outside the loop - loop invariant.  */
    case vect_external_def:
      {
        vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
        gcc_assert (vector_type);
        nunits = TYPE_VECTOR_SUBPARTS (vector_type);

        if (scalar_def)
          *scalar_def = def;

        /* Create 'vec_inv = {inv,inv,..,inv}'  */
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "Create vector_inv.");

        for (i = nunits - 1; i >= 0; --i)
          {
            t = tree_cons (NULL_TREE, def, t);
          }

        /* FIXME: use build_constructor directly.  */
        vec_inv = build_constructor_from_list (vector_type, t);
        return vect_init_vector (stmt, vec_inv, vector_type, NULL);
      }

    /* Case 3: operand is defined inside the loop.  */
    case vect_internal_def:
      {
        if (scalar_def)
          *scalar_def = NULL/* FIXME tuples: def_stmt*/;

        /* Get the def from the vectorized stmt.  */
        def_stmt_info = vinfo_for_stmt (def_stmt);
        vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
        gcc_assert (vec_stmt);
        if (gimple_code (vec_stmt) == GIMPLE_PHI)
          vec_oprnd = PHI_RESULT (vec_stmt);
        else if (is_gimple_call (vec_stmt))
          vec_oprnd = gimple_call_lhs (vec_stmt);
        else
          vec_oprnd = gimple_assign_lhs (vec_stmt);
        return vec_oprnd;
      }

    /* Case 4: operand is defined by a loop-header phi - reduction.  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
      {
        struct loop *loop;

        gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
        loop = (gimple_bb (def_stmt))->loop_father;

        /* Get the def before the loop.  */
        op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
        return get_initial_def_for_reduction (stmt, op, scalar_def);
      }

    /* Case 5: operand is defined by a loop-header phi - induction.  */
    case vect_induction_def:
      {
        gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);

        /* Get the def from the vectorized stmt.  */
        def_stmt_info = vinfo_for_stmt (def_stmt);
        vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
        if (gimple_code (vec_stmt) == GIMPLE_PHI)
          vec_oprnd = PHI_RESULT (vec_stmt);
        else
          vec_oprnd = gimple_get_lhs (vec_stmt);
        return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_get_vec_def_for_stmt_copy.

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT
   field of the stmt that defines VEC_OPRND.
   DT is the type of the vector def VEC_OPRND.

   Context:
        In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of
   the vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:         vectorized into:        STMT_VINFO_RELATED_STMT

   S1: x = load         VS1.0: vx.0 = memref0   VS1.1
                        VS1.1: vx.1 = memref1   VS1.2
                        VS1.2: vx.2 = memref2   VS1.3
                        VS1.3: vx.3 = memref3

   S2: z = x + ...      VSnew.0: vz0 = vx.0 + ...  VSnew.1
                        VSnew.1: vz1 = vx.1 + ...  VSnew.2
                        VSnew.2: vz2 = vx.2 + ...  VSnew.3
                        VSnew.3: vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
        To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

        To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

        For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx.0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
        vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
        vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
        vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2);  */

tree
vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
{
  gimple vec_stmt_for_operand;
  stmt_vec_info def_stmt_info;

  /* Do nothing; can reuse same def.  */
  if (dt == vect_external_def || dt == vect_constant_def)
    return vec_oprnd;

  vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
  def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
  gcc_assert (def_stmt_info);
  vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (vec_stmt_for_operand);
  if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
    vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
  else
    vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  return vec_oprnd;
}
/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */

static void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
                                 VEC(tree,heap) **vec_oprnds0,
                                 VEC(tree,heap) **vec_oprnds1)
{
  tree vec_oprnd = VEC_pop (tree, *vec_oprnds0);

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);

  if (vec_oprnds1 && *vec_oprnds1)
    {
      vec_oprnd = VEC_pop (tree, *vec_oprnds1);
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
    }
}
/* Get vectorized definitions for OP0 and OP1, or from SLP_NODE if it is not
   NULL.  */

static void
vect_get_vec_defs (tree op0, tree op1, gimple stmt,
                   VEC(tree,heap) **vec_oprnds0, VEC(tree,heap) **vec_oprnds1,
                   slp_tree slp_node)
{
  if (slp_node)
    vect_get_slp_defs (op0, op1, slp_node, vec_oprnds0, vec_oprnds1, -1);
  else
    {
      tree vec_oprnd;

      *vec_oprnds0 = VEC_alloc (tree, heap, 1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
      VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);

      if (op1)
        {
          *vec_oprnds1 = VEC_alloc (tree, heap, 1);
          vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
          VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
        }
    }
}
/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

void
vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
                             gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);

  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
                                                   bb_vinfo));

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "add new stmt: ");
      print_gimple_stmt (vect_dump, vec_stmt, 0, TDF_SLIM);
    }

  gimple_set_location (vec_stmt, gimple_location (gsi_stmt (*gsi)));
}
/* Checks if CALL can be vectorized in type VECTYPE.  Returns
   a function declaration if the target has a vectorized version
   of the function, or NULL_TREE if the function cannot be vectorized.  */

tree
vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
{
  tree fndecl = gimple_call_fndecl (call);

  /* We only handle functions that do not read or clobber memory -- i.e.
     const or novops ones.  */
  if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
    return NULL_TREE;

  if (!fndecl
      || TREE_CODE (fndecl) != FUNCTION_DECL
      || !DECL_BUILT_IN (fndecl))
    return NULL_TREE;

  return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
                                                        vectype_in);
}
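/* For illustration (target-dependent behavior, not guaranteed by this code):
   on a target whose builtin_vectorized_function hook maps BUILT_IN_SQRT to a
   V2DF square-root builtin, a loop call such as  y = sqrt (x)  can be
   replaced by one vector call covering two iterations.  Conversely, a call
   through a function pointer (no fndecl) or a call that may clobber memory
   returns NULL_TREE here and blocks vectorization of the loop.  */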
/* Function vectorizable_call.

   Check if STMT performs a function call that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
{
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
  tree vectype_out, vectype_in;
  int nunits_in;
  int nunits_out;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree fndecl, new_temp, def, rhs_type;
  gimple def_stmt;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  gimple new_stmt = NULL;
  int ncopies, j;
  VEC(tree, heap) *vargs = NULL;
  enum { NARROW, NONE, WIDEN } modifier;
  size_t i, nargs;

  /* FORNOW: unsupported in basic block SLP.  */
  gcc_assert (loop_vinfo);

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* FORNOW: SLP not supported.  */
  if (STMT_SLP_TYPE (stmt_info))
    return false;

  /* Is STMT a vectorizable call?  */
  if (!is_gimple_call (stmt))
    return false;

  if (TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  if (stmt_can_throw_internal (stmt))
    return false;

  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Process function arguments.  */
  rhs_type = NULL_TREE;
  vectype_in = NULL_TREE;
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has more than three arguments; we do not have
     interesting builtin functions to vectorize with more than two arguments
     except for fma.  No arguments is also not good.  */
  if (nargs == 0 || nargs > 3)
    return false;

  for (i = 0; i < nargs; i++)
    {
      tree opvectype;

      op = gimple_call_arg (stmt, i);

      /* We can only handle calls with arguments of the same type.  */
      if (rhs_type
          && !types_compatible_p (rhs_type, TREE_TYPE (op)))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "argument types differ.");
          return false;
        }
      if (!rhs_type)
        rhs_type = TREE_TYPE (op);

      if (!vect_is_simple_use_1 (op, loop_vinfo, NULL,
                                 &def_stmt, &def, &dt[i], &opvectype))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "use not simple.");
          return false;
        }

      if (!vectype_in)
        vectype_in = opvectype;
      else if (opvectype
               && opvectype != vectype_in)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "argument vector types differ.");
          return false;
        }
    }
  /* If all arguments are external or constant defs use a vector type with
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "no vectype for scalar type ");
          print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
        }

      return false;
    }

  /* FORNOW */
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in == nunits_out / 2)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else if (nunits_out == nunits_in / 2)
    modifier = WIDEN;
  else
    return false;
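
  /* For example (illustrative types): with V4SI inputs and a V8HI output,
     nunits_in = 4 == 8 / 2, so the call NARROWs (each vector call consumes
     two input vectors); with V8HI inputs and a V4SI output the call WIDENs
     (two vector calls cover one input vector); equal element counts need no
     modifier.  */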
  /* For now, we only vectorize functions if a target specific builtin
     is available.  TODO -- in some cases, it might be profitable to
     insert the calls for pieces of the vector, in order to be able
     to vectorize other operations in the loop.  */
  fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
  if (fndecl == NULL_TREE)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "function is not vectorizable.");

      return false;
    }

  gcc_assert (!gimple_vuse (stmt));

  if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "=== vectorizable_call ===");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
      return true;
    }

  /** Transform.  **/

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform operation.");

  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; ++j)
        {
          /* Build argument list for the vectorized call.  */
          if (j == 0)
            vargs = VEC_alloc (tree, heap, nargs);
          else
            VEC_truncate (tree, vargs, 0);

          for (i = 0; i < nargs; i++)
            {
              op = gimple_call_arg (stmt, i);
              if (j == 0)
                vec_oprnd0
                  = vect_get_vec_def_for_operand (op, stmt, NULL);
              else
                {
                  vec_oprnd0 = gimple_call_arg (new_stmt, i);
                  vec_oprnd0
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }

              VEC_quick_push (tree, vargs, vec_oprnd0);
            }

          new_stmt = gimple_build_call_vec (fndecl, vargs);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_call_set_lhs (new_stmt, new_temp);

          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          mark_symbols_for_renaming (new_stmt);

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      break;

    case NARROW:
      for (j = 0; j < ncopies; ++j)
        {
          /* Build argument list for the vectorized call.  */
          if (j == 0)
            vargs = VEC_alloc (tree, heap, nargs * 2);
          else
            VEC_truncate (tree, vargs, 0);

          for (i = 0; i < nargs; i++)
            {
              op = gimple_call_arg (stmt, i);
              if (j == 0)
                {
                  vec_oprnd0
                    = vect_get_vec_def_for_operand (op, stmt, NULL);
                  vec_oprnd1
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }
              else
                {
                  vec_oprnd1 = gimple_call_arg (new_stmt, 2*i);
                  vec_oprnd0
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
                  vec_oprnd1
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }

              VEC_quick_push (tree, vargs, vec_oprnd0);
              VEC_quick_push (tree, vargs, vec_oprnd1);
            }

          new_stmt = gimple_build_call_vec (fndecl, vargs);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_call_set_lhs (new_stmt, new_temp);

          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          mark_symbols_for_renaming (new_stmt);

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

      break;

    case WIDEN:
      /* No current target implements this case.  */
      return false;
    }

  VEC_free (tree, heap, vargs);

  /* Update the exception handling table with the vector stmt if necessary.  */
  if (maybe_clean_or_replace_eh_stmt (stmt, *vec_stmt))
    gimple_purge_dead_eh_edges (gimple_bb (stmt));

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  type = TREE_TYPE (scalar_dest);
  new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
                                  build_zero_cst (type));
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, false);
  SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;

  return true;
}
/* Function vect_gen_widened_results_half.

   Create a vector stmt whose code, type, number of arguments, and result
   variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
   VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at BSI.
   In the case that CODE is a CALL_EXPR, this means that a call to DECL
   needs to be created (DECL is a function-decl of a target-builtin).
   STMT is the original scalar stmt that we are vectorizing.  */

static gimple
vect_gen_widened_results_half (enum tree_code code,
                               tree decl,
                               tree vec_oprnd0, tree vec_oprnd1, int op_type,
                               tree vec_dest, gimple_stmt_iterator *gsi,
                               gimple stmt)
{
  gimple new_stmt;
  tree new_temp;

  /* Generate half of the widened result:  */
  if (code == CALL_EXPR)
    {
      /* Target specific support  */
      if (op_type == binary_op)
        new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
      else
        new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_call_set_lhs (new_stmt, new_temp);
    }
  else
    {
      /* Generic support  */
      gcc_assert (op_type == TREE_CODE_LENGTH (code));
      if (op_type != binary_op)
        vec_oprnd1 = NULL;
      new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
                                               vec_oprnd1);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
    }
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return new_stmt;
}
1724 /* Check if STMT performs a conversion operation, that can be vectorized.
1725 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1726 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1727 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1729 static bool
1730 vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
1731 gimple *vec_stmt, slp_tree slp_node)
1733 tree vec_dest;
1734 tree scalar_dest;
1735 tree op0;
1736 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1737 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1738 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1739 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
1740 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
1741 tree new_temp;
1742 tree def;
1743 gimple def_stmt;
1744 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1745 gimple new_stmt = NULL;
1746 stmt_vec_info prev_stmt_info;
1747 int nunits_in;
1748 int nunits_out;
1749 tree vectype_out, vectype_in;
1750 int ncopies, j;
1751 tree rhs_type;
1752 tree builtin_decl;
1753 enum { NARROW, NONE, WIDEN } modifier;
1754 int i;
1755 VEC(tree,heap) *vec_oprnds0 = NULL;
1756 tree vop0;
1757 VEC(tree,heap) *dummy = NULL;
1758 int dummy_int;
1760 /* Is STMT a vectorizable conversion? */
1762 /* FORNOW: unsupported in basic block SLP. */
1763 gcc_assert (loop_vinfo);
1765 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1766 return false;
1768 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1769 return false;
1771 if (!is_gimple_assign (stmt))
1772 return false;
1774 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
1775 return false;
1777 code = gimple_assign_rhs_code (stmt);
1778 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
1779 return false;
1781 /* Check types of lhs and rhs. */
1782 scalar_dest = gimple_assign_lhs (stmt);
1783 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
1785 op0 = gimple_assign_rhs1 (stmt);
1786 rhs_type = TREE_TYPE (op0);
1787 /* Check the operands of the operation. */
1788 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
1789 &def_stmt, &def, &dt[0], &vectype_in))
1791 if (vect_print_dump_info (REPORT_DETAILS))
1792 fprintf (vect_dump, "use not simple.");
1793 return false;
1795 /* If op0 is an external or constant defs use a vector type of
1796 the same size as the output vector type. */
1797 if (!vectype_in)
1798 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
1799 if (vec_stmt)
1800 gcc_assert (vectype_in);
1801 if (!vectype_in)
1803 if (vect_print_dump_info (REPORT_DETAILS))
1805 fprintf (vect_dump, "no vectype for scalar type ");
1806 print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
1809 return false;
1812 /* FORNOW */
1813 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1814 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1815 if (nunits_in == nunits_out / 2)
1816 modifier = NARROW;
1817 else if (nunits_out == nunits_in)
1818 modifier = NONE;
1819 else if (nunits_out == nunits_in / 2)
1820 modifier = WIDEN;
1821 else
1822 return false;
1824 if (modifier == NARROW)
1825 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1826 else
1827 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1829 /* Multiple types in SLP are handled by creating the appropriate number of
1830 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1831 case of SLP. */
1832 if (slp_node || PURE_SLP_STMT (stmt_info))
1833 ncopies = 1;
1835 /* Sanity check: make sure that at least one copy of the vectorized stmt
1836 needs to be generated. */
1837 gcc_assert (ncopies >= 1);
1839 /* Supportable by target? */
1840 if ((modifier == NONE
1841 && !targetm.vectorize.builtin_conversion (code, vectype_out, vectype_in))
1842 || (modifier == WIDEN
1843 && !supportable_widening_operation (code, stmt,
1844 vectype_out, vectype_in,
1845 &decl1, &decl2,
1846 &code1, &code2,
1847 &dummy_int, &dummy))
1848 || (modifier == NARROW
1849 && !supportable_narrowing_operation (code, vectype_out, vectype_in,
1850 &code1, &dummy_int, &dummy)))
1852 if (vect_print_dump_info (REPORT_DETAILS))
1853 fprintf (vect_dump, "conversion not supported by target.");
1854 return false;
1857 if (modifier != NONE)
1859 /* FORNOW: SLP not supported. */
1860 if (STMT_SLP_TYPE (stmt_info))
1861 return false;
1864 if (!vec_stmt) /* transformation not required. */
1866 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
1867 return true;
1870 /** Transform. **/
1871 if (vect_print_dump_info (REPORT_DETAILS))
1872 fprintf (vect_dump, "transform conversion.");
1874 /* Handle def. */
1875 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1877 if (modifier == NONE && !slp_node)
1878 vec_oprnds0 = VEC_alloc (tree, heap, 1);
1880 prev_stmt_info = NULL;
1881 switch (modifier)
1883 case NONE:
1884 for (j = 0; j < ncopies; j++)
1886 if (j == 0)
1887 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
1888 else
1889 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
1891 builtin_decl =
1892 targetm.vectorize.builtin_conversion (code,
1893 vectype_out, vectype_in);
1894 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
1896 /* Arguments are ready. Create the new vector stmt. */
1897 new_stmt = gimple_build_call (builtin_decl, 1, vop0);
1898 new_temp = make_ssa_name (vec_dest, new_stmt);
1899 gimple_call_set_lhs (new_stmt, new_temp);
1900 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1901 if (slp_node)
1902 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
1905 if (j == 0)
1906 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1907 else
1908 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1909 prev_stmt_info = vinfo_for_stmt (new_stmt);
1911 break;
1913 case WIDEN:
1914 /* In case the vectorization factor (VF) is bigger than the number
1915 of elements that we can fit in a vectype (nunits), we have to
1916 generate more than one vector stmt - i.e - we need to "unroll"
1917 the vector stmt by a factor VF/nunits. */
1918 for (j = 0; j < ncopies; j++)
1920 if (j == 0)
1921 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1922 else
1923 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1925 /* Generate first half of the widened result: */
1926 new_stmt
1927 = vect_gen_widened_results_half (code1, decl1,
1928 vec_oprnd0, vec_oprnd1,
1929 unary_op, vec_dest, gsi, stmt);
1930 if (j == 0)
1931 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1932 else
1933 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1934 prev_stmt_info = vinfo_for_stmt (new_stmt);
1936 /* Generate second half of the widened result: */
1937 new_stmt
1938 = vect_gen_widened_results_half (code2, decl2,
1939 vec_oprnd0, vec_oprnd1,
1940 unary_op, vec_dest, gsi, stmt);
1941 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1942 prev_stmt_info = vinfo_for_stmt (new_stmt);
1944 break;
1946 case NARROW:
1947 /* In case the vectorization factor (VF) is bigger than the number
1948 of elements that we can fit in a vectype (nunits), we have to
1949 generate more than one vector stmt - i.e - we need to "unroll"
1950 the vector stmt by a factor VF/nunits. */
1951 for (j = 0; j < ncopies; j++)
1953 /* Handle uses. */
1954 if (j == 0)
1956 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1957 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1959 else
1961 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1);
1962 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1965 /* Arguments are ready. Create the new vector stmt. */
1966 new_stmt = gimple_build_assign_with_ops (code1, vec_dest, vec_oprnd0,
1967 vec_oprnd1);
1968 new_temp = make_ssa_name (vec_dest, new_stmt);
1969 gimple_assign_set_lhs (new_stmt, new_temp);
1970 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1972 if (j == 0)
1973 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1974 else
1975 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1977 prev_stmt_info = vinfo_for_stmt (new_stmt);
1980 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1983 if (vec_oprnds0)
1984 VEC_free (tree, heap, vec_oprnds0);
1986 return true;
1990 /* Function vectorizable_assignment.
1992 Check if STMT performs an assignment (copy) that can be vectorized.
1993 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1994 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1995 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1997 static bool
1998 vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
1999 gimple *vec_stmt, slp_tree slp_node)
2001 tree vec_dest;
2002 tree scalar_dest;
2003 tree op;
2004 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2005 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2006 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2007 tree new_temp;
2008 tree def;
2009 gimple def_stmt;
2010 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2011 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
2012 int ncopies;
2013 int i, j;
2014 VEC(tree,heap) *vec_oprnds = NULL;
2015 tree vop;
2016 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2017 gimple new_stmt = NULL;
2018 stmt_vec_info prev_stmt_info = NULL;
2019 enum tree_code code;
2020 tree vectype_in;
2022 /* Multiple types in SLP are handled by creating the appropriate number of
2023 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2024 case of SLP. */
2025 if (slp_node || PURE_SLP_STMT (stmt_info))
2026 ncopies = 1;
2027 else
2028 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
2030 gcc_assert (ncopies >= 1);
2032 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2033 return false;
2035 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2036 return false;
2038 /* Is vectorizable assignment? */
2039 if (!is_gimple_assign (stmt))
2040 return false;
2042 scalar_dest = gimple_assign_lhs (stmt);
2043 if (TREE_CODE (scalar_dest) != SSA_NAME)
2044 return false;
2046 code = gimple_assign_rhs_code (stmt);
2047 if (gimple_assign_single_p (stmt)
2048 || code == PAREN_EXPR
2049 || CONVERT_EXPR_CODE_P (code))
2050 op = gimple_assign_rhs1 (stmt);
2051 else
2052 return false;
2054 if (!vect_is_simple_use_1 (op, loop_vinfo, bb_vinfo,
2055 &def_stmt, &def, &dt[0], &vectype_in))
2057 if (vect_print_dump_info (REPORT_DETAILS))
2058 fprintf (vect_dump, "use not simple.");
2059 return false;
2062 /* We can handle NOP_EXPR conversions that do not change the number
2063 of elements or the vector size. */
2064 if (CONVERT_EXPR_CODE_P (code)
2065 && (!vectype_in
2066 || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
2067 || (GET_MODE_SIZE (TYPE_MODE (vectype))
2068 != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
2069 return false;
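/* For illustration (editor's sketch): a conversion such as
   unsigned int y = (unsigned int) x for int X keeps both the number
   of elements and the vector size (V4SI to the unsigned variant of
   V4SI), so it is accepted here and emitted below as a
   VIEW_CONVERT_EXPR; an int-to-char conversion changes NUNITS and is
   rejected.  */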
2071 if (!vec_stmt) /* transformation not required. */
2073 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
2074 if (vect_print_dump_info (REPORT_DETAILS))
2075 fprintf (vect_dump, "=== vectorizable_assignment ===");
2076 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2077 return true;
2080 /** Transform. **/
2081 if (vect_print_dump_info (REPORT_DETAILS))
2082 fprintf (vect_dump, "transform assignment.");
2084 /* Handle def. */
2085 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2087 /* Handle use. */
2088 for (j = 0; j < ncopies; j++)
2090 /* Handle uses. */
2091 if (j == 0)
2092 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
2093 else
2094 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
2096 /* Arguments are ready. Create the new vector stmt. */
2097 FOR_EACH_VEC_ELT (tree, vec_oprnds, i, vop)
2099 if (CONVERT_EXPR_CODE_P (code))
2100 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
2101 new_stmt = gimple_build_assign (vec_dest, vop);
2102 new_temp = make_ssa_name (vec_dest, new_stmt);
2103 gimple_assign_set_lhs (new_stmt, new_temp);
2104 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2105 if (slp_node)
2106 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2109 if (slp_node)
2110 continue;
2112 if (j == 0)
2113 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2114 else
2115 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2117 prev_stmt_info = vinfo_for_stmt (new_stmt);
2120 VEC_free (tree, heap, vec_oprnds);
2121 return true;
2125 /* Function vectorizable_shift.
2127 Check if STMT performs a shift operation that can be vectorized.
2128 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2129 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2130 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2132 static bool
2133 vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
2134 gimple *vec_stmt, slp_tree slp_node)
2136 tree vec_dest;
2137 tree scalar_dest;
2138 tree op0, op1 = NULL;
2139 tree vec_oprnd1 = NULL_TREE;
2140 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2141 tree vectype;
2142 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2143 enum tree_code code;
2144 enum machine_mode vec_mode;
2145 tree new_temp;
2146 optab optab;
2147 int icode;
2148 enum machine_mode optab_op2_mode;
2149 tree def;
2150 gimple def_stmt;
2151 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2152 gimple new_stmt = NULL;
2153 stmt_vec_info prev_stmt_info;
2154 int nunits_in;
2155 int nunits_out;
2156 tree vectype_out;
2157 int ncopies;
2158 int j, i;
2159 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
2160 tree vop0, vop1;
2161 unsigned int k;
2162 bool scalar_shift_arg = true;
2163 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2164 int vf;
2166 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2167 return false;
2169 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2170 return false;
2172 /* Is STMT a vectorizable binary/unary operation? */
2173 if (!is_gimple_assign (stmt))
2174 return false;
2176 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2177 return false;
2179 code = gimple_assign_rhs_code (stmt);
2181 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
2182 || code == RROTATE_EXPR))
2183 return false;
2185 scalar_dest = gimple_assign_lhs (stmt);
2186 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2188 op0 = gimple_assign_rhs1 (stmt);
2189 if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
2190 &def_stmt, &def, &dt[0], &vectype))
2192 if (vect_print_dump_info (REPORT_DETAILS))
2193 fprintf (vect_dump, "use not simple.");
2194 return false;
2196 /* If op0 is an external or constant def, use a vector type with
2197 the same size as the output vector type. */
2198 if (!vectype)
2199 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2200 if (vec_stmt)
2201 gcc_assert (vectype);
2202 if (!vectype)
2204 if (vect_print_dump_info (REPORT_DETAILS))
2206 fprintf (vect_dump, "no vectype for scalar type ");
2207 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2210 return false;
2213 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2214 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
2215 if (nunits_out != nunits_in)
2216 return false;
2218 op1 = gimple_assign_rhs2 (stmt);
2219 if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt[1]))
2221 if (vect_print_dump_info (REPORT_DETAILS))
2222 fprintf (vect_dump, "use not simple.");
2223 return false;
2226 if (loop_vinfo)
2227 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2228 else
2229 vf = 1;
2231 /* Multiple types in SLP are handled by creating the appropriate number of
2232 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2233 case of SLP. */
2234 if (slp_node || PURE_SLP_STMT (stmt_info))
2235 ncopies = 1;
2236 else
2237 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2239 gcc_assert (ncopies >= 1);
2241 /* Determine whether the shift amount is a vector, or scalar. If the
2242 shift/rotate amount is a vector, use the vector/vector shift optabs. */
2244 if (dt[1] == vect_internal_def && !slp_node)
2245 scalar_shift_arg = false;
2246 else if (dt[1] == vect_constant_def
2247 || dt[1] == vect_external_def
2248 || dt[1] == vect_internal_def)
2250 /* In SLP, we need to check whether the shift count is the same for
2251 all the stmts in the node; in loops, if it is a constant or
2252 invariant, it is always a scalar shift. */
2253 if (slp_node)
2255 VEC (gimple, heap) *stmts = SLP_TREE_SCALAR_STMTS (slp_node);
2256 gimple slpstmt;
2258 FOR_EACH_VEC_ELT (gimple, stmts, k, slpstmt)
2259 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
2260 scalar_shift_arg = false;
2263 else
2265 if (vect_print_dump_info (REPORT_DETAILS))
2266 fprintf (vect_dump, "operand mode requires invariant argument.");
2267 return false;
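/* For illustration (editor's sketch):
     for (i = 0; i < n; i++) a[i] = b[i] << k;
   has a loop-invariant shift amount, so SCALAR_SHIFT_ARG stays true
   and the vector/scalar optab is tried first, whereas
     for (i = 0; i < n; i++) a[i] = b[i] << c[i];
   has an internal-def amount and needs the vector/vector optab.  */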
2270 /* Vector shifted by vector. */
2271 if (!scalar_shift_arg)
2273 optab = optab_for_tree_code (code, vectype, optab_vector);
2274 if (vect_print_dump_info (REPORT_DETAILS))
2275 fprintf (vect_dump, "vector/vector shift/rotate found.");
2277 /* See if the machine has a vector shifted by scalar insn and if not
2278 then see if it has a vector shifted by vector insn. */
2279 else
2281 optab = optab_for_tree_code (code, vectype, optab_scalar);
2282 if (optab
2283 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
2285 if (vect_print_dump_info (REPORT_DETAILS))
2286 fprintf (vect_dump, "vector/scalar shift/rotate found.");
2288 else
2290 optab = optab_for_tree_code (code, vectype, optab_vector);
2291 if (optab
2292 && (optab_handler (optab, TYPE_MODE (vectype))
2293 != CODE_FOR_nothing))
2295 scalar_shift_arg = false;
2297 if (vect_print_dump_info (REPORT_DETAILS))
2298 fprintf (vect_dump, "vector/vector shift/rotate found.");
2300 /* Unlike the other binary operators, shifts/rotates have
2301 the rhs being int, instead of the same type as the lhs,
2302 so make sure the scalar is the right type if we are
2303 dealing with vectors of short/char. */
2304 if (dt[1] == vect_constant_def)
2305 op1 = fold_convert (TREE_TYPE (vectype), op1);
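/* For illustration (editor's sketch): in a[i] = b[i] << 3 with B an
   array of short, the constant 3 has type int, while a vector/vector
   shift of V8HI expects a V8HI amount; the fold_convert above narrows
   the constant to short before the vector constant is built.  */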
2310 /* Supportable by target? */
2311 if (!optab)
2313 if (vect_print_dump_info (REPORT_DETAILS))
2314 fprintf (vect_dump, "no optab.");
2315 return false;
2317 vec_mode = TYPE_MODE (vectype);
2318 icode = (int) optab_handler (optab, vec_mode);
2319 if (icode == CODE_FOR_nothing)
2321 if (vect_print_dump_info (REPORT_DETAILS))
2322 fprintf (vect_dump, "op not supported by target.");
2323 /* Check only during analysis. */
2324 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
2325 || (vf < vect_min_worthwhile_factor (code)
2326 && !vec_stmt))
2327 return false;
2328 if (vect_print_dump_info (REPORT_DETAILS))
2329 fprintf (vect_dump, "proceeding using word mode.");
2332 /* Worthwhile without SIMD support? Check only during analysis. */
2333 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2334 && vf < vect_min_worthwhile_factor (code)
2335 && !vec_stmt)
2337 if (vect_print_dump_info (REPORT_DETAILS))
2338 fprintf (vect_dump, "not worthwhile without SIMD support.");
2339 return false;
2342 if (!vec_stmt) /* transformation not required. */
2344 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
2345 if (vect_print_dump_info (REPORT_DETAILS))
2346 fprintf (vect_dump, "=== vectorizable_shift ===");
2347 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2348 return true;
2351 /** Transform. **/
2353 if (vect_print_dump_info (REPORT_DETAILS))
2354 fprintf (vect_dump, "transform binary/unary operation.");
2356 /* Handle def. */
2357 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2359 /* Allocate VECs for vector operands. In case of SLP, vector operands are
2360 created in the previous stages of the recursion, so no allocation is
2361 needed, except for the case of shift with scalar shift argument. In that
2362 case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
2363 be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
2364 In case of loop-based vectorization we allocate VECs of size 1. We
2365 allocate VEC_OPRNDS1 only in case of binary operation. */
2366 if (!slp_node)
2368 vec_oprnds0 = VEC_alloc (tree, heap, 1);
2369 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2371 else if (scalar_shift_arg)
2372 vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);
2374 prev_stmt_info = NULL;
2375 for (j = 0; j < ncopies; j++)
2377 /* Handle uses. */
2378 if (j == 0)
2380 if (scalar_shift_arg)
2382 /* Vector shl and shr insn patterns can be defined with scalar
2383 operand 2 (shift operand). In this case, use constant or loop
2384 invariant op1 directly, without extending it to vector mode
2385 first. */
2386 optab_op2_mode = insn_data[icode].operand[2].mode;
2387 if (!VECTOR_MODE_P (optab_op2_mode))
2389 if (vect_print_dump_info (REPORT_DETAILS))
2390 fprintf (vect_dump, "operand 1 using scalar mode.");
2391 vec_oprnd1 = op1;
2392 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2393 if (slp_node)
2395 /* Store vec_oprnd1 for every vector stmt to be created
2396 for SLP_NODE. We check during the analysis that all
2397 the shift arguments are the same.
2398 TODO: Allow different constants for different vector
2399 stmts generated for an SLP instance. */
2400 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
2401 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2406 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
2407 (a special case for certain kinds of vector shifts); otherwise,
2408 operand 1 should be of a vector type (the usual case). */
2409 if (vec_oprnd1)
2410 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
2411 slp_node);
2412 else
2413 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
2414 slp_node);
2416 else
2417 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
2419 /* Arguments are ready. Create the new vector stmt. */
2420 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
2422 vop1 = VEC_index (tree, vec_oprnds1, i);
2423 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2424 new_temp = make_ssa_name (vec_dest, new_stmt);
2425 gimple_assign_set_lhs (new_stmt, new_temp);
2426 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2427 if (slp_node)
2428 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2431 if (slp_node)
2432 continue;
2434 if (j == 0)
2435 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2436 else
2437 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2438 prev_stmt_info = vinfo_for_stmt (new_stmt);
2441 VEC_free (tree, heap, vec_oprnds0);
2442 VEC_free (tree, heap, vec_oprnds1);
2444 return true;
2448 /* Function vectorizable_operation.
2450 Check if STMT performs a binary, unary or ternary operation that can
2451 be vectorized.
2452 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2453 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2454 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2456 static bool
2457 vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
2458 gimple *vec_stmt, slp_tree slp_node)
2460 tree vec_dest;
2461 tree scalar_dest;
2462 tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
2463 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2464 tree vectype;
2465 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2466 enum tree_code code;
2467 enum machine_mode vec_mode;
2468 tree new_temp;
2469 int op_type;
2470 optab optab;
2471 int icode;
2472 tree def;
2473 gimple def_stmt;
2474 enum vect_def_type dt[3]
2475 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
2476 gimple new_stmt = NULL;
2477 stmt_vec_info prev_stmt_info;
2478 int nunits_in;
2479 int nunits_out;
2480 tree vectype_out;
2481 int ncopies;
2482 int j, i;
2483 VEC(tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL, *vec_oprnds2 = NULL;
2484 tree vop0, vop1, vop2;
2485 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2486 int vf;
2488 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2489 return false;
2491 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2492 return false;
2494 /* Is STMT a vectorizable binary/unary operation? */
2495 if (!is_gimple_assign (stmt))
2496 return false;
2498 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2499 return false;
2501 code = gimple_assign_rhs_code (stmt);
2503 /* For pointer addition, we should use the normal plus for
2504 the vector addition. */
2505 if (code == POINTER_PLUS_EXPR)
2506 code = PLUS_EXPR;
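/* For illustration (editor's sketch): a pointer update such as
   q = p + 4 is a POINTER_PLUS_EXPR in GIMPLE, but its vectorized
   form adds a vector of offsets element-wise, for which only the
   ordinary PLUS_EXPR has a vector optab.  */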
2508 /* Support only unary, binary and ternary operations. */
2509 op_type = TREE_CODE_LENGTH (code);
2510 if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
2512 if (vect_print_dump_info (REPORT_DETAILS))
2513 fprintf (vect_dump, "num. args = %d (not unary/binary/ternary op).",
2514 op_type);
2515 return false;
2518 scalar_dest = gimple_assign_lhs (stmt);
2519 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2521 op0 = gimple_assign_rhs1 (stmt);
2522 if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
2523 &def_stmt, &def, &dt[0], &vectype))
2525 if (vect_print_dump_info (REPORT_DETAILS))
2526 fprintf (vect_dump, "use not simple.");
2527 return false;
2529 /* If op0 is an external or constant def, use a vector type with
2530 the same size as the output vector type. */
2531 if (!vectype)
2532 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2533 if (vec_stmt)
2534 gcc_assert (vectype);
2535 if (!vectype)
2537 if (vect_print_dump_info (REPORT_DETAILS))
2539 fprintf (vect_dump, "no vectype for scalar type ");
2540 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2543 return false;
2546 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2547 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
2548 if (nunits_out != nunits_in)
2549 return false;
2551 if (op_type == binary_op || op_type == ternary_op)
2553 op1 = gimple_assign_rhs2 (stmt);
2554 if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def,
2555 &dt[1]))
2557 if (vect_print_dump_info (REPORT_DETAILS))
2558 fprintf (vect_dump, "use not simple.");
2559 return false;
2562 if (op_type == ternary_op)
2564 op2 = gimple_assign_rhs3 (stmt);
2565 if (!vect_is_simple_use (op2, loop_vinfo, bb_vinfo, &def_stmt, &def,
2566 &dt[2]))
2568 if (vect_print_dump_info (REPORT_DETAILS))
2569 fprintf (vect_dump, "use not simple.");
2570 return false;
2574 if (loop_vinfo)
2575 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2576 else
2577 vf = 1;
2579 /* Multiple types in SLP are handled by creating the appropriate number of
2580 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2581 case of SLP. */
2582 if (slp_node || PURE_SLP_STMT (stmt_info))
2583 ncopies = 1;
2584 else
2585 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2587 gcc_assert (ncopies >= 1);
2589 /* Shifts are handled in vectorizable_shift (). */
2590 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
2591 || code == RROTATE_EXPR)
2592 return false;
2594 optab = optab_for_tree_code (code, vectype, optab_default);
2596 /* Supportable by target? */
2597 if (!optab)
2599 if (vect_print_dump_info (REPORT_DETAILS))
2600 fprintf (vect_dump, "no optab.");
2601 return false;
2603 vec_mode = TYPE_MODE (vectype);
2604 icode = (int) optab_handler (optab, vec_mode);
2605 if (icode == CODE_FOR_nothing)
2607 if (vect_print_dump_info (REPORT_DETAILS))
2608 fprintf (vect_dump, "op not supported by target.");
2609 /* Check only during analysis. */
2610 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
2611 || (vf < vect_min_worthwhile_factor (code)
2612 && !vec_stmt))
2613 return false;
2614 if (vect_print_dump_info (REPORT_DETAILS))
2615 fprintf (vect_dump, "proceeding using word mode.");
2618 /* Worthwhile without SIMD support? Check only during analysis. */
2619 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2620 && vf < vect_min_worthwhile_factor (code)
2621 && !vec_stmt)
2623 if (vect_print_dump_info (REPORT_DETAILS))
2624 fprintf (vect_dump, "not worthwhile without SIMD support.");
2625 return false;
2628 if (!vec_stmt) /* transformation not required. */
2630 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
2631 if (vect_print_dump_info (REPORT_DETAILS))
2632 fprintf (vect_dump, "=== vectorizable_operation ===");
2633 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2634 return true;
2637 /** Transform. **/
2639 if (vect_print_dump_info (REPORT_DETAILS))
2640 fprintf (vect_dump, "transform binary/unary operation.");
2642 /* Handle def. */
2643 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2645 /* Allocate VECs for vector operands. In case of SLP, vector operands are
2646 created in the previous stages of the recursion, so no allocation is
2647 needed. In case of loop-based vectorization we allocate VECs of size 1,
2648 with VEC_OPRNDS1 allocated only for binary and ternary operations and
2649 VEC_OPRNDS2 only for ternary operations. */
2652 if (!slp_node)
2654 vec_oprnds0 = VEC_alloc (tree, heap, 1);
2655 if (op_type == binary_op || op_type == ternary_op)
2656 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2657 if (op_type == ternary_op)
2658 vec_oprnds2 = VEC_alloc (tree, heap, 1);
2661 /* In case the vectorization factor (VF) is bigger than the number
2662 of elements that we can fit in a vectype (nunits), we have to generate
2663 more than one vector stmt - i.e - we need to "unroll" the
2664 vector stmt by a factor VF/nunits. In doing so, we record a pointer
2665 from one copy of the vector stmt to the next, in the field
2666 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
2667 stages to find the correct vector defs to be used when vectorizing
2668 stmts that use the defs of the current stmt. The example below
2669 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
2670 we need to create 4 vectorized stmts):
2672 before vectorization:
2673 RELATED_STMT VEC_STMT
2674 S1: x = memref - -
2675 S2: z = x + 1 - -
2677 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
2678 there):
2679 RELATED_STMT VEC_STMT
2680 VS1_0: vx0 = memref0 VS1_1 -
2681 VS1_1: vx1 = memref1 VS1_2 -
2682 VS1_2: vx2 = memref2 VS1_3 -
2683 VS1_3: vx3 = memref3 - -
2684 S1: x = load - VS1_0
2685 S2: z = x + 1 - -
2687 step2: vectorize stmt S2 (done here):
2688 To vectorize stmt S2 we first need to find the relevant vector
2689 def for the first operand 'x'. This is, as usual, obtained from
2690 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
2691 that defines 'x' (S1). This way we find the stmt VS1_0, and the
2692 relevant vector def 'vx0'. Having found 'vx0' we can generate
2693 the vector stmt VS2_0, and as usual, record it in the
2694 STMT_VINFO_VEC_STMT of stmt S2.
2695 When creating the second copy (VS2_1), we obtain the relevant vector
2696 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
2697 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
2698 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
2699 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
2700 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
2701 chain of stmts and pointers:
2702 RELATED_STMT VEC_STMT
2703 VS1_0: vx0 = memref0 VS1_1 -
2704 VS1_1: vx1 = memref1 VS1_2 -
2705 VS1_2: vx2 = memref2 VS1_3 -
2706 VS1_3: vx3 = memref3 - -
2707 S1: x = load - VS1_0
2708 VS2_0: vz0 = vx0 + v1 VS2_1 -
2709 VS2_1: vz1 = vx1 + v1 VS2_2 -
2710 VS2_2: vz2 = vx2 + v1 VS2_3 -
2711 VS2_3: vz3 = vx3 + v1 - -
2712 S2: z = x + 1 - VS2_0 */
2714 prev_stmt_info = NULL;
2715 for (j = 0; j < ncopies; j++)
2717 /* Handle uses. */
2718 if (j == 0)
2720 if (op_type == binary_op || op_type == ternary_op)
2721 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
2722 slp_node);
2723 else
2724 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
2725 slp_node);
2726 if (op_type == ternary_op)
2728 vec_oprnds2 = VEC_alloc (tree, heap, 1);
2729 VEC_quick_push (tree, vec_oprnds2,
2730 vect_get_vec_def_for_operand (op2, stmt, NULL));
2733 else
2735 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
2736 if (op_type == ternary_op)
2738 tree vec_oprnd = VEC_pop (tree, vec_oprnds2);
2739 VEC_quick_push (tree, vec_oprnds2,
2740 vect_get_vec_def_for_stmt_copy (dt[2],
2741 vec_oprnd));
2745 /* Arguments are ready. Create the new vector stmt. */
2746 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
2748 vop1 = ((op_type == binary_op || op_type == ternary_op)
2749 ? VEC_index (tree, vec_oprnds1, i) : NULL_TREE);
2750 vop2 = ((op_type == ternary_op)
2751 ? VEC_index (tree, vec_oprnds2, i) : NULL_TREE);
2752 new_stmt = gimple_build_assign_with_ops3 (code, vec_dest,
2753 vop0, vop1, vop2);
2754 new_temp = make_ssa_name (vec_dest, new_stmt);
2755 gimple_assign_set_lhs (new_stmt, new_temp);
2756 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2757 if (slp_node)
2758 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2761 if (slp_node)
2762 continue;
2764 if (j == 0)
2765 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2766 else
2767 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2768 prev_stmt_info = vinfo_for_stmt (new_stmt);
2771 VEC_free (tree, heap, vec_oprnds0);
2772 if (vec_oprnds1)
2773 VEC_free (tree, heap, vec_oprnds1);
2774 if (vec_oprnds2)
2775 VEC_free (tree, heap, vec_oprnds2);
2777 return true;
2781 /* Get vectorized definitions for loop-based vectorization. For the first
2782 operand we call vect_get_vec_def_for_operand() (with OPRND containing
2783 scalar operand), and for the rest we get a copy with
2784 vect_get_vec_def_for_stmt_copy() using the previous vector definition
2785 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
2786 The vectors are collected into VEC_OPRNDS. */
2788 static void
2789 vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
2790 VEC (tree, heap) **vec_oprnds, int multi_step_cvt)
2792 tree vec_oprnd;
2794 /* Get first vector operand. */
2795 /* All the vector operands except the very first one (that is scalar oprnd)
2796 are stmt copies. */
2797 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
2798 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
2799 else
2800 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
2802 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2804 /* Get second vector operand. */
2805 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
2806 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2808 *oprnd = vec_oprnd;
2810 /* For conversion in multiple steps, continue to get operands
2811 recursively. */
2812 if (multi_step_cvt)
2813 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
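/* For illustration (editor's sketch): when called with
   MULTI_STEP_CVT == 1, the function pushes two defs and recurses
   once, pushing two more -- the four operands that match the
   vect_pow2 (multi_step_cvt) * 2 slots allocated by the caller and
   that the demotion code below consumes in pairs.  */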
2817 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
2818 For multi-step conversions store the resulting vectors and call the function
2819 recursively. */
2821 static void
2822 vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
2823 int multi_step_cvt, gimple stmt,
2824 VEC (tree, heap) *vec_dsts,
2825 gimple_stmt_iterator *gsi,
2826 slp_tree slp_node, enum tree_code code,
2827 stmt_vec_info *prev_stmt_info)
2829 unsigned int i;
2830 tree vop0, vop1, new_tmp, vec_dest;
2831 gimple new_stmt;
2832 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2834 vec_dest = VEC_pop (tree, vec_dsts);
2836 for (i = 0; i < VEC_length (tree, *vec_oprnds); i += 2)
2838 /* Create demotion operation. */
2839 vop0 = VEC_index (tree, *vec_oprnds, i);
2840 vop1 = VEC_index (tree, *vec_oprnds, i + 1);
2841 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2842 new_tmp = make_ssa_name (vec_dest, new_stmt);
2843 gimple_assign_set_lhs (new_stmt, new_tmp);
2844 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2846 if (multi_step_cvt)
2847 /* Store the resulting vector for next recursive call. */
2848 VEC_replace (tree, *vec_oprnds, i/2, new_tmp);
2849 else
2851 /* This is the last step of the conversion sequence. Store the
2852 vectors in SLP_NODE or in vector info of the scalar statement
2853 (or in STMT_VINFO_RELATED_STMT chain). */
2854 if (slp_node)
2855 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2856 else
2858 if (!*prev_stmt_info)
2859 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2860 else
2861 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
2863 *prev_stmt_info = vinfo_for_stmt (new_stmt);
2868 /* For multi-step demotion operations we first generate demotion operations
2869 from the source type to the intermediate types, and then combine the
2870 results (stored in VEC_OPRNDS) with a demotion operation to the
2871 destination type. */
2872 if (multi_step_cvt)
2874 /* At each level of recursion we have half of the operands we had at the
2875 previous level. */
2876 VEC_truncate (tree, *vec_oprnds, (i+1)/2);
2877 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
2878 stmt, vec_dsts, gsi, slp_node,
2879 code, prev_stmt_info);
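/* For illustration (editor's sketch, hypothetical types): demoting
   int to char through short with V4SI/V8HI/V16QI packs pairs of
   V4SI operands into V8HI vectors first (four operands become two
   results), then the recursive call packs those into a single V16QI;
   each level halves the number of operands.  */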
2884 /* Function vectorizable_type_demotion
2886 Check if STMT performs a binary or unary operation that involves
2887 type demotion, and if it can be vectorized.
2888 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2889 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2890 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2892 static bool
2893 vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
2894 gimple *vec_stmt, slp_tree slp_node)
2896 tree vec_dest;
2897 tree scalar_dest;
2898 tree op0;
2899 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2900 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2901 enum tree_code code, code1 = ERROR_MARK;
2902 tree def;
2903 gimple def_stmt;
2904 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2905 stmt_vec_info prev_stmt_info;
2906 int nunits_in;
2907 int nunits_out;
2908 tree vectype_out;
2909 int ncopies;
2910 int j, i;
2911 tree vectype_in;
2912 int multi_step_cvt = 0;
2913 VEC (tree, heap) *vec_oprnds0 = NULL;
2914 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2915 tree last_oprnd, intermediate_type;
2917 /* FORNOW: not supported by basic block SLP vectorization. */
2918 gcc_assert (loop_vinfo);
2920 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2921 return false;
2923 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2924 return false;
2926 /* Is STMT a vectorizable type-demotion operation? */
2927 if (!is_gimple_assign (stmt))
2928 return false;
2930 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2931 return false;
2933 code = gimple_assign_rhs_code (stmt);
2934 if (!CONVERT_EXPR_CODE_P (code))
2935 return false;
2937 scalar_dest = gimple_assign_lhs (stmt);
2938 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2940 /* Check the operands of the operation. */
2941 op0 = gimple_assign_rhs1 (stmt);
2942 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2943 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2944 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2945 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2946 && CONVERT_EXPR_CODE_P (code))))
2947 return false;
2948 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
2949 &def_stmt, &def, &dt[0], &vectype_in))
2951 if (vect_print_dump_info (REPORT_DETAILS))
2952 fprintf (vect_dump, "use not simple.");
2953 return false;
2955 /* If op0 is an external def, use a vector type with the
2956 same size as the output vector type if possible. */
2957 if (!vectype_in)
2958 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2959 if (vec_stmt)
2960 gcc_assert (vectype_in);
2961 if (!vectype_in)
2963 if (vect_print_dump_info (REPORT_DETAILS))
2965 fprintf (vect_dump, "no vectype for scalar type ");
2966 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2969 return false;
2972 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2973 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2974 if (nunits_in >= nunits_out)
2975 return false;
2977 /* Multiple types in SLP are handled by creating the appropriate number of
2978 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2979 case of SLP. */
2980 if (slp_node || PURE_SLP_STMT (stmt_info))
2981 ncopies = 1;
2982 else
2983 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2984 gcc_assert (ncopies >= 1);
2986 /* Supportable by target? */
2987 if (!supportable_narrowing_operation (code, vectype_out, vectype_in,
2988 &code1, &multi_step_cvt, &interm_types))
2989 return false;
2991 if (!vec_stmt) /* transformation not required. */
2993 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
2994 if (vect_print_dump_info (REPORT_DETAILS))
2995 fprintf (vect_dump, "=== vectorizable_demotion ===");
2996 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2997 return true;
3000 /** Transform. **/
3001 if (vect_print_dump_info (REPORT_DETAILS))
3002 fprintf (vect_dump, "transform type demotion operation. ncopies = %d.",
3003 ncopies);
3005 /* In case of multi-step demotion, we first generate demotion operations to
3006 the intermediate types, and then from those types to the final one.
3007 We create vector destinations for the intermediate types (TYPES) received
3008 from supportable_narrowing_operation, and store them in the correct order
3009 for future use in vect_create_vectorized_demotion_stmts(). */
3010 if (multi_step_cvt)
3011 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
3012 else
3013 vec_dsts = VEC_alloc (tree, heap, 1);
3015 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
3016 VEC_quick_push (tree, vec_dsts, vec_dest);
3018 if (multi_step_cvt)
3020 for (i = VEC_length (tree, interm_types) - 1;
3021 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
3023 vec_dest = vect_create_destination_var (scalar_dest,
3024 intermediate_type);
3025 VEC_quick_push (tree, vec_dsts, vec_dest);
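/* For illustration (editor's sketch): for an int-to-char demotion
   with one intermediate type, VEC_DSTS now holds the char destination
   followed by the short destination, so the first VEC_pop in
   vect_create_vectorized_demotion_stmts yields the short destination
   for the int-to-short step and the char destination for the final
   step.  */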
3029 /* In case the vectorization factor (VF) is bigger than the number
3030 of elements that we can fit in a vectype (nunits), we have to generate
3031 more than one vector stmt - i.e - we need to "unroll" the
3032 vector stmt by a factor VF/nunits. */
3033 last_oprnd = op0;
3034 prev_stmt_info = NULL;
3035 for (j = 0; j < ncopies; j++)
3037 /* Handle uses. */
3038 if (slp_node)
3039 vect_get_slp_defs (op0, NULL_TREE, slp_node, &vec_oprnds0, NULL, -1);
3040 else
3042 VEC_free (tree, heap, vec_oprnds0);
3043 vec_oprnds0 = VEC_alloc (tree, heap,
3044 (multi_step_cvt ? vect_pow2 (multi_step_cvt) * 2 : 2));
3045 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
3046 vect_pow2 (multi_step_cvt) - 1);
3049 /* Arguments are ready. Create the new vector stmts. */
3050 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
3051 vect_create_vectorized_demotion_stmts (&vec_oprnds0,
3052 multi_step_cvt, stmt, tmp_vec_dsts,
3053 gsi, slp_node, code1,
3054 &prev_stmt_info);
3057 VEC_free (tree, heap, vec_oprnds0);
3058 VEC_free (tree, heap, vec_dsts);
3059 VEC_free (tree, heap, tmp_vec_dsts);
3060 VEC_free (tree, heap, interm_types);
3062 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3063 return true;
3067 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
3068 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
3069 the resulting vectors and call the function recursively. */
3071 static void
3072 vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
3073 VEC (tree, heap) **vec_oprnds1,
3074 int multi_step_cvt, gimple stmt,
3075 VEC (tree, heap) *vec_dsts,
3076 gimple_stmt_iterator *gsi,
3077 slp_tree slp_node, enum tree_code code1,
3078 enum tree_code code2, tree decl1,
3079 tree decl2, int op_type,
3080 stmt_vec_info *prev_stmt_info)
3082 int i;
3083 tree vop0, vop1, new_tmp1, new_tmp2, vec_dest;
3084 gimple new_stmt1, new_stmt2;
3085 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3086 VEC (tree, heap) *vec_tmp;
3088 vec_dest = VEC_pop (tree, vec_dsts);
3089 vec_tmp = VEC_alloc (tree, heap, VEC_length (tree, *vec_oprnds0) * 2);
3091 FOR_EACH_VEC_ELT (tree, *vec_oprnds0, i, vop0)
3093 if (op_type == binary_op)
3094 vop1 = VEC_index (tree, *vec_oprnds1, i);
3095 else
3096 vop1 = NULL_TREE;
3098 /* Generate the two halves of promotion operation. */
3099 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
3100 op_type, vec_dest, gsi, stmt);
3101 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
3102 op_type, vec_dest, gsi, stmt);
3103 if (is_gimple_call (new_stmt1))
3105 new_tmp1 = gimple_call_lhs (new_stmt1);
3106 new_tmp2 = gimple_call_lhs (new_stmt2);
3108 else
3110 new_tmp1 = gimple_assign_lhs (new_stmt1);
3111 new_tmp2 = gimple_assign_lhs (new_stmt2);
3114 if (multi_step_cvt)
3116 /* Store the results for the recursive call. */
3117 VEC_quick_push (tree, vec_tmp, new_tmp1);
3118 VEC_quick_push (tree, vec_tmp, new_tmp2);
3120 else
3122 /* Last step of the promotion sequence - store the results. */
3123 if (slp_node)
3125 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt1);
3126 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt2);
3128 else
3130 if (!*prev_stmt_info)
3131 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt1;
3132 else
3133 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt1;
3135 *prev_stmt_info = vinfo_for_stmt (new_stmt1);
3136 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt2;
3137 *prev_stmt_info = vinfo_for_stmt (new_stmt2);
3142 if (multi_step_cvt)
3144 /* For a multi-step promotion operation we call the function recursively
3145 for every stage. We start from the input type, create promotion
3146 operations to the intermediate types, and then create promotions
3147 to the output type. */
3148 *vec_oprnds0 = VEC_copy (tree, heap, vec_tmp);
3149 vect_create_vectorized_promotion_stmts (vec_oprnds0, vec_oprnds1,
3150 multi_step_cvt - 1, stmt,
3151 vec_dsts, gsi, slp_node, code1,
3152 code2, decl1, decl2, op_type,
3153 prev_stmt_info);
3156 VEC_free (tree, heap, vec_tmp);
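/* For illustration (editor's sketch, hypothetical types): promoting
   char to int through short, each V16QI operand yields a lo and a hi
   V8HI half (two stmts), and the recursive call widens each half
   again into two V4SI vectors -- four vector stmts per input vector
   in total.  */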
3160 /* Function vectorizable_type_promotion
3162 Check if STMT performs a binary or unary operation that involves
3163 type promotion, and if it can be vectorized.
3164 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3165 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3166 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3168 static bool
3169 vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
3170 gimple *vec_stmt, slp_tree slp_node)
3172 tree vec_dest;
3173 tree scalar_dest;
3174 tree op0, op1 = NULL;
3175 tree vec_oprnd0=NULL, vec_oprnd1=NULL;
3176 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3177 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3178 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
3179 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
3180 int op_type;
3181 tree def;
3182 gimple def_stmt;
3183 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
3184 stmt_vec_info prev_stmt_info;
3185 int nunits_in;
3186 int nunits_out;
3187 tree vectype_out;
3188 int ncopies;
3189 int j, i;
3190 tree vectype_in;
3191 tree intermediate_type = NULL_TREE;
3192 int multi_step_cvt = 0;
3193 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
3194 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
3196 /* FORNOW: not supported by basic block SLP vectorization. */
3197 gcc_assert (loop_vinfo);
3199 if (!STMT_VINFO_RELEVANT_P (stmt_info))
3200 return false;
3202 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3203 return false;
3205 /* Is STMT a vectorizable type-promotion operation? */
3206 if (!is_gimple_assign (stmt))
3207 return false;
3209 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
3210 return false;
3212 code = gimple_assign_rhs_code (stmt);
3213 if (!CONVERT_EXPR_CODE_P (code)
3214 && code != WIDEN_MULT_EXPR)
3215 return false;
3217 scalar_dest = gimple_assign_lhs (stmt);
3218 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3220 /* Check the operands of the operation. */
3221 op0 = gimple_assign_rhs1 (stmt);
3222 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
3223 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
3224 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
3225 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
3226 && CONVERT_EXPR_CODE_P (code))))
3227 return false;
3228 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
3229 &def_stmt, &def, &dt[0], &vectype_in))
3231 if (vect_print_dump_info (REPORT_DETAILS))
3232 fprintf (vect_dump, "use not simple.");
3233 return false;
3235 /* If op0 is an external or constant def, use a vector type with
3236 the same size as the output vector type. */
3237 if (!vectype_in)
3238 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
3239 if (vec_stmt)
3240 gcc_assert (vectype_in);
3241 if (!vectype_in)
3243 if (vect_print_dump_info (REPORT_DETAILS))
3245 fprintf (vect_dump, "no vectype for scalar type ");
3246 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
3249 return false;
3252 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
3253 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
3254 if (nunits_in <= nunits_out)
3255 return false;
3257 /* Multiple types in SLP are handled by creating the appropriate number of
3258 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3259 case of SLP. */
3260 if (slp_node || PURE_SLP_STMT (stmt_info))
3261 ncopies = 1;
3262 else
3263 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
3265 gcc_assert (ncopies >= 1);
3267 op_type = TREE_CODE_LENGTH (code);
3268 if (op_type == binary_op)
3270 op1 = gimple_assign_rhs2 (stmt);
3271 if (!vect_is_simple_use (op1, loop_vinfo, NULL, &def_stmt, &def, &dt[1]))
3273 if (vect_print_dump_info (REPORT_DETAILS))
3274 fprintf (vect_dump, "use not simple.");
3275 return false;
3279 /* Supportable by target? */
3280 if (!supportable_widening_operation (code, stmt, vectype_out, vectype_in,
3281 &decl1, &decl2, &code1, &code2,
3282 &multi_step_cvt, &interm_types))
3283 return false;
3285 /* A binary widening operation can only be supported directly by the
3286 architecture. */
3287 gcc_assert (!(multi_step_cvt && op_type == binary_op));
3289 if (!vec_stmt) /* transformation not required. */
3291 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
3292 if (vect_print_dump_info (REPORT_DETAILS))
3293 fprintf (vect_dump, "=== vectorizable_promotion ===");
3294 vect_model_simple_cost (stmt_info, 2*ncopies, dt, NULL);
3295 return true;
3298 /** Transform. **/
3300 if (vect_print_dump_info (REPORT_DETAILS))
3301 fprintf (vect_dump, "transform type promotion operation. ncopies = %d.",
3302 ncopies);
3304 /* Handle def. */
3305 /* In case of multi-step promotion, we first generate promotion operations
3306 to the intermediate types, and then from those types to the final one.
3307 We store the vector destinations in VEC_DSTS in the correct order for
3308 recursive creation of promotion operations in
3309 vect_create_vectorized_promotion_stmts(). Vector destinations are created
3310 according to TYPES received from supportable_widening_operation(). */
3311 if (multi_step_cvt)
3312 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
3313 else
3314 vec_dsts = VEC_alloc (tree, heap, 1);
3316 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
3317 VEC_quick_push (tree, vec_dsts, vec_dest);
3319 if (multi_step_cvt)
3321 for (i = VEC_length (tree, interm_types) - 1;
3322 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
3324 vec_dest = vect_create_destination_var (scalar_dest,
3325 intermediate_type);
3326 VEC_quick_push (tree, vec_dsts, vec_dest);
3330 if (!slp_node)
3332 vec_oprnds0 = VEC_alloc (tree, heap,
3333 (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
3334 if (op_type == binary_op)
3335 vec_oprnds1 = VEC_alloc (tree, heap, 1);
3338 /* In case the vectorization factor (VF) is bigger than the number
3339 of elements that we can fit in a vectype (nunits), we have to generate
3340 more than one vector stmt - i.e - we need to "unroll" the
3341 vector stmt by a factor VF/nunits. */
3343 prev_stmt_info = NULL;
3344 for (j = 0; j < ncopies; j++)
3346 /* Handle uses. */
3347 if (j == 0)
3349 if (slp_node)
3350 vect_get_slp_defs (op0, op1, slp_node, &vec_oprnds0,
3351 &vec_oprnds1, -1);
3352 else
3354 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
3355 VEC_quick_push (tree, vec_oprnds0, vec_oprnd0);
3356 if (op_type == binary_op)
3358 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL);
3359 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
3363 else
3365 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
3366 VEC_replace (tree, vec_oprnds0, 0, vec_oprnd0);
3367 if (op_type == binary_op)
3369 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1);
3370 VEC_replace (tree, vec_oprnds1, 0, vec_oprnd1);
3374 /* Arguments are ready. Create the new vector stmts. */
3375 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
3376 vect_create_vectorized_promotion_stmts (&vec_oprnds0, &vec_oprnds1,
3377 multi_step_cvt, stmt,
3378 tmp_vec_dsts,
3379 gsi, slp_node, code1, code2,
3380 decl1, decl2, op_type,
3381 &prev_stmt_info);
3384 VEC_free (tree, heap, vec_dsts);
3385 VEC_free (tree, heap, tmp_vec_dsts);
3386 VEC_free (tree, heap, interm_types);
3387 VEC_free (tree, heap, vec_oprnds0);
3388 VEC_free (tree, heap, vec_oprnds1);
3390 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3391 return true;
3395 /* Function vectorizable_store.
3397 Check if STMT defines a non-scalar data-ref (array/pointer/structure) that
3398 can be vectorized.
3399 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3400 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3401 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3403 static bool
3404 vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
3405 slp_tree slp_node)
3407 tree scalar_dest;
3408 tree data_ref;
3409 tree op;
3410 tree vec_oprnd = NULL_TREE;
3411 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3412 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
3413 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3414 tree elem_type;
3415 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3416 struct loop *loop = NULL;
3417 enum machine_mode vec_mode;
3418 tree dummy;
3419 enum dr_alignment_support alignment_support_scheme;
3420 tree def;
3421 gimple def_stmt;
3422 enum vect_def_type dt;
3423 stmt_vec_info prev_stmt_info = NULL;
3424 tree dataref_ptr = NULL_TREE;
3425 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3426 int ncopies;
3427 int j;
3428 gimple next_stmt, first_stmt = NULL;
3429 bool strided_store = false;
3430 bool store_lanes_p = false;
3431 unsigned int group_size, i;
3432 VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
3433 bool inv_p;
3434 VEC(tree,heap) *vec_oprnds = NULL;
3435 bool slp = (slp_node != NULL);
3436 unsigned int vec_num;
3437 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3438 tree aggr_type;
3440 if (loop_vinfo)
3441 loop = LOOP_VINFO_LOOP (loop_vinfo);
3443 /* Multiple types in SLP are handled by creating the appropriate number of
3444 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3445 case of SLP. */
3446 if (slp || PURE_SLP_STMT (stmt_info))
3447 ncopies = 1;
3448 else
3449 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3451 gcc_assert (ncopies >= 1);
3453 /* FORNOW. This restriction should be relaxed. */
3454 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
3456 if (vect_print_dump_info (REPORT_DETAILS))
3457 fprintf (vect_dump, "multiple types in nested loop.");
3458 return false;
3461 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3462 return false;
3464 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3465 return false;
3467 /* Is vectorizable store? */
3469 if (!is_gimple_assign (stmt))
3470 return false;
3472 scalar_dest = gimple_assign_lhs (stmt);
3473 if (TREE_CODE (scalar_dest) != ARRAY_REF
3474 && TREE_CODE (scalar_dest) != INDIRECT_REF
3475 && TREE_CODE (scalar_dest) != COMPONENT_REF
3476 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
3477 && TREE_CODE (scalar_dest) != REALPART_EXPR
3478 && TREE_CODE (scalar_dest) != MEM_REF)
3479 return false;
3481 gcc_assert (gimple_assign_single_p (stmt));
3482 op = gimple_assign_rhs1 (stmt);
3483 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt))
3485 if (vect_print_dump_info (REPORT_DETAILS))
3486 fprintf (vect_dump, "use not simple.");
3487 return false;
3490 /* The scalar rhs type needs to be trivially convertible to the vector
3491 component type. This should always be the case. */
3492 elem_type = TREE_TYPE (vectype);
3493 if (!useless_type_conversion_p (elem_type, TREE_TYPE (op)))
3495 if (vect_print_dump_info (REPORT_DETAILS))
3496 fprintf (vect_dump, "??? operands of different types");
3497 return false;
3500 vec_mode = TYPE_MODE (vectype);
3501 /* FORNOW. In some cases we can vectorize even if the data type is not
3502 supported (e.g. array initialization with 0). */
3503 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
3504 return false;
3506 if (!STMT_VINFO_DATA_REF (stmt_info))
3507 return false;
3509 if (tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0)
3511 if (vect_print_dump_info (REPORT_DETAILS))
3512 fprintf (vect_dump, "negative step for store.");
3513 return false;
3516 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3518 strided_store = true;
3519 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
3520 if (!slp && !PURE_SLP_STMT (stmt_info))
3522 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
3523 if (vect_store_lanes_supported (vectype, group_size))
3524 store_lanes_p = true;
3525 else if (!vect_strided_store_supported (vectype, group_size))
3526 return false;
3529 if (first_stmt == stmt)
3531 /* STMT is the leader of the group. Check the operands of all the
3532 stmts of the group. */
3533 next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
3534 while (next_stmt)
3536 gcc_assert (gimple_assign_single_p (next_stmt));
3537 op = gimple_assign_rhs1 (next_stmt);
3538 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt,
3539 &def, &dt))
3541 if (vect_print_dump_info (REPORT_DETAILS))
3542 fprintf (vect_dump, "use not simple.");
3543 return false;
3545 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
3550 if (!vec_stmt) /* transformation not required. */
3552 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
3553 vect_model_store_cost (stmt_info, ncopies, store_lanes_p, dt, NULL);
3554 return true;
3557 /** Transform. **/
3559 if (strided_store)
3561 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3562 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
3564 GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
3566 /* FORNOW */
3567 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
3569 /* We vectorize all the stmts of the interleaving group when we
3570 reach the last stmt in the group. */
3571 if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
3572 < GROUP_SIZE (vinfo_for_stmt (first_stmt))
3573 && !slp)
3575 *vec_stmt = NULL;
3576 return true;
3579 if (slp)
3581 strided_store = false;
3582 /* VEC_NUM is the number of vect stmts to be created for this
3583 group. */
3584 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3585 first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
3586 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3588 else
3589 /* VEC_NUM is the number of vect stmts to be created for this
3590 group. */
3591 vec_num = group_size;
3593 else
3595 first_stmt = stmt;
3596 first_dr = dr;
3597 group_size = vec_num = 1;
3600 if (vect_print_dump_info (REPORT_DETAILS))
3601 fprintf (vect_dump, "transform store. ncopies = %d",ncopies);
3603 dr_chain = VEC_alloc (tree, heap, group_size);
3604 oprnds = VEC_alloc (tree, heap, group_size);
3606 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
3607 gcc_assert (alignment_support_scheme);
3608 /* Targets with store-lane instructions must not require explicit
3609 realignment. */
3610 gcc_assert (!store_lanes_p
3611 || alignment_support_scheme == dr_aligned
3612 || alignment_support_scheme == dr_unaligned_supported);
3614 if (store_lanes_p)
3615 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
3616 else
3617 aggr_type = vectype;
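/* For illustration (editor's sketch, hypothetical numbers): with
   VEC_NUM == 2 and NUNITS == 4, AGGR_TYPE is elem_type[8] and the
   whole group is written by one IFN_STORE_LANES call, which targets
   such as ARM NEON implement with interleaving stores like vst2.  */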
3619 /* In case the vectorization factor (VF) is bigger than the number
3620 of elements that we can fit in a vectype (nunits), we have to generate
3621 more than one vector stmt - i.e - we need to "unroll" the
3622 vector stmt by a factor VF/nunits. For more details see documentation in
3623 vect_get_vec_def_for_copy_stmt. */
3625 /* In case of interleaving (non-unit strided access):
3627 S1: &base + 2 = x2
3628 S2: &base = x0
3629 S3: &base + 1 = x1
3630 S4: &base + 3 = x3
3632 We create vectorized stores starting from base address (the access of the
3633 first stmt in the chain (S2 in the above example), when the last store stmt
3634 of the chain (S4) is reached:
3636 VS1: &base = vx2
3637 VS2: &base + vec_size*1 = vx0
3638 VS3: &base + vec_size*2 = vx1
3639 VS4: &base + vec_size*3 = vx3
3641 Then permutation statements are generated:
3643 VS5: vx5 = VEC_INTERLEAVE_HIGH_EXPR < vx0, vx3 >
3644 VS6: vx6 = VEC_INTERLEAVE_LOW_EXPR < vx0, vx3 >
3647 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3648 (the order of the data-refs in the output of vect_permute_store_chain
3649 corresponds to the order of scalar stmts in the interleaving chain - see
3650 the documentation of vect_permute_store_chain()).
3652 In case of both multiple types and interleaving, above vector stores and
3653 permutation stmts are created for every copy. The result vector stmts are
3654 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
3655 STMT_VINFO_RELATED_STMT for the next copies.
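   /* An illustrative sketch (hedged, not from the original comment) of
      a scalar loop that gives rise to the interleaving group above:

          for (i = 0; i < n; i++)
            {
              S1:  base[4 * i + 2] = x2;
              S2:  base[4 * i + 0] = x0;
              S3:  base[4 * i + 1] = x1;
              S4:  base[4 * i + 3] = x3;
            }

      Nothing is emitted for S1-S3; the whole chain is vectorized only
      when S4, the last store of the group, is reached.  */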
3658 prev_stmt_info = NULL;
3659 for (j = 0; j < ncopies; j++)
3661 gimple new_stmt;
3662 gimple ptr_incr;
3664 if (j == 0)
3666 if (slp)
3668 /* Get vectorized arguments for SLP_NODE. */
3669 vect_get_slp_defs (NULL_TREE, NULL_TREE, slp_node, &vec_oprnds,
3670 NULL, -1);
3672 vec_oprnd = VEC_index (tree, vec_oprnds, 0);
3674 else
3676 /* For interleaved stores we collect vectorized defs for all the
3677 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
3678 used as an input to vect_permute_store_chain(), and OPRNDS as
3679 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
3681 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3682 OPRNDS are of size 1. */
3683 next_stmt = first_stmt;
3684 for (i = 0; i < group_size; i++)
3686 /* Since gaps are not supported for interleaved stores,
3687 GROUP_SIZE is the exact number of stmts in the chain.
3688 Therefore, NEXT_STMT can't be NULL_TREE. In case that
3689 there is no interleaving, GROUP_SIZE is 1, and only one
3690 iteration of the loop will be executed. */
3691 gcc_assert (next_stmt
3692 && gimple_assign_single_p (next_stmt));
3693 op = gimple_assign_rhs1 (next_stmt);
3695 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
3696 NULL);
3697 VEC_quick_push(tree, dr_chain, vec_oprnd);
3698 VEC_quick_push(tree, oprnds, vec_oprnd);
3699 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
3703 /* We should have caught mismatched types earlier. */
3704 gcc_assert (useless_type_conversion_p (vectype,
3705 TREE_TYPE (vec_oprnd)));
3706 dataref_ptr = vect_create_data_ref_ptr (first_stmt, aggr_type, NULL,
3707 NULL_TREE, &dummy, gsi,
3708 &ptr_incr, false, &inv_p);
3709 gcc_assert (bb_vinfo || !inv_p);
3711 else
3713 /* For interleaved stores we created vectorized defs for all the
3714 defs stored in OPRNDS in the previous iteration (previous copy).
3715 DR_CHAIN is then used as an input to vect_permute_store_chain(),
3716 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
3717 next copy.
3718 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3719 OPRNDS are of size 1. */
3720 for (i = 0; i < group_size; i++)
3722 op = VEC_index (tree, oprnds, i);
3723 vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def,
3724 &dt);
3725 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
3726 VEC_replace(tree, dr_chain, i, vec_oprnd);
3727 VEC_replace(tree, oprnds, i, vec_oprnd);
3729 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3730 TYPE_SIZE_UNIT (aggr_type));
3733 if (store_lanes_p)
3735 tree vec_array;
3737 /* Combine all the vectors into an array. */
3738 vec_array = create_vector_array (vectype, vec_num);
3739 for (i = 0; i < vec_num; i++)
3741 vec_oprnd = VEC_index (tree, dr_chain, i);
3742 write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
3745 /* Emit:
3746 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
3747 data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
3748 new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array);
3749 gimple_call_set_lhs (new_stmt, data_ref);
3750 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3751 mark_symbols_for_renaming (new_stmt);
3753 else
3755 new_stmt = NULL;
3756 if (strided_store)
3758 result_chain = VEC_alloc (tree, heap, group_size);
3759 /* Permute. */
3760 vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
3761 &result_chain);
3764 next_stmt = first_stmt;
3765 for (i = 0; i < vec_num; i++)
3767 struct ptr_info_def *pi;
3769 if (i > 0)
3770 /* Bump the vector pointer. */
3771 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
3772 stmt, NULL_TREE);
3774 if (slp)
3775 vec_oprnd = VEC_index (tree, vec_oprnds, i);
3776 else if (strided_store)
3777 /* For strided stores vectorized defs are interleaved in
3778 vect_permute_store_chain(). */
3779 vec_oprnd = VEC_index (tree, result_chain, i);
3781 data_ref = build2 (MEM_REF, TREE_TYPE (vec_oprnd), dataref_ptr,
3782 build_int_cst (reference_alias_ptr_type
3783 (DR_REF (first_dr)), 0));
3784 pi = get_ptr_info (dataref_ptr);
3785 pi->align = TYPE_ALIGN_UNIT (vectype);
3786 if (aligned_access_p (first_dr))
3787 pi->misalign = 0;
3788 else if (DR_MISALIGNMENT (first_dr) == -1)
3790 TREE_TYPE (data_ref)
3791 = build_aligned_type (TREE_TYPE (data_ref),
3792 TYPE_ALIGN (elem_type));
3793 pi->align = TYPE_ALIGN_UNIT (elem_type);
3794 pi->misalign = 0;
3796 else
3798 TREE_TYPE (data_ref)
3799 = build_aligned_type (TREE_TYPE (data_ref),
3800 TYPE_ALIGN (elem_type));
3801 pi->misalign = DR_MISALIGNMENT (first_dr);
3804 /* Arguments are ready. Create the new vector stmt. */
3805 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
3806 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3807 mark_symbols_for_renaming (new_stmt);
3809 if (slp)
3810 continue;
3812 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
3813 if (!next_stmt)
3814 break;
3817 if (!slp)
3819 if (j == 0)
3820 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3821 else
3822 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3823 prev_stmt_info = vinfo_for_stmt (new_stmt);
3827 VEC_free (tree, heap, dr_chain);
3828 VEC_free (tree, heap, oprnds);
3829 if (result_chain)
3830 VEC_free (tree, heap, result_chain);
3831 if (vec_oprnds)
3832 VEC_free (tree, heap, vec_oprnds);
3834 return true;
3837 /* Given a vector type VECTYPE returns a builtin DECL to be used
3838 for vector permutation and stores a mask into *MASK that implements
3839 reversal of the vector elements.  If that is impossible,
3840 NULL is returned (and *MASK is unchanged). */
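/* For example (an added hedged note): for a four-element vector type the
   reversal would be described by the mask { 3, 2, 1, 0 }, i.e. element I
   of the permuted vector is taken from element NUNITS - 1 - I of the
   input.  */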
3842 static tree
3843 perm_mask_for_reverse (tree vectype, tree *mask)
3845 tree builtin_decl;
3846 tree mask_element_type, mask_type;
3847 tree mask_vec = NULL;
3848 int i;
3849 int nunits;
3850 if (!targetm.vectorize.builtin_vec_perm)
3851 return NULL;
3853 builtin_decl = targetm.vectorize.builtin_vec_perm (vectype,
3854 &mask_element_type);
3855 if (!builtin_decl || !mask_element_type)
3856 return NULL;
3858 mask_type = get_vectype_for_scalar_type (mask_element_type);
3859 nunits = TYPE_VECTOR_SUBPARTS (vectype);
3860 if (!mask_type
3861 || TYPE_VECTOR_SUBPARTS (vectype) != TYPE_VECTOR_SUBPARTS (mask_type))
3862 return NULL;
3864 for (i = 0; i < nunits; i++)
3865 mask_vec = tree_cons (NULL, build_int_cst (mask_element_type, i), mask_vec);
3866 mask_vec = build_vector (mask_type, mask_vec);
3868 if (!targetm.vectorize.builtin_vec_perm_ok (vectype, mask_vec))
3869 return NULL;
3870 if (mask)
3871 *mask = mask_vec;
3872 return builtin_decl;
3875 /* Given a vector variable X, which was generated for the scalar LHS of
3876 STMT, generate instructions to reverse the vector elements of X,
3877 insert them at *GSI and return the permuted vector variable. */
3879 static tree
3880 reverse_vec_elements (tree x, gimple stmt, gimple_stmt_iterator *gsi)
3882 tree vectype = TREE_TYPE (x);
3883 tree mask_vec, builtin_decl;
3884 tree perm_dest, data_ref;
3885 gimple perm_stmt;
3887 builtin_decl = perm_mask_for_reverse (vectype, &mask_vec);
3889 perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
3891 /* Generate the permute statement. */
3892 perm_stmt = gimple_build_call (builtin_decl, 3, x, x, mask_vec);
3893 if (!useless_type_conversion_p (vectype,
3894 TREE_TYPE (TREE_TYPE (builtin_decl))))
3896 tree tem = create_tmp_reg (TREE_TYPE (TREE_TYPE (builtin_decl)), NULL);
3897 tem = make_ssa_name (tem, perm_stmt);
3898 gimple_call_set_lhs (perm_stmt, tem);
3899 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
3900 perm_stmt = gimple_build_assign (NULL_TREE,
3901 build1 (VIEW_CONVERT_EXPR,
3902 vectype, tem));
3904 data_ref = make_ssa_name (perm_dest, perm_stmt);
3905 gimple_set_lhs (perm_stmt, data_ref);
3906 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
3908 return data_ref;
3911 /* vectorizable_load.
3913 Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
3914 can be vectorized.
3915 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3916 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3917 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3919 static bool
3920 vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
3921 slp_tree slp_node, slp_instance slp_node_instance)
3923 tree scalar_dest;
3924 tree vec_dest = NULL;
3925 tree data_ref = NULL;
3926 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3927 stmt_vec_info prev_stmt_info;
3928 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3929 struct loop *loop = NULL;
3930 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
3931 bool nested_in_vect_loop = false;
3932 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
3933 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3934 tree elem_type;
3935 tree new_temp;
3936 enum machine_mode mode;
3937 gimple new_stmt = NULL;
3938 tree dummy;
3939 enum dr_alignment_support alignment_support_scheme;
3940 tree dataref_ptr = NULL_TREE;
3941 gimple ptr_incr;
3942 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3943 int ncopies;
3944 int i, j, group_size;
3945 tree msq = NULL_TREE, lsq;
3946 tree offset = NULL_TREE;
3947 tree realignment_token = NULL_TREE;
3948 gimple phi = NULL;
3949 VEC(tree,heap) *dr_chain = NULL;
3950 bool strided_load = false;
3951 bool load_lanes_p = false;
3952 gimple first_stmt;
3953 tree scalar_type;
3954 bool inv_p;
3955 bool negative;
3956 bool compute_in_loop = false;
3957 struct loop *at_loop;
3958 int vec_num;
3959 bool slp = (slp_node != NULL);
3960 bool slp_perm = false;
3961 enum tree_code code;
3962 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3963 int vf;
3964 tree aggr_type;
3966 if (loop_vinfo)
3968 loop = LOOP_VINFO_LOOP (loop_vinfo);
3969 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
3970 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3972 else
3973 vf = 1;
3975 /* Multiple types in SLP are handled by creating the appropriate number of
3976 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3977 case of SLP. */
3978 if (slp || PURE_SLP_STMT (stmt_info))
3979 ncopies = 1;
3980 else
3981 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3983 gcc_assert (ncopies >= 1);
3985 /* FORNOW. This restriction should be relaxed. */
3986 if (nested_in_vect_loop && ncopies > 1)
3988 if (vect_print_dump_info (REPORT_DETAILS))
3989 fprintf (vect_dump, "multiple types in nested loop.");
3990 return false;
3993 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3994 return false;
3996 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3997 return false;
3999 /* Is vectorizable load? */
4000 if (!is_gimple_assign (stmt))
4001 return false;
4003 scalar_dest = gimple_assign_lhs (stmt);
4004 if (TREE_CODE (scalar_dest) != SSA_NAME)
4005 return false;
4007 code = gimple_assign_rhs_code (stmt);
4008 if (code != ARRAY_REF
4009 && code != INDIRECT_REF
4010 && code != COMPONENT_REF
4011 && code != IMAGPART_EXPR
4012 && code != REALPART_EXPR
4013 && code != MEM_REF)
4014 return false;
4016 if (!STMT_VINFO_DATA_REF (stmt_info))
4017 return false;
4019 negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
4020 if (negative && ncopies > 1)
4022 if (vect_print_dump_info (REPORT_DETAILS))
4023 fprintf (vect_dump, "multiple types with negative step.");
4024 return false;
4027 scalar_type = TREE_TYPE (DR_REF (dr));
4028 mode = TYPE_MODE (vectype);
4030 /* FORNOW. In some cases we can vectorize even if the data-type is not
4031 supported (e.g., data copies). */
4032 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
4034 if (vect_print_dump_info (REPORT_DETAILS))
4035 fprintf (vect_dump, "Aligned load, but unsupported type.");
4036 return false;
4039 /* The vector component type needs to be trivially convertible to the
4040 scalar lhs. This should always be the case. */
4041 elem_type = TREE_TYPE (vectype);
4042 if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), elem_type))
4044 if (vect_print_dump_info (REPORT_DETAILS))
4045 fprintf (vect_dump, "??? operands of different types");
4046 return false;
4049 /* Check if the load is a part of an interleaving chain. */
4050 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
4052 strided_load = true;
4053 /* FORNOW */
4054 gcc_assert (! nested_in_vect_loop);
4056 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
4057 if (!slp && !PURE_SLP_STMT (stmt_info))
4059 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
4060 if (vect_load_lanes_supported (vectype, group_size))
4061 load_lanes_p = true;
4062 else if (!vect_strided_load_supported (vectype, group_size))
4063 return false;
4067 if (negative)
4069 gcc_assert (!strided_load);
4070 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
4071 if (alignment_support_scheme != dr_aligned
4072 && alignment_support_scheme != dr_unaligned_supported)
4074 if (vect_print_dump_info (REPORT_DETAILS))
4075 fprintf (vect_dump, "negative step but alignment required.");
4076 return false;
4078 if (!perm_mask_for_reverse (vectype, NULL))
4080 if (vect_print_dump_info (REPORT_DETAILS))
4081 fprintf (vect_dump, "negative step and reversing not supported.");
4082 return false;
4086 if (!vec_stmt) /* transformation not required. */
4088 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
4089 vect_model_load_cost (stmt_info, ncopies, load_lanes_p, NULL);
4090 return true;
4093 if (vect_print_dump_info (REPORT_DETAILS))
4094 fprintf (vect_dump, "transform load. ncopies = %d", ncopies);
4096 /** Transform. **/
4098 if (strided_load)
4100 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
4101 /* Check if the chain of loads is already vectorized. */
4102 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt)))
4104 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4105 return true;
4107 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
4108 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
4110 /* VEC_NUM is the number of vect stmts to be created for this group. */
4111 if (slp)
4113 strided_load = false;
4114 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
4115 if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance))
4116 slp_perm = true;
4118 else
4119 vec_num = group_size;
4121 else
4123 first_stmt = stmt;
4124 first_dr = dr;
4125 group_size = vec_num = 1;
4128 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
4129 gcc_assert (alignment_support_scheme);
4130 /* Targets with load-lane instructions must not require explicit
4131 realignment. */
4132 gcc_assert (!load_lanes_p
4133 || alignment_support_scheme == dr_aligned
4134 || alignment_support_scheme == dr_unaligned_supported);
4136 /* In case the vectorization factor (VF) is bigger than the number
4137 of elements that we can fit in a vectype (nunits), we have to generate
4138 more than one vector stmt - i.e., we need to "unroll" the
4139 vector stmt by a factor VF/nunits. In doing so, we record a pointer
4140 from one copy of the vector stmt to the next, in the field
4141 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
4142 stages to find the correct vector defs to be used when vectorizing
4143 stmts that use the defs of the current stmt. The example below
4144 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
4145 need to create 4 vectorized stmts):
4147 before vectorization:
4148 RELATED_STMT VEC_STMT
4149 S1: x = memref - -
4150 S2: z = x + 1 - -
4152 step 1: vectorize stmt S1:
4153 We first create the vector stmt VS1_0, and, as usual, record a
4154 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
4155 Next, we create the vector stmt VS1_1, and record a pointer to
4156 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
4157 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
4158 stmts and pointers:
4159 RELATED_STMT VEC_STMT
4160 VS1_0: vx0 = memref0 VS1_1 -
4161 VS1_1: vx1 = memref1 VS1_2 -
4162 VS1_2: vx2 = memref2 VS1_3 -
4163 VS1_3: vx3 = memref3 - -
4164 S1: x = load - VS1_0
4165 S2: z = x + 1 - -
4167 See in documentation in vect_get_vec_def_for_stmt_copy for how the
4168 information we recorded in RELATED_STMT field is used to vectorize
4169 stmt S2. */
4171 /* In case of interleaving (non-unit strided access):
4173 S1: x2 = &base + 2
4174 S2: x0 = &base
4175 S3: x1 = &base + 1
4176 S4: x3 = &base + 3
4178 Vectorized loads are created in the order of memory accesses
4179 starting from the access of the first stmt of the chain:
4181 VS1: vx0 = &base
4182 VS2: vx1 = &base + vec_size*1
4183 VS3: vx3 = &base + vec_size*2
4184 VS4: vx4 = &base + vec_size*3
4186 Then permutation statements are generated:
4188 VS5: vx5 = VEC_EXTRACT_EVEN_EXPR < vx0, vx1 >
4189 VS6: vx6 = VEC_EXTRACT_ODD_EXPR < vx0, vx1 >
4192 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
4193 (the order of the data-refs in the output of vect_permute_load_chain
4194 corresponds to the order of scalar stmts in the interleaving chain - see
4195 the documentation of vect_permute_load_chain()).
4196 The generation of permutation stmts and recording them in
4197 STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().
4199 In case of both multiple types and interleaving, the vector loads and
4200 permutation stmts above are created for every copy. The result vector
4201 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
4202 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
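   /* An illustrative sketch (hedged, added commentary): the interleaving
      group above corresponds to a scalar loop such as

          for (i = 0; i < n; i++)
            {
              S1:  x2 = base[4 * i + 2];
              S2:  x0 = base[4 * i + 0];
              S3:  x1 = base[4 * i + 1];
              S4:  x3 = base[4 * i + 3];
            }

      where the extract-even/extract-odd permutations de-interleave the
      contiguous vector loads back into x0..x3.  */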
4204 /* If the data reference is aligned (dr_aligned) or potentially unaligned
4205 on a target that supports unaligned accesses (dr_unaligned_supported)
4206 we generate the following code:
4207 p = initial_addr;
4208 indx = 0;
4209 loop {
4210 p = p + indx * vectype_size;
4211 vec_dest = *(p);
4212 indx = indx + 1;
4215 Otherwise, the data reference is potentially unaligned on a target that
4216 does not support unaligned accesses (dr_explicit_realign_optimized) -
4217 then generate the following code, in which the data in each iteration is
4218 obtained by two vector loads, one from the previous iteration, and one
4219 from the current iteration:
4220 p1 = initial_addr;
4221 msq_init = *(floor(p1))
4222 p2 = initial_addr + VS - 1;
4223 realignment_token = call target_builtin;
4224 indx = 0;
4225 loop {
4226 p2 = p2 + indx * vectype_size
4227 lsq = *(floor(p2))
4228 vec_dest = realign_load (msq, lsq, realignment_token)
4229 indx = indx + 1;
4230 msq = lsq;
4231 } */
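   /* A worked illustration (hedged, not from the original comment): with
      16-byte vectors and an initial address that is 4 bytes past a
      16-byte boundary, floor(p1) and floor(p2) = floor(p1 + 15) name the
      two aligned vectors that straddle the desired data; realign_load
      combines them, steered by REALIGNMENT_TOKEN, into the value an
      unaligned load of *(p1) would have produced.  */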
4233 /* If the misalignment remains the same throughout the execution of the
4234 loop, we can create the init_addr and permutation mask at the loop
4235 preheader. Otherwise, it needs to be created inside the loop.
4236 This can only occur when vectorizing memory accesses in the inner-loop
4237 nested within an outer-loop that is being vectorized. */
4239 if (loop && nested_in_vect_loop_p (loop, stmt)
4240 && (TREE_INT_CST_LOW (DR_STEP (dr))
4241 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
4243 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
4244 compute_in_loop = true;
4247 if ((alignment_support_scheme == dr_explicit_realign_optimized
4248 || alignment_support_scheme == dr_explicit_realign)
4249 && !compute_in_loop)
4251 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
4252 alignment_support_scheme, NULL_TREE,
4253 &at_loop);
4254 if (alignment_support_scheme == dr_explicit_realign_optimized)
4256 phi = SSA_NAME_DEF_STMT (msq);
4257 offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
4260 else
4261 at_loop = loop;
4263 if (negative)
4264 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
4266 if (load_lanes_p)
4267 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
4268 else
4269 aggr_type = vectype;
4271 prev_stmt_info = NULL;
4272 for (j = 0; j < ncopies; j++)
4274 /* 1. Create the vector or array pointer update chain. */
4275 if (j == 0)
4276 dataref_ptr = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
4277 offset, &dummy, gsi,
4278 &ptr_incr, false, &inv_p);
4279 else
4280 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
4281 TYPE_SIZE_UNIT (aggr_type));
4283 if (strided_load || slp_perm)
4284 dr_chain = VEC_alloc (tree, heap, vec_num);
4286 if (load_lanes_p)
4288 tree vec_array;
4290 vec_array = create_vector_array (vectype, vec_num);
4292 /* Emit:
4293 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
4294 data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
4295 new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
4296 gimple_call_set_lhs (new_stmt, vec_array);
4297 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4298 mark_symbols_for_renaming (new_stmt);
4300 /* Extract each vector into an SSA_NAME. */
4301 for (i = 0; i < vec_num; i++)
4303 new_temp = read_vector_array (stmt, gsi, scalar_dest,
4304 vec_array, i);
4305 VEC_quick_push (tree, dr_chain, new_temp);
4308 /* Record the mapping between SSA_NAMEs and statements. */
4309 vect_record_strided_load_vectors (stmt, dr_chain);
4311 else
4313 for (i = 0; i < vec_num; i++)
4315 if (i > 0)
4316 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
4317 stmt, NULL_TREE);
4319 /* 2. Create the vector-load in the loop. */
4320 switch (alignment_support_scheme)
4322 case dr_aligned:
4323 case dr_unaligned_supported:
4325 struct ptr_info_def *pi;
4326 data_ref
4327 = build2 (MEM_REF, vectype, dataref_ptr,
4328 build_int_cst (reference_alias_ptr_type
4329 (DR_REF (first_dr)), 0));
4330 pi = get_ptr_info (dataref_ptr);
4331 pi->align = TYPE_ALIGN_UNIT (vectype);
4332 if (alignment_support_scheme == dr_aligned)
4334 gcc_assert (aligned_access_p (first_dr));
4335 pi->misalign = 0;
4337 else if (DR_MISALIGNMENT (first_dr) == -1)
4339 TREE_TYPE (data_ref)
4340 = build_aligned_type (TREE_TYPE (data_ref),
4341 TYPE_ALIGN (elem_type));
4342 pi->align = TYPE_ALIGN_UNIT (elem_type);
4343 pi->misalign = 0;
4345 else
4347 TREE_TYPE (data_ref)
4348 = build_aligned_type (TREE_TYPE (data_ref),
4349 TYPE_ALIGN (elem_type));
4350 pi->misalign = DR_MISALIGNMENT (first_dr);
4352 break;
4354 case dr_explicit_realign:
4356 tree ptr, bump;
4357 tree vs_minus_1;
4359 vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
4361 if (compute_in_loop)
4362 msq = vect_setup_realignment (first_stmt, gsi,
4363 &realignment_token,
4364 dr_explicit_realign,
4365 dataref_ptr, NULL);
4367 new_stmt = gimple_build_assign_with_ops
4368 (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
4369 build_int_cst
4370 (TREE_TYPE (dataref_ptr),
4371 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
4372 ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
4373 gimple_assign_set_lhs (new_stmt, ptr);
4374 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4375 data_ref
4376 = build2 (MEM_REF, vectype, ptr,
4377 build_int_cst (reference_alias_ptr_type
4378 (DR_REF (first_dr)), 0));
4379 vec_dest = vect_create_destination_var (scalar_dest,
4380 vectype);
4381 new_stmt = gimple_build_assign (vec_dest, data_ref);
4382 new_temp = make_ssa_name (vec_dest, new_stmt);
4383 gimple_assign_set_lhs (new_stmt, new_temp);
4384 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
4385 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
4386 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4387 msq = new_temp;
4389 bump = size_binop (MULT_EXPR, vs_minus_1,
4390 TYPE_SIZE_UNIT (scalar_type));
4391 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
4392 new_stmt = gimple_build_assign_with_ops
4393 (BIT_AND_EXPR, NULL_TREE, ptr,
4394 build_int_cst
4395 (TREE_TYPE (ptr),
4396 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
4397 ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
4398 gimple_assign_set_lhs (new_stmt, ptr);
4399 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4400 data_ref
4401 = build2 (MEM_REF, vectype, ptr,
4402 build_int_cst (reference_alias_ptr_type
4403 (DR_REF (first_dr)), 0));
4404 break;
4406 case dr_explicit_realign_optimized:
4407 new_stmt = gimple_build_assign_with_ops
4408 (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
4409 build_int_cst
4410 (TREE_TYPE (dataref_ptr),
4411 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
4412 new_temp = make_ssa_name (SSA_NAME_VAR (dataref_ptr),
4413 new_stmt);
4414 gimple_assign_set_lhs (new_stmt, new_temp);
4415 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4416 data_ref
4417 = build2 (MEM_REF, vectype, new_temp,
4418 build_int_cst (reference_alias_ptr_type
4419 (DR_REF (first_dr)), 0));
4420 break;
4421 default:
4422 gcc_unreachable ();
4424 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4425 new_stmt = gimple_build_assign (vec_dest, data_ref);
4426 new_temp = make_ssa_name (vec_dest, new_stmt);
4427 gimple_assign_set_lhs (new_stmt, new_temp);
4428 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4429 mark_symbols_for_renaming (new_stmt);
4431 /* 3. Handle explicit realignment if necessary/supported.
4432 Create in loop:
4433 vec_dest = realign_load (msq, lsq, realignment_token) */
4434 if (alignment_support_scheme == dr_explicit_realign_optimized
4435 || alignment_support_scheme == dr_explicit_realign)
4437 lsq = gimple_assign_lhs (new_stmt);
4438 if (!realignment_token)
4439 realignment_token = dataref_ptr;
4440 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4441 new_stmt
4442 = gimple_build_assign_with_ops3 (REALIGN_LOAD_EXPR,
4443 vec_dest, msq, lsq,
4444 realignment_token);
4445 new_temp = make_ssa_name (vec_dest, new_stmt);
4446 gimple_assign_set_lhs (new_stmt, new_temp);
4447 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4449 if (alignment_support_scheme == dr_explicit_realign_optimized)
4451 gcc_assert (phi);
4452 if (i == vec_num - 1 && j == ncopies - 1)
4453 add_phi_arg (phi, lsq,
4454 loop_latch_edge (containing_loop),
4455 UNKNOWN_LOCATION);
4456 msq = lsq;
4460 /* 4. Handle invariant-load. */
4461 if (inv_p && !bb_vinfo)
4463 gcc_assert (!strided_load);
4464 gcc_assert (nested_in_vect_loop_p (loop, stmt));
4465 if (j == 0)
4467 int k;
4468 tree t = NULL_TREE;
4469 tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);
4471 /* CHECKME: bitpos depends on endianness? */
4472 bitpos = bitsize_zero_node;
4473 vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
4474 bitsize, bitpos);
4475 vec_dest = vect_create_destination_var (scalar_dest,
4476 NULL_TREE);
4477 new_stmt = gimple_build_assign (vec_dest, vec_inv);
4478 new_temp = make_ssa_name (vec_dest, new_stmt);
4479 gimple_assign_set_lhs (new_stmt, new_temp);
4480 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4482 for (k = nunits - 1; k >= 0; --k)
4483 t = tree_cons (NULL_TREE, new_temp, t);
4484 /* FIXME: use build_constructor directly. */
4485 vec_inv = build_constructor_from_list (vectype, t);
4486 new_temp = vect_init_vector (stmt, vec_inv,
4487 vectype, gsi);
4488 new_stmt = SSA_NAME_DEF_STMT (new_temp);
4490 else
4491 gcc_unreachable (); /* FORNOW. */
4494 if (negative)
4496 new_temp = reverse_vec_elements (new_temp, stmt, gsi);
4497 new_stmt = SSA_NAME_DEF_STMT (new_temp);
4500 /* Collect vector loads and later create their permutation in
4501 vect_transform_strided_load (). */
4502 if (strided_load || slp_perm)
4503 VEC_quick_push (tree, dr_chain, new_temp);
4505 /* Store vector loads in the corresponding SLP_NODE. */
4506 if (slp && !slp_perm)
4507 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node),
4508 new_stmt);
4512 if (slp && !slp_perm)
4513 continue;
4515 if (slp_perm)
4517 if (!vect_transform_slp_perm_load (stmt, dr_chain, gsi, vf,
4518 slp_node_instance, false))
4520 VEC_free (tree, heap, dr_chain);
4521 return false;
4524 else
4526 if (strided_load)
4528 if (!load_lanes_p)
4529 vect_transform_strided_load (stmt, dr_chain, group_size, gsi);
4530 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4532 else
4534 if (j == 0)
4535 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4536 else
4537 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4538 prev_stmt_info = vinfo_for_stmt (new_stmt);
4541 if (dr_chain)
4542 VEC_free (tree, heap, dr_chain);
4545 return true;
4548 /* Function vect_is_simple_cond.
4550 Input:
4551 LOOP - the loop that is being vectorized.
4552 COND - Condition that is checked for simple use.
4554 Returns whether a COND can be vectorized. Checks whether
4555 condition operands are supportable using vect_is_simple_use. */
4557 static bool
4558 vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo)
4560 tree lhs, rhs;
4561 tree def;
4562 enum vect_def_type dt;
4564 if (!COMPARISON_CLASS_P (cond))
4565 return false;
4567 lhs = TREE_OPERAND (cond, 0);
4568 rhs = TREE_OPERAND (cond, 1);
4570 if (TREE_CODE (lhs) == SSA_NAME)
4572 gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
4573 if (!vect_is_simple_use (lhs, loop_vinfo, NULL, &lhs_def_stmt, &def,
4574 &dt))
4575 return false;
4577 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
4578 && TREE_CODE (lhs) != FIXED_CST)
4579 return false;
4581 if (TREE_CODE (rhs) == SSA_NAME)
4583 gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
4584 if (!vect_is_simple_use (rhs, loop_vinfo, NULL, &rhs_def_stmt, &def,
4585 &dt))
4586 return false;
4588 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
4589 && TREE_CODE (rhs) != FIXED_CST)
4590 return false;
4592 return true;
4595 /* vectorizable_condition.
4597 Check if STMT is conditional modify expression that can be vectorized.
4598 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4599 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
4600 at GSI.
4602 When STMT is vectorized as nested cycle, REDUC_DEF is the vector variable
4603 to be used at REDUC_INDEX (in then clause if REDUC_INDEX is 1, and in
4604 else clause if it is 2).
4606 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
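/* An illustrative note (hedged, added commentary): for a conditional
   reduction such as

       s = (a[i] < b[i]) ? s : c[i];

   REDUC_INDEX is 1 and the THEN clause is replaced by the vector
   reduction variable REDUC_DEF; with the clauses swapped, REDUC_INDEX
   is 2 and the ELSE clause is replaced instead.  */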
4608 bool
4609 vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
4610 gimple *vec_stmt, tree reduc_def, int reduc_index)
4612 tree scalar_dest = NULL_TREE;
4613 tree vec_dest = NULL_TREE;
4614 tree op = NULL_TREE;
4615 tree cond_expr, then_clause, else_clause;
4616 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4617 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4618 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
4619 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
4620 tree vec_compare, vec_cond_expr;
4621 tree new_temp;
4622 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4623 enum machine_mode vec_mode;
4624 tree def;
4625 enum vect_def_type dt, dts[4];
4626 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4627 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4628 enum tree_code code;
4629 stmt_vec_info prev_stmt_info = NULL;
4630 int j;
4632 /* FORNOW: unsupported in basic block SLP. */
4633 gcc_assert (loop_vinfo);
4635 /* FORNOW: SLP not supported. */
4636 if (STMT_SLP_TYPE (stmt_info))
4637 return false;
4639 gcc_assert (ncopies >= 1);
4640 if (reduc_index && ncopies > 1)
4641 return false; /* FORNOW */
4643 if (!STMT_VINFO_RELEVANT_P (stmt_info))
4644 return false;
4646 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4647 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
4648 && reduc_def))
4649 return false;
4651 /* FORNOW: not yet supported. */
4652 if (STMT_VINFO_LIVE_P (stmt_info))
4654 if (vect_print_dump_info (REPORT_DETAILS))
4655 fprintf (vect_dump, "value used after loop.");
4656 return false;
4659 /* Is vectorizable conditional operation? */
4660 if (!is_gimple_assign (stmt))
4661 return false;
4663 code = gimple_assign_rhs_code (stmt);
4665 if (code != COND_EXPR)
4666 return false;
4668 gcc_assert (gimple_assign_single_p (stmt));
4669 op = gimple_assign_rhs1 (stmt);
4670 cond_expr = TREE_OPERAND (op, 0);
4671 then_clause = TREE_OPERAND (op, 1);
4672 else_clause = TREE_OPERAND (op, 2);
4674 if (!vect_is_simple_cond (cond_expr, loop_vinfo))
4675 return false;
4677 /* We do not handle two different vector types for the condition
4678 and the values. */
4679 if (!types_compatible_p (TREE_TYPE (TREE_OPERAND (cond_expr, 0)),
4680 TREE_TYPE (vectype)))
4681 return false;
4683 if (TREE_CODE (then_clause) == SSA_NAME)
4685 gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
4686 if (!vect_is_simple_use (then_clause, loop_vinfo, NULL,
4687 &then_def_stmt, &def, &dt))
4688 return false;
4690 else if (TREE_CODE (then_clause) != INTEGER_CST
4691 && TREE_CODE (then_clause) != REAL_CST
4692 && TREE_CODE (then_clause) != FIXED_CST)
4693 return false;
4695 if (TREE_CODE (else_clause) == SSA_NAME)
4697 gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
4698 if (!vect_is_simple_use (else_clause, loop_vinfo, NULL,
4699 &else_def_stmt, &def, &dt))
4700 return false;
4702 else if (TREE_CODE (else_clause) != INTEGER_CST
4703 && TREE_CODE (else_clause) != REAL_CST
4704 && TREE_CODE (else_clause) != FIXED_CST)
4705 return false;
4708 vec_mode = TYPE_MODE (vectype);
4710 if (!vec_stmt)
4712 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
4713 return expand_vec_cond_expr_p (TREE_TYPE (op), vec_mode);
4716 /* Transform */
4718 /* Handle def. */
4719 scalar_dest = gimple_assign_lhs (stmt);
4720 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4722 /* Handle cond expr. */
4723 for (j = 0; j < ncopies; j++)
4725 gimple new_stmt;
4726 if (j == 0)
4728 gimple gtemp;
4729 vec_cond_lhs =
4730 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0),
4731 stmt, NULL);
4732 vect_is_simple_use (TREE_OPERAND (cond_expr, 0), loop_vinfo,
4733 NULL, &gtemp, &def, &dts[0]);
4734 vec_cond_rhs =
4735 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
4736 stmt, NULL);
4737 vect_is_simple_use (TREE_OPERAND (cond_expr, 1), loop_vinfo,
4738 NULL, &gtemp, &def, &dts[1]);
4739 if (reduc_index == 1)
4740 vec_then_clause = reduc_def;
4741 else
4743 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
4744 stmt, NULL);
4745 vect_is_simple_use (then_clause, loop_vinfo,
4746 NULL, &gtemp, &def, &dts[2]);
4748 if (reduc_index == 2)
4749 vec_else_clause = reduc_def;
4750 else
4752 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
4753 stmt, NULL);
4754 vect_is_simple_use (else_clause, loop_vinfo,
4755 NULL, &gtemp, &def, &dts[3]);
4758 else
4760 vec_cond_lhs = vect_get_vec_def_for_stmt_copy (dts[0], vec_cond_lhs);
4761 vec_cond_rhs = vect_get_vec_def_for_stmt_copy (dts[1], vec_cond_rhs);
4762 vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
4763 vec_then_clause);
4764 vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
4765 vec_else_clause);
4768 /* Arguments are ready. Create the new vector stmt. */
4769 vec_compare = build2 (TREE_CODE (cond_expr), vectype,
4770 vec_cond_lhs, vec_cond_rhs);
4771 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
4772 vec_compare, vec_then_clause, vec_else_clause);
4774 new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
4775 new_temp = make_ssa_name (vec_dest, new_stmt);
4776 gimple_assign_set_lhs (new_stmt, new_temp);
4777 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4778 if (j == 0)
4779 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4780 else
4781 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4783 prev_stmt_info = vinfo_for_stmt (new_stmt);
4786 return true;
4790 /* Make sure the statement is vectorizable. */
4792 bool
4793 vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
4795 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4796 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4797 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
4798 bool ok;
4799 tree scalar_type, vectype;
4801 if (vect_print_dump_info (REPORT_DETAILS))
4803 fprintf (vect_dump, "==> examining statement: ");
4804 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4807 if (gimple_has_volatile_ops (stmt))
4809 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4810 fprintf (vect_dump, "not vectorized: stmt has volatile operands");
4812 return false;
4815 /* Skip stmts that do not need to be vectorized. In loops this is expected
4816 to include:
4817 - the COND_EXPR which is the loop exit condition
4818 - any LABEL_EXPRs in the loop
4819 - computations that are used only for array indexing or loop control.
4820 In basic blocks we only analyze statements that are a part of some SLP
4821 instance, therefore, all the statements are relevant. */
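   /* For instance (an added hedged example): in

          for (i = 0; i < n; i++)
            a[i] = b[i] + 1;

      the increment of I and the exit test I < N serve only array
      indexing and loop control; they are classified as irrelevant here
      and get no vector counterpart.  */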
4823 if (!STMT_VINFO_RELEVANT_P (stmt_info)
4824 && !STMT_VINFO_LIVE_P (stmt_info))
4826 if (vect_print_dump_info (REPORT_DETAILS))
4827 fprintf (vect_dump, "irrelevant.");
4829 return true;
4832 switch (STMT_VINFO_DEF_TYPE (stmt_info))
4834 case vect_internal_def:
4835 break;
4837 case vect_reduction_def:
4838 case vect_nested_cycle:
4839 gcc_assert (!bb_vinfo && (relevance == vect_used_in_outer
4840 || relevance == vect_used_in_outer_by_reduction
4841 || relevance == vect_unused_in_scope));
4842 break;
4844 case vect_induction_def:
4845 case vect_constant_def:
4846 case vect_external_def:
4847 case vect_unknown_def_type:
4848 default:
4849 gcc_unreachable ();
4852 if (bb_vinfo)
4854 gcc_assert (PURE_SLP_STMT (stmt_info));
4856 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
4857 if (vect_print_dump_info (REPORT_DETAILS))
4859 fprintf (vect_dump, "get vectype for scalar type: ");
4860 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4863 vectype = get_vectype_for_scalar_type (scalar_type);
4864 if (!vectype)
4866 if (vect_print_dump_info (REPORT_DETAILS))
4868 fprintf (vect_dump, "not SLPed: unsupported data-type ");
4869 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4871 return false;
4874 if (vect_print_dump_info (REPORT_DETAILS))
4876 fprintf (vect_dump, "vectype: ");
4877 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4880 STMT_VINFO_VECTYPE (stmt_info) = vectype;
4883 if (STMT_VINFO_RELEVANT_P (stmt_info))
4885 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
4886 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
4887 *need_to_vectorize = true;
4890 ok = true;
4891 if (!bb_vinfo
4892 && (STMT_VINFO_RELEVANT_P (stmt_info)
4893 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
4894 ok = (vectorizable_type_promotion (stmt, NULL, NULL, NULL)
4895 || vectorizable_type_demotion (stmt, NULL, NULL, NULL)
4896 || vectorizable_conversion (stmt, NULL, NULL, NULL)
4897 || vectorizable_shift (stmt, NULL, NULL, NULL)
4898 || vectorizable_operation (stmt, NULL, NULL, NULL)
4899 || vectorizable_assignment (stmt, NULL, NULL, NULL)
4900 || vectorizable_load (stmt, NULL, NULL, NULL, NULL)
4901 || vectorizable_call (stmt, NULL, NULL)
4902 || vectorizable_store (stmt, NULL, NULL, NULL)
4903 || vectorizable_reduction (stmt, NULL, NULL, NULL)
4904 || vectorizable_condition (stmt, NULL, NULL, NULL, 0));
4905 else
4907 if (bb_vinfo)
4908 ok = (vectorizable_shift (stmt, NULL, NULL, node)
4909 || vectorizable_operation (stmt, NULL, NULL, node)
4910 || vectorizable_assignment (stmt, NULL, NULL, node)
4911 || vectorizable_load (stmt, NULL, NULL, node, NULL)
4912 || vectorizable_store (stmt, NULL, NULL, node));
4915 if (!ok)
4917 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4919 fprintf (vect_dump, "not vectorized: relevant stmt not ");
4920 fprintf (vect_dump, "supported: ");
4921 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4924 return false;
4927 if (bb_vinfo)
4928 return true;
4930 /* Stmts that are (also) "live" (i.e., used outside the loop)
4931 need extra handling, except for vectorizable reductions. */
4932 if (STMT_VINFO_LIVE_P (stmt_info)
4933 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4934 ok = vectorizable_live_operation (stmt, NULL, NULL);
4936 if (!ok)
4938 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4940 fprintf (vect_dump, "not vectorized: live stmt not ");
4941 fprintf (vect_dump, "supported: ");
4942 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4945 return false;
4948 return true;
4952 /* Function vect_transform_stmt.
4954 Create a vectorized stmt to replace STMT, and insert it at GSI. */
4956 bool
4957 vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
4958 bool *strided_store, slp_tree slp_node,
4959 slp_instance slp_node_instance)
4961 bool is_store = false;
4962 gimple vec_stmt = NULL;
4963 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4964 gimple orig_stmt_in_pattern, orig_scalar_stmt = stmt;
4965 bool done;
4967 switch (STMT_VINFO_TYPE (stmt_info))
4969 case type_demotion_vec_info_type:
4970 done = vectorizable_type_demotion (stmt, gsi, &vec_stmt, slp_node);
4971 gcc_assert (done);
4972 break;
4974 case type_promotion_vec_info_type:
4975 done = vectorizable_type_promotion (stmt, gsi, &vec_stmt, slp_node);
4976 gcc_assert (done);
4977 break;
4979 case type_conversion_vec_info_type:
4980 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
4981 gcc_assert (done);
4982 break;
4984 case induc_vec_info_type:
4985 gcc_assert (!slp_node);
4986 done = vectorizable_induction (stmt, gsi, &vec_stmt);
4987 gcc_assert (done);
4988 break;
4990 case shift_vec_info_type:
4991 done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
4992 gcc_assert (done);
4993 break;
4995 case op_vec_info_type:
4996 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
4997 gcc_assert (done);
4998 break;
5000 case assignment_vec_info_type:
5001 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
5002 gcc_assert (done);
5003 break;
5005 case load_vec_info_type:
5006 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
5007 slp_node_instance);
5008 gcc_assert (done);
5009 break;
5011 case store_vec_info_type:
5012 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
5013 gcc_assert (done);
5014 if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && !slp_node)
5016 /* In case of interleaving, the whole chain is vectorized when the
5017 last store in the chain is reached. Store stmts before the last
5018 one are skipped, and their vec_stmt_info shouldn't be freed
5019 meanwhile. */
5020 *strided_store = true;
5021 if (STMT_VINFO_VEC_STMT (stmt_info))
5022 is_store = true;
5024 else
5025 is_store = true;
5026 break;
5028 case condition_vec_info_type:
5029 gcc_assert (!slp_node);
5030 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0);
5031 gcc_assert (done);
5032 break;
5034 case call_vec_info_type:
5035 gcc_assert (!slp_node);
5036 done = vectorizable_call (stmt, gsi, &vec_stmt);
5037 stmt = gsi_stmt (*gsi);
5038 break;
5040 case reduc_vec_info_type:
5041 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
5042 gcc_assert (done);
5043 break;
5045 default:
5046 if (!STMT_VINFO_LIVE_P (stmt_info))
5048 if (vect_print_dump_info (REPORT_DETAILS))
5049 fprintf (vect_dump, "stmt not supported.");
5050 gcc_unreachable ();
5054 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
5055 is being vectorized, but outside the immediately enclosing loop. */
5056 if (vec_stmt
5057 && STMT_VINFO_LOOP_VINFO (stmt_info)
5058 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
5059 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
5060 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
5061 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
5062 || STMT_VINFO_RELEVANT (stmt_info) ==
5063 vect_used_in_outer_by_reduction))
5065 struct loop *innerloop = LOOP_VINFO_LOOP (
5066 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
5067 imm_use_iterator imm_iter;
5068 use_operand_p use_p;
5069 tree scalar_dest;
5070 gimple exit_phi;
5072 if (vect_print_dump_info (REPORT_DETAILS))
5073 fprintf (vect_dump, "Record the vdef for outer-loop vectorization.");
5075 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
5076 (to be used when vectorizing outer-loop stmts that use the DEF of
5077 STMT). */
5078 if (gimple_code (stmt) == GIMPLE_PHI)
5079 scalar_dest = PHI_RESULT (stmt);
5080 else
5081 scalar_dest = gimple_assign_lhs (stmt);
5083 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
5085 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
5087 exit_phi = USE_STMT (use_p);
5088 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
5093 /* Handle stmts whose DEF is used outside the loop-nest that is
5094 being vectorized. */
5095 if (STMT_VINFO_LIVE_P (stmt_info)
5096 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
5098 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
5099 gcc_assert (done);
5102 if (vec_stmt)
5104 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
5105 orig_stmt_in_pattern = STMT_VINFO_RELATED_STMT (stmt_info);
5106 if (orig_stmt_in_pattern)
5108 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt_in_pattern);
5109 /* STMT was inserted by the vectorizer to replace a computation idiom.
5110 ORIG_STMT_IN_PATTERN is a stmt in the original sequence that
5111 computed this idiom. We need to record a pointer to VEC_STMT in
5112 the stmt_info of ORIG_STMT_IN_PATTERN. See more details in the
5113 documentation of vect_pattern_recog. */
5114 if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
5116 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo)
5117 == orig_scalar_stmt);
5118 STMT_VINFO_VEC_STMT (stmt_vinfo) = vec_stmt;
5123 return is_store;
5127 /* Remove a group of stores (for SLP or interleaving), free their
5128 stmt_vec_info. */
5130 void
5131 vect_remove_stores (gimple first_stmt)
5133 gimple next = first_stmt;
5134 gimple tmp;
5135 gimple_stmt_iterator next_si;
5137 while (next)
5139 /* Free the attached stmt_vec_info and remove the stmt. */
5140 next_si = gsi_for_stmt (next);
5141 gsi_remove (&next_si, true);
5142 tmp = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
5143 free_stmt_vec_info (next);
5144 next = tmp;
5149 /* Function new_stmt_vec_info.
5151 Create and initialize a new stmt_vec_info struct for STMT. */
5153 stmt_vec_info
5154 new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
5155 bb_vec_info bb_vinfo)
5157 stmt_vec_info res;
5158 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
5160 STMT_VINFO_TYPE (res) = undef_vec_info_type;
5161 STMT_VINFO_STMT (res) = stmt;
5162 STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
5163 STMT_VINFO_BB_VINFO (res) = bb_vinfo;
5164 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
5165 STMT_VINFO_LIVE_P (res) = false;
5166 STMT_VINFO_VECTYPE (res) = NULL;
5167 STMT_VINFO_VEC_STMT (res) = NULL;
5168 STMT_VINFO_VECTORIZABLE (res) = true;
5169 STMT_VINFO_IN_PATTERN_P (res) = false;
5170 STMT_VINFO_RELATED_STMT (res) = NULL;
5171 STMT_VINFO_DATA_REF (res) = NULL;
5173 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
5174 STMT_VINFO_DR_OFFSET (res) = NULL;
5175 STMT_VINFO_DR_INIT (res) = NULL;
5176 STMT_VINFO_DR_STEP (res) = NULL;
5177 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
5179 if (gimple_code (stmt) == GIMPLE_PHI
5180 && is_loop_header_bb_p (gimple_bb (stmt)))
5181 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
5182 else
5183 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
5185 STMT_VINFO_SAME_ALIGN_REFS (res) = VEC_alloc (dr_p, heap, 5);
5186 STMT_VINFO_INSIDE_OF_LOOP_COST (res) = 0;
5187 STMT_VINFO_OUTSIDE_OF_LOOP_COST (res) = 0;
5188 STMT_SLP_TYPE (res) = loop_vect;
5189 GROUP_FIRST_ELEMENT (res) = NULL;
5190 GROUP_NEXT_ELEMENT (res) = NULL;
5191 GROUP_SIZE (res) = 0;
5192 GROUP_STORE_COUNT (res) = 0;
5193 GROUP_GAP (res) = 0;
5194 GROUP_SAME_DR_STMT (res) = NULL;
5195 GROUP_READ_WRITE_DEPENDENCE (res) = false;
5197 return res;
5201 /* Create the vector holding stmt_vec_info structs. */
5203 void
5204 init_stmt_vec_info_vec (void)
5206 gcc_assert (!stmt_vec_info_vec);
5207 stmt_vec_info_vec = VEC_alloc (vec_void_p, heap, 50);
5211 /* Free the vector holding stmt_vec_info structs. */
5213 void
5214 free_stmt_vec_info_vec (void)
5216 gcc_assert (stmt_vec_info_vec);
5217 VEC_free (vec_void_p, heap, stmt_vec_info_vec);
5221 /* Free stmt vectorization related info. */
5223 void
5224 free_stmt_vec_info (gimple stmt)
5226 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5228 if (!stmt_info)
5229 return;
5231 VEC_free (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
5232 set_vinfo_for_stmt (stmt, NULL);
5233 free (stmt_info);
5237 /* Function get_vectype_for_scalar_type_and_size.
5239 Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
5240 by the target. */
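/* An added hedged example: for SCALAR_TYPE int (4 bytes on most targets)
   and SIZE 16, this would yield a 4-unit vector type such as V4SI,
   provided the target supports that vector mode.  */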
5242 static tree
5243 get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
5245 enum machine_mode inner_mode = TYPE_MODE (scalar_type);
5246 enum machine_mode simd_mode;
5247 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
5248 int nunits;
5249 tree vectype;
5251 if (nbytes == 0)
5252 return NULL_TREE;
5254 /* We can't build a vector type of elements with alignment bigger than
5255 their size. */
5256 if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
5257 return NULL_TREE;
5259 /* If we'd build a vector type of elements whose mode precision doesn't
5260 match their type's precision we'll get mismatched types on vector
5261 extracts via BIT_FIELD_REFs. This effectively means we disable
5262 vectorization of bool and/or enum types in some languages. */
5263 if (INTEGRAL_TYPE_P (scalar_type)
5264 && GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type))
5265 return NULL_TREE;
5267 if (GET_MODE_CLASS (inner_mode) != MODE_INT
5268 && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
5269 return NULL_TREE;
5271 /* If no size was supplied use the mode the target prefers. Otherwise
5272 look up a vector mode of the specified size. */
5273 if (size == 0)
5274 simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
5275 else
5276 simd_mode = mode_for_vector (inner_mode, size / nbytes);
5277 nunits = GET_MODE_SIZE (simd_mode) / nbytes;
5278 if (nunits <= 1)
5279 return NULL_TREE;
5281 vectype = build_vector_type (scalar_type, nunits);
5282 if (vect_print_dump_info (REPORT_DETAILS))
5284 fprintf (vect_dump, "get vectype with %d units of type ", nunits);
5285 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
5288 if (!vectype)
5289 return NULL_TREE;
5291 if (vect_print_dump_info (REPORT_DETAILS))
5293 fprintf (vect_dump, "vectype: ");
5294 print_generic_expr (vect_dump, vectype, TDF_SLIM);
5297 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
5298 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
5300 if (vect_print_dump_info (REPORT_DETAILS))
5301 fprintf (vect_dump, "mode not supported by target.");
5302 return NULL_TREE;
5305 return vectype;
5308 unsigned int current_vector_size;
5310 /* Function get_vectype_for_scalar_type.
5312 Returns the vector type corresponding to SCALAR_TYPE as supported
5313 by the target. */
5315 tree
5316 get_vectype_for_scalar_type (tree scalar_type)
5318 tree vectype;
5319 vectype = get_vectype_for_scalar_type_and_size (scalar_type,
5320 current_vector_size);
5321 if (vectype
5322 && current_vector_size == 0)
5323 current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
5324 return vectype;
5327 /* Function get_same_sized_vectype
5329 Returns a vector type corresponding to SCALAR_TYPE of size
5330 VECTOR_TYPE if supported by the target. */
5332 tree
5333 get_same_sized_vectype (tree scalar_type, tree vector_type)
5335 return get_vectype_for_scalar_type_and_size
5336 (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
5339 /* Function vect_is_simple_use.
5341 Input:
5342 LOOP_VINFO - the vect info of the loop that is being vectorized.
5343 BB_VINFO - the vect info of the basic block that is being vectorized.
5344 OPERAND - operand of a stmt in the loop or bb.
5345 DEF - the defining stmt in case OPERAND is an SSA_NAME.
5347 Returns whether a stmt with OPERAND can be vectorized.
5348 For loops, supportable operands are constants, loop invariants, and operands
5349 that are defined by the current iteration of the loop. Unsupportable
5350 operands are those that are defined by a previous iteration of the loop (as
5351 is the case in reduction/induction computations).
5352 For basic blocks, supportable operands are constants and bb invariants.
5353 For now, operands defined outside the basic block are not supported. */
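/* Illustrative examples (hedged, added commentary): in

       for (i = 0; i < n; i++)
         a[i] = b[i] * k;

   the invariant K is defined outside the loop (vect_external_def) and
   the load of B[I] is defined by the current iteration
   (vect_internal_def); both are simple uses.  A value carried around
   the loop back-edge, as in the reduction s = s + a[i], is defined by a
   previous iteration.  */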
5355 bool
5356 vect_is_simple_use (tree operand, loop_vec_info loop_vinfo,
5357 bb_vec_info bb_vinfo, gimple *def_stmt,
5358 tree *def, enum vect_def_type *dt)
5360 basic_block bb;
5361 stmt_vec_info stmt_vinfo;
5362 struct loop *loop = NULL;
5364 if (loop_vinfo)
5365 loop = LOOP_VINFO_LOOP (loop_vinfo);
5367 *def_stmt = NULL;
5368 *def = NULL_TREE;
5370 if (vect_print_dump_info (REPORT_DETAILS))
5372 fprintf (vect_dump, "vect_is_simple_use: operand ");
5373 print_generic_expr (vect_dump, operand, TDF_SLIM);
5376 if (TREE_CODE (operand) == INTEGER_CST || TREE_CODE (operand) == REAL_CST)
5378 *dt = vect_constant_def;
5379 return true;
5382 if (is_gimple_min_invariant (operand))
5384 *def = operand;
5385 *dt = vect_external_def;
5386 return true;
5389 if (TREE_CODE (operand) == PAREN_EXPR)
5391 if (vect_print_dump_info (REPORT_DETAILS))
5392 fprintf (vect_dump, "non-associatable copy.");
5393 operand = TREE_OPERAND (operand, 0);
5396 if (TREE_CODE (operand) != SSA_NAME)
5398 if (vect_print_dump_info (REPORT_DETAILS))
5399 fprintf (vect_dump, "not ssa-name.");
5400 return false;
5403 *def_stmt = SSA_NAME_DEF_STMT (operand);
5404 if (*def_stmt == NULL)
5406 if (vect_print_dump_info (REPORT_DETAILS))
5407 fprintf (vect_dump, "no def_stmt.");
5408 return false;
5411 if (vect_print_dump_info (REPORT_DETAILS))
5413 fprintf (vect_dump, "def_stmt: ");
5414 print_gimple_stmt (vect_dump, *def_stmt, 0, TDF_SLIM);
5417 /* Empty stmt is expected only in case of a function argument.
5418 (Otherwise - we expect a phi_node or a GIMPLE_ASSIGN). */
5419 if (gimple_nop_p (*def_stmt))
5421 *def = operand;
5422 *dt = vect_external_def;
5423 return true;
5426 bb = gimple_bb (*def_stmt);
5428 if ((loop && !flow_bb_inside_loop_p (loop, bb))
5429 || (!loop && bb != BB_VINFO_BB (bb_vinfo))
5430 || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
5431 *dt = vect_external_def;
5432 else
5434 stmt_vinfo = vinfo_for_stmt (*def_stmt);
5435 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
5438 if (*dt == vect_unknown_def_type)
5440 if (vect_print_dump_info (REPORT_DETAILS))
5441 fprintf (vect_dump, "Unsupported pattern.");
5442 return false;
5445 if (vect_print_dump_info (REPORT_DETAILS))
5446 fprintf (vect_dump, "type of def: %d.",*dt);
5448 switch (gimple_code (*def_stmt))
5450 case GIMPLE_PHI:
5451 *def = gimple_phi_result (*def_stmt);
5452 break;
5454 case GIMPLE_ASSIGN:
5455 *def = gimple_assign_lhs (*def_stmt);
5456 break;
5458 case GIMPLE_CALL:
5459 *def = gimple_call_lhs (*def_stmt);
5460 if (*def != NULL)
5461 break;
5462 /* FALLTHRU */
5463 default:
5464 if (vect_print_dump_info (REPORT_DETAILS))
5465 fprintf (vect_dump, "unsupported defining stmt: ");
5466 return false;
5469 return true;
5472 /* Function vect_is_simple_use_1.
5474 Same as vect_is_simple_use but also determines the vector operand
5475 type of OPERAND and stores it to *VECTYPE. If the definition of
5476 OPERAND is vect_uninitialized_def, vect_constant_def or
5477 vect_external_def *VECTYPE will be set to NULL_TREE and the caller
5478 is responsible for computing the best suited vector type for the
5479 scalar operand. */
5481 bool
5482 vect_is_simple_use_1 (tree operand, loop_vec_info loop_vinfo,
5483 bb_vec_info bb_vinfo, gimple *def_stmt,
5484 tree *def, enum vect_def_type *dt, tree *vectype)
5486 if (!vect_is_simple_use (operand, loop_vinfo, bb_vinfo, def_stmt, def, dt))
5487 return false;
5489 /* Now get a vector type if the def is internal, otherwise supply
5490 NULL_TREE and leave it up to the caller to figure out a proper
5491 type for the use stmt. */
5492 if (*dt == vect_internal_def
5493 || *dt == vect_induction_def
5494 || *dt == vect_reduction_def
5495 || *dt == vect_double_reduction_def
5496 || *dt == vect_nested_cycle)
5498 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
5499 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
5500 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
5501 *vectype = STMT_VINFO_VECTYPE (stmt_info);
5502 gcc_assert (*vectype != NULL_TREE);
5504 else if (*dt == vect_uninitialized_def
5505 || *dt == vect_constant_def
5506 || *dt == vect_external_def)
5507 *vectype = NULL_TREE;
5508 else
5509 gcc_unreachable ();
5511 return true;


/* Function supportable_widening_operation

   Check whether an operation represented by the code CODE is a
   widening operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   producing a result of type VECTYPE_OUT).

   Widening operations we currently support are NOP (CONVERT), FLOAT
   and WIDEN_MULT.  This function checks if these operations are supported
   by the target platform either directly (via vector tree-codes), or via
   target builtins.

   Output:
   - CODE1 and CODE2 are codes of vector operations to be used when
     vectorizing the operation, if available.
   - DECL1 and DECL2 are decls of target builtin functions to be used
     when vectorizing the operation, if available.  In this case,
     CODE1 and CODE2 are CALL_EXPR.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
     case of multi-step conversion (like char->short->int - in that case
     MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
     widening operation (short in the above example).  */

bool
supportable_widening_operation (enum tree_code code, gimple stmt,
                                tree vectype_out, tree vectype_in,
                                tree *decl1, tree *decl2,
                                enum tree_code *code1, enum tree_code *code2,
                                int *multi_step_cvt,
                                VEC (tree, heap) **interm_types)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
  bool ordered_p;
  enum machine_mode vec_mode;
  enum insn_code icode1, icode2;
  optab optab1, optab2;
  tree vectype = vectype_in;
  tree wide_vectype = vectype_out;
  enum tree_code c1, c2;

  /* The result of a vectorized widening operation usually requires two
     vectors (because the widened results do not fit in one vector).  The
     generated vector results would normally be expected to appear in the
     same order as in the original scalar computation, i.e., if 8 results
     are generated in each vector iteration they are to be organized as
     follows:
        vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].

     However, in the special case that the result of the widening
     operation is used in a reduction computation only, the order doesn't
     matter (because when vectorizing a reduction we change the order of
     the computation).  Some targets can take advantage of this and
     generate more efficient code.  For example, targets like Altivec,
     that support widen_mult using a sequence of {mult_even,mult_odd},
     generate the following vectors:
        vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8].

     When vectorizing outer-loops, we execute the inner-loop sequentially
     (each vectorized inner-loop iteration contributes to VF outer-loop
     iterations in parallel).  We therefore don't allow changing the order
     of the computation in the inner-loop during outer-loop
     vectorization.  */

  if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
      && !nested_in_vect_loop_p (vect_loop, stmt))
    ordered_p = false;
  else
    ordered_p = true;

  if (!ordered_p
      && code == WIDEN_MULT_EXPR
      && targetm.vectorize.builtin_mul_widen_even
      && targetm.vectorize.builtin_mul_widen_even (vectype)
      && targetm.vectorize.builtin_mul_widen_odd
      && targetm.vectorize.builtin_mul_widen_odd (vectype))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Unordered widening operation detected.");

      *code1 = *code2 = CALL_EXPR;
      *decl1 = targetm.vectorize.builtin_mul_widen_even (vectype);
      *decl2 = targetm.vectorize.builtin_mul_widen_odd (vectype);
      return true;
    }
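
  /* Note: with the even/odd builtins the results arrive in the
     "unordered" layout shown above (res1,res3,... / res2,res4,...),
     which is safe here only because the sole consumer is a reduction,
     for which the order of the partial results is irrelevant.  */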

  switch (code)
    {
    case WIDEN_MULT_EXPR:
      if (BYTES_BIG_ENDIAN)
        {
          c1 = VEC_WIDEN_MULT_HI_EXPR;
          c2 = VEC_WIDEN_MULT_LO_EXPR;
        }
      else
        {
          c2 = VEC_WIDEN_MULT_HI_EXPR;
          c1 = VEC_WIDEN_MULT_LO_EXPR;
        }
      break;

    CASE_CONVERT:
      if (BYTES_BIG_ENDIAN)
        {
          c1 = VEC_UNPACK_HI_EXPR;
          c2 = VEC_UNPACK_LO_EXPR;
        }
      else
        {
          c2 = VEC_UNPACK_HI_EXPR;
          c1 = VEC_UNPACK_LO_EXPR;
        }
      break;

    case FLOAT_EXPR:
      if (BYTES_BIG_ENDIAN)
        {
          c1 = VEC_UNPACK_FLOAT_HI_EXPR;
          c2 = VEC_UNPACK_FLOAT_LO_EXPR;
        }
      else
        {
          c2 = VEC_UNPACK_FLOAT_HI_EXPR;
          c1 = VEC_UNPACK_FLOAT_LO_EXPR;
        }
      break;

    case FIX_TRUNC_EXPR:
      /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
         VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
         computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == FIX_TRUNC_EXPR)
    {
      /* The signedness is determined from the output operand.  */
      optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
      optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
    }
  else
    {
      optab1 = optab_for_tree_code (c1, vectype, optab_default);
      optab2 = optab_for_tree_code (c2, vectype, optab_default);
    }

  if (!optab1 || !optab2)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
      || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
    return false;

  /* Check if it's a multi-step conversion that can be done using
     intermediate types.  */
  if (insn_data[icode1].operand[0].mode != TYPE_MODE (wide_vectype)
      || insn_data[icode2].operand[0].mode != TYPE_MODE (wide_vectype))
    {
      int i;
      tree prev_type = vectype, intermediate_type;
      enum machine_mode intermediate_mode, prev_mode = vec_mode;
      optab optab3, optab4;

      if (!CONVERT_EXPR_CODE_P (code))
        return false;

      *code1 = c1;
      *code2 = c2;

      /* We assume here that there will not be more than
         MAX_INTERM_CVT_STEPS intermediate steps in the promotion
         sequence.  We try MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE,
         and fail if we do not.  */
      *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
      for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
        {
          intermediate_mode = insn_data[icode1].operand[0].mode;
          intermediate_type
            = lang_hooks.types.type_for_mode (intermediate_mode,
                                              TYPE_UNSIGNED (prev_type));
          optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
          optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);

          if (!optab3 || !optab4
              || ((icode1 = optab_handler (optab1, prev_mode))
                  == CODE_FOR_nothing)
              || insn_data[icode1].operand[0].mode != intermediate_mode
              || ((icode2 = optab_handler (optab2, prev_mode))
                  == CODE_FOR_nothing)
              || insn_data[icode2].operand[0].mode != intermediate_mode
              || ((icode1 = optab_handler (optab3, intermediate_mode))
                  == CODE_FOR_nothing)
              || ((icode2 = optab_handler (optab4, intermediate_mode))
                  == CODE_FOR_nothing))
            return false;

          VEC_quick_push (tree, *interm_types, intermediate_type);
          (*multi_step_cvt)++;

          if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
              && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
            return true;

          prev_type = intermediate_type;
          prev_mode = intermediate_mode;
        }

      return false;
    }

  *code1 = c1;
  *code2 = c2;
  return true;
}
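
/* A hedged usage sketch (local variable names are assumed; the
   signature is the one above):

     enum tree_code code1, code2;
     tree decl1 = NULL_TREE, decl2 = NULL_TREE;
     int multi_step_cvt = 0;
     VEC (tree, heap) *interm_types = NULL;

     if (supportable_widening_operation (WIDEN_MULT_EXPR, stmt,
                                         vectype_out, vectype_in,
                                         &decl1, &decl2, &code1, &code2,
                                         &multi_step_cvt, &interm_types))
       {
         if (code1 == CALL_EXPR)
           ...  emit calls to the DECL1/DECL2 even/odd builtins ...
         else
           ...  emit CODE1/CODE2 (lo/hi) statements, with
                MULTI_STEP_CVT extra rounds through INTERM_TYPES ...
       }
*/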


/* Function supportable_narrowing_operation

   Check whether an operation represented by the code CODE is a
   narrowing operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   and producing a result of type VECTYPE_OUT).

   Narrowing operations we currently support are NOP (CONVERT) and
   FIX_TRUNC.  This function checks if these operations are supported by
   the target platform directly via vector tree-codes.

   Output:
   - CODE1 is the code of a vector operation to be used when
     vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
     case of multi-step conversion (like int->short->char - in that case
     MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
     narrowing operation (short in the above example).  */

bool
supportable_narrowing_operation (enum tree_code code,
                                 tree vectype_out, tree vectype_in,
                                 enum tree_code *code1, int *multi_step_cvt,
                                 VEC (tree, heap) **interm_types)
{
  enum machine_mode vec_mode;
  enum insn_code icode1;
  optab optab1, interm_optab;
  tree vectype = vectype_in;
  tree narrow_vectype = vectype_out;
  enum tree_code c1;
  tree intermediate_type, prev_type;
  int i;

  switch (code)
    {
    CASE_CONVERT:
      c1 = VEC_PACK_TRUNC_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      c1 = VEC_PACK_FIX_TRUNC_EXPR;
      break;

    case FLOAT_EXPR:
      /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
         tree code and optabs used for computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == FIX_TRUNC_EXPR)
    /* The signedness is determined from the output operand.  */
    optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
  else
    optab1 = optab_for_tree_code (c1, vectype, optab_default);

  if (!optab1)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
    return false;

  /* Check if it's a multi-step conversion that can be done using
     intermediate types.  */
  if (insn_data[icode1].operand[0].mode != TYPE_MODE (narrow_vectype))
    {
      enum machine_mode intermediate_mode, prev_mode = vec_mode;

      *code1 = c1;
      prev_type = vectype;

      /* We assume here that there will not be more than
         MAX_INTERM_CVT_STEPS intermediate steps in the narrowing
         sequence.  We try MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE,
         and fail if we do not.  */
      *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
      for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
        {
          intermediate_mode = insn_data[icode1].operand[0].mode;
          intermediate_type
            = lang_hooks.types.type_for_mode (intermediate_mode,
                                              TYPE_UNSIGNED (prev_type));
          interm_optab = optab_for_tree_code (c1, intermediate_type,
                                              optab_default);
          if (!interm_optab
              || ((icode1 = optab_handler (optab1, prev_mode))
                  == CODE_FOR_nothing)
              || insn_data[icode1].operand[0].mode != intermediate_mode
              || ((icode1 = optab_handler (interm_optab, intermediate_mode))
                  == CODE_FOR_nothing))
            return false;

          VEC_quick_push (tree, *interm_types, intermediate_type);
          (*multi_step_cvt)++;

          if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
            return true;

          prev_type = intermediate_type;
          prev_mode = intermediate_mode;
        }

      return false;
    }

  *code1 = c1;
  return true;
}
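
/* A minimal caller sketch, analogous to the widening case (variable
   names are assumed; NOP_EXPR is one of the codes CASE_CONVERT covers):

     enum tree_code code1;
     int multi_step_cvt = 0;
     VEC (tree, heap) *interm_types = NULL;

     if (supportable_narrowing_operation (NOP_EXPR, vectype_out,
                                          vectype_in, &code1,
                                          &multi_step_cvt, &interm_types))
       ...  emit MULTI_STEP_CVT+1 rounds of CODE1 (VEC_PACK_*) stmts,
            narrowing through the types recorded in INTERM_TYPES ...
*/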