gcc/tree-vect-stmts.c
/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "target.h"
#include "basic-block.h"
#include "tree-pretty-print.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "cfgloop.h"
#include "cfglayout.h"
#include "expr.h"
#include "recog.h"
#include "optabs.h"
#include "diagnostic-core.h"
#include "tree-vectorizer.h"
#include "langhooks.h"

/* Return a variable of type ELEM_TYPE[NELEMS].  */

static tree
create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
                         "vect_array");
}

/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT and the vector is associated
   with scalar destination SCALAR_DEST.  */

static tree
read_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
                   tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;
  gimple new_stmt;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
                      build_int_cst (size_type_node, n),
                      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
  mark_symbols_for_renaming (new_stmt);

  return vect_name;
}

/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT.  */

static void
write_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree vect,
                    tree array, unsigned HOST_WIDE_INT n)
{
  tree array_ref;
  gimple new_stmt;

  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
                      build_int_cst (size_type_node, n),
                      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
  mark_symbols_for_renaming (new_stmt);
}

/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

static tree
create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
{
  struct ptr_info_def *pi;
  tree mem_ref, alias_ptr_type;

  alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  pi = get_ptr_info (ptr);
  pi->align = TYPE_ALIGN_UNIT (type);
  pi->misalign = 0;
  return mem_ref;
}

/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt,
                    enum vect_relevant relevant, bool live_p)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "mark relevant %d, live %d.", relevant, live_p);

  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      gimple pattern_stmt;

      /* This is the last stmt in a sequence that was detected as a
         pattern that can potentially be vectorized.  Don't mark the stmt
         as relevant/live because it's not going to be vectorized.
         Instead mark the pattern-stmt that replaces it.  */

      pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "last stmt in pattern. don't mark relevant/live.");
      stmt_info = vinfo_for_stmt (pattern_stmt);
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
      save_relevant = STMT_VINFO_RELEVANT (stmt_info);
      save_live_p = STMT_VINFO_LIVE_P (stmt_info);
      stmt = pattern_stmt;
    }
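
  /* Note: the max-update below assumes that the vect_relevant enum is
     ordered by increasing relevance (with vect_unused_in_scope lowest),
     so a stmt's relevance can only ever be promoted here, never
     demoted.  */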
  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "already marked relevant/live.");
      return;
    }

  VEC_safe_push (gimple, heap, *worklist, stmt);
}

/* Function vect_stmt_relevant_p.

   Return true if STMT in loop that is represented by LOOP_VINFO is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - control stmts in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */

static bool
vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
                      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
         != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt))
      {
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "vec_stmt_relevant_p: stmt has vdefs.");
        *relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
        {
          basic_block bb = gimple_bb (USE_STMT (use_p));
          if (!flow_bb_inside_loop_p (loop, bb))
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "vec_stmt_relevant_p: used out of loop.");

              if (is_gimple_debug (USE_STMT (use_p)))
                continue;

              /* We expect all such uses to be in the loop exit phis
                 (because of loop closed form)   */
              gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
              gcc_assert (bb == single_exit (loop)->dest);

              *live_p = true;
            }
        }
    }

  return (*live_p || *relevant);
}
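
/* Note: the boolean result of vect_stmt_relevant_p relies on
   vect_unused_in_scope being the zero value of the vect_relevant enum,
   so "*relevant" converts to false exactly when the stmt is unused in
   the scope.  */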

/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref.  FORNOW this means that it's of one of
     the following forms:
     -1- ARRAY_REF = var
     -2- var = ARRAY_REF
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))
    return false;
  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}
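
/* For example, for the store "a[i_1] = x_2" (form -1- above), USE == x_2
   is the copied rhs and makes this return true, while USE == i_1 only
   feeds the array index and makes it return false.  */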

/*
   Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
     STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
     STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array
     indexing), which does not need to be directly vectorized, then the
     liveness/relevance of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
     skip DEF_STMT because it has already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
     be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */
static bool
process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
             enum vect_relevant relevant, VEC(gimple,heap) **worklist)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, loop_vinfo, NULL, &def_stmt, &def, &dt))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: unsupported use in stmt.");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "def_stmt is out of loop.");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "reduc-stmt defining reduc-phi in the same nest.");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
        dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
                  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
        outer-loop-header-bb:
                d = def_stmt
        inner-loop:
                stmt # use (d)
        outer-loop-tail-bb:
                ...               */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "outer-loop def-stmt defining inner-loop stmt.");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
                     vect_used_in_scope : vect_unused_in_scope;
          break;

        case vect_used_in_outer_by_reduction:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_by_reduction;
          break;

        case vect_used_in_outer:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_in_scope;
          break;

        case vect_used_in_scope:
          break;

        default:
          gcc_unreachable ();
        }
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
        outer-loop-header-bb:
                ...
        inner-loop:
                d = def_stmt
        outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
                stmt # use (d)          */
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "inner-loop def-stmt defining outer-loop stmt.");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
             || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
                     vect_used_in_outer_by_reduction : vect_unused_in_scope;
          break;

        case vect_used_by_reduction:
          relevant = vect_used_in_outer_by_reduction;
          break;

        case vect_used_in_scope:
          relevant = vect_used_in_outer;
          break;

        default:
          gcc_unreachable ();
        }
    }

  vect_mark_relevant (worklist, def_stmt, relevant, live_p);
  return true;
}

/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.    T1 = a[T0]

   3.    j = j + 1

   Stmt 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */
bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  VEC(gimple,heap) *worklist;
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple stmt;
  unsigned int i;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple phi;
  bool live_p;
  enum vect_relevant relevant, tmp_relevant;
  enum vect_def_type def_type;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_mark_stmts_to_be_vectorized ===");

  worklist = VEC_alloc (gimple, heap, 64);

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "init: phi relevant? ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }

          if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, phi, relevant, live_p);
        }
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          stmt = gsi_stmt (si);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "init: stmt relevant? ");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }

          if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, stmt, relevant, live_p);
        }
    }

  /* 2. Process_worklist */
  while (VEC_length (gimple, worklist) > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = VEC_pop (gimple, worklist);
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "worklist: examine stmt: ");
          print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
        }

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines
         it (DEF_STMT) as relevant/irrelevant and live/dead according to the
         liveness and relevance properties of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
      live_p = STMT_VINFO_LIVE_P (stmt_vinfo);

      /* Generally, the liveness and relevance properties of STMT are
         propagated as is to the DEF_STMTs of its USEs:
           live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
           relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)

         One exception is when STMT has been identified as defining a
         reduction variable; in this case we set the liveness/relevance
         as follows:
           live_p = false
           relevant = vect_used_by_reduction
         This is because we distinguish between two kinds of relevant stmts -
         those that are used by a reduction computation, and those that are
         (also) used by a regular computation.  This allows us later on to
         identify stmts that are used solely by a reduction, and therefore the
         order of the results that they produce does not have to be kept.  */

      def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
      tmp_relevant = relevant;
      switch (def_type)
        {
        case vect_reduction_def:
          switch (tmp_relevant)
            {
            case vect_unused_in_scope:
              relevant = vect_used_by_reduction;
              break;

            case vect_used_by_reduction:
              if (gimple_code (stmt) == GIMPLE_PHI)
                break;
              /* fall through */

            default:
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "unsupported use of reduction.");

              VEC_free (gimple, heap, worklist);
              return false;
            }

          live_p = false;
          break;

        case vect_nested_cycle:
          if (tmp_relevant != vect_unused_in_scope
              && tmp_relevant != vect_used_in_outer_by_reduction
              && tmp_relevant != vect_used_in_outer)
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "unsupported use of nested cycle.");

              VEC_free (gimple, heap, worklist);
              return false;
            }

          live_p = false;
          break;

        case vect_double_reduction_def:
          if (tmp_relevant != vect_unused_in_scope
              && tmp_relevant != vect_used_by_reduction)
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "unsupported use of double reduction.");

              VEC_free (gimple, heap, worklist);
              return false;
            }

          live_p = false;
          break;

        default:
          break;
        }

      FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
        {
          tree op = USE_FROM_PTR (use_p);
          if (!process_use (stmt, op, loop_vinfo, live_p, relevant, &worklist))
            {
              VEC_free (gimple, heap, worklist);
              return false;
            }
        }
    } /* while worklist */

  VEC_free (gimple, heap, worklist);
  return true;
}

/* Get cost by calling cost target builtin.  */

static inline
int vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
{
  tree dummy_type = NULL;
  int dummy = 0;

  return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
                                                       dummy_type, dummy);
}
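
/* Note: targets that do not override the hook get
   default_builtin_vectorization_cost from targhooks.c, which (at the time
   of writing) charges most statement kinds an abstract cost of 1 and
   unaligned accesses a higher constant, so all costs below are in those
   abstract units rather than cycles.  */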

/* Get cost for STMT.  */

int
cost_for_stmt (gimple stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (STMT_VINFO_TYPE (stmt_info))
    {
    case load_vec_info_type:
      return vect_get_stmt_cost (scalar_load);
    case store_vec_info_type:
      return vect_get_stmt_cost (scalar_store);
    case op_vec_info_type:
    case condition_vec_info_type:
    case assignment_vec_info_type:
    case reduc_vec_info_type:
    case induc_vec_info_type:
    case type_promotion_vec_info_type:
    case type_demotion_vec_info_type:
    case type_conversion_vec_info_type:
    case call_vec_info_type:
      return vect_get_stmt_cost (scalar_stmt);
    case undef_vec_info_type:
    default:
      gcc_unreachable ();
    }
}

/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
                        enum vect_def_type *dt, slp_tree slp_node)
{
  int i;
  int inside_cost = 0, outside_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  inside_cost = ncopies * vect_get_stmt_cost (vector_stmt);

  /* FORNOW: Assuming a maximum of 2 args per stmt.  */
  for (i = 0; i < 2; i++)
    {
      if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
        outside_cost += vect_get_stmt_cost (vector_stmt);
    }

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, "
             "outside_cost = %d .", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}

/* Function vect_cost_strided_group_size

   For strided load or store, return the group_size only if it is the first
   load or store of a group, else return 1.  This ensures that group size is
   only returned once per group.  */

static int
vect_cost_strided_group_size (stmt_vec_info stmt_info)
{
  gimple first_stmt = GROUP_FIRST_ELEMENT (stmt_info);

  if (first_stmt == STMT_VINFO_STMT (stmt_info))
    return GROUP_SIZE (stmt_info);

  return 1;
}
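
/* For example, in an interleaved store group of size 3 (a[3*i], a[3*i+1],
   a[3*i+2]), the first stmt of the group reports 3 and the other two
   report 1, so the group-size-dependent permutation cost below is charged
   only once per group.  */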

/* Function vect_model_store_cost

   Models cost for stores.  In the case of strided accesses, one access
   has the overhead of the strided access attributed to it.  */

void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
                       bool store_lanes_p, enum vect_def_type dt,
                       slp_tree slp_node)
{
  int group_size;
  unsigned int inside_cost = 0, outside_cost = 0;
  struct data_reference *first_dr;
  gimple first_stmt;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (dt == vect_constant_def || dt == vect_external_def)
    outside_cost = vect_get_stmt_cost (scalar_to_vec);

  /* Strided access?  */
  if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
    {
      if (slp_node)
        {
          first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
          group_size = 1;
        }
      else
        {
          first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
          group_size = vect_cost_strided_group_size (stmt_info);
        }

      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a strided access.  */
  else
    {
      group_size = 1;
      first_dr = STMT_VINFO_DATA_REF (stmt_info);
    }

  /* We assume that the cost of a single store-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate stores.  If a strided
     access is instead being provided by a permute-and-store operation,
     include the cost of the permutes.  */
  if (!store_lanes_p && group_size > 1)
    {
      /* Uses a high and low interleave operation for each needed permute.  */
      inside_cost = ncopies * exact_log2 (group_size) * group_size
                    * vect_get_stmt_cost (vector_stmt);

      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .",
                 group_size);
    }

  /* Costs of the stores.  */
  vect_get_store_cost (first_dr, ncopies, &inside_cost);

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, "
             "outside_cost = %d .", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}
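
/* A worked example of the permute term above, assuming the default cost
   of 1 per vector_stmt: a store group of size 4 with ncopies == 1 and no
   store-lanes support costs 1 * exact_log2 (4) * 4 * 1 == 8 interleave
   operations, on top of the cost of the stores themselves added by
   vect_get_store_cost.  */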

/* Calculate cost of DR's memory access.  */
void
vect_get_store_cost (struct data_reference *dr, int ncopies,
                     unsigned int *inside_cost)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
        *inside_cost += ncopies * vect_get_stmt_cost (vector_store);

        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_store_cost: aligned.");

        break;
      }

    case dr_unaligned_supported:
      {
        gimple stmt = DR_STMT (dr);
        stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
        tree vectype = STMT_VINFO_VECTYPE (stmt_info);

        /* Here, we assign an additional cost for the unaligned store.  */
        *inside_cost += ncopies
          * targetm.vectorize.builtin_vectorization_cost (unaligned_store,
                                                vectype, DR_MISALIGNMENT (dr));

        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_store_cost: unaligned supported by "
                   "hardware.");

        break;
      }

    default:
      gcc_unreachable ();
    }
}

/* Function vect_model_load_cost

   Models cost for loads.  In the case of strided accesses, the last access
   has the overhead of the strided access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, bool load_lanes_p,
                      slp_tree slp_node)
{
  int group_size;
  gimple first_stmt;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
  unsigned int inside_cost = 0, outside_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* Strided accesses?  */
  first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
  if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && first_stmt && !slp_node)
    {
      group_size = vect_cost_strided_group_size (stmt_info);
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a strided access.  */
  else
    {
      group_size = 1;
      first_dr = dr;
    }

  /* We assume that the cost of a single load-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate loads.  If a strided
     access is instead being provided by a load-and-permute operation,
     include the cost of the permutes.  */
  if (!load_lanes_p && group_size > 1)
    {
      /* Uses even and odd extract operations for each needed permute.  */
      inside_cost = ncopies * exact_log2 (group_size) * group_size
        * vect_get_stmt_cost (vector_stmt);

      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .",
                 group_size);
    }

  /* The loads themselves.  */
  vect_get_load_cost (first_dr, ncopies,
                      ((!STMT_VINFO_STRIDED_ACCESS (stmt_info))
                       || group_size > 1 || slp_node),
                      &inside_cost, &outside_cost);

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
             "outside_cost = %d .", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}

/* Calculate cost of DR's memory access.  */
void
vect_get_load_cost (struct data_reference *dr, int ncopies,
                    bool add_realign_cost, unsigned int *inside_cost,
                    unsigned int *outside_cost)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
        *inside_cost += ncopies * vect_get_stmt_cost (vector_load);

        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_load_cost: aligned.");

        break;
      }
    case dr_unaligned_supported:
      {
        gimple stmt = DR_STMT (dr);
        stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
        tree vectype = STMT_VINFO_VECTYPE (stmt_info);

        /* Here, we assign an additional cost for the unaligned load.  */
        *inside_cost += ncopies
          * targetm.vectorize.builtin_vectorization_cost (unaligned_load,
                                                vectype, DR_MISALIGNMENT (dr));
        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_load_cost: unaligned supported by "
                   "hardware.");

        break;
      }
    case dr_explicit_realign:
      {
        *inside_cost += ncopies * (2 * vect_get_stmt_cost (vector_load)
                                   + vect_get_stmt_cost (vector_stmt));

        /* FIXME: If the misalignment remains fixed across the iterations of
           the containing loop, the following cost should be added to the
           outside costs.  */
        if (targetm.vectorize.builtin_mask_for_load)
          *inside_cost += vect_get_stmt_cost (vector_stmt);

        break;
      }
    case dr_explicit_realign_optimized:
      {
        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_load_cost: unaligned software "
                   "pipelined.");

        /* Unaligned software pipeline has a load of an address, an initial
           load, and possibly a mask operation to "prime" the loop.  However,
           if this is an access in a group of loads, which provide strided
           access, then the above cost should only be considered for one
           access in the group.  Inside the loop, there is a load op
           and a realignment op.  */

        if (add_realign_cost)
          {
            *outside_cost = 2 * vect_get_stmt_cost (vector_stmt);
            if (targetm.vectorize.builtin_mask_for_load)
              *outside_cost += vect_get_stmt_cost (vector_stmt);
          }

        *inside_cost += ncopies * (vect_get_stmt_cost (vector_load)
                                   + vect_get_stmt_cost (vector_stmt));
        break;
      }

    default:
      gcc_unreachable ();
    }
}

/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new vector variable with
   the vector elements of VECTOR_VAR.  Place the initialization at BSI if it
   is not NULL.  Otherwise, place the initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */

tree
vect_init_vector (gimple stmt, tree vector_var, tree vector_type,
                  gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  tree new_var;
  gimple init_stmt;
  tree vec_oprnd;
  edge pe;
  tree new_temp;
  basic_block new_bb;

  new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
  add_referenced_var (new_var);
  init_stmt = gimple_build_assign (new_var, vector_var);
  new_temp = make_ssa_name (new_var, init_stmt);
  gimple_assign_set_lhs (init_stmt, new_temp);

  if (gsi)
    vect_finish_stmt_generation (stmt, init_stmt, gsi);
  else
    {
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
        {
          struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

          if (nested_in_vect_loop_p (loop, stmt))
            loop = loop->inner;

          pe = loop_preheader_edge (loop);
          new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
          gcc_assert (!new_bb);
        }
      else
        {
          bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
          basic_block bb;
          gimple_stmt_iterator gsi_bb_start;

          gcc_assert (bb_vinfo);
          bb = BB_VINFO_BB (bb_vinfo);
          gsi_bb_start = gsi_after_labels (bb);
          gsi_insert_before (&gsi_bb_start, init_stmt, GSI_SAME_STMT);
        }
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "created new init_stmt: ");
      print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
    }

  vec_oprnd = gimple_assign_lhs (init_stmt);
  return vec_oprnd;
}
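
/* Note: callers that build loop-invariant vector constants or invariants
   (cases 1 and 2 in vect_get_vec_def_for_operand below) pass GSI == NULL,
   so the init_stmt lands on the loop preheader edge and executes only once
   per loop entry.  */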

/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT.  This function returns a (vector) def that will
   be used in the vectorized stmt for STMT.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector
   def needs to be introduced.  */

tree
vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
{
  tree vec_oprnd;
  gimple vec_stmt;
  gimple def_stmt;
  stmt_vec_info def_stmt_info = NULL;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  unsigned int nunits;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  tree vec_inv;
  tree vec_cst;
  tree t = NULL_TREE;
  tree def;
  int i;
  enum vect_def_type dt;
  bool is_simple_use;
  tree vector_type;

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vect_get_vec_def_for_operand: ");
      print_generic_expr (vect_dump, op, TDF_SLIM);
    }

  is_simple_use = vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def,
                                      &dt);
  gcc_assert (is_simple_use);
  if (vect_print_dump_info (REPORT_DETAILS))
    {
      if (def)
        {
          fprintf (vect_dump, "def = ");
          print_generic_expr (vect_dump, def, TDF_SLIM);
        }
      if (def_stmt)
        {
          fprintf (vect_dump, " def_stmt = ");
          print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
        }
    }

  switch (dt)
    {
    /* Case 1: operand is a constant.  */
    case vect_constant_def:
      {
        vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
        gcc_assert (vector_type);
        nunits = TYPE_VECTOR_SUBPARTS (vector_type);

        if (scalar_def)
          *scalar_def = op;

        /* Create 'vect_cst_ = {cst,cst,...,cst}'  */
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits);

        vec_cst = build_vector_from_val (vector_type, op);
        return vect_init_vector (stmt, vec_cst, vector_type, NULL);
      }

    /* Case 2: operand is defined outside the loop - loop invariant.  */
    case vect_external_def:
      {
        vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
        gcc_assert (vector_type);
        nunits = TYPE_VECTOR_SUBPARTS (vector_type);

        if (scalar_def)
          *scalar_def = def;

        /* Create 'vec_inv = {inv,inv,..,inv}'  */
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "Create vector_inv.");

        for (i = nunits - 1; i >= 0; --i)
          {
            t = tree_cons (NULL_TREE, def, t);
          }

        /* FIXME: use build_constructor directly.  */
        vec_inv = build_constructor_from_list (vector_type, t);
        return vect_init_vector (stmt, vec_inv, vector_type, NULL);
      }

    /* Case 3: operand is defined inside the loop.  */
    case vect_internal_def:
      {
        if (scalar_def)
          *scalar_def = NULL/* FIXME tuples: def_stmt*/;

        /* Get the def from the vectorized stmt.  */
        def_stmt_info = vinfo_for_stmt (def_stmt);
        vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
        gcc_assert (vec_stmt);
        if (gimple_code (vec_stmt) == GIMPLE_PHI)
          vec_oprnd = PHI_RESULT (vec_stmt);
        else if (is_gimple_call (vec_stmt))
          vec_oprnd = gimple_call_lhs (vec_stmt);
        else
          vec_oprnd = gimple_assign_lhs (vec_stmt);
        return vec_oprnd;
      }

    /* Case 4: operand is defined by a loop header phi - reduction  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
      {
        struct loop *loop;

        gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
        loop = (gimple_bb (def_stmt))->loop_father;

        /* Get the def before the loop  */
        op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
        return get_initial_def_for_reduction (stmt, op, scalar_def);
      }

    /* Case 5: operand is defined by loop-header phi - induction.  */
    case vect_induction_def:
      {
        gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);

        /* Get the def from the vectorized stmt.  */
        def_stmt_info = vinfo_for_stmt (def_stmt);
        vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
        if (gimple_code (vec_stmt) == GIMPLE_PHI)
          vec_oprnd = PHI_RESULT (vec_stmt);
        else
          vec_oprnd = gimple_get_lhs (vec_stmt);
        return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}

/* Function vect_get_vec_def_for_stmt_copy

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT
   field of the stmt that defines VEC_OPRND.
   DT is the type of the vector def VEC_OPRND.

   Context:
        In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of
   the vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:         vectorized into:        STMT_VINFO_RELATED_STMT

   S1: x = load         VS1.0:  vx.0 = memref0      VS1.1
                        VS1.1:  vx.1 = memref1      VS1.2
                        VS1.2:  vx.2 = memref2      VS1.3
                        VS1.3:  vx.3 = memref3

   S2: z = x + ...      VSnew.0:  vz0 = vx.0 + ...  VSnew.1
                        VSnew.1:  vz1 = vx.1 + ...  VSnew.2
                        VSnew.2:  vz2 = vx.2 + ...  VSnew.3
                        VSnew.3:  vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
        To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

        To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

        For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
        vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
        vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
        vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2);  */

tree
vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
{
  gimple vec_stmt_for_operand;
  stmt_vec_info def_stmt_info;

  /* Do nothing; can reuse same def.  */
  if (dt == vect_external_def || dt == vect_constant_def)
    return vec_oprnd;

  vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
  def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
  gcc_assert (def_stmt_info);
  vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (vec_stmt_for_operand);
  vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
    vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
  else
    vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  return vec_oprnd;
}

/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */

static void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
                                 VEC(tree,heap) **vec_oprnds0,
                                 VEC(tree,heap) **vec_oprnds1)
{
  tree vec_oprnd = VEC_pop (tree, *vec_oprnds0);

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);

  if (vec_oprnds1 && *vec_oprnds1)
    {
      vec_oprnd = VEC_pop (tree, *vec_oprnds1);
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
    }
}

/* Get vectorized definitions for OP0 and OP1, or SLP_NODE if it is not
   NULL.  */

static void
vect_get_vec_defs (tree op0, tree op1, gimple stmt,
                   VEC(tree,heap) **vec_oprnds0, VEC(tree,heap) **vec_oprnds1,
                   slp_tree slp_node)
{
  if (slp_node)
    vect_get_slp_defs (op0, op1, slp_node, vec_oprnds0, vec_oprnds1, -1);
  else
    {
      tree vec_oprnd;

      *vec_oprnds0 = VEC_alloc (tree, heap, 1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
      VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);

      if (op1)
        {
          *vec_oprnds1 = VEC_alloc (tree, heap, 1);
          vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
          VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
        }
    }
}
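
/* A usage sketch tying the two helpers above together: for a two-operand
   stmt vectorized without SLP, the first copy is built from
   vect_get_vec_defs (op0, op1, ...) and each subsequent copy from
   vect_get_vec_defs_for_stmt_copy (dt, ...), following the
   vx.0 -> vx.1 -> vx.2 chain described before
   vect_get_vec_def_for_stmt_copy.  */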

/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

void
vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
                             gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);

  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
                                                   bb_vinfo));

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "add new stmt: ");
      print_gimple_stmt (vect_dump, vec_stmt, 0, TDF_SLIM);
    }

  gimple_set_location (vec_stmt, gimple_location (gsi_stmt (*gsi)));
}

/* Checks if CALL can be vectorized in type VECTYPE.  Returns
   a function declaration if the target has a vectorized version
   of the function, or NULL_TREE if the function cannot be vectorized.  */

tree
vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
{
  tree fndecl = gimple_call_fndecl (call);

  /* We only handle functions that do not read or clobber memory -- i.e.
     const or novops ones.  */
  if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
    return NULL_TREE;

  if (!fndecl
      || TREE_CODE (fndecl) != FUNCTION_DECL
      || !DECL_BUILT_IN (fndecl))
    return NULL_TREE;

  return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
                                                        vectype_in);
}
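
/* For example, a target hook implementation typically maps a scalar
   BUILT_IN_SQRT call to the target's vector square-root builtin when one
   matches VECTYPE_OUT/VECTYPE_IN, and returns NULL_TREE otherwise; the
   details are entirely target-specific.  */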

/* Function vectorizable_call.

   Check if STMT performs a function call that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
{
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
  tree vectype_out, vectype_in;
  int nunits_in;
  int nunits_out;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree fndecl, new_temp, def, rhs_type;
  gimple def_stmt;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  gimple new_stmt = NULL;
  int ncopies, j;
  VEC(tree, heap) *vargs = NULL;
  enum { NARROW, NONE, WIDEN } modifier;
  size_t i, nargs;

  /* FORNOW: unsupported in basic block SLP.  */
  gcc_assert (loop_vinfo);

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* FORNOW: SLP not supported.  */
  if (STMT_SLP_TYPE (stmt_info))
    return false;

  /* Is STMT a vectorizable call?  */
  if (!is_gimple_call (stmt))
    return false;

  if (TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  if (stmt_can_throw_internal (stmt))
    return false;

  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Process function arguments.  */
  rhs_type = NULL_TREE;
  vectype_in = NULL_TREE;
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has more than three arguments, we do not have
     interesting builtin functions to vectorize with more than two arguments
     except for fma.  No arguments is also not good.  */
  if (nargs == 0 || nargs > 3)
    return false;

  for (i = 0; i < nargs; i++)
    {
      tree opvectype;

      op = gimple_call_arg (stmt, i);

      /* We can only handle calls with arguments of the same type.  */
      if (rhs_type
          && !types_compatible_p (rhs_type, TREE_TYPE (op)))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "argument types differ.");
          return false;
        }
      if (!rhs_type)
        rhs_type = TREE_TYPE (op);

      if (!vect_is_simple_use_1 (op, loop_vinfo, NULL,
                                 &def_stmt, &def, &dt[i], &opvectype))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "use not simple.");
          return false;
        }

      if (!vectype_in)
        vectype_in = opvectype;
      else if (opvectype
               && opvectype != vectype_in)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "argument vector types differ.");
          return false;
        }
    }
  /* If all arguments are external or constant defs use a vector type with
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "no vectype for scalar type ");
          print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
        }

      return false;
    }

  /* FORNOW */
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in == nunits_out / 2)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else if (nunits_out == nunits_in / 2)
    modifier = WIDEN;
  else
    return false;
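
  /* Note on the FORNOW check above: a call is treated as NARROW when each
     output vector holds twice as many (narrower) elements as an input
     vector, so two input vectors are consumed per call; WIDEN is the
     mirror case; any other nunits ratio is rejected.  */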
  /* For now, we only vectorize functions if a target specific builtin
     is available.  TODO -- in some cases, it might be profitable to
     insert the calls for pieces of the vector, in order to be able
     to vectorize other operations in the loop.  */
  fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
  if (fndecl == NULL_TREE)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "function is not vectorizable.");

      return false;
    }

  gcc_assert (!gimple_vuse (stmt));

  if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "=== vectorizable_call ===");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
      return true;
    }

  /** Transform.  **/

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform operation.");

  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; ++j)
        {
          /* Build argument list for the vectorized call.  */
          if (j == 0)
            vargs = VEC_alloc (tree, heap, nargs);
          else
            VEC_truncate (tree, vargs, 0);

          for (i = 0; i < nargs; i++)
            {
              op = gimple_call_arg (stmt, i);
              if (j == 0)
                vec_oprnd0
                  = vect_get_vec_def_for_operand (op, stmt, NULL);
              else
                {
                  vec_oprnd0 = gimple_call_arg (new_stmt, i);
                  vec_oprnd0
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }

              VEC_quick_push (tree, vargs, vec_oprnd0);
            }

          new_stmt = gimple_build_call_vec (fndecl, vargs);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_call_set_lhs (new_stmt, new_temp);

          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          mark_symbols_for_renaming (new_stmt);

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      break;

    case NARROW:
      for (j = 0; j < ncopies; ++j)
        {
          /* Build argument list for the vectorized call.  */
          if (j == 0)
            vargs = VEC_alloc (tree, heap, nargs * 2);
          else
            VEC_truncate (tree, vargs, 0);

          for (i = 0; i < nargs; i++)
            {
              op = gimple_call_arg (stmt, i);
              if (j == 0)
                {
                  vec_oprnd0
                    = vect_get_vec_def_for_operand (op, stmt, NULL);
                  vec_oprnd1
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }
              else
                {
                  vec_oprnd1 = gimple_call_arg (new_stmt, 2*i);
                  vec_oprnd0
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
                  vec_oprnd1
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }

              VEC_quick_push (tree, vargs, vec_oprnd0);
              VEC_quick_push (tree, vargs, vec_oprnd1);
            }

          new_stmt = gimple_build_call_vec (fndecl, vargs);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_call_set_lhs (new_stmt, new_temp);

          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          mark_symbols_for_renaming (new_stmt);

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

      break;

    case WIDEN:
      /* No current target implements this case.  */
      return false;
    }

  VEC_free (tree, heap, vargs);

  /* Update the exception handling table with the vector stmt if necessary.  */
  if (maybe_clean_or_replace_eh_stmt (stmt, *vec_stmt))
    gimple_purge_dead_eh_edges (gimple_bb (stmt));

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  type = TREE_TYPE (scalar_dest);
  new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
                                  build_zero_cst (type));
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, false);
  SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;

  return true;
}

/* Function vect_gen_widened_results_half

   Create a vector stmt whose code, type, number of arguments, and result
   variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
   VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at BSI.
   In the case that CODE is a CALL_EXPR, this means that a call to DECL
   needs to be created (DECL is a function-decl of a target-builtin).
   STMT is the original scalar stmt that we are vectorizing.  */

static gimple
vect_gen_widened_results_half (enum tree_code code,
                               tree decl,
                               tree vec_oprnd0, tree vec_oprnd1, int op_type,
                               tree vec_dest, gimple_stmt_iterator *gsi,
                               gimple stmt)
{
  gimple new_stmt;
  tree new_temp;

  /* Generate half of the widened result:  */
  if (code == CALL_EXPR)
    {
      /* Target specific support  */
      if (op_type == binary_op)
        new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
      else
        new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_call_set_lhs (new_stmt, new_temp);
    }
  else
    {
      /* Generic support */
      gcc_assert (op_type == TREE_CODE_LENGTH (code));
      if (op_type != binary_op)
        vec_oprnd1 = NULL;
      new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
                                               vec_oprnd1);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
    }
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return new_stmt;
}
1724 /* Check if STMT performs a conversion operation, that can be vectorized.
1725 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1726 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1727 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1729 static bool
1730 vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
1731 gimple *vec_stmt, slp_tree slp_node)
1733 tree vec_dest;
1734 tree scalar_dest;
1735 tree op0;
1736 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
1737 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1738 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1739 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
1740 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
1741 tree new_temp;
1742 tree def;
1743 gimple def_stmt;
1744 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
1745 gimple new_stmt = NULL;
1746 stmt_vec_info prev_stmt_info;
1747 int nunits_in;
1748 int nunits_out;
1749 tree vectype_out, vectype_in;
1750 int ncopies, j;
1751 tree rhs_type;
1752 tree builtin_decl;
1753 enum { NARROW, NONE, WIDEN } modifier;
1754 int i;
1755 VEC(tree,heap) *vec_oprnds0 = NULL;
1756 tree vop0;
1757 VEC(tree,heap) *dummy = NULL;
1758 int dummy_int;
1760 /* Is STMT a vectorizable conversion? */
1762 /* FORNOW: unsupported in basic block SLP. */
1763 gcc_assert (loop_vinfo);
1765 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1766 return false;
1768 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1769 return false;
1771 if (!is_gimple_assign (stmt))
1772 return false;
1774 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
1775 return false;
1777 code = gimple_assign_rhs_code (stmt);
1778 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
1779 return false;
1781 /* Check types of lhs and rhs. */
1782 scalar_dest = gimple_assign_lhs (stmt);
1783 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
1785 op0 = gimple_assign_rhs1 (stmt);
1786 rhs_type = TREE_TYPE (op0);
1787 /* Check the operands of the operation. */
1788 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
1789 &def_stmt, &def, &dt[0], &vectype_in))
1791 if (vect_print_dump_info (REPORT_DETAILS))
1792 fprintf (vect_dump, "use not simple.");
1793 return false;
1795 /* If op0 is an external or constant defs use a vector type of
1796 the same size as the output vector type. */
1797 if (!vectype_in)
1798 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
1799 if (vec_stmt)
1800 gcc_assert (vectype_in);
1801 if (!vectype_in)
1803 if (vect_print_dump_info (REPORT_DETAILS))
1805 fprintf (vect_dump, "no vectype for scalar type ");
1806 print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
1809 return false;
1812 /* FORNOW */
1813 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
1814 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
1815 if (nunits_in == nunits_out / 2)
1816 modifier = NARROW;
1817 else if (nunits_out == nunits_in)
1818 modifier = NONE;
1819 else if (nunits_out == nunits_in / 2)
1820 modifier = WIDEN;
1821 else
1822 return false;
1824 if (modifier == NARROW)
1825 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
1826 else
1827 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
1829 /* Multiple types in SLP are handled by creating the appropriate number of
1830 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
1831 case of SLP. */
1832 if (slp_node || PURE_SLP_STMT (stmt_info))
1833 ncopies = 1;
1835 /* Sanity check: make sure that at least one copy of the vectorized stmt
1836 needs to be generated. */
1837 gcc_assert (ncopies >= 1);
1839 /* Supportable by target? */
1840 if ((modifier == NONE
1841 && !targetm.vectorize.builtin_conversion (code, vectype_out, vectype_in))
1842 || (modifier == WIDEN
1843 && !supportable_widening_operation (code, stmt,
1844 vectype_out, vectype_in,
1845 &decl1, &decl2,
1846 &code1, &code2,
1847 &dummy_int, &dummy))
1848 || (modifier == NARROW
1849 && !supportable_narrowing_operation (code, vectype_out, vectype_in,
1850 &code1, &dummy_int, &dummy)))
1852 if (vect_print_dump_info (REPORT_DETAILS))
1853 fprintf (vect_dump, "conversion not supported by target.");
1854 return false;
1857 if (modifier != NONE)
1859 /* FORNOW: SLP not supported. */
1860 if (STMT_SLP_TYPE (stmt_info))
1861 return false;
1864 if (!vec_stmt) /* transformation not required. */
1866 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
1867 return true;
1870 /** Transform. **/
1871 if (vect_print_dump_info (REPORT_DETAILS))
1872 fprintf (vect_dump, "transform conversion.");
1874 /* Handle def. */
1875 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
1877 if (modifier == NONE && !slp_node)
1878 vec_oprnds0 = VEC_alloc (tree, heap, 1);
1880 prev_stmt_info = NULL;
1881 switch (modifier)
1883 case NONE:
1884 for (j = 0; j < ncopies; j++)
1886 if (j == 0)
1887 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
1888 else
1889 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
1891 builtin_decl =
1892 targetm.vectorize.builtin_conversion (code,
1893 vectype_out, vectype_in);
1894 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
1896 /* Arguments are ready. Create the new vector stmt. */
1897 new_stmt = gimple_build_call (builtin_decl, 1, vop0);
1898 new_temp = make_ssa_name (vec_dest, new_stmt);
1899 gimple_call_set_lhs (new_stmt, new_temp);
1900 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1901 if (slp_node)
1902 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
1905 if (j == 0)
1906 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1907 else
1908 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1909 prev_stmt_info = vinfo_for_stmt (new_stmt);
1911 break;
1913 case WIDEN:
1914 /* In case the vectorization factor (VF) is bigger than the number
1915 of elements that we can fit in a vectype (nunits), we have to
1916 generate more than one vector stmt - i.e - we need to "unroll"
1917 the vector stmt by a factor VF/nunits. */
1918 for (j = 0; j < ncopies; j++)
1920 if (j == 0)
1921 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1922 else
1923 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1925 /* Generate first half of the widened result: */
1926 new_stmt
1927 = vect_gen_widened_results_half (code1, decl1,
1928 vec_oprnd0, vec_oprnd1,
1929 unary_op, vec_dest, gsi, stmt);
1930 if (j == 0)
1931 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1932 else
1933 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1934 prev_stmt_info = vinfo_for_stmt (new_stmt);
1936 /* Generate second half of the widened result: */
1937 new_stmt
1938 = vect_gen_widened_results_half (code2, decl2,
1939 vec_oprnd0, vec_oprnd1,
1940 unary_op, vec_dest, gsi, stmt);
1941 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1942 prev_stmt_info = vinfo_for_stmt (new_stmt);
1944 break;
1946 case NARROW:
1947 /* In case the vectorization factor (VF) is bigger than the number
1948 of elements that we can fit in a vectype (nunits), we have to
1949 generate more than one vector stmt - i.e - we need to "unroll"
1950 the vector stmt by a factor VF/nunits. */
1951 for (j = 0; j < ncopies; j++)
1953 /* Handle uses. */
1954 if (j == 0)
1956 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
1957 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1959 else
1961 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1);
1962 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
1965 /* Arguments are ready. Create the new vector stmt. */
1966 new_stmt = gimple_build_assign_with_ops (code1, vec_dest, vec_oprnd0,
1967 vec_oprnd1);
1968 new_temp = make_ssa_name (vec_dest, new_stmt);
1969 gimple_assign_set_lhs (new_stmt, new_temp);
1970 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1972 if (j == 0)
1973 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
1974 else
1975 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1977 prev_stmt_info = vinfo_for_stmt (new_stmt);
1980 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
1983 if (vec_oprnds0)
1984 VEC_free (tree, heap, vec_oprnds0);
1986 return true;
1990 /* Function vectorizable_assignment.
1992 Check if STMT performs an assignment (copy) that can be vectorized.
1993 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1994 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1995 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1997 static bool
1998 vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
1999 gimple *vec_stmt, slp_tree slp_node)
2001 tree vec_dest;
2002 tree scalar_dest;
2003 tree op;
2004 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2005 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2006 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2007 tree new_temp;
2008 tree def;
2009 gimple def_stmt;
2010 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2011 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
2012 int ncopies;
2013 int i, j;
2014 VEC(tree,heap) *vec_oprnds = NULL;
2015 tree vop;
2016 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2017 gimple new_stmt = NULL;
2018 stmt_vec_info prev_stmt_info = NULL;
2019 enum tree_code code;
2020 tree vectype_in;
2022 /* Multiple types in SLP are handled by creating the appropriate number of
2023 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2024 case of SLP. */
2025 if (slp_node || PURE_SLP_STMT (stmt_info))
2026 ncopies = 1;
2027 else
2028 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
2030 gcc_assert (ncopies >= 1);
2032 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2033 return false;
2035 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2036 return false;
2038 /* Is vectorizable assignment? */
2039 if (!is_gimple_assign (stmt))
2040 return false;
2042 scalar_dest = gimple_assign_lhs (stmt);
2043 if (TREE_CODE (scalar_dest) != SSA_NAME)
2044 return false;
2046 code = gimple_assign_rhs_code (stmt);
2047 if (gimple_assign_single_p (stmt)
2048 || code == PAREN_EXPR
2049 || CONVERT_EXPR_CODE_P (code))
2050 op = gimple_assign_rhs1 (stmt);
2051 else
2052 return false;
2054 if (!vect_is_simple_use_1 (op, loop_vinfo, bb_vinfo,
2055 &def_stmt, &def, &dt[0], &vectype_in))
2057 if (vect_print_dump_info (REPORT_DETAILS))
2058 fprintf (vect_dump, "use not simple.");
2059 return false;
2062 /* We can handle NOP_EXPR conversions that do not change the number
2063 of elements or the vector size. */
2064 if (CONVERT_EXPR_CODE_P (code)
2065 && (!vectype_in
2066 || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
2067 || (GET_MODE_SIZE (TYPE_MODE (vectype))
2068 != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
2069 return false;
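/* E.g. (an assumed example): a cast between int and unsigned int keeps
   both the element count and the vector size, so it is accepted here
   and vectorized as a plain copy through the VIEW_CONVERT_EXPR built
   in the transformation code below. */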
2071 if (!vec_stmt) /* transformation not required. */
2073 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
2074 if (vect_print_dump_info (REPORT_DETAILS))
2075 fprintf (vect_dump, "=== vectorizable_assignment ===");
2076 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2077 return true;
2080 /** Transform. **/
2081 if (vect_print_dump_info (REPORT_DETAILS))
2082 fprintf (vect_dump, "transform assignment.");
2084 /* Handle def. */
2085 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2087 /* Handle use. */
2088 for (j = 0; j < ncopies; j++)
2090 /* Handle uses. */
2091 if (j == 0)
2092 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
2093 else
2094 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
2096 /* Arguments are ready. Create the new vector stmt. */
2097 FOR_EACH_VEC_ELT (tree, vec_oprnds, i, vop)
2099 if (CONVERT_EXPR_CODE_P (code))
2100 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
2101 new_stmt = gimple_build_assign (vec_dest, vop);
2102 new_temp = make_ssa_name (vec_dest, new_stmt);
2103 gimple_assign_set_lhs (new_stmt, new_temp);
2104 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2105 if (slp_node)
2106 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2109 if (slp_node)
2110 continue;
2112 if (j == 0)
2113 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2114 else
2115 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2117 prev_stmt_info = vinfo_for_stmt (new_stmt);
2120 VEC_free (tree, heap, vec_oprnds);
2121 return true;
2125 /* Function vectorizable_shift.
2127 Check if STMT performs a shift operation that can be vectorized.
2128 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2129 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2130 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2132 static bool
2133 vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
2134 gimple *vec_stmt, slp_tree slp_node)
2136 tree vec_dest;
2137 tree scalar_dest;
2138 tree op0, op1 = NULL;
2139 tree vec_oprnd1 = NULL_TREE;
2140 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2141 tree vectype;
2142 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2143 enum tree_code code;
2144 enum machine_mode vec_mode;
2145 tree new_temp;
2146 optab optab;
2147 int icode;
2148 enum machine_mode optab_op2_mode;
2149 tree def;
2150 gimple def_stmt;
2151 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2152 gimple new_stmt = NULL;
2153 stmt_vec_info prev_stmt_info;
2154 int nunits_in;
2155 int nunits_out;
2156 tree vectype_out;
2157 int ncopies;
2158 int j, i;
2159 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
2160 tree vop0, vop1;
2161 unsigned int k;
2162 bool scalar_shift_arg = true;
2163 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2164 int vf;
2166 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2167 return false;
2169 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2170 return false;
2172 /* Is STMT a vectorizable binary/unary operation? */
2173 if (!is_gimple_assign (stmt))
2174 return false;
2176 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2177 return false;
2179 code = gimple_assign_rhs_code (stmt);
2181 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
2182 || code == RROTATE_EXPR))
2183 return false;
2185 scalar_dest = gimple_assign_lhs (stmt);
2186 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2188 op0 = gimple_assign_rhs1 (stmt);
2189 if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
2190 &def_stmt, &def, &dt[0], &vectype))
2192 if (vect_print_dump_info (REPORT_DETAILS))
2193 fprintf (vect_dump, "use not simple.");
2194 return false;
2196 /* If op0 is an external or constant def, use a vector type with
2197 the same size as the output vector type. */
2198 if (!vectype)
2199 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2200 if (vec_stmt)
2201 gcc_assert (vectype);
2202 if (!vectype)
2204 if (vect_print_dump_info (REPORT_DETAILS))
2206 fprintf (vect_dump, "no vectype for scalar type ");
2207 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2210 return false;
2213 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2214 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
2215 if (nunits_out != nunits_in)
2216 return false;
2218 op1 = gimple_assign_rhs2 (stmt);
2219 if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt[1]))
2221 if (vect_print_dump_info (REPORT_DETAILS))
2222 fprintf (vect_dump, "use not simple.");
2223 return false;
2226 if (loop_vinfo)
2227 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2228 else
2229 vf = 1;
2231 /* Multiple types in SLP are handled by creating the appropriate number of
2232 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2233 case of SLP. */
2234 if (slp_node || PURE_SLP_STMT (stmt_info))
2235 ncopies = 1;
2236 else
2237 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2239 gcc_assert (ncopies >= 1);
2241 /* Determine whether the shift amount is a vector, or scalar. If the
2242 shift/rotate amount is a vector, use the vector/vector shift optabs. */
2244 if (dt[1] == vect_internal_def && !slp_node)
2245 scalar_shift_arg = false;
2246 else if (dt[1] == vect_constant_def
2247 || dt[1] == vect_external_def
2248 || dt[1] == vect_internal_def)
2250 /* In SLP, we need to check whether the shift count is the same
2251 for all the stmts; in loops, a constant or invariant count is
2252 always a scalar shift. */
2253 if (slp_node)
2255 VEC (gimple, heap) *stmts = SLP_TREE_SCALAR_STMTS (slp_node);
2256 gimple slpstmt;
2258 FOR_EACH_VEC_ELT (gimple, stmts, k, slpstmt)
2259 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
2260 scalar_shift_arg = false;
2263 else
2265 if (vect_print_dump_info (REPORT_DETAILS))
2266 fprintf (vect_dump, "operand mode requires invariant argument.");
2267 return false;
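/* A sketch of the distinction, using hypothetical scalar loops:
     for (i = 0; i < n; i++) a[i] = b[i] << 3;      <- invariant count:
                                                       scalar shift arg
     for (i = 0; i < n; i++) a[i] = b[i] << c[i];   <- per-element count:
                                                       vector shift arg */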
2270 /* Vector shifted by vector. */
2271 if (!scalar_shift_arg)
2273 optab = optab_for_tree_code (code, vectype, optab_vector);
2274 if (vect_print_dump_info (REPORT_DETAILS))
2275 fprintf (vect_dump, "vector/vector shift/rotate found.");
2277 /* See if the machine has a vector shifted by scalar insn and if not
2278 then see if it has a vector shifted by vector insn. */
2279 else
2281 optab = optab_for_tree_code (code, vectype, optab_scalar);
2282 if (optab
2283 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
2285 if (vect_print_dump_info (REPORT_DETAILS))
2286 fprintf (vect_dump, "vector/scalar shift/rotate found.");
2288 else
2290 optab = optab_for_tree_code (code, vectype, optab_vector);
2291 if (optab
2292 && (optab_handler (optab, TYPE_MODE (vectype))
2293 != CODE_FOR_nothing))
2295 scalar_shift_arg = false;
2297 if (vect_print_dump_info (REPORT_DETAILS))
2298 fprintf (vect_dump, "vector/vector shift/rotate found.");
2300 /* Unlike the other binary operators, shifts/rotates take an
2301 int rhs rather than one of the same type as the lhs, so
2302 make sure the scalar is of the right type if we are
2303 dealing with vectors of short/char. */
2304 if (dt[1] == vect_constant_def)
2305 op1 = fold_convert (TREE_TYPE (vectype), op1);
2310 /* Supportable by target? */
2311 if (!optab)
2313 if (vect_print_dump_info (REPORT_DETAILS))
2314 fprintf (vect_dump, "no optab.");
2315 return false;
2317 vec_mode = TYPE_MODE (vectype);
2318 icode = (int) optab_handler (optab, vec_mode);
2319 if (icode == CODE_FOR_nothing)
2321 if (vect_print_dump_info (REPORT_DETAILS))
2322 fprintf (vect_dump, "op not supported by target.");
2323 /* Check only during analysis. */
2324 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
2325 || (vf < vect_min_worthwhile_factor (code)
2326 && !vec_stmt))
2327 return false;
2328 if (vect_print_dump_info (REPORT_DETAILS))
2329 fprintf (vect_dump, "proceeding using word mode.");
2332 /* Worthwhile without SIMD support? Check only during analysis. */
2333 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2334 && vf < vect_min_worthwhile_factor (code)
2335 && !vec_stmt)
2337 if (vect_print_dump_info (REPORT_DETAILS))
2338 fprintf (vect_dump, "not worthwhile without SIMD support.");
2339 return false;
2342 if (!vec_stmt) /* transformation not required. */
2344 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
2345 if (vect_print_dump_info (REPORT_DETAILS))
2346 fprintf (vect_dump, "=== vectorizable_shift ===");
2347 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2348 return true;
2351 /** Transform. **/
2353 if (vect_print_dump_info (REPORT_DETAILS))
2354 fprintf (vect_dump, "transform binary/unary operation.");
2356 /* Handle def. */
2357 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2359 /* Allocate VECs for vector operands. In case of SLP, vector operands are
2360 created in the previous stages of the recursion, so no allocation is
2361 needed, except for the case of shift with scalar shift argument. In that
2362 case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
2363 be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
2364 In case of loop-based vectorization we allocate VECs of size 1. We
2365 allocate VEC_OPRNDS1 only in case of binary operation. */
2366 if (!slp_node)
2368 vec_oprnds0 = VEC_alloc (tree, heap, 1);
2369 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2371 else if (scalar_shift_arg)
2372 vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);
2374 prev_stmt_info = NULL;
2375 for (j = 0; j < ncopies; j++)
2377 /* Handle uses. */
2378 if (j == 0)
2380 if (scalar_shift_arg)
2382 /* Vector shl and shr insn patterns can be defined with scalar
2383 operand 2 (shift operand). In this case, use constant or loop
2384 invariant op1 directly, without extending it to vector mode
2385 first. */
2386 optab_op2_mode = insn_data[icode].operand[2].mode;
2387 if (!VECTOR_MODE_P (optab_op2_mode))
2389 if (vect_print_dump_info (REPORT_DETAILS))
2390 fprintf (vect_dump, "operand 1 using scalar mode.");
2391 vec_oprnd1 = op1;
2392 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2393 if (slp_node)
2395 /* Store vec_oprnd1 for every vector stmt to be created
2396 for SLP_NODE. We check during the analysis that all
2397 the shift arguments are the same.
2398 TODO: Allow different constants for different vector
2399 stmts generated for an SLP instance. */
2400 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
2401 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
2406 /* vec_oprnd1 is available if operand 1 should be of a scalar type
2407 (a special case for certain kinds of vector shifts); otherwise,
2408 operand 1 should be of a vector type (the usual case). */
2409 if (vec_oprnd1)
2410 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
2411 slp_node);
2412 else
2413 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
2414 slp_node);
2416 else
2417 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
2419 /* Arguments are ready. Create the new vector stmt. */
2420 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
2422 vop1 = VEC_index (tree, vec_oprnds1, i);
2423 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2424 new_temp = make_ssa_name (vec_dest, new_stmt);
2425 gimple_assign_set_lhs (new_stmt, new_temp);
2426 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2427 if (slp_node)
2428 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2431 if (slp_node)
2432 continue;
2434 if (j == 0)
2435 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2436 else
2437 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2438 prev_stmt_info = vinfo_for_stmt (new_stmt);
2441 VEC_free (tree, heap, vec_oprnds0);
2442 VEC_free (tree, heap, vec_oprnds1);
2444 return true;
2448 /* Function vectorizable_operation.
2450 Check if STMT performs a binary, unary or ternary operation that can
2451 be vectorized.
2452 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2453 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2454 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2456 static bool
2457 vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
2458 gimple *vec_stmt, slp_tree slp_node)
2460 tree vec_dest;
2461 tree scalar_dest;
2462 tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
2463 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2464 tree vectype;
2465 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2466 enum tree_code code;
2467 enum machine_mode vec_mode;
2468 tree new_temp;
2469 int op_type;
2470 optab optab;
2471 int icode;
2472 tree def;
2473 gimple def_stmt;
2474 enum vect_def_type dt[3]
2475 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
2476 gimple new_stmt = NULL;
2477 stmt_vec_info prev_stmt_info;
2478 int nunits_in;
2479 int nunits_out;
2480 tree vectype_out;
2481 int ncopies;
2482 int j, i;
2483 VEC(tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL, *vec_oprnds2 = NULL;
2484 tree vop0, vop1, vop2;
2485 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2486 int vf;
2488 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2489 return false;
2491 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2492 return false;
2494 /* Is STMT a vectorizable binary/unary operation? */
2495 if (!is_gimple_assign (stmt))
2496 return false;
2498 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2499 return false;
2501 code = gimple_assign_rhs_code (stmt);
2503 /* For pointer addition, we should use the normal plus for
2504 the vector addition. */
2505 if (code == POINTER_PLUS_EXPR)
2506 code = PLUS_EXPR;
2508 /* Support only unary, binary and ternary operations. */
2509 op_type = TREE_CODE_LENGTH (code);
2510 if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
2512 if (vect_print_dump_info (REPORT_DETAILS))
2513 fprintf (vect_dump, "num. args = %d (not unary/binary/ternary op).",
2514 op_type);
2515 return false;
2518 scalar_dest = gimple_assign_lhs (stmt);
2519 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2521 op0 = gimple_assign_rhs1 (stmt);
2522 if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
2523 &def_stmt, &def, &dt[0], &vectype))
2525 if (vect_print_dump_info (REPORT_DETAILS))
2526 fprintf (vect_dump, "use not simple.");
2527 return false;
2529 /* If op0 is an external or constant def, use a vector type with
2530 the same size as the output vector type. */
2531 if (!vectype)
2532 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2533 if (vec_stmt)
2534 gcc_assert (vectype);
2535 if (!vectype)
2537 if (vect_print_dump_info (REPORT_DETAILS))
2539 fprintf (vect_dump, "no vectype for scalar type ");
2540 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2543 return false;
2546 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2547 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
2548 if (nunits_out != nunits_in)
2549 return false;
2551 if (op_type == binary_op || op_type == ternary_op)
2553 op1 = gimple_assign_rhs2 (stmt);
2554 if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def,
2555 &dt[1]))
2557 if (vect_print_dump_info (REPORT_DETAILS))
2558 fprintf (vect_dump, "use not simple.");
2559 return false;
2562 if (op_type == ternary_op)
2564 op2 = gimple_assign_rhs3 (stmt);
2565 if (!vect_is_simple_use (op2, loop_vinfo, bb_vinfo, &def_stmt, &def,
2566 &dt[2]))
2568 if (vect_print_dump_info (REPORT_DETAILS))
2569 fprintf (vect_dump, "use not simple.");
2570 return false;
2574 if (loop_vinfo)
2575 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2576 else
2577 vf = 1;
2579 /* Multiple types in SLP are handled by creating the appropriate number of
2580 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2581 case of SLP. */
2582 if (slp_node || PURE_SLP_STMT (stmt_info))
2583 ncopies = 1;
2584 else
2585 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2587 gcc_assert (ncopies >= 1);
2589 /* Shifts are handled in vectorizable_shift (). */
2590 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
2591 || code == RROTATE_EXPR)
2592 return false;
2594 optab = optab_for_tree_code (code, vectype, optab_default);
2596 /* Supportable by target? */
2597 if (!optab)
2599 if (vect_print_dump_info (REPORT_DETAILS))
2600 fprintf (vect_dump, "no optab.");
2601 return false;
2603 vec_mode = TYPE_MODE (vectype);
2604 icode = (int) optab_handler (optab, vec_mode);
2605 if (icode == CODE_FOR_nothing)
2607 if (vect_print_dump_info (REPORT_DETAILS))
2608 fprintf (vect_dump, "op not supported by target.");
2609 /* Check only during analysis. */
2610 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
2611 || (vf < vect_min_worthwhile_factor (code)
2612 && !vec_stmt))
2613 return false;
2614 if (vect_print_dump_info (REPORT_DETAILS))
2615 fprintf (vect_dump, "proceeding using word mode.");
2618 /* Worthwhile without SIMD support? Check only during analysis. */
2619 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2620 && vf < vect_min_worthwhile_factor (code)
2621 && !vec_stmt)
2623 if (vect_print_dump_info (REPORT_DETAILS))
2624 fprintf (vect_dump, "not worthwhile without SIMD support.");
2625 return false;
2628 if (!vec_stmt) /* transformation not required. */
2630 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
2631 if (vect_print_dump_info (REPORT_DETAILS))
2632 fprintf (vect_dump, "=== vectorizable_operation ===");
2633 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2634 return true;
2637 /** Transform. **/
2639 if (vect_print_dump_info (REPORT_DETAILS))
2640 fprintf (vect_dump, "transform binary/unary operation.");
2642 /* Handle def. */
2643 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2645 /* Allocate VECs for vector operands. In case of SLP, vector operands
2646 are created in the previous stages of the recursion, so no allocation
2647 is needed. In case of loop-based vectorization we allocate VECs of
2648 size 1. VEC_OPRNDS1 is allocated only for binary and ternary
2649 operations, and VEC_OPRNDS2 only for ternary ones. */
2652 if (!slp_node)
2654 vec_oprnds0 = VEC_alloc (tree, heap, 1);
2655 if (op_type == binary_op || op_type == ternary_op)
2656 vec_oprnds1 = VEC_alloc (tree, heap, 1);
2657 if (op_type == ternary_op)
2658 vec_oprnds2 = VEC_alloc (tree, heap, 1);
2661 /* In case the vectorization factor (VF) is bigger than the number
2662 of elements that we can fit in a vectype (nunits), we have to generate
2663 more than one vector stmt - i.e - we need to "unroll" the
2664 vector stmt by a factor VF/nunits. In doing so, we record a pointer
2665 from one copy of the vector stmt to the next, in the field
2666 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
2667 stages to find the correct vector defs to be used when vectorizing
2668 stmts that use the defs of the current stmt. The example below
2669 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
2670 we need to create 4 vectorized stmts):
2672 before vectorization:
2673 RELATED_STMT VEC_STMT
2674 S1: x = memref - -
2675 S2: z = x + 1 - -
2677 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
2678 there):
2679 RELATED_STMT VEC_STMT
2680 VS1_0: vx0 = memref0 VS1_1 -
2681 VS1_1: vx1 = memref1 VS1_2 -
2682 VS1_2: vx2 = memref2 VS1_3 -
2683 VS1_3: vx3 = memref3 - -
2684 S1: x = load - VS1_0
2685 S2: z = x + 1 - -
2687 step2: vectorize stmt S2 (done here):
2688 To vectorize stmt S2 we first need to find the relevant vector
2689 def for the first operand 'x'. This is, as usual, obtained from
2690 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
2691 that defines 'x' (S1). This way we find the stmt VS1_0, and the
2692 relevant vector def 'vx0'. Having found 'vx0' we can generate
2693 the vector stmt VS2_0, and as usual, record it in the
2694 STMT_VINFO_VEC_STMT of stmt S2.
2695 When creating the second copy (VS2_1), we obtain the relevant vector
2696 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
2697 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
2698 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
2699 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
2700 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
2701 chain of stmts and pointers:
2702 RELATED_STMT VEC_STMT
2703 VS1_0: vx0 = memref0 VS1_1 -
2704 VS1_1: vx1 = memref1 VS1_2 -
2705 VS1_2: vx2 = memref2 VS1_3 -
2706 VS1_3: vx3 = memref3 - -
2707 S1: x = load - VS1_0
2708 VS2_0: vz0 = vx0 + v1 VS2_1 -
2709 VS2_1: vz1 = vx1 + v1 VS2_2 -
2710 VS2_2: vz2 = vx2 + v1 VS2_3 -
2711 VS2_3: vz3 = vx3 + v1 - -
2712 S2: z = x + 1 - VS2_0 */
2714 prev_stmt_info = NULL;
2715 for (j = 0; j < ncopies; j++)
2717 /* Handle uses. */
2718 if (j == 0)
2720 if (op_type == binary_op || op_type == ternary_op)
2721 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
2722 slp_node);
2723 else
2724 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
2725 slp_node);
2726 if (op_type == ternary_op)
2728 vec_oprnds2 = VEC_alloc (tree, heap, 1);
2729 VEC_quick_push (tree, vec_oprnds2,
2730 vect_get_vec_def_for_operand (op2, stmt, NULL));
2733 else
2735 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
2736 if (op_type == ternary_op)
2738 tree vec_oprnd = VEC_pop (tree, vec_oprnds2);
2739 VEC_quick_push (tree, vec_oprnds2,
2740 vect_get_vec_def_for_stmt_copy (dt[2],
2741 vec_oprnd));
2745 /* Arguments are ready. Create the new vector stmt. */
2746 FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
2748 vop1 = ((op_type == binary_op || op_type == ternary_op)
2749 ? VEC_index (tree, vec_oprnds1, i) : NULL_TREE);
2750 vop2 = ((op_type == ternary_op)
2751 ? VEC_index (tree, vec_oprnds2, i) : NULL_TREE);
2752 new_stmt = gimple_build_assign_with_ops3 (code, vec_dest,
2753 vop0, vop1, vop2);
2754 new_temp = make_ssa_name (vec_dest, new_stmt);
2755 gimple_assign_set_lhs (new_stmt, new_temp);
2756 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2757 if (slp_node)
2758 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2761 if (slp_node)
2762 continue;
2764 if (j == 0)
2765 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2766 else
2767 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2768 prev_stmt_info = vinfo_for_stmt (new_stmt);
2771 VEC_free (tree, heap, vec_oprnds0);
2772 if (vec_oprnds1)
2773 VEC_free (tree, heap, vec_oprnds1);
2774 if (vec_oprnds2)
2775 VEC_free (tree, heap, vec_oprnds2);
2777 return true;
2781 /* Get vectorized definitions for loop-based vectorization. For the first
2782 operand we call vect_get_vec_def_for_operand() (with OPRND containing
2783 scalar operand), and for the rest we get a copy with
2784 vect_get_vec_def_for_stmt_copy() using the previous vector definition
2785 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
2786 The vectors are collected into VEC_OPRNDS. */
2788 static void
2789 vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
2790 VEC (tree, heap) **vec_oprnds, int multi_step_cvt)
2792 tree vec_oprnd;
2794 /* Get the first vector operand. */
2795 /* All the vector operands except the very first one (that is the
2796 scalar oprnd) are stmt copies. */
2797 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
2798 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
2799 else
2800 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
2802 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2804 /* Get second vector operand. */
2805 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
2806 VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
2808 *oprnd = vec_oprnd;
2810 /* For conversion in multiple steps, continue to get operands
2811 recursively. */
2812 if (multi_step_cvt)
2813 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
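/* For illustration (an assumed invocation): when called with
   MULTI_STEP_CVT == vect_pow2 (m) - 1, as vectorizable_type_demotion
   below does, this routine pushes two vector defs per level, i.e.
   2 * 2^m defs in total - exactly what one copy of an m-step
   demotion consumes. */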
2817 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
2818 For multi-step conversions store the resulting vectors and call the function
2819 recursively. */
2821 static void
2822 vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
2823 int multi_step_cvt, gimple stmt,
2824 VEC (tree, heap) *vec_dsts,
2825 gimple_stmt_iterator *gsi,
2826 slp_tree slp_node, enum tree_code code,
2827 stmt_vec_info *prev_stmt_info)
2829 unsigned int i;
2830 tree vop0, vop1, new_tmp, vec_dest;
2831 gimple new_stmt;
2832 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2834 vec_dest = VEC_pop (tree, vec_dsts);
2836 for (i = 0; i < VEC_length (tree, *vec_oprnds); i += 2)
2838 /* Create demotion operation. */
2839 vop0 = VEC_index (tree, *vec_oprnds, i);
2840 vop1 = VEC_index (tree, *vec_oprnds, i + 1);
2841 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
2842 new_tmp = make_ssa_name (vec_dest, new_stmt);
2843 gimple_assign_set_lhs (new_stmt, new_tmp);
2844 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2846 if (multi_step_cvt)
2847 /* Store the resulting vector for next recursive call. */
2848 VEC_replace (tree, *vec_oprnds, i/2, new_tmp);
2849 else
2851 /* This is the last step of the conversion sequence. Store the
2852 vectors in SLP_NODE or in the vector info of the scalar statement
2853 (or in the STMT_VINFO_RELATED_STMT chain). */
2854 if (slp_node)
2855 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
2856 else
2858 if (!*prev_stmt_info)
2859 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2860 else
2861 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
2863 *prev_stmt_info = vinfo_for_stmt (new_stmt);
2868 /* For multi-step demotion operations we first generate demotion operations
2869 from the source type to the intermediate types, and then combine the
2870 results (stored in VEC_OPRNDS) in demotion operation to the destination
2871 type. */
2872 if (multi_step_cvt)
2874 /* At each level of recursion we have half of the operands we had at
2875 the previous level. */
2876 VEC_truncate (tree, *vec_oprnds, (i+1)/2);
2877 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
2878 stmt, vec_dsts, gsi, slp_node,
2879 code, prev_stmt_info);
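/* Illustrative multi-step flow (assumed vector types): demoting
   int -> char with MULTI_STEP_CVT == 1 combines the operands pairwise
   at each level:
     step 1: { V4SI, V4SI, V4SI, V4SI } -> { V8HI, V8HI }
     step 2: { V8HI, V8HI }             -> { V16QI }
   which is why each recursive call truncates *VEC_OPRNDS to half its
   previous length. */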
2884 /* Function vectorizable_type_demotion
2886 Check if STMT performs a binary or unary operation that involves
2887 type demotion, and if it can be vectorized.
2888 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2889 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2890 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2892 static bool
2893 vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
2894 gimple *vec_stmt, slp_tree slp_node)
2896 tree vec_dest;
2897 tree scalar_dest;
2898 tree op0;
2899 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2900 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2901 enum tree_code code, code1 = ERROR_MARK;
2902 tree def;
2903 gimple def_stmt;
2904 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
2905 stmt_vec_info prev_stmt_info;
2906 int nunits_in;
2907 int nunits_out;
2908 tree vectype_out;
2909 int ncopies;
2910 int j, i;
2911 tree vectype_in;
2912 int multi_step_cvt = 0;
2913 VEC (tree, heap) *vec_oprnds0 = NULL;
2914 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
2915 tree last_oprnd, intermediate_type;
2917 /* FORNOW: not supported by basic block SLP vectorization. */
2918 gcc_assert (loop_vinfo);
2920 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2921 return false;
2923 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2924 return false;
2926 /* Is STMT a vectorizable type-demotion operation? */
2927 if (!is_gimple_assign (stmt))
2928 return false;
2930 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
2931 return false;
2933 code = gimple_assign_rhs_code (stmt);
2934 if (!CONVERT_EXPR_CODE_P (code))
2935 return false;
2937 scalar_dest = gimple_assign_lhs (stmt);
2938 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2940 /* Check the operands of the operation. */
2941 op0 = gimple_assign_rhs1 (stmt);
2942 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
2943 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
2944 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
2945 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
2946 && CONVERT_EXPR_CODE_P (code))))
2947 return false;
2948 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
2949 &def_stmt, &def, &dt[0], &vectype_in))
2951 if (vect_print_dump_info (REPORT_DETAILS))
2952 fprintf (vect_dump, "use not simple.");
2953 return false;
2955 /* If op0 is an external def, use a vector type with the
2956 same size as the output vector type if possible. */
2957 if (!vectype_in)
2958 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
2959 if (vec_stmt)
2960 gcc_assert (vectype_in);
2961 if (!vectype_in)
2963 if (vect_print_dump_info (REPORT_DETAILS))
2965 fprintf (vect_dump, "no vectype for scalar type ");
2966 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
2969 return false;
2972 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2973 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2974 if (nunits_in >= nunits_out)
2975 return false;
2977 /* Multiple types in SLP are handled by creating the appropriate number of
2978 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2979 case of SLP. */
2980 if (slp_node || PURE_SLP_STMT (stmt_info))
2981 ncopies = 1;
2982 else
2983 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2984 gcc_assert (ncopies >= 1);
2986 /* Supportable by target? */
2987 if (!supportable_narrowing_operation (code, vectype_out, vectype_in,
2988 &code1, &multi_step_cvt, &interm_types))
2989 return false;
2991 if (!vec_stmt) /* transformation not required. */
2993 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
2994 if (vect_print_dump_info (REPORT_DETAILS))
2995 fprintf (vect_dump, "=== vectorizable_demotion ===");
2996 vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
2997 return true;
3000 /** Transform. **/
3001 if (vect_print_dump_info (REPORT_DETAILS))
3002 fprintf (vect_dump, "transform type demotion operation. ncopies = %d.",
3003 ncopies);
3005 /* In case of multi-step demotion, we first generate demotion operations
3006 to the intermediate types, and then from those types to the final one.
3007 We create vector destinations for the intermediate types (TYPES) received
3008 from supportable_narrowing_operation, and store them in the correct order
3009 for future use in vect_create_vectorized_demotion_stmts(). */
3010 if (multi_step_cvt)
3011 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
3012 else
3013 vec_dsts = VEC_alloc (tree, heap, 1);
3015 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
3016 VEC_quick_push (tree, vec_dsts, vec_dest);
3018 if (multi_step_cvt)
3020 for (i = VEC_length (tree, interm_types) - 1;
3021 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
3023 vec_dest = vect_create_destination_var (scalar_dest,
3024 intermediate_type);
3025 VEC_quick_push (tree, vec_dsts, vec_dest);
3029 /* In case the vectorization factor (VF) is bigger than the number
3030 of elements that we can fit in a vectype (nunits), we have to generate
3031 more than one vector stmt - i.e - we need to "unroll" the
3032 vector stmt by a factor VF/nunits. */
3033 last_oprnd = op0;
3034 prev_stmt_info = NULL;
3035 for (j = 0; j < ncopies; j++)
3037 /* Handle uses. */
3038 if (slp_node)
3039 vect_get_slp_defs (op0, NULL_TREE, slp_node, &vec_oprnds0, NULL, -1);
3040 else
3042 VEC_free (tree, heap, vec_oprnds0);
3043 vec_oprnds0 = VEC_alloc (tree, heap,
3044 (multi_step_cvt ? vect_pow2 (multi_step_cvt) * 2 : 2));
3045 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
3046 vect_pow2 (multi_step_cvt) - 1);
3049 /* Arguments are ready. Create the new vector stmts. */
3050 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
3051 vect_create_vectorized_demotion_stmts (&vec_oprnds0,
3052 multi_step_cvt, stmt, tmp_vec_dsts,
3053 gsi, slp_node, code1,
3054 &prev_stmt_info);
3057 VEC_free (tree, heap, vec_oprnds0);
3058 VEC_free (tree, heap, vec_dsts);
3059 VEC_free (tree, heap, tmp_vec_dsts);
3060 VEC_free (tree, heap, interm_types);
3062 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3063 return true;
3067 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
3068 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
3069 the resulting vectors and call the function recursively. */
3071 static void
3072 vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
3073 VEC (tree, heap) **vec_oprnds1,
3074 int multi_step_cvt, gimple stmt,
3075 VEC (tree, heap) *vec_dsts,
3076 gimple_stmt_iterator *gsi,
3077 slp_tree slp_node, enum tree_code code1,
3078 enum tree_code code2, tree decl1,
3079 tree decl2, int op_type,
3080 stmt_vec_info *prev_stmt_info)
3082 int i;
3083 tree vop0, vop1, new_tmp1, new_tmp2, vec_dest;
3084 gimple new_stmt1, new_stmt2;
3085 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3086 VEC (tree, heap) *vec_tmp;
3088 vec_dest = VEC_pop (tree, vec_dsts);
3089 vec_tmp = VEC_alloc (tree, heap, VEC_length (tree, *vec_oprnds0) * 2);
3091 FOR_EACH_VEC_ELT (tree, *vec_oprnds0, i, vop0)
3093 if (op_type == binary_op)
3094 vop1 = VEC_index (tree, *vec_oprnds1, i);
3095 else
3096 vop1 = NULL_TREE;
3098 /* Generate the two halves of promotion operation. */
3099 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
3100 op_type, vec_dest, gsi, stmt);
3101 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
3102 op_type, vec_dest, gsi, stmt);
3103 if (is_gimple_call (new_stmt1))
3105 new_tmp1 = gimple_call_lhs (new_stmt1);
3106 new_tmp2 = gimple_call_lhs (new_stmt2);
3108 else
3110 new_tmp1 = gimple_assign_lhs (new_stmt1);
3111 new_tmp2 = gimple_assign_lhs (new_stmt2);
3114 if (multi_step_cvt)
3116 /* Store the results for the recursive call. */
3117 VEC_quick_push (tree, vec_tmp, new_tmp1);
3118 VEC_quick_push (tree, vec_tmp, new_tmp2);
3120 else
3122 /* Last step of the promotion sequence - store the results. */
3123 if (slp_node)
3125 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt1);
3126 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt2);
3128 else
3130 if (!*prev_stmt_info)
3131 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt1;
3132 else
3133 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt1;
3135 *prev_stmt_info = vinfo_for_stmt (new_stmt1);
3136 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt2;
3137 *prev_stmt_info = vinfo_for_stmt (new_stmt2);
3142 if (multi_step_cvt)
3144 /* For a multi-step promotion operation we call the function
3145 recursively for every stage. We start from the input type,
3146 create promotion operations to the intermediate types, and then
3147 create promotions to the output type. */
3148 *vec_oprnds0 = VEC_copy (tree, heap, vec_tmp);
3149 vect_create_vectorized_promotion_stmts (vec_oprnds0, vec_oprnds1,
3150 multi_step_cvt - 1, stmt,
3151 vec_dsts, gsi, slp_node, code1,
3152 code2, decl1, decl2, op_type,
3153 prev_stmt_info);
3156 VEC_free (tree, heap, vec_tmp);
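/* Illustrative multi-step flow (assumed vector types), the mirror image
   of the demotion case: promoting char -> int with MULTI_STEP_CVT == 1
   doubles the number of vectors at each level:
     step 1: { V16QI }      -> { V8HI, V8HI }
     step 2: { V8HI, V8HI } -> { V4SI, V4SI, V4SI, V4SI } */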
3160 /* Function vectorizable_type_promotion
3162 Check if STMT performs a binary or unary operation that involves
3163 type promotion, and if it can be vectorized.
3164 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3165 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3166 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3168 static bool
3169 vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
3170 gimple *vec_stmt, slp_tree slp_node)
3172 tree vec_dest;
3173 tree scalar_dest;
3174 tree op0, op1 = NULL;
3175 tree vec_oprnd0 = NULL, vec_oprnd1 = NULL;
3176 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3177 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3178 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
3179 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
3180 int op_type;
3181 tree def;
3182 gimple def_stmt;
3183 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
3184 stmt_vec_info prev_stmt_info;
3185 int nunits_in;
3186 int nunits_out;
3187 tree vectype_out;
3188 int ncopies;
3189 int j, i;
3190 tree vectype_in;
3191 tree intermediate_type = NULL_TREE;
3192 int multi_step_cvt = 0;
3193 VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
3194 VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
3196 /* FORNOW: not supported by basic block SLP vectorization. */
3197 gcc_assert (loop_vinfo);
3199 if (!STMT_VINFO_RELEVANT_P (stmt_info))
3200 return false;
3202 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3203 return false;
3205 /* Is STMT a vectorizable type-promotion operation? */
3206 if (!is_gimple_assign (stmt))
3207 return false;
3209 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
3210 return false;
3212 code = gimple_assign_rhs_code (stmt);
3213 if (!CONVERT_EXPR_CODE_P (code)
3214 && code != WIDEN_MULT_EXPR)
3215 return false;
3217 scalar_dest = gimple_assign_lhs (stmt);
3218 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3220 /* Check the operands of the operation. */
3221 op0 = gimple_assign_rhs1 (stmt);
3222 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
3223 && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
3224 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
3225 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
3226 && CONVERT_EXPR_CODE_P (code))))
3227 return false;
3228 if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
3229 &def_stmt, &def, &dt[0], &vectype_in))
3231 if (vect_print_dump_info (REPORT_DETAILS))
3232 fprintf (vect_dump, "use not simple.");
3233 return false;
3236 op_type = TREE_CODE_LENGTH (code);
3237 if (op_type == binary_op)
3239 bool ok;
3241 op1 = gimple_assign_rhs2 (stmt);
3242 if (code == WIDEN_MULT_EXPR)
3244 /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
3245 OP1. */
3246 if (CONSTANT_CLASS_P (op0))
3247 ok = vect_is_simple_use_1 (op1, loop_vinfo, NULL,
3248 &def_stmt, &def, &dt[1], &vectype_in);
3249 else
3250 ok = vect_is_simple_use (op1, loop_vinfo, NULL, &def_stmt, &def,
3251 &dt[1]);
3253 if (!ok)
3255 if (vect_print_dump_info (REPORT_DETAILS))
3256 fprintf (vect_dump, "use not simple.");
3257 return false;
3262 /* If op0 is an external or constant def, use a vector type with
3263 the same size as the output vector type. */
3264 if (!vectype_in)
3265 vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
3266 if (vec_stmt)
3267 gcc_assert (vectype_in);
3268 if (!vectype_in)
3270 if (vect_print_dump_info (REPORT_DETAILS))
3272 fprintf (vect_dump, "no vectype for scalar type ");
3273 print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);
3276 return false;
3279 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
3280 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
3281 if (nunits_in <= nunits_out)
3282 return false;
3284 /* Multiple types in SLP are handled by creating the appropriate number of
3285 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3286 case of SLP. */
3287 if (slp_node || PURE_SLP_STMT (stmt_info))
3288 ncopies = 1;
3289 else
3290 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
3292 gcc_assert (ncopies >= 1);
3294 /* Supportable by target? */
3295 if (!supportable_widening_operation (code, stmt, vectype_out, vectype_in,
3296 &decl1, &decl2, &code1, &code2,
3297 &multi_step_cvt, &interm_types))
3298 return false;
3300 /* Binary widening operation can only be supported directly by the
3301 architecture. */
3302 gcc_assert (!(multi_step_cvt && op_type == binary_op));
3304 if (!vec_stmt) /* transformation not required. */
3306 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
3307 if (vect_print_dump_info (REPORT_DETAILS))
3308 fprintf (vect_dump, "=== vectorizable_promotion ===");
3309 vect_model_simple_cost (stmt_info, 2*ncopies, dt, NULL);
3310 return true;
3313 /** Transform. **/
3315 if (vect_print_dump_info (REPORT_DETAILS))
3316 fprintf (vect_dump, "transform type promotion operation. ncopies = %d.",
3317 ncopies);
3319 if (code == WIDEN_MULT_EXPR)
3321 if (CONSTANT_CLASS_P (op0))
3322 op0 = fold_convert (TREE_TYPE (op1), op0);
3323 else if (CONSTANT_CLASS_P (op1))
3324 op1 = fold_convert (TREE_TYPE (op0), op1);
3327 /* Handle def. */
3328 /* In case of multi-step promotion, we first generate promotion operations
3329 to the intermediate types, and then from those types to the final one.
3330 We store the vector destinations in VEC_DSTS in the correct order for
3331 recursive creation of promotion operations in
3332 vect_create_vectorized_promotion_stmts(). Vector destinations are created
3333 according to TYPES received from supportable_widening_operation(). */
3334 if (multi_step_cvt)
3335 vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
3336 else
3337 vec_dsts = VEC_alloc (tree, heap, 1);
3339 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
3340 VEC_quick_push (tree, vec_dsts, vec_dest);
3342 if (multi_step_cvt)
3344 for (i = VEC_length (tree, interm_types) - 1;
3345 VEC_iterate (tree, interm_types, i, intermediate_type); i--)
3347 vec_dest = vect_create_destination_var (scalar_dest,
3348 intermediate_type);
3349 VEC_quick_push (tree, vec_dsts, vec_dest);
3353 if (!slp_node)
3355 vec_oprnds0 = VEC_alloc (tree, heap,
3356 (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
3357 if (op_type == binary_op)
3358 vec_oprnds1 = VEC_alloc (tree, heap, 1);
3361 /* In case the vectorization factor (VF) is bigger than the number
3362 of elements that we can fit in a vectype (nunits), we have to generate
3363 more than one vector stmt - i.e - we need to "unroll" the
3364 vector stmt by a factor VF/nunits. */
3366 prev_stmt_info = NULL;
3367 for (j = 0; j < ncopies; j++)
3369 /* Handle uses. */
3370 if (j == 0)
3372 if (slp_node)
3373 vect_get_slp_defs (op0, op1, slp_node, &vec_oprnds0,
3374 &vec_oprnds1, -1);
3375 else
3377 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
3378 VEC_quick_push (tree, vec_oprnds0, vec_oprnd0);
3379 if (op_type == binary_op)
3381 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL);
3382 VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
3386 else
3388 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
3389 VEC_replace (tree, vec_oprnds0, 0, vec_oprnd0);
3390 if (op_type == binary_op)
3392 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1);
3393 VEC_replace (tree, vec_oprnds1, 0, vec_oprnd1);
3397 /* Arguments are ready. Create the new vector stmts. */
3398 tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
3399 vect_create_vectorized_promotion_stmts (&vec_oprnds0, &vec_oprnds1,
3400 multi_step_cvt, stmt,
3401 tmp_vec_dsts,
3402 gsi, slp_node, code1, code2,
3403 decl1, decl2, op_type,
3404 &prev_stmt_info);
3407 VEC_free (tree, heap, vec_dsts);
3408 VEC_free (tree, heap, tmp_vec_dsts);
3409 VEC_free (tree, heap, interm_types);
3410 VEC_free (tree, heap, vec_oprnds0);
3411 VEC_free (tree, heap, vec_oprnds1);
3413 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3414 return true;
3418 /* Function vectorizable_store.
3420 Check if STMT defines a non scalar data-ref (array/pointer/structure) that
3421 can be vectorized.
3422 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3423 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3424 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3426 static bool
3427 vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
3428 slp_tree slp_node)
3430 tree scalar_dest;
3431 tree data_ref;
3432 tree op;
3433 tree vec_oprnd = NULL_TREE;
3434 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3435 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
3436 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3437 tree elem_type;
3438 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3439 struct loop *loop = NULL;
3440 enum machine_mode vec_mode;
3441 tree dummy;
3442 enum dr_alignment_support alignment_support_scheme;
3443 tree def;
3444 gimple def_stmt;
3445 enum vect_def_type dt;
3446 stmt_vec_info prev_stmt_info = NULL;
3447 tree dataref_ptr = NULL_TREE;
3448 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3449 int ncopies;
3450 int j;
3451 gimple next_stmt, first_stmt = NULL;
3452 bool strided_store = false;
3453 bool store_lanes_p = false;
3454 unsigned int group_size, i;
3455 VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
3456 bool inv_p;
3457 VEC(tree,heap) *vec_oprnds = NULL;
3458 bool slp = (slp_node != NULL);
3459 unsigned int vec_num;
3460 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3461 tree aggr_type;
3463 if (loop_vinfo)
3464 loop = LOOP_VINFO_LOOP (loop_vinfo);
3466 /* Multiple types in SLP are handled by creating the appropriate number of
3467 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3468 case of SLP. */
3469 if (slp || PURE_SLP_STMT (stmt_info))
3470 ncopies = 1;
3471 else
3472 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3474 gcc_assert (ncopies >= 1);
3476 /* FORNOW. This restriction should be relaxed. */
3477 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
3479 if (vect_print_dump_info (REPORT_DETAILS))
3480 fprintf (vect_dump, "multiple types in nested loop.");
3481 return false;
3484 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3485 return false;
3487 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3488 return false;
3490 /* Is vectorizable store? */
3492 if (!is_gimple_assign (stmt))
3493 return false;
3495 scalar_dest = gimple_assign_lhs (stmt);
3496 if (TREE_CODE (scalar_dest) != ARRAY_REF
3497 && TREE_CODE (scalar_dest) != INDIRECT_REF
3498 && TREE_CODE (scalar_dest) != COMPONENT_REF
3499 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
3500 && TREE_CODE (scalar_dest) != REALPART_EXPR
3501 && TREE_CODE (scalar_dest) != MEM_REF)
3502 return false;
3504 gcc_assert (gimple_assign_single_p (stmt));
3505 op = gimple_assign_rhs1 (stmt);
3506 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt))
3508 if (vect_print_dump_info (REPORT_DETAILS))
3509 fprintf (vect_dump, "use not simple.");
3510 return false;
3513 /* The scalar rhs type needs to be trivially convertible to the vector
3514 component type. This should always be the case. */
3515 elem_type = TREE_TYPE (vectype);
3516 if (!useless_type_conversion_p (elem_type, TREE_TYPE (op)))
3518 if (vect_print_dump_info (REPORT_DETAILS))
3519 fprintf (vect_dump, "??? operands of different types");
3520 return false;
3523 vec_mode = TYPE_MODE (vectype);
3524 /* FORNOW. In some cases we can vectorize even if the data-type is not
3525 supported (e.g. - array initialization with 0). */
3526 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
3527 return false;
3529 if (!STMT_VINFO_DATA_REF (stmt_info))
3530 return false;
3532 if (tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0)
3534 if (vect_print_dump_info (REPORT_DETAILS))
3535 fprintf (vect_dump, "negative step for store.");
3536 return false;
3539 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
3541 strided_store = true;
3542 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
3543 if (!slp && !PURE_SLP_STMT (stmt_info))
3545 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
3546 if (vect_store_lanes_supported (vectype, group_size))
3547 store_lanes_p = true;
3548 else if (!vect_strided_store_supported (vectype, group_size))
3549 return false;
3552 if (first_stmt == stmt)
3554 /* STMT is the leader of the group. Check the operands of all the
3555 stmts of the group. */
3556 next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
3557 while (next_stmt)
3559 gcc_assert (gimple_assign_single_p (next_stmt));
3560 op = gimple_assign_rhs1 (next_stmt);
3561 if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt,
3562 &def, &dt))
3564 if (vect_print_dump_info (REPORT_DETAILS))
3565 fprintf (vect_dump, "use not simple.");
3566 return false;
3568 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
3573 if (!vec_stmt) /* transformation not required. */
3575 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
3576 vect_model_store_cost (stmt_info, ncopies, store_lanes_p, dt, NULL);
3577 return true;
3580 /** Transform. **/
3582 if (strided_store)
3584 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3585 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
3587 GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
3589 /* FORNOW */
3590 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
3592 /* We vectorize all the stmts of the interleaving group when we
3593 reach the last stmt in the group. */
3594 if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
3595 < GROUP_SIZE (vinfo_for_stmt (first_stmt))
3596 && !slp)
3598 *vec_stmt = NULL;
3599 return true;
3602 if (slp)
3604 strided_store = false;
3605 /* VEC_NUM is the number of vect stmts to be created for this
3606 group. */
3607 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3608 first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
3609 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
3611 else
3612 /* VEC_NUM is the number of vect stmts to be created for this
3613 group. */
3614 vec_num = group_size;
3616 else
3618 first_stmt = stmt;
3619 first_dr = dr;
3620 group_size = vec_num = 1;
3623 if (vect_print_dump_info (REPORT_DETAILS))
3624 fprintf (vect_dump, "transform store. ncopies = %d",ncopies);
3626 dr_chain = VEC_alloc (tree, heap, group_size);
3627 oprnds = VEC_alloc (tree, heap, group_size);
3629 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
3630 gcc_assert (alignment_support_scheme);
3631 /* Targets with store-lane instructions must not require explicit
3632 realignment. */
3633 gcc_assert (!store_lanes_p
3634 || alignment_support_scheme == dr_aligned
3635 || alignment_support_scheme == dr_unaligned_supported);
3637 if (store_lanes_p)
3638 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
3639 else
3640 aggr_type = vectype;
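/* E.g. (an assumed case): a group of 2 V4SI vectors stored with
   store-lanes uses an aggregate of type int[8], built by
   create_vector_array and written back with a single target insn. */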
3642 /* In case the vectorization factor (VF) is bigger than the number
3643 of elements that we can fit in a vectype (nunits), we have to generate
3644 more than one vector stmt - i.e - we need to "unroll" the
3645 vector stmt by a factor VF/nunits. For more details see documentation in
3646 vect_get_vec_def_for_copy_stmt. */
3648 /* In case of interleaving (non-unit strided access):
3650 S1: &base + 2 = x2
3651 S2: &base = x0
3652 S3: &base + 1 = x1
3653 S4: &base + 3 = x3
3655 We create vectorized stores starting from the base address (the access of
3656 the first stmt in the chain, S2 in the above example) when the last store
3657 stmt of the chain (S4) is reached:
3659 VS1: &base = vx2
3660 VS2: &base + vec_size*1 = vx0
3661 VS3: &base + vec_size*2 = vx1
3662 VS4: &base + vec_size*3 = vx3
3664 Then permutation statements are generated:
3666 VS5: vx5 = VEC_INTERLEAVE_HIGH_EXPR < vx0, vx3 >
3667 VS6: vx6 = VEC_INTERLEAVE_LOW_EXPR < vx0, vx3 >
3670 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3671 (the order of the data-refs in the output of vect_permute_store_chain
3672 corresponds to the order of scalar stmts in the interleaving chain - see
3673 the documentation of vect_permute_store_chain()).
3675 In case of both multiple types and interleaving, above vector stores and
3676 permutation stmts are created for every copy. The result vector stmts are
3677 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
3678 STMT_VINFO_RELATED_STMT for the next copies.
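/* For illustration, a scalar loop (a sketch, not taken from any
   testcase) whose stores form an interleaving group of size 2:

     for (i = 0; i < n; i++)
       {
         a[2*i] = x[i];       <-- GROUP_FIRST_ELEMENT (group leader)
         a[2*i+1] = y[i];     <-- last store: triggers vectorization
                                  of the whole group, as above
       }
 */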
3681 prev_stmt_info = NULL;
3682 for (j = 0; j < ncopies; j++)
3684 gimple new_stmt;
3685 gimple ptr_incr;
3687 if (j == 0)
3689 if (slp)
3691 /* Get vectorized arguments for SLP_NODE. */
3692 vect_get_slp_defs (NULL_TREE, NULL_TREE, slp_node, &vec_oprnds,
3693 NULL, -1);
3695 vec_oprnd = VEC_index (tree, vec_oprnds, 0);
3697 else
3699 /* For interleaved stores we collect vectorized defs for all the
3700 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
3701 used as an input to vect_permute_store_chain(), and OPRNDS as
3702 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
3704 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3705 OPRNDS are of size 1. */
3706 next_stmt = first_stmt;
3707 for (i = 0; i < group_size; i++)
3709 /* Since gaps are not supported for interleaved stores,
3710 GROUP_SIZE is the exact number of stmts in the chain.
3711 Therefore, NEXT_STMT cannot be NULL. In case there
3712 is no interleaving, GROUP_SIZE is 1, and only one
3713 iteration of the loop will be executed. */
3714 gcc_assert (next_stmt
3715 && gimple_assign_single_p (next_stmt));
3716 op = gimple_assign_rhs1 (next_stmt);
3718 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
3719 NULL);
3720 VEC_quick_push (tree, dr_chain, vec_oprnd);
3721 VEC_quick_push (tree, oprnds, vec_oprnd);
3722 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
3726 /* We should have caught mismatched types earlier. */
3727 gcc_assert (useless_type_conversion_p (vectype,
3728 TREE_TYPE (vec_oprnd)));
3729 dataref_ptr = vect_create_data_ref_ptr (first_stmt, aggr_type, NULL,
3730 NULL_TREE, &dummy, gsi,
3731 &ptr_incr, false, &inv_p);
3732 gcc_assert (bb_vinfo || !inv_p);
3734 else
3736 /* For interleaved stores we created vectorized defs for all the
3737 defs stored in OPRNDS in the previous iteration (previous copy).
3738 DR_CHAIN is then used as an input to vect_permute_store_chain(),
3739 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
3740 next copy.
3741 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3742 OPRNDS are of size 1. */
3743 for (i = 0; i < group_size; i++)
3745 op = VEC_index (tree, oprnds, i);
3746 vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def,
3747 &dt);
3748 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
3749 VEC_replace (tree, dr_chain, i, vec_oprnd);
3750 VEC_replace (tree, oprnds, i, vec_oprnd);
3752 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
3753 TYPE_SIZE_UNIT (aggr_type));
3756 if (store_lanes_p)
3758 tree vec_array;
3760 /* Combine all the vectors into an array. */
3761 vec_array = create_vector_array (vectype, vec_num);
3762 for (i = 0; i < vec_num; i++)
3764 vec_oprnd = VEC_index (tree, dr_chain, i);
3765 write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
3768 /* Emit:
3769 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
3770 data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
3771 new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array);
3772 gimple_call_set_lhs (new_stmt, data_ref);
3773 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3774 mark_symbols_for_renaming (new_stmt);
3776 else
3778 new_stmt = NULL;
3779 if (strided_store)
3781 result_chain = VEC_alloc (tree, heap, group_size);
3782 /* Permute. */
3783 vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
3784 &result_chain);
3787 next_stmt = first_stmt;
3788 for (i = 0; i < vec_num; i++)
3790 struct ptr_info_def *pi;
3792 if (i > 0)
3793 /* Bump the vector pointer. */
3794 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
3795 stmt, NULL_TREE);
3797 if (slp)
3798 vec_oprnd = VEC_index (tree, vec_oprnds, i);
3799 else if (strided_store)
3800 /* For strided stores vectorized defs are interleaved in
3801 vect_permute_store_chain(). */
3802 vec_oprnd = VEC_index (tree, result_chain, i);
3804 data_ref = build2 (MEM_REF, TREE_TYPE (vec_oprnd), dataref_ptr,
3805 build_int_cst (reference_alias_ptr_type
3806 (DR_REF (first_dr)), 0));
3807 pi = get_ptr_info (dataref_ptr);
3808 pi->align = TYPE_ALIGN_UNIT (vectype);
3809 if (aligned_access_p (first_dr))
3810 pi->misalign = 0;
3811 else if (DR_MISALIGNMENT (first_dr) == -1)
3813 TREE_TYPE (data_ref)
3814 = build_aligned_type (TREE_TYPE (data_ref),
3815 TYPE_ALIGN (elem_type));
3816 pi->align = TYPE_ALIGN_UNIT (elem_type);
3817 pi->misalign = 0;
3819 else
3821 TREE_TYPE (data_ref)
3822 = build_aligned_type (TREE_TYPE (data_ref),
3823 TYPE_ALIGN (elem_type));
3824 pi->misalign = DR_MISALIGNMENT (first_dr);
3827 /* Arguments are ready. Create the new vector stmt. */
3828 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
3829 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3830 mark_symbols_for_renaming (new_stmt);
3832 if (slp)
3833 continue;
3835 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
3836 if (!next_stmt)
3837 break;
3840 if (!slp)
3842 if (j == 0)
3843 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3844 else
3845 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3846 prev_stmt_info = vinfo_for_stmt (new_stmt);
3850 VEC_free (tree, heap, dr_chain);
3851 VEC_free (tree, heap, oprnds);
3852 if (result_chain)
3853 VEC_free (tree, heap, result_chain);
3854 if (vec_oprnds)
3855 VEC_free (tree, heap, vec_oprnds);
3857 return true;
3860 /* Given a vector type VECTYPE returns a builtin DECL to be used
3861 for vector permutation and stores a mask into *MASK that implements
3862 reversal of the vector elements. If that is impossible,
3863 returns NULL (and *MASK is unchanged). */
3865 static tree
3866 perm_mask_for_reverse (tree vectype, tree *mask)
3868 tree builtin_decl;
3869 tree mask_element_type, mask_type;
3870 tree mask_vec = NULL;
3871 int i;
3872 int nunits;
3873 if (!targetm.vectorize.builtin_vec_perm)
3874 return NULL;
3876 builtin_decl = targetm.vectorize.builtin_vec_perm (vectype,
3877 &mask_element_type);
3878 if (!builtin_decl || !mask_element_type)
3879 return NULL;
3881 mask_type = get_vectype_for_scalar_type (mask_element_type);
3882 nunits = TYPE_VECTOR_SUBPARTS (vectype);
3883 if (!mask_type
3884 || TYPE_VECTOR_SUBPARTS (vectype) != TYPE_VECTOR_SUBPARTS (mask_type))
3885 return NULL;
3887 for (i = 0; i < nunits; i++)
3888 mask_vec = tree_cons (NULL, build_int_cst (mask_element_type, i), mask_vec);
3889 mask_vec = build_vector (mask_type, mask_vec);
3891 if (!targetm.vectorize.builtin_vec_perm_ok (vectype, mask_vec))
3892 return NULL;
3893 if (mask)
3894 *mask = mask_vec;
3895 return builtin_decl;
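/* Because tree_cons prepends, the loop above lists the mask elements
   in the order nunits-1, ..., 1, 0.  For example (a sketch), for a
   four-element vector the mask is effectively { 3, 2, 1, 0 }, which
   selects the input elements in reverse.  */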
3898 /* Given a vector variable X that was generated for the scalar LHS of
3899 STMT, generate instructions to reverse the vector elements of X,
3900 insert them at *GSI and return the permuted vector variable. */
3902 static tree
3903 reverse_vec_elements (tree x, gimple stmt, gimple_stmt_iterator *gsi)
3905 tree vectype = TREE_TYPE (x);
3906 tree mask_vec, builtin_decl;
3907 tree perm_dest, data_ref;
3908 gimple perm_stmt;
3910 builtin_decl = perm_mask_for_reverse (vectype, &mask_vec);
3912 perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
3914 /* Generate the permute statement. */
3915 perm_stmt = gimple_build_call (builtin_decl, 3, x, x, mask_vec);
3916 if (!useless_type_conversion_p (vectype,
3917 TREE_TYPE (TREE_TYPE (builtin_decl))))
3919 tree tem = create_tmp_reg (TREE_TYPE (TREE_TYPE (builtin_decl)), NULL);
3920 tem = make_ssa_name (tem, perm_stmt);
3921 gimple_call_set_lhs (perm_stmt, tem);
3922 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
3923 perm_stmt = gimple_build_assign (NULL_TREE,
3924 build1 (VIEW_CONVERT_EXPR,
3925 vectype, tem));
3927 data_ref = make_ssa_name (perm_dest, perm_stmt);
3928 gimple_set_lhs (perm_stmt, data_ref);
3929 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
3931 return data_ref;
3934 /* vectorizable_load.
3936 Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
3937 can be vectorized.
3938 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3939 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3940 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3942 static bool
3943 vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
3944 slp_tree slp_node, slp_instance slp_node_instance)
3946 tree scalar_dest;
3947 tree vec_dest = NULL;
3948 tree data_ref = NULL;
3949 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3950 stmt_vec_info prev_stmt_info;
3951 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3952 struct loop *loop = NULL;
3953 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
3954 bool nested_in_vect_loop = false;
3955 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
3956 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3957 tree elem_type;
3958 tree new_temp;
3959 enum machine_mode mode;
3960 gimple new_stmt = NULL;
3961 tree dummy;
3962 enum dr_alignment_support alignment_support_scheme;
3963 tree dataref_ptr = NULL_TREE;
3964 gimple ptr_incr;
3965 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
3966 int ncopies;
3967 int i, j, group_size;
3968 tree msq = NULL_TREE, lsq;
3969 tree offset = NULL_TREE;
3970 tree realignment_token = NULL_TREE;
3971 gimple phi = NULL;
3972 VEC(tree,heap) *dr_chain = NULL;
3973 bool strided_load = false;
3974 bool load_lanes_p = false;
3975 gimple first_stmt;
3976 tree scalar_type;
3977 bool inv_p;
3978 bool negative;
3979 bool compute_in_loop = false;
3980 struct loop *at_loop;
3981 int vec_num;
3982 bool slp = (slp_node != NULL);
3983 bool slp_perm = false;
3984 enum tree_code code;
3985 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3986 int vf;
3987 tree aggr_type;
3989 if (loop_vinfo)
3991 loop = LOOP_VINFO_LOOP (loop_vinfo);
3992 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
3993 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3995 else
3996 vf = 1;
3998 /* Multiple types in SLP are handled by creating the appropriate number of
3999 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4000 case of SLP. */
4001 if (slp || PURE_SLP_STMT (stmt_info))
4002 ncopies = 1;
4003 else
4004 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4006 gcc_assert (ncopies >= 1);
4008 /* FORNOW. This restriction should be relaxed. */
4009 if (nested_in_vect_loop && ncopies > 1)
4011 if (vect_print_dump_info (REPORT_DETAILS))
4012 fprintf (vect_dump, "multiple types in nested loop.");
4013 return false;
4016 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4017 return false;
4019 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4020 return false;
4022 /* Is vectorizable load? */
4023 if (!is_gimple_assign (stmt))
4024 return false;
4026 scalar_dest = gimple_assign_lhs (stmt);
4027 if (TREE_CODE (scalar_dest) != SSA_NAME)
4028 return false;
4030 code = gimple_assign_rhs_code (stmt);
4031 if (code != ARRAY_REF
4032 && code != INDIRECT_REF
4033 && code != COMPONENT_REF
4034 && code != IMAGPART_EXPR
4035 && code != REALPART_EXPR
4036 && code != MEM_REF)
4037 return false;
4039 if (!STMT_VINFO_DATA_REF (stmt_info))
4040 return false;
4042 negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
4043 if (negative && ncopies > 1)
4045 if (vect_print_dump_info (REPORT_DETAILS))
4046 fprintf (vect_dump, "multiple types with negative step.");
4047 return false;
4050 scalar_type = TREE_TYPE (DR_REF (dr));
4051 mode = TYPE_MODE (vectype);
4053 /* FORNOW. In some cases we can vectorize even if the data-type is not
4054 supported (e.g. data copies). */
4055 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
4057 if (vect_print_dump_info (REPORT_DETAILS))
4058 fprintf (vect_dump, "Aligned load, but unsupported type.");
4059 return false;
4062 /* The vector component type needs to be trivially convertible to the
4063 scalar lhs. This should always be the case. */
4064 elem_type = TREE_TYPE (vectype);
4065 if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), elem_type))
4067 if (vect_print_dump_info (REPORT_DETAILS))
4068 fprintf (vect_dump, "??? operands of different types");
4069 return false;
4072 /* Check if the load is a part of an interleaving chain. */
4073 if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
4075 strided_load = true;
4076 /* FORNOW */
4077 gcc_assert (! nested_in_vect_loop);
4079 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
4080 if (!slp && !PURE_SLP_STMT (stmt_info))
4082 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
4083 if (vect_load_lanes_supported (vectype, group_size))
4084 load_lanes_p = true;
4085 else if (!vect_strided_load_supported (vectype, group_size))
4086 return false;
4090 if (negative)
4092 gcc_assert (!strided_load);
4093 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
4094 if (alignment_support_scheme != dr_aligned
4095 && alignment_support_scheme != dr_unaligned_supported)
4097 if (vect_print_dump_info (REPORT_DETAILS))
4098 fprintf (vect_dump, "negative step but alignment required.");
4099 return false;
4101 if (!perm_mask_for_reverse (vectype, NULL))
4103 if (vect_print_dump_info (REPORT_DETAILS))
4104 fprintf (vect_dump, "negative step and reversing not supported.");
4105 return false;
4109 if (!vec_stmt) /* transformation not required. */
4111 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
4112 vect_model_load_cost (stmt_info, ncopies, load_lanes_p, NULL);
4113 return true;
4116 if (vect_print_dump_info (REPORT_DETAILS))
4117 fprintf (vect_dump, "transform load. ncopies = %d", ncopies);
4119 /** Transform. **/
4121 if (strided_load)
4123 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
4124 /* Check if the chain of loads is already vectorized. */
4125 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt)))
4127 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4128 return true;
4130 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
4131 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
4133 /* VEC_NUM is the number of vect stmts to be created for this group. */
4134 if (slp)
4136 strided_load = false;
4137 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
4138 if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance))
4139 slp_perm = true;
4141 else
4142 vec_num = group_size;
4144 else
4146 first_stmt = stmt;
4147 first_dr = dr;
4148 group_size = vec_num = 1;
4151 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
4152 gcc_assert (alignment_support_scheme);
4153 /* Targets with load-lane instructions must not require explicit
4154 realignment. */
4155 gcc_assert (!load_lanes_p
4156 || alignment_support_scheme == dr_aligned
4157 || alignment_support_scheme == dr_unaligned_supported);
4159 /* In case the vectorization factor (VF) is bigger than the number
4160 of elements that we can fit in a vectype (nunits), we have to generate
4161 more than one vector stmt - i.e - we need to "unroll" the
4162 vector stmt by a factor VF/nunits. In doing so, we record a pointer
4163 from one copy of the vector stmt to the next, in the field
4164 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
4165 stages to find the correct vector defs to be used when vectorizing
4166 stmts that use the defs of the current stmt. The example below
4167 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
4168 need to create 4 vectorized stmts):
4170 before vectorization:
4171 RELATED_STMT VEC_STMT
4172 S1: x = memref - -
4173 S2: z = x + 1 - -
4175 step 1: vectorize stmt S1:
4176 We first create the vector stmt VS1_0, and, as usual, record a
4177 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
4178 Next, we create the vector stmt VS1_1, and record a pointer to
4179 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
4180 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
4181 stmts and pointers:
4182 RELATED_STMT VEC_STMT
4183 VS1_0: vx0 = memref0 VS1_1 -
4184 VS1_1: vx1 = memref1 VS1_2 -
4185 VS1_2: vx2 = memref2 VS1_3 -
4186 VS1_3: vx3 = memref3 - -
4187 S1: x = load - VS1_0
4188 S2: z = x + 1 - -
4190 See the documentation of vect_get_vec_def_for_stmt_copy for how the
4191 information we recorded in RELATED_STMT field is used to vectorize
4192 stmt S2. */
4194 /* In case of interleaving (non-unit strided access):
4196 S1: x2 = &base + 2
4197 S2: x0 = &base
4198 S3: x1 = &base + 1
4199 S4: x3 = &base + 3
4201 Vectorized loads are created in the order of memory accesses
4202 starting from the access of the first stmt of the chain:
4204 VS1: vx0 = &base
4205 VS2: vx1 = &base + vec_size*1
4206 VS3: vx3 = &base + vec_size*2
4207 VS4: vx4 = &base + vec_size*3
4209 Then permutation statements are generated:
4211 VS5: vx5 = VEC_EXTRACT_EVEN_EXPR < vx0, vx1 >
4212 VS6: vx6 = VEC_EXTRACT_ODD_EXPR < vx0, vx1 >
4215 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
4216 (the order of the data-refs in the output of vect_permute_load_chain
4217 corresponds to the order of scalar stmts in the interleaving chain - see
4218 the documentation of vect_permute_load_chain()).
4219 The generation of permutation stmts and recording them in
4220 STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().
4222 In case of both multiple types and interleaving, the vector loads and
4223 permutation stmts above are created for every copy. The result vector
4224 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
4225 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
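/* For illustration, a scalar loop (a sketch) whose loads form an
   interleaving group of size 2, matching the S1-S4 pattern above:

     for (i = 0; i < n; i++)
       {
         x[i] = a[2*i];       <-- GROUP_FIRST_ELEMENT
         y[i] = a[2*i+1];
       }
 */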
4227 /* If the data reference is aligned (dr_aligned) or potentially unaligned
4228 on a target that supports unaligned accesses (dr_unaligned_supported)
4229 we generate the following code:
4230 p = initial_addr;
4231 indx = 0;
4232 loop {
4233 p = p + indx * vectype_size;
4234 vec_dest = *(p);
4235 indx = indx + 1;
4238 Otherwise, the data reference is potentially unaligned on a target that
4239 does not support unaligned accesses (dr_explicit_realign_optimized) -
4240 then generate the following code, in which the data in each iteration is
4241 obtained by two vector loads, one from the previous iteration, and one
4242 from the current iteration:
4243 p1 = initial_addr;
4244 msq_init = *(floor(p1))
4245 p2 = initial_addr + VS - 1;
4246 realignment_token = call target_builtin;
4247 indx = 0;
4248 loop {
4249 p2 = p2 + indx * vectype_size
4250 lsq = *(floor(p2))
4251 vec_dest = realign_load (msq, lsq, realignment_token)
4252 indx = indx + 1;
4253 msq = lsq;
4254 } */
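/* As a concrete sketch of the scheme above, assume a 16-byte vector
   of 16 chars and initial_addr p = 0x1003:

     msq = *(floor(p))  = *(p & -16)  = 16 bytes at 0x1000
     p2  = p + 15       = 0x1012
     lsq = *(floor(p2))               = 16 bytes at 0x1010

   realign_load then extracts the 16 bytes starting at 0x1003 from the
   concatenation of msq and lsq.  The addresses are illustrative
   only.  */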
4256 /* If the misalignment remains the same throughout the execution of the
4257 loop, we can create the init_addr and permutation mask at the loop
4258 preheader. Otherwise, it needs to be created inside the loop.
4259 This can only occur when vectorizing memory accesses in the inner-loop
4260 nested within an outer-loop that is being vectorized. */
4262 if (loop && nested_in_vect_loop_p (loop, stmt)
4263 && (TREE_INT_CST_LOW (DR_STEP (dr))
4264 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
4266 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
4267 compute_in_loop = true;
4270 if ((alignment_support_scheme == dr_explicit_realign_optimized
4271 || alignment_support_scheme == dr_explicit_realign)
4272 && !compute_in_loop)
4274 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
4275 alignment_support_scheme, NULL_TREE,
4276 &at_loop);
4277 if (alignment_support_scheme == dr_explicit_realign_optimized)
4279 phi = SSA_NAME_DEF_STMT (msq);
4280 offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
4283 else
4284 at_loop = loop;
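/* A note on the negative-step case below (an explanatory sketch):
   the scalar accesses walk backwards through memory, so the initial
   address is adjusted by nunits - 1 elements so that a single
   (forward) vector load covers exactly the elements the scalar code
   accesses; the loaded vector is then put back into scalar order by
   reverse_vec_elements further below.  */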
4286 if (negative)
4287 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
4289 if (load_lanes_p)
4290 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
4291 else
4292 aggr_type = vectype;
4294 prev_stmt_info = NULL;
4295 for (j = 0; j < ncopies; j++)
4297 /* 1. Create the vector or array pointer update chain. */
4298 if (j == 0)
4299 dataref_ptr = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
4300 offset, &dummy, gsi,
4301 &ptr_incr, false, &inv_p);
4302 else
4303 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
4304 TYPE_SIZE_UNIT (aggr_type));
4306 if (strided_load || slp_perm)
4307 dr_chain = VEC_alloc (tree, heap, vec_num);
4309 if (load_lanes_p)
4311 tree vec_array;
4313 vec_array = create_vector_array (vectype, vec_num);
4315 /* Emit:
4316 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
4317 data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
4318 new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
4319 gimple_call_set_lhs (new_stmt, vec_array);
4320 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4321 mark_symbols_for_renaming (new_stmt);
4323 /* Extract each vector into an SSA_NAME. */
4324 for (i = 0; i < vec_num; i++)
4326 new_temp = read_vector_array (stmt, gsi, scalar_dest,
4327 vec_array, i);
4328 VEC_quick_push (tree, dr_chain, new_temp);
4331 /* Record the mapping between SSA_NAMEs and statements. */
4332 vect_record_strided_load_vectors (stmt, dr_chain);
4334 else
4336 for (i = 0; i < vec_num; i++)
4338 if (i > 0)
4339 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
4340 stmt, NULL_TREE);
4342 /* 2. Create the vector-load in the loop. */
4343 switch (alignment_support_scheme)
4345 case dr_aligned:
4346 case dr_unaligned_supported:
4348 struct ptr_info_def *pi;
4349 data_ref
4350 = build2 (MEM_REF, vectype, dataref_ptr,
4351 build_int_cst (reference_alias_ptr_type
4352 (DR_REF (first_dr)), 0));
4353 pi = get_ptr_info (dataref_ptr);
4354 pi->align = TYPE_ALIGN_UNIT (vectype);
4355 if (alignment_support_scheme == dr_aligned)
4357 gcc_assert (aligned_access_p (first_dr));
4358 pi->misalign = 0;
4360 else if (DR_MISALIGNMENT (first_dr) == -1)
4362 TREE_TYPE (data_ref)
4363 = build_aligned_type (TREE_TYPE (data_ref),
4364 TYPE_ALIGN (elem_type));
4365 pi->align = TYPE_ALIGN_UNIT (elem_type);
4366 pi->misalign = 0;
4368 else
4370 TREE_TYPE (data_ref)
4371 = build_aligned_type (TREE_TYPE (data_ref),
4372 TYPE_ALIGN (elem_type));
4373 pi->misalign = DR_MISALIGNMENT (first_dr);
4375 break;
4377 case dr_explicit_realign:
4379 tree ptr, bump;
4380 tree vs_minus_1;
4382 vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
4384 if (compute_in_loop)
4385 msq = vect_setup_realignment (first_stmt, gsi,
4386 &realignment_token,
4387 dr_explicit_realign,
4388 dataref_ptr, NULL);
4390 new_stmt = gimple_build_assign_with_ops
4391 (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
4392 build_int_cst
4393 (TREE_TYPE (dataref_ptr),
4394 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
4395 ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
4396 gimple_assign_set_lhs (new_stmt, ptr);
4397 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4398 data_ref
4399 = build2 (MEM_REF, vectype, ptr,
4400 build_int_cst (reference_alias_ptr_type
4401 (DR_REF (first_dr)), 0));
4402 vec_dest = vect_create_destination_var (scalar_dest,
4403 vectype);
4404 new_stmt = gimple_build_assign (vec_dest, data_ref);
4405 new_temp = make_ssa_name (vec_dest, new_stmt);
4406 gimple_assign_set_lhs (new_stmt, new_temp);
4407 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
4408 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
4409 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4410 msq = new_temp;
4412 bump = size_binop (MULT_EXPR, vs_minus_1,
4413 TYPE_SIZE_UNIT (scalar_type));
4414 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
4415 new_stmt = gimple_build_assign_with_ops
4416 (BIT_AND_EXPR, NULL_TREE, ptr,
4417 build_int_cst
4418 (TREE_TYPE (ptr),
4419 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
4420 ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
4421 gimple_assign_set_lhs (new_stmt, ptr);
4422 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4423 data_ref
4424 = build2 (MEM_REF, vectype, ptr,
4425 build_int_cst (reference_alias_ptr_type
4426 (DR_REF (first_dr)), 0));
4427 break;
4429 case dr_explicit_realign_optimized:
4430 new_stmt = gimple_build_assign_with_ops
4431 (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
4432 build_int_cst
4433 (TREE_TYPE (dataref_ptr),
4434 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
4435 new_temp = make_ssa_name (SSA_NAME_VAR (dataref_ptr),
4436 new_stmt);
4437 gimple_assign_set_lhs (new_stmt, new_temp);
4438 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4439 data_ref
4440 = build2 (MEM_REF, vectype, new_temp,
4441 build_int_cst (reference_alias_ptr_type
4442 (DR_REF (first_dr)), 0));
4443 break;
4444 default:
4445 gcc_unreachable ();
4447 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4448 new_stmt = gimple_build_assign (vec_dest, data_ref);
4449 new_temp = make_ssa_name (vec_dest, new_stmt);
4450 gimple_assign_set_lhs (new_stmt, new_temp);
4451 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4452 mark_symbols_for_renaming (new_stmt);
4454 /* 3. Handle explicit realignment if necessary/supported.
4455 Create in loop:
4456 vec_dest = realign_load (msq, lsq, realignment_token) */
4457 if (alignment_support_scheme == dr_explicit_realign_optimized
4458 || alignment_support_scheme == dr_explicit_realign)
4460 lsq = gimple_assign_lhs (new_stmt);
4461 if (!realignment_token)
4462 realignment_token = dataref_ptr;
4463 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4464 new_stmt
4465 = gimple_build_assign_with_ops3 (REALIGN_LOAD_EXPR,
4466 vec_dest, msq, lsq,
4467 realignment_token);
4468 new_temp = make_ssa_name (vec_dest, new_stmt);
4469 gimple_assign_set_lhs (new_stmt, new_temp);
4470 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4472 if (alignment_support_scheme == dr_explicit_realign_optimized)
4474 gcc_assert (phi);
4475 if (i == vec_num - 1 && j == ncopies - 1)
4476 add_phi_arg (phi, lsq,
4477 loop_latch_edge (containing_loop),
4478 UNKNOWN_LOCATION);
4479 msq = lsq;
4483 /* 4. Handle invariant-load. */
4484 if (inv_p && !bb_vinfo)
4486 gcc_assert (!strided_load);
4487 gcc_assert (nested_in_vect_loop_p (loop, stmt));
4488 if (j == 0)
4490 int k;
4491 tree t = NULL_TREE;
4492 tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);
4494 /* CHECKME: bitpos depends on endianness? */
4495 bitpos = bitsize_zero_node;
4496 vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
4497 bitsize, bitpos);
4498 vec_dest = vect_create_destination_var (scalar_dest,
4499 NULL_TREE);
4500 new_stmt = gimple_build_assign (vec_dest, vec_inv);
4501 new_temp = make_ssa_name (vec_dest, new_stmt);
4502 gimple_assign_set_lhs (new_stmt, new_temp);
4503 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4505 for (k = nunits - 1; k >= 0; --k)
4506 t = tree_cons (NULL_TREE, new_temp, t);
4507 /* FIXME: use build_constructor directly. */
4508 vec_inv = build_constructor_from_list (vectype, t);
4509 new_temp = vect_init_vector (stmt, vec_inv,
4510 vectype, gsi);
4511 new_stmt = SSA_NAME_DEF_STMT (new_temp);
4513 else
4514 gcc_unreachable (); /* FORNOW. */
4517 if (negative)
4519 new_temp = reverse_vec_elements (new_temp, stmt, gsi);
4520 new_stmt = SSA_NAME_DEF_STMT (new_temp);
4523 /* Collect vector loads and later create their permutation in
4524 vect_transform_strided_load (). */
4525 if (strided_load || slp_perm)
4526 VEC_quick_push (tree, dr_chain, new_temp);
4528 /* Store vector loads in the corresponding SLP_NODE. */
4529 if (slp && !slp_perm)
4530 VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node),
4531 new_stmt);
4535 if (slp && !slp_perm)
4536 continue;
4538 if (slp_perm)
4540 if (!vect_transform_slp_perm_load (stmt, dr_chain, gsi, vf,
4541 slp_node_instance, false))
4543 VEC_free (tree, heap, dr_chain);
4544 return false;
4547 else
4549 if (strided_load)
4551 if (!load_lanes_p)
4552 vect_transform_strided_load (stmt, dr_chain, group_size, gsi);
4553 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4555 else
4557 if (j == 0)
4558 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4559 else
4560 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4561 prev_stmt_info = vinfo_for_stmt (new_stmt);
4564 if (dr_chain)
4565 VEC_free (tree, heap, dr_chain);
4568 return true;
4571 /* Function vect_is_simple_cond.
4573 Input:
4574 LOOP - the loop that is being vectorized.
4575 COND - Condition that is checked for simple use.
4577 Returns whether a COND can be vectorized. Checks whether
4578 condition operands are supportable using vect_is_simple_use. */
4580 static bool
4581 vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo)
4583 tree lhs, rhs;
4584 tree def;
4585 enum vect_def_type dt;
4587 if (!COMPARISON_CLASS_P (cond))
4588 return false;
4590 lhs = TREE_OPERAND (cond, 0);
4591 rhs = TREE_OPERAND (cond, 1);
4593 if (TREE_CODE (lhs) == SSA_NAME)
4595 gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
4596 if (!vect_is_simple_use (lhs, loop_vinfo, NULL, &lhs_def_stmt, &def,
4597 &dt))
4598 return false;
4600 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
4601 && TREE_CODE (lhs) != FIXED_CST)
4602 return false;
4604 if (TREE_CODE (rhs) == SSA_NAME)
4606 gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
4607 if (!vect_is_simple_use (rhs, loop_vinfo, NULL, &rhs_def_stmt, &def,
4608 &dt))
4609 return false;
4611 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
4612 && TREE_CODE (rhs) != FIXED_CST)
4613 return false;
4615 return true;
4618 /* vectorizable_condition.
4620 Check if STMT is a conditional modify expression that can be vectorized.
4621 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4622 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
4623 at GSI.
4625 When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
4626 to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
4627 the else clause if it is 2).
4629 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
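/* For example (a sketch of the transformation performed below), a
   scalar stmt

     x_1 = a_2 < b_3 ? c_4 : d_5;

   is vectorized as

     vx_1 = VEC_COND_EXPR <va_2 < vb_3, vc_4, vd_5>;

   where the vectorized condition operands and clauses are gathered
   by the j-loop below.  */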
4631 bool
4632 vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
4633 gimple *vec_stmt, tree reduc_def, int reduc_index)
4635 tree scalar_dest = NULL_TREE;
4636 tree vec_dest = NULL_TREE;
4637 tree op = NULL_TREE;
4638 tree cond_expr, then_clause, else_clause;
4639 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4640 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4641 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
4642 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
4643 tree vec_compare, vec_cond_expr;
4644 tree new_temp;
4645 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4646 enum machine_mode vec_mode;
4647 tree def;
4648 enum vect_def_type dt, dts[4];
4649 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4650 int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4651 enum tree_code code;
4652 stmt_vec_info prev_stmt_info = NULL;
4653 int j;
4655 /* FORNOW: unsupported in basic block SLP. */
4656 gcc_assert (loop_vinfo);
4658 /* FORNOW: SLP not supported. */
4659 if (STMT_SLP_TYPE (stmt_info))
4660 return false;
4662 gcc_assert (ncopies >= 1);
4663 if (reduc_index && ncopies > 1)
4664 return false; /* FORNOW */
4666 if (!STMT_VINFO_RELEVANT_P (stmt_info))
4667 return false;
4669 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4670 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
4671 && reduc_def))
4672 return false;
4674 /* FORNOW: not yet supported. */
4675 if (STMT_VINFO_LIVE_P (stmt_info))
4677 if (vect_print_dump_info (REPORT_DETAILS))
4678 fprintf (vect_dump, "value used after loop.");
4679 return false;
4682 /* Is vectorizable conditional operation? */
4683 if (!is_gimple_assign (stmt))
4684 return false;
4686 code = gimple_assign_rhs_code (stmt);
4688 if (code != COND_EXPR)
4689 return false;
4691 gcc_assert (gimple_assign_single_p (stmt));
4692 op = gimple_assign_rhs1 (stmt);
4693 cond_expr = TREE_OPERAND (op, 0);
4694 then_clause = TREE_OPERAND (op, 1);
4695 else_clause = TREE_OPERAND (op, 2);
4697 if (!vect_is_simple_cond (cond_expr, loop_vinfo))
4698 return false;
4700 /* We do not handle two different vector types for the condition
4701 and the values. */
4702 if (!types_compatible_p (TREE_TYPE (TREE_OPERAND (cond_expr, 0)),
4703 TREE_TYPE (vectype)))
4704 return false;
4706 if (TREE_CODE (then_clause) == SSA_NAME)
4708 gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
4709 if (!vect_is_simple_use (then_clause, loop_vinfo, NULL,
4710 &then_def_stmt, &def, &dt))
4711 return false;
4713 else if (TREE_CODE (then_clause) != INTEGER_CST
4714 && TREE_CODE (then_clause) != REAL_CST
4715 && TREE_CODE (then_clause) != FIXED_CST)
4716 return false;
4718 if (TREE_CODE (else_clause) == SSA_NAME)
4720 gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
4721 if (!vect_is_simple_use (else_clause, loop_vinfo, NULL,
4722 &else_def_stmt, &def, &dt))
4723 return false;
4725 else if (TREE_CODE (else_clause) != INTEGER_CST
4726 && TREE_CODE (else_clause) != REAL_CST
4727 && TREE_CODE (else_clause) != FIXED_CST)
4728 return false;
4731 vec_mode = TYPE_MODE (vectype);
4733 if (!vec_stmt)
4735 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
4736 return expand_vec_cond_expr_p (TREE_TYPE (op), vec_mode);
4739 /* Transform */
4741 /* Handle def. */
4742 scalar_dest = gimple_assign_lhs (stmt);
4743 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4745 /* Handle cond expr. */
4746 for (j = 0; j < ncopies; j++)
4748 gimple new_stmt;
4749 if (j == 0)
4751 gimple gtemp;
4752 vec_cond_lhs =
4753 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0),
4754 stmt, NULL);
4755 vect_is_simple_use (TREE_OPERAND (cond_expr, 0), loop_vinfo,
4756 NULL, &gtemp, &def, &dts[0]);
4757 vec_cond_rhs =
4758 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
4759 stmt, NULL);
4760 vect_is_simple_use (TREE_OPERAND (cond_expr, 1), loop_vinfo,
4761 NULL, &gtemp, &def, &dts[1]);
4762 if (reduc_index == 1)
4763 vec_then_clause = reduc_def;
4764 else
4766 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
4767 stmt, NULL);
4768 vect_is_simple_use (then_clause, loop_vinfo,
4769 NULL, &gtemp, &def, &dts[2]);
4771 if (reduc_index == 2)
4772 vec_else_clause = reduc_def;
4773 else
4775 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
4776 stmt, NULL);
4777 vect_is_simple_use (else_clause, loop_vinfo,
4778 NULL, &gtemp, &def, &dts[3]);
4781 else
4783 vec_cond_lhs = vect_get_vec_def_for_stmt_copy (dts[0], vec_cond_lhs);
4784 vec_cond_rhs = vect_get_vec_def_for_stmt_copy (dts[1], vec_cond_rhs);
4785 vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
4786 vec_then_clause);
4787 vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
4788 vec_else_clause);
4791 /* Arguments are ready. Create the new vector stmt. */
4792 vec_compare = build2 (TREE_CODE (cond_expr), vectype,
4793 vec_cond_lhs, vec_cond_rhs);
4794 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
4795 vec_compare, vec_then_clause, vec_else_clause);
4797 new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
4798 new_temp = make_ssa_name (vec_dest, new_stmt);
4799 gimple_assign_set_lhs (new_stmt, new_temp);
4800 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4801 if (j == 0)
4802 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4803 else
4804 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4806 prev_stmt_info = vinfo_for_stmt (new_stmt);
4809 return true;
4813 /* Make sure the statement is vectorizable. */
4815 bool
4816 vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
4818 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4819 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4820 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
4821 bool ok;
4822 tree scalar_type, vectype;
4824 if (vect_print_dump_info (REPORT_DETAILS))
4826 fprintf (vect_dump, "==> examining statement: ");
4827 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4830 if (gimple_has_volatile_ops (stmt))
4832 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4833 fprintf (vect_dump, "not vectorized: stmt has volatile operands");
4835 return false;
4838 /* Skip stmts that do not need to be vectorized. In loops this is expected
4839 to include:
4840 - the COND_EXPR which is the loop exit condition
4841 - any LABEL_EXPRs in the loop
4842 - computations that are used only for array indexing or loop control.
4843 In basic blocks we only analyze statements that are a part of some SLP
4844 instance, therefore, all the statements are relevant. */
4846 if (!STMT_VINFO_RELEVANT_P (stmt_info)
4847 && !STMT_VINFO_LIVE_P (stmt_info))
4849 if (vect_print_dump_info (REPORT_DETAILS))
4850 fprintf (vect_dump, "irrelevant.");
4852 return true;
4855 switch (STMT_VINFO_DEF_TYPE (stmt_info))
4857 case vect_internal_def:
4858 break;
4860 case vect_reduction_def:
4861 case vect_nested_cycle:
4862 gcc_assert (!bb_vinfo && (relevance == vect_used_in_outer
4863 || relevance == vect_used_in_outer_by_reduction
4864 || relevance == vect_unused_in_scope));
4865 break;
4867 case vect_induction_def:
4868 case vect_constant_def:
4869 case vect_external_def:
4870 case vect_unknown_def_type:
4871 default:
4872 gcc_unreachable ();
4875 if (bb_vinfo)
4877 gcc_assert (PURE_SLP_STMT (stmt_info));
4879 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
4880 if (vect_print_dump_info (REPORT_DETAILS))
4882 fprintf (vect_dump, "get vectype for scalar type: ");
4883 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4886 vectype = get_vectype_for_scalar_type (scalar_type);
4887 if (!vectype)
4889 if (vect_print_dump_info (REPORT_DETAILS))
4891 fprintf (vect_dump, "not SLPed: unsupported data-type ");
4892 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
4894 return false;
4897 if (vect_print_dump_info (REPORT_DETAILS))
4899 fprintf (vect_dump, "vectype: ");
4900 print_generic_expr (vect_dump, vectype, TDF_SLIM);
4903 STMT_VINFO_VECTYPE (stmt_info) = vectype;
4906 if (STMT_VINFO_RELEVANT_P (stmt_info))
4908 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
4909 gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
4910 *need_to_vectorize = true;
4913 ok = true;
4914 if (!bb_vinfo
4915 && (STMT_VINFO_RELEVANT_P (stmt_info)
4916 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
4917 ok = (vectorizable_type_promotion (stmt, NULL, NULL, NULL)
4918 || vectorizable_type_demotion (stmt, NULL, NULL, NULL)
4919 || vectorizable_conversion (stmt, NULL, NULL, NULL)
4920 || vectorizable_shift (stmt, NULL, NULL, NULL)
4921 || vectorizable_operation (stmt, NULL, NULL, NULL)
4922 || vectorizable_assignment (stmt, NULL, NULL, NULL)
4923 || vectorizable_load (stmt, NULL, NULL, NULL, NULL)
4924 || vectorizable_call (stmt, NULL, NULL)
4925 || vectorizable_store (stmt, NULL, NULL, NULL)
4926 || vectorizable_reduction (stmt, NULL, NULL, NULL)
4927 || vectorizable_condition (stmt, NULL, NULL, NULL, 0));
4928 else
4930 if (bb_vinfo)
4931 ok = (vectorizable_shift (stmt, NULL, NULL, node)
4932 || vectorizable_operation (stmt, NULL, NULL, node)
4933 || vectorizable_assignment (stmt, NULL, NULL, node)
4934 || vectorizable_load (stmt, NULL, NULL, node, NULL)
4935 || vectorizable_store (stmt, NULL, NULL, node));
4938 if (!ok)
4940 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4942 fprintf (vect_dump, "not vectorized: relevant stmt not ");
4943 fprintf (vect_dump, "supported: ");
4944 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4947 return false;
4950 if (bb_vinfo)
4951 return true;
4953 /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
4954 need extra handling, except for vectorizable reductions. */
4955 if (STMT_VINFO_LIVE_P (stmt_info)
4956 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
4957 ok = vectorizable_live_operation (stmt, NULL, NULL);
4959 if (!ok)
4961 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
4963 fprintf (vect_dump, "not vectorized: live stmt not ");
4964 fprintf (vect_dump, "supported: ");
4965 print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
4968 return false;
4971 return true;
4975 /* Function vect_transform_stmt.
4977 Create a vectorized stmt to replace STMT, and insert it at GSI. */
4979 bool
4980 vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
4981 bool *strided_store, slp_tree slp_node,
4982 slp_instance slp_node_instance)
4984 bool is_store = false;
4985 gimple vec_stmt = NULL;
4986 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4987 gimple orig_stmt_in_pattern, orig_scalar_stmt = stmt;
4988 bool done;
4990 switch (STMT_VINFO_TYPE (stmt_info))
4992 case type_demotion_vec_info_type:
4993 done = vectorizable_type_demotion (stmt, gsi, &vec_stmt, slp_node);
4994 gcc_assert (done);
4995 break;
4997 case type_promotion_vec_info_type:
4998 done = vectorizable_type_promotion (stmt, gsi, &vec_stmt, slp_node);
4999 gcc_assert (done);
5000 break;
5002 case type_conversion_vec_info_type:
5003 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
5004 gcc_assert (done);
5005 break;
5007 case induc_vec_info_type:
5008 gcc_assert (!slp_node);
5009 done = vectorizable_induction (stmt, gsi, &vec_stmt);
5010 gcc_assert (done);
5011 break;
5013 case shift_vec_info_type:
5014 done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
5015 gcc_assert (done);
5016 break;
5018 case op_vec_info_type:
5019 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
5020 gcc_assert (done);
5021 break;
5023 case assignment_vec_info_type:
5024 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
5025 gcc_assert (done);
5026 break;
5028 case load_vec_info_type:
5029 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
5030 slp_node_instance);
5031 gcc_assert (done);
5032 break;
5034 case store_vec_info_type:
5035 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
5036 gcc_assert (done);
5037 if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && !slp_node)
5039 /* In case of interleaving, the whole chain is vectorized when the
5040 last store in the chain is reached. Store stmts before the last
5041 one are skipped, and their stmt_vec_info shouldn't be freed
5042 meanwhile. */
5043 *strided_store = true;
5044 if (STMT_VINFO_VEC_STMT (stmt_info))
5045 is_store = true;
5047 else
5048 is_store = true;
5049 break;
5051 case condition_vec_info_type:
5052 gcc_assert (!slp_node);
5053 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0);
5054 gcc_assert (done);
5055 break;
5057 case call_vec_info_type:
5058 gcc_assert (!slp_node);
5059 done = vectorizable_call (stmt, gsi, &vec_stmt);
5060 stmt = gsi_stmt (*gsi);
5061 break;
5063 case reduc_vec_info_type:
5064 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
5065 gcc_assert (done);
5066 break;
5068 default:
5069 if (!STMT_VINFO_LIVE_P (stmt_info))
5071 if (vect_print_dump_info (REPORT_DETAILS))
5072 fprintf (vect_dump, "stmt not supported.");
5073 gcc_unreachable ();
5077 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
5078 is being vectorized, but outside the immediately enclosing loop. */
5079 if (vec_stmt
5080 && STMT_VINFO_LOOP_VINFO (stmt_info)
5081 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
5082 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
5083 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
5084 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
5085 || STMT_VINFO_RELEVANT (stmt_info) ==
5086 vect_used_in_outer_by_reduction))
5088 struct loop *innerloop = LOOP_VINFO_LOOP (
5089 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
5090 imm_use_iterator imm_iter;
5091 use_operand_p use_p;
5092 tree scalar_dest;
5093 gimple exit_phi;
5095 if (vect_print_dump_info (REPORT_DETAILS))
5096 fprintf (vect_dump, "Record the vdef for outer-loop vectorization.");
5098 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
5099 (to be used when vectorizing outer-loop stmts that use the DEF of
5100 STMT). */
5101 if (gimple_code (stmt) == GIMPLE_PHI)
5102 scalar_dest = PHI_RESULT (stmt);
5103 else
5104 scalar_dest = gimple_assign_lhs (stmt);
5106 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
5108 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
5110 exit_phi = USE_STMT (use_p);
5111 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
5116 /* Handle stmts whose DEF is used outside the loop-nest that is
5117 being vectorized. */
5118 if (STMT_VINFO_LIVE_P (stmt_info)
5119 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
5121 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
5122 gcc_assert (done);
5125 if (vec_stmt)
5127 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
5128 orig_stmt_in_pattern = STMT_VINFO_RELATED_STMT (stmt_info);
5129 if (orig_stmt_in_pattern)
5131 stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt_in_pattern);
5132 /* STMT was inserted by the vectorizer to replace a computation idiom.
5133 ORIG_STMT_IN_PATTERN is a stmt in the original sequence that
5134 computed this idiom. We need to record a pointer to VEC_STMT in
5135 the stmt_info of ORIG_STMT_IN_PATTERN. See more details in the
5136 documentation of vect_pattern_recog. */
5137 if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
5139 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo)
5140 == orig_scalar_stmt);
5141 STMT_VINFO_VEC_STMT (stmt_vinfo) = vec_stmt;
5146 return is_store;
5150 /* Remove a group of stores (for SLP or interleaving), free their
5151 stmt_vec_info. */
5153 void
5154 vect_remove_stores (gimple first_stmt)
5156 gimple next = first_stmt;
5157 gimple tmp;
5158 gimple_stmt_iterator next_si;
5160 while (next)
5162 /* Free the attached stmt_vec_info and remove the stmt. */
5163 next_si = gsi_for_stmt (next);
5164 gsi_remove (&next_si, true);
5165 tmp = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
5166 free_stmt_vec_info (next);
5167 next = tmp;
5172 /* Function new_stmt_vec_info.
5174 Create and initialize a new stmt_vec_info struct for STMT. */
5176 stmt_vec_info
5177 new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
5178 bb_vec_info bb_vinfo)
5180 stmt_vec_info res;
5181 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
5183 STMT_VINFO_TYPE (res) = undef_vec_info_type;
5184 STMT_VINFO_STMT (res) = stmt;
5185 STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
5186 STMT_VINFO_BB_VINFO (res) = bb_vinfo;
5187 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
5188 STMT_VINFO_LIVE_P (res) = false;
5189 STMT_VINFO_VECTYPE (res) = NULL;
5190 STMT_VINFO_VEC_STMT (res) = NULL;
5191 STMT_VINFO_VECTORIZABLE (res) = true;
5192 STMT_VINFO_IN_PATTERN_P (res) = false;
5193 STMT_VINFO_RELATED_STMT (res) = NULL;
5194 STMT_VINFO_DATA_REF (res) = NULL;
5196 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
5197 STMT_VINFO_DR_OFFSET (res) = NULL;
5198 STMT_VINFO_DR_INIT (res) = NULL;
5199 STMT_VINFO_DR_STEP (res) = NULL;
5200 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
5202 if (gimple_code (stmt) == GIMPLE_PHI
5203 && is_loop_header_bb_p (gimple_bb (stmt)))
5204 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
5205 else
5206 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
5208 STMT_VINFO_SAME_ALIGN_REFS (res) = VEC_alloc (dr_p, heap, 5);
5209 STMT_VINFO_INSIDE_OF_LOOP_COST (res) = 0;
5210 STMT_VINFO_OUTSIDE_OF_LOOP_COST (res) = 0;
5211 STMT_SLP_TYPE (res) = loop_vect;
5212 GROUP_FIRST_ELEMENT (res) = NULL;
5213 GROUP_NEXT_ELEMENT (res) = NULL;
5214 GROUP_SIZE (res) = 0;
5215 GROUP_STORE_COUNT (res) = 0;
5216 GROUP_GAP (res) = 0;
5217 GROUP_SAME_DR_STMT (res) = NULL;
5218 GROUP_READ_WRITE_DEPENDENCE (res) = false;
5220 return res;
5224 /* Create the vector that holds stmt_vec_info structs. */
5226 void
5227 init_stmt_vec_info_vec (void)
5229 gcc_assert (!stmt_vec_info_vec);
5230 stmt_vec_info_vec = VEC_alloc (vec_void_p, heap, 50);
5234 /* Free the vector that holds stmt_vec_info structs. */
5236 void
5237 free_stmt_vec_info_vec (void)
5239 gcc_assert (stmt_vec_info_vec);
5240 VEC_free (vec_void_p, heap, stmt_vec_info_vec);
5244 /* Free stmt vectorization related info. */
5246 void
5247 free_stmt_vec_info (gimple stmt)
5249 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5251 if (!stmt_info)
5252 return;
5254 VEC_free (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
5255 set_vinfo_for_stmt (stmt, NULL);
5256 free (stmt_info);
5260 /* Function get_vectype_for_scalar_type_and_size.
5262 Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
5263 by the target. */
5265 static tree
5266 get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
5268 enum machine_mode inner_mode = TYPE_MODE (scalar_type);
5269 enum machine_mode simd_mode;
5270 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
5271 int nunits;
5272 tree vectype;
5274 if (nbytes == 0)
5275 return NULL_TREE;
5277 /* We can't build a vector type of elements with alignment bigger than
5278 their size. */
5279 if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
5280 return NULL_TREE;
5282 /* If we'd build a vector type of elements whose mode precision doesn't
5283 match their type's precision we'll get mismatched types on vector
5284 extracts via BIT_FIELD_REFs. This effectively means we disable
5285 vectorization of bool and/or enum types in some languages. */
5286 if (INTEGRAL_TYPE_P (scalar_type)
5287 && GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type))
5288 return NULL_TREE;
5290 if (GET_MODE_CLASS (inner_mode) != MODE_INT
5291 && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
5292 return NULL_TREE;
5294 /* If no size was supplied use the mode the target prefers. Otherwise
5295 lookup a vector mode of the specified size. */
5296 if (size == 0)
5297 simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
5298 else
5299 simd_mode = mode_for_vector (inner_mode, size / nbytes);
5300 nunits = GET_MODE_SIZE (simd_mode) / nbytes;
5301 if (nunits <= 1)
5302 return NULL_TREE;
5304 vectype = build_vector_type (scalar_type, nunits);
5305 if (vect_print_dump_info (REPORT_DETAILS))
5307 fprintf (vect_dump, "get vectype with %d units of type ", nunits);
5308 print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
5311 if (!vectype)
5312 return NULL_TREE;
5314 if (vect_print_dump_info (REPORT_DETAILS))
5316 fprintf (vect_dump, "vectype: ");
5317 print_generic_expr (vect_dump, vectype, TDF_SLIM);
5320 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
5321 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
5323 if (vect_print_dump_info (REPORT_DETAILS))
5324 fprintf (vect_dump, "mode not supported by target.");
5325 return NULL_TREE;
5328 return vectype;
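/* For example (a sketch): on a target whose preferred SIMD mode for
   SImode is V4SImode, calling this with SCALAR_TYPE == int and
   SIZE == 0 yields the 16-byte V4SI vector type (nunits == 4); with
   SIZE == 32 an eight-unit V8SImode is looked up instead, if the
   target provides it.  */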
5331 unsigned int current_vector_size;
5333 /* Function get_vectype_for_scalar_type.
5335 Returns the vector type corresponding to SCALAR_TYPE as supported
5336 by the target. */
5338 tree
5339 get_vectype_for_scalar_type (tree scalar_type)
5341 tree vectype;
5342 vectype = get_vectype_for_scalar_type_and_size (scalar_type,
5343 current_vector_size);
5344 if (vectype
5345 && current_vector_size == 0)
5346 current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
5347 return vectype;
5350 /* Function get_same_sized_vectype
5352 Returns a vector type corresponding to SCALAR_TYPE with the same
5353 size as VECTOR_TYPE, if supported by the target. */
5355 tree
5356 get_same_sized_vectype (tree scalar_type, tree vector_type)
5358 return get_vectype_for_scalar_type_and_size
5359 (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
5362 /* Function vect_is_simple_use.
5364 Input:
5365 LOOP_VINFO - the vect info of the loop that is being vectorized.
5366 BB_VINFO - the vect info of the basic block that is being vectorized.
5367 OPERAND - operand of a stmt in the loop or bb.
5368 DEF - the defining stmt in case OPERAND is an SSA_NAME.
5370 Returns whether a stmt with OPERAND can be vectorized.
5371 For loops, supportable operands are constants, loop invariants, and operands
5372 that are defined by the current iteration of the loop. Unsupportable
5373 operands are those that are defined by a previous iteration of the loop (as
5374 is the case in reduction/induction computations).
5375 For basic blocks, supportable operands are constants and bb invariants.
5376 For now, operands defined outside the basic block are not supported. */
5378 bool
5379 vect_is_simple_use (tree operand, loop_vec_info loop_vinfo,
5380 bb_vec_info bb_vinfo, gimple *def_stmt,
5381 tree *def, enum vect_def_type *dt)
5383 basic_block bb;
5384 stmt_vec_info stmt_vinfo;
5385 struct loop *loop = NULL;
5387 if (loop_vinfo)
5388 loop = LOOP_VINFO_LOOP (loop_vinfo);
5390 *def_stmt = NULL;
5391 *def = NULL_TREE;
5393 if (vect_print_dump_info (REPORT_DETAILS))
5395 fprintf (vect_dump, "vect_is_simple_use: operand ");
5396 print_generic_expr (vect_dump, operand, TDF_SLIM);
5399 if (TREE_CODE (operand) == INTEGER_CST || TREE_CODE (operand) == REAL_CST)
5401 *dt = vect_constant_def;
5402 return true;
5405 if (is_gimple_min_invariant (operand))
5407 *def = operand;
5408 *dt = vect_external_def;
5409 return true;
5412 if (TREE_CODE (operand) == PAREN_EXPR)
5414 if (vect_print_dump_info (REPORT_DETAILS))
5415 fprintf (vect_dump, "non-associatable copy.");
5416 operand = TREE_OPERAND (operand, 0);
5419 if (TREE_CODE (operand) != SSA_NAME)
5421 if (vect_print_dump_info (REPORT_DETAILS))
5422 fprintf (vect_dump, "not ssa-name.");
5423 return false;
5426 *def_stmt = SSA_NAME_DEF_STMT (operand);
5427 if (*def_stmt == NULL)
5429 if (vect_print_dump_info (REPORT_DETAILS))
5430 fprintf (vect_dump, "no def_stmt.");
5431 return false;
5434 if (vect_print_dump_info (REPORT_DETAILS))
5436 fprintf (vect_dump, "def_stmt: ");
5437 print_gimple_stmt (vect_dump, *def_stmt, 0, TDF_SLIM);
5440 /* Empty stmt is expected only in case of a function argument.
5441 (Otherwise - we expect a phi_node or a GIMPLE_ASSIGN). */
5442 if (gimple_nop_p (*def_stmt))
5444 *def = operand;
5445 *dt = vect_external_def;
5446 return true;
5449 bb = gimple_bb (*def_stmt);
5451 if ((loop && !flow_bb_inside_loop_p (loop, bb))
5452 || (!loop && bb != BB_VINFO_BB (bb_vinfo))
5453 || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
5454 *dt = vect_external_def;
5455 else
5457 stmt_vinfo = vinfo_for_stmt (*def_stmt);
5458 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
5461 if (*dt == vect_unknown_def_type)
5463 if (vect_print_dump_info (REPORT_DETAILS))
5464 fprintf (vect_dump, "Unsupported pattern.");
5465 return false;
5468 if (vect_print_dump_info (REPORT_DETAILS))
5469 fprintf (vect_dump, "type of def: %d.", *dt);
5471 switch (gimple_code (*def_stmt))
5473 case GIMPLE_PHI:
5474 *def = gimple_phi_result (*def_stmt);
5475 break;
5477 case GIMPLE_ASSIGN:
5478 *def = gimple_assign_lhs (*def_stmt);
5479 break;
5481 case GIMPLE_CALL:
5482 *def = gimple_call_lhs (*def_stmt);
5483 if (*def != NULL)
5484 break;
5485 /* FALLTHRU */
5486 default:
5487 if (vect_print_dump_info (REPORT_DETAILS))
5488 fprintf (vect_dump, "unsupported defining stmt: ");
5489 return false;
5492 return true;
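/* Example classifications (a sketch): in a_1 = b_2 + 3, operand 3 is
   vect_constant_def; if b_2 is a function parameter (its def_stmt is
   a GIMPLE_NOP) it is vect_external_def; if b_2 is defined by a stmt
   inside the loop (or bb) being vectorized, the def type is taken
   from that stmt's stmt_vec_info, typically vect_internal_def.  */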
5495 /* Function vect_is_simple_use_1.
5497 Same as vect_is_simple_use but also determines the vector operand
5498 type of OPERAND and stores it to *VECTYPE. If the definition of
5499 OPERAND is vect_uninitialized_def, vect_constant_def or
5500 vect_external_def *VECTYPE will be set to NULL_TREE and the caller
5501 is responsible for computing the best suited vector type for the
5502 scalar operand. */
5504 bool
5505 vect_is_simple_use_1 (tree operand, loop_vec_info loop_vinfo,
5506 bb_vec_info bb_vinfo, gimple *def_stmt,
5507 tree *def, enum vect_def_type *dt, tree *vectype)
5509 if (!vect_is_simple_use (operand, loop_vinfo, bb_vinfo, def_stmt, def, dt))
5510 return false;
5512 /* Now get a vector type if the def is internal, otherwise supply
5513 NULL_TREE and leave it up to the caller to figure out a proper
5514 type for the use stmt. */
5515 if (*dt == vect_internal_def
5516 || *dt == vect_induction_def
5517 || *dt == vect_reduction_def
5518 || *dt == vect_double_reduction_def
5519 || *dt == vect_nested_cycle)
5521 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
5522 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
5523 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
5524 *vectype = STMT_VINFO_VECTYPE (stmt_info);
5525 gcc_assert (*vectype != NULL_TREE);
5527 else if (*dt == vect_uninitialized_def
5528 || *dt == vect_constant_def
5529 || *dt == vect_external_def)
5530 *vectype = NULL_TREE;
5531 else
5532 gcc_unreachable ();
5534 return true;
5538 /* Function supportable_widening_operation
5540 Check whether an operation represented by the code CODE is a
5541 widening operation that is supported by the target platform in
5542 vector form (i.e., when operating on arguments of type VECTYPE_IN
5543 producing a result of type VECTYPE_OUT).
5545 Widening operations we currently support are NOP (CONVERT), FLOAT
5546 and WIDEN_MULT. This function checks if these operations are supported
5547 by the target platform either directly (via vector tree-codes), or via
5548 target builtins.
5550 Output:
5551 - CODE1 and CODE2 are codes of vector operations to be used when
5552 vectorizing the operation, if available.
5553 - DECL1 and DECL2 are decls of target builtin functions to be used
5554 when vectorizing the operation, if available. In this case,
5555 CODE1 and CODE2 are CALL_EXPR.
5556 - MULTI_STEP_CVT determines the number of required intermediate steps in
5557 case of multi-step conversion (like char->short->int - in that case
5558 MULTI_STEP_CVT will be 1).
5559 - INTERM_TYPES contains the intermediate type required to perform the
5560 widening operation (short in the above example). */
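
/* For instance (the vector modes are illustrative and target
   dependent): widening V16QI chars all the way to int is the two-step
   chain char -> short -> int.  The first VEC_UNPACK_{LO,HI}_EXPR pair
   yields two V8HI vectors, and each of those is unpacked again into
   two V4SI vectors, so MULTI_STEP_CVT is 1 and INTERM_TYPES holds the
   single intermediate vector type (the V8HI "short" type).  */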
5562 bool
5563 supportable_widening_operation (enum tree_code code, gimple stmt,
5564 tree vectype_out, tree vectype_in,
5565 tree *decl1, tree *decl2,
5566 enum tree_code *code1, enum tree_code *code2,
5567 int *multi_step_cvt,
5568 VEC (tree, heap) **interm_types)
5570 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5571 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
5572 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
5573 bool ordered_p;
5574 enum machine_mode vec_mode;
5575 enum insn_code icode1, icode2;
5576 optab optab1, optab2;
5577 tree vectype = vectype_in;
5578 tree wide_vectype = vectype_out;
5579 enum tree_code c1, c2;
5581 /* The result of a vectorized widening operation usually requires two vectors
5582 (because the widened results do not fit into one vector). The generated
5583 vector results would normally be expected to be generated in the same
5584 order as in the original scalar computation, i.e. if 8 results are
5585 generated in each vector iteration, they are to be organized as follows:
5586 vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].
5588 However, in the special case that the result of the widening operation is
5589 used in a reduction computation only, the order doesn't matter (because
5590 when vectorizing a reduction we change the order of the computation).
5591 Some targets can take advantage of this and generate more efficient code.
5592 For example, targets like Altivec that support widen_mult using a sequence
5593 of {mult_even,mult_odd} generate the following vectors:
5594 vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8].
5596 When vectorizing outer-loops, we execute the inner-loop sequentially
5597 (each vectorized inner-loop iteration contributes to VF outer-loop
5598 iterations in parallel). We therefore do not allow changing the order
5599 of the computation in the inner-loop during outer-loop vectorization. */
5601 if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
5602 && !nested_in_vect_loop_p (vect_loop, stmt))
5603 ordered_p = false;
5604 else
5605 ordered_p = true;
5607 if (!ordered_p
5608 && code == WIDEN_MULT_EXPR
5609 && targetm.vectorize.builtin_mul_widen_even
5610 && targetm.vectorize.builtin_mul_widen_even (vectype)
5611 && targetm.vectorize.builtin_mul_widen_odd
5612 && targetm.vectorize.builtin_mul_widen_odd (vectype))
5614 if (vect_print_dump_info (REPORT_DETAILS))
5615 fprintf (vect_dump, "Unordered widening operation detected.");
5617 *code1 = *code2 = CALL_EXPR;
5618 *decl1 = targetm.vectorize.builtin_mul_widen_even (vectype);
5619 *decl2 = targetm.vectorize.builtin_mul_widen_odd (vectype);
5620 return true;
5623 switch (code)
5625 case WIDEN_MULT_EXPR:
5626 if (BYTES_BIG_ENDIAN)
5628 c1 = VEC_WIDEN_MULT_HI_EXPR;
5629 c2 = VEC_WIDEN_MULT_LO_EXPR;
5631 else
5633 c2 = VEC_WIDEN_MULT_HI_EXPR;
5634 c1 = VEC_WIDEN_MULT_LO_EXPR;
5636 break;
5638 CASE_CONVERT:
5639 if (BYTES_BIG_ENDIAN)
5641 c1 = VEC_UNPACK_HI_EXPR;
5642 c2 = VEC_UNPACK_LO_EXPR;
5644 else
5646 c2 = VEC_UNPACK_HI_EXPR;
5647 c1 = VEC_UNPACK_LO_EXPR;
5649 break;
5651 case FLOAT_EXPR:
5652 if (BYTES_BIG_ENDIAN)
5654 c1 = VEC_UNPACK_FLOAT_HI_EXPR;
5655 c2 = VEC_UNPACK_FLOAT_LO_EXPR;
5657 else
5659 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
5660 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
5662 break;
5664 case FIX_TRUNC_EXPR:
5665 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
5666 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
5667 computing the operation. */
5668 return false;
5670 default:
5671 gcc_unreachable ();
5674 if (code == FIX_TRUNC_EXPR)
5676 /* The signedness is determined from the output operand. */
5677 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
5678 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
5680 else
5682 optab1 = optab_for_tree_code (c1, vectype, optab_default);
5683 optab2 = optab_for_tree_code (c2, vectype, optab_default);
5686 if (!optab1 || !optab2)
5687 return false;
5689 vec_mode = TYPE_MODE (vectype);
5690 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
5691 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
5692 return false;
5694 /* Check if it's a multi-step conversion that can be done using intermediate
5695 types. */
5696 if (insn_data[icode1].operand[0].mode != TYPE_MODE (wide_vectype)
5697 || insn_data[icode2].operand[0].mode != TYPE_MODE (wide_vectype))
5699 int i;
5700 tree prev_type = vectype, intermediate_type;
5701 enum machine_mode intermediate_mode, prev_mode = vec_mode;
5702 optab optab3, optab4;
5704 if (!CONVERT_EXPR_CODE_P (code))
5705 return false;
5707 *code1 = c1;
5708 *code2 = c2;
5710 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
5711 intermediate steps in the promotion sequence. We try up to
5712 MAX_INTERM_CVT_STEPS steps to get to WIDE_VECTYPE, and fail if we do
5713 not. */
5714 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
5715 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
5717 intermediate_mode = insn_data[icode1].operand[0].mode;
5718 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
5719 TYPE_UNSIGNED (prev_type));
5720 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
5721 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
5723 if (!optab3 || !optab4
5724 || ((icode1 = optab_handler (optab1, prev_mode))
5725 == CODE_FOR_nothing)
5726 || insn_data[icode1].operand[0].mode != intermediate_mode
5727 || ((icode2 = optab_handler (optab2, prev_mode))
5728 == CODE_FOR_nothing)
5729 || insn_data[icode2].operand[0].mode != intermediate_mode
5730 || ((icode1 = optab_handler (optab3, intermediate_mode))
5731 == CODE_FOR_nothing)
5732 || ((icode2 = optab_handler (optab4, intermediate_mode))
5733 == CODE_FOR_nothing))
5734 return false;
5736 VEC_quick_push (tree, *interm_types, intermediate_type);
5737 (*multi_step_cvt)++;
5739 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
5740 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
5741 return true;
5743 prev_type = intermediate_type;
5744 prev_mode = intermediate_mode;
5747 return false;
5750 *code1 = c1;
5751 *code2 = c2;
5752 return true;
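
/* Usage sketch (hypothetical caller; whether the query succeeds is
   entirely target dependent):

     enum tree_code c1, c2;
     tree decl1 = NULL_TREE, decl2 = NULL_TREE;
     int multi_step_cvt = 0;
     VEC (tree, heap) *interm_types = NULL;

     if (supportable_widening_operation (WIDEN_MULT_EXPR, stmt,
                                         vectype_out, vectype_in,
                                         &decl1, &decl2, &c1, &c2,
                                         &multi_step_cvt, &interm_types))
       ...

   On success the caller emits one round of C1/C2 statements (or calls
   to DECL1/DECL2 in the even/odd case) per step, MULTI_STEP_CVT + 1
   rounds in total, going through the types in INTERM_TYPES.  */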
5756 /* Function supportable_narrowing_operation
5758 Check whether an operation represented by the code CODE is a
5759 narrowing operation that is supported by the target platform in
5760 vector form (i.e., when operating on arguments of type VECTYPE_IN
5761 and producing a result of type VECTYPE_OUT).
5763 Narrowing operations we currently support are NOP (CONVERT) and
5764 FIX_TRUNC. This function checks if these operations are supported by
5765 the target platform directly via vector tree-codes.
5767 Output:
5768 - CODE1 is the code of a vector operation to be used when
5769 vectorizing the operation, if available.
5770 - MULTI_STEP_CVT determines the number of required intermediate steps in
5771 case of multi-step conversion (like int->short->char - in that case
5772 MULTI_STEP_CVT will be 1).
5773 - INTERM_TYPES contains the intermediate type required to perform the
5774 narrowing operation (short in the above example). */
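
/* For instance (the vector modes are illustrative): narrowing int to
   char is the two-step chain int -> short -> char.  Each
   VEC_PACK_TRUNC_EXPR packs two V4SI vectors into one V8HI vector, and
   two such V8HI vectors are then packed into one V16QI vector, so
   MULTI_STEP_CVT is 1 and INTERM_TYPES holds the intermediate "short"
   vector type.  */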
5776 bool
5777 supportable_narrowing_operation (enum tree_code code,
5778 tree vectype_out, tree vectype_in,
5779 enum tree_code *code1, int *multi_step_cvt,
5780 VEC (tree, heap) **interm_types)
5782 enum machine_mode vec_mode;
5783 enum insn_code icode1;
5784 optab optab1, interm_optab;
5785 tree vectype = vectype_in;
5786 tree narrow_vectype = vectype_out;
5787 enum tree_code c1;
5788 tree intermediate_type, prev_type;
5789 int i;
5791 switch (code)
5793 CASE_CONVERT:
5794 c1 = VEC_PACK_TRUNC_EXPR;
5795 break;
5797 case FIX_TRUNC_EXPR:
5798 c1 = VEC_PACK_FIX_TRUNC_EXPR;
5799 break;
5801 case FLOAT_EXPR:
5802 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
5803 tree code and optabs used for computing the operation. */
5804 return false;
5806 default:
5807 gcc_unreachable ();
5810 if (code == FIX_TRUNC_EXPR)
5811 /* The signedness is determined from the output operand. */
5812 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
5813 else
5814 optab1 = optab_for_tree_code (c1, vectype, optab_default);
5816 if (!optab1)
5817 return false;
5819 vec_mode = TYPE_MODE (vectype);
5820 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
5821 return false;
5823 /* Check if it's a multi-step conversion that can be done using intermediate
5824 types. */
5825 if (insn_data[icode1].operand[0].mode != TYPE_MODE (narrow_vectype))
5827 enum machine_mode intermediate_mode, prev_mode = vec_mode;
5829 *code1 = c1;
5830 prev_type = vectype;
5831 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
5832 intermediate steps in the demotion sequence. We try up to
5833 MAX_INTERM_CVT_STEPS steps to get to NARROW_VECTYPE, and fail if we do
5834 not. */
5835 *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
5836 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
5838 intermediate_mode = insn_data[icode1].operand[0].mode;
5839 intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
5840 TYPE_UNSIGNED (prev_type));
5841 interm_optab = optab_for_tree_code (c1, intermediate_type,
5842 optab_default);
5843 if (!interm_optab
5844 || ((icode1 = optab_handler (optab1, prev_mode))
5845 == CODE_FOR_nothing)
5846 || insn_data[icode1].operand[0].mode != intermediate_mode
5847 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
5848 == CODE_FOR_nothing))
5849 return false;
5851 VEC_quick_push (tree, *interm_types, intermediate_type);
5852 (*multi_step_cvt)++;
5854 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
5855 return true;
5857 prev_type = intermediate_type;
5858 prev_mode = intermediate_mode;
5861 return false;
5864 *code1 = c1;
5865 return true;
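
/* Usage sketch (hypothetical caller, mirroring the widening case):

     enum tree_code c1;
     int multi_step_cvt = 0;
     VEC (tree, heap) *interm_types = NULL;

     if (supportable_narrowing_operation (NOP_EXPR, vectype_out,
                                          vectype_in, &c1,
                                          &multi_step_cvt, &interm_types))
       ...

   On success the caller chains MULTI_STEP_CVT + 1 layers of C1
   statements, halving the element width at each layer through the
   types recorded in INTERM_TYPES.  */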