1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003-2015 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
4 and Ira Rosen <irar@il.ibm.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "dumpfile.h"
26 #include "tm.h"
27 #include "hash-set.h"
28 #include "machmode.h"
29 #include "vec.h"
30 #include "double-int.h"
31 #include "input.h"
32 #include "alias.h"
33 #include "symtab.h"
34 #include "wide-int.h"
35 #include "inchash.h"
36 #include "tree.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
39 #include "target.h"
40 #include "predict.h"
41 #include "hard-reg-set.h"
42 #include "function.h"
43 #include "dominance.h"
44 #include "cfg.h"
45 #include "basic-block.h"
46 #include "gimple-pretty-print.h"
47 #include "tree-ssa-alias.h"
48 #include "internal-fn.h"
49 #include "tree-eh.h"
50 #include "gimple-expr.h"
51 #include "is-a.h"
52 #include "gimple.h"
53 #include "gimplify.h"
54 #include "gimple-iterator.h"
55 #include "gimplify-me.h"
56 #include "gimple-ssa.h"
57 #include "tree-cfg.h"
58 #include "tree-phinodes.h"
59 #include "ssa-iterators.h"
60 #include "stringpool.h"
61 #include "tree-ssanames.h"
62 #include "tree-ssa-loop-manip.h"
63 #include "cfgloop.h"
64 #include "tree-ssa-loop.h"
65 #include "tree-scalar-evolution.h"
66 #include "hashtab.h"
67 #include "rtl.h"
68 #include "flags.h"
69 #include "statistics.h"
70 #include "real.h"
71 #include "fixed-value.h"
72 #include "insn-config.h"
73 #include "expmed.h"
74 #include "dojump.h"
75 #include "explow.h"
76 #include "calls.h"
77 #include "emit-rtl.h"
78 #include "varasm.h"
79 #include "stmt.h"
80 #include "expr.h"
81 #include "recog.h" /* FIXME: for insn_data */
82 #include "insn-codes.h"
83 #include "optabs.h"
84 #include "diagnostic-core.h"
85 #include "tree-vectorizer.h"
86 #include "hash-map.h"
87 #include "plugin-api.h"
88 #include "ipa-ref.h"
89 #include "cgraph.h"
90 #include "builtins.h"
92 /* For lang_hooks.types.type_for_mode. */
93 #include "langhooks.h"
95 /* Return the vectorized type for the given statement. */
97 tree
98 stmt_vectype (struct _stmt_vec_info *stmt_info)
100 return STMT_VINFO_VECTYPE (stmt_info);
103 /* Return TRUE iff the given statement is in an inner loop relative to
104 the loop being vectorized. */
105 bool
106 stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
108 gimple stmt = STMT_VINFO_STMT (stmt_info);
109 basic_block bb = gimple_bb (stmt);
110 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
111 struct loop* loop;
113 if (!loop_vinfo)
114 return false;
116 loop = LOOP_VINFO_LOOP (loop_vinfo);
118 return (bb->loop_father == loop->inner);
121 /* Record the cost of a statement, either by directly informing the
122 target model or by saving it in a vector for later processing.
123 Return a preliminary estimate of the statement's cost. */
125 unsigned
126 record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
127 enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
128 int misalign, enum vect_cost_model_location where)
130 if (body_cost_vec)
132 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
133 add_stmt_info_to_vec (body_cost_vec, count, kind,
134 stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
135 misalign);
136 return (unsigned)
137 (builtin_vectorization_cost (kind, vectype, misalign) * count);
140 else
142 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
143 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
144 void *target_cost_data;
146 if (loop_vinfo)
147 target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
148 else
149 target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);
151 return add_stmt_cost (target_cost_data, count, kind, stmt_info,
152 misalign, where);
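/* Illustrative usage (mirroring the calls in vect_get_store_cost and
   vect_get_load_cost below): costing one vector store per copy in the
   loop body would look like
     record_stmt_cost (body_cost_vec, ncopies, vector_store,
                       stmt_info, 0, vect_body);
   which either appends an entry to the given cost vector for later
   processing or, when no vector is given, feeds the target cost model
   directly.  */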
156 /* Return a variable of type ELEM_TYPE[NELEMS]. */
158 static tree
159 create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
161 return create_tmp_var (build_array_type_nelts (elem_type, nelems),
162 "vect_array");
165 /* ARRAY is an array of vectors created by create_vector_array.
166 Return an SSA_NAME for the vector in index N. The reference
167 is part of the vectorization of STMT and the vector is associated
168 with scalar destination SCALAR_DEST. */
170 static tree
171 read_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
172 tree array, unsigned HOST_WIDE_INT n)
174 tree vect_type, vect, vect_name, array_ref;
175 gimple new_stmt;
177 gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
178 vect_type = TREE_TYPE (TREE_TYPE (array));
179 vect = vect_create_destination_var (scalar_dest, vect_type);
180 array_ref = build4 (ARRAY_REF, vect_type, array,
181 build_int_cst (size_type_node, n),
182 NULL_TREE, NULL_TREE);
184 new_stmt = gimple_build_assign (vect, array_ref);
185 vect_name = make_ssa_name (vect, new_stmt);
186 gimple_assign_set_lhs (new_stmt, vect_name);
187 vect_finish_stmt_generation (stmt, new_stmt, gsi);
189 return vect_name;
192 /* ARRAY is an array of vectors created by create_vector_array.
193 Emit code to store SSA_NAME VECT in index N of the array.
194 The store is part of the vectorization of STMT. */
196 static void
197 write_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree vect,
198 tree array, unsigned HOST_WIDE_INT n)
200 tree array_ref;
201 gimple new_stmt;
203 array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
204 build_int_cst (size_type_node, n),
205 NULL_TREE, NULL_TREE);
207 new_stmt = gimple_build_assign (array_ref, vect);
208 vect_finish_stmt_generation (stmt, new_stmt, gsi);
211 /* PTR is a pointer to an array of type TYPE. Return a representation
212 of *PTR. The memory reference replaces those in FIRST_DR
213 (and its group). */
215 static tree
216 create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
218 tree mem_ref, alias_ptr_type;
220 alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
221 mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
222 /* Arrays have the same alignment as their type. */
223 set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
224 return mem_ref;
227 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
229 /* Function vect_mark_relevant.
231 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
233 static void
234 vect_mark_relevant (vec<gimple> *worklist, gimple stmt,
235 enum vect_relevant relevant, bool live_p,
236 bool used_in_pattern)
238 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
239 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
240 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
241 gimple pattern_stmt;
243 if (dump_enabled_p ())
244 dump_printf_loc (MSG_NOTE, vect_location,
245 "mark relevant %d, live %d.\n", relevant, live_p);
247 /* If this stmt is an original stmt in a pattern, we might need to mark its
248 related pattern stmt instead of the original stmt. However, such stmts
249 may have their own uses that are not in any pattern; in such cases the
250 stmt itself should be marked. */
251 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
253 bool found = false;
254 if (!used_in_pattern)
256 imm_use_iterator imm_iter;
257 use_operand_p use_p;
258 gimple use_stmt;
259 tree lhs;
260 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
261 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
263 if (is_gimple_assign (stmt))
264 lhs = gimple_assign_lhs (stmt);
265 else
266 lhs = gimple_call_lhs (stmt);
268 /* This use is an out-of-pattern use; if LHS has other uses that are
269 pattern uses, we should mark the stmt itself, and not the pattern
270 stmt. */
271 if (lhs && TREE_CODE (lhs) == SSA_NAME)
272 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
274 if (is_gimple_debug (USE_STMT (use_p)))
275 continue;
276 use_stmt = USE_STMT (use_p);
278 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
279 continue;
281 if (vinfo_for_stmt (use_stmt)
282 && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
284 found = true;
285 break;
290 if (!found)
292 /* This is the last stmt in a sequence that was detected as a
293 pattern that can potentially be vectorized. Don't mark the stmt
294 as relevant/live because it's not going to be vectorized.
295 Instead mark the pattern-stmt that replaces it. */
297 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
299 if (dump_enabled_p ())
300 dump_printf_loc (MSG_NOTE, vect_location,
301 "last stmt in pattern. don't mark"
302 " relevant/live.\n");
303 stmt_info = vinfo_for_stmt (pattern_stmt);
304 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
305 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
306 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
307 stmt = pattern_stmt;
311 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
312 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
313 STMT_VINFO_RELEVANT (stmt_info) = relevant;
315 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
316 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
318 if (dump_enabled_p ())
319 dump_printf_loc (MSG_NOTE, vect_location,
320 "already marked relevant/live.\n");
321 return;
324 worklist->safe_push (stmt);
328 /* Function vect_stmt_relevant_p.
330 Return true if STMT in loop that is represented by LOOP_VINFO is
331 "relevant for vectorization".
333 A stmt is considered "relevant for vectorization" if:
334 - it has uses outside the loop.
335 - it has vdefs (it alters memory).
336 - it is a control stmt in the loop (except for the exit condition).
338 CHECKME: what other side effects would the vectorizer allow? */
340 static bool
341 vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
342 enum vect_relevant *relevant, bool *live_p)
344 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
345 ssa_op_iter op_iter;
346 imm_use_iterator imm_iter;
347 use_operand_p use_p;
348 def_operand_p def_p;
350 *relevant = vect_unused_in_scope;
351 *live_p = false;
353 /* cond stmt other than loop exit cond. */
354 if (is_ctrl_stmt (stmt)
355 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
356 != loop_exit_ctrl_vec_info_type)
357 *relevant = vect_used_in_scope;
359 /* changing memory. */
360 if (gimple_code (stmt) != GIMPLE_PHI)
361 if (gimple_vdef (stmt)
362 && !gimple_clobber_p (stmt))
364 if (dump_enabled_p ())
365 dump_printf_loc (MSG_NOTE, vect_location,
366 "vec_stmt_relevant_p: stmt has vdefs.\n");
367 *relevant = vect_used_in_scope;
370 /* uses outside the loop. */
371 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
373 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
375 basic_block bb = gimple_bb (USE_STMT (use_p));
376 if (!flow_bb_inside_loop_p (loop, bb))
378 if (dump_enabled_p ())
379 dump_printf_loc (MSG_NOTE, vect_location,
380 "vec_stmt_relevant_p: used out of loop.\n");
382 if (is_gimple_debug (USE_STMT (use_p)))
383 continue;
385 /* We expect all such uses to be in the loop exit phis
386 (because of loop-closed SSA form).  */
387 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
388 gcc_assert (bb == single_exit (loop)->dest);
390 *live_p = true;
395 return (*live_p || *relevant);
399 /* Function exist_non_indexing_operands_for_use_p
401 USE is one of the uses attached to STMT. Check if USE is
402 used in STMT for anything other than indexing an array. */
404 static bool
405 exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
407 tree operand;
408 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
410 /* USE corresponds to some operand in STMT. If there is no data
411 reference in STMT, then any operand that corresponds to USE
412 is not indexing an array. */
413 if (!STMT_VINFO_DATA_REF (stmt_info))
414 return true;
416 /* STMT has a data_ref. FORNOW this means that it is one of
417 the following forms:
418 -1- ARRAY_REF = var
419 -2- var = ARRAY_REF
420 (This should have been verified in analyze_data_refs).
422 'var' in the second case corresponds to a def, not a use,
423 so USE cannot correspond to any operands that are not used
424 for array indexing.
426 Therefore, all we need to check is if STMT falls into the
427 first case, and whether var corresponds to USE. */
429 if (!gimple_assign_copy_p (stmt))
431 if (is_gimple_call (stmt)
432 && gimple_call_internal_p (stmt))
433 switch (gimple_call_internal_fn (stmt))
435 case IFN_MASK_STORE:
436 operand = gimple_call_arg (stmt, 3);
437 if (operand == use)
438 return true;
439 /* FALLTHRU */
440 case IFN_MASK_LOAD:
441 operand = gimple_call_arg (stmt, 2);
442 if (operand == use)
443 return true;
444 break;
445 default:
446 break;
448 return false;
451 if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
452 return false;
453 operand = gimple_assign_rhs1 (stmt);
454 if (TREE_CODE (operand) != SSA_NAME)
455 return false;
457 if (operand == use)
458 return true;
460 return false;
465 /* Function process_use.
467 Inputs:
468 - a USE in STMT in a loop represented by LOOP_VINFO
469 - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
470 that defined USE. This is done by calling mark_relevant and passing it
471 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
472 - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
473 be performed.
475 Outputs:
476 Generally, LIVE_P and RELEVANT are used to define the liveness and
477 relevance info of the DEF_STMT of this USE:
478 STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
479 STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
480 Exceptions:
481 - case 1: If USE is used only for address computations (e.g. array indexing),
482 which does not need to be directly vectorized, then the liveness/relevance
483 of the respective DEF_STMT is left unchanged.
484 - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
485 skip DEF_STMT because it has already been processed.
486 - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
487 be modified accordingly.
489 Return true if everything is as expected. Return false otherwise. */
491 static bool
492 process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
493 enum vect_relevant relevant, vec<gimple> *worklist,
494 bool force)
496 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
497 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
498 stmt_vec_info dstmt_vinfo;
499 basic_block bb, def_bb;
500 tree def;
501 gimple def_stmt;
502 enum vect_def_type dt;
504 /* case 1: we are only interested in uses that need to be vectorized. Uses
505 that are used for address computation are not considered relevant. */
506 if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
507 return true;
509 if (!vect_is_simple_use (use, stmt, loop_vinfo, NULL, &def_stmt, &def, &dt))
511 if (dump_enabled_p ())
512 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
513 "not vectorized: unsupported use in stmt.\n");
514 return false;
517 if (!def_stmt || gimple_nop_p (def_stmt))
518 return true;
520 def_bb = gimple_bb (def_stmt);
521 if (!flow_bb_inside_loop_p (loop, def_bb))
523 if (dump_enabled_p ())
524 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
525 return true;
528 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
529 DEF_STMT must have already been processed, because this should be the
530 only way that STMT, which is a reduction-phi, was put in the worklist,
531 as there should be no other uses for DEF_STMT in the loop. So we just
532 check that everything is as expected, and we are done. */
533 dstmt_vinfo = vinfo_for_stmt (def_stmt);
534 bb = gimple_bb (stmt);
535 if (gimple_code (stmt) == GIMPLE_PHI
536 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
537 && gimple_code (def_stmt) != GIMPLE_PHI
538 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
539 && bb->loop_father == def_bb->loop_father)
541 if (dump_enabled_p ())
542 dump_printf_loc (MSG_NOTE, vect_location,
543 "reduc-stmt defining reduc-phi in the same nest.\n");
544 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
545 dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
546 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
547 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
548 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
549 return true;
552 /* case 3a: outer-loop stmt defining an inner-loop stmt:
553 outer-loop-header-bb:
554 d = def_stmt
555 inner-loop:
556 stmt # use (d)
557 outer-loop-tail-bb:
558 ... */
559 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
561 if (dump_enabled_p ())
562 dump_printf_loc (MSG_NOTE, vect_location,
563 "outer-loop def-stmt defining inner-loop stmt.\n");
565 switch (relevant)
567 case vect_unused_in_scope:
568 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
569 vect_used_in_scope : vect_unused_in_scope;
570 break;
572 case vect_used_in_outer_by_reduction:
573 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
574 relevant = vect_used_by_reduction;
575 break;
577 case vect_used_in_outer:
578 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
579 relevant = vect_used_in_scope;
580 break;
582 case vect_used_in_scope:
583 break;
585 default:
586 gcc_unreachable ();
590 /* case 3b: inner-loop stmt defining an outer-loop stmt:
591 outer-loop-header-bb:
593 inner-loop:
594 d = def_stmt
595 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
596 stmt # use (d) */
597 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
599 if (dump_enabled_p ())
600 dump_printf_loc (MSG_NOTE, vect_location,
601 "inner-loop def-stmt defining outer-loop stmt.\n");
603 switch (relevant)
605 case vect_unused_in_scope:
606 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
607 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
608 vect_used_in_outer_by_reduction : vect_unused_in_scope;
609 break;
611 case vect_used_by_reduction:
612 relevant = vect_used_in_outer_by_reduction;
613 break;
615 case vect_used_in_scope:
616 relevant = vect_used_in_outer;
617 break;
619 default:
620 gcc_unreachable ();
624 vect_mark_relevant (worklist, def_stmt, relevant, live_p,
625 is_pattern_stmt_p (stmt_vinfo));
626 return true;
630 /* Function vect_mark_stmts_to_be_vectorized.
632 Not all stmts in the loop need to be vectorized. For example:
634 for i...
635 for j...
636 1. T0 = i + j
637 2. T1 = a[T0]
639 3. j = j + 1
641 Stmts 1 and 3 do not need to be vectorized, because loop control and
642 addressing of vectorized data-refs are handled differently.
644 This pass detects such stmts. */
646 bool
647 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
649 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
650 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
651 unsigned int nbbs = loop->num_nodes;
652 gimple_stmt_iterator si;
653 gimple stmt;
654 unsigned int i;
655 stmt_vec_info stmt_vinfo;
656 basic_block bb;
657 gimple phi;
658 bool live_p;
659 enum vect_relevant relevant, tmp_relevant;
660 enum vect_def_type def_type;
662 if (dump_enabled_p ())
663 dump_printf_loc (MSG_NOTE, vect_location,
664 "=== vect_mark_stmts_to_be_vectorized ===\n");
666 auto_vec<gimple, 64> worklist;
668 /* 1. Init worklist. */
669 for (i = 0; i < nbbs; i++)
671 bb = bbs[i];
672 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
674 phi = gsi_stmt (si);
675 if (dump_enabled_p ())
677 dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
678 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
681 if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
682 vect_mark_relevant (&worklist, phi, relevant, live_p, false);
684 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
686 stmt = gsi_stmt (si);
687 if (dump_enabled_p ())
689 dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
690 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
693 if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
694 vect_mark_relevant (&worklist, stmt, relevant, live_p, false);
698 /* 2. Process_worklist */
699 while (worklist.length () > 0)
701 use_operand_p use_p;
702 ssa_op_iter iter;
704 stmt = worklist.pop ();
705 if (dump_enabled_p ())
707 dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
708 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
711 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
712 (DEF_STMT) as relevant/irrelevant and live/dead according to the
713 liveness and relevance properties of STMT. */
714 stmt_vinfo = vinfo_for_stmt (stmt);
715 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
716 live_p = STMT_VINFO_LIVE_P (stmt_vinfo);
718 /* Generally, the liveness and relevance properties of STMT are
719 propagated as is to the DEF_STMTs of its USEs:
720 live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
721 relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)
723 One exception is when STMT has been identified as defining a reduction
724 variable; in this case we set the liveness/relevance as follows:
725 live_p = false
726 relevant = vect_used_by_reduction
727 This is because we distinguish between two kinds of relevant stmts -
728 those that are used by a reduction computation, and those that are
729 (also) used by a regular computation. This allows us later on to
730 identify stmts that are used solely by a reduction, and therefore the
731 order of the results that they produce does not have to be kept. */
733 def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
734 tmp_relevant = relevant;
735 switch (def_type)
737 case vect_reduction_def:
738 switch (tmp_relevant)
740 case vect_unused_in_scope:
741 relevant = vect_used_by_reduction;
742 break;
744 case vect_used_by_reduction:
745 if (gimple_code (stmt) == GIMPLE_PHI)
746 break;
747 /* fall through */
749 default:
750 if (dump_enabled_p ())
751 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
752 "unsupported use of reduction.\n");
753 return false;
756 live_p = false;
757 break;
759 case vect_nested_cycle:
760 if (tmp_relevant != vect_unused_in_scope
761 && tmp_relevant != vect_used_in_outer_by_reduction
762 && tmp_relevant != vect_used_in_outer)
764 if (dump_enabled_p ())
765 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
766 "unsupported use of nested cycle.\n");
768 return false;
771 live_p = false;
772 break;
774 case vect_double_reduction_def:
775 if (tmp_relevant != vect_unused_in_scope
776 && tmp_relevant != vect_used_by_reduction)
778 if (dump_enabled_p ())
779 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
780 "unsupported use of double reduction.\n");
782 return false;
785 live_p = false;
786 break;
788 default:
789 break;
792 if (is_pattern_stmt_p (stmt_vinfo))
794 /* Pattern statements are not inserted into the code, so
795 FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
796 have to scan the RHS or function arguments instead. */
797 if (is_gimple_assign (stmt))
799 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
800 tree op = gimple_assign_rhs1 (stmt);
802 i = 1;
803 if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
805 if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
806 live_p, relevant, &worklist, false)
807 || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
808 live_p, relevant, &worklist, false))
809 return false;
810 i = 2;
812 for (; i < gimple_num_ops (stmt); i++)
814 op = gimple_op (stmt, i);
815 if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
816 &worklist, false))
817 return false;
820 else if (is_gimple_call (stmt))
822 for (i = 0; i < gimple_call_num_args (stmt); i++)
824 tree arg = gimple_call_arg (stmt, i);
825 if (!process_use (stmt, arg, loop_vinfo, live_p, relevant,
826 &worklist, false))
827 return false;
831 else
832 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
834 tree op = USE_FROM_PTR (use_p);
835 if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
836 &worklist, false))
837 return false;
840 if (STMT_VINFO_GATHER_P (stmt_vinfo))
842 tree off;
843 tree decl = vect_check_gather (stmt, loop_vinfo, NULL, &off, NULL);
844 gcc_assert (decl);
845 if (!process_use (stmt, off, loop_vinfo, live_p, relevant,
846 &worklist, true))
847 return false;
849 } /* while worklist */
851 return true;
855 /* Function vect_model_simple_cost.
857 Models cost for simple operations, i.e. those that only emit ncopies of a
858 single op. Right now, this does not account for multiple insns that could
859 be generated for the single vector op. We will handle that shortly. */
861 void
862 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
863 enum vect_def_type *dt,
864 stmt_vector_for_cost *prologue_cost_vec,
865 stmt_vector_for_cost *body_cost_vec)
867 int i;
868 int inside_cost = 0, prologue_cost = 0;
870 /* The SLP costs were already calculated during SLP tree build. */
871 if (PURE_SLP_STMT (stmt_info))
872 return;
874 /* FORNOW: Assuming a maximum of 2 args per stmt. */
875 for (i = 0; i < 2; i++)
876 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
877 prologue_cost += record_stmt_cost (prologue_cost_vec, 1, vector_stmt,
878 stmt_info, 0, vect_prologue);
880 /* Pass the inside-of-loop statements to the target-specific cost model. */
881 inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
882 stmt_info, 0, vect_body);
884 if (dump_enabled_p ())
885 dump_printf_loc (MSG_NOTE, vect_location,
886 "vect_model_simple_cost: inside_cost = %d, "
887 "prologue_cost = %d .\n", inside_cost, prologue_cost);
891 /* Model cost for type demotion and promotion operations. PWR is normally
892 zero for single-step promotions and demotions. It will be one if
893 two-step promotion/demotion is required, and so on. Each additional
894 step doubles the number of instructions required. */
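/* For instance, a two-step promotion (PWR == 1) is costed by the loop
   below as vect_pow2 (1) + vect_pow2 (2) = 6 vec_promote_demote
   operations per copy, while a two-step demotion is costed as
   vect_pow2 (0) + vect_pow2 (1) = 3.  */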
896 static void
897 vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
898 enum vect_def_type *dt, int pwr)
900 int i, tmp;
901 int inside_cost = 0, prologue_cost = 0;
902 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
903 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
904 void *target_cost_data;
906 /* The SLP costs were already calculated during SLP tree build. */
907 if (PURE_SLP_STMT (stmt_info))
908 return;
910 if (loop_vinfo)
911 target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
912 else
913 target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);
915 for (i = 0; i < pwr + 1; i++)
917 tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
918 (i + 1) : i;
919 inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
920 vec_promote_demote, stmt_info, 0,
921 vect_body);
924 /* FORNOW: Assuming a maximum of 2 args per stmt. */
925 for (i = 0; i < 2; i++)
926 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
927 prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
928 stmt_info, 0, vect_prologue);
930 if (dump_enabled_p ())
931 dump_printf_loc (MSG_NOTE, vect_location,
932 "vect_model_promotion_demotion_cost: inside_cost = %d, "
933 "prologue_cost = %d .\n", inside_cost, prologue_cost);
936 /* Function vect_cost_group_size
938 For grouped load or store, return the group_size only if it is the first
939 load or store of a group, else return 1. This ensures that group size is
940 only returned once per group. */
942 static int
943 vect_cost_group_size (stmt_vec_info stmt_info)
945 gimple first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
947 if (first_stmt == STMT_VINFO_STMT (stmt_info))
948 return GROUP_SIZE (stmt_info);
950 return 1;
954 /* Function vect_model_store_cost
956 Models cost for stores. In the case of grouped accesses, one access
957 has the overhead of the grouped access attributed to it. */
959 void
960 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
961 bool store_lanes_p, enum vect_def_type dt,
962 slp_tree slp_node,
963 stmt_vector_for_cost *prologue_cost_vec,
964 stmt_vector_for_cost *body_cost_vec)
966 int group_size;
967 unsigned int inside_cost = 0, prologue_cost = 0;
968 struct data_reference *first_dr;
969 gimple first_stmt;
971 if (dt == vect_constant_def || dt == vect_external_def)
972 prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
973 stmt_info, 0, vect_prologue);
975 /* Grouped access? */
976 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
978 if (slp_node)
980 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
981 group_size = 1;
983 else
985 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
986 group_size = vect_cost_group_size (stmt_info);
989 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
991 /* Not a grouped access. */
992 else
994 group_size = 1;
995 first_dr = STMT_VINFO_DATA_REF (stmt_info);
998 /* We assume that the cost of a single store-lanes instruction is
999 equivalent to the cost of GROUP_SIZE separate stores. If a grouped
1000 access is instead being provided by a permute-and-store operation,
1001 include the cost of the permutes. */
1002 if (!store_lanes_p && group_size > 1)
1004 /* Uses high and low interleave or shuffle operations for each
1005 needed permute. */
1006 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
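/* E.g. storing a group of 4 vectors with ncopies == 1 is costed as
   ceil_log2 (4) * 4 = 8 vec_perm operations.  */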
1007 inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
1008 stmt_info, 0, vect_body);
1010 if (dump_enabled_p ())
1011 dump_printf_loc (MSG_NOTE, vect_location,
1012 "vect_model_store_cost: strided group_size = %d .\n",
1013 group_size);
1016 /* Costs of the stores. */
1017 vect_get_store_cost (first_dr, ncopies, &inside_cost, body_cost_vec);
1019 if (dump_enabled_p ())
1020 dump_printf_loc (MSG_NOTE, vect_location,
1021 "vect_model_store_cost: inside_cost = %d, "
1022 "prologue_cost = %d .\n", inside_cost, prologue_cost);
1026 /* Calculate cost of DR's memory access. */
1027 void
1028 vect_get_store_cost (struct data_reference *dr, int ncopies,
1029 unsigned int *inside_cost,
1030 stmt_vector_for_cost *body_cost_vec)
1032 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
1033 gimple stmt = DR_STMT (dr);
1034 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1036 switch (alignment_support_scheme)
1038 case dr_aligned:
1040 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1041 vector_store, stmt_info, 0,
1042 vect_body);
1044 if (dump_enabled_p ())
1045 dump_printf_loc (MSG_NOTE, vect_location,
1046 "vect_model_store_cost: aligned.\n");
1047 break;
1050 case dr_unaligned_supported:
1052 /* Here, we assign an additional cost for the unaligned store. */
1053 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1054 unaligned_store, stmt_info,
1055 DR_MISALIGNMENT (dr), vect_body);
1056 if (dump_enabled_p ())
1057 dump_printf_loc (MSG_NOTE, vect_location,
1058 "vect_model_store_cost: unaligned supported by "
1059 "hardware.\n");
1060 break;
1063 case dr_unaligned_unsupported:
1065 *inside_cost = VECT_MAX_COST;
1067 if (dump_enabled_p ())
1068 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1069 "vect_model_store_cost: unsupported access.\n");
1070 break;
1073 default:
1074 gcc_unreachable ();
1079 /* Function vect_model_load_cost
1081 Models cost for loads. In the case of grouped accesses, the last access
1082 has the overhead of the grouped access attributed to it. Since unaligned
1083 accesses are supported for loads, we also account for the costs of the
1084 access scheme chosen. */
1086 void
1087 vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
1088 bool load_lanes_p, slp_tree slp_node,
1089 stmt_vector_for_cost *prologue_cost_vec,
1090 stmt_vector_for_cost *body_cost_vec)
1092 int group_size;
1093 gimple first_stmt;
1094 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
1095 unsigned int inside_cost = 0, prologue_cost = 0;
1097 /* Grouped accesses? */
1098 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
1099 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && first_stmt && !slp_node)
1101 group_size = vect_cost_group_size (stmt_info);
1102 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
1104 /* Not a grouped access. */
1105 else
1107 group_size = 1;
1108 first_dr = dr;
1111 /* We assume that the cost of a single load-lanes instruction is
1112 equivalent to the cost of GROUP_SIZE separate loads. If a grouped
1113 access is instead being provided by a load-and-permute operation,
1114 include the cost of the permutes. */
1115 if (!load_lanes_p && group_size > 1
1116 && !STMT_VINFO_STRIDE_LOAD_P (stmt_info))
1118 /* Uses even and odd extract operations or shuffle operations
1119 for each needed permute. */
1120 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
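/* Likewise, e.g. loading a group of 8 vectors with ncopies == 1 is
   costed as ceil_log2 (8) * 8 = 24 vec_perm operations.  */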
1121 inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
1122 stmt_info, 0, vect_body);
1124 if (dump_enabled_p ())
1125 dump_printf_loc (MSG_NOTE, vect_location,
1126 "vect_model_load_cost: strided group_size = %d .\n",
1127 group_size);
1130 /* The loads themselves. */
1131 if (STMT_VINFO_STRIDE_LOAD_P (stmt_info)
1132 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
1134 /* N scalar loads plus gathering them into a vector. */
1135 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1136 inside_cost += record_stmt_cost (body_cost_vec,
1137 ncopies * TYPE_VECTOR_SUBPARTS (vectype),
1138 scalar_load, stmt_info, 0, vect_body);
1140 else
1141 vect_get_load_cost (first_dr, ncopies,
1142 ((!STMT_VINFO_GROUPED_ACCESS (stmt_info))
1143 || group_size > 1 || slp_node),
1144 &inside_cost, &prologue_cost,
1145 prologue_cost_vec, body_cost_vec, true);
1146 if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
1147 inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
1148 stmt_info, 0, vect_body);
1150 if (dump_enabled_p ())
1151 dump_printf_loc (MSG_NOTE, vect_location,
1152 "vect_model_load_cost: inside_cost = %d, "
1153 "prologue_cost = %d .\n", inside_cost, prologue_cost);
1157 /* Calculate cost of DR's memory access. */
1158 void
1159 vect_get_load_cost (struct data_reference *dr, int ncopies,
1160 bool add_realign_cost, unsigned int *inside_cost,
1161 unsigned int *prologue_cost,
1162 stmt_vector_for_cost *prologue_cost_vec,
1163 stmt_vector_for_cost *body_cost_vec,
1164 bool record_prologue_costs)
1166 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
1167 gimple stmt = DR_STMT (dr);
1168 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1170 switch (alignment_support_scheme)
1172 case dr_aligned:
1174 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1175 stmt_info, 0, vect_body);
1177 if (dump_enabled_p ())
1178 dump_printf_loc (MSG_NOTE, vect_location,
1179 "vect_model_load_cost: aligned.\n");
1181 break;
1183 case dr_unaligned_supported:
1185 /* Here, we assign an additional cost for the unaligned load. */
1186 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1187 unaligned_load, stmt_info,
1188 DR_MISALIGNMENT (dr), vect_body);
1190 if (dump_enabled_p ())
1191 dump_printf_loc (MSG_NOTE, vect_location,
1192 "vect_model_load_cost: unaligned supported by "
1193 "hardware.\n");
1195 break;
1197 case dr_explicit_realign:
1199 *inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
1200 vector_load, stmt_info, 0, vect_body);
1201 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1202 vec_perm, stmt_info, 0, vect_body);
1204 /* FIXME: If the misalignment remains fixed across the iterations of
1205 the containing loop, the following cost should be added to the
1206 prologue costs. */
1207 if (targetm.vectorize.builtin_mask_for_load)
1208 *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
1209 stmt_info, 0, vect_body);
1211 if (dump_enabled_p ())
1212 dump_printf_loc (MSG_NOTE, vect_location,
1213 "vect_model_load_cost: explicit realign\n");
1215 break;
1217 case dr_explicit_realign_optimized:
1219 if (dump_enabled_p ())
1220 dump_printf_loc (MSG_NOTE, vect_location,
1221 "vect_model_load_cost: unaligned software "
1222 "pipelined.\n");
1224 /* Unaligned software pipeline has a load of an address, an initial
1225 load, and possibly a mask operation to "prime" the loop. However,
1226 if this is an access in a group of loads, which provide grouped
1227 access, then the above cost should only be considered for one
1228 access in the group. Inside the loop, there is a load op
1229 and a realignment op. */
1231 if (add_realign_cost && record_prologue_costs)
1233 *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
1234 vector_stmt, stmt_info,
1235 0, vect_prologue);
1236 if (targetm.vectorize.builtin_mask_for_load)
1237 *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
1238 vector_stmt, stmt_info,
1239 0, vect_prologue);
1242 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1243 stmt_info, 0, vect_body);
1244 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
1245 stmt_info, 0, vect_body);
1247 if (dump_enabled_p ())
1248 dump_printf_loc (MSG_NOTE, vect_location,
1249 "vect_model_load_cost: explicit realign optimized"
1250 "\n");
1252 break;
1255 case dr_unaligned_unsupported:
1257 *inside_cost = VECT_MAX_COST;
1259 if (dump_enabled_p ())
1260 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1261 "vect_model_load_cost: unsupported access.\n");
1262 break;
1265 default:
1266 gcc_unreachable ();
1270 /* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
1271 the loop preheader for the vectorized stmt STMT. */
1273 static void
1274 vect_init_vector_1 (gimple stmt, gimple new_stmt, gimple_stmt_iterator *gsi)
1276 if (gsi)
1277 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1278 else
1280 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1281 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1283 if (loop_vinfo)
1285 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1286 basic_block new_bb;
1287 edge pe;
1289 if (nested_in_vect_loop_p (loop, stmt))
1290 loop = loop->inner;
1292 pe = loop_preheader_edge (loop);
1293 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
1294 gcc_assert (!new_bb);
1296 else
1298 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
1299 basic_block bb;
1300 gimple_stmt_iterator gsi_bb_start;
1302 gcc_assert (bb_vinfo);
1303 bb = BB_VINFO_BB (bb_vinfo);
1304 gsi_bb_start = gsi_after_labels (bb);
1305 gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
1309 if (dump_enabled_p ())
1311 dump_printf_loc (MSG_NOTE, vect_location,
1312 "created new init_stmt: ");
1313 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
1317 /* Function vect_init_vector.
1319 Insert a new stmt (INIT_STMT) that initializes a new variable of type
1320 TYPE with the value VAL. If TYPE is a vector type and VAL does not have
1321 vector type a vector with all elements equal to VAL is created first.
1322 Place the initialization at BSI if it is not NULL. Otherwise, place the
1323 initialization at the loop preheader.
1324 Return the DEF of INIT_STMT.
1325 It will be used in the vectorization of STMT. */
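/* For instance, for a V4SI TYPE and the scalar constant VAL == 5 this
   builds vect_cst_ = { 5, 5, 5, 5 } and emits the initialization at GSI,
   or in the loop preheader when GSI is NULL.  */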
1327 tree
1328 vect_init_vector (gimple stmt, tree val, tree type, gimple_stmt_iterator *gsi)
1330 tree new_var;
1331 gimple init_stmt;
1332 tree vec_oprnd;
1333 tree new_temp;
1335 if (TREE_CODE (type) == VECTOR_TYPE
1336 && TREE_CODE (TREE_TYPE (val)) != VECTOR_TYPE)
1338 if (!types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
1340 if (CONSTANT_CLASS_P (val))
1341 val = fold_unary (VIEW_CONVERT_EXPR, TREE_TYPE (type), val);
1342 else
1344 new_temp = make_ssa_name (TREE_TYPE (type));
1345 init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
1346 vect_init_vector_1 (stmt, init_stmt, gsi);
1347 val = new_temp;
1350 val = build_vector_from_val (type, val);
1353 new_var = vect_get_new_vect_var (type, vect_simple_var, "cst_");
1354 init_stmt = gimple_build_assign (new_var, val);
1355 new_temp = make_ssa_name (new_var, init_stmt);
1356 gimple_assign_set_lhs (init_stmt, new_temp);
1357 vect_init_vector_1 (stmt, init_stmt, gsi);
1358 vec_oprnd = gimple_assign_lhs (init_stmt);
1359 return vec_oprnd;
1363 /* Function vect_get_vec_def_for_operand.
1365 OP is an operand in STMT. This function returns a (vector) def that will be
1366 used in the vectorized stmt for STMT.
1368 In the case that OP is an SSA_NAME which is defined in the loop, then
1369 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
1371 In case OP is an invariant or constant, a new stmt that creates a vector def
1372 needs to be introduced. */
1374 tree
1375 vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
1377 tree vec_oprnd;
1378 gimple vec_stmt;
1379 gimple def_stmt;
1380 stmt_vec_info def_stmt_info = NULL;
1381 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1382 unsigned int nunits;
1383 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1384 tree def;
1385 enum vect_def_type dt;
1386 bool is_simple_use;
1387 tree vector_type;
1389 if (dump_enabled_p ())
1391 dump_printf_loc (MSG_NOTE, vect_location,
1392 "vect_get_vec_def_for_operand: ");
1393 dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
1394 dump_printf (MSG_NOTE, "\n");
1397 is_simple_use = vect_is_simple_use (op, stmt, loop_vinfo, NULL,
1398 &def_stmt, &def, &dt);
1399 gcc_assert (is_simple_use);
1400 if (dump_enabled_p ())
1402 int loc_printed = 0;
1403 if (def)
1405 dump_printf_loc (MSG_NOTE, vect_location, "def = ");
1406 loc_printed = 1;
1407 dump_generic_expr (MSG_NOTE, TDF_SLIM, def);
1408 dump_printf (MSG_NOTE, "\n");
1410 if (def_stmt)
1412 if (loc_printed)
1413 dump_printf (MSG_NOTE, " def_stmt = ");
1414 else
1415 dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = ");
1416 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
1420 switch (dt)
1422 /* Case 1: operand is a constant. */
1423 case vect_constant_def:
1425 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
1426 gcc_assert (vector_type);
1427 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
1429 if (scalar_def)
1430 *scalar_def = op;
1432 /* Create 'vect_cst_ = {cst,cst,...,cst}' */
1433 if (dump_enabled_p ())
1434 dump_printf_loc (MSG_NOTE, vect_location,
1435 "Create vector_cst. nunits = %d\n", nunits);
1437 return vect_init_vector (stmt, op, vector_type, NULL);
1440 /* Case 2: operand is defined outside the loop - loop invariant. */
1441 case vect_external_def:
1443 vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
1444 gcc_assert (vector_type);
1446 if (scalar_def)
1447 *scalar_def = def;
1449 /* Create 'vec_inv = {inv,inv,..,inv}' */
1450 if (dump_enabled_p ())
1451 dump_printf_loc (MSG_NOTE, vect_location, "Create vector_inv.\n");
1453 return vect_init_vector (stmt, def, vector_type, NULL);
1456 /* Case 3: operand is defined inside the loop. */
1457 case vect_internal_def:
1459 if (scalar_def)
1460 *scalar_def = NULL/* FIXME tuples: def_stmt*/;
1462 /* Get the def from the vectorized stmt. */
1463 def_stmt_info = vinfo_for_stmt (def_stmt);
1465 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1466 /* Get vectorized pattern statement. */
1467 if (!vec_stmt
1468 && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
1469 && !STMT_VINFO_RELEVANT (def_stmt_info))
1470 vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
1471 STMT_VINFO_RELATED_STMT (def_stmt_info)));
1472 gcc_assert (vec_stmt);
1473 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1474 vec_oprnd = PHI_RESULT (vec_stmt);
1475 else if (is_gimple_call (vec_stmt))
1476 vec_oprnd = gimple_call_lhs (vec_stmt);
1477 else
1478 vec_oprnd = gimple_assign_lhs (vec_stmt);
1479 return vec_oprnd;
1482 /* Case 4: operand is defined by a loop header phi - reduction */
1483 case vect_reduction_def:
1484 case vect_double_reduction_def:
1485 case vect_nested_cycle:
1487 struct loop *loop;
1489 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1490 loop = (gimple_bb (def_stmt))->loop_father;
1492 /* Get the def before the loop */
1493 op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
1494 return get_initial_def_for_reduction (stmt, op, scalar_def);
1497 /* Case 5: operand is defined by loop-header phi - induction. */
1498 case vect_induction_def:
1500 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1502 /* Get the def from the vectorized stmt. */
1503 def_stmt_info = vinfo_for_stmt (def_stmt);
1504 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1505 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1506 vec_oprnd = PHI_RESULT (vec_stmt);
1507 else
1508 vec_oprnd = gimple_get_lhs (vec_stmt);
1509 return vec_oprnd;
1512 default:
1513 gcc_unreachable ();
1518 /* Function vect_get_vec_def_for_stmt_copy
1520 Return a vector-def for an operand. This function is used when the
1521 vectorized stmt to be created (by the caller to this function) is a "copy"
1522 created in case the vectorized result cannot fit in one vector, and several
1523 copies of the vector-stmt are required. In this case the vector-def is
1524 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1525 of the stmt that defines VEC_OPRND.
1526 DT is the type of the vector def VEC_OPRND.
1528 Context:
1529 In case the vectorization factor (VF) is bigger than the number
1530 of elements that can fit in a vectype (nunits), we have to generate
1531 more than one vector stmt to vectorize the scalar stmt. This situation
1532 arises when there are multiple data-types operated upon in the loop; the
1533 smallest data-type determines the VF, and as a result, when vectorizing
1534 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1535 vector stmt (each computing a vector of 'nunits' results, and together
1536 computing 'VF' results in each iteration). This function is called when
1537 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1538 which VF=16 and nunits=4, so the number of copies required is 4):
1540 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1542 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1543 VS1.1: vx.1 = memref1 VS1.2
1544 VS1.2: vx.2 = memref2 VS1.3
1545 VS1.3: vx.3 = memref3
1547 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1548 VSnew.1: vz1 = vx.1 + ... VSnew.2
1549 VSnew.2: vz2 = vx.2 + ... VSnew.3
1550 VSnew.3: vz3 = vx.3 + ...
1552 The vectorization of S1 is explained in vectorizable_load.
1553 The vectorization of S2:
1554 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1555 the function 'vect_get_vec_def_for_operand' is called to
1556 get the relevant vector-def for each operand of S2. For operand x it
1557 returns the vector-def 'vx.0'.
1559 To create the remaining copies of the vector-stmt (VSnew.j), this
1560 function is called to get the relevant vector-def for each operand. It is
1561 obtained from the respective VS1.j stmt, which is recorded in the
1562 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1564 For example, to obtain the vector-def 'vx.1' in order to create the
1565 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1566 Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
1567 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1568 and return its def ('vx.1').
1569 Overall, to create the above sequence this function will be called 3 times:
1570 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1571 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1572 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1574 tree
1575 vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1577 gimple vec_stmt_for_operand;
1578 stmt_vec_info def_stmt_info;
1580 /* Do nothing; can reuse same def. */
1581 if (dt == vect_external_def || dt == vect_constant_def )
1582 return vec_oprnd;
1584 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1585 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1586 gcc_assert (def_stmt_info);
1587 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1588 gcc_assert (vec_stmt_for_operand);
1589 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1590 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1591 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1592 else
1593 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1594 return vec_oprnd;
1598 /* Get vectorized definitions for the operands to create a copy of an original
1599 stmt. See vect_get_vec_def_for_stmt_copy () for details. */
1601 static void
1602 vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
1603 vec<tree> *vec_oprnds0,
1604 vec<tree> *vec_oprnds1)
1606 tree vec_oprnd = vec_oprnds0->pop ();
1608 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
1609 vec_oprnds0->quick_push (vec_oprnd);
1611 if (vec_oprnds1 && vec_oprnds1->length ())
1613 vec_oprnd = vec_oprnds1->pop ();
1614 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
1615 vec_oprnds1->quick_push (vec_oprnd);
1620 /* Get vectorized definitions for OP0 and OP1.
1621 REDUC_INDEX is the index of reduction operand in case of reduction,
1622 and -1 otherwise. */
1624 void
1625 vect_get_vec_defs (tree op0, tree op1, gimple stmt,
1626 vec<tree> *vec_oprnds0,
1627 vec<tree> *vec_oprnds1,
1628 slp_tree slp_node, int reduc_index)
1630 if (slp_node)
1632 int nops = (op1 == NULL_TREE) ? 1 : 2;
1633 auto_vec<tree> ops (nops);
1634 auto_vec<vec<tree> > vec_defs (nops);
1636 ops.quick_push (op0);
1637 if (op1)
1638 ops.quick_push (op1);
1640 vect_get_slp_defs (ops, slp_node, &vec_defs, reduc_index);
1642 *vec_oprnds0 = vec_defs[0];
1643 if (op1)
1644 *vec_oprnds1 = vec_defs[1];
1646 else
1648 tree vec_oprnd;
1650 vec_oprnds0->create (1);
1651 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
1652 vec_oprnds0->quick_push (vec_oprnd);
1654 if (op1)
1656 vec_oprnds1->create (1);
1657 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
1658 vec_oprnds1->quick_push (vec_oprnd);
1664 /* Function vect_finish_stmt_generation.
1666 Insert a new stmt. */
1668 void
1669 vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
1670 gimple_stmt_iterator *gsi)
1672 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1673 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1674 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1676 gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
1678 if (!gsi_end_p (*gsi)
1679 && gimple_has_mem_ops (vec_stmt))
1681 gimple at_stmt = gsi_stmt (*gsi);
1682 tree vuse = gimple_vuse (at_stmt);
1683 if (vuse && TREE_CODE (vuse) == SSA_NAME)
1685 tree vdef = gimple_vdef (at_stmt);
1686 gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
1687 /* If we have an SSA vuse and insert a store, update virtual
1688 SSA form to avoid triggering the renamer. Do so only
1689 if we can easily see all uses - which is what almost always
1690 happens with the way vectorized stmts are inserted. */
1691 if ((vdef && TREE_CODE (vdef) == SSA_NAME)
1692 && ((is_gimple_assign (vec_stmt)
1693 && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
1694 || (is_gimple_call (vec_stmt)
1695 && !(gimple_call_flags (vec_stmt)
1696 & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
1698 tree new_vdef = copy_ssa_name (vuse, vec_stmt);
1699 gimple_set_vdef (vec_stmt, new_vdef);
1700 SET_USE (gimple_vuse_op (at_stmt), new_vdef);
1704 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1706 set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
1707 bb_vinfo));
1709 if (dump_enabled_p ())
1711 dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
1712 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
1715 gimple_set_location (vec_stmt, gimple_location (stmt));
1717 /* While EH edges will generally prevent vectorization, stmt might
1718 e.g. be in a must-not-throw region. Ensure newly created stmts
1719 that could throw are part of the same region. */
1720 int lp_nr = lookup_stmt_eh_lp (stmt);
1721 if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
1722 add_stmt_to_eh_lp (vec_stmt, lp_nr);
1725 /* Checks if CALL can be vectorized in type VECTYPE. Returns
1726 a function declaration if the target has a vectorized version
1727 of the function, or NULL_TREE if the function cannot be vectorized. */
1729 tree
1730 vectorizable_function (gcall *call, tree vectype_out, tree vectype_in)
1732 tree fndecl = gimple_call_fndecl (call);
1734 /* We only handle functions that do not read or clobber memory -- i.e.
1735 const or novops ones. */
1736 if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
1737 return NULL_TREE;
1739 if (!fndecl
1740 || TREE_CODE (fndecl) != FUNCTION_DECL
1741 || !DECL_BUILT_IN (fndecl))
1742 return NULL_TREE;
1744 return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
1745 vectype_in);
1749 static tree permute_vec_elements (tree, tree, tree, gimple,
1750 gimple_stmt_iterator *);
1753 /* Function vectorizable_mask_load_store.
1755 Check if STMT performs a conditional load or store that can be vectorized.
1756 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1757 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
1758 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1760 static bool
1761 vectorizable_mask_load_store (gimple stmt, gimple_stmt_iterator *gsi,
1762 gimple *vec_stmt, slp_tree slp_node)
1764 tree vec_dest = NULL;
1765 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1766 stmt_vec_info prev_stmt_info;
1767 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1768 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1769 bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
1770 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
1771 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1772 tree elem_type;
1773 gimple new_stmt;
1774 tree dummy;
1775 tree dataref_ptr = NULL_TREE;
1776 gimple ptr_incr;
1777 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
1778 int ncopies;
1779 int i, j;
1780 bool inv_p;
1781 tree gather_base = NULL_TREE, gather_off = NULL_TREE;
1782 tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
1783 int gather_scale = 1;
1784 enum vect_def_type gather_dt = vect_unknown_def_type;
1785 bool is_store;
1786 tree mask;
1787 gimple def_stmt;
1788 tree def;
1789 enum vect_def_type dt;
1791 if (slp_node != NULL)
1792 return false;
1794 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
1795 gcc_assert (ncopies >= 1);
1797 is_store = gimple_call_internal_fn (stmt) == IFN_MASK_STORE;
1798 mask = gimple_call_arg (stmt, 2);
1799 if (TYPE_PRECISION (TREE_TYPE (mask))
1800 != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype))))
1801 return false;
1803 /* FORNOW. This restriction should be relaxed. */
1804 if (nested_in_vect_loop && ncopies > 1)
1806 if (dump_enabled_p ())
1807 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1808 "multiple types in nested loop.");
1809 return false;
1812 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1813 return false;
1815 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1816 return false;
1818 if (!STMT_VINFO_DATA_REF (stmt_info))
1819 return false;
1821 elem_type = TREE_TYPE (vectype);
1823 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1824 return false;
1826 if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
1827 return false;
1829 if (STMT_VINFO_GATHER_P (stmt_info))
1831 gimple def_stmt;
1832 tree def;
1833 gather_decl = vect_check_gather (stmt, loop_vinfo, &gather_base,
1834 &gather_off, &gather_scale);
1835 gcc_assert (gather_decl);
1836 if (!vect_is_simple_use_1 (gather_off, NULL, loop_vinfo, NULL,
1837 &def_stmt, &def, &gather_dt,
1838 &gather_off_vectype))
1840 if (dump_enabled_p ())
1841 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1842 "gather index use not simple.");
1843 return false;
1846 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
1847 tree masktype
1848 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
1849 if (TREE_CODE (masktype) == INTEGER_TYPE)
1851 if (dump_enabled_p ())
1852 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1853 "masked gather with integer mask not supported.");
1854 return false;
1857 else if (tree_int_cst_compare (nested_in_vect_loop
1858 ? STMT_VINFO_DR_STEP (stmt_info)
1859 : DR_STEP (dr), size_zero_node) <= 0)
1860 return false;
1861 else if (!VECTOR_MODE_P (TYPE_MODE (vectype))
1862 || !can_vec_mask_load_store_p (TYPE_MODE (vectype), !is_store))
1863 return false;
1865 if (TREE_CODE (mask) != SSA_NAME)
1866 return false;
1868 if (!vect_is_simple_use (mask, stmt, loop_vinfo, NULL,
1869 &def_stmt, &def, &dt))
1870 return false;
1872 if (is_store)
1874 tree rhs = gimple_call_arg (stmt, 3);
1875 if (!vect_is_simple_use (rhs, stmt, loop_vinfo, NULL,
1876 &def_stmt, &def, &dt))
1877 return false;
1880 if (!vec_stmt) /* transformation not required. */
1882 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
1883 if (is_store)
1884 vect_model_store_cost (stmt_info, ncopies, false, dt,
1885 NULL, NULL, NULL);
1886 else
1887 vect_model_load_cost (stmt_info, ncopies, false, NULL, NULL, NULL);
1888 return true;
1891 /** Transform. **/
1893 if (STMT_VINFO_GATHER_P (stmt_info))
1895 tree vec_oprnd0 = NULL_TREE, op;
1896 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
1897 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
1898 tree ptr, vec_mask = NULL_TREE, mask_op = NULL_TREE, var, scale;
1899 tree perm_mask = NULL_TREE, prev_res = NULL_TREE;
1900 tree mask_perm_mask = NULL_TREE;
1901 edge pe = loop_preheader_edge (loop);
1902 gimple_seq seq;
1903 basic_block new_bb;
1904 enum { NARROW, NONE, WIDEN } modifier;
1905 int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);
1907 rettype = TREE_TYPE (TREE_TYPE (gather_decl));
1908 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1909 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1910 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1911 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1912 scaletype = TREE_VALUE (arglist);
1913 gcc_checking_assert (types_compatible_p (srctype, rettype)
1914 && types_compatible_p (srctype, masktype));
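      /* Decide how the data vector lines up with the gather-offset vector:
	 equal lane counts need no permutation (NONE); if the offset vector
	 has twice as many lanes, one offset vector serves two gathers and
	 its upper half is extracted with PERM_MASK on odd copies (WIDEN);
	 if it has half as many lanes, two gather results are merged into
	 one data vector and NCOPIES is doubled (NARROW).  */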
1916 if (nunits == gather_off_nunits)
1917 modifier = NONE;
1918 else if (nunits == gather_off_nunits / 2)
1920 unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
1921 modifier = WIDEN;
1923 for (i = 0; i < gather_off_nunits; ++i)
1924 sel[i] = i | nunits;
1926 perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
1928 else if (nunits == gather_off_nunits * 2)
1930 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
1931 modifier = NARROW;
1933 for (i = 0; i < nunits; ++i)
1934 sel[i] = i < gather_off_nunits
1935 ? i : i + nunits - gather_off_nunits;
1937 perm_mask = vect_gen_perm_mask_checked (vectype, sel);
1938 ncopies *= 2;
1939 for (i = 0; i < nunits; ++i)
1940 sel[i] = i | gather_off_nunits;
1941 mask_perm_mask = vect_gen_perm_mask_checked (masktype, sel);
1943 else
1944 gcc_unreachable ();
1946 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
1948 ptr = fold_convert (ptrtype, gather_base);
1949 if (!is_gimple_min_invariant (ptr))
1951 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
1952 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
1953 gcc_assert (!new_bb);
1956 scale = build_int_cst (scaletype, gather_scale);
1958 prev_stmt_info = NULL;
1959 for (j = 0; j < ncopies; ++j)
1961 if (modifier == WIDEN && (j & 1))
1962 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
1963 perm_mask, stmt, gsi);
1964 else if (j == 0)
1965 op = vec_oprnd0
1966 = vect_get_vec_def_for_operand (gather_off, stmt, NULL);
1967 else
1968 op = vec_oprnd0
1969 = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);
1971 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
1973 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
1974 == TYPE_VECTOR_SUBPARTS (idxtype));
1975 var = vect_get_new_vect_var (idxtype, vect_simple_var, NULL);
1976 var = make_ssa_name (var);
1977 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
1978 new_stmt
1979 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
1980 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1981 op = var;
1984 if (mask_perm_mask && (j & 1))
1985 mask_op = permute_vec_elements (mask_op, mask_op,
1986 mask_perm_mask, stmt, gsi);
1987 else
1989 if (j == 0)
1990 vec_mask = vect_get_vec_def_for_operand (mask, stmt, NULL);
1991 else
1993 vect_is_simple_use (vec_mask, NULL, loop_vinfo, NULL,
1994 &def_stmt, &def, &dt);
1995 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
1998 mask_op = vec_mask;
1999 if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
2001 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op))
2002 == TYPE_VECTOR_SUBPARTS (masktype));
2003 var = vect_get_new_vect_var (masktype, vect_simple_var,
2004 NULL);
2005 var = make_ssa_name (var);
2006 mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
2007 new_stmt
2008 = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op);
2009 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2010 mask_op = var;
2014 new_stmt
2015 = gimple_build_call (gather_decl, 5, mask_op, ptr, op, mask_op,
2016 scale);
2018 if (!useless_type_conversion_p (vectype, rettype))
2020 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
2021 == TYPE_VECTOR_SUBPARTS (rettype));
2022 var = vect_get_new_vect_var (rettype, vect_simple_var, NULL);
2023 op = make_ssa_name (var, new_stmt);
2024 gimple_call_set_lhs (new_stmt, op);
2025 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2026 var = make_ssa_name (vec_dest);
2027 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
2028 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
2030 else
2032 var = make_ssa_name (vec_dest, new_stmt);
2033 gimple_call_set_lhs (new_stmt, var);
2036 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2038 if (modifier == NARROW)
2040 if ((j & 1) == 0)
2042 prev_res = var;
2043 continue;
2045 var = permute_vec_elements (prev_res, var,
2046 perm_mask, stmt, gsi);
2047 new_stmt = SSA_NAME_DEF_STMT (var);
2050 if (prev_stmt_info == NULL)
2051 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2052 else
2053 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2054 prev_stmt_info = vinfo_for_stmt (new_stmt);
2057 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
2058 from the IL. */
2059 tree lhs = gimple_call_lhs (stmt);
2060 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
2061 set_vinfo_for_stmt (new_stmt, stmt_info);
2062 set_vinfo_for_stmt (stmt, NULL);
2063 STMT_VINFO_STMT (stmt_info) = new_stmt;
2064 gsi_replace (gsi, new_stmt, true);
2065 return true;
2067 else if (is_store)
2069 tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
2070 prev_stmt_info = NULL;
2071 for (i = 0; i < ncopies; i++)
2073 unsigned align, misalign;
2075 if (i == 0)
2077 tree rhs = gimple_call_arg (stmt, 3);
2078 vec_rhs = vect_get_vec_def_for_operand (rhs, stmt, NULL);
2079 vec_mask = vect_get_vec_def_for_operand (mask, stmt, NULL);
2080 /* We should have caught mismatched types earlier. */
2081 gcc_assert (useless_type_conversion_p (vectype,
2082 TREE_TYPE (vec_rhs)));
2083 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
2084 NULL_TREE, &dummy, gsi,
2085 &ptr_incr, false, &inv_p);
2086 gcc_assert (!inv_p);
2088 else
2090 vect_is_simple_use (vec_rhs, NULL, loop_vinfo, NULL, &def_stmt,
2091 &def, &dt);
2092 vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs);
2093 vect_is_simple_use (vec_mask, NULL, loop_vinfo, NULL, &def_stmt,
2094 &def, &dt);
2095 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2096 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2097 TYPE_SIZE_UNIT (vectype));
2100 align = TYPE_ALIGN_UNIT (vectype);
2101 if (aligned_access_p (dr))
2102 misalign = 0;
2103 else if (DR_MISALIGNMENT (dr) == -1)
2105 align = TYPE_ALIGN_UNIT (elem_type);
2106 misalign = 0;
2108 else
2109 misalign = DR_MISALIGNMENT (dr);
2110 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2111 misalign);
2112 new_stmt
2113 = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
2114 gimple_call_arg (stmt, 1),
2115 vec_mask, vec_rhs);
2116 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2117 if (i == 0)
2118 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2119 else
2120 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2121 prev_stmt_info = vinfo_for_stmt (new_stmt);
2124 else
2126 tree vec_mask = NULL_TREE;
2127 prev_stmt_info = NULL;
2128 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
2129 for (i = 0; i < ncopies; i++)
2131 unsigned align, misalign;
2133 if (i == 0)
2135 vec_mask = vect_get_vec_def_for_operand (mask, stmt, NULL);
2136 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
2137 NULL_TREE, &dummy, gsi,
2138 &ptr_incr, false, &inv_p);
2139 gcc_assert (!inv_p);
2141 else
2143 vect_is_simple_use (vec_mask, NULL, loop_vinfo, NULL, &def_stmt,
2144 &def, &dt);
2145 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2146 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2147 TYPE_SIZE_UNIT (vectype));
2150 align = TYPE_ALIGN_UNIT (vectype);
2151 if (aligned_access_p (dr))
2152 misalign = 0;
2153 else if (DR_MISALIGNMENT (dr) == -1)
2155 align = TYPE_ALIGN_UNIT (elem_type);
2156 misalign = 0;
2158 else
2159 misalign = DR_MISALIGNMENT (dr);
2160 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2161 misalign);
2162 new_stmt
2163 = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
2164 gimple_call_arg (stmt, 1),
2165 vec_mask);
2166 gimple_call_set_lhs (new_stmt, make_ssa_name (vec_dest));
2167 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2168 if (i == 0)
2169 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2170 else
2171 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2172 prev_stmt_info = vinfo_for_stmt (new_stmt);
2176 if (!is_store)
2178 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
2179 from the IL. */
2180 tree lhs = gimple_call_lhs (stmt);
2181 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
2182 set_vinfo_for_stmt (new_stmt, stmt_info);
2183 set_vinfo_for_stmt (stmt, NULL);
2184 STMT_VINFO_STMT (stmt_info) = new_stmt;
2185 gsi_replace (gsi, new_stmt, true);
2188 return true;
2192 /* Function vectorizable_call.
2194 Check if GS performs a function call that can be vectorized.
2195 If VEC_STMT is also passed, vectorize GS: create a vectorized
2196 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2197 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2199 static bool
2200 vectorizable_call (gimple gs, gimple_stmt_iterator *gsi, gimple *vec_stmt,
2201 slp_tree slp_node)
2203 gcall *stmt;
2204 tree vec_dest;
2205 tree scalar_dest;
2206 tree op, type;
2207 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
2208 stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info;
2209 tree vectype_out, vectype_in;
2210 int nunits_in;
2211 int nunits_out;
2212 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2213 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2214 tree fndecl, new_temp, def, rhs_type;
2215 gimple def_stmt;
2216 enum vect_def_type dt[3]
2217 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
2218 gimple new_stmt = NULL;
2219 int ncopies, j;
2220 vec<tree> vargs = vNULL;
2221 enum { NARROW, NONE, WIDEN } modifier;
2222 size_t i, nargs;
2223 tree lhs;
2225 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2226 return false;
2228 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2229 return false;
2231 /* Is GS a vectorizable call? */
2232 stmt = dyn_cast <gcall *> (gs);
2233 if (!stmt)
2234 return false;
2236 if (gimple_call_internal_p (stmt)
2237 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
2238 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
2239 return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
2240 slp_node);
2242 if (gimple_call_lhs (stmt) == NULL_TREE
2243 || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
2244 return false;
2246 gcc_checking_assert (!stmt_can_throw_internal (stmt));
2248 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2250 /* Process function arguments. */
2251 rhs_type = NULL_TREE;
2252 vectype_in = NULL_TREE;
2253 nargs = gimple_call_num_args (stmt);
2255 /* Bail out if the function has more than three arguments; we do not have
2256 interesting builtin functions to vectorize with more than two arguments
2257 except for fma. Zero arguments is not supported either. */
2258 if (nargs == 0 || nargs > 3)
2259 return false;
2261 /* Ignore the argument of IFN_GOMP_SIMD_LANE; it is magic. */
2262 if (gimple_call_internal_p (stmt)
2263 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2265 nargs = 0;
2266 rhs_type = unsigned_type_node;
2269 for (i = 0; i < nargs; i++)
2271 tree opvectype;
2273 op = gimple_call_arg (stmt, i);
2275 /* We can only handle calls with arguments of the same type. */
2276 if (rhs_type
2277 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
2279 if (dump_enabled_p ())
2280 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2281 "argument types differ.\n");
2282 return false;
2284 if (!rhs_type)
2285 rhs_type = TREE_TYPE (op);
2287 if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
2288 &def_stmt, &def, &dt[i], &opvectype))
2290 if (dump_enabled_p ())
2291 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2292 "use not simple.\n");
2293 return false;
2296 if (!vectype_in)
2297 vectype_in = opvectype;
2298 else if (opvectype
2299 && opvectype != vectype_in)
2301 if (dump_enabled_p ())
2302 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2303 "argument vector types differ.\n");
2304 return false;
2307 /* If all arguments are external or constant defs, use a vector type with
2308 the same size as the output vector type. */
2309 if (!vectype_in)
2310 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
2311 if (vec_stmt)
2312 gcc_assert (vectype_in);
2313 if (!vectype_in)
2315 if (dump_enabled_p ())
2317 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2318 "no vectype for scalar type ");
2319 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
2320 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2323 return false;
2326 /* FORNOW */
2327 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2328 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
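   /* Classify the call by lane counts: if the output vector has twice as
      many lanes as the input vector the call narrows (NARROW, two input
      vectors feed each vectorized call), equal counts mean NONE, and an
      output with half as many lanes would widen (WIDEN, which no target
      currently implements below); anything else is rejected.  */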
2329 if (nunits_in == nunits_out / 2)
2330 modifier = NARROW;
2331 else if (nunits_out == nunits_in)
2332 modifier = NONE;
2333 else if (nunits_out == nunits_in / 2)
2334 modifier = WIDEN;
2335 else
2336 return false;
2338 /* For now, we only vectorize functions if a target specific builtin
2339 is available. TODO -- in some cases, it might be profitable to
2340 insert the calls for pieces of the vector, in order to be able
2341 to vectorize other operations in the loop. */
2342 fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
2343 if (fndecl == NULL_TREE)
2345 if (gimple_call_internal_p (stmt)
2346 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE
2347 && !slp_node
2348 && loop_vinfo
2349 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2350 && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
2351 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2352 == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
2354 /* We can handle IFN_GOMP_SIMD_LANE by returning a
2355 { 0, 1, 2, ... vf - 1 } vector. */
2356 gcc_assert (nargs == 0);
2358 else
2360 if (dump_enabled_p ())
2361 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2362 "function is not vectorizable.\n");
2363 return false;
2367 gcc_assert (!gimple_vuse (stmt));
2369 if (slp_node || PURE_SLP_STMT (stmt_info))
2370 ncopies = 1;
2371 else if (modifier == NARROW)
2372 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2373 else
2374 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2376 /* Sanity check: make sure that at least one copy of the vectorized stmt
2377 needs to be generated. */
2378 gcc_assert (ncopies >= 1);
2380 if (!vec_stmt) /* transformation not required. */
2382 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2383 if (dump_enabled_p ())
2384 dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
2385 "\n");
2386 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
2387 return true;
2390 /** Transform. **/
2392 if (dump_enabled_p ())
2393 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
2395 /* Handle def. */
2396 scalar_dest = gimple_call_lhs (stmt);
2397 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2399 prev_stmt_info = NULL;
2400 switch (modifier)
2402 case NONE:
2403 for (j = 0; j < ncopies; ++j)
2405 /* Build argument list for the vectorized call. */
2406 if (j == 0)
2407 vargs.create (nargs);
2408 else
2409 vargs.truncate (0);
2411 if (slp_node)
2413 auto_vec<vec<tree> > vec_defs (nargs);
2414 vec<tree> vec_oprnds0;
2416 for (i = 0; i < nargs; i++)
2417 vargs.quick_push (gimple_call_arg (stmt, i));
2418 vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
2419 vec_oprnds0 = vec_defs[0];
2421 /* Arguments are ready. Create the new vector stmt. */
2422 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
2424 size_t k;
2425 for (k = 0; k < nargs; k++)
2427 vec<tree> vec_oprndsk = vec_defs[k];
2428 vargs[k] = vec_oprndsk[i];
2430 new_stmt = gimple_build_call_vec (fndecl, vargs);
2431 new_temp = make_ssa_name (vec_dest, new_stmt);
2432 gimple_call_set_lhs (new_stmt, new_temp);
2433 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2434 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2437 for (i = 0; i < nargs; i++)
2439 vec<tree> vec_oprndsi = vec_defs[i];
2440 vec_oprndsi.release ();
2442 continue;
2445 for (i = 0; i < nargs; i++)
2447 op = gimple_call_arg (stmt, i);
2448 if (j == 0)
2449 vec_oprnd0
2450 = vect_get_vec_def_for_operand (op, stmt, NULL);
2451 else
2453 vec_oprnd0 = gimple_call_arg (new_stmt, i);
2454 vec_oprnd0
2455 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2458 vargs.quick_push (vec_oprnd0);
2461 if (gimple_call_internal_p (stmt)
2462 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2464 tree *v = XALLOCAVEC (tree, nunits_out);
2465 int k;
2466 for (k = 0; k < nunits_out; ++k)
2467 v[k] = build_int_cst (unsigned_type_node, j * nunits_out + k);
2468 tree cst = build_vector (vectype_out, v);
2469 tree new_var
2470 = vect_get_new_vect_var (vectype_out, vect_simple_var, "cst_");
2471 gimple init_stmt = gimple_build_assign (new_var, cst);
2472 new_temp = make_ssa_name (new_var, init_stmt);
2473 gimple_assign_set_lhs (init_stmt, new_temp);
2474 vect_init_vector_1 (stmt, init_stmt, NULL);
2475 new_temp = make_ssa_name (vec_dest);
2476 new_stmt = gimple_build_assign (new_temp,
2477 gimple_assign_lhs (init_stmt));
2479 else
2481 new_stmt = gimple_build_call_vec (fndecl, vargs);
2482 new_temp = make_ssa_name (vec_dest, new_stmt);
2483 gimple_call_set_lhs (new_stmt, new_temp);
2485 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2487 if (j == 0)
2488 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2489 else
2490 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2492 prev_stmt_info = vinfo_for_stmt (new_stmt);
2495 break;
2497 case NARROW:
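      /* Narrowing call: each vectorized call consumes two vector defs per
	 scalar argument (hence the NARGS * 2 slots in VARGS) and produces
	 a single result vector.  */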
2498 for (j = 0; j < ncopies; ++j)
2500 /* Build argument list for the vectorized call. */
2501 if (j == 0)
2502 vargs.create (nargs * 2);
2503 else
2504 vargs.truncate (0);
2506 if (slp_node)
2508 auto_vec<vec<tree> > vec_defs (nargs);
2509 vec<tree> vec_oprnds0;
2511 for (i = 0; i < nargs; i++)
2512 vargs.quick_push (gimple_call_arg (stmt, i));
2513 vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
2514 vec_oprnds0 = vec_defs[0];
2516 /* Arguments are ready. Create the new vector stmt. */
2517 for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
2519 size_t k;
2520 vargs.truncate (0);
2521 for (k = 0; k < nargs; k++)
2523 vec<tree> vec_oprndsk = vec_defs[k];
2524 vargs.quick_push (vec_oprndsk[i]);
2525 vargs.quick_push (vec_oprndsk[i + 1]);
2527 new_stmt = gimple_build_call_vec (fndecl, vargs);
2528 new_temp = make_ssa_name (vec_dest, new_stmt);
2529 gimple_call_set_lhs (new_stmt, new_temp);
2530 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2531 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2534 for (i = 0; i < nargs; i++)
2536 vec<tree> vec_oprndsi = vec_defs[i];
2537 vec_oprndsi.release ();
2539 continue;
2542 for (i = 0; i < nargs; i++)
2544 op = gimple_call_arg (stmt, i);
2545 if (j == 0)
2547 vec_oprnd0
2548 = vect_get_vec_def_for_operand (op, stmt, NULL);
2549 vec_oprnd1
2550 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2552 else
2554 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
2555 vec_oprnd0
2556 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
2557 vec_oprnd1
2558 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2561 vargs.quick_push (vec_oprnd0);
2562 vargs.quick_push (vec_oprnd1);
2565 new_stmt = gimple_build_call_vec (fndecl, vargs);
2566 new_temp = make_ssa_name (vec_dest, new_stmt);
2567 gimple_call_set_lhs (new_stmt, new_temp);
2568 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2570 if (j == 0)
2571 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2572 else
2573 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2575 prev_stmt_info = vinfo_for_stmt (new_stmt);
2578 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2580 break;
2582 case WIDEN:
2583 /* No current target implements this case. */
2584 return false;
2587 vargs.release ();
2589 /* The call in STMT might prevent it from being removed in dce.
2590 We cannot remove it here, however, due to the way the ssa name
2591 it defines is mapped to the new definition. So just replace the
2592 rhs of the statement with something harmless. */
2594 if (slp_node)
2595 return true;
2597 type = TREE_TYPE (scalar_dest);
2598 if (is_pattern_stmt_p (stmt_info))
2599 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
2600 else
2601 lhs = gimple_call_lhs (stmt);
2602 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
2603 set_vinfo_for_stmt (new_stmt, stmt_info);
2604 set_vinfo_for_stmt (stmt, NULL);
2605 STMT_VINFO_STMT (stmt_info) = new_stmt;
2606 gsi_replace (gsi, new_stmt, false);
2608 return true;
2612 struct simd_call_arg_info
2614 tree vectype;
2615 tree op;
2616 enum vect_def_type dt;
2617 HOST_WIDE_INT linear_step;
2618 unsigned int align;
2621 /* Function vectorizable_simd_clone_call.
2623 Check if STMT performs a function call that can be vectorized
2624 by calling a simd clone of the function.
2625 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2626 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2627 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
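/* Illustrative sketch (an assumption, not from the original sources): for

     #pragma omp declare simd
     int foo (int x);

   a call foo (a[i]) in a loop vectorized with a factor of 4 may be
   replaced by one call to a simd clone of foo that takes and returns a
   vector of four ints.  */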
2629 static bool
2630 vectorizable_simd_clone_call (gimple stmt, gimple_stmt_iterator *gsi,
2631 gimple *vec_stmt, slp_tree slp_node)
2633 tree vec_dest;
2634 tree scalar_dest;
2635 tree op, type;
2636 tree vec_oprnd0 = NULL_TREE;
2637 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
2638 tree vectype;
2639 unsigned int nunits;
2640 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2641 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2642 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
2643 tree fndecl, new_temp, def;
2644 gimple def_stmt;
2645 gimple new_stmt = NULL;
2646 int ncopies, j;
2647 vec<simd_call_arg_info> arginfo = vNULL;
2648 vec<tree> vargs = vNULL;
2649 size_t i, nargs;
2650 tree lhs, rtype, ratype;
2651 vec<constructor_elt, va_gc> *ret_ctor_elts;
2653 /* Is STMT a vectorizable call? */
2654 if (!is_gimple_call (stmt))
2655 return false;
2657 fndecl = gimple_call_fndecl (stmt);
2658 if (fndecl == NULL_TREE)
2659 return false;
2661 struct cgraph_node *node = cgraph_node::get (fndecl);
2662 if (node == NULL || node->simd_clones == NULL)
2663 return false;
2665 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2666 return false;
2668 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2669 return false;
2671 if (gimple_call_lhs (stmt)
2672 && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
2673 return false;
2675 gcc_checking_assert (!stmt_can_throw_internal (stmt));
2677 vectype = STMT_VINFO_VECTYPE (stmt_info);
2679 if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
2680 return false;
2682 /* FORNOW */
2683 if (slp_node || PURE_SLP_STMT (stmt_info))
2684 return false;
2686 /* Process function arguments. */
2687 nargs = gimple_call_num_args (stmt);
2689 /* Bail out if the function has zero arguments. */
2690 if (nargs == 0)
2691 return false;
2693 arginfo.create (nargs);
2695 for (i = 0; i < nargs; i++)
2697 simd_call_arg_info thisarginfo;
2698 affine_iv iv;
2700 thisarginfo.linear_step = 0;
2701 thisarginfo.align = 0;
2702 thisarginfo.op = NULL_TREE;
2704 op = gimple_call_arg (stmt, i);
2705 if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
2706 &def_stmt, &def, &thisarginfo.dt,
2707 &thisarginfo.vectype)
2708 || thisarginfo.dt == vect_uninitialized_def)
2710 if (dump_enabled_p ())
2711 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2712 "use not simple.\n");
2713 arginfo.release ();
2714 return false;
2717 if (thisarginfo.dt == vect_constant_def
2718 || thisarginfo.dt == vect_external_def)
2719 gcc_assert (thisarginfo.vectype == NULL_TREE);
2720 else
2721 gcc_assert (thisarginfo.vectype != NULL_TREE);
2723 /* For linear arguments, the analysis phase should have saved
2724 the base and step in STMT_VINFO_SIMD_CLONE_INFO. */
2725 if (i * 2 + 3 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
2726 && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 2 + 2])
2728 gcc_assert (vec_stmt);
2729 thisarginfo.linear_step
2730 = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 2 + 2]);
2731 thisarginfo.op
2732 = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 2 + 1];
2733 /* If the loop has been peeled for alignment, we need to adjust it. */
2734 tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
2735 tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
2736 if (n1 != n2)
2738 tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
2739 tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 2 + 2];
2740 tree opt = TREE_TYPE (thisarginfo.op);
2741 bias = fold_convert (TREE_TYPE (step), bias);
2742 bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
2743 thisarginfo.op
2744 = fold_build2 (POINTER_TYPE_P (opt)
2745 ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
2746 thisarginfo.op, bias);
2749 else if (!vec_stmt
2750 && thisarginfo.dt != vect_constant_def
2751 && thisarginfo.dt != vect_external_def
2752 && loop_vinfo
2753 && TREE_CODE (op) == SSA_NAME
2754 && simple_iv (loop, loop_containing_stmt (stmt), op,
2755 &iv, false)
2756 && tree_fits_shwi_p (iv.step))
2758 thisarginfo.linear_step = tree_to_shwi (iv.step);
2759 thisarginfo.op = iv.base;
2761 else if ((thisarginfo.dt == vect_constant_def
2762 || thisarginfo.dt == vect_external_def)
2763 && POINTER_TYPE_P (TREE_TYPE (op)))
2764 thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
2766 arginfo.quick_push (thisarginfo);
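  /* Select the simd clone to use by scoring each candidate: clones whose
     simdlen exceeds the vectorization factor, whose argument kinds do not
     match the actual uses, or that the target refuses are skipped;
     shorter-than-VF simdlens, in-branch clones (currently skipped anyway)
     and under-aligned pointer arguments add to the "badness", and the
     clone with the lowest badness wins.  */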
2769 unsigned int badness = 0;
2770 struct cgraph_node *bestn = NULL;
2771 if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
2772 bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
2773 else
2774 for (struct cgraph_node *n = node->simd_clones; n != NULL;
2775 n = n->simdclone->next_clone)
2777 unsigned int this_badness = 0;
2778 if (n->simdclone->simdlen
2779 > (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
2780 || n->simdclone->nargs != nargs)
2781 continue;
2782 if (n->simdclone->simdlen
2783 < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo))
2784 this_badness += (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
2785 - exact_log2 (n->simdclone->simdlen)) * 1024;
2786 if (n->simdclone->inbranch)
2787 this_badness += 2048;
2788 int target_badness = targetm.simd_clone.usable (n);
2789 if (target_badness < 0)
2790 continue;
2791 this_badness += target_badness * 512;
2792 /* FORNOW: Code to pass the mask argument still needs to be added. */
2793 if (n->simdclone->inbranch)
2794 continue;
2795 for (i = 0; i < nargs; i++)
2797 switch (n->simdclone->args[i].arg_type)
2799 case SIMD_CLONE_ARG_TYPE_VECTOR:
2800 if (!useless_type_conversion_p
2801 (n->simdclone->args[i].orig_type,
2802 TREE_TYPE (gimple_call_arg (stmt, i))))
2803 i = -1;
2804 else if (arginfo[i].dt == vect_constant_def
2805 || arginfo[i].dt == vect_external_def
2806 || arginfo[i].linear_step)
2807 this_badness += 64;
2808 break;
2809 case SIMD_CLONE_ARG_TYPE_UNIFORM:
2810 if (arginfo[i].dt != vect_constant_def
2811 && arginfo[i].dt != vect_external_def)
2812 i = -1;
2813 break;
2814 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
2815 if (arginfo[i].dt == vect_constant_def
2816 || arginfo[i].dt == vect_external_def
2817 || (arginfo[i].linear_step
2818 != n->simdclone->args[i].linear_step))
2819 i = -1;
2820 break;
2821 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
2822 /* FORNOW */
2823 i = -1;
2824 break;
2825 case SIMD_CLONE_ARG_TYPE_MASK:
2826 gcc_unreachable ();
2828 if (i == (size_t) -1)
2829 break;
2830 if (n->simdclone->args[i].alignment > arginfo[i].align)
2832 i = -1;
2833 break;
2835 if (arginfo[i].align)
2836 this_badness += (exact_log2 (arginfo[i].align)
2837 - exact_log2 (n->simdclone->args[i].alignment));
2839 if (i == (size_t) -1)
2840 continue;
2841 if (bestn == NULL || this_badness < badness)
2843 bestn = n;
2844 badness = this_badness;
2848 if (bestn == NULL)
2850 arginfo.release ();
2851 return false;
2854 for (i = 0; i < nargs; i++)
2855 if ((arginfo[i].dt == vect_constant_def
2856 || arginfo[i].dt == vect_external_def)
2857 && bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
2859 arginfo[i].vectype
2860 = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
2861 i)));
2862 if (arginfo[i].vectype == NULL
2863 || (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
2864 > bestn->simdclone->simdlen))
2866 arginfo.release ();
2867 return false;
2871 fndecl = bestn->decl;
2872 nunits = bestn->simdclone->simdlen;
2873 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
2875 /* If the function isn't const, only allow it in simd loops where the user
2876 has asserted that at least nunits consecutive iterations can be
2877 performed using SIMD instructions. */
2878 if ((loop == NULL || (unsigned) loop->safelen < nunits)
2879 && gimple_vuse (stmt))
2881 arginfo.release ();
2882 return false;
2885 /* Sanity check: make sure that at least one copy of the vectorized stmt
2886 needs to be generated. */
2887 gcc_assert (ncopies >= 1);
2889 if (!vec_stmt) /* transformation not required. */
2891 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
2892 for (i = 0; i < nargs; i++)
2893 if (bestn->simdclone->args[i].arg_type
2894 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
2896 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 2
2897 + 1);
2898 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
2899 tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
2900 ? size_type_node : TREE_TYPE (arginfo[i].op);
2901 tree ls = build_int_cst (lst, arginfo[i].linear_step);
2902 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
2904 STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
2905 if (dump_enabled_p ())
2906 dump_printf_loc (MSG_NOTE, vect_location,
2907 "=== vectorizable_simd_clone_call ===\n");
2908 /* vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
2909 arginfo.release ();
2910 return true;
2913 /** Transform. **/
2915 if (dump_enabled_p ())
2916 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
2918 /* Handle def. */
2919 scalar_dest = gimple_call_lhs (stmt);
2920 vec_dest = NULL_TREE;
2921 rtype = NULL_TREE;
2922 ratype = NULL_TREE;
2923 if (scalar_dest)
2925 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2926 rtype = TREE_TYPE (TREE_TYPE (fndecl));
2927 if (TREE_CODE (rtype) == ARRAY_TYPE)
2929 ratype = rtype;
2930 rtype = TREE_TYPE (ratype);
2934 prev_stmt_info = NULL;
2935 for (j = 0; j < ncopies; ++j)
2937 /* Build argument list for the vectorized call. */
2938 if (j == 0)
2939 vargs.create (nargs);
2940 else
2941 vargs.truncate (0);
2943 for (i = 0; i < nargs; i++)
2945 unsigned int k, l, m, o;
2946 tree atype;
2947 op = gimple_call_arg (stmt, i);
2948 switch (bestn->simdclone->args[i].arg_type)
2950 case SIMD_CLONE_ARG_TYPE_VECTOR:
2951 atype = bestn->simdclone->args[i].vector_type;
2952 o = nunits / TYPE_VECTOR_SUBPARTS (atype);
2953 for (m = j * o; m < (j + 1) * o; m++)
2955 if (TYPE_VECTOR_SUBPARTS (atype)
2956 < TYPE_VECTOR_SUBPARTS (arginfo[i].vectype))
2958 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
2959 k = (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
2960 / TYPE_VECTOR_SUBPARTS (atype));
2961 gcc_assert ((k & (k - 1)) == 0);
2962 if (m == 0)
2963 vec_oprnd0
2964 = vect_get_vec_def_for_operand (op, stmt, NULL);
2965 else
2967 vec_oprnd0 = arginfo[i].op;
2968 if ((m & (k - 1)) == 0)
2969 vec_oprnd0
2970 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
2971 vec_oprnd0);
2973 arginfo[i].op = vec_oprnd0;
2974 vec_oprnd0
2975 = build3 (BIT_FIELD_REF, atype, vec_oprnd0,
2976 size_int (prec),
2977 bitsize_int ((m & (k - 1)) * prec));
2978 new_stmt
2979 = gimple_build_assign (make_ssa_name (atype),
2980 vec_oprnd0);
2981 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2982 vargs.safe_push (gimple_assign_lhs (new_stmt));
2984 else
2986 k = (TYPE_VECTOR_SUBPARTS (atype)
2987 / TYPE_VECTOR_SUBPARTS (arginfo[i].vectype));
2988 gcc_assert ((k & (k - 1)) == 0);
2989 vec<constructor_elt, va_gc> *ctor_elts;
2990 if (k != 1)
2991 vec_alloc (ctor_elts, k);
2992 else
2993 ctor_elts = NULL;
2994 for (l = 0; l < k; l++)
2996 if (m == 0 && l == 0)
2997 vec_oprnd0
2998 = vect_get_vec_def_for_operand (op, stmt, NULL);
2999 else
3000 vec_oprnd0
3001 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
3002 arginfo[i].op);
3003 arginfo[i].op = vec_oprnd0;
3004 if (k == 1)
3005 break;
3006 CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
3007 vec_oprnd0);
3009 if (k == 1)
3010 vargs.safe_push (vec_oprnd0);
3011 else
3013 vec_oprnd0 = build_constructor (atype, ctor_elts);
3014 new_stmt
3015 = gimple_build_assign (make_ssa_name (atype),
3016 vec_oprnd0);
3017 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3018 vargs.safe_push (gimple_assign_lhs (new_stmt));
3022 break;
3023 case SIMD_CLONE_ARG_TYPE_UNIFORM:
3024 vargs.safe_push (op);
3025 break;
3026 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
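	  /* A linear argument advances by a constant step per scalar
	     iteration.  On the first copy build a PHI in the loop header
	     that starts at the saved base and is bumped by
	     linear_step * NCOPIES * NUNITS each vectorized iteration;
	     later copies just add j * NUNITS * linear_step to it.  */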
3027 if (j == 0)
3029 gimple_seq stmts;
3030 arginfo[i].op
3031 = force_gimple_operand (arginfo[i].op, &stmts, true,
3032 NULL_TREE);
3033 if (stmts != NULL)
3035 basic_block new_bb;
3036 edge pe = loop_preheader_edge (loop);
3037 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
3038 gcc_assert (!new_bb);
3040 tree phi_res = copy_ssa_name (op);
3041 gphi *new_phi = create_phi_node (phi_res, loop->header);
3042 set_vinfo_for_stmt (new_phi,
3043 new_stmt_vec_info (new_phi, loop_vinfo,
3044 NULL));
3045 add_phi_arg (new_phi, arginfo[i].op,
3046 loop_preheader_edge (loop), UNKNOWN_LOCATION);
3047 enum tree_code code
3048 = POINTER_TYPE_P (TREE_TYPE (op))
3049 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3050 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3051 ? sizetype : TREE_TYPE (op);
3052 widest_int cst
3053 = wi::mul (bestn->simdclone->args[i].linear_step,
3054 ncopies * nunits);
3055 tree tcst = wide_int_to_tree (type, cst);
3056 tree phi_arg = copy_ssa_name (op);
3057 new_stmt
3058 = gimple_build_assign (phi_arg, code, phi_res, tcst);
3059 gimple_stmt_iterator si = gsi_after_labels (loop->header);
3060 gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
3061 set_vinfo_for_stmt (new_stmt,
3062 new_stmt_vec_info (new_stmt, loop_vinfo,
3063 NULL));
3064 add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
3065 UNKNOWN_LOCATION);
3066 arginfo[i].op = phi_res;
3067 vargs.safe_push (phi_res);
3069 else
3071 enum tree_code code
3072 = POINTER_TYPE_P (TREE_TYPE (op))
3073 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3074 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3075 ? sizetype : TREE_TYPE (op);
3076 widest_int cst
3077 = wi::mul (bestn->simdclone->args[i].linear_step,
3078 j * nunits);
3079 tree tcst = wide_int_to_tree (type, cst);
3080 new_temp = make_ssa_name (TREE_TYPE (op));
3081 new_stmt = gimple_build_assign (new_temp, code,
3082 arginfo[i].op, tcst);
3083 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3084 vargs.safe_push (new_temp);
3086 break;
3087 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
3088 default:
3089 gcc_unreachable ();
3093 new_stmt = gimple_build_call_vec (fndecl, vargs);
3094 if (vec_dest)
3096 gcc_assert (ratype || TYPE_VECTOR_SUBPARTS (rtype) == nunits);
3097 if (ratype)
3098 new_temp = create_tmp_var (ratype);
3099 else if (TYPE_VECTOR_SUBPARTS (vectype)
3100 == TYPE_VECTOR_SUBPARTS (rtype))
3101 new_temp = make_ssa_name (vec_dest, new_stmt);
3102 else
3103 new_temp = make_ssa_name (rtype, new_stmt);
3104 gimple_call_set_lhs (new_stmt, new_temp);
3106 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3108 if (vec_dest)
3110 if (TYPE_VECTOR_SUBPARTS (vectype) < nunits)
3112 unsigned int k, l;
3113 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
3114 k = nunits / TYPE_VECTOR_SUBPARTS (vectype);
3115 gcc_assert ((k & (k - 1)) == 0);
3116 for (l = 0; l < k; l++)
3118 tree t;
3119 if (ratype)
3121 t = build_fold_addr_expr (new_temp);
3122 t = build2 (MEM_REF, vectype, t,
3123 build_int_cst (TREE_TYPE (t),
3124 l * prec / BITS_PER_UNIT));
3126 else
3127 t = build3 (BIT_FIELD_REF, vectype, new_temp,
3128 size_int (prec), bitsize_int (l * prec));
3129 new_stmt
3130 = gimple_build_assign (make_ssa_name (vectype), t);
3131 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3132 if (j == 0 && l == 0)
3133 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3134 else
3135 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3137 prev_stmt_info = vinfo_for_stmt (new_stmt);
3140 if (ratype)
3142 tree clobber = build_constructor (ratype, NULL);
3143 TREE_THIS_VOLATILE (clobber) = 1;
3144 new_stmt = gimple_build_assign (new_temp, clobber);
3145 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3147 continue;
3149 else if (TYPE_VECTOR_SUBPARTS (vectype) > nunits)
3151 unsigned int k = (TYPE_VECTOR_SUBPARTS (vectype)
3152 / TYPE_VECTOR_SUBPARTS (rtype));
3153 gcc_assert ((k & (k - 1)) == 0);
3154 if ((j & (k - 1)) == 0)
3155 vec_alloc (ret_ctor_elts, k);
3156 if (ratype)
3158 unsigned int m, o = nunits / TYPE_VECTOR_SUBPARTS (rtype);
3159 for (m = 0; m < o; m++)
3161 tree tem = build4 (ARRAY_REF, rtype, new_temp,
3162 size_int (m), NULL_TREE, NULL_TREE);
3163 new_stmt
3164 = gimple_build_assign (make_ssa_name (rtype), tem);
3165 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3166 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
3167 gimple_assign_lhs (new_stmt));
3169 tree clobber = build_constructor (ratype, NULL);
3170 TREE_THIS_VOLATILE (clobber) = 1;
3171 new_stmt = gimple_build_assign (new_temp, clobber);
3172 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3174 else
3175 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
3176 if ((j & (k - 1)) != k - 1)
3177 continue;
3178 vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
3179 new_stmt
3180 = gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
3181 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3183 if ((unsigned) j == k - 1)
3184 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3185 else
3186 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3188 prev_stmt_info = vinfo_for_stmt (new_stmt);
3189 continue;
3191 else if (ratype)
3193 tree t = build_fold_addr_expr (new_temp);
3194 t = build2 (MEM_REF, vectype, t,
3195 build_int_cst (TREE_TYPE (t), 0));
3196 new_stmt
3197 = gimple_build_assign (make_ssa_name (vec_dest), t);
3198 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3199 tree clobber = build_constructor (ratype, NULL);
3200 TREE_THIS_VOLATILE (clobber) = 1;
3201 vect_finish_stmt_generation (stmt,
3202 gimple_build_assign (new_temp,
3203 clobber), gsi);
3207 if (j == 0)
3208 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3209 else
3210 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3212 prev_stmt_info = vinfo_for_stmt (new_stmt);
3215 vargs.release ();
3217 /* The call in STMT might prevent it from being removed in dce.
3218 We cannot remove it here, however, due to the way the ssa name
3219 it defines is mapped to the new definition. So just replace the
3220 rhs of the statement with something harmless. */
3222 if (slp_node)
3223 return true;
3225 if (scalar_dest)
3227 type = TREE_TYPE (scalar_dest);
3228 if (is_pattern_stmt_p (stmt_info))
3229 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
3230 else
3231 lhs = gimple_call_lhs (stmt);
3232 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
3234 else
3235 new_stmt = gimple_build_nop ();
3236 set_vinfo_for_stmt (new_stmt, stmt_info);
3237 set_vinfo_for_stmt (stmt, NULL);
3238 STMT_VINFO_STMT (stmt_info) = new_stmt;
3239 gsi_replace (gsi, new_stmt, true);
3240 unlink_stmt_vdef (stmt);
3242 return true;
3246 /* Function vect_gen_widened_results_half
3248 Create a vector stmt whose code is CODE, whose number of operands is given
3249 by OP_TYPE, and whose result variable is VEC_DEST; its arguments are
3250 VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at BSI.
3251 In the case that CODE is a CALL_EXPR, this means that a call to DECL
3252 needs to be created (DECL is a function-decl of a target-builtin).
3253 STMT is the original scalar stmt that we are vectorizing. */
3255 static gimple
3256 vect_gen_widened_results_half (enum tree_code code,
3257 tree decl,
3258 tree vec_oprnd0, tree vec_oprnd1, int op_type,
3259 tree vec_dest, gimple_stmt_iterator *gsi,
3260 gimple stmt)
3262 gimple new_stmt;
3263 tree new_temp;
3265 /* Generate half of the widened result: */
3266 if (code == CALL_EXPR)
3268 /* Target specific support */
3269 if (op_type == binary_op)
3270 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
3271 else
3272 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
3273 new_temp = make_ssa_name (vec_dest, new_stmt);
3274 gimple_call_set_lhs (new_stmt, new_temp);
3276 else
3278 /* Generic support */
3279 gcc_assert (op_type == TREE_CODE_LENGTH (code));
3280 if (op_type != binary_op)
3281 vec_oprnd1 = NULL;
3282 new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
3283 new_temp = make_ssa_name (vec_dest, new_stmt);
3284 gimple_assign_set_lhs (new_stmt, new_temp);
3286 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3288 return new_stmt;
3292 /* Get vectorized definitions for loop-based vectorization. For the first
3293 operand we call vect_get_vec_def_for_operand() (with OPRND containing
3294 the scalar operand), and for the rest we get a copy with
3295 vect_get_vec_def_for_stmt_copy() using the previous vector definition
3296 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
3297 The vectors are collected into VEC_OPRNDS. */
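/* Note: each invocation pushes two vector defs, and the recursion adds two
   more per remaining step, so a MULTI_STEP_CVT of N leaves 2 * (N + 1)
   defs in VEC_OPRNDS.  */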
3299 static void
3300 vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
3301 vec<tree> *vec_oprnds, int multi_step_cvt)
3303 tree vec_oprnd;
3305 /* Get first vector operand. */
3306 /* All the vector operands except the very first one (that is the scalar
3307 oprnd) are stmt copies. */
3308 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
3309 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
3310 else
3311 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
3313 vec_oprnds->quick_push (vec_oprnd);
3315 /* Get second vector operand. */
3316 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
3317 vec_oprnds->quick_push (vec_oprnd);
3319 *oprnd = vec_oprnd;
3321 /* For conversion in multiple steps, continue to get operands
3322 recursively. */
3323 if (multi_step_cvt)
3324 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
3328 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
3329 For multi-step conversions store the resulting vectors and call the function
3330 recursively. */
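/* Note: consecutive pairs of vectors from VEC_OPRNDS are packed into one
   narrower vector each, so every recursion level halves the number of
   live vectors until the destination type is reached.  */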
3332 static void
3333 vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
3334 int multi_step_cvt, gimple stmt,
3335 vec<tree> vec_dsts,
3336 gimple_stmt_iterator *gsi,
3337 slp_tree slp_node, enum tree_code code,
3338 stmt_vec_info *prev_stmt_info)
3340 unsigned int i;
3341 tree vop0, vop1, new_tmp, vec_dest;
3342 gimple new_stmt;
3343 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3345 vec_dest = vec_dsts.pop ();
3347 for (i = 0; i < vec_oprnds->length (); i += 2)
3349 /* Create demotion operation. */
3350 vop0 = (*vec_oprnds)[i];
3351 vop1 = (*vec_oprnds)[i + 1];
3352 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
3353 new_tmp = make_ssa_name (vec_dest, new_stmt);
3354 gimple_assign_set_lhs (new_stmt, new_tmp);
3355 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3357 if (multi_step_cvt)
3358 /* Store the resulting vector for next recursive call. */
3359 (*vec_oprnds)[i/2] = new_tmp;
3360 else
3362 /* This is the last step of the conversion sequence. Store the
3363 vectors in SLP_NODE or in the vector info of the scalar statement
3364 (or in the STMT_VINFO_RELATED_STMT chain). */
3365 if (slp_node)
3366 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3367 else
3369 if (!*prev_stmt_info)
3370 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3371 else
3372 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
3374 *prev_stmt_info = vinfo_for_stmt (new_stmt);
3379 /* For multi-step demotion operations we first generate demotion operations
3380 from the source type to the intermediate types, and then combine the
3381 results (stored in VEC_OPRNDS) with a demotion operation to the destination
3382 type. */
3383 if (multi_step_cvt)
3385 /* At each level of recursion we have half of the operands we had at the
3386 previous level. */
3387 vec_oprnds->truncate ((i+1)/2);
3388 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
3389 stmt, vec_dsts, gsi, slp_node,
3390 VEC_PACK_TRUNC_EXPR,
3391 prev_stmt_info);
3394 vec_dsts.quick_push (vec_dest);
3398 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
3399 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
3400 the resulting vectors and call the function recursively. */
3402 static void
3403 vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
3404 vec<tree> *vec_oprnds1,
3405 gimple stmt, tree vec_dest,
3406 gimple_stmt_iterator *gsi,
3407 enum tree_code code1,
3408 enum tree_code code2, tree decl1,
3409 tree decl2, int op_type)
3411 int i;
3412 tree vop0, vop1, new_tmp1, new_tmp2;
3413 gimple new_stmt1, new_stmt2;
3414 vec<tree> vec_tmp = vNULL;
3416 vec_tmp.create (vec_oprnds0->length () * 2);
3417 FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
3419 if (op_type == binary_op)
3420 vop1 = (*vec_oprnds1)[i];
3421 else
3422 vop1 = NULL_TREE;
3424 /* Generate the two halves of the promotion operation. */
3425 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
3426 op_type, vec_dest, gsi, stmt);
3427 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
3428 op_type, vec_dest, gsi, stmt);
3429 if (is_gimple_call (new_stmt1))
3431 new_tmp1 = gimple_call_lhs (new_stmt1);
3432 new_tmp2 = gimple_call_lhs (new_stmt2);
3434 else
3436 new_tmp1 = gimple_assign_lhs (new_stmt1);
3437 new_tmp2 = gimple_assign_lhs (new_stmt2);
3440 /* Store the results for the next step. */
3441 vec_tmp.quick_push (new_tmp1);
3442 vec_tmp.quick_push (new_tmp2);
3445 vec_oprnds0->release ();
3446 *vec_oprnds0 = vec_tmp;
3450 /* Check if STMT performs a conversion operation, that can be vectorized.
3451 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3452 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3453 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3455 static bool
3456 vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
3457 gimple *vec_stmt, slp_tree slp_node)
3459 tree vec_dest;
3460 tree scalar_dest;
3461 tree op0, op1 = NULL_TREE;
3462 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
3463 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3464 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3465 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
3466 enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
3467 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
3468 tree new_temp;
3469 tree def;
3470 gimple def_stmt;
3471 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
3472 gimple new_stmt = NULL;
3473 stmt_vec_info prev_stmt_info;
3474 int nunits_in;
3475 int nunits_out;
3476 tree vectype_out, vectype_in;
3477 int ncopies, i, j;
3478 tree lhs_type, rhs_type;
3479 enum { NARROW, NONE, WIDEN } modifier;
3480 vec<tree> vec_oprnds0 = vNULL;
3481 vec<tree> vec_oprnds1 = vNULL;
3482 tree vop0;
3483 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3484 int multi_step_cvt = 0;
3485 vec<tree> vec_dsts = vNULL;
3486 vec<tree> interm_types = vNULL;
3487 tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
3488 int op_type;
3489 machine_mode rhs_mode;
3490 unsigned short fltsz;
3492 /* Is STMT a vectorizable conversion? */
3494 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3495 return false;
3497 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3498 return false;
3500 if (!is_gimple_assign (stmt))
3501 return false;
3503 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
3504 return false;
3506 code = gimple_assign_rhs_code (stmt);
3507 if (!CONVERT_EXPR_CODE_P (code)
3508 && code != FIX_TRUNC_EXPR
3509 && code != FLOAT_EXPR
3510 && code != WIDEN_MULT_EXPR
3511 && code != WIDEN_LSHIFT_EXPR)
3512 return false;
3514 op_type = TREE_CODE_LENGTH (code);
3516 /* Check types of lhs and rhs. */
3517 scalar_dest = gimple_assign_lhs (stmt);
3518 lhs_type = TREE_TYPE (scalar_dest);
3519 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3521 op0 = gimple_assign_rhs1 (stmt);
3522 rhs_type = TREE_TYPE (op0);
3524 if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
3525 && !((INTEGRAL_TYPE_P (lhs_type)
3526 && INTEGRAL_TYPE_P (rhs_type))
3527 || (SCALAR_FLOAT_TYPE_P (lhs_type)
3528 && SCALAR_FLOAT_TYPE_P (rhs_type))))
3529 return false;
3531 if ((INTEGRAL_TYPE_P (lhs_type)
3532 && (TYPE_PRECISION (lhs_type)
3533 != GET_MODE_PRECISION (TYPE_MODE (lhs_type))))
3534 || (INTEGRAL_TYPE_P (rhs_type)
3535 && (TYPE_PRECISION (rhs_type)
3536 != GET_MODE_PRECISION (TYPE_MODE (rhs_type)))))
3538 if (dump_enabled_p ())
3539 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3540 "type conversion to/from bit-precision unsupported."
3541 "\n");
3542 return false;
3545 /* Check the operands of the operation. */
3546 if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
3547 &def_stmt, &def, &dt[0], &vectype_in))
3549 if (dump_enabled_p ())
3550 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3551 "use not simple.\n");
3552 return false;
3554 if (op_type == binary_op)
3556 bool ok;
3558 op1 = gimple_assign_rhs2 (stmt);
3559 gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
3560 /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
3561 OP1. */
3562 if (CONSTANT_CLASS_P (op0))
3563 ok = vect_is_simple_use_1 (op1, stmt, loop_vinfo, bb_vinfo,
3564 &def_stmt, &def, &dt[1], &vectype_in);
3565 else
3566 ok = vect_is_simple_use (op1, stmt, loop_vinfo, bb_vinfo, &def_stmt,
3567 &def, &dt[1]);
3569 if (!ok)
3571 if (dump_enabled_p ())
3572 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3573 "use not simple.\n");
3574 return false;
3578 /* If op0 is an external or constant def, use a vector type of
3579 the same size as the output vector type. */
3580 if (!vectype_in)
3581 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
3582 if (vec_stmt)
3583 gcc_assert (vectype_in);
3584 if (!vectype_in)
3586 if (dump_enabled_p ())
3588 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3589 "no vectype for scalar type ");
3590 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
3591 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3594 return false;
3597 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
3598 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
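  /* Classify the conversion by lane counts: fewer input than output lanes
     means the elements get narrower (NARROW), equal counts mean a
     same-width conversion (NONE), and more input than output lanes means
     a widening conversion (WIDEN).  */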
3599 if (nunits_in < nunits_out)
3600 modifier = NARROW;
3601 else if (nunits_out == nunits_in)
3602 modifier = NONE;
3603 else
3604 modifier = WIDEN;
3606 /* Multiple types in SLP are handled by creating the appropriate number of
3607 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3608 case of SLP. */
3609 if (slp_node || PURE_SLP_STMT (stmt_info))
3610 ncopies = 1;
3611 else if (modifier == NARROW)
3612 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
3613 else
3614 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
3616 /* Sanity check: make sure that at least one copy of the vectorized stmt
3617 needs to be generated. */
3618 gcc_assert (ncopies >= 1);
3620 /* Supportable by target? */
3621 switch (modifier)
3623 case NONE:
3624 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
3625 return false;
3626 if (supportable_convert_operation (code, vectype_out, vectype_in,
3627 &decl1, &code1))
3628 break;
3629 /* FALLTHRU */
3630 unsupported:
3631 if (dump_enabled_p ())
3632 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3633 "conversion not supported by target.\n");
3634 return false;
3636 case WIDEN:
3637 if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
3638 &code1, &code2, &multi_step_cvt,
3639 &interm_types))
3641 /* Binary widening operation can only be supported directly by the
3642 architecture. */
3643 gcc_assert (!(multi_step_cvt && op_type == binary_op));
3644 break;
3647 if (code != FLOAT_EXPR
3648 || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
3649 <= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
3650 goto unsupported;
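      /* The float result is wider than the integer source, so search for an
	 intermediate integer mode: keep doubling the source mode until an
	 integer type of the float's size can be converted directly, or a
	 widening conversion from an intermediate type is supported, and
	 record any intermediate vector type in INTERM_TYPES.  */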
3652 rhs_mode = TYPE_MODE (rhs_type);
3653 fltsz = GET_MODE_SIZE (TYPE_MODE (lhs_type));
3654 for (rhs_mode = GET_MODE_2XWIDER_MODE (TYPE_MODE (rhs_type));
3655 rhs_mode != VOIDmode && GET_MODE_SIZE (rhs_mode) <= fltsz;
3656 rhs_mode = GET_MODE_2XWIDER_MODE (rhs_mode))
3658 cvt_type
3659 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
3660 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
3661 if (cvt_type == NULL_TREE)
3662 goto unsupported;
3664 if (GET_MODE_SIZE (rhs_mode) == fltsz)
3666 if (!supportable_convert_operation (code, vectype_out,
3667 cvt_type, &decl1, &codecvt1))
3668 goto unsupported;
3670 else if (!supportable_widening_operation (code, stmt, vectype_out,
3671 cvt_type, &codecvt1,
3672 &codecvt2, &multi_step_cvt,
3673 &interm_types))
3674 continue;
3675 else
3676 gcc_assert (multi_step_cvt == 0);
3678 if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
3679 vectype_in, &code1, &code2,
3680 &multi_step_cvt, &interm_types))
3681 break;
3684 if (rhs_mode == VOIDmode || GET_MODE_SIZE (rhs_mode) > fltsz)
3685 goto unsupported;
3687 if (GET_MODE_SIZE (rhs_mode) == fltsz)
3688 codecvt2 = ERROR_MARK;
3689 else
3691 multi_step_cvt++;
3692 interm_types.safe_push (cvt_type);
3693 cvt_type = NULL_TREE;
3695 break;
3697 case NARROW:
3698 gcc_assert (op_type == unary_op);
3699 if (supportable_narrowing_operation (code, vectype_out, vectype_in,
3700 &code1, &multi_step_cvt,
3701 &interm_types))
3702 break;
3704 if (code != FIX_TRUNC_EXPR
3705 || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
3706 >= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
3707 goto unsupported;
3709 rhs_mode = TYPE_MODE (rhs_type);
3710 cvt_type
3711 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
3712 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
3713 if (cvt_type == NULL_TREE)
3714 goto unsupported;
3715 if (!supportable_convert_operation (code, cvt_type, vectype_in,
3716 &decl1, &codecvt1))
3717 goto unsupported;
3718 if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
3719 &code1, &multi_step_cvt,
3720 &interm_types))
3721 break;
3722 goto unsupported;
3724 default:
3725 gcc_unreachable ();
3728 if (!vec_stmt) /* transformation not required. */
3730 if (dump_enabled_p ())
3731 dump_printf_loc (MSG_NOTE, vect_location,
3732 "=== vectorizable_conversion ===\n");
3733 if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
3735 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
3736 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
3738 else if (modifier == NARROW)
3740 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
3741 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
3743 else
3745 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
3746 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
3748 interm_types.release ();
3749 return true;
3752 /** Transform. **/
3753 if (dump_enabled_p ())
3754 dump_printf_loc (MSG_NOTE, vect_location,
3755 "transform conversion. ncopies = %d.\n", ncopies);
3757 if (op_type == binary_op)
3759 if (CONSTANT_CLASS_P (op0))
3760 op0 = fold_convert (TREE_TYPE (op1), op0);
3761 else if (CONSTANT_CLASS_P (op1))
3762 op1 = fold_convert (TREE_TYPE (op0), op1);
3765 /* In case of multi-step conversion, we first generate conversion operations
3766 to the intermediate types, and then from those types to the final one.
3767 We create vector destinations for the intermediate types (TYPES) received
3768 from supportable_*_operation, and store them in the correct order
3769 for future use in vect_create_vectorized_*_stmts (). */
3770 vec_dsts.create (multi_step_cvt + 1);
3771 vec_dest = vect_create_destination_var (scalar_dest,
3772 (cvt_type && modifier == WIDEN)
3773 ? cvt_type : vectype_out);
3774 vec_dsts.quick_push (vec_dest);
3776 if (multi_step_cvt)
3778 for (i = interm_types.length () - 1;
3779 interm_types.iterate (i, &intermediate_type); i--)
3781 vec_dest = vect_create_destination_var (scalar_dest,
3782 intermediate_type);
3783 vec_dsts.quick_push (vec_dest);
3787 if (cvt_type)
3788 vec_dest = vect_create_destination_var (scalar_dest,
3789 modifier == WIDEN
3790 ? vectype_out : cvt_type);
3792 if (!slp_node)
3794 if (modifier == WIDEN)
3796 vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
3797 if (op_type == binary_op)
3798 vec_oprnds1.create (1);
3800 else if (modifier == NARROW)
3801 vec_oprnds0.create (
3802 2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
3804 else if (code == WIDEN_LSHIFT_EXPR)
3805 vec_oprnds1.create (slp_node->vec_stmts_size);
3807 last_oprnd = op0;
3808 prev_stmt_info = NULL;
3809 switch (modifier)
3811 case NONE:
3812 for (j = 0; j < ncopies; j++)
3814 if (j == 0)
3815 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node,
3816 -1);
3817 else
3818 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
3820 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
3822 /* Arguments are ready. Create the new vector stmt. */
3823 if (code1 == CALL_EXPR)
3825 new_stmt = gimple_build_call (decl1, 1, vop0);
3826 new_temp = make_ssa_name (vec_dest, new_stmt);
3827 gimple_call_set_lhs (new_stmt, new_temp);
3829 else
3831 gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
3832 new_stmt = gimple_build_assign (vec_dest, code1, vop0);
3833 new_temp = make_ssa_name (vec_dest, new_stmt);
3834 gimple_assign_set_lhs (new_stmt, new_temp);
3837 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3838 if (slp_node)
3839 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3842 if (j == 0)
3843 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3844 else
3845 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3846 prev_stmt_info = vinfo_for_stmt (new_stmt);
3848 break;
3850 case WIDEN:
3851 /* In case the vectorization factor (VF) is bigger than the number
3852 of elements that we can fit in a vectype (nunits), we have to
3853 generate more than one vector stmt - i.e - we need to "unroll"
3854 the vector stmt by a factor VF/nunits. */
3855 for (j = 0; j < ncopies; j++)
3857 /* Handle uses. */
3858 if (j == 0)
3860 if (slp_node)
3862 if (code == WIDEN_LSHIFT_EXPR)
3864 unsigned int k;
3866 vec_oprnd1 = op1;
3867 /* Store vec_oprnd1 for every vector stmt to be created
3868 for SLP_NODE. We check during the analysis that all
3869 the shift arguments are the same. */
3870 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
3871 vec_oprnds1.quick_push (vec_oprnd1);
3873 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
3874 slp_node, -1);
3876 else
3877 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
3878 &vec_oprnds1, slp_node, -1);
3880 else
3882 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
3883 vec_oprnds0.quick_push (vec_oprnd0);
3884 if (op_type == binary_op)
3886 if (code == WIDEN_LSHIFT_EXPR)
3887 vec_oprnd1 = op1;
3888 else
3889 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt,
3890 NULL);
3891 vec_oprnds1.quick_push (vec_oprnd1);
3895 else
3897 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
3898 vec_oprnds0.truncate (0);
3899 vec_oprnds0.quick_push (vec_oprnd0);
3900 if (op_type == binary_op)
3902 if (code == WIDEN_LSHIFT_EXPR)
3903 vec_oprnd1 = op1;
3904 else
3905 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
3906 vec_oprnd1);
3907 vec_oprnds1.truncate (0);
3908 vec_oprnds1.quick_push (vec_oprnd1);
3912 /* Arguments are ready. Create the new vector stmts. */
3913 for (i = multi_step_cvt; i >= 0; i--)
3915 tree this_dest = vec_dsts[i];
3916 enum tree_code c1 = code1, c2 = code2;
3917 if (i == 0 && codecvt2 != ERROR_MARK)
3919 c1 = codecvt1;
3920 c2 = codecvt2;
3922 vect_create_vectorized_promotion_stmts (&vec_oprnds0,
3923 &vec_oprnds1,
3924 stmt, this_dest, gsi,
3925 c1, c2, decl1, decl2,
3926 op_type);
3929 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
3931 if (cvt_type)
3933 if (codecvt1 == CALL_EXPR)
3935 new_stmt = gimple_build_call (decl1, 1, vop0);
3936 new_temp = make_ssa_name (vec_dest, new_stmt);
3937 gimple_call_set_lhs (new_stmt, new_temp);
3939 else
3941 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
3942 new_temp = make_ssa_name (vec_dest);
3943 new_stmt = gimple_build_assign (new_temp, codecvt1,
3944 vop0);
3947 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3949 else
3950 new_stmt = SSA_NAME_DEF_STMT (vop0);
3952 if (slp_node)
3953 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3954 else
3956 if (!prev_stmt_info)
3957 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3958 else
3959 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3960 prev_stmt_info = vinfo_for_stmt (new_stmt);
3965 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3966 break;
3968 case NARROW:
3969 /* In case the vectorization factor (VF) is bigger than the number
3970 of elements that we can fit in a vectype (nunits), we have to
3971 generate more than one vector stmt - i.e - we need to "unroll"
3972 the vector stmt by a factor VF/nunits. */
3973 for (j = 0; j < ncopies; j++)
3975 /* Handle uses. */
3976 if (slp_node)
3977 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
3978 slp_node, -1);
3979 else
3981 vec_oprnds0.truncate (0);
3982 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
3983 vect_pow2 (multi_step_cvt) - 1);
3986 /* Arguments are ready. Create the new vector stmts. */
3987 if (cvt_type)
3988 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
3990 if (codecvt1 == CALL_EXPR)
3992 new_stmt = gimple_build_call (decl1, 1, vop0);
3993 new_temp = make_ssa_name (vec_dest, new_stmt);
3994 gimple_call_set_lhs (new_stmt, new_temp);
3996 else
3998 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
3999 new_temp = make_ssa_name (vec_dest);
4000 new_stmt = gimple_build_assign (new_temp, codecvt1,
4001 vop0);
4004 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4005 vec_oprnds0[i] = new_temp;
4008 vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
4009 stmt, vec_dsts, gsi,
4010 slp_node, code1,
4011 &prev_stmt_info);
4014 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4015 break;
4018 vec_oprnds0.release ();
4019 vec_oprnds1.release ();
4020 vec_dsts.release ();
4021 interm_types.release ();
4023 return true;
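/* A minimal illustration of the kind of loop the WIDEN path above handles
   (names and types are illustrative only; the block is compiled out):  */
#if 0
/* short -> double has no single-step vector conversion on many targets,
   so vectorizable_conversion goes through an intermediate integer type
   (short -> int, then int -> double).  */
void
example_widening_conversion (double *out, const short *in, int n)
{
  for (int i = 0; i < n; i++)
    out[i] = (double) in[i];
}
#endif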
4027 /* Function vectorizable_assignment.
4029 Check if STMT performs an assignment (copy) that can be vectorized.
4030 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4031 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4032 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4034 static bool
4035 vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
4036 gimple *vec_stmt, slp_tree slp_node)
4038 tree vec_dest;
4039 tree scalar_dest;
4040 tree op;
4041 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4042 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4043 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4044 tree new_temp;
4045 tree def;
4046 gimple def_stmt;
4047 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4048 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4049 int ncopies;
4050 int i, j;
4051 vec<tree> vec_oprnds = vNULL;
4052 tree vop;
4053 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4054 gimple new_stmt = NULL;
4055 stmt_vec_info prev_stmt_info = NULL;
4056 enum tree_code code;
4057 tree vectype_in;
4059 /* Multiple types in SLP are handled by creating the appropriate number of
4060 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4061 case of SLP. */
4062 if (slp_node || PURE_SLP_STMT (stmt_info))
4063 ncopies = 1;
4064 else
4065 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4067 gcc_assert (ncopies >= 1);
4069 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4070 return false;
4072 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4073 return false;
4075 /* Is vectorizable assignment? */
4076 if (!is_gimple_assign (stmt))
4077 return false;
4079 scalar_dest = gimple_assign_lhs (stmt);
4080 if (TREE_CODE (scalar_dest) != SSA_NAME)
4081 return false;
4083 code = gimple_assign_rhs_code (stmt);
4084 if (gimple_assign_single_p (stmt)
4085 || code == PAREN_EXPR
4086 || CONVERT_EXPR_CODE_P (code))
4087 op = gimple_assign_rhs1 (stmt);
4088 else
4089 return false;
4091 if (code == VIEW_CONVERT_EXPR)
4092 op = TREE_OPERAND (op, 0);
4094 if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
4095 &def_stmt, &def, &dt[0], &vectype_in))
4097 if (dump_enabled_p ())
4098 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4099 "use not simple.\n");
4100 return false;
4103 /* We can handle NOP_EXPR conversions that do not change the number
4104 of elements or the vector size. */
4105 if ((CONVERT_EXPR_CODE_P (code)
4106 || code == VIEW_CONVERT_EXPR)
4107 && (!vectype_in
4108 || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
4109 || (GET_MODE_SIZE (TYPE_MODE (vectype))
4110 != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
4111 return false;
4113 /* We do not handle bit-precision changes. */
4114 if ((CONVERT_EXPR_CODE_P (code)
4115 || code == VIEW_CONVERT_EXPR)
4116 && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
4117 && ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4118 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4119 || ((TYPE_PRECISION (TREE_TYPE (op))
4120 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op))))))
4121 /* But a conversion that does not change the bit-pattern is ok. */
4122 && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4123 > TYPE_PRECISION (TREE_TYPE (op)))
4124 && TYPE_UNSIGNED (TREE_TYPE (op))))
4126 if (dump_enabled_p ())
4127 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4128 "type conversion to/from bit-precision "
4129 "unsupported.\n");
4130 return false;
4133 if (!vec_stmt) /* transformation not required. */
4135 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
4136 if (dump_enabled_p ())
4137 dump_printf_loc (MSG_NOTE, vect_location,
4138 "=== vectorizable_assignment ===\n");
4139 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4140 return true;
4143 /** Transform. **/
4144 if (dump_enabled_p ())
4145 dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");
4147 /* Handle def. */
4148 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4150 /* Handle use. */
4151 for (j = 0; j < ncopies; j++)
4153 /* Handle uses. */
4154 if (j == 0)
4155 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node, -1);
4156 else
4157 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
4159 /* Arguments are ready. Create the new vector stmt. */
4160 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
4162 if (CONVERT_EXPR_CODE_P (code)
4163 || code == VIEW_CONVERT_EXPR)
4164 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
4165 new_stmt = gimple_build_assign (vec_dest, vop);
4166 new_temp = make_ssa_name (vec_dest, new_stmt);
4167 gimple_assign_set_lhs (new_stmt, new_temp);
4168 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4169 if (slp_node)
4170 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4173 if (slp_node)
4174 continue;
4176 if (j == 0)
4177 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4178 else
4179 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4181 prev_stmt_info = vinfo_for_stmt (new_stmt);
4184 vec_oprnds.release ();
4185 return true;
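/* A minimal illustration of a copy that vectorizable_assignment accepts
   (illustrative names only; the block is compiled out):  */
#if 0
/* The signedness change below keeps the number of elements and the vector
   size, so it is vectorized as a plain copy through a VIEW_CONVERT_EXPR.  */
void
example_nop_conversion_copy (unsigned int *dst, const int *src, int n)
{
  for (int i = 0; i < n; i++)
    dst[i] = (unsigned int) src[i];
}
#endif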
4189 /* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
4190 either as shift by a scalar or by a vector. */
4192 bool
4193 vect_supportable_shift (enum tree_code code, tree scalar_type)
4196 machine_mode vec_mode;
4197 optab optab;
4198 int icode;
4199 tree vectype;
4201 vectype = get_vectype_for_scalar_type (scalar_type);
4202 if (!vectype)
4203 return false;
4205 optab = optab_for_tree_code (code, vectype, optab_scalar);
4206 if (!optab
4207 || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
4209 optab = optab_for_tree_code (code, vectype, optab_vector);
4210 if (!optab
4211 || (optab_handler (optab, TYPE_MODE (vectype))
4212 == CODE_FOR_nothing))
4213 return false;
4216 vec_mode = TYPE_MODE (vectype);
4217 icode = (int) optab_handler (optab, vec_mode);
4218 if (icode == CODE_FOR_nothing)
4219 return false;
4221 return true;
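/* A minimal sketch of how a caller might query the helper above
   (illustrative helper name; the block is compiled out):  */
#if 0
static bool
example_short_lshift_supported_p (void)
{
  /* True if the target can vectorize 'x << n' on short elements, using
     either a vector/scalar or a vector/vector shift instruction.  */
  return vect_supportable_shift (LSHIFT_EXPR, short_integer_type_node);
}
#endif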
4225 /* Function vectorizable_shift.
4227 Check if STMT performs a shift operation that can be vectorized.
4228 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4229 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4230 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4232 static bool
4233 vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
4234 gimple *vec_stmt, slp_tree slp_node)
4236 tree vec_dest;
4237 tree scalar_dest;
4238 tree op0, op1 = NULL;
4239 tree vec_oprnd1 = NULL_TREE;
4240 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4241 tree vectype;
4242 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4243 enum tree_code code;
4244 machine_mode vec_mode;
4245 tree new_temp;
4246 optab optab;
4247 int icode;
4248 machine_mode optab_op2_mode;
4249 tree def;
4250 gimple def_stmt;
4251 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4252 gimple new_stmt = NULL;
4253 stmt_vec_info prev_stmt_info;
4254 int nunits_in;
4255 int nunits_out;
4256 tree vectype_out;
4257 tree op1_vectype;
4258 int ncopies;
4259 int j, i;
4260 vec<tree> vec_oprnds0 = vNULL;
4261 vec<tree> vec_oprnds1 = vNULL;
4262 tree vop0, vop1;
4263 unsigned int k;
4264 bool scalar_shift_arg = true;
4265 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4266 int vf;
4268 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4269 return false;
4271 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4272 return false;
4274 /* Is STMT a vectorizable binary/unary operation? */
4275 if (!is_gimple_assign (stmt))
4276 return false;
4278 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4279 return false;
4281 code = gimple_assign_rhs_code (stmt);
4283 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
4284 || code == RROTATE_EXPR))
4285 return false;
4287 scalar_dest = gimple_assign_lhs (stmt);
4288 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4289 if (TYPE_PRECISION (TREE_TYPE (scalar_dest))
4290 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4292 if (dump_enabled_p ())
4293 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4294 "bit-precision shifts not supported.\n");
4295 return false;
4298 op0 = gimple_assign_rhs1 (stmt);
4299 if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
4300 &def_stmt, &def, &dt[0], &vectype))
4302 if (dump_enabled_p ())
4303 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4304 "use not simple.\n");
4305 return false;
4307 /* If op0 is an external or constant def, use a vector type with
4308 the same size as the output vector type. */
4309 if (!vectype)
4310 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
4311 if (vec_stmt)
4312 gcc_assert (vectype);
4313 if (!vectype)
4315 if (dump_enabled_p ())
4316 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4317 "no vectype for scalar type\n");
4318 return false;
4321 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4322 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
4323 if (nunits_out != nunits_in)
4324 return false;
4326 op1 = gimple_assign_rhs2 (stmt);
4327 if (!vect_is_simple_use_1 (op1, stmt, loop_vinfo, bb_vinfo, &def_stmt,
4328 &def, &dt[1], &op1_vectype))
4330 if (dump_enabled_p ())
4331 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4332 "use not simple.\n");
4333 return false;
4336 if (loop_vinfo)
4337 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4338 else
4339 vf = 1;
4341 /* Multiple types in SLP are handled by creating the appropriate number of
4342 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4343 case of SLP. */
4344 if (slp_node || PURE_SLP_STMT (stmt_info))
4345 ncopies = 1;
4346 else
4347 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
4349 gcc_assert (ncopies >= 1);
4351 /* Determine whether the shift amount is a vector or a scalar. If the
4352 shift/rotate amount is a vector, use the vector/vector shift optabs. */
4354 if (dt[1] == vect_internal_def && !slp_node)
4355 scalar_shift_arg = false;
4356 else if (dt[1] == vect_constant_def
4357 || dt[1] == vect_external_def
4358 || dt[1] == vect_internal_def)
4360 /* In SLP, we need to check whether the shift count is the same;
4361 in loops, if it is a constant or invariant, it is always
4362 a scalar shift. */
4363 if (slp_node)
4365 vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
4366 gimple slpstmt;
4368 FOR_EACH_VEC_ELT (stmts, k, slpstmt)
4369 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
4370 scalar_shift_arg = false;
4373 else
4375 if (dump_enabled_p ())
4376 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4377 "operand mode requires invariant argument.\n");
4378 return false;
4381 /* Vector shifted by vector. */
4382 if (!scalar_shift_arg)
4384 optab = optab_for_tree_code (code, vectype, optab_vector);
4385 if (dump_enabled_p ())
4386 dump_printf_loc (MSG_NOTE, vect_location,
4387 "vector/vector shift/rotate found.\n");
4389 if (!op1_vectype)
4390 op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
4391 if (op1_vectype == NULL_TREE
4392 || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
4394 if (dump_enabled_p ())
4395 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4396 "unusable type for last operand in"
4397 " vector/vector shift/rotate.\n");
4398 return false;
4401 /* See if the machine has a vector shifted by scalar insn and if not
4402 then see if it has a vector shifted by vector insn. */
4403 else
4405 optab = optab_for_tree_code (code, vectype, optab_scalar);
4406 if (optab
4407 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
4409 if (dump_enabled_p ())
4410 dump_printf_loc (MSG_NOTE, vect_location,
4411 "vector/scalar shift/rotate found.\n");
4413 else
4415 optab = optab_for_tree_code (code, vectype, optab_vector);
4416 if (optab
4417 && (optab_handler (optab, TYPE_MODE (vectype))
4418 != CODE_FOR_nothing))
4420 scalar_shift_arg = false;
4422 if (dump_enabled_p ())
4423 dump_printf_loc (MSG_NOTE, vect_location,
4424 "vector/vector shift/rotate found.\n");
4426 /* Unlike the other binary operators, shifts/rotates take an
4427 int rhs rather than an operand of the same type as the lhs,
4428 so make sure the scalar has the right type if we are
4429 dealing with vectors of long long/long/short/char. */
4430 if (dt[1] == vect_constant_def)
4431 op1 = fold_convert (TREE_TYPE (vectype), op1);
4432 else if (!useless_type_conversion_p (TREE_TYPE (vectype),
4433 TREE_TYPE (op1)))
4435 if (slp_node
4436 && TYPE_MODE (TREE_TYPE (vectype))
4437 != TYPE_MODE (TREE_TYPE (op1)))
4439 if (dump_enabled_p ())
4440 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4441 "unusable type for last operand in"
4442 " vector/vector shift/rotate.\n");
4443 return false;
4445 if (vec_stmt && !slp_node)
4447 op1 = fold_convert (TREE_TYPE (vectype), op1);
4448 op1 = vect_init_vector (stmt, op1,
4449 TREE_TYPE (vectype), NULL);
4456 /* Supportable by target? */
4457 if (!optab)
4459 if (dump_enabled_p ())
4460 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4461 "no optab.\n");
4462 return false;
4464 vec_mode = TYPE_MODE (vectype);
4465 icode = (int) optab_handler (optab, vec_mode);
4466 if (icode == CODE_FOR_nothing)
4468 if (dump_enabled_p ())
4469 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4470 "op not supported by target.\n");
4471 /* Check only during analysis. */
4472 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
4473 || (vf < vect_min_worthwhile_factor (code)
4474 && !vec_stmt))
4475 return false;
4476 if (dump_enabled_p ())
4477 dump_printf_loc (MSG_NOTE, vect_location,
4478 "proceeding using word mode.\n");
4481 /* Worthwhile without SIMD support? Check only during analysis. */
4482 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
4483 && vf < vect_min_worthwhile_factor (code)
4484 && !vec_stmt)
4486 if (dump_enabled_p ())
4487 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4488 "not worthwhile without SIMD support.\n");
4489 return false;
4492 if (!vec_stmt) /* transformation not required. */
4494 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
4495 if (dump_enabled_p ())
4496 dump_printf_loc (MSG_NOTE, vect_location,
4497 "=== vectorizable_shift ===\n");
4498 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4499 return true;
4502 /** Transform. **/
4504 if (dump_enabled_p ())
4505 dump_printf_loc (MSG_NOTE, vect_location,
4506 "transform binary/unary operation.\n");
4508 /* Handle def. */
4509 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4511 prev_stmt_info = NULL;
4512 for (j = 0; j < ncopies; j++)
4514 /* Handle uses. */
4515 if (j == 0)
4517 if (scalar_shift_arg)
4519 /* Vector shl and shr insn patterns can be defined with scalar
4520 operand 2 (shift operand). In this case, use constant or loop
4521 invariant op1 directly, without extending it to vector mode
4522 first. */
4523 optab_op2_mode = insn_data[icode].operand[2].mode;
4524 if (!VECTOR_MODE_P (optab_op2_mode))
4526 if (dump_enabled_p ())
4527 dump_printf_loc (MSG_NOTE, vect_location,
4528 "operand 1 using scalar mode.\n");
4529 vec_oprnd1 = op1;
4530 vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
4531 vec_oprnds1.quick_push (vec_oprnd1);
4532 if (slp_node)
4534 /* Store vec_oprnd1 for every vector stmt to be created
4535 for SLP_NODE. We check during the analysis that all
4536 the shift arguments are the same.
4537 TODO: Allow different constants for different vector
4538 stmts generated for an SLP instance. */
4539 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
4540 vec_oprnds1.quick_push (vec_oprnd1);
4545 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
4546 (a special case for certain kinds of vector shifts); otherwise,
4547 operand 1 should be of a vector type (the usual case). */
4548 if (vec_oprnd1)
4549 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4550 slp_node, -1);
4551 else
4552 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
4553 slp_node, -1);
4555 else
4556 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
4558 /* Arguments are ready. Create the new vector stmt. */
4559 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4561 vop1 = vec_oprnds1[i];
4562 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
4563 new_temp = make_ssa_name (vec_dest, new_stmt);
4564 gimple_assign_set_lhs (new_stmt, new_temp);
4565 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4566 if (slp_node)
4567 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4570 if (slp_node)
4571 continue;
4573 if (j == 0)
4574 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4575 else
4576 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4577 prev_stmt_info = vinfo_for_stmt (new_stmt);
4580 vec_oprnds0.release ();
4581 vec_oprnds1.release ();
4583 return true;
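/* A minimal illustration of the two shift forms distinguished above via
   SCALAR_SHIFT_ARG (illustrative names only; the block is compiled out):  */
#if 0
void
example_shift_loops (int *a, const int *b, const int *c, int k, int n)
{
  /* Invariant shift amount: the vector/scalar shift optab is preferred.  */
  for (int i = 0; i < n; i++)
    a[i] = b[i] << k;
  /* Per-element shift amount: only the vector/vector optab can be used.  */
  for (int i = 0; i < n; i++)
    a[i] = b[i] << c[i];
}
#endif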
4587 /* Function vectorizable_operation.
4589 Check if STMT performs a binary, unary or ternary operation that can
4590 be vectorized.
4591 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4592 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4593 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4595 static bool
4596 vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
4597 gimple *vec_stmt, slp_tree slp_node)
4599 tree vec_dest;
4600 tree scalar_dest;
4601 tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
4602 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4603 tree vectype;
4604 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4605 enum tree_code code;
4606 machine_mode vec_mode;
4607 tree new_temp;
4608 int op_type;
4609 optab optab;
4610 int icode;
4611 tree def;
4612 gimple def_stmt;
4613 enum vect_def_type dt[3]
4614 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
4615 gimple new_stmt = NULL;
4616 stmt_vec_info prev_stmt_info;
4617 int nunits_in;
4618 int nunits_out;
4619 tree vectype_out;
4620 int ncopies;
4621 int j, i;
4622 vec<tree> vec_oprnds0 = vNULL;
4623 vec<tree> vec_oprnds1 = vNULL;
4624 vec<tree> vec_oprnds2 = vNULL;
4625 tree vop0, vop1, vop2;
4626 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4627 int vf;
4629 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4630 return false;
4632 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4633 return false;
4635 /* Is STMT a vectorizable binary/unary operation? */
4636 if (!is_gimple_assign (stmt))
4637 return false;
4639 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4640 return false;
4642 code = gimple_assign_rhs_code (stmt);
4644 /* For pointer addition, we should use the normal plus for
4645 the vector addition. */
4646 if (code == POINTER_PLUS_EXPR)
4647 code = PLUS_EXPR;
4649 /* Support only unary, binary or ternary operations. */
4650 op_type = TREE_CODE_LENGTH (code);
4651 if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
4653 if (dump_enabled_p ())
4654 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4655 "num. args = %d (not unary/binary/ternary op).\n",
4656 op_type);
4657 return false;
4660 scalar_dest = gimple_assign_lhs (stmt);
4661 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4663 /* Most operations cannot handle bit-precision types without extra
4664 truncations. */
4665 if ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4666 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4667 /* Exceptions are bitwise binary operations. */
4668 && code != BIT_IOR_EXPR
4669 && code != BIT_XOR_EXPR
4670 && code != BIT_AND_EXPR)
4672 if (dump_enabled_p ())
4673 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4674 "bit-precision arithmetic not supported.\n");
4675 return false;
4678 op0 = gimple_assign_rhs1 (stmt);
4679 if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
4680 &def_stmt, &def, &dt[0], &vectype))
4682 if (dump_enabled_p ())
4683 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4684 "use not simple.\n");
4685 return false;
4687 /* If op0 is an external or constant def, use a vector type with
4688 the same size as the output vector type. */
4689 if (!vectype)
4690 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
4691 if (vec_stmt)
4692 gcc_assert (vectype);
4693 if (!vectype)
4695 if (dump_enabled_p ())
4697 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4698 "no vectype for scalar type ");
4699 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
4700 TREE_TYPE (op0));
4701 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4704 return false;
4707 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4708 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
4709 if (nunits_out != nunits_in)
4710 return false;
4712 if (op_type == binary_op || op_type == ternary_op)
4714 op1 = gimple_assign_rhs2 (stmt);
4715 if (!vect_is_simple_use (op1, stmt, loop_vinfo, bb_vinfo, &def_stmt,
4716 &def, &dt[1]))
4718 if (dump_enabled_p ())
4719 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4720 "use not simple.\n");
4721 return false;
4724 if (op_type == ternary_op)
4726 op2 = gimple_assign_rhs3 (stmt);
4727 if (!vect_is_simple_use (op2, stmt, loop_vinfo, bb_vinfo, &def_stmt,
4728 &def, &dt[2]))
4730 if (dump_enabled_p ())
4731 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4732 "use not simple.\n");
4733 return false;
4737 if (loop_vinfo)
4738 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4739 else
4740 vf = 1;
4742 /* Multiple types in SLP are handled by creating the appropriate number of
4743 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4744 case of SLP. */
4745 if (slp_node || PURE_SLP_STMT (stmt_info))
4746 ncopies = 1;
4747 else
4748 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
4750 gcc_assert (ncopies >= 1);
4752 /* Shifts are handled in vectorizable_shift (). */
4753 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
4754 || code == RROTATE_EXPR)
4755 return false;
4757 /* Supportable by target? */
4759 vec_mode = TYPE_MODE (vectype);
4760 if (code == MULT_HIGHPART_EXPR)
4762 if (can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype)))
4763 icode = LAST_INSN_CODE;
4764 else
4765 icode = CODE_FOR_nothing;
4767 else
4769 optab = optab_for_tree_code (code, vectype, optab_default);
4770 if (!optab)
4772 if (dump_enabled_p ())
4773 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4774 "no optab.\n");
4775 return false;
4777 icode = (int) optab_handler (optab, vec_mode);
4780 if (icode == CODE_FOR_nothing)
4782 if (dump_enabled_p ())
4783 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4784 "op not supported by target.\n");
4785 /* Check only during analysis. */
4786 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
4787 || (!vec_stmt && vf < vect_min_worthwhile_factor (code)))
4788 return false;
4789 if (dump_enabled_p ())
4790 dump_printf_loc (MSG_NOTE, vect_location,
4791 "proceeding using word mode.\n");
4794 /* Worthwhile without SIMD support? Check only during analysis. */
4795 if (!VECTOR_MODE_P (vec_mode)
4796 && !vec_stmt
4797 && vf < vect_min_worthwhile_factor (code))
4799 if (dump_enabled_p ())
4800 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4801 "not worthwhile without SIMD support.\n");
4802 return false;
4805 if (!vec_stmt) /* transformation not required. */
4807 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
4808 if (dump_enabled_p ())
4809 dump_printf_loc (MSG_NOTE, vect_location,
4810 "=== vectorizable_operation ===\n");
4811 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4812 return true;
4815 /** Transform. **/
4817 if (dump_enabled_p ())
4818 dump_printf_loc (MSG_NOTE, vect_location,
4819 "transform binary/unary operation.\n");
4821 /* Handle def. */
4822 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4824 /* In case the vectorization factor (VF) is bigger than the number
4825 of elements that we can fit in a vectype (nunits), we have to generate
4826 more than one vector stmt - i.e - we need to "unroll" the
4827 vector stmt by a factor VF/nunits. In doing so, we record a pointer
4828 from one copy of the vector stmt to the next, in the field
4829 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
4830 stages to find the correct vector defs to be used when vectorizing
4831 stmts that use the defs of the current stmt. The example below
4832 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
4833 we need to create 4 vectorized stmts):
4835 before vectorization:
4836 RELATED_STMT VEC_STMT
4837 S1: x = memref - -
4838 S2: z = x + 1 - -
4840 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
4841 there):
4842 RELATED_STMT VEC_STMT
4843 VS1_0: vx0 = memref0 VS1_1 -
4844 VS1_1: vx1 = memref1 VS1_2 -
4845 VS1_2: vx2 = memref2 VS1_3 -
4846 VS1_3: vx3 = memref3 - -
4847 S1: x = load - VS1_0
4848 S2: z = x + 1 - -
4850 step2: vectorize stmt S2 (done here):
4851 To vectorize stmt S2 we first need to find the relevant vector
4852 def for the first operand 'x'. This is, as usual, obtained from
4853 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
4854 that defines 'x' (S1). This way we find the stmt VS1_0, and the
4855 relevant vector def 'vx0'. Having found 'vx0' we can generate
4856 the vector stmt VS2_0, and as usual, record it in the
4857 STMT_VINFO_VEC_STMT of stmt S2.
4858 When creating the second copy (VS2_1), we obtain the relevant vector
4859 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
4860 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
4861 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
4862 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
4863 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
4864 chain of stmts and pointers:
4865 RELATED_STMT VEC_STMT
4866 VS1_0: vx0 = memref0 VS1_1 -
4867 VS1_1: vx1 = memref1 VS1_2 -
4868 VS1_2: vx2 = memref2 VS1_3 -
4869 VS1_3: vx3 = memref3 - -
4870 S1: x = load - VS1_0
4871 VS2_0: vz0 = vx0 + v1 VS2_1 -
4872 VS2_1: vz1 = vx1 + v1 VS2_2 -
4873 VS2_2: vz2 = vx2 + v1 VS2_3 -
4874 VS2_3: vz3 = vx3 + v1 - -
4875 S2: z = x + 1 - VS2_0 */
4877 prev_stmt_info = NULL;
4878 for (j = 0; j < ncopies; j++)
4880 /* Handle uses. */
4881 if (j == 0)
4883 if (op_type == binary_op || op_type == ternary_op)
4884 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
4885 slp_node, -1);
4886 else
4887 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4888 slp_node, -1);
4889 if (op_type == ternary_op)
4891 vec_oprnds2.create (1);
4892 vec_oprnds2.quick_push (vect_get_vec_def_for_operand (op2,
4893 stmt,
4894 NULL));
4897 else
4899 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
4900 if (op_type == ternary_op)
4902 tree vec_oprnd = vec_oprnds2.pop ();
4903 vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
4904 vec_oprnd));
4908 /* Arguments are ready. Create the new vector stmt. */
4909 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4911 vop1 = ((op_type == binary_op || op_type == ternary_op)
4912 ? vec_oprnds1[i] : NULL_TREE);
4913 vop2 = ((op_type == ternary_op)
4914 ? vec_oprnds2[i] : NULL_TREE);
4915 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2);
4916 new_temp = make_ssa_name (vec_dest, new_stmt);
4917 gimple_assign_set_lhs (new_stmt, new_temp);
4918 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4919 if (slp_node)
4920 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4923 if (slp_node)
4924 continue;
4926 if (j == 0)
4927 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4928 else
4929 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4930 prev_stmt_info = vinfo_for_stmt (new_stmt);
4933 vec_oprnds0.release ();
4934 vec_oprnds1.release ();
4935 vec_oprnds2.release ();
4937 return true;
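/* A minimal illustration of the S1/S2 example from the comment above
   (illustrative names only; the block is compiled out):  */
#if 0
/* With VF == 16 and 4-element vectors, the addition below becomes four
   vector statements VS2_0..VS2_3 chained through STMT_VINFO_RELATED_STMT.  */
void
example_binary_operation (int *z, const int *x, int n)
{
  for (int i = 0; i < n; i++)
    z[i] = x[i] + 1;
}
#endif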
4940 /* A helper function to ensure data reference DR's base alignment
4941 for STMT_INFO. */
4943 static void
4944 ensure_base_align (stmt_vec_info stmt_info, struct data_reference *dr)
4946 if (!dr->aux)
4947 return;
4949 if (((dataref_aux *)dr->aux)->base_misaligned)
4951 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4952 tree base_decl = ((dataref_aux *)dr->aux)->base_decl;
4954 if (decl_in_symtab_p (base_decl))
4955 symtab_node::get (base_decl)->increase_alignment (TYPE_ALIGN (vectype));
4956 else
4958 DECL_ALIGN (base_decl) = TYPE_ALIGN (vectype);
4959 DECL_USER_ALIGN (base_decl) = 1;
4961 ((dataref_aux *)dr->aux)->base_misaligned = false;
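/* A minimal illustration of the effect of ensure_base_align
   (illustrative only; the block is compiled out):  */
#if 0
/* If 'data' was recorded as base-misaligned during analysis, its
   DECL_ALIGN is raised to the vector alignment, as if it had been
   written with an explicit attribute:
     static float data[1024] __attribute__ ((aligned (16)));  */
static float data[1024];
#endif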
4966 /* Given a vector type VECTYPE, returns the VECTOR_CST mask that implements
4967 reversal of the vector elements. If that is impossible to do,
4968 returns NULL_TREE. */
4970 static tree
4971 perm_mask_for_reverse (tree vectype)
4973 int i, nunits;
4974 unsigned char *sel;
4976 nunits = TYPE_VECTOR_SUBPARTS (vectype);
4977 sel = XALLOCAVEC (unsigned char, nunits);
4979 for (i = 0; i < nunits; ++i)
4980 sel[i] = nunits - 1 - i;
4982 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
4983 return NULL_TREE;
4984 return vect_gen_perm_mask_checked (vectype, sel);
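/* A minimal sketch of the mask computed above for a 4-element vector
   (illustrative helper name; the block is compiled out):  */
#if 0
static tree
example_reverse_mask_v4 (tree vectype)
{
  /* Element reversal of a 4-element vector selects {3, 2, 1, 0};
     perm_mask_for_reverse builds the same selector for any width.  */
  unsigned char sel[4] = { 3, 2, 1, 0 };
  gcc_assert (TYPE_VECTOR_SUBPARTS (vectype) == 4);
  return vect_gen_perm_mask_checked (vectype, sel);
}
#endif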
4987 /* Function vectorizable_store.
4989 Check if STMT defines a non-scalar data-ref (array/pointer/structure) that
4990 can be vectorized.
4991 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4992 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4993 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4995 static bool
4996 vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
4997 slp_tree slp_node)
4999 tree scalar_dest;
5000 tree data_ref;
5001 tree op;
5002 tree vec_oprnd = NULL_TREE;
5003 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5004 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
5005 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5006 tree elem_type;
5007 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5008 struct loop *loop = NULL;
5009 machine_mode vec_mode;
5010 tree dummy;
5011 enum dr_alignment_support alignment_support_scheme;
5012 tree def;
5013 gimple def_stmt;
5014 enum vect_def_type dt;
5015 stmt_vec_info prev_stmt_info = NULL;
5016 tree dataref_ptr = NULL_TREE;
5017 tree dataref_offset = NULL_TREE;
5018 gimple ptr_incr = NULL;
5019 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
5020 int ncopies;
5021 int j;
5022 gimple next_stmt, first_stmt = NULL;
5023 bool grouped_store = false;
5024 bool store_lanes_p = false;
5025 unsigned int group_size, i;
5026 vec<tree> dr_chain = vNULL;
5027 vec<tree> oprnds = vNULL;
5028 vec<tree> result_chain = vNULL;
5029 bool inv_p;
5030 bool negative = false;
5031 tree offset = NULL_TREE;
5032 vec<tree> vec_oprnds = vNULL;
5033 bool slp = (slp_node != NULL);
5034 unsigned int vec_num;
5035 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5036 tree aggr_type;
5038 if (loop_vinfo)
5039 loop = LOOP_VINFO_LOOP (loop_vinfo);
5041 /* Multiple types in SLP are handled by creating the appropriate number of
5042 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5043 case of SLP. */
5044 if (slp || PURE_SLP_STMT (stmt_info))
5045 ncopies = 1;
5046 else
5047 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
5049 gcc_assert (ncopies >= 1);
5051 /* FORNOW. This restriction should be relaxed. */
5052 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
5054 if (dump_enabled_p ())
5055 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5056 "multiple types in nested loop.\n");
5057 return false;
5060 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5061 return false;
5063 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
5064 return false;
5066 /* Is vectorizable store? */
5068 if (!is_gimple_assign (stmt))
5069 return false;
5071 scalar_dest = gimple_assign_lhs (stmt);
5072 if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
5073 && is_pattern_stmt_p (stmt_info))
5074 scalar_dest = TREE_OPERAND (scalar_dest, 0);
5075 if (TREE_CODE (scalar_dest) != ARRAY_REF
5076 && TREE_CODE (scalar_dest) != BIT_FIELD_REF
5077 && TREE_CODE (scalar_dest) != INDIRECT_REF
5078 && TREE_CODE (scalar_dest) != COMPONENT_REF
5079 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
5080 && TREE_CODE (scalar_dest) != REALPART_EXPR
5081 && TREE_CODE (scalar_dest) != MEM_REF)
5082 return false;
5084 gcc_assert (gimple_assign_single_p (stmt));
5085 op = gimple_assign_rhs1 (stmt);
5086 if (!vect_is_simple_use (op, stmt, loop_vinfo, bb_vinfo, &def_stmt,
5087 &def, &dt))
5089 if (dump_enabled_p ())
5090 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5091 "use not simple.\n");
5092 return false;
5095 elem_type = TREE_TYPE (vectype);
5096 vec_mode = TYPE_MODE (vectype);
5098 /* FORNOW. In some cases can vectorize even if data-type not supported
5099 (e.g. - array initialization with 0). */
5100 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
5101 return false;
5103 if (!STMT_VINFO_DATA_REF (stmt_info))
5104 return false;
5106 negative =
5107 tree_int_cst_compare (loop && nested_in_vect_loop_p (loop, stmt)
5108 ? STMT_VINFO_DR_STEP (stmt_info) : DR_STEP (dr),
5109 size_zero_node) < 0;
5110 if (negative && ncopies > 1)
5112 if (dump_enabled_p ())
5113 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5114 "multiple types with negative step.\n");
5115 return false;
5118 if (negative)
5120 gcc_assert (!grouped_store);
5121 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
5122 if (alignment_support_scheme != dr_aligned
5123 && alignment_support_scheme != dr_unaligned_supported)
5125 if (dump_enabled_p ())
5126 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5127 "negative step but alignment required.\n");
5128 return false;
5130 if (dt != vect_constant_def
5131 && dt != vect_external_def
5132 && !perm_mask_for_reverse (vectype))
5134 if (dump_enabled_p ())
5135 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5136 "negative step and reversing not supported.\n");
5137 return false;
5141 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
5143 grouped_store = true;
5144 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
5145 if (!slp && !PURE_SLP_STMT (stmt_info))
5147 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
5148 if (vect_store_lanes_supported (vectype, group_size))
5149 store_lanes_p = true;
5150 else if (!vect_grouped_store_supported (vectype, group_size))
5151 return false;
5154 if (first_stmt == stmt)
5156 /* STMT is the leader of the group. Check the operands of all the
5157 stmts of the group. */
5158 next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
5159 while (next_stmt)
5161 gcc_assert (gimple_assign_single_p (next_stmt));
5162 op = gimple_assign_rhs1 (next_stmt);
5163 if (!vect_is_simple_use (op, next_stmt, loop_vinfo, bb_vinfo,
5164 &def_stmt, &def, &dt))
5166 if (dump_enabled_p ())
5167 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5168 "use not simple.\n");
5169 return false;
5171 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5176 if (!vec_stmt) /* transformation not required. */
5178 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
5179 /* The SLP costs are calculated during SLP analysis. */
5180 if (!PURE_SLP_STMT (stmt_info))
5181 vect_model_store_cost (stmt_info, ncopies, store_lanes_p, dt,
5182 NULL, NULL, NULL);
5183 return true;
5186 /** Transform. **/
5188 ensure_base_align (stmt_info, dr);
5190 if (grouped_store)
5192 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5193 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
5195 GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
5197 /* FORNOW */
5198 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
5200 /* We vectorize all the stmts of the interleaving group when we
5201 reach the last stmt in the group. */
5202 if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
5203 < GROUP_SIZE (vinfo_for_stmt (first_stmt))
5204 && !slp)
5206 *vec_stmt = NULL;
5207 return true;
5210 if (slp)
5212 grouped_store = false;
5213 /* VEC_NUM is the number of vect stmts to be created for this
5214 group. */
5215 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
5216 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
5217 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5218 op = gimple_assign_rhs1 (first_stmt);
5220 else
5221 /* VEC_NUM is the number of vect stmts to be created for this
5222 group. */
5223 vec_num = group_size;
5225 else
5227 first_stmt = stmt;
5228 first_dr = dr;
5229 group_size = vec_num = 1;
5232 if (dump_enabled_p ())
5233 dump_printf_loc (MSG_NOTE, vect_location,
5234 "transform store. ncopies = %d\n", ncopies);
5236 dr_chain.create (group_size);
5237 oprnds.create (group_size);
5239 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
5240 gcc_assert (alignment_support_scheme);
5241 /* Targets with store-lane instructions must not require explicit
5242 realignment. */
5243 gcc_assert (!store_lanes_p
5244 || alignment_support_scheme == dr_aligned
5245 || alignment_support_scheme == dr_unaligned_supported);
5247 if (negative)
5248 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
5250 if (store_lanes_p)
5251 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
5252 else
5253 aggr_type = vectype;
5255 /* In case the vectorization factor (VF) is bigger than the number
5256 of elements that we can fit in a vectype (nunits), we have to generate
5257 more than one vector stmt - i.e - we need to "unroll" the
5258 vector stmt by a factor VF/nunits. For more details see documentation in
5259 vect_get_vec_def_for_copy_stmt. */
5261 /* In case of interleaving (non-unit grouped access):
5263 S1: &base + 2 = x2
5264 S2: &base = x0
5265 S3: &base + 1 = x1
5266 S4: &base + 3 = x3
5268 We create vectorized stores starting from base address (the access of the
5269 first stmt in the chain (S2 in the above example), when the last store stmt
5270 of the chain (S4) is reached:
5272 VS1: &base = vx2
5273 VS2: &base + vec_size*1 = vx0
5274 VS3: &base + vec_size*2 = vx1
5275 VS4: &base + vec_size*3 = vx3
5277 Then permutation statements are generated:
5279 VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
5280 VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
5283 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
5284 (the order of the data-refs in the output of vect_permute_store_chain
5285 corresponds to the order of scalar stmts in the interleaving chain - see
5286 the documentation of vect_permute_store_chain()).
5288 In case of both multiple types and interleaving, above vector stores and
5289 permutation stmts are created for every copy. The result vector stmts are
5290 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
5291 STMT_VINFO_RELATED_STMT for the next copies.
5294 prev_stmt_info = NULL;
5295 for (j = 0; j < ncopies; j++)
5297 gimple new_stmt;
5299 if (j == 0)
5301 if (slp)
5303 /* Get vectorized arguments for SLP_NODE. */
5304 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
5305 NULL, slp_node, -1);
5307 vec_oprnd = vec_oprnds[0];
5309 else
5311 /* For interleaved stores we collect vectorized defs for all the
5312 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
5313 used as an input to vect_permute_store_chain(), and OPRNDS as
5314 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
5316 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
5317 OPRNDS are of size 1. */
5318 next_stmt = first_stmt;
5319 for (i = 0; i < group_size; i++)
5321 /* Since gaps are not supported for interleaved stores,
5322 GROUP_SIZE is the exact number of stmts in the chain.
5323 Therefore, NEXT_STMT can't be NULL_TREE. In case that
5324 there is no interleaving, GROUP_SIZE is 1, and only one
5325 iteration of the loop will be executed. */
5326 gcc_assert (next_stmt
5327 && gimple_assign_single_p (next_stmt));
5328 op = gimple_assign_rhs1 (next_stmt);
5330 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
5331 NULL);
5332 dr_chain.quick_push (vec_oprnd);
5333 oprnds.quick_push (vec_oprnd);
5334 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5338 /* We should have caught mismatched types earlier. */
5339 gcc_assert (useless_type_conversion_p (vectype,
5340 TREE_TYPE (vec_oprnd)));
5341 bool simd_lane_access_p
5342 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
5343 if (simd_lane_access_p
5344 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
5345 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
5346 && integer_zerop (DR_OFFSET (first_dr))
5347 && integer_zerop (DR_INIT (first_dr))
5348 && alias_sets_conflict_p (get_alias_set (aggr_type),
5349 get_alias_set (DR_REF (first_dr))))
5351 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
5352 dataref_offset = build_int_cst (reference_alias_ptr_type
5353 (DR_REF (first_dr)), 0);
5354 inv_p = false;
5356 else
5357 dataref_ptr
5358 = vect_create_data_ref_ptr (first_stmt, aggr_type,
5359 simd_lane_access_p ? loop : NULL,
5360 offset, &dummy, gsi, &ptr_incr,
5361 simd_lane_access_p, &inv_p);
5362 gcc_assert (bb_vinfo || !inv_p);
5364 else
5366 /* For interleaved stores we created vectorized defs for all the
5367 defs stored in OPRNDS in the previous iteration (previous copy).
5368 DR_CHAIN is then used as an input to vect_permute_store_chain(),
5369 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
5370 next copy.
5371 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
5372 OPRNDS are of size 1. */
5373 for (i = 0; i < group_size; i++)
5375 op = oprnds[i];
5376 vect_is_simple_use (op, NULL, loop_vinfo, bb_vinfo, &def_stmt,
5377 &def, &dt);
5378 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
5379 dr_chain[i] = vec_oprnd;
5380 oprnds[i] = vec_oprnd;
5382 if (dataref_offset)
5383 dataref_offset
5384 = int_const_binop (PLUS_EXPR, dataref_offset,
5385 TYPE_SIZE_UNIT (aggr_type));
5386 else
5387 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
5388 TYPE_SIZE_UNIT (aggr_type));
5391 if (store_lanes_p)
5393 tree vec_array;
5395 /* Combine all the vectors into an array. */
5396 vec_array = create_vector_array (vectype, vec_num);
5397 for (i = 0; i < vec_num; i++)
5399 vec_oprnd = dr_chain[i];
5400 write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
5403 /* Emit:
5404 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
5405 data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
5406 new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array);
5407 gimple_call_set_lhs (new_stmt, data_ref);
5408 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5410 else
5412 new_stmt = NULL;
5413 if (grouped_store)
5415 if (j == 0)
5416 result_chain.create (group_size);
5417 /* Permute. */
5418 vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
5419 &result_chain);
5422 next_stmt = first_stmt;
5423 for (i = 0; i < vec_num; i++)
5425 unsigned align, misalign;
5427 if (i > 0)
5428 /* Bump the vector pointer. */
5429 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
5430 stmt, NULL_TREE);
5432 if (slp)
5433 vec_oprnd = vec_oprnds[i];
5434 else if (grouped_store)
5435 /* For grouped stores vectorized defs are interleaved in
5436 vect_permute_store_chain(). */
5437 vec_oprnd = result_chain[i];
5439 data_ref = build2 (MEM_REF, TREE_TYPE (vec_oprnd), dataref_ptr,
5440 dataref_offset
5441 ? dataref_offset
5442 : build_int_cst (reference_alias_ptr_type
5443 (DR_REF (first_dr)), 0));
5444 align = TYPE_ALIGN_UNIT (vectype);
5445 if (aligned_access_p (first_dr))
5446 misalign = 0;
5447 else if (DR_MISALIGNMENT (first_dr) == -1)
5449 TREE_TYPE (data_ref)
5450 = build_aligned_type (TREE_TYPE (data_ref),
5451 TYPE_ALIGN (elem_type));
5452 align = TYPE_ALIGN_UNIT (elem_type);
5453 misalign = 0;
5455 else
5457 TREE_TYPE (data_ref)
5458 = build_aligned_type (TREE_TYPE (data_ref),
5459 TYPE_ALIGN (elem_type));
5460 misalign = DR_MISALIGNMENT (first_dr);
5462 if (dataref_offset == NULL_TREE)
5463 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
5464 misalign);
5466 if (negative
5467 && dt != vect_constant_def
5468 && dt != vect_external_def)
5470 tree perm_mask = perm_mask_for_reverse (vectype);
5471 tree perm_dest
5472 = vect_create_destination_var (gimple_assign_rhs1 (stmt),
5473 vectype);
5474 tree new_temp = make_ssa_name (perm_dest);
5476 /* Generate the permute statement. */
5477 gimple perm_stmt
5478 = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
5479 vec_oprnd, perm_mask);
5480 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5482 perm_stmt = SSA_NAME_DEF_STMT (new_temp);
5483 vec_oprnd = new_temp;
5486 /* Arguments are ready. Create the new vector stmt. */
5487 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
5488 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5490 if (slp)
5491 continue;
5493 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5494 if (!next_stmt)
5495 break;
5498 if (!slp)
5500 if (j == 0)
5501 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5502 else
5503 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5504 prev_stmt_info = vinfo_for_stmt (new_stmt);
5508 dr_chain.release ();
5509 oprnds.release ();
5510 result_chain.release ();
5511 vec_oprnds.release ();
5513 return true;
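/* A minimal illustration of an interleaved store group as described in the
   S1..S4 comment above (illustrative names only; the block is compiled out):  */
#if 0
/* Group size 2: the vectorized defs of a[] and b[] are interleaved by
   vect_permute_store_chain before the wide stores are emitted.  */
void
example_interleaved_store (int *out, const int *a, const int *b, int n)
{
  for (int i = 0; i < n; i++)
    {
      out[2 * i] = a[i];
      out[2 * i + 1] = b[i];
    }
}
#endif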
5516 /* Given a vector type VECTYPE, turns permutation SEL into the equivalent
5517 VECTOR_CST mask. No checks are made that the target platform supports the
5518 mask, so callers may wish to test can_vec_perm_p separately, or use
5519 vect_gen_perm_mask_checked. */
5521 tree
5522 vect_gen_perm_mask_any (tree vectype, const unsigned char *sel)
5524 tree mask_elt_type, mask_type, mask_vec, *mask_elts;
5525 int i, nunits;
5527 nunits = TYPE_VECTOR_SUBPARTS (vectype);
5529 mask_elt_type = lang_hooks.types.type_for_mode
5530 (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
5531 mask_type = get_vectype_for_scalar_type (mask_elt_type);
5533 mask_elts = XALLOCAVEC (tree, nunits);
5534 for (i = nunits - 1; i >= 0; i--)
5535 mask_elts[i] = build_int_cst (mask_elt_type, sel[i]);
5536 mask_vec = build_vector (mask_type, mask_elts);
5538 return mask_vec;
5541 /* Checked version of vect_gen_perm_mask_any. Asserts can_vec_perm_p,
5542 i.e. that the target supports the pattern _for arbitrary input vectors_. */
5544 tree
5545 vect_gen_perm_mask_checked (tree vectype, const unsigned char *sel)
5547 gcc_assert (can_vec_perm_p (TYPE_MODE (vectype), false, sel));
5548 return vect_gen_perm_mask_any (vectype, sel);
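/* A minimal sketch of building a permutation mask with the helpers above
   (illustrative helper name; the block is compiled out):  */
#if 0
static tree
example_extract_even_mask_v8 (tree vectype)
{
  /* Select the even elements of two concatenated 8-element vectors, as a
     narrowing permutation would; check target support before building.  */
  unsigned char sel[8] = { 0, 2, 4, 6, 8, 10, 12, 14 };
  if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
    return NULL_TREE;
  return vect_gen_perm_mask_any (vectype, sel);
}
#endif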
5551 /* Given vector variables X and Y that were generated for the scalar
5552 STMT, generate instructions to permute the vector elements of X and Y
5553 using permutation mask MASK_VEC, insert them at *GSI and return the
5554 permuted vector variable. */
5556 static tree
5557 permute_vec_elements (tree x, tree y, tree mask_vec, gimple stmt,
5558 gimple_stmt_iterator *gsi)
5560 tree vectype = TREE_TYPE (x);
5561 tree perm_dest, data_ref;
5562 gimple perm_stmt;
5564 perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
5565 data_ref = make_ssa_name (perm_dest);
5567 /* Generate the permute statement. */
5568 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
5569 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5571 return data_ref;
5574 /* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
5575 inserting them on the loop's preheader edge. Returns true if we
5576 were successful in doing so (and thus STMT can be moved then),
5577 otherwise returns false. */
5579 static bool
5580 hoist_defs_of_uses (gimple stmt, struct loop *loop)
5582 ssa_op_iter i;
5583 tree op;
5584 bool any = false;
5586 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
5588 gimple def_stmt = SSA_NAME_DEF_STMT (op);
5589 if (!gimple_nop_p (def_stmt)
5590 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
5592 /* Make sure we don't need to recurse. While we could do
5593 so in simple cases, when there are more complex use webs
5594 we don't have an easy way to preserve stmt order to fulfil
5595 dependencies within them. */
5596 tree op2;
5597 ssa_op_iter i2;
5598 if (gimple_code (def_stmt) == GIMPLE_PHI)
5599 return false;
5600 FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
5602 gimple def_stmt2 = SSA_NAME_DEF_STMT (op2);
5603 if (!gimple_nop_p (def_stmt2)
5604 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
5605 return false;
5607 any = true;
5611 if (!any)
5612 return true;
5614 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
5616 gimple def_stmt = SSA_NAME_DEF_STMT (op);
5617 if (!gimple_nop_p (def_stmt)
5618 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
5620 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
5621 gsi_remove (&gsi, false);
5622 gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
5626 return true;
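/* A minimal illustration of the situation handled above
   (illustrative names only; the block is compiled out):  */
#if 0
/* '*q' is invariant, but its address 'p + off' is computed inside the
   loop; hoist_defs_of_uses moves that computation to the preheader so
   the invariant load itself can then be hoisted.  */
int
example_invariant_load (const int *p, int off, int n)
{
  int sum = 0;
  for (int i = 0; i < n; i++)
    {
      const int *q = p + off;
      sum += *q;
    }
  return sum;
}
#endif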
5629 /* vectorizable_load.
5631 Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
5632 can be vectorized.
5633 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5634 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
5635 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5637 static bool
5638 vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
5639 slp_tree slp_node, slp_instance slp_node_instance)
5641 tree scalar_dest;
5642 tree vec_dest = NULL;
5643 tree data_ref = NULL;
5644 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5645 stmt_vec_info prev_stmt_info;
5646 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5647 struct loop *loop = NULL;
5648 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
5649 bool nested_in_vect_loop = false;
5650 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
5651 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5652 tree elem_type;
5653 tree new_temp;
5654 machine_mode mode;
5655 gimple new_stmt = NULL;
5656 tree dummy;
5657 enum dr_alignment_support alignment_support_scheme;
5658 tree dataref_ptr = NULL_TREE;
5659 tree dataref_offset = NULL_TREE;
5660 gimple ptr_incr = NULL;
5661 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
5662 int ncopies;
5663 int i, j, group_size = -1, group_gap;
5664 tree msq = NULL_TREE, lsq;
5665 tree offset = NULL_TREE;
5666 tree byte_offset = NULL_TREE;
5667 tree realignment_token = NULL_TREE;
5668 gphi *phi = NULL;
5669 vec<tree> dr_chain = vNULL;
5670 bool grouped_load = false;
5671 bool load_lanes_p = false;
5672 gimple first_stmt;
5673 bool inv_p;
5674 bool negative = false;
5675 bool compute_in_loop = false;
5676 struct loop *at_loop;
5677 int vec_num;
5678 bool slp = (slp_node != NULL);
5679 bool slp_perm = false;
5680 enum tree_code code;
5681 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5682 int vf;
5683 tree aggr_type;
5684 tree gather_base = NULL_TREE, gather_off = NULL_TREE;
5685 tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
5686 int gather_scale = 1;
5687 enum vect_def_type gather_dt = vect_unknown_def_type;
5689 if (loop_vinfo)
5691 loop = LOOP_VINFO_LOOP (loop_vinfo);
5692 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
5693 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
5695 else
5696 vf = 1;
5698 /* Multiple types in SLP are handled by creating the appropriate number of
5699 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5700 case of SLP. */
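/* Worked example for the non-SLP path below (illustrative numbers): with a
   vectorization factor of 8 and a V4SI vectype, nunits == 4 and
   ncopies == 8 / 4 == 2, i.e. two vector loads are emitted per scalar
   load.  */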
5701 if (slp || PURE_SLP_STMT (stmt_info))
5702 ncopies = 1;
5703 else
5704 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
5706 gcc_assert (ncopies >= 1);
5708 /* FORNOW. This restriction should be relaxed. */
5709 if (nested_in_vect_loop && ncopies > 1)
5711 if (dump_enabled_p ())
5712 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5713 "multiple types in nested loop.\n");
5714 return false;
5717 /* Invalidate assumptions made by dependence analysis when vectorization
5718 on the unrolled body effectively re-orders stmts. */
5719 if (ncopies > 1
5720 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
5721 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
5722 > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
5724 if (dump_enabled_p ())
5725 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5726 "cannot perform implicit CSE when unrolling "
5727 "with negative dependence distance\n");
5728 return false;
5731 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5732 return false;
5734 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
5735 return false;
5737 /* Is vectorizable load? */
5738 if (!is_gimple_assign (stmt))
5739 return false;
5741 scalar_dest = gimple_assign_lhs (stmt);
5742 if (TREE_CODE (scalar_dest) != SSA_NAME)
5743 return false;
5745 code = gimple_assign_rhs_code (stmt);
5746 if (code != ARRAY_REF
5747 && code != BIT_FIELD_REF
5748 && code != INDIRECT_REF
5749 && code != COMPONENT_REF
5750 && code != IMAGPART_EXPR
5751 && code != REALPART_EXPR
5752 && code != MEM_REF
5753 && TREE_CODE_CLASS (code) != tcc_declaration)
5754 return false;
5756 if (!STMT_VINFO_DATA_REF (stmt_info))
5757 return false;
5759 elem_type = TREE_TYPE (vectype);
5760 mode = TYPE_MODE (vectype);
5762 /* FORNOW. In some cases we can vectorize even if the data type is not
5763 supported (e.g. data copies). */
5764 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
5766 if (dump_enabled_p ())
5767 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5768 "Aligned load, but unsupported type.\n");
5769 return false;
5772 /* Check if the load is a part of an interleaving chain. */
5773 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
5775 grouped_load = true;
5776 /* FORNOW */
5777 gcc_assert (! nested_in_vect_loop && !STMT_VINFO_GATHER_P (stmt_info));
5779 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
5781 /* If this is single-element interleaving with an element distance
5782 that leaves unused vector loads around, punt - we at least create
5783 very sub-optimal code in that case (and blow up memory,
5784 see PR65518). */
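/* Illustrative example of the case rejected here (assumed numbers): a loop
   reading only a[8*i] with a V4SI vectype has GROUP_SIZE == 8 > nunits == 4,
   so every group would need two vector loads of which at most one element
   is ever used.  */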
5785 if (first_stmt == stmt
5786 && !GROUP_NEXT_ELEMENT (stmt_info)
5787 && GROUP_SIZE (stmt_info) > TYPE_VECTOR_SUBPARTS (vectype))
5789 if (dump_enabled_p ())
5790 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5791 "single-element interleaving not supported "
5792 "for not adjacent vector loads\n");
5793 return false;
5796 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
5797 if (!slp
5798 && !PURE_SLP_STMT (stmt_info)
5799 && !STMT_VINFO_STRIDE_LOAD_P (stmt_info))
5801 if (vect_load_lanes_supported (vectype, group_size))
5802 load_lanes_p = true;
5803 else if (!vect_grouped_load_supported (vectype, group_size))
5804 return false;
5807 /* Invalidate assumptions made by dependence analysis when vectorization
5808 on the unrolled body effectively re-orders stmts. */
5809 if (!PURE_SLP_STMT (stmt_info)
5810 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
5811 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
5812 > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
5814 if (dump_enabled_p ())
5815 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5816 "cannot perform implicit CSE when performing "
5817 "group loads with negative dependence distance\n");
5818 return false;
5821 /* Similarly, when the stmt is a load that is both part of an SLP
5822 instance and a loop vectorized stmt via the same-dr mechanism,
5823 we have to give up. */
5824 if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)
5825 && (STMT_SLP_TYPE (stmt_info)
5826 != STMT_SLP_TYPE (vinfo_for_stmt
5827 (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)))))
5829 if (dump_enabled_p ())
5830 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5831 "conflicting SLP types for CSEd load\n");
5832 return false;
5837 if (STMT_VINFO_GATHER_P (stmt_info))
5839 gimple def_stmt;
5840 tree def;
5841 gather_decl = vect_check_gather (stmt, loop_vinfo, &gather_base,
5842 &gather_off, &gather_scale);
5843 gcc_assert (gather_decl);
5844 if (!vect_is_simple_use_1 (gather_off, NULL, loop_vinfo, bb_vinfo,
5845 &def_stmt, &def, &gather_dt,
5846 &gather_off_vectype))
5848 if (dump_enabled_p ())
5849 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5850 "gather index use not simple.\n");
5851 return false;
5854 else if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
5856 if ((grouped_load
5857 && (slp || PURE_SLP_STMT (stmt_info)))
5858 && (group_size > nunits
5859 || nunits % group_size != 0
5860 /* ??? During the analysis phase we are not called with the
5861 SLP node/instance we are in, so we do not know whether
5862 we will end up with a permutation. Still, we don't
5863 support load permutations. */
5864 || slp_perm))
5866 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5867 "unhandled strided group load\n");
5868 return false;
5871 else
5873 negative = tree_int_cst_compare (nested_in_vect_loop
5874 ? STMT_VINFO_DR_STEP (stmt_info)
5875 : DR_STEP (dr),
5876 size_zero_node) < 0;
5877 if (negative && ncopies > 1)
5879 if (dump_enabled_p ())
5880 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5881 "multiple types with negative step.\n");
5882 return false;
5885 if (negative)
5887 if (grouped_load)
5889 if (dump_enabled_p ())
5890 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5891 "negative step for group load not supported"
5892 "\n");
5893 return false;
5895 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
5896 if (alignment_support_scheme != dr_aligned
5897 && alignment_support_scheme != dr_unaligned_supported)
5899 if (dump_enabled_p ())
5900 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5901 "negative step but alignment required.\n");
5902 return false;
5904 if (!perm_mask_for_reverse (vectype))
5906 if (dump_enabled_p ())
5907 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5908 "negative step and reversing not supported."
5909 "\n");
5910 return false;
5915 if (!vec_stmt) /* transformation not required. */
5917 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
5918 /* The SLP costs are calculated during SLP analysis. */
5919 if (!PURE_SLP_STMT (stmt_info))
5920 vect_model_load_cost (stmt_info, ncopies, load_lanes_p,
5921 NULL, NULL, NULL);
5922 return true;
5925 if (dump_enabled_p ())
5926 dump_printf_loc (MSG_NOTE, vect_location,
5927 "transform load. ncopies = %d\n", ncopies);
5929 /** Transform. **/
5931 ensure_base_align (stmt_info, dr);
5933 if (STMT_VINFO_GATHER_P (stmt_info))
5935 tree vec_oprnd0 = NULL_TREE, op;
5936 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
5937 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
5938 tree ptr, mask, var, scale, merge, perm_mask = NULL_TREE, prev_res = NULL_TREE;
5939 edge pe = loop_preheader_edge (loop);
5940 gimple_seq seq;
5941 basic_block new_bb;
5942 enum { NARROW, NONE, WIDEN } modifier;
5943 int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);
5945 if (nunits == gather_off_nunits)
5946 modifier = NONE;
5947 else if (nunits == gather_off_nunits / 2)
5949 unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
5950 modifier = WIDEN;
5952 for (i = 0; i < gather_off_nunits; ++i)
5953 sel[i] = i | nunits;
5955 perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
5957 else if (nunits == gather_off_nunits * 2)
5959 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
5960 modifier = NARROW;
5962 for (i = 0; i < nunits; ++i)
5963 sel[i] = i < gather_off_nunits
5964 ? i : i + nunits - gather_off_nunits;
5966 perm_mask = vect_gen_perm_mask_checked (vectype, sel);
5967 ncopies *= 2;
5969 else
5970 gcc_unreachable ();
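/* Worked examples of the masks built above (illustrative element counts):
   WIDEN, nunits == 4, gather_off_nunits == 8:
     sel == { 4, 5, 6, 7, 4, 5, 6, 7 }, i.e. the high half of the offset
     vector is moved into the low half for the odd-numbered copies.
   NARROW, nunits == 8, gather_off_nunits == 4:
     sel == { 0, 1, 2, 3, 8, 9, 10, 11 }, i.e. the valid low halves of two
     consecutive gather results are concatenated into one vector.  */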
5972 rettype = TREE_TYPE (TREE_TYPE (gather_decl));
5973 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5974 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5975 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5976 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5977 scaletype = TREE_VALUE (arglist);
5978 gcc_checking_assert (types_compatible_p (srctype, rettype));
5980 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5982 ptr = fold_convert (ptrtype, gather_base);
5983 if (!is_gimple_min_invariant (ptr))
5985 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
5986 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
5987 gcc_assert (!new_bb);
5990 /* Currently we support only unconditional gather loads,
5991 so mask should be all ones. */
5992 if (TREE_CODE (masktype) == INTEGER_TYPE)
5993 mask = build_int_cst (masktype, -1);
5994 else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
5996 mask = build_int_cst (TREE_TYPE (masktype), -1);
5997 mask = build_vector_from_val (masktype, mask);
5998 mask = vect_init_vector (stmt, mask, masktype, NULL);
6000 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
6002 REAL_VALUE_TYPE r;
6003 long tmp[6];
6004 for (j = 0; j < 6; ++j)
6005 tmp[j] = -1;
6006 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
6007 mask = build_real (TREE_TYPE (masktype), r);
6008 mask = build_vector_from_val (masktype, mask);
6009 mask = vect_init_vector (stmt, mask, masktype, NULL);
6011 else
6012 gcc_unreachable ();
6014 scale = build_int_cst (scaletype, gather_scale);
6016 if (TREE_CODE (TREE_TYPE (rettype)) == INTEGER_TYPE)
6017 merge = build_int_cst (TREE_TYPE (rettype), 0);
6018 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype)))
6020 REAL_VALUE_TYPE r;
6021 long tmp[6];
6022 for (j = 0; j < 6; ++j)
6023 tmp[j] = 0;
6024 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (rettype)));
6025 merge = build_real (TREE_TYPE (rettype), r);
6027 else
6028 gcc_unreachable ();
6029 merge = build_vector_from_val (rettype, merge);
6030 merge = vect_init_vector (stmt, merge, rettype, NULL);
6032 prev_stmt_info = NULL;
6033 for (j = 0; j < ncopies; ++j)
6035 if (modifier == WIDEN && (j & 1))
6036 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
6037 perm_mask, stmt, gsi);
6038 else if (j == 0)
6039 op = vec_oprnd0
6040 = vect_get_vec_def_for_operand (gather_off, stmt, NULL);
6041 else
6042 op = vec_oprnd0
6043 = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);
6045 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
6047 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
6048 == TYPE_VECTOR_SUBPARTS (idxtype));
6049 var = vect_get_new_vect_var (idxtype, vect_simple_var, NULL);
6050 var = make_ssa_name (var);
6051 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
6052 new_stmt
6053 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
6054 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6055 op = var;
6058 new_stmt
6059 = gimple_build_call (gather_decl, 5, merge, ptr, op, mask, scale);
6061 if (!useless_type_conversion_p (vectype, rettype))
6063 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
6064 == TYPE_VECTOR_SUBPARTS (rettype));
6065 var = vect_get_new_vect_var (rettype, vect_simple_var, NULL);
6066 op = make_ssa_name (var, new_stmt);
6067 gimple_call_set_lhs (new_stmt, op);
6068 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6069 var = make_ssa_name (vec_dest);
6070 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
6071 new_stmt
6072 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
6074 else
6076 var = make_ssa_name (vec_dest, new_stmt);
6077 gimple_call_set_lhs (new_stmt, var);
6080 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6082 if (modifier == NARROW)
6084 if ((j & 1) == 0)
6086 prev_res = var;
6087 continue;
6089 var = permute_vec_elements (prev_res, var,
6090 perm_mask, stmt, gsi);
6091 new_stmt = SSA_NAME_DEF_STMT (var);
6094 if (prev_stmt_info == NULL)
6095 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6096 else
6097 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6098 prev_stmt_info = vinfo_for_stmt (new_stmt);
6100 return true;
6102 else if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
6104 gimple_stmt_iterator incr_gsi;
6105 bool insert_after;
6106 gimple incr;
6107 tree offvar;
6108 tree ivstep;
6109 tree running_off;
6110 vec<constructor_elt, va_gc> *v = NULL;
6111 gimple_seq stmts = NULL;
6112 tree stride_base, stride_step, alias_off;
6114 gcc_assert (!nested_in_vect_loop);
6116 stride_base
6117 = fold_build_pointer_plus
6118 (unshare_expr (DR_BASE_ADDRESS (dr)),
6119 size_binop (PLUS_EXPR,
6120 convert_to_ptrofftype (unshare_expr (DR_OFFSET (dr))),
6121 convert_to_ptrofftype (DR_INIT (dr))));
6122 stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (dr)));
6124 /* For a load with loop-invariant (but other than power-of-2)
6125 stride (i.e. not a grouped access) like so:
6127 for (i = 0; i < n; i += stride)
6128 ... = array[i];
6130 we generate a new induction variable and new accesses to
6131 form a new vector (or vectors, depending on ncopies):
6133 for (j = 0; ; j += VF*stride)
6134 tmp1 = array[j];
6135 tmp2 = array[j + stride];
6137 vectemp = {tmp1, tmp2, ...}
6140 ivstep = stride_step;
6141 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
6142 build_int_cst (TREE_TYPE (ivstep), vf));
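/* Illustrative numbers only: with vf == 4 the IV created below advances by
   4 * stride bytes per vector iteration, and the per-element loads at
   running_off, running_off + stride, ... are collected into a CONSTRUCTOR
   that is then used to build the vector value.  */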
6144 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
6146 create_iv (stride_base, ivstep, NULL,
6147 loop, &incr_gsi, insert_after,
6148 &offvar, NULL);
6149 incr = gsi_stmt (incr_gsi);
6150 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
6152 stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
6153 if (stmts)
6154 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
6156 prev_stmt_info = NULL;
6157 running_off = offvar;
6158 alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (dr)), 0);
6159 int nloads = nunits;
6160 tree ltype = TREE_TYPE (vectype);
6161 if (slp)
6163 nloads = nunits / group_size;
6164 if (group_size < nunits)
6165 ltype = build_vector_type (TREE_TYPE (vectype), group_size);
6166 else
6167 ltype = vectype;
6168 ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
6169 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6170 gcc_assert (!slp_perm);
6172 for (j = 0; j < ncopies; j++)
6174 tree vec_inv;
6176 if (nloads > 1)
6178 vec_alloc (v, nloads);
6179 for (i = 0; i < nloads; i++)
6181 tree newref, newoff;
6182 gimple incr;
6183 newref = build2 (MEM_REF, ltype, running_off, alias_off);
6185 newref = force_gimple_operand_gsi (gsi, newref, true,
6186 NULL_TREE, true,
6187 GSI_SAME_STMT);
6188 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, newref);
6189 newoff = copy_ssa_name (running_off);
6190 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6191 running_off, stride_step);
6192 vect_finish_stmt_generation (stmt, incr, gsi);
6194 running_off = newoff;
6197 vec_inv = build_constructor (vectype, v);
6198 new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
6199 new_stmt = SSA_NAME_DEF_STMT (new_temp);
6201 else
6203 new_stmt = gimple_build_assign (make_ssa_name (ltype),
6204 build2 (MEM_REF, ltype,
6205 running_off, alias_off));
6206 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6208 tree newoff = copy_ssa_name (running_off);
6209 gimple incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6210 running_off, stride_step);
6211 vect_finish_stmt_generation (stmt, incr, gsi);
6213 running_off = newoff;
6216 if (slp)
6217 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
6218 if (j == 0)
6219 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6220 else
6221 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6222 prev_stmt_info = vinfo_for_stmt (new_stmt);
6224 return true;
6227 if (grouped_load)
6229 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
6230 if (slp
6231 && !SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()
6232 && first_stmt != SLP_TREE_SCALAR_STMTS (slp_node)[0])
6233 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
6235 /* Check if the chain of loads is already vectorized. */
6236 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
6237 /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
6238 ??? But we can only do so if there is exactly one
6239 as we have no way to get at the rest. Leave the CSE
6240 opportunity alone.
6241 ??? With the group load eventually participating
6242 in multiple different permutations (having multiple
6243 slp nodes which refer to the same group) the CSE
6244 is even wrong code. See PR56270. */
6245 && !slp)
6247 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
6248 return true;
6250 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
6251 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
6253 /* VEC_NUM is the number of vect stmts to be created for this group. */
6254 if (slp)
6256 grouped_load = false;
6257 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6258 if (SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
6259 slp_perm = true;
6260 group_gap = GROUP_GAP (vinfo_for_stmt (first_stmt));
6262 else
6264 vec_num = group_size;
6265 group_gap = 0;
6268 else
6270 first_stmt = stmt;
6271 first_dr = dr;
6272 group_size = vec_num = 1;
6273 group_gap = 0;
6276 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
6277 gcc_assert (alignment_support_scheme);
6278 /* Targets with load-lane instructions must not require explicit
6279 realignment. */
6280 gcc_assert (!load_lanes_p
6281 || alignment_support_scheme == dr_aligned
6282 || alignment_support_scheme == dr_unaligned_supported);
6284 /* In case the vectorization factor (VF) is bigger than the number
6285 of elements that we can fit in a vectype (nunits), we have to generate
6286 more than one vector stmt - i.e., we need to "unroll" the
6287 vector stmt by a factor VF/nunits. In doing so, we record a pointer
6288 from one copy of the vector stmt to the next, in the field
6289 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
6290 stages to find the correct vector defs to be used when vectorizing
6291 stmts that use the defs of the current stmt. The example below
6292 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
6293 need to create 4 vectorized stmts):
6295 before vectorization:
6296 RELATED_STMT VEC_STMT
6297 S1: x = memref - -
6298 S2: z = x + 1 - -
6300 step 1: vectorize stmt S1:
6301 We first create the vector stmt VS1_0, and, as usual, record a
6302 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
6303 Next, we create the vector stmt VS1_1, and record a pointer to
6304 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
6305 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
6306 stmts and pointers:
6307 RELATED_STMT VEC_STMT
6308 VS1_0: vx0 = memref0 VS1_1 -
6309 VS1_1: vx1 = memref1 VS1_2 -
6310 VS1_2: vx2 = memref2 VS1_3 -
6311 VS1_3: vx3 = memref3 - -
6312 S1: x = load - VS1_0
6313 S2: z = x + 1 - -
6315 See in documentation in vect_get_vec_def_for_stmt_copy for how the
6316 information we recorded in RELATED_STMT field is used to vectorize
6317 stmt S2. */
6319 /* In case of interleaving (non-unit grouped access):
6321 S1: x2 = &base + 2
6322 S2: x0 = &base
6323 S3: x1 = &base + 1
6324 S4: x3 = &base + 3
6326 Vectorized loads are created in the order of memory accesses
6327 starting from the access of the first stmt of the chain:
6329 VS1: vx0 = &base
6330 VS2: vx1 = &base + vec_size*1
6331 VS3: vx3 = &base + vec_size*2
6332 VS4: vx4 = &base + vec_size*3
6334 Then permutation statements are generated:
6336 VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
6337 VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
6340 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
6341 (the order of the data-refs in the output of vect_permute_load_chain
6342 corresponds to the order of scalar stmts in the interleaving chain - see
6343 the documentation of vect_permute_load_chain()).
6344 The generation of permutation stmts and recording them in
6345 STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().
6347 In case of both multiple types and interleaving, the vector loads and
6348 permutation stmts above are created for every copy. The result vector
6349 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
6350 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
6352 /* If the data reference is aligned (dr_aligned) or potentially unaligned
6353 on a target that supports unaligned accesses (dr_unaligned_supported)
6354 we generate the following code:
6355 p = initial_addr;
6356 indx = 0;
6357 loop {
6358 p = p + indx * vectype_size;
6359 vec_dest = *(p);
6360 indx = indx + 1;
6363 Otherwise, the data reference is potentially unaligned on a target that
6364 does not support unaligned accesses (dr_explicit_realign_optimized) -
6365 then generate the following code, in which the data in each iteration is
6366 obtained by two vector loads, one from the previous iteration, and one
6367 from the current iteration:
6368 p1 = initial_addr;
6369 msq_init = *(floor(p1))
6370 p2 = initial_addr + VS - 1;
6371 realignment_token = call target_builtin;
6372 indx = 0;
6373 loop {
6374 p2 = p2 + indx * vectype_size
6375 lsq = *(floor(p2))
6376 vec_dest = realign_load (msq, lsq, realignment_token)
6377 indx = indx + 1;
6378 msq = lsq;
6379 } */
6381 /* If the misalignment remains the same throughout the execution of the
6382 loop, we can create the init_addr and permutation mask at the loop
6383 preheader. Otherwise, it needs to be created inside the loop.
6384 This can only occur when vectorizing memory accesses in the inner-loop
6385 nested within an outer-loop that is being vectorized. */
6387 if (nested_in_vect_loop
6388 && (TREE_INT_CST_LOW (DR_STEP (dr))
6389 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
6391 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
6392 compute_in_loop = true;
6395 if ((alignment_support_scheme == dr_explicit_realign_optimized
6396 || alignment_support_scheme == dr_explicit_realign)
6397 && !compute_in_loop)
6399 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
6400 alignment_support_scheme, NULL_TREE,
6401 &at_loop);
6402 if (alignment_support_scheme == dr_explicit_realign_optimized)
6404 phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
6405 byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
6406 size_one_node);
6409 else
6410 at_loop = loop;
6412 if (negative)
6413 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
6415 if (load_lanes_p)
6416 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
6417 else
6418 aggr_type = vectype;
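/* For illustration (assumed numbers): with a group of vec_num == 2 V4SI
   vectors, load-lanes uses an int[8] array type here so that a single
   IFN_LOAD_LANES call below can load the whole interleaved group at once;
   otherwise each copy simply loads one vectype-sized chunk.  */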
6420 prev_stmt_info = NULL;
6421 for (j = 0; j < ncopies; j++)
6423 /* 1. Create the vector or array pointer update chain. */
6424 if (j == 0)
6426 bool simd_lane_access_p
6427 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
6428 if (simd_lane_access_p
6429 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
6430 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
6431 && integer_zerop (DR_OFFSET (first_dr))
6432 && integer_zerop (DR_INIT (first_dr))
6433 && alias_sets_conflict_p (get_alias_set (aggr_type),
6434 get_alias_set (DR_REF (first_dr)))
6435 && (alignment_support_scheme == dr_aligned
6436 || alignment_support_scheme == dr_unaligned_supported))
6438 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
6439 dataref_offset = build_int_cst (reference_alias_ptr_type
6440 (DR_REF (first_dr)), 0);
6441 inv_p = false;
6443 else
6444 dataref_ptr
6445 = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
6446 offset, &dummy, gsi, &ptr_incr,
6447 simd_lane_access_p, &inv_p,
6448 byte_offset);
6450 else if (dataref_offset)
6451 dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
6452 TYPE_SIZE_UNIT (aggr_type));
6453 else
6454 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
6455 TYPE_SIZE_UNIT (aggr_type));
6457 if (grouped_load || slp_perm)
6458 dr_chain.create (vec_num);
6460 if (load_lanes_p)
6462 tree vec_array;
6464 vec_array = create_vector_array (vectype, vec_num);
6466 /* Emit:
6467 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
6468 data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
6469 new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
6470 gimple_call_set_lhs (new_stmt, vec_array);
6471 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6473 /* Extract each vector into an SSA_NAME. */
6474 for (i = 0; i < vec_num; i++)
6476 new_temp = read_vector_array (stmt, gsi, scalar_dest,
6477 vec_array, i);
6478 dr_chain.quick_push (new_temp);
6481 /* Record the mapping between SSA_NAMEs and statements. */
6482 vect_record_grouped_load_vectors (stmt, dr_chain);
6484 else
6486 for (i = 0; i < vec_num; i++)
6488 if (i > 0)
6489 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
6490 stmt, NULL_TREE);
6492 /* 2. Create the vector-load in the loop. */
6493 switch (alignment_support_scheme)
6495 case dr_aligned:
6496 case dr_unaligned_supported:
6498 unsigned int align, misalign;
6500 data_ref
6501 = build2 (MEM_REF, vectype, dataref_ptr,
6502 dataref_offset
6503 ? dataref_offset
6504 : build_int_cst (reference_alias_ptr_type
6505 (DR_REF (first_dr)), 0));
6506 align = TYPE_ALIGN_UNIT (vectype);
6507 if (alignment_support_scheme == dr_aligned)
6509 gcc_assert (aligned_access_p (first_dr));
6510 misalign = 0;
6512 else if (DR_MISALIGNMENT (first_dr) == -1)
6514 TREE_TYPE (data_ref)
6515 = build_aligned_type (TREE_TYPE (data_ref),
6516 TYPE_ALIGN (elem_type));
6517 align = TYPE_ALIGN_UNIT (elem_type);
6518 misalign = 0;
6520 else
6522 TREE_TYPE (data_ref)
6523 = build_aligned_type (TREE_TYPE (data_ref),
6524 TYPE_ALIGN (elem_type));
6525 misalign = DR_MISALIGNMENT (first_dr);
6527 if (dataref_offset == NULL_TREE)
6528 set_ptr_info_alignment (get_ptr_info (dataref_ptr),
6529 align, misalign);
6530 break;
6532 case dr_explicit_realign:
6534 tree ptr, bump;
6536 tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));
6538 if (compute_in_loop)
6539 msq = vect_setup_realignment (first_stmt, gsi,
6540 &realignment_token,
6541 dr_explicit_realign,
6542 dataref_ptr, NULL);
6544 ptr = copy_ssa_name (dataref_ptr);
6545 new_stmt = gimple_build_assign
6546 (ptr, BIT_AND_EXPR, dataref_ptr,
6547 build_int_cst
6548 (TREE_TYPE (dataref_ptr),
6549 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
6550 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6551 data_ref
6552 = build2 (MEM_REF, vectype, ptr,
6553 build_int_cst (reference_alias_ptr_type
6554 (DR_REF (first_dr)), 0));
6555 vec_dest = vect_create_destination_var (scalar_dest,
6556 vectype);
6557 new_stmt = gimple_build_assign (vec_dest, data_ref);
6558 new_temp = make_ssa_name (vec_dest, new_stmt);
6559 gimple_assign_set_lhs (new_stmt, new_temp);
6560 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
6561 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
6562 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6563 msq = new_temp;
6565 bump = size_binop (MULT_EXPR, vs,
6566 TYPE_SIZE_UNIT (elem_type));
6567 bump = size_binop (MINUS_EXPR, bump, size_one_node);
6568 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
6569 new_stmt = gimple_build_assign
6570 (NULL_TREE, BIT_AND_EXPR, ptr,
6571 build_int_cst
6572 (TREE_TYPE (ptr),
6573 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
6574 ptr = copy_ssa_name (dataref_ptr, new_stmt);
6575 gimple_assign_set_lhs (new_stmt, ptr);
6576 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6577 data_ref
6578 = build2 (MEM_REF, vectype, ptr,
6579 build_int_cst (reference_alias_ptr_type
6580 (DR_REF (first_dr)), 0));
6581 break;
6583 case dr_explicit_realign_optimized:
6584 new_temp = copy_ssa_name (dataref_ptr);
6585 new_stmt = gimple_build_assign
6586 (new_temp, BIT_AND_EXPR, dataref_ptr,
6587 build_int_cst
6588 (TREE_TYPE (dataref_ptr),
6589 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
6590 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6591 data_ref
6592 = build2 (MEM_REF, vectype, new_temp,
6593 build_int_cst (reference_alias_ptr_type
6594 (DR_REF (first_dr)), 0));
6595 break;
6596 default:
6597 gcc_unreachable ();
6599 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6600 new_stmt = gimple_build_assign (vec_dest, data_ref);
6601 new_temp = make_ssa_name (vec_dest, new_stmt);
6602 gimple_assign_set_lhs (new_stmt, new_temp);
6603 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6605 /* 3. Handle explicit realignment if necessary/supported.
6606 Create in loop:
6607 vec_dest = realign_load (msq, lsq, realignment_token) */
6608 if (alignment_support_scheme == dr_explicit_realign_optimized
6609 || alignment_support_scheme == dr_explicit_realign)
6611 lsq = gimple_assign_lhs (new_stmt);
6612 if (!realignment_token)
6613 realignment_token = dataref_ptr;
6614 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6615 new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
6616 msq, lsq, realignment_token);
6617 new_temp = make_ssa_name (vec_dest, new_stmt);
6618 gimple_assign_set_lhs (new_stmt, new_temp);
6619 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6621 if (alignment_support_scheme == dr_explicit_realign_optimized)
6623 gcc_assert (phi);
6624 if (i == vec_num - 1 && j == ncopies - 1)
6625 add_phi_arg (phi, lsq,
6626 loop_latch_edge (containing_loop),
6627 UNKNOWN_LOCATION);
6628 msq = lsq;
6632 /* 4. Handle invariant-load. */
6633 if (inv_p && !bb_vinfo)
6635 gcc_assert (!grouped_load);
6636 /* If we have versioned for aliasing or the loop doesn't
6637 have any data dependencies that would preclude this,
6638 then we are sure this is a loop invariant load and
6639 thus we can insert it on the preheader edge. */
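/* Sketch of what the hoisting path below produces (illustrative only):
   for an invariant load x_1 = *q_2(D) inside the loop we emit

     tem_3 = *q_2(D);                      <- on the preheader edge
     vect_cst_4 = {tem_3, tem_3, ...};     <- via vect_init_vector

   and the vector stmt then uses the splatted value.  */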
6640 if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
6641 && !nested_in_vect_loop
6642 && hoist_defs_of_uses (stmt, loop))
6644 if (dump_enabled_p ())
6646 dump_printf_loc (MSG_NOTE, vect_location,
6647 "hoisting out of the vectorized "
6648 "loop: ");
6649 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
6651 tree tem = copy_ssa_name (scalar_dest);
6652 gsi_insert_on_edge_immediate
6653 (loop_preheader_edge (loop),
6654 gimple_build_assign (tem,
6655 unshare_expr
6656 (gimple_assign_rhs1 (stmt))));
6657 new_temp = vect_init_vector (stmt, tem, vectype, NULL);
6659 else
6661 gimple_stmt_iterator gsi2 = *gsi;
6662 gsi_next (&gsi2);
6663 new_temp = vect_init_vector (stmt, scalar_dest,
6664 vectype, &gsi2);
6666 new_stmt = SSA_NAME_DEF_STMT (new_temp);
6667 set_vinfo_for_stmt (new_stmt,
6668 new_stmt_vec_info (new_stmt, loop_vinfo,
6669 bb_vinfo));
6672 if (negative)
6674 tree perm_mask = perm_mask_for_reverse (vectype);
6675 new_temp = permute_vec_elements (new_temp, new_temp,
6676 perm_mask, stmt, gsi);
6677 new_stmt = SSA_NAME_DEF_STMT (new_temp);
6680 /* Collect vector loads and later create their permutation in
6681 vect_transform_grouped_load (). */
6682 if (grouped_load || slp_perm)
6683 dr_chain.quick_push (new_temp);
6685 /* Store vector loads in the corresponding SLP_NODE. */
6686 if (slp && !slp_perm)
6687 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
6689 /* Bump the vector pointer to account for a gap. */
6690 if (slp && group_gap != 0)
6692 tree bump = size_binop (MULT_EXPR,
6693 TYPE_SIZE_UNIT (elem_type),
6694 size_int (group_gap));
6695 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
6696 stmt, bump);
6700 if (slp && !slp_perm)
6701 continue;
6703 if (slp_perm)
6705 if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
6706 slp_node_instance, false))
6708 dr_chain.release ();
6709 return false;
6712 else
6714 if (grouped_load)
6716 if (!load_lanes_p)
6717 vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
6718 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
6720 else
6722 if (j == 0)
6723 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6724 else
6725 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6726 prev_stmt_info = vinfo_for_stmt (new_stmt);
6729 dr_chain.release ();
6732 return true;
6735 /* Function vect_is_simple_cond.
6737 Input:
6738 LOOP - the loop that is being vectorized.
6739 COND - Condition that is checked for simple use.
6741 Output:
6742 *COMP_VECTYPE - the vector type for the comparison.
6744 Returns whether a COND can be vectorized. Checks whether
6745 condition operands are supportable using vect_is_simple_use. */
6747 static bool
6748 vect_is_simple_cond (tree cond, gimple stmt, loop_vec_info loop_vinfo,
6749 bb_vec_info bb_vinfo, tree *comp_vectype)
6751 tree lhs, rhs;
6752 tree def;
6753 enum vect_def_type dt;
6754 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
6756 if (!COMPARISON_CLASS_P (cond))
6757 return false;
6759 lhs = TREE_OPERAND (cond, 0);
6760 rhs = TREE_OPERAND (cond, 1);
6762 if (TREE_CODE (lhs) == SSA_NAME)
6764 gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
6765 if (!vect_is_simple_use_1 (lhs, stmt, loop_vinfo, bb_vinfo,
6766 &lhs_def_stmt, &def, &dt, &vectype1))
6767 return false;
6769 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
6770 && TREE_CODE (lhs) != FIXED_CST)
6771 return false;
6773 if (TREE_CODE (rhs) == SSA_NAME)
6775 gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
6776 if (!vect_is_simple_use_1 (rhs, stmt, loop_vinfo, bb_vinfo,
6777 &rhs_def_stmt, &def, &dt, &vectype2))
6778 return false;
6780 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
6781 && TREE_CODE (rhs) != FIXED_CST)
6782 return false;
6784 *comp_vectype = vectype1 ? vectype1 : vectype2;
6785 return true;
6788 /* vectorizable_condition.
6790 Check if STMT is conditional modify expression that can be vectorized.
6791 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
6792 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
6793 at GSI.
6795 When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
6796 to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
6797 the else clause if it is 2).
6799 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
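/* A minimal sketch of the transformation (illustrative names, V4SI types):

     scalar:  a_1 = x_2 < y_3 ? b_4 : c_5;

     vector:  va_6 = VEC_COND_EXPR <vx_7 < vy_8, vb_9, vc_10>;

   where the comparison is built with vec_cmp_type and the selection with
   VEC_COND_EXPR on the result vectype.  */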
6801 bool
6802 vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
6803 gimple *vec_stmt, tree reduc_def, int reduc_index,
6804 slp_tree slp_node)
6806 tree scalar_dest = NULL_TREE;
6807 tree vec_dest = NULL_TREE;
6808 tree cond_expr, then_clause, else_clause;
6809 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
6810 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
6811 tree comp_vectype = NULL_TREE;
6812 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
6813 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
6814 tree vec_compare, vec_cond_expr;
6815 tree new_temp;
6816 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6817 tree def;
6818 enum vect_def_type dt, dts[4];
6819 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
6820 int ncopies;
6821 enum tree_code code;
6822 stmt_vec_info prev_stmt_info = NULL;
6823 int i, j;
6824 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
6825 vec<tree> vec_oprnds0 = vNULL;
6826 vec<tree> vec_oprnds1 = vNULL;
6827 vec<tree> vec_oprnds2 = vNULL;
6828 vec<tree> vec_oprnds3 = vNULL;
6829 tree vec_cmp_type;
6831 if (slp_node || PURE_SLP_STMT (stmt_info))
6832 ncopies = 1;
6833 else
6834 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
6836 gcc_assert (ncopies >= 1);
6837 if (reduc_index && ncopies > 1)
6838 return false; /* FORNOW */
6840 if (reduc_index && STMT_SLP_TYPE (stmt_info))
6841 return false;
6843 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
6844 return false;
6846 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
6847 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
6848 && reduc_def))
6849 return false;
6851 /* FORNOW: not yet supported. */
6852 if (STMT_VINFO_LIVE_P (stmt_info))
6854 if (dump_enabled_p ())
6855 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6856 "value used after loop.\n");
6857 return false;
6860 /* Is vectorizable conditional operation? */
6861 if (!is_gimple_assign (stmt))
6862 return false;
6864 code = gimple_assign_rhs_code (stmt);
6866 if (code != COND_EXPR)
6867 return false;
6869 cond_expr = gimple_assign_rhs1 (stmt);
6870 then_clause = gimple_assign_rhs2 (stmt);
6871 else_clause = gimple_assign_rhs3 (stmt);
6873 if (!vect_is_simple_cond (cond_expr, stmt, loop_vinfo, bb_vinfo,
6874 &comp_vectype)
6875 || !comp_vectype)
6876 return false;
6878 if (TREE_CODE (then_clause) == SSA_NAME)
6880 gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
6881 if (!vect_is_simple_use (then_clause, stmt, loop_vinfo, bb_vinfo,
6882 &then_def_stmt, &def, &dt))
6883 return false;
6885 else if (TREE_CODE (then_clause) != INTEGER_CST
6886 && TREE_CODE (then_clause) != REAL_CST
6887 && TREE_CODE (then_clause) != FIXED_CST)
6888 return false;
6890 if (TREE_CODE (else_clause) == SSA_NAME)
6892 gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
6893 if (!vect_is_simple_use (else_clause, stmt, loop_vinfo, bb_vinfo,
6894 &else_def_stmt, &def, &dt))
6895 return false;
6897 else if (TREE_CODE (else_clause) != INTEGER_CST
6898 && TREE_CODE (else_clause) != REAL_CST
6899 && TREE_CODE (else_clause) != FIXED_CST)
6900 return false;
6902 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype)));
6903 /* The result of a vector comparison should be of signed integer type. */
6904 tree cmp_type = build_nonstandard_integer_type (prec, 0);
6905 vec_cmp_type = get_same_sized_vectype (cmp_type, vectype);
6906 if (vec_cmp_type == NULL_TREE)
6907 return false;
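/* For example (target-dependent, illustrative): with a V4SF vectype the
   element precision is 32, so cmp_type is a 32-bit signed integer type and
   vec_cmp_type the corresponding V4SI, matching how vector comparisons
   produce all-ones / all-zeros integer masks.  */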
6909 if (!vec_stmt)
6911 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
6912 return expand_vec_cond_expr_p (vectype, comp_vectype);
6915 /* Transform. */
6917 if (!slp_node)
6919 vec_oprnds0.create (1);
6920 vec_oprnds1.create (1);
6921 vec_oprnds2.create (1);
6922 vec_oprnds3.create (1);
6925 /* Handle def. */
6926 scalar_dest = gimple_assign_lhs (stmt);
6927 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6929 /* Handle cond expr. */
6930 for (j = 0; j < ncopies; j++)
6932 gassign *new_stmt = NULL;
6933 if (j == 0)
6935 if (slp_node)
6937 auto_vec<tree, 4> ops;
6938 auto_vec<vec<tree>, 4> vec_defs;
6940 ops.safe_push (TREE_OPERAND (cond_expr, 0));
6941 ops.safe_push (TREE_OPERAND (cond_expr, 1));
6942 ops.safe_push (then_clause);
6943 ops.safe_push (else_clause);
6944 vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
6945 vec_oprnds3 = vec_defs.pop ();
6946 vec_oprnds2 = vec_defs.pop ();
6947 vec_oprnds1 = vec_defs.pop ();
6948 vec_oprnds0 = vec_defs.pop ();
6950 ops.release ();
6951 vec_defs.release ();
6953 else
6955 gimple gtemp;
6956 vec_cond_lhs =
6957 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0),
6958 stmt, NULL);
6959 vect_is_simple_use (TREE_OPERAND (cond_expr, 0), stmt,
6960 loop_vinfo, NULL, &gtemp, &def, &dts[0]);
6962 vec_cond_rhs =
6963 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
6964 stmt, NULL);
6965 vect_is_simple_use (TREE_OPERAND (cond_expr, 1), stmt,
6966 loop_vinfo, NULL, &gtemp, &def, &dts[1]);
6967 if (reduc_index == 1)
6968 vec_then_clause = reduc_def;
6969 else
6971 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
6972 stmt, NULL);
6973 vect_is_simple_use (then_clause, stmt, loop_vinfo,
6974 NULL, &gtemp, &def, &dts[2]);
6976 if (reduc_index == 2)
6977 vec_else_clause = reduc_def;
6978 else
6980 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
6981 stmt, NULL);
6982 vect_is_simple_use (else_clause, stmt, loop_vinfo,
6983 NULL, &gtemp, &def, &dts[3]);
6987 else
6989 vec_cond_lhs = vect_get_vec_def_for_stmt_copy (dts[0],
6990 vec_oprnds0.pop ());
6991 vec_cond_rhs = vect_get_vec_def_for_stmt_copy (dts[1],
6992 vec_oprnds1.pop ());
6993 vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
6994 vec_oprnds2.pop ());
6995 vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
6996 vec_oprnds3.pop ());
6999 if (!slp_node)
7001 vec_oprnds0.quick_push (vec_cond_lhs);
7002 vec_oprnds1.quick_push (vec_cond_rhs);
7003 vec_oprnds2.quick_push (vec_then_clause);
7004 vec_oprnds3.quick_push (vec_else_clause);
7007 /* Arguments are ready. Create the new vector stmt. */
7008 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
7010 vec_cond_rhs = vec_oprnds1[i];
7011 vec_then_clause = vec_oprnds2[i];
7012 vec_else_clause = vec_oprnds3[i];
7014 vec_compare = build2 (TREE_CODE (cond_expr), vec_cmp_type,
7015 vec_cond_lhs, vec_cond_rhs);
7016 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
7017 vec_compare, vec_then_clause, vec_else_clause);
7019 new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
7020 new_temp = make_ssa_name (vec_dest, new_stmt);
7021 gimple_assign_set_lhs (new_stmt, new_temp);
7022 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7023 if (slp_node)
7024 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7027 if (slp_node)
7028 continue;
7030 if (j == 0)
7031 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7032 else
7033 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7035 prev_stmt_info = vinfo_for_stmt (new_stmt);
7038 vec_oprnds0.release ();
7039 vec_oprnds1.release ();
7040 vec_oprnds2.release ();
7041 vec_oprnds3.release ();
7043 return true;
7047 /* Make sure the statement is vectorizable. */
7049 bool
7050 vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
7052 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7053 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7054 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
7055 bool ok;
7056 tree scalar_type, vectype;
7057 gimple pattern_stmt;
7058 gimple_seq pattern_def_seq;
7060 if (dump_enabled_p ())
7062 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
7063 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7066 if (gimple_has_volatile_ops (stmt))
7068 if (dump_enabled_p ())
7069 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7070 "not vectorized: stmt has volatile operands\n");
7072 return false;
7075 /* Skip stmts that do not need to be vectorized. In loops this is expected
7076 to include:
7077 - the COND_EXPR which is the loop exit condition
7078 - any LABEL_EXPRs in the loop
7079 - computations that are used only for array indexing or loop control.
7080 In basic blocks we only analyze statements that are a part of some SLP
7081 instance, therefore, all the statements are relevant.
7083 The pattern statement needs to be analyzed instead of the original statement
7084 if the original statement is not relevant. Otherwise, we analyze both
7085 statements. In basic blocks we are called from some SLP instance
7086 traversal; don't analyze pattern stmts instead, as the pattern stmts
7087 will already be part of an SLP instance. */
7089 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
7090 if (!STMT_VINFO_RELEVANT_P (stmt_info)
7091 && !STMT_VINFO_LIVE_P (stmt_info))
7093 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
7094 && pattern_stmt
7095 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
7096 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
7098 /* Analyze PATTERN_STMT instead of the original stmt. */
7099 stmt = pattern_stmt;
7100 stmt_info = vinfo_for_stmt (pattern_stmt);
7101 if (dump_enabled_p ())
7103 dump_printf_loc (MSG_NOTE, vect_location,
7104 "==> examining pattern statement: ");
7105 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7108 else
7110 if (dump_enabled_p ())
7111 dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");
7113 return true;
7116 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
7117 && node == NULL
7118 && pattern_stmt
7119 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
7120 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
7122 /* Analyze PATTERN_STMT too. */
7123 if (dump_enabled_p ())
7125 dump_printf_loc (MSG_NOTE, vect_location,
7126 "==> examining pattern statement: ");
7127 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7130 if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node))
7131 return false;
7134 if (is_pattern_stmt_p (stmt_info)
7135 && node == NULL
7136 && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
7138 gimple_stmt_iterator si;
7140 for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
7142 gimple pattern_def_stmt = gsi_stmt (si);
7143 if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
7144 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
7146 /* Analyze def stmt of STMT if it's a pattern stmt. */
7147 if (dump_enabled_p ())
7149 dump_printf_loc (MSG_NOTE, vect_location,
7150 "==> examining pattern def statement: ");
7151 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
7154 if (!vect_analyze_stmt (pattern_def_stmt,
7155 need_to_vectorize, node))
7156 return false;
7161 switch (STMT_VINFO_DEF_TYPE (stmt_info))
7163 case vect_internal_def:
7164 break;
7166 case vect_reduction_def:
7167 case vect_nested_cycle:
7168 gcc_assert (!bb_vinfo && (relevance == vect_used_in_outer
7169 || relevance == vect_used_in_outer_by_reduction
7170 || relevance == vect_unused_in_scope));
7171 break;
7173 case vect_induction_def:
7174 case vect_constant_def:
7175 case vect_external_def:
7176 case vect_unknown_def_type:
7177 default:
7178 gcc_unreachable ();
7181 if (bb_vinfo)
7183 gcc_assert (PURE_SLP_STMT (stmt_info));
7185 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
7186 if (dump_enabled_p ())
7188 dump_printf_loc (MSG_NOTE, vect_location,
7189 "get vectype for scalar type: ");
7190 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
7191 dump_printf (MSG_NOTE, "\n");
7194 vectype = get_vectype_for_scalar_type (scalar_type);
7195 if (!vectype)
7197 if (dump_enabled_p ())
7199 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7200 "not SLPed: unsupported data-type ");
7201 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
7202 scalar_type);
7203 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
7205 return false;
7208 if (dump_enabled_p ())
7210 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
7211 dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
7212 dump_printf (MSG_NOTE, "\n");
7215 STMT_VINFO_VECTYPE (stmt_info) = vectype;
7218 if (STMT_VINFO_RELEVANT_P (stmt_info))
7220 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
7221 gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
7222 || (is_gimple_call (stmt)
7223 && gimple_call_lhs (stmt) == NULL_TREE));
7224 *need_to_vectorize = true;
7227 ok = true;
7228 if (!bb_vinfo
7229 && (STMT_VINFO_RELEVANT_P (stmt_info)
7230 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
7231 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, NULL)
7232 || vectorizable_conversion (stmt, NULL, NULL, NULL)
7233 || vectorizable_shift (stmt, NULL, NULL, NULL)
7234 || vectorizable_operation (stmt, NULL, NULL, NULL)
7235 || vectorizable_assignment (stmt, NULL, NULL, NULL)
7236 || vectorizable_load (stmt, NULL, NULL, NULL, NULL)
7237 || vectorizable_call (stmt, NULL, NULL, NULL)
7238 || vectorizable_store (stmt, NULL, NULL, NULL)
7239 || vectorizable_reduction (stmt, NULL, NULL, NULL)
7240 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, NULL));
7241 else
7243 if (bb_vinfo)
7244 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
7245 || vectorizable_conversion (stmt, NULL, NULL, node)
7246 || vectorizable_shift (stmt, NULL, NULL, node)
7247 || vectorizable_operation (stmt, NULL, NULL, node)
7248 || vectorizable_assignment (stmt, NULL, NULL, node)
7249 || vectorizable_load (stmt, NULL, NULL, node, NULL)
7250 || vectorizable_call (stmt, NULL, NULL, node)
7251 || vectorizable_store (stmt, NULL, NULL, node)
7252 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node));
7255 if (!ok)
7257 if (dump_enabled_p ())
7259 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7260 "not vectorized: relevant stmt not ");
7261 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
7262 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
7265 return false;
7268 if (bb_vinfo)
7269 return true;
7271 /* Stmts that are (also) "live" (i.e. that are used outside of the loop)
7272 need extra handling, except for vectorizable reductions. */
7273 if (STMT_VINFO_LIVE_P (stmt_info)
7274 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
7275 ok = vectorizable_live_operation (stmt, NULL, NULL);
7277 if (!ok)
7279 if (dump_enabled_p ())
7281 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7282 "not vectorized: live stmt not ");
7283 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
7284 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
7287 return false;
7290 return true;
7294 /* Function vect_transform_stmt.
7296 Create a vectorized stmt to replace STMT, and insert it at BSI. */
7298 bool
7299 vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
7300 bool *grouped_store, slp_tree slp_node,
7301 slp_instance slp_node_instance)
7303 bool is_store = false;
7304 gimple vec_stmt = NULL;
7305 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7306 bool done;
7308 switch (STMT_VINFO_TYPE (stmt_info))
7310 case type_demotion_vec_info_type:
7311 case type_promotion_vec_info_type:
7312 case type_conversion_vec_info_type:
7313 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
7314 gcc_assert (done);
7315 break;
7317 case induc_vec_info_type:
7318 gcc_assert (!slp_node);
7319 done = vectorizable_induction (stmt, gsi, &vec_stmt);
7320 gcc_assert (done);
7321 break;
7323 case shift_vec_info_type:
7324 done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
7325 gcc_assert (done);
7326 break;
7328 case op_vec_info_type:
7329 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
7330 gcc_assert (done);
7331 break;
7333 case assignment_vec_info_type:
7334 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
7335 gcc_assert (done);
7336 break;
7338 case load_vec_info_type:
7339 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
7340 slp_node_instance);
7341 gcc_assert (done);
7342 break;
7344 case store_vec_info_type:
7345 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
7346 gcc_assert (done);
7347 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
7349 /* In case of interleaving, the whole chain is vectorized when the
7350 last store in the chain is reached. Store stmts before the last
7351 one are skipped, and their stmt_vec_info shouldn't be freed
7352 meanwhile. */
7353 *grouped_store = true;
7354 if (STMT_VINFO_VEC_STMT (stmt_info))
7355 is_store = true;
7357 else
7358 is_store = true;
7359 break;
7361 case condition_vec_info_type:
7362 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
7363 gcc_assert (done);
7364 break;
7366 case call_vec_info_type:
7367 done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
7368 stmt = gsi_stmt (*gsi);
7369 if (is_gimple_call (stmt)
7370 && gimple_call_internal_p (stmt)
7371 && gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
7372 is_store = true;
7373 break;
7375 case call_simd_clone_vec_info_type:
7376 done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
7377 stmt = gsi_stmt (*gsi);
7378 break;
7380 case reduc_vec_info_type:
7381 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
7382 gcc_assert (done);
7383 break;
7385 default:
7386 if (!STMT_VINFO_LIVE_P (stmt_info))
7388 if (dump_enabled_p ())
7389 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7390 "stmt not supported.\n");
7391 gcc_unreachable ();
7395 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
7396 is being vectorized, but outside the immediately enclosing loop. */
7397 if (vec_stmt
7398 && STMT_VINFO_LOOP_VINFO (stmt_info)
7399 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
7400 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
7401 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
7402 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
7403 || STMT_VINFO_RELEVANT (stmt_info) ==
7404 vect_used_in_outer_by_reduction))
7406 struct loop *innerloop = LOOP_VINFO_LOOP (
7407 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
7408 imm_use_iterator imm_iter;
7409 use_operand_p use_p;
7410 tree scalar_dest;
7411 gimple exit_phi;
7413 if (dump_enabled_p ())
7414 dump_printf_loc (MSG_NOTE, vect_location,
7415 "Record the vdef for outer-loop vectorization.\n");
7417 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
7418 (to be used when vectorizing outer-loop stmts that use the DEF of
7419 STMT). */
7420 if (gimple_code (stmt) == GIMPLE_PHI)
7421 scalar_dest = PHI_RESULT (stmt);
7422 else
7423 scalar_dest = gimple_assign_lhs (stmt);
7425 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
7427 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
7429 exit_phi = USE_STMT (use_p);
7430 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
7435 /* Handle stmts whose DEF is used outside the loop-nest that is
7436 being vectorized. */
7437 if (STMT_VINFO_LIVE_P (stmt_info)
7438 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
7440 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
7441 gcc_assert (done);
7444 if (vec_stmt)
7445 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
7447 return is_store;
7451 /* Remove a group of stores (for SLP or interleaving), free their
7452 stmt_vec_info. */
7454 void
7455 vect_remove_stores (gimple first_stmt)
7457 gimple next = first_stmt;
7458 gimple tmp;
7459 gimple_stmt_iterator next_si;
7461 while (next)
7463 stmt_vec_info stmt_info = vinfo_for_stmt (next);
7465 tmp = GROUP_NEXT_ELEMENT (stmt_info);
7466 if (is_pattern_stmt_p (stmt_info))
7467 next = STMT_VINFO_RELATED_STMT (stmt_info);
7468 /* Free the attached stmt_vec_info and remove the stmt. */
7469 next_si = gsi_for_stmt (next);
7470 unlink_stmt_vdef (next);
7471 gsi_remove (&next_si, true);
7472 release_defs (next);
7473 free_stmt_vec_info (next);
7474 next = tmp;
7479 /* Function new_stmt_vec_info.
7481 Create and initialize a new stmt_vec_info struct for STMT. */
7483 stmt_vec_info
7484 new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
7485 bb_vec_info bb_vinfo)
7487 stmt_vec_info res;
7488 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
7490 STMT_VINFO_TYPE (res) = undef_vec_info_type;
7491 STMT_VINFO_STMT (res) = stmt;
7492 STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
7493 STMT_VINFO_BB_VINFO (res) = bb_vinfo;
7494 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
7495 STMT_VINFO_LIVE_P (res) = false;
7496 STMT_VINFO_VECTYPE (res) = NULL;
7497 STMT_VINFO_VEC_STMT (res) = NULL;
7498 STMT_VINFO_VECTORIZABLE (res) = true;
7499 STMT_VINFO_IN_PATTERN_P (res) = false;
7500 STMT_VINFO_RELATED_STMT (res) = NULL;
7501 STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
7502 STMT_VINFO_DATA_REF (res) = NULL;
7504 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
7505 STMT_VINFO_DR_OFFSET (res) = NULL;
7506 STMT_VINFO_DR_INIT (res) = NULL;
7507 STMT_VINFO_DR_STEP (res) = NULL;
7508 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
7510 if (gimple_code (stmt) == GIMPLE_PHI
7511 && is_loop_header_bb_p (gimple_bb (stmt)))
7512 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
7513 else
7514 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
7516 STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
7517 STMT_SLP_TYPE (res) = loop_vect;
7518 GROUP_FIRST_ELEMENT (res) = NULL;
7519 GROUP_NEXT_ELEMENT (res) = NULL;
7520 GROUP_SIZE (res) = 0;
7521 GROUP_STORE_COUNT (res) = 0;
7522 GROUP_GAP (res) = 0;
7523 GROUP_SAME_DR_STMT (res) = NULL;
7525 return res;
7529 /* Create the vector holding the stmt_vec_info structs. */
7531 void
7532 init_stmt_vec_info_vec (void)
7534 gcc_assert (!stmt_vec_info_vec.exists ());
7535 stmt_vec_info_vec.create (50);
7539 /* Free the vector holding the stmt_vec_info structs. */
7541 void
7542 free_stmt_vec_info_vec (void)
7544 unsigned int i;
7545 vec_void_p info;
7546 FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
7547 if (info != NULL)
7548 free_stmt_vec_info (STMT_VINFO_STMT ((stmt_vec_info) info));
7549 gcc_assert (stmt_vec_info_vec.exists ());
7550 stmt_vec_info_vec.release ();
7554 /* Free stmt vectorization related info. */
7556 void
7557 free_stmt_vec_info (gimple stmt)
7559 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7561 if (!stmt_info)
7562 return;
7564 /* Check if this statement has a related "pattern stmt"
7565 (introduced by the vectorizer during the pattern recognition
7566 pass).  Free the pattern's stmt_vec_info and the def stmts'
7567 stmt_vec_infos too.  */
7568 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
7570 stmt_vec_info patt_info
7571 = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
7572 if (patt_info)
7574 gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
7575 gimple patt_stmt = STMT_VINFO_STMT (patt_info);
7576 gimple_set_bb (patt_stmt, NULL);
7577 tree lhs = gimple_get_lhs (patt_stmt);
7578 if (TREE_CODE (lhs) == SSA_NAME)
7579 release_ssa_name (lhs);
7580 if (seq)
7582 gimple_stmt_iterator si;
7583 for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
7585 gimple seq_stmt = gsi_stmt (si);
7586 gimple_set_bb (seq_stmt, NULL);
7587 lhs = gimple_get_lhs (seq_stmt);
7588 if (lhs && TREE_CODE (lhs) == SSA_NAME)
7589 release_ssa_name (lhs);
7590 free_stmt_vec_info (seq_stmt);
7593 free_stmt_vec_info (patt_stmt);
7597 STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
7598 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
7599 set_vinfo_for_stmt (stmt, NULL);
7600 free (stmt_info);
7604 /* Function get_vectype_for_scalar_type_and_size.
7606 Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
7607 by the target. */
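/* A worked example of the mapping this routine performs (the concrete
   modes are an assumption about a target with 128-bit vectors, not a
   statement about any particular backend):

     scalar_type = int  (SImode, nbytes == 4), size == 16
       -> mode_for_vector (SImode, 16 / 4) == V4SImode
       -> nunits == GET_MODE_SIZE (V4SImode) / 4 == 4
       -> build_vector_type (int, 4)

   With size == 0 the target's preferred_simd_mode hook chooses the
   vector mode instead.  */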
7609 static tree
7610 get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
7612 machine_mode inner_mode = TYPE_MODE (scalar_type);
7613 machine_mode simd_mode;
7614 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
7615 int nunits;
7616 tree vectype;
7618 if (nbytes == 0)
7619 return NULL_TREE;
7621 if (GET_MODE_CLASS (inner_mode) != MODE_INT
7622 && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
7623 return NULL_TREE;
7625 /* For vector types of elements whose mode precision doesn't
7626 match their type's precision we use an element type of mode
7627 precision. The vectorization routines will have to make sure
7628 they support the proper result truncation/extension.
7629 We also make sure to build vector types with INTEGER_TYPE
7630 component type only. */
7631 if (INTEGRAL_TYPE_P (scalar_type)
7632 && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
7633 || TREE_CODE (scalar_type) != INTEGER_TYPE))
7634 scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
7635 TYPE_UNSIGNED (scalar_type));
7637 /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
7638 When the component mode passes the above test, simply use a type
7639 corresponding to that mode. The theory is that any use that
7640 would cause problems with this will disable vectorization anyway. */
7641 else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
7642 && !INTEGRAL_TYPE_P (scalar_type))
7643 scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);
7645 /* We can't build a vector type of elements with alignment bigger than
7646 their size. */
7647 else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
7648 scalar_type = lang_hooks.types.type_for_mode (inner_mode,
7649 TYPE_UNSIGNED (scalar_type));
7651 /* If we fell back to using the mode, fail if there was
7652 no scalar type for it. */
7653 if (scalar_type == NULL_TREE)
7654 return NULL_TREE;
7656 /* If no size was supplied use the mode the target prefers. Otherwise
7657 lookup a vector mode of the specified size. */
7658 if (size == 0)
7659 simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
7660 else
7661 simd_mode = mode_for_vector (inner_mode, size / nbytes);
7662 nunits = GET_MODE_SIZE (simd_mode) / nbytes;
7663 if (nunits <= 1)
7664 return NULL_TREE;
7666 vectype = build_vector_type (scalar_type, nunits);
7668 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
7669 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
7670 return NULL_TREE;
7672 return vectype;
7675 unsigned int current_vector_size;
7677 /* Function get_vectype_for_scalar_type.
7679 Returns the vector type corresponding to SCALAR_TYPE as supported
7680 by the target. */
7682 tree
7683 get_vectype_for_scalar_type (tree scalar_type)
7685 tree vectype;
7686 vectype = get_vectype_for_scalar_type_and_size (scalar_type,
7687 current_vector_size);
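/* Note: the first successful query latches current_vector_size, so all
   later vector types picked during this vectorization attempt have the
   same vector size (until the vectorizer resets it).  */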
7688 if (vectype
7689 && current_vector_size == 0)
7690 current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
7691 return vectype;
7694 /* Function get_same_sized_vectype
7696 Returns a vector type corresponding to SCALAR_TYPE of size
7697 VECTOR_TYPE if supported by the target. */
7699 tree
7700 get_same_sized_vectype (tree scalar_type, tree vector_type)
7702 return get_vectype_for_scalar_type_and_size
7703 (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
7706 /* Function vect_is_simple_use.
7708 Input:
7709 LOOP_VINFO - the vect info of the loop that is being vectorized.
7710 BB_VINFO - the vect info of the basic block that is being vectorized.
7711 OPERAND - operand of STMT in the loop or bb.
7712 DEF - the defining stmt in case OPERAND is an SSA_NAME.
7714 Returns whether a stmt with OPERAND can be vectorized.
7715 For loops, supportable operands are constants, loop invariants, and operands
7716 that are defined by the current iteration of the loop. Unsupportable
7717 operands are those that are defined by a previous iteration of the loop (as
7718 is the case in reduction/induction computations).
7719 For basic blocks, supportable operands are constants and bb invariants.
7720 For now, operands defined outside the basic block are not supported. */
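/* A minimal caller sketch (hypothetical names op, loop_vinfo and
   bb_vinfo; a sketch of how the analysis routines typically use this
   function, not a copy of any particular caller):

     gimple def_stmt;
     tree def;
     enum vect_def_type dt;
     if (!vect_is_simple_use (op, stmt, loop_vinfo, bb_vinfo,
                              &def_stmt, &def, &dt))
       return false;   /* Operand is defined in an unsupported way.  */
     if (dt == vect_constant_def || dt == vect_external_def)
       ;               /* Operand is invariant; broadcast it.  */
*/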
7722 bool
7723 vect_is_simple_use (tree operand, gimple stmt, loop_vec_info loop_vinfo,
7724 bb_vec_info bb_vinfo, gimple *def_stmt,
7725 tree *def, enum vect_def_type *dt)
7727 basic_block bb;
7728 stmt_vec_info stmt_vinfo;
7729 struct loop *loop = NULL;
7731 if (loop_vinfo)
7732 loop = LOOP_VINFO_LOOP (loop_vinfo);
7734 *def_stmt = NULL;
7735 *def = NULL_TREE;
7737 if (dump_enabled_p ())
7739 dump_printf_loc (MSG_NOTE, vect_location,
7740 "vect_is_simple_use: operand ");
7741 dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
7742 dump_printf (MSG_NOTE, "\n");
7745 if (CONSTANT_CLASS_P (operand))
7747 *dt = vect_constant_def;
7748 return true;
7751 if (is_gimple_min_invariant (operand))
7753 *def = operand;
7754 *dt = vect_external_def;
7755 return true;
7758 if (TREE_CODE (operand) == PAREN_EXPR)
7760 if (dump_enabled_p ())
7761 dump_printf_loc (MSG_NOTE, vect_location, "non-associatable copy.\n");
7762 operand = TREE_OPERAND (operand, 0);
7765 if (TREE_CODE (operand) != SSA_NAME)
7767 if (dump_enabled_p ())
7768 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7769 "not ssa-name.\n");
7770 return false;
7773 *def_stmt = SSA_NAME_DEF_STMT (operand);
7774 if (*def_stmt == NULL)
7776 if (dump_enabled_p ())
7777 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7778 "no def_stmt.\n");
7779 return false;
7782 if (dump_enabled_p ())
7784 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
7785 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
7788 /* An empty stmt is expected only in the case of a function argument
7789 (otherwise we expect a PHI node or a GIMPLE_ASSIGN).  */
7790 if (gimple_nop_p (*def_stmt))
7792 *def = operand;
7793 *dt = vect_external_def;
7794 return true;
7797 bb = gimple_bb (*def_stmt);
7799 if ((loop && !flow_bb_inside_loop_p (loop, bb))
7800 || (!loop && bb != BB_VINFO_BB (bb_vinfo))
7801 || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
7802 *dt = vect_external_def;
7803 else
7805 stmt_vinfo = vinfo_for_stmt (*def_stmt);
7806 if (!loop && !STMT_VINFO_VECTORIZABLE (stmt_vinfo))
7807 *dt = vect_external_def;
7808 else
7809 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
7812 if (dump_enabled_p ())
7814 dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
7815 switch (*dt)
7817 case vect_uninitialized_def:
7818 dump_printf (MSG_NOTE, "uninitialized\n");
7819 break;
7820 case vect_constant_def:
7821 dump_printf (MSG_NOTE, "constant\n");
7822 break;
7823 case vect_external_def:
7824 dump_printf (MSG_NOTE, "external\n");
7825 break;
7826 case vect_internal_def:
7827 dump_printf (MSG_NOTE, "internal\n");
7828 break;
7829 case vect_induction_def:
7830 dump_printf (MSG_NOTE, "induction\n");
7831 break;
7832 case vect_reduction_def:
7833 dump_printf (MSG_NOTE, "reduction\n");
7834 break;
7835 case vect_double_reduction_def:
7836 dump_printf (MSG_NOTE, "double reduction\n");
7837 break;
7838 case vect_nested_cycle:
7839 dump_printf (MSG_NOTE, "nested cycle\n");
7840 break;
7841 case vect_unknown_def_type:
7842 dump_printf (MSG_NOTE, "unknown\n");
7843 break;
7847 if (*dt == vect_unknown_def_type
7848 || (stmt
7849 && *dt == vect_double_reduction_def
7850 && gimple_code (stmt) != GIMPLE_PHI))
7852 if (dump_enabled_p ())
7853 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7854 "Unsupported pattern.\n");
7855 return false;
7858 switch (gimple_code (*def_stmt))
7860 case GIMPLE_PHI:
7861 *def = gimple_phi_result (*def_stmt);
7862 break;
7864 case GIMPLE_ASSIGN:
7865 *def = gimple_assign_lhs (*def_stmt);
7866 break;
7868 case GIMPLE_CALL:
7869 *def = gimple_call_lhs (*def_stmt);
7870 if (*def != NULL)
7871 break;
7872 /* FALLTHRU */
7873 default:
7874 if (dump_enabled_p ())
7875 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7876 "unsupported defining stmt:\n");
7877 return false;
7880 return true;
7883 /* Function vect_is_simple_use_1.
7885 Same as vect_is_simple_use but also determines the vector operand
7886 type of OPERAND and stores it to *VECTYPE.  If the definition of
7887 OPERAND is vect_uninitialized_def, vect_constant_def or
7888 vect_external_def, *VECTYPE will be set to NULL_TREE and the caller
7889 is responsible for computing the best suited vector type for the
7890 scalar operand. */
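/* In other words (a descriptive note, not taken from a specific
   caller): for internal, induction, reduction, double-reduction and
   nested-cycle defs the vectype returned is the one fixed during
   analysis of the defining statement, so the caller can use it
   directly; for constants and externals the caller must derive a
   vectype from the use site instead.  */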
7892 bool
7893 vect_is_simple_use_1 (tree operand, gimple stmt, loop_vec_info loop_vinfo,
7894 bb_vec_info bb_vinfo, gimple *def_stmt,
7895 tree *def, enum vect_def_type *dt, tree *vectype)
7897 if (!vect_is_simple_use (operand, stmt, loop_vinfo, bb_vinfo, def_stmt,
7898 def, dt))
7899 return false;
7901 /* Now get a vector type if the def is internal, otherwise supply
7902 NULL_TREE and leave it up to the caller to figure out a proper
7903 type for the use stmt. */
7904 if (*dt == vect_internal_def
7905 || *dt == vect_induction_def
7906 || *dt == vect_reduction_def
7907 || *dt == vect_double_reduction_def
7908 || *dt == vect_nested_cycle)
7910 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
7912 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
7913 && !STMT_VINFO_RELEVANT (stmt_info)
7914 && !STMT_VINFO_LIVE_P (stmt_info))
7915 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
7917 *vectype = STMT_VINFO_VECTYPE (stmt_info);
7918 gcc_assert (*vectype != NULL_TREE);
7920 else if (*dt == vect_uninitialized_def
7921 || *dt == vect_constant_def
7922 || *dt == vect_external_def)
7923 *vectype = NULL_TREE;
7924 else
7925 gcc_unreachable ();
7927 return true;
7931 /* Function supportable_widening_operation
7933 Check whether an operation represented by the code CODE is a
7934 widening operation that is supported by the target platform in
7935 vector form (i.e., when operating on arguments of type VECTYPE_IN
7936 producing a result of type VECTYPE_OUT).
7938 Widening operations we currently support are NOP (CONVERT), FLOAT,
7939 WIDEN_MULT and WIDEN_LSHIFT.  This function checks if these operations are supported
7940 by the target platform either directly (via vector tree-codes), or via
7941 target builtins.
7943 Output:
7944 - CODE1 and CODE2 are codes of vector operations to be used when
7945 vectorizing the operation, if available.
7946 - MULTI_STEP_CVT determines the number of required intermediate steps in
7947 case of multi-step conversion (like char->short->int - in that case
7948 MULTI_STEP_CVT will be 1).
7949 - INTERM_TYPES contains the intermediate type required to perform the
7950 widening operation (short in the above example). */
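/* Worked example (the vector modes are an assumption about a target
   with 128-bit vectors supporting the vec_unpack optabs):

     widening char -> int, VECTYPE_IN = vector(16) char,
     VECTYPE_OUT = vector(4) int.  A single VEC_UNPACK_LO/HI_EXPR pair
     only reaches vector(8) short, so the multi-step path is taken:

       *CODE1 = VEC_UNPACK_LO_EXPR, *CODE2 = VEC_UNPACK_HI_EXPR,
       *MULTI_STEP_CVT = 1, INTERM_TYPES = { vector(8) short }.  */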
7952 bool
7953 supportable_widening_operation (enum tree_code code, gimple stmt,
7954 tree vectype_out, tree vectype_in,
7955 enum tree_code *code1, enum tree_code *code2,
7956 int *multi_step_cvt,
7957 vec<tree> *interm_types)
7959 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7960 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
7961 struct loop *vect_loop = NULL;
7962 machine_mode vec_mode;
7963 enum insn_code icode1, icode2;
7964 optab optab1, optab2;
7965 tree vectype = vectype_in;
7966 tree wide_vectype = vectype_out;
7967 enum tree_code c1, c2;
7968 int i;
7969 tree prev_type, intermediate_type;
7970 machine_mode intermediate_mode, prev_mode;
7971 optab optab3, optab4;
7973 *multi_step_cvt = 0;
7974 if (loop_info)
7975 vect_loop = LOOP_VINFO_LOOP (loop_info);
7977 switch (code)
7979 case WIDEN_MULT_EXPR:
7980 /* The result of a vectorized widening operation usually requires
7981 two vectors (because the widened results do not fit into one vector).
7982 The generated vector results would normally be expected to be
7983 generated in the same order as in the original scalar computation,
7984 i.e. if 8 results are generated in each vector iteration, they are
7985 to be organized as follows:
7986 vect1: [res1,res2,res3,res4],
7987 vect2: [res5,res6,res7,res8].
7989 However, in the special case that the result of the widening
7990 operation is used in a reduction computation only, the order doesn't
7991 matter (because when vectorizing a reduction we change the order of
7992 the computation). Some targets can take advantage of this and
7993 generate more efficient code. For example, targets like Altivec,
7994 that support widen_mult using a sequence of {mult_even,mult_odd}
7995 generate the following vectors:
7996 vect1: [res1,res3,res5,res7],
7997 vect2: [res2,res4,res6,res8].
7999 When vectorizing outer-loops, we execute the inner-loop sequentially
8000 (each vectorized inner-loop iteration contributes to VF outer-loop
8001 iterations in parallel).  We therefore don't allow changing the
8002 order of the computation in the inner-loop during outer-loop
8003 vectorization. */
8004 /* TODO: Another case in which order doesn't *really* matter is when we
8005 widen and then contract again, e.g. (short)((int)x * y >> 8).
8006 Normally, pack_trunc performs an even/odd permute, whereas the
8007 repack from an even/odd expansion would be an interleave, which
8008 would be significantly simpler for e.g. AVX2. */
8009 /* In any case, in order to avoid duplicating the code below, recurse
8010 on VEC_WIDEN_MULT_EVEN_EXPR. If it succeeds, all the return values
8011 are properly set up for the caller. If we fail, we'll continue with
8012 a VEC_WIDEN_MULT_LO/HI_EXPR check. */
8013 if (vect_loop
8014 && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
8015 && !nested_in_vect_loop_p (vect_loop, stmt)
8016 && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
8017 stmt, vectype_out, vectype_in,
8018 code1, code2, multi_step_cvt,
8019 interm_types))
8021 /* Elements in a vector with vect_used_by_reduction property cannot
8022 be reordered if the use chain with this property does not have the
8023 same operation.  One such example is s += a * b, where elements
8024 in a and b cannot be reordered. Here we check if the vector defined
8025 by STMT is only directly used in the reduction statement. */
8026 tree lhs = gimple_assign_lhs (stmt);
8027 use_operand_p dummy;
8028 gimple use_stmt;
8029 stmt_vec_info use_stmt_info = NULL;
8030 if (single_imm_use (lhs, &dummy, &use_stmt)
8031 && (use_stmt_info = vinfo_for_stmt (use_stmt))
8032 && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
8033 return true;
8035 c1 = VEC_WIDEN_MULT_LO_EXPR;
8036 c2 = VEC_WIDEN_MULT_HI_EXPR;
8037 break;
8039 case VEC_WIDEN_MULT_EVEN_EXPR:
8040 /* Support the recursion induced just above. */
8041 c1 = VEC_WIDEN_MULT_EVEN_EXPR;
8042 c2 = VEC_WIDEN_MULT_ODD_EXPR;
8043 break;
8045 case WIDEN_LSHIFT_EXPR:
8046 c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
8047 c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
8048 break;
8050 CASE_CONVERT:
8051 c1 = VEC_UNPACK_LO_EXPR;
8052 c2 = VEC_UNPACK_HI_EXPR;
8053 break;
8055 case FLOAT_EXPR:
8056 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
8057 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
8058 break;
8060 case FIX_TRUNC_EXPR:
8061 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
8062 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
8063 computing the operation. */
8064 return false;
8066 default:
8067 gcc_unreachable ();
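/* On big-endian targets the "high" half of a vector register holds the
   elements with the lower indices, so the LO/HI codes are swapped below
   to keep the generated results in element order.  The even/odd variant
   selects elements by index and therefore needs no swap.  */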
8070 if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
8072 enum tree_code ctmp = c1;
8073 c1 = c2;
8074 c2 = ctmp;
8077 if (code == FIX_TRUNC_EXPR)
8079 /* The signedness is determined from the output operand.  */
8080 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
8081 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
8083 else
8085 optab1 = optab_for_tree_code (c1, vectype, optab_default);
8086 optab2 = optab_for_tree_code (c2, vectype, optab_default);
8089 if (!optab1 || !optab2)
8090 return false;
8092 vec_mode = TYPE_MODE (vectype);
8093 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
8094 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
8095 return false;
8097 *code1 = c1;
8098 *code2 = c2;
8100 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
8101 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
8102 return true;
8104 /* Check if it's a multi-step conversion that can be done using intermediate
8105 types. */
8107 prev_type = vectype;
8108 prev_mode = vec_mode;
8110 if (!CONVERT_EXPR_CODE_P (code))
8111 return false;
8113 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
8114 intermediate steps in the promotion sequence.  We try
8115 MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
8116 not. */
8117 interm_types->create (MAX_INTERM_CVT_STEPS);
8118 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
8120 intermediate_mode = insn_data[icode1].operand[0].mode;
8121 intermediate_type
8122 = lang_hooks.types.type_for_mode (intermediate_mode,
8123 TYPE_UNSIGNED (prev_type));
8124 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
8125 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
8127 if (!optab3 || !optab4
8128 || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
8129 || insn_data[icode1].operand[0].mode != intermediate_mode
8130 || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
8131 || insn_data[icode2].operand[0].mode != intermediate_mode
8132 || ((icode1 = optab_handler (optab3, intermediate_mode))
8133 == CODE_FOR_nothing)
8134 || ((icode2 = optab_handler (optab4, intermediate_mode))
8135 == CODE_FOR_nothing))
8136 break;
8138 interm_types->quick_push (intermediate_type);
8139 (*multi_step_cvt)++;
8141 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
8142 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
8143 return true;
8145 prev_type = intermediate_type;
8146 prev_mode = intermediate_mode;
8149 interm_types->release ();
8150 return false;
8154 /* Function supportable_narrowing_operation
8156 Check whether an operation represented by the code CODE is a
8157 narrowing operation that is supported by the target platform in
8158 vector form (i.e., when operating on arguments of type VECTYPE_IN
8159 and producing a result of type VECTYPE_OUT).
8161 Narrowing operations we currently support are NOP (CONVERT) and
8162 FIX_TRUNC. This function checks if these operations are supported by
8163 the target platform directly via vector tree-codes.
8165 Output:
8166 - CODE1 is the code of a vector operation to be used when
8167 vectorizing the operation, if available.
8168 - MULTI_STEP_CVT determines the number of required intermediate steps in
8169 case of multi-step conversion (like int->short->char - in that case
8170 MULTI_STEP_CVT will be 1).
8171 - INTERM_TYPES contains the intermediate type required to perform the
8172 narrowing operation (short in the above example). */
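/* Worked example (again assuming a target with 128-bit vectors):

     narrowing int -> char, VECTYPE_IN = vector(4) int,
     VECTYPE_OUT = vector(16) char.  VEC_PACK_TRUNC_EXPR on V4SI only
     reaches vector(8) short, so the multi-step path applies:

       *CODE1 = VEC_PACK_TRUNC_EXPR, *MULTI_STEP_CVT = 1,
       INTERM_TYPES = { vector(8) short }.  */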
8174 bool
8175 supportable_narrowing_operation (enum tree_code code,
8176 tree vectype_out, tree vectype_in,
8177 enum tree_code *code1, int *multi_step_cvt,
8178 vec<tree> *interm_types)
8180 machine_mode vec_mode;
8181 enum insn_code icode1;
8182 optab optab1, interm_optab;
8183 tree vectype = vectype_in;
8184 tree narrow_vectype = vectype_out;
8185 enum tree_code c1;
8186 tree intermediate_type;
8187 machine_mode intermediate_mode, prev_mode;
8188 int i;
8189 bool uns;
8191 *multi_step_cvt = 0;
8192 switch (code)
8194 CASE_CONVERT:
8195 c1 = VEC_PACK_TRUNC_EXPR;
8196 break;
8198 case FIX_TRUNC_EXPR:
8199 c1 = VEC_PACK_FIX_TRUNC_EXPR;
8200 break;
8202 case FLOAT_EXPR:
8203 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
8204 tree code and optabs used for computing the operation. */
8205 return false;
8207 default:
8208 gcc_unreachable ();
8211 if (code == FIX_TRUNC_EXPR)
8212 /* The signedness is determined from the output operand.  */
8213 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
8214 else
8215 optab1 = optab_for_tree_code (c1, vectype, optab_default);
8217 if (!optab1)
8218 return false;
8220 vec_mode = TYPE_MODE (vectype);
8221 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
8222 return false;
8224 *code1 = c1;
8226 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
8227 return true;
8229 /* Check if it's a multi-step conversion that can be done using intermediate
8230 types. */
8231 prev_mode = vec_mode;
8232 if (code == FIX_TRUNC_EXPR)
8233 uns = TYPE_UNSIGNED (vectype_out);
8234 else
8235 uns = TYPE_UNSIGNED (vectype);
8237 /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
8238 conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
8239 costly than signed. */
8240 if (code == FIX_TRUNC_EXPR && uns)
8242 enum insn_code icode2;
8244 intermediate_type
8245 = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
8246 interm_optab
8247 = optab_for_tree_code (c1, intermediate_type, optab_default);
8248 if (interm_optab != unknown_optab
8249 && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
8250 && insn_data[icode1].operand[0].mode
8251 == insn_data[icode2].operand[0].mode)
8253 uns = false;
8254 optab1 = interm_optab;
8255 icode1 = icode2;
8259 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
8260 intermediate steps in the demotion sequence.  We try
8261 MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not. */
8262 interm_types->create (MAX_INTERM_CVT_STEPS);
8263 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
8265 intermediate_mode = insn_data[icode1].operand[0].mode;
8266 intermediate_type
8267 = lang_hooks.types.type_for_mode (intermediate_mode, uns);
8268 interm_optab
8269 = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
8270 optab_default);
8271 if (!interm_optab
8272 || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
8273 || insn_data[icode1].operand[0].mode != intermediate_mode
8274 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
8275 == CODE_FOR_nothing))
8276 break;
8278 interm_types->quick_push (intermediate_type);
8279 (*multi_step_cvt)++;
8281 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
8282 return true;
8284 prev_mode = intermediate_mode;
8285 optab1 = interm_optab;
8288 interm_types->release ();
8289 return false;