gcc/tree-vect-stmts.c
blob 37a706fa849456b7a5689c0bd366d7e5831749e9
1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003-2015 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
4 and Ira Rosen <irar@il.ibm.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "dumpfile.h"
26 #include "tm.h"
27 #include "hash-set.h"
28 #include "machmode.h"
29 #include "vec.h"
30 #include "double-int.h"
31 #include "input.h"
32 #include "alias.h"
33 #include "symtab.h"
34 #include "wide-int.h"
35 #include "inchash.h"
36 #include "tree.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
39 #include "target.h"
40 #include "predict.h"
41 #include "hard-reg-set.h"
42 #include "function.h"
43 #include "dominance.h"
44 #include "cfg.h"
45 #include "basic-block.h"
46 #include "gimple-pretty-print.h"
47 #include "tree-ssa-alias.h"
48 #include "internal-fn.h"
49 #include "tree-eh.h"
50 #include "gimple-expr.h"
51 #include "is-a.h"
52 #include "gimple.h"
53 #include "gimplify.h"
54 #include "gimple-iterator.h"
55 #include "gimplify-me.h"
56 #include "gimple-ssa.h"
57 #include "tree-cfg.h"
58 #include "tree-phinodes.h"
59 #include "ssa-iterators.h"
60 #include "stringpool.h"
61 #include "tree-ssanames.h"
62 #include "tree-ssa-loop-manip.h"
63 #include "cfgloop.h"
64 #include "tree-ssa-loop.h"
65 #include "tree-scalar-evolution.h"
66 #include "hashtab.h"
67 #include "rtl.h"
68 #include "flags.h"
69 #include "statistics.h"
70 #include "real.h"
71 #include "fixed-value.h"
72 #include "insn-config.h"
73 #include "expmed.h"
74 #include "dojump.h"
75 #include "explow.h"
76 #include "calls.h"
77 #include "emit-rtl.h"
78 #include "varasm.h"
79 #include "stmt.h"
80 #include "expr.h"
81 #include "recog.h" /* FIXME: for insn_data */
82 #include "insn-codes.h"
83 #include "optabs.h"
84 #include "diagnostic-core.h"
85 #include "tree-vectorizer.h"
86 #include "hash-map.h"
87 #include "plugin-api.h"
88 #include "ipa-ref.h"
89 #include "cgraph.h"
90 #include "builtins.h"
92 /* For lang_hooks.types.type_for_mode. */
93 #include "langhooks.h"
95 /* Return the vectorized type for the given statement. */
97 tree
98 stmt_vectype (struct _stmt_vec_info *stmt_info)
100 return STMT_VINFO_VECTYPE (stmt_info);
103 /* Return TRUE iff the given statement is in an inner loop relative to
104 the loop being vectorized. */
105 bool
106 stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
108 gimple stmt = STMT_VINFO_STMT (stmt_info);
109 basic_block bb = gimple_bb (stmt);
110 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
111 struct loop* loop;
113 if (!loop_vinfo)
114 return false;
116 loop = LOOP_VINFO_LOOP (loop_vinfo);
118 return (bb->loop_father == loop->inner);
121 /* Record the cost of a statement, either by directly informing the
122 target model or by saving it in a vector for later processing.
123 Return a preliminary estimate of the statement's cost. */
125 unsigned
126 record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
127 enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
128 int misalign, enum vect_cost_model_location where)
130 if (body_cost_vec)
132 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
133 add_stmt_info_to_vec (body_cost_vec, count, kind,
134 stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
135 misalign);
136 return (unsigned)
137 (builtin_vectorization_cost (kind, vectype, misalign) * count);
140 else
142 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
143 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
144 void *target_cost_data;
146 if (loop_vinfo)
147 target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
148 else
149 target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);
151 return add_stmt_cost (target_cost_data, count, kind, stmt_info,
152 misalign, where);
156 /* Return a variable of type ELEM_TYPE[NELEMS]. */
158 static tree
159 create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
161 return create_tmp_var (build_array_type_nelts (elem_type, nelems),
162 "vect_array");
165 /* ARRAY is an array of vectors created by create_vector_array.
166 Return an SSA_NAME for the vector in index N. The reference
167 is part of the vectorization of STMT and the vector is associated
168 with scalar destination SCALAR_DEST. */
170 static tree
171 read_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
172 tree array, unsigned HOST_WIDE_INT n)
174 tree vect_type, vect, vect_name, array_ref;
175 gimple new_stmt;
177 gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
178 vect_type = TREE_TYPE (TREE_TYPE (array));
179 vect = vect_create_destination_var (scalar_dest, vect_type);
180 array_ref = build4 (ARRAY_REF, vect_type, array,
181 build_int_cst (size_type_node, n),
182 NULL_TREE, NULL_TREE);
184 new_stmt = gimple_build_assign (vect, array_ref);
185 vect_name = make_ssa_name (vect, new_stmt);
186 gimple_assign_set_lhs (new_stmt, vect_name);
187 vect_finish_stmt_generation (stmt, new_stmt, gsi);
189 return vect_name;
192 /* ARRAY is an array of vectors created by create_vector_array.
193 Emit code to store SSA_NAME VECT in index N of the array.
194 The store is part of the vectorization of STMT. */
196 static void
197 write_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree vect,
198 tree array, unsigned HOST_WIDE_INT n)
200 tree array_ref;
201 gimple new_stmt;
203 array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
204 build_int_cst (size_type_node, n),
205 NULL_TREE, NULL_TREE);
207 new_stmt = gimple_build_assign (array_ref, vect);
208 vect_finish_stmt_generation (stmt, new_stmt, gsi);
211 /* PTR is a pointer to an array of type TYPE. Return a representation
212 of *PTR. The memory reference replaces those in FIRST_DR
213 (and its group). */
215 static tree
216 create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
218 tree mem_ref, alias_ptr_type;
220 alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
221 mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
222 /* Arrays have the same alignment as their type. */
223 set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
224 return mem_ref;
227 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
229 /* Function vect_mark_relevant.
231 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
233 static void
234 vect_mark_relevant (vec<gimple> *worklist, gimple stmt,
235 enum vect_relevant relevant, bool live_p,
236 bool used_in_pattern)
238 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
239 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
240 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
241 gimple pattern_stmt;
243 if (dump_enabled_p ())
244 dump_printf_loc (MSG_NOTE, vect_location,
245 "mark relevant %d, live %d.\n", relevant, live_p);
247 /* If this stmt is an original stmt in a pattern, we might need to mark its
248 related pattern stmt instead of the original stmt. However, such stmts
249 may have their own uses that are not in any pattern, in such cases the
250 stmt itself should be marked. */
251 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
253 bool found = false;
254 if (!used_in_pattern)
256 imm_use_iterator imm_iter;
257 use_operand_p use_p;
258 gimple use_stmt;
259 tree lhs;
260 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
261 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
263 if (is_gimple_assign (stmt))
264 lhs = gimple_assign_lhs (stmt);
265 else
266 lhs = gimple_call_lhs (stmt);
268 /* This use is out of pattern use, if LHS has other uses that are
269 pattern uses, we should mark the stmt itself, and not the pattern
270 stmt. */
271 if (lhs && TREE_CODE (lhs) == SSA_NAME)
272 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
274 if (is_gimple_debug (USE_STMT (use_p)))
275 continue;
276 use_stmt = USE_STMT (use_p);
278 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
279 continue;
281 if (vinfo_for_stmt (use_stmt)
282 && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
284 found = true;
285 break;
290 if (!found)
292 /* This is the last stmt in a sequence that was detected as a
293 pattern that can potentially be vectorized. Don't mark the stmt
294 as relevant/live because it's not going to be vectorized.
295 Instead mark the pattern-stmt that replaces it. */
297 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
299 if (dump_enabled_p ())
300 dump_printf_loc (MSG_NOTE, vect_location,
301 "last stmt in pattern. don't mark"
302 " relevant/live.\n");
303 stmt_info = vinfo_for_stmt (pattern_stmt);
304 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
305 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
306 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
307 stmt = pattern_stmt;
311 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
312 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
313 STMT_VINFO_RELEVANT (stmt_info) = relevant;
315 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
316 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
318 if (dump_enabled_p ())
319 dump_printf_loc (MSG_NOTE, vect_location,
320 "already marked relevant/live.\n");
321 return;
324 worklist->safe_push (stmt);
328 /* Function vect_stmt_relevant_p.
330 Return true if STMT in loop that is represented by LOOP_VINFO is
331 "relevant for vectorization".
333 A stmt is considered "relevant for vectorization" if:
334 - it has uses outside the loop.
335 - it has vdefs (it alters memory).
336 - control stmts in the loop (except for the exit condition).
338 CHECKME: what other side effects would the vectorizer allow? */
340 static bool
341 vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
342 enum vect_relevant *relevant, bool *live_p)
344 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
345 ssa_op_iter op_iter;
346 imm_use_iterator imm_iter;
347 use_operand_p use_p;
348 def_operand_p def_p;
350 *relevant = vect_unused_in_scope;
351 *live_p = false;
353 /* cond stmt other than loop exit cond. */
354 if (is_ctrl_stmt (stmt)
355 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
356 != loop_exit_ctrl_vec_info_type)
357 *relevant = vect_used_in_scope;
359 /* changing memory. */
360 if (gimple_code (stmt) != GIMPLE_PHI)
361 if (gimple_vdef (stmt)
362 && !gimple_clobber_p (stmt))
364 if (dump_enabled_p ())
365 dump_printf_loc (MSG_NOTE, vect_location,
366 "vec_stmt_relevant_p: stmt has vdefs.\n");
367 *relevant = vect_used_in_scope;
370 /* uses outside the loop. */
371 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
373 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
375 basic_block bb = gimple_bb (USE_STMT (use_p));
376 if (!flow_bb_inside_loop_p (loop, bb))
378 if (dump_enabled_p ())
379 dump_printf_loc (MSG_NOTE, vect_location,
380 "vec_stmt_relevant_p: used out of loop.\n");
382 if (is_gimple_debug (USE_STMT (use_p)))
383 continue;
385 /* We expect all such uses to be in the loop exit phis
386 (because of loop closed form) */
387 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
388 gcc_assert (bb == single_exit (loop)->dest);
390 *live_p = true;
395 return (*live_p || *relevant);
399 /* Function exist_non_indexing_operands_for_use_p
401 USE is one of the uses attached to STMT. Check if USE is
402 used in STMT for anything other than indexing an array. */
404 static bool
405 exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
407 tree operand;
408 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
410 /* USE corresponds to some operand in STMT. If there is no data
411 reference in STMT, then any operand that corresponds to USE
412 is not indexing an array. */
413 if (!STMT_VINFO_DATA_REF (stmt_info))
414 return true;
416 /* STMT has a data_ref. FORNOW this means that it is of one of
417 the following forms:
418 -1- ARRAY_REF = var
419 -2- var = ARRAY_REF
420 (This should have been verified in analyze_data_refs).
422 'var' in the second case corresponds to a def, not a use,
423 so USE cannot correspond to any operands that are not used
424 for array indexing.
426 Therefore, all we need to check is if STMT falls into the
427 first case, and whether var corresponds to USE. */
429 if (!gimple_assign_copy_p (stmt))
431 if (is_gimple_call (stmt)
432 && gimple_call_internal_p (stmt))
433 switch (gimple_call_internal_fn (stmt))
435 case IFN_MASK_STORE:
436 operand = gimple_call_arg (stmt, 3);
437 if (operand == use)
438 return true;
439 /* FALLTHRU */
440 case IFN_MASK_LOAD:
441 operand = gimple_call_arg (stmt, 2);
442 if (operand == use)
443 return true;
444 break;
445 default:
446 break;
448 return false;
451 if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
452 return false;
453 operand = gimple_assign_rhs1 (stmt);
454 if (TREE_CODE (operand) != SSA_NAME)
455 return false;
457 if (operand == use)
458 return true;
460 return false;
465 Function process_use.
467 Inputs:
468 - a USE in STMT in a loop represented by LOOP_VINFO
469 - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
470 that defined USE. This is done by calling mark_relevant and passing it
471 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
472 - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
473 be performed.
475 Outputs:
476 Generally, LIVE_P and RELEVANT are used to define the liveness and
477 relevance info of the DEF_STMT of this USE:
478 STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
479 STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
480 Exceptions:
481 - case 1: If USE is used only for address computations (e.g. array indexing),
482 which does not need to be directly vectorized, then the liveness/relevance
483 of the respective DEF_STMT is left unchanged.
484 - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
485 skip DEF_STMT because it has already been processed.
486 - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
487 be modified accordingly.
489 Return true if everything is as expected. Return false otherwise. */
491 static bool
492 process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
493 enum vect_relevant relevant, vec<gimple> *worklist,
494 bool force)
496 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
497 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
498 stmt_vec_info dstmt_vinfo;
499 basic_block bb, def_bb;
500 tree def;
501 gimple def_stmt;
502 enum vect_def_type dt;
504 /* case 1: we are only interested in uses that need to be vectorized. Uses
505 that are used for address computation are not considered relevant. */
506 if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
507 return true;
509 if (!vect_is_simple_use (use, stmt, loop_vinfo, NULL, &def_stmt, &def, &dt))
511 if (dump_enabled_p ())
512 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
513 "not vectorized: unsupported use in stmt.\n");
514 return false;
517 if (!def_stmt || gimple_nop_p (def_stmt))
518 return true;
520 def_bb = gimple_bb (def_stmt);
521 if (!flow_bb_inside_loop_p (loop, def_bb))
523 if (dump_enabled_p ())
524 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
525 return true;
528 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
529 DEF_STMT must have already been processed, because this should be the
530 only way that STMT, which is a reduction-phi, was put in the worklist,
531 as there should be no other uses for DEF_STMT in the loop. So we just
532 check that everything is as expected, and we are done. */
533 dstmt_vinfo = vinfo_for_stmt (def_stmt);
534 bb = gimple_bb (stmt);
535 if (gimple_code (stmt) == GIMPLE_PHI
536 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
537 && gimple_code (def_stmt) != GIMPLE_PHI
538 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
539 && bb->loop_father == def_bb->loop_father)
541 if (dump_enabled_p ())
542 dump_printf_loc (MSG_NOTE, vect_location,
543 "reduc-stmt defining reduc-phi in the same nest.\n");
544 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
545 dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
546 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
547 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
548 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
549 return true;
552 /* case 3a: outer-loop stmt defining an inner-loop stmt:
553 outer-loop-header-bb:
554 d = def_stmt
555 inner-loop:
556 stmt # use (d)
557 outer-loop-tail-bb:
558 ... */
559 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
561 if (dump_enabled_p ())
562 dump_printf_loc (MSG_NOTE, vect_location,
563 "outer-loop def-stmt defining inner-loop stmt.\n");
565 switch (relevant)
567 case vect_unused_in_scope:
568 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
569 vect_used_in_scope : vect_unused_in_scope;
570 break;
572 case vect_used_in_outer_by_reduction:
573 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
574 relevant = vect_used_by_reduction;
575 break;
577 case vect_used_in_outer:
578 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
579 relevant = vect_used_in_scope;
580 break;
582 case vect_used_in_scope:
583 break;
585 default:
586 gcc_unreachable ();
590 /* case 3b: inner-loop stmt defining an outer-loop stmt:
591 outer-loop-header-bb:
593 inner-loop:
594 d = def_stmt
595 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
596 stmt # use (d) */
597 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
599 if (dump_enabled_p ())
600 dump_printf_loc (MSG_NOTE, vect_location,
601 "inner-loop def-stmt defining outer-loop stmt.\n");
603 switch (relevant)
605 case vect_unused_in_scope:
606 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
607 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
608 vect_used_in_outer_by_reduction : vect_unused_in_scope;
609 break;
611 case vect_used_by_reduction:
612 relevant = vect_used_in_outer_by_reduction;
613 break;
615 case vect_used_in_scope:
616 relevant = vect_used_in_outer;
617 break;
619 default:
620 gcc_unreachable ();
624 vect_mark_relevant (worklist, def_stmt, relevant, live_p,
625 is_pattern_stmt_p (stmt_vinfo));
626 return true;
630 /* Function vect_mark_stmts_to_be_vectorized.
632 Not all stmts in the loop need to be vectorized. For example:
634 for i...
635 for j...
636 1. T0 = i + j
637 2. T1 = a[T0]
639 3. j = j + 1
641 Stmt 1 and 3 do not need to be vectorized, because loop control and
642 addressing of vectorized data-refs are handled differently.
644 This pass detects such stmts. */
646 bool
647 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
649 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
650 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
651 unsigned int nbbs = loop->num_nodes;
652 gimple_stmt_iterator si;
653 gimple stmt;
654 unsigned int i;
655 stmt_vec_info stmt_vinfo;
656 basic_block bb;
657 gimple phi;
658 bool live_p;
659 enum vect_relevant relevant, tmp_relevant;
660 enum vect_def_type def_type;
662 if (dump_enabled_p ())
663 dump_printf_loc (MSG_NOTE, vect_location,
664 "=== vect_mark_stmts_to_be_vectorized ===\n");
666 auto_vec<gimple, 64> worklist;
668 /* 1. Init worklist. */
669 for (i = 0; i < nbbs; i++)
671 bb = bbs[i];
672 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
674 phi = gsi_stmt (si);
675 if (dump_enabled_p ())
677 dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
678 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
681 if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
682 vect_mark_relevant (&worklist, phi, relevant, live_p, false);
684 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
686 stmt = gsi_stmt (si);
687 if (dump_enabled_p ())
689 dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
690 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
693 if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
694 vect_mark_relevant (&worklist, stmt, relevant, live_p, false);
698 /* 2. Process_worklist */
699 while (worklist.length () > 0)
701 use_operand_p use_p;
702 ssa_op_iter iter;
704 stmt = worklist.pop ();
705 if (dump_enabled_p ())
707 dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
708 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
711 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
712 (DEF_STMT) as relevant/irrelevant and live/dead according to the
713 liveness and relevance properties of STMT. */
714 stmt_vinfo = vinfo_for_stmt (stmt);
715 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
716 live_p = STMT_VINFO_LIVE_P (stmt_vinfo);
718 /* Generally, the liveness and relevance properties of STMT are
719 propagated as is to the DEF_STMTs of its USEs:
720 live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
721 relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)
723 One exception is when STMT has been identified as defining a reduction
724 variable; in this case we set the liveness/relevance as follows:
725 live_p = false
726 relevant = vect_used_by_reduction
727 This is because we distinguish between two kinds of relevant stmts -
728 those that are used by a reduction computation, and those that are
729 (also) used by a regular computation. This allows us later on to
730 identify stmts that are used solely by a reduction, and therefore the
731 order of the results that they produce does not have to be kept. */
733 def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
734 tmp_relevant = relevant;
735 switch (def_type)
737 case vect_reduction_def:
738 switch (tmp_relevant)
740 case vect_unused_in_scope:
741 relevant = vect_used_by_reduction;
742 break;
744 case vect_used_by_reduction:
745 if (gimple_code (stmt) == GIMPLE_PHI)
746 break;
747 /* fall through */
749 default:
750 if (dump_enabled_p ())
751 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
752 "unsupported use of reduction.\n");
753 return false;
756 live_p = false;
757 break;
759 case vect_nested_cycle:
760 if (tmp_relevant != vect_unused_in_scope
761 && tmp_relevant != vect_used_in_outer_by_reduction
762 && tmp_relevant != vect_used_in_outer)
764 if (dump_enabled_p ())
765 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
766 "unsupported use of nested cycle.\n");
768 return false;
771 live_p = false;
772 break;
774 case vect_double_reduction_def:
775 if (tmp_relevant != vect_unused_in_scope
776 && tmp_relevant != vect_used_by_reduction)
778 if (dump_enabled_p ())
779 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
780 "unsupported use of double reduction.\n");
782 return false;
785 live_p = false;
786 break;
788 default:
789 break;
792 if (is_pattern_stmt_p (stmt_vinfo))
794 /* Pattern statements are not inserted into the code, so
795 FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
796 have to scan the RHS or function arguments instead. */
797 if (is_gimple_assign (stmt))
799 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
800 tree op = gimple_assign_rhs1 (stmt);
802 i = 1;
803 if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
805 if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
806 live_p, relevant, &worklist, false)
807 || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
808 live_p, relevant, &worklist, false))
809 return false;
810 i = 2;
812 for (; i < gimple_num_ops (stmt); i++)
814 op = gimple_op (stmt, i);
815 if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
816 &worklist, false))
817 return false;
820 else if (is_gimple_call (stmt))
822 for (i = 0; i < gimple_call_num_args (stmt); i++)
824 tree arg = gimple_call_arg (stmt, i);
825 if (!process_use (stmt, arg, loop_vinfo, live_p, relevant,
826 &worklist, false))
827 return false;
831 else
832 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
834 tree op = USE_FROM_PTR (use_p);
835 if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
836 &worklist, false))
837 return false;
840 if (STMT_VINFO_GATHER_P (stmt_vinfo))
842 tree off;
843 tree decl = vect_check_gather (stmt, loop_vinfo, NULL, &off, NULL);
844 gcc_assert (decl);
845 if (!process_use (stmt, off, loop_vinfo, live_p, relevant,
846 &worklist, true))
847 return false;
849 } /* while worklist */
851 return true;
855 /* Function vect_model_simple_cost.
857 Models cost for simple operations, i.e. those that only emit ncopies of a
858 single op. Right now, this does not account for multiple insns that could
859 be generated for the single vector op. We will handle that shortly. */
861 void
862 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
863 enum vect_def_type *dt,
864 stmt_vector_for_cost *prologue_cost_vec,
865 stmt_vector_for_cost *body_cost_vec)
867 int i;
868 int inside_cost = 0, prologue_cost = 0;
870 /* The SLP costs were already calculated during SLP tree build. */
871 if (PURE_SLP_STMT (stmt_info))
872 return;
874 /* FORNOW: Assuming maximum 2 args per stmts. */
875 for (i = 0; i < 2; i++)
876 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
877 prologue_cost += record_stmt_cost (prologue_cost_vec, 1, vector_stmt,
878 stmt_info, 0, vect_prologue);
880 /* Pass the inside-of-loop statements to the target-specific cost model. */
881 inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
882 stmt_info, 0, vect_body);
884 if (dump_enabled_p ())
885 dump_printf_loc (MSG_NOTE, vect_location,
886 "vect_model_simple_cost: inside_cost = %d, "
887 "prologue_cost = %d .\n", inside_cost, prologue_cost);
891 /* Model cost for type demotion and promotion operations. PWR is normally
892 zero for single-step promotions and demotions. It will be one if
893 two-step promotion/demotion is required, and so on. Each additional
894 step doubles the number of instructions required. */
896 static void
897 vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
898 enum vect_def_type *dt, int pwr)
900 int i, tmp;
901 int inside_cost = 0, prologue_cost = 0;
902 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
903 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
904 void *target_cost_data;
906 /* The SLP costs were already calculated during SLP tree build. */
907 if (PURE_SLP_STMT (stmt_info))
908 return;
910 if (loop_vinfo)
911 target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
912 else
913 target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);
915 for (i = 0; i < pwr + 1; i++)
917 tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
918 (i + 1) : i;
919 inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
920 vec_promote_demote, stmt_info, 0,
921 vect_body);
924 /* FORNOW: Assuming maximum 2 args per stmts. */
925 for (i = 0; i < 2; i++)
926 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
927 prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
928 stmt_info, 0, vect_prologue);
930 if (dump_enabled_p ())
931 dump_printf_loc (MSG_NOTE, vect_location,
932 "vect_model_promotion_demotion_cost: inside_cost = %d, "
933 "prologue_cost = %d .\n", inside_cost, prologue_cost);
936 /* Function vect_cost_group_size
938 For grouped load or store, return the group_size only if it is the first
939 load or store of a group, else return 1. This ensures that group size is
940 only returned once per group. */
942 static int
943 vect_cost_group_size (stmt_vec_info stmt_info)
945 gimple first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
947 if (first_stmt == STMT_VINFO_STMT (stmt_info))
948 return GROUP_SIZE (stmt_info);
950 return 1;
954 /* Function vect_model_store_cost
956 Models cost for stores. In the case of grouped accesses, one access
957 has the overhead of the grouped access attributed to it. */
959 void
960 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
961 bool store_lanes_p, enum vect_def_type dt,
962 slp_tree slp_node,
963 stmt_vector_for_cost *prologue_cost_vec,
964 stmt_vector_for_cost *body_cost_vec)
966 int group_size;
967 unsigned int inside_cost = 0, prologue_cost = 0;
968 struct data_reference *first_dr;
969 gimple first_stmt;
971 if (dt == vect_constant_def || dt == vect_external_def)
972 prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
973 stmt_info, 0, vect_prologue);
975 /* Grouped access? */
976 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
978 if (slp_node)
980 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
981 group_size = 1;
983 else
985 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
986 group_size = vect_cost_group_size (stmt_info);
989 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
991 /* Not a grouped access. */
992 else
994 group_size = 1;
995 first_dr = STMT_VINFO_DATA_REF (stmt_info);
998 /* We assume that the cost of a single store-lanes instruction is
999 equivalent to the cost of GROUP_SIZE separate stores. If a grouped
1000 access is instead being provided by a permute-and-store operation,
1001 include the cost of the permutes. */
1002 if (!store_lanes_p && group_size > 1)
1004 /* Uses high and low interleave or shuffle operations for each
1005 needed permute. */
1006 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
1007 inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
1008 stmt_info, 0, vect_body);
1010 if (dump_enabled_p ())
1011 dump_printf_loc (MSG_NOTE, vect_location,
1012 "vect_model_store_cost: strided group_size = %d .\n",
1013 group_size);
1016 /* Costs of the stores. */
1017 if (STMT_VINFO_STRIDED_P (stmt_info))
1019 /* N scalar stores plus extracting the elements. */
1020 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1021 inside_cost += record_stmt_cost (body_cost_vec,
1022 ncopies * TYPE_VECTOR_SUBPARTS (vectype),
1023 scalar_store, stmt_info, 0, vect_body);
1024 inside_cost += record_stmt_cost (body_cost_vec,
1025 ncopies * TYPE_VECTOR_SUBPARTS (vectype),
1026 vec_to_scalar, stmt_info, 0, vect_body);
1028 else
1029 vect_get_store_cost (first_dr, ncopies, &inside_cost, body_cost_vec);
1031 if (dump_enabled_p ())
1032 dump_printf_loc (MSG_NOTE, vect_location,
1033 "vect_model_store_cost: inside_cost = %d, "
1034 "prologue_cost = %d .\n", inside_cost, prologue_cost);
1038 /* Calculate cost of DR's memory access. */
1039 void
1040 vect_get_store_cost (struct data_reference *dr, int ncopies,
1041 unsigned int *inside_cost,
1042 stmt_vector_for_cost *body_cost_vec)
1044 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
1045 gimple stmt = DR_STMT (dr);
1046 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1048 switch (alignment_support_scheme)
1050 case dr_aligned:
1052 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1053 vector_store, stmt_info, 0,
1054 vect_body);
1056 if (dump_enabled_p ())
1057 dump_printf_loc (MSG_NOTE, vect_location,
1058 "vect_model_store_cost: aligned.\n");
1059 break;
1062 case dr_unaligned_supported:
1064 /* Here, we assign an additional cost for the unaligned store. */
1065 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1066 unaligned_store, stmt_info,
1067 DR_MISALIGNMENT (dr), vect_body);
1068 if (dump_enabled_p ())
1069 dump_printf_loc (MSG_NOTE, vect_location,
1070 "vect_model_store_cost: unaligned supported by "
1071 "hardware.\n");
1072 break;
1075 case dr_unaligned_unsupported:
1077 *inside_cost = VECT_MAX_COST;
1079 if (dump_enabled_p ())
1080 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1081 "vect_model_store_cost: unsupported access.\n");
1082 break;
1085 default:
1086 gcc_unreachable ();
1091 /* Function vect_model_load_cost
1093 Models cost for loads. In the case of grouped accesses, the last access
1094 has the overhead of the grouped access attributed to it. Since unaligned
1095 accesses are supported for loads, we also account for the costs of the
1096 access scheme chosen. */
1098 void
1099 vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
1100 bool load_lanes_p, slp_tree slp_node,
1101 stmt_vector_for_cost *prologue_cost_vec,
1102 stmt_vector_for_cost *body_cost_vec)
1104 int group_size;
1105 gimple first_stmt;
1106 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
1107 unsigned int inside_cost = 0, prologue_cost = 0;
1109 /* Grouped accesses? */
1110 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
1111 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && first_stmt && !slp_node)
1113 group_size = vect_cost_group_size (stmt_info);
1114 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
1116 /* Not a grouped access. */
1117 else
1119 group_size = 1;
1120 first_dr = dr;
1123 /* We assume that the cost of a single load-lanes instruction is
1124 equivalent to the cost of GROUP_SIZE separate loads. If a grouped
1125 access is instead being provided by a load-and-permute operation,
1126 include the cost of the permutes. */
1127 if (!load_lanes_p && group_size > 1
1128 && !STMT_VINFO_STRIDED_P (stmt_info))
1130 /* Uses even and odd extract operations or shuffle operations
1131 for each needed permute. */
1132 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
1133 inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
1134 stmt_info, 0, vect_body);
1136 if (dump_enabled_p ())
1137 dump_printf_loc (MSG_NOTE, vect_location,
1138 "vect_model_load_cost: strided group_size = %d .\n",
1139 group_size);
1142 /* The loads themselves. */
1143 if (STMT_VINFO_STRIDED_P (stmt_info)
1144 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
1146 /* N scalar loads plus gathering them into a vector. */
1147 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1148 inside_cost += record_stmt_cost (body_cost_vec,
1149 ncopies * TYPE_VECTOR_SUBPARTS (vectype),
1150 scalar_load, stmt_info, 0, vect_body);
1152 else
1153 vect_get_load_cost (first_dr, ncopies,
1154 ((!STMT_VINFO_GROUPED_ACCESS (stmt_info))
1155 || group_size > 1 || slp_node),
1156 &inside_cost, &prologue_cost,
1157 prologue_cost_vec, body_cost_vec, true);
1158 if (STMT_VINFO_STRIDED_P (stmt_info))
1159 inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
1160 stmt_info, 0, vect_body);
1162 if (dump_enabled_p ())
1163 dump_printf_loc (MSG_NOTE, vect_location,
1164 "vect_model_load_cost: inside_cost = %d, "
1165 "prologue_cost = %d .\n", inside_cost, prologue_cost);
1169 /* Calculate cost of DR's memory access. */
1170 void
1171 vect_get_load_cost (struct data_reference *dr, int ncopies,
1172 bool add_realign_cost, unsigned int *inside_cost,
1173 unsigned int *prologue_cost,
1174 stmt_vector_for_cost *prologue_cost_vec,
1175 stmt_vector_for_cost *body_cost_vec,
1176 bool record_prologue_costs)
1178 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
1179 gimple stmt = DR_STMT (dr);
1180 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1182 switch (alignment_support_scheme)
1184 case dr_aligned:
1186 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1187 stmt_info, 0, vect_body);
1189 if (dump_enabled_p ())
1190 dump_printf_loc (MSG_NOTE, vect_location,
1191 "vect_model_load_cost: aligned.\n");
1193 break;
1195 case dr_unaligned_supported:
1197 /* Here, we assign an additional cost for the unaligned load. */
1198 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1199 unaligned_load, stmt_info,
1200 DR_MISALIGNMENT (dr), vect_body);
1202 if (dump_enabled_p ())
1203 dump_printf_loc (MSG_NOTE, vect_location,
1204 "vect_model_load_cost: unaligned supported by "
1205 "hardware.\n");
1207 break;
1209 case dr_explicit_realign:
1211 *inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
1212 vector_load, stmt_info, 0, vect_body);
1213 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1214 vec_perm, stmt_info, 0, vect_body);
1216 /* FIXME: If the misalignment remains fixed across the iterations of
1217 the containing loop, the following cost should be added to the
1218 prologue costs. */
1219 if (targetm.vectorize.builtin_mask_for_load)
1220 *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
1221 stmt_info, 0, vect_body);
1223 if (dump_enabled_p ())
1224 dump_printf_loc (MSG_NOTE, vect_location,
1225 "vect_model_load_cost: explicit realign\n");
1227 break;
1229 case dr_explicit_realign_optimized:
1231 if (dump_enabled_p ())
1232 dump_printf_loc (MSG_NOTE, vect_location,
1233 "vect_model_load_cost: unaligned software "
1234 "pipelined.\n");
1236 /* Unaligned software pipeline has a load of an address, an initial
1237 load, and possibly a mask operation to "prime" the loop. However,
1238 if this is an access in a group of loads, which provide grouped
1239 access, then the above cost should only be considered for one
1240 access in the group. Inside the loop, there is a load op
1241 and a realignment op. */
1243 if (add_realign_cost && record_prologue_costs)
1245 *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
1246 vector_stmt, stmt_info,
1247 0, vect_prologue);
1248 if (targetm.vectorize.builtin_mask_for_load)
1249 *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
1250 vector_stmt, stmt_info,
1251 0, vect_prologue);
1254 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1255 stmt_info, 0, vect_body);
1256 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
1257 stmt_info, 0, vect_body);
1259 if (dump_enabled_p ())
1260 dump_printf_loc (MSG_NOTE, vect_location,
1261 "vect_model_load_cost: explicit realign optimized"
1262 "\n");
1264 break;
1267 case dr_unaligned_unsupported:
1269 *inside_cost = VECT_MAX_COST;
1271 if (dump_enabled_p ())
1272 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1273 "vect_model_load_cost: unsupported access.\n");
1274 break;
1277 default:
1278 gcc_unreachable ();
1282 /* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
1283 the loop preheader for the vectorized stmt STMT. */
1285 static void
1286 vect_init_vector_1 (gimple stmt, gimple new_stmt, gimple_stmt_iterator *gsi)
1288 if (gsi)
1289 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1290 else
1292 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1293 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1295 if (loop_vinfo)
1297 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1298 basic_block new_bb;
1299 edge pe;
1301 if (nested_in_vect_loop_p (loop, stmt))
1302 loop = loop->inner;
1304 pe = loop_preheader_edge (loop);
1305 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
1306 gcc_assert (!new_bb);
1308 else
1310 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
1311 basic_block bb;
1312 gimple_stmt_iterator gsi_bb_start;
1314 gcc_assert (bb_vinfo);
1315 bb = BB_VINFO_BB (bb_vinfo);
1316 gsi_bb_start = gsi_after_labels (bb);
1317 gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
1321 if (dump_enabled_p ())
1323 dump_printf_loc (MSG_NOTE, vect_location,
1324 "created new init_stmt: ");
1325 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
1329 /* Function vect_init_vector.
1331 Insert a new stmt (INIT_STMT) that initializes a new variable of type
1332 TYPE with the value VAL. If TYPE is a vector type and VAL does not have
1333 vector type a vector with all elements equal to VAL is created first.
1334 Place the initialization at BSI if it is not NULL. Otherwise, place the
1335 initialization at the loop preheader.
1336 Return the DEF of INIT_STMT.
1337 It will be used in the vectorization of STMT. */
1339 tree
1340 vect_init_vector (gimple stmt, tree val, tree type, gimple_stmt_iterator *gsi)
1342 tree new_var;
1343 gimple init_stmt;
1344 tree vec_oprnd;
1345 tree new_temp;
1347 if (TREE_CODE (type) == VECTOR_TYPE
1348 && TREE_CODE (TREE_TYPE (val)) != VECTOR_TYPE)
1350 if (!types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
1352 if (CONSTANT_CLASS_P (val))
1353 val = fold_unary (VIEW_CONVERT_EXPR, TREE_TYPE (type), val);
1354 else
1356 new_temp = make_ssa_name (TREE_TYPE (type));
1357 init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
1358 vect_init_vector_1 (stmt, init_stmt, gsi);
1359 val = new_temp;
1362 val = build_vector_from_val (type, val);
1365 new_var = vect_get_new_vect_var (type, vect_simple_var, "cst_");
1366 init_stmt = gimple_build_assign (new_var, val);
1367 new_temp = make_ssa_name (new_var, init_stmt);
1368 gimple_assign_set_lhs (init_stmt, new_temp);
1369 vect_init_vector_1 (stmt, init_stmt, gsi);
1370 vec_oprnd = gimple_assign_lhs (init_stmt);
1371 return vec_oprnd;
1375 /* Function vect_get_vec_def_for_operand.
1377 OP is an operand in STMT. This function returns a (vector) def that will be
1378 used in the vectorized stmt for STMT.
1380 In the case that OP is an SSA_NAME which is defined in the loop, then
1381 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
1383 In case OP is an invariant or constant, a new stmt that creates a vector def
1384 needs to be introduced. */
1386 tree
1387 vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
1389 tree vec_oprnd;
1390 gimple vec_stmt;
1391 gimple def_stmt;
1392 stmt_vec_info def_stmt_info = NULL;
1393 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1394 unsigned int nunits;
1395 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1396 tree def;
1397 enum vect_def_type dt;
1398 bool is_simple_use;
1399 tree vector_type;
1401 if (dump_enabled_p ())
1403 dump_printf_loc (MSG_NOTE, vect_location,
1404 "vect_get_vec_def_for_operand: ");
1405 dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
1406 dump_printf (MSG_NOTE, "\n");
1409 is_simple_use = vect_is_simple_use (op, stmt, loop_vinfo, NULL,
1410 &def_stmt, &def, &dt);
1411 gcc_assert (is_simple_use);
1412 if (dump_enabled_p ())
1414 int loc_printed = 0;
1415 if (def)
1417 dump_printf_loc (MSG_NOTE, vect_location, "def = ");
1418 loc_printed = 1;
1419 dump_generic_expr (MSG_NOTE, TDF_SLIM, def);
1420 dump_printf (MSG_NOTE, "\n");
1422 if (def_stmt)
1424 if (loc_printed)
1425 dump_printf (MSG_NOTE, " def_stmt = ");
1426 else
1427 dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = ");
1428 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
1432 switch (dt)
1434 /* Case 1: operand is a constant. */
1435 case vect_constant_def:
1437 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
1438 gcc_assert (vector_type);
1439 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
1441 if (scalar_def)
1442 *scalar_def = op;
1444 /* Create 'vect_cst_ = {cst,cst,...,cst}' */
1445 if (dump_enabled_p ())
1446 dump_printf_loc (MSG_NOTE, vect_location,
1447 "Create vector_cst. nunits = %d\n", nunits);
1449 return vect_init_vector (stmt, op, vector_type, NULL);
1452 /* Case 2: operand is defined outside the loop - loop invariant. */
1453 case vect_external_def:
1455 vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
1456 gcc_assert (vector_type);
1458 if (scalar_def)
1459 *scalar_def = def;
1461 /* Create 'vec_inv = {inv,inv,..,inv}' */
1462 if (dump_enabled_p ())
1463 dump_printf_loc (MSG_NOTE, vect_location, "Create vector_inv.\n");
1465 return vect_init_vector (stmt, def, vector_type, NULL);
1468 /* Case 3: operand is defined inside the loop. */
1469 case vect_internal_def:
1471 if (scalar_def)
1472 *scalar_def = NULL/* FIXME tuples: def_stmt*/;
1474 /* Get the def from the vectorized stmt. */
1475 def_stmt_info = vinfo_for_stmt (def_stmt);
1477 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1478 /* Get vectorized pattern statement. */
1479 if (!vec_stmt
1480 && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
1481 && !STMT_VINFO_RELEVANT (def_stmt_info))
1482 vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
1483 STMT_VINFO_RELATED_STMT (def_stmt_info)));
1484 gcc_assert (vec_stmt);
1485 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1486 vec_oprnd = PHI_RESULT (vec_stmt);
1487 else if (is_gimple_call (vec_stmt))
1488 vec_oprnd = gimple_call_lhs (vec_stmt);
1489 else
1490 vec_oprnd = gimple_assign_lhs (vec_stmt);
1491 return vec_oprnd;
1494 /* Case 4: operand is defined by a loop header phi - reduction */
1495 case vect_reduction_def:
1496 case vect_double_reduction_def:
1497 case vect_nested_cycle:
1499 struct loop *loop;
1501 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1502 loop = (gimple_bb (def_stmt))->loop_father;
1504 /* Get the def before the loop */
1505 op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
1506 return get_initial_def_for_reduction (stmt, op, scalar_def);
1509 /* Case 5: operand is defined by loop-header phi - induction. */
1510 case vect_induction_def:
1512 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1514 /* Get the def from the vectorized stmt. */
1515 def_stmt_info = vinfo_for_stmt (def_stmt);
1516 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1517 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1518 vec_oprnd = PHI_RESULT (vec_stmt);
1519 else
1520 vec_oprnd = gimple_get_lhs (vec_stmt);
1521 return vec_oprnd;
1524 default:
1525 gcc_unreachable ();
1530 /* Function vect_get_vec_def_for_stmt_copy
1532 Return a vector-def for an operand. This function is used when the
1533 vectorized stmt to be created (by the caller to this function) is a "copy"
1534 created in case the vectorized result cannot fit in one vector, and several
1535 copies of the vector-stmt are required. In this case the vector-def is
1536 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1537 of the stmt that defines VEC_OPRND.
1538 DT is the type of the vector def VEC_OPRND.
1540 Context:
1541 In case the vectorization factor (VF) is bigger than the number
1542 of elements that can fit in a vectype (nunits), we have to generate
1543 more than one vector stmt to vectorize the scalar stmt. This situation
1544 arises when there are multiple data-types operated upon in the loop; the
1545 smallest data-type determines the VF, and as a result, when vectorizing
1546 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1547 vector stmt (each computing a vector of 'nunits' results, and together
1548 computing 'VF' results in each iteration). This function is called when
1549 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1550 which VF=16 and nunits=4, so the number of copies required is 4):
1552 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1554 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1555 VS1.1: vx.1 = memref1 VS1.2
1556 VS1.2: vx.2 = memref2 VS1.3
1557 VS1.3: vx.3 = memref3
1559 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1560 VSnew.1: vz1 = vx.1 + ... VSnew.2
1561 VSnew.2: vz2 = vx.2 + ... VSnew.3
1562 VSnew.3: vz3 = vx.3 + ...
1564 The vectorization of S1 is explained in vectorizable_load.
1565 The vectorization of S2:
1566 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1567 the function 'vect_get_vec_def_for_operand' is called to
1568 get the relevant vector-def for each operand of S2. For operand x it
1569 returns the vector-def 'vx.0'.
1571 To create the remaining copies of the vector-stmt (VSnew.j), this
1572 function is called to get the relevant vector-def for each operand. It is
1573 obtained from the respective VS1.j stmt, which is recorded in the
1574 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1576 For example, to obtain the vector-def 'vx.1' in order to create the
1577 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1578 Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
1579 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1580 and return its def ('vx.1').
1581 Overall, to create the above sequence this function will be called 3 times:
1582 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1583 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1584 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1586 tree
1587 vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1589 gimple vec_stmt_for_operand;
1590 stmt_vec_info def_stmt_info;
1592 /* Do nothing; can reuse same def. */
1593 if (dt == vect_external_def || dt == vect_constant_def )
1594 return vec_oprnd;
1596 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1597 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1598 gcc_assert (def_stmt_info);
1599 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1600 gcc_assert (vec_stmt_for_operand);
1601 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1602 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1603 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1604 else
1605 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1606 return vec_oprnd;
1610 /* Get vectorized definitions for the operands to create a copy of an original
1611 stmt. See vect_get_vec_def_for_stmt_copy () for details. */
1613 static void
1614 vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
1615 vec<tree> *vec_oprnds0,
1616 vec<tree> *vec_oprnds1)
1618 tree vec_oprnd = vec_oprnds0->pop ();
1620 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
1621 vec_oprnds0->quick_push (vec_oprnd);
1623 if (vec_oprnds1 && vec_oprnds1->length ())
1625 vec_oprnd = vec_oprnds1->pop ();
1626 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
1627 vec_oprnds1->quick_push (vec_oprnd);
1632 /* Get vectorized definitions for OP0 and OP1.
1633 REDUC_INDEX is the index of reduction operand in case of reduction,
1634 and -1 otherwise. */
1636 void
1637 vect_get_vec_defs (tree op0, tree op1, gimple stmt,
1638 vec<tree> *vec_oprnds0,
1639 vec<tree> *vec_oprnds1,
1640 slp_tree slp_node, int reduc_index)
1642 if (slp_node)
1644 int nops = (op1 == NULL_TREE) ? 1 : 2;
1645 auto_vec<tree> ops (nops);
1646 auto_vec<vec<tree> > vec_defs (nops);
1648 ops.quick_push (op0);
1649 if (op1)
1650 ops.quick_push (op1);
1652 vect_get_slp_defs (ops, slp_node, &vec_defs, reduc_index);
1654 *vec_oprnds0 = vec_defs[0];
1655 if (op1)
1656 *vec_oprnds1 = vec_defs[1];
1658 else
1660 tree vec_oprnd;
1662 vec_oprnds0->create (1);
1663 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
1664 vec_oprnds0->quick_push (vec_oprnd);
1666 if (op1)
1668 vec_oprnds1->create (1);
1669 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
1670 vec_oprnds1->quick_push (vec_oprnd);
1676 /* Function vect_finish_stmt_generation.
1678 Insert a new stmt. */
1680 void
1681 vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
1682 gimple_stmt_iterator *gsi)
1684 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1685 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1686 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1688 gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
1690 if (!gsi_end_p (*gsi)
1691 && gimple_has_mem_ops (vec_stmt))
1693 gimple at_stmt = gsi_stmt (*gsi);
1694 tree vuse = gimple_vuse (at_stmt);
1695 if (vuse && TREE_CODE (vuse) == SSA_NAME)
1697 tree vdef = gimple_vdef (at_stmt);
1698 gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
1699 /* If we have an SSA vuse and insert a store, update virtual
1700 SSA form to avoid triggering the renamer. Do so only
1701 if we can easily see all uses - which is what almost always
1702 happens with the way vectorized stmts are inserted. */
1703 if ((vdef && TREE_CODE (vdef) == SSA_NAME)
1704 && ((is_gimple_assign (vec_stmt)
1705 && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
1706 || (is_gimple_call (vec_stmt)
1707 && !(gimple_call_flags (vec_stmt)
1708 & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
1710 tree new_vdef = copy_ssa_name (vuse, vec_stmt);
1711 gimple_set_vdef (vec_stmt, new_vdef);
1712 SET_USE (gimple_vuse_op (at_stmt), new_vdef);
1716 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1718 set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
1719 bb_vinfo));
1721 if (dump_enabled_p ())
1723 dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
1724 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
1727 gimple_set_location (vec_stmt, gimple_location (stmt));
1729 /* While EH edges will generally prevent vectorization, stmt might
1730 e.g. be in a must-not-throw region. Ensure newly created stmts
1731 that could throw are part of the same region. */
1732 int lp_nr = lookup_stmt_eh_lp (stmt);
1733 if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
1734 add_stmt_to_eh_lp (vec_stmt, lp_nr);
1737 /* Checks if CALL can be vectorized in type VECTYPE. Returns
1738 a function declaration if the target has a vectorized version
1739 of the function, or NULL_TREE if the function cannot be vectorized. */
1741 tree
1742 vectorizable_function (gcall *call, tree vectype_out, tree vectype_in)
1744 tree fndecl = gimple_call_fndecl (call);
1746 /* We only handle functions that do not read or clobber memory -- i.e.
1747 const or novops ones. */
1748 if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
1749 return NULL_TREE;
1751 if (!fndecl
1752 || TREE_CODE (fndecl) != FUNCTION_DECL
1753 || !DECL_BUILT_IN (fndecl))
1754 return NULL_TREE;
1756 return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
1757 vectype_in);
1761 static tree permute_vec_elements (tree, tree, tree, gimple,
1762 gimple_stmt_iterator *);
1765 /* Function vectorizable_mask_load_store.
1767 Check if STMT performs a conditional load or store that can be vectorized.
1768 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1769 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
1770 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1772 static bool
1773 vectorizable_mask_load_store (gimple stmt, gimple_stmt_iterator *gsi,
1774 gimple *vec_stmt, slp_tree slp_node)
1776 tree vec_dest = NULL;
1777 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1778 stmt_vec_info prev_stmt_info;
1779 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1780 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1781 bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
1782 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
1783 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1784 tree elem_type;
1785 gimple new_stmt;
1786 tree dummy;
1787 tree dataref_ptr = NULL_TREE;
1788 gimple ptr_incr;
1789 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
1790 int ncopies;
1791 int i, j;
1792 bool inv_p;
1793 tree gather_base = NULL_TREE, gather_off = NULL_TREE;
1794 tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
1795 int gather_scale = 1;
1796 enum vect_def_type gather_dt = vect_unknown_def_type;
1797 bool is_store;
1798 tree mask;
1799 gimple def_stmt;
1800 tree def;
1801 enum vect_def_type dt;
1803 if (slp_node != NULL)
1804 return false;
1806 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
1807 gcc_assert (ncopies >= 1);
1809 is_store = gimple_call_internal_fn (stmt) == IFN_MASK_STORE;
1810 mask = gimple_call_arg (stmt, 2);
1811 if (TYPE_PRECISION (TREE_TYPE (mask))
1812 != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype))))
1813 return false;
1815 /* FORNOW. This restriction should be relaxed. */
1816 if (nested_in_vect_loop && ncopies > 1)
1818 if (dump_enabled_p ())
1819 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1820 "multiple types in nested loop.");
1821 return false;
1824 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1825 return false;
1827 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1828 return false;
1830 if (!STMT_VINFO_DATA_REF (stmt_info))
1831 return false;
1833 elem_type = TREE_TYPE (vectype);
1835 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1836 return false;
1838 if (STMT_VINFO_STRIDED_P (stmt_info))
1839 return false;
1841 if (STMT_VINFO_GATHER_P (stmt_info))
1843 gimple def_stmt;
1844 tree def;
1845 gather_decl = vect_check_gather (stmt, loop_vinfo, &gather_base,
1846 &gather_off, &gather_scale);
1847 gcc_assert (gather_decl);
1848 if (!vect_is_simple_use_1 (gather_off, NULL, loop_vinfo, NULL,
1849 &def_stmt, &def, &gather_dt,
1850 &gather_off_vectype))
1852 if (dump_enabled_p ())
1853 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1854 "gather index use not simple.");
1855 return false;
1858 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
1859 tree masktype
1860 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
1861 if (TREE_CODE (masktype) == INTEGER_TYPE)
1863 if (dump_enabled_p ())
1864 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1865 "masked gather with integer mask not supported.\n");
1866 return false;
1869 else if (tree_int_cst_compare (nested_in_vect_loop
1870 ? STMT_VINFO_DR_STEP (stmt_info)
1871 : DR_STEP (dr), size_zero_node) <= 0)
1872 return false;
1873 else if (!VECTOR_MODE_P (TYPE_MODE (vectype))
1874 || !can_vec_mask_load_store_p (TYPE_MODE (vectype), !is_store))
1875 return false;
1877 if (TREE_CODE (mask) != SSA_NAME)
1878 return false;
1880 if (!vect_is_simple_use (mask, stmt, loop_vinfo, NULL,
1881 &def_stmt, &def, &dt))
1882 return false;
1884 if (is_store)
1886 tree rhs = gimple_call_arg (stmt, 3);
1887 if (!vect_is_simple_use (rhs, stmt, loop_vinfo, NULL,
1888 &def_stmt, &def, &dt))
1889 return false;
1892 if (!vec_stmt) /* transformation not required. */
1894 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
1895 if (is_store)
1896 vect_model_store_cost (stmt_info, ncopies, false, dt,
1897 NULL, NULL, NULL);
1898 else
1899 vect_model_load_cost (stmt_info, ncopies, false, NULL, NULL, NULL);
1900 return true;
1903 /** Transform. **/
1905 if (STMT_VINFO_GATHER_P (stmt_info))
1907 tree vec_oprnd0 = NULL_TREE, op;
1908 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
1909 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
1910 tree ptr, vec_mask = NULL_TREE, mask_op = NULL_TREE, var, scale;
1911 tree perm_mask = NULL_TREE, prev_res = NULL_TREE;
1912 tree mask_perm_mask = NULL_TREE;
1913 edge pe = loop_preheader_edge (loop);
1914 gimple_seq seq;
1915 basic_block new_bb;
1916 enum { NARROW, NONE, WIDEN } modifier;
1917 int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);
1919 rettype = TREE_TYPE (TREE_TYPE (gather_decl));
1920 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1921 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1922 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1923 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1924 scaletype = TREE_VALUE (arglist);
1925 gcc_checking_assert (types_compatible_p (srctype, rettype)
1926 && types_compatible_p (srctype, masktype));
1928 if (nunits == gather_off_nunits)
1929 modifier = NONE;
1930 else if (nunits == gather_off_nunits / 2)
1932 unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
1933 modifier = WIDEN;
1935 for (i = 0; i < gather_off_nunits; ++i)
1936 sel[i] = i | nunits;
1938 perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
1940 else if (nunits == gather_off_nunits * 2)
1942 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
1943 modifier = NARROW;
1945 for (i = 0; i < nunits; ++i)
1946 sel[i] = i < gather_off_nunits
1947 ? i : i + nunits - gather_off_nunits;
1949 perm_mask = vect_gen_perm_mask_checked (vectype, sel);
1950 ncopies *= 2;
1951 for (i = 0; i < nunits; ++i)
1952 sel[i] = i | gather_off_nunits;
1953 mask_perm_mask = vect_gen_perm_mask_checked (masktype, sel);
1955 else
1956 gcc_unreachable ();
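  /* A sketch of the three cases above: with NONE one gather call consumes one
     offset vector and produces one data vector.  With NARROW the data vectype
     holds twice as many elements as one gather result, so ncopies is doubled,
     odd copies reuse the mask permuted by MASK_PERM_MASK, and each pair of
     gather results is combined into one data vector with PERM_MASK further
     below.  With WIDEN the offset vector holds twice as many elements as one
     gather needs, so odd copies reuse it permuted by PERM_MASK instead of
     fetching a new offset definition.  */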
1958 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
1960 ptr = fold_convert (ptrtype, gather_base);
1961 if (!is_gimple_min_invariant (ptr))
1963 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
1964 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
1965 gcc_assert (!new_bb);
1968 scale = build_int_cst (scaletype, gather_scale);
1970 prev_stmt_info = NULL;
1971 for (j = 0; j < ncopies; ++j)
1973 if (modifier == WIDEN && (j & 1))
1974 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
1975 perm_mask, stmt, gsi);
1976 else if (j == 0)
1977 op = vec_oprnd0
1978 = vect_get_vec_def_for_operand (gather_off, stmt, NULL);
1979 else
1980 op = vec_oprnd0
1981 = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);
1983 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
1985 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
1986 == TYPE_VECTOR_SUBPARTS (idxtype));
1987 var = vect_get_new_vect_var (idxtype, vect_simple_var, NULL);
1988 var = make_ssa_name (var);
1989 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
1990 new_stmt
1991 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
1992 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1993 op = var;
1996 if (mask_perm_mask && (j & 1))
1997 mask_op = permute_vec_elements (mask_op, mask_op,
1998 mask_perm_mask, stmt, gsi);
1999 else
2001 if (j == 0)
2002 vec_mask = vect_get_vec_def_for_operand (mask, stmt, NULL);
2003 else
2005 vect_is_simple_use (vec_mask, NULL, loop_vinfo, NULL,
2006 &def_stmt, &def, &dt);
2007 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2010 mask_op = vec_mask;
2011 if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
2013 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op))
2014 == TYPE_VECTOR_SUBPARTS (masktype));
2015 var = vect_get_new_vect_var (masktype, vect_simple_var,
2016 NULL);
2017 var = make_ssa_name (var);
2018 mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
2019 new_stmt
2020 = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op);
2021 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2022 mask_op = var;
2026 new_stmt
2027 = gimple_build_call (gather_decl, 5, mask_op, ptr, op, mask_op,
2028 scale);
2030 if (!useless_type_conversion_p (vectype, rettype))
2032 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
2033 == TYPE_VECTOR_SUBPARTS (rettype));
2034 var = vect_get_new_vect_var (rettype, vect_simple_var, NULL);
2035 op = make_ssa_name (var, new_stmt);
2036 gimple_call_set_lhs (new_stmt, op);
2037 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2038 var = make_ssa_name (vec_dest);
2039 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
2040 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
2042 else
2044 var = make_ssa_name (vec_dest, new_stmt);
2045 gimple_call_set_lhs (new_stmt, var);
2048 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2050 if (modifier == NARROW)
2052 if ((j & 1) == 0)
2054 prev_res = var;
2055 continue;
2057 var = permute_vec_elements (prev_res, var,
2058 perm_mask, stmt, gsi);
2059 new_stmt = SSA_NAME_DEF_STMT (var);
2062 if (prev_stmt_info == NULL)
2063 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2064 else
2065 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2066 prev_stmt_info = vinfo_for_stmt (new_stmt);
2069 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
2070 from the IL. */
2071 tree lhs = gimple_call_lhs (stmt);
2072 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
2073 set_vinfo_for_stmt (new_stmt, stmt_info);
2074 set_vinfo_for_stmt (stmt, NULL);
2075 STMT_VINFO_STMT (stmt_info) = new_stmt;
2076 gsi_replace (gsi, new_stmt, true);
2077 return true;
2079 else if (is_store)
2081 tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
2082 prev_stmt_info = NULL;
2083 for (i = 0; i < ncopies; i++)
2085 unsigned align, misalign;
2087 if (i == 0)
2089 tree rhs = gimple_call_arg (stmt, 3);
2090 vec_rhs = vect_get_vec_def_for_operand (rhs, stmt, NULL);
2091 vec_mask = vect_get_vec_def_for_operand (mask, stmt, NULL);
2092 /* We should have caught mismatched types earlier. */
2093 gcc_assert (useless_type_conversion_p (vectype,
2094 TREE_TYPE (vec_rhs)));
2095 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
2096 NULL_TREE, &dummy, gsi,
2097 &ptr_incr, false, &inv_p);
2098 gcc_assert (!inv_p);
2100 else
2102 vect_is_simple_use (vec_rhs, NULL, loop_vinfo, NULL, &def_stmt,
2103 &def, &dt);
2104 vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs);
2105 vect_is_simple_use (vec_mask, NULL, loop_vinfo, NULL, &def_stmt,
2106 &def, &dt);
2107 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2108 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2109 TYPE_SIZE_UNIT (vectype));
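	  /* Record what is known about the pointer's alignment: the full
	     vector alignment if the access is known to be aligned, only the
	     element alignment if the misalignment is unknown
	     (DR_MISALIGNMENT == -1), and the exact misalignment otherwise.  */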
2112 align = TYPE_ALIGN_UNIT (vectype);
2113 if (aligned_access_p (dr))
2114 misalign = 0;
2115 else if (DR_MISALIGNMENT (dr) == -1)
2117 align = TYPE_ALIGN_UNIT (elem_type);
2118 misalign = 0;
2120 else
2121 misalign = DR_MISALIGNMENT (dr);
2122 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2123 misalign);
2124 new_stmt
2125 = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
2126 gimple_call_arg (stmt, 1),
2127 vec_mask, vec_rhs);
2128 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2129 if (i == 0)
2130 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2131 else
2132 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2133 prev_stmt_info = vinfo_for_stmt (new_stmt);
2136 else
2138 tree vec_mask = NULL_TREE;
2139 prev_stmt_info = NULL;
2140 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
2141 for (i = 0; i < ncopies; i++)
2143 unsigned align, misalign;
2145 if (i == 0)
2147 vec_mask = vect_get_vec_def_for_operand (mask, stmt, NULL);
2148 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
2149 NULL_TREE, &dummy, gsi,
2150 &ptr_incr, false, &inv_p);
2151 gcc_assert (!inv_p);
2153 else
2155 vect_is_simple_use (vec_mask, NULL, loop_vinfo, NULL, &def_stmt,
2156 &def, &dt);
2157 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2158 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2159 TYPE_SIZE_UNIT (vectype));
2162 align = TYPE_ALIGN_UNIT (vectype);
2163 if (aligned_access_p (dr))
2164 misalign = 0;
2165 else if (DR_MISALIGNMENT (dr) == -1)
2167 align = TYPE_ALIGN_UNIT (elem_type);
2168 misalign = 0;
2170 else
2171 misalign = DR_MISALIGNMENT (dr);
2172 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2173 misalign);
2174 new_stmt
2175 = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
2176 gimple_call_arg (stmt, 1),
2177 vec_mask);
2178 gimple_call_set_lhs (new_stmt, make_ssa_name (vec_dest));
2179 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2180 if (i == 0)
2181 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2182 else
2183 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2184 prev_stmt_info = vinfo_for_stmt (new_stmt);
2188 if (!is_store)
2190 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
2191 from the IL. */
2192 tree lhs = gimple_call_lhs (stmt);
2193 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
2194 set_vinfo_for_stmt (new_stmt, stmt_info);
2195 set_vinfo_for_stmt (stmt, NULL);
2196 STMT_VINFO_STMT (stmt_info) = new_stmt;
2197 gsi_replace (gsi, new_stmt, true);
2200 return true;
2204 /* Function vectorizable_call.
2206 Check if GS performs a function call that can be vectorized.
2207 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2208 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
2209 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
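/* A sketch of the kind of call handled here (assuming the target advertises
   a vectorized variant through builtin_vectorized_function):

     for (i = 0; i < n; i++)
       a[i] = __builtin_copysignf (b[i], c[i]);

   so that a vector's worth of iterations becomes a single call to the
   target's vector builtin.  */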
2211 static bool
2212 vectorizable_call (gimple gs, gimple_stmt_iterator *gsi, gimple *vec_stmt,
2213 slp_tree slp_node)
2215 gcall *stmt;
2216 tree vec_dest;
2217 tree scalar_dest;
2218 tree op, type;
2219 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
2220 stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info;
2221 tree vectype_out, vectype_in;
2222 int nunits_in;
2223 int nunits_out;
2224 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2225 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2226 tree fndecl, new_temp, def, rhs_type;
2227 gimple def_stmt;
2228 enum vect_def_type dt[3]
2229 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
2230 gimple new_stmt = NULL;
2231 int ncopies, j;
2232 vec<tree> vargs = vNULL;
2233 enum { NARROW, NONE, WIDEN } modifier;
2234 size_t i, nargs;
2235 tree lhs;
2237 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2238 return false;
2240 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2241 return false;
2243 /* Is GS a vectorizable call? */
2244 stmt = dyn_cast <gcall *> (gs);
2245 if (!stmt)
2246 return false;
2248 if (gimple_call_internal_p (stmt)
2249 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
2250 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
2251 return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
2252 slp_node);
2254 if (gimple_call_lhs (stmt) == NULL_TREE
2255 || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
2256 return false;
2258 gcc_checking_assert (!stmt_can_throw_internal (stmt));
2260 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2262 /* Process function arguments. */
2263 rhs_type = NULL_TREE;
2264 vectype_in = NULL_TREE;
2265 nargs = gimple_call_num_args (stmt);
2267 /* Bail out if the function has more than three arguments; we do not have
2268 interesting builtin functions to vectorize with more than two arguments
2269 except for fma.  A call with no arguments is not interesting either. */
2270 if (nargs == 0 || nargs > 3)
2271 return false;
2273 /* Ignore the argument of IFN_GOMP_SIMD_LANE; it is magic. */
2274 if (gimple_call_internal_p (stmt)
2275 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2277 nargs = 0;
2278 rhs_type = unsigned_type_node;
2281 for (i = 0; i < nargs; i++)
2283 tree opvectype;
2285 op = gimple_call_arg (stmt, i);
2287 /* We can only handle calls with arguments of the same type. */
2288 if (rhs_type
2289 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
2291 if (dump_enabled_p ())
2292 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2293 "argument types differ.\n");
2294 return false;
2296 if (!rhs_type)
2297 rhs_type = TREE_TYPE (op);
2299 if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
2300 &def_stmt, &def, &dt[i], &opvectype))
2302 if (dump_enabled_p ())
2303 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2304 "use not simple.\n");
2305 return false;
2308 if (!vectype_in)
2309 vectype_in = opvectype;
2310 else if (opvectype
2311 && opvectype != vectype_in)
2313 if (dump_enabled_p ())
2314 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2315 "argument vector types differ.\n");
2316 return false;
2319 /* If all arguments are external or constant defs, use a vector type with
2320 the same size as the output vector type. */
2321 if (!vectype_in)
2322 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
2323 if (vec_stmt)
2324 gcc_assert (vectype_in);
2325 if (!vectype_in)
2327 if (dump_enabled_p ())
2329 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2330 "no vectype for scalar type ");
2331 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
2332 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2335 return false;
2338 /* FORNOW */
2339 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2340 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
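  /* E.g. a call whose input vectype has 2 elements and whose output vectype
     has 4 elements is NARROW: in the transform below each vectorized call is
     handed two input vectors (2 * nargs operands) to produce one output
     vector.  */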
2341 if (nunits_in == nunits_out / 2)
2342 modifier = NARROW;
2343 else if (nunits_out == nunits_in)
2344 modifier = NONE;
2345 else if (nunits_out == nunits_in / 2)
2346 modifier = WIDEN;
2347 else
2348 return false;
2350 /* For now, we only vectorize functions if a target specific builtin
2351 is available. TODO -- in some cases, it might be profitable to
2352 insert the calls for pieces of the vector, in order to be able
2353 to vectorize other operations in the loop. */
2354 fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
2355 if (fndecl == NULL_TREE)
2357 if (gimple_call_internal_p (stmt)
2358 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE
2359 && !slp_node
2360 && loop_vinfo
2361 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2362 && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
2363 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2364 == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
2366 /* We can handle IFN_GOMP_SIMD_LANE by returning a
2367 { 0, 1, 2, ... vf - 1 } vector. */
2368 gcc_assert (nargs == 0);
2370 else
2372 if (dump_enabled_p ())
2373 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2374 "function is not vectorizable.\n");
2375 return false;
2379 gcc_assert (!gimple_vuse (stmt));
2381 if (slp_node || PURE_SLP_STMT (stmt_info))
2382 ncopies = 1;
2383 else if (modifier == NARROW)
2384 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2385 else
2386 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2388 /* Sanity check: make sure that at least one copy of the vectorized stmt
2389 needs to be generated. */
2390 gcc_assert (ncopies >= 1);
2392 if (!vec_stmt) /* transformation not required. */
2394 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2395 if (dump_enabled_p ())
2396 dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
2397 "\n");
2398 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
2399 return true;
2402 /** Transform. **/
2404 if (dump_enabled_p ())
2405 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
2407 /* Handle def. */
2408 scalar_dest = gimple_call_lhs (stmt);
2409 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2411 prev_stmt_info = NULL;
2412 switch (modifier)
2414 case NONE:
2415 for (j = 0; j < ncopies; ++j)
2417 /* Build argument list for the vectorized call. */
2418 if (j == 0)
2419 vargs.create (nargs);
2420 else
2421 vargs.truncate (0);
2423 if (slp_node)
2425 auto_vec<vec<tree> > vec_defs (nargs);
2426 vec<tree> vec_oprnds0;
2428 for (i = 0; i < nargs; i++)
2429 vargs.quick_push (gimple_call_arg (stmt, i));
2430 vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
2431 vec_oprnds0 = vec_defs[0];
2433 /* Arguments are ready. Create the new vector stmt. */
2434 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
2436 size_t k;
2437 for (k = 0; k < nargs; k++)
2439 vec<tree> vec_oprndsk = vec_defs[k];
2440 vargs[k] = vec_oprndsk[i];
2442 new_stmt = gimple_build_call_vec (fndecl, vargs);
2443 new_temp = make_ssa_name (vec_dest, new_stmt);
2444 gimple_call_set_lhs (new_stmt, new_temp);
2445 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2446 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2449 for (i = 0; i < nargs; i++)
2451 vec<tree> vec_oprndsi = vec_defs[i];
2452 vec_oprndsi.release ();
2454 continue;
2457 for (i = 0; i < nargs; i++)
2459 op = gimple_call_arg (stmt, i);
2460 if (j == 0)
2461 vec_oprnd0
2462 = vect_get_vec_def_for_operand (op, stmt, NULL);
2463 else
2465 vec_oprnd0 = gimple_call_arg (new_stmt, i);
2466 vec_oprnd0
2467 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2470 vargs.quick_push (vec_oprnd0);
2473 if (gimple_call_internal_p (stmt)
2474 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2476 tree *v = XALLOCAVEC (tree, nunits_out);
2477 int k;
2478 for (k = 0; k < nunits_out; ++k)
2479 v[k] = build_int_cst (unsigned_type_node, j * nunits_out + k);
2480 tree cst = build_vector (vectype_out, v);
2481 tree new_var
2482 = vect_get_new_vect_var (vectype_out, vect_simple_var, "cst_");
2483 gimple init_stmt = gimple_build_assign (new_var, cst);
2484 new_temp = make_ssa_name (new_var, init_stmt);
2485 gimple_assign_set_lhs (init_stmt, new_temp);
2486 vect_init_vector_1 (stmt, init_stmt, NULL);
2487 new_temp = make_ssa_name (vec_dest);
2488 new_stmt = gimple_build_assign (new_temp,
2489 gimple_assign_lhs (init_stmt));
2491 else
2493 new_stmt = gimple_build_call_vec (fndecl, vargs);
2494 new_temp = make_ssa_name (vec_dest, new_stmt);
2495 gimple_call_set_lhs (new_stmt, new_temp);
2497 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2499 if (j == 0)
2500 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2501 else
2502 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2504 prev_stmt_info = vinfo_for_stmt (new_stmt);
2507 break;
2509 case NARROW:
2510 for (j = 0; j < ncopies; ++j)
2512 /* Build argument list for the vectorized call. */
2513 if (j == 0)
2514 vargs.create (nargs * 2);
2515 else
2516 vargs.truncate (0);
2518 if (slp_node)
2520 auto_vec<vec<tree> > vec_defs (nargs);
2521 vec<tree> vec_oprnds0;
2523 for (i = 0; i < nargs; i++)
2524 vargs.quick_push (gimple_call_arg (stmt, i));
2525 vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
2526 vec_oprnds0 = vec_defs[0];
2528 /* Arguments are ready. Create the new vector stmt. */
2529 for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
2531 size_t k;
2532 vargs.truncate (0);
2533 for (k = 0; k < nargs; k++)
2535 vec<tree> vec_oprndsk = vec_defs[k];
2536 vargs.quick_push (vec_oprndsk[i]);
2537 vargs.quick_push (vec_oprndsk[i + 1]);
2539 new_stmt = gimple_build_call_vec (fndecl, vargs);
2540 new_temp = make_ssa_name (vec_dest, new_stmt);
2541 gimple_call_set_lhs (new_stmt, new_temp);
2542 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2543 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2546 for (i = 0; i < nargs; i++)
2548 vec<tree> vec_oprndsi = vec_defs[i];
2549 vec_oprndsi.release ();
2551 continue;
2554 for (i = 0; i < nargs; i++)
2556 op = gimple_call_arg (stmt, i);
2557 if (j == 0)
2559 vec_oprnd0
2560 = vect_get_vec_def_for_operand (op, stmt, NULL);
2561 vec_oprnd1
2562 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2564 else
2566 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
2567 vec_oprnd0
2568 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
2569 vec_oprnd1
2570 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2573 vargs.quick_push (vec_oprnd0);
2574 vargs.quick_push (vec_oprnd1);
2577 new_stmt = gimple_build_call_vec (fndecl, vargs);
2578 new_temp = make_ssa_name (vec_dest, new_stmt);
2579 gimple_call_set_lhs (new_stmt, new_temp);
2580 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2582 if (j == 0)
2583 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2584 else
2585 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2587 prev_stmt_info = vinfo_for_stmt (new_stmt);
2590 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2592 break;
2594 case WIDEN:
2595 /* No current target implements this case. */
2596 return false;
2599 vargs.release ();
2601 /* The call in STMT might prevent it from being removed in dce.
2602 We, however, cannot remove it here because of the way the ssa name
2603 it defines is mapped to the new definition.  So just replace the
2604 rhs of the statement with something harmless. */
2606 if (slp_node)
2607 return true;
2609 type = TREE_TYPE (scalar_dest);
2610 if (is_pattern_stmt_p (stmt_info))
2611 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
2612 else
2613 lhs = gimple_call_lhs (stmt);
2614 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
2615 set_vinfo_for_stmt (new_stmt, stmt_info);
2616 set_vinfo_for_stmt (stmt, NULL);
2617 STMT_VINFO_STMT (stmt_info) = new_stmt;
2618 gsi_replace (gsi, new_stmt, false);
2620 return true;
2624 struct simd_call_arg_info
2626 tree vectype;
2627 tree op;
2628 enum vect_def_type dt;
2629 HOST_WIDE_INT linear_step;
2630 unsigned int align;
2633 /* Function vectorizable_simd_clone_call.
2635 Check if STMT performs a function call that can be vectorized
2636 by calling a simd clone of the function.
2637 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2638 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
2639 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
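/* A sketch of what this handles (declarations are illustrative):

     #pragma omp declare simd notinbranch
     float foo (float x, int y);

     for (i = 0; i < n; i++)
       a[i] = foo (b[i], i);

   The scalar call is replaced by a call to one of foo's simd clones; the
   clone is chosen below by scoring the candidates against the loop's
   vectorization factor and the way each argument is used (vector, uniform
   or linear).  */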
2641 static bool
2642 vectorizable_simd_clone_call (gimple stmt, gimple_stmt_iterator *gsi,
2643 gimple *vec_stmt, slp_tree slp_node)
2645 tree vec_dest;
2646 tree scalar_dest;
2647 tree op, type;
2648 tree vec_oprnd0 = NULL_TREE;
2649 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
2650 tree vectype;
2651 unsigned int nunits;
2652 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2653 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2654 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
2655 tree fndecl, new_temp, def;
2656 gimple def_stmt;
2657 gimple new_stmt = NULL;
2658 int ncopies, j;
2659 vec<simd_call_arg_info> arginfo = vNULL;
2660 vec<tree> vargs = vNULL;
2661 size_t i, nargs;
2662 tree lhs, rtype, ratype;
2663 vec<constructor_elt, va_gc> *ret_ctor_elts;
2665 /* Is STMT a vectorizable call? */
2666 if (!is_gimple_call (stmt))
2667 return false;
2669 fndecl = gimple_call_fndecl (stmt);
2670 if (fndecl == NULL_TREE)
2671 return false;
2673 struct cgraph_node *node = cgraph_node::get (fndecl);
2674 if (node == NULL || node->simd_clones == NULL)
2675 return false;
2677 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2678 return false;
2680 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2681 return false;
2683 if (gimple_call_lhs (stmt)
2684 && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
2685 return false;
2687 gcc_checking_assert (!stmt_can_throw_internal (stmt));
2689 vectype = STMT_VINFO_VECTYPE (stmt_info);
2691 if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
2692 return false;
2694 /* FORNOW */
2695 if (slp_node || PURE_SLP_STMT (stmt_info))
2696 return false;
2698 /* Process function arguments. */
2699 nargs = gimple_call_num_args (stmt);
2701 /* Bail out if the function has zero arguments. */
2702 if (nargs == 0)
2703 return false;
2705 arginfo.create (nargs);
2707 for (i = 0; i < nargs; i++)
2709 simd_call_arg_info thisarginfo;
2710 affine_iv iv;
2712 thisarginfo.linear_step = 0;
2713 thisarginfo.align = 0;
2714 thisarginfo.op = NULL_TREE;
2716 op = gimple_call_arg (stmt, i);
2717 if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
2718 &def_stmt, &def, &thisarginfo.dt,
2719 &thisarginfo.vectype)
2720 || thisarginfo.dt == vect_uninitialized_def)
2722 if (dump_enabled_p ())
2723 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2724 "use not simple.\n");
2725 arginfo.release ();
2726 return false;
2729 if (thisarginfo.dt == vect_constant_def
2730 || thisarginfo.dt == vect_external_def)
2731 gcc_assert (thisarginfo.vectype == NULL_TREE);
2732 else
2733 gcc_assert (thisarginfo.vectype != NULL_TREE);
2735 /* For linear arguments, the analyze phase should have saved
2736 the base and step in STMT_VINFO_SIMD_CLONE_INFO. */
2737 if (i * 2 + 3 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
2738 && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 2 + 2])
2740 gcc_assert (vec_stmt);
2741 thisarginfo.linear_step
2742 = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 2 + 2]);
2743 thisarginfo.op
2744 = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 2 + 1];
2745 /* If the loop has been peeled for alignment, we need to adjust it. */
2746 tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
2747 tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
2748 if (n1 != n2)
2750 tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
2751 tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 2 + 2];
2752 tree opt = TREE_TYPE (thisarginfo.op);
2753 bias = fold_convert (TREE_TYPE (step), bias);
2754 bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
2755 thisarginfo.op
2756 = fold_build2 (POINTER_TYPE_P (opt)
2757 ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
2758 thisarginfo.op, bias);
2761 else if (!vec_stmt
2762 && thisarginfo.dt != vect_constant_def
2763 && thisarginfo.dt != vect_external_def
2764 && loop_vinfo
2765 && TREE_CODE (op) == SSA_NAME
2766 && simple_iv (loop, loop_containing_stmt (stmt), op,
2767 &iv, false)
2768 && tree_fits_shwi_p (iv.step))
2770 thisarginfo.linear_step = tree_to_shwi (iv.step);
2771 thisarginfo.op = iv.base;
2773 else if ((thisarginfo.dt == vect_constant_def
2774 || thisarginfo.dt == vect_external_def)
2775 && POINTER_TYPE_P (TREE_TYPE (op)))
2776 thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
2778 arginfo.quick_push (thisarginfo);
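  /* Choose the simd clone to use.  The scoring below is a heuristic: clones
     whose simdlen is smaller than the vectorization factor are penalized,
     in-branch clones are currently rejected, vector arguments fed from
     invariants or linear inductions add a smaller penalty, and the candidate
     with the lowest badness wins.  */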
2781 unsigned int badness = 0;
2782 struct cgraph_node *bestn = NULL;
2783 if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
2784 bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
2785 else
2786 for (struct cgraph_node *n = node->simd_clones; n != NULL;
2787 n = n->simdclone->next_clone)
2789 unsigned int this_badness = 0;
2790 if (n->simdclone->simdlen
2791 > (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
2792 || n->simdclone->nargs != nargs)
2793 continue;
2794 if (n->simdclone->simdlen
2795 < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo))
2796 this_badness += (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
2797 - exact_log2 (n->simdclone->simdlen)) * 1024;
2798 if (n->simdclone->inbranch)
2799 this_badness += 2048;
2800 int target_badness = targetm.simd_clone.usable (n);
2801 if (target_badness < 0)
2802 continue;
2803 this_badness += target_badness * 512;
2804 /* FORNOW: Have to add code to add the mask argument. */
2805 if (n->simdclone->inbranch)
2806 continue;
2807 for (i = 0; i < nargs; i++)
2809 switch (n->simdclone->args[i].arg_type)
2811 case SIMD_CLONE_ARG_TYPE_VECTOR:
2812 if (!useless_type_conversion_p
2813 (n->simdclone->args[i].orig_type,
2814 TREE_TYPE (gimple_call_arg (stmt, i))))
2815 i = -1;
2816 else if (arginfo[i].dt == vect_constant_def
2817 || arginfo[i].dt == vect_external_def
2818 || arginfo[i].linear_step)
2819 this_badness += 64;
2820 break;
2821 case SIMD_CLONE_ARG_TYPE_UNIFORM:
2822 if (arginfo[i].dt != vect_constant_def
2823 && arginfo[i].dt != vect_external_def)
2824 i = -1;
2825 break;
2826 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
2827 if (arginfo[i].dt == vect_constant_def
2828 || arginfo[i].dt == vect_external_def
2829 || (arginfo[i].linear_step
2830 != n->simdclone->args[i].linear_step))
2831 i = -1;
2832 break;
2833 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
2834 /* FORNOW */
2835 i = -1;
2836 break;
2837 case SIMD_CLONE_ARG_TYPE_MASK:
2838 gcc_unreachable ();
2840 if (i == (size_t) -1)
2841 break;
2842 if (n->simdclone->args[i].alignment > arginfo[i].align)
2844 i = -1;
2845 break;
2847 if (arginfo[i].align)
2848 this_badness += (exact_log2 (arginfo[i].align)
2849 - exact_log2 (n->simdclone->args[i].alignment));
2851 if (i == (size_t) -1)
2852 continue;
2853 if (bestn == NULL || this_badness < badness)
2855 bestn = n;
2856 badness = this_badness;
2860 if (bestn == NULL)
2862 arginfo.release ();
2863 return false;
2866 for (i = 0; i < nargs; i++)
2867 if ((arginfo[i].dt == vect_constant_def
2868 || arginfo[i].dt == vect_external_def)
2869 && bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
2871 arginfo[i].vectype
2872 = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
2873 i)));
2874 if (arginfo[i].vectype == NULL
2875 || (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
2876 > bestn->simdclone->simdlen))
2878 arginfo.release ();
2879 return false;
2883 fndecl = bestn->decl;
2884 nunits = bestn->simdclone->simdlen;
2885 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
2887 /* If the function isn't const, only allow it in simd loops where the user
2888 has asserted that at least nunits consecutive iterations can be
2889 performed using SIMD instructions. */
2890 if ((loop == NULL || (unsigned) loop->safelen < nunits)
2891 && gimple_vuse (stmt))
2893 arginfo.release ();
2894 return false;
2897 /* Sanity check: make sure that at least one copy of the vectorized stmt
2898 needs to be generated. */
2899 gcc_assert (ncopies >= 1);
2901 if (!vec_stmt) /* transformation not required. */
2903 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
2904 for (i = 0; i < nargs; i++)
2905 if (bestn->simdclone->args[i].arg_type
2906 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
2908 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 2
2909 + 1);
2910 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
2911 tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
2912 ? size_type_node : TREE_TYPE (arginfo[i].op);
2913 tree ls = build_int_cst (lst, arginfo[i].linear_step);
2914 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
2916 STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
2917 if (dump_enabled_p ())
2918 dump_printf_loc (MSG_NOTE, vect_location,
2919 "=== vectorizable_simd_clone_call ===\n");
2920 /* vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
2921 arginfo.release ();
2922 return true;
2925 /** Transform. **/
2927 if (dump_enabled_p ())
2928 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
2930 /* Handle def. */
2931 scalar_dest = gimple_call_lhs (stmt);
2932 vec_dest = NULL_TREE;
2933 rtype = NULL_TREE;
2934 ratype = NULL_TREE;
2935 if (scalar_dest)
2937 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2938 rtype = TREE_TYPE (TREE_TYPE (fndecl));
2939 if (TREE_CODE (rtype) == ARRAY_TYPE)
2941 ratype = rtype;
2942 rtype = TREE_TYPE (ratype);
2946 prev_stmt_info = NULL;
2947 for (j = 0; j < ncopies; ++j)
2949 /* Build argument list for the vectorized call. */
2950 if (j == 0)
2951 vargs.create (nargs);
2952 else
2953 vargs.truncate (0);
2955 for (i = 0; i < nargs; i++)
2957 unsigned int k, l, m, o;
2958 tree atype;
2959 op = gimple_call_arg (stmt, i);
2960 switch (bestn->simdclone->args[i].arg_type)
2962 case SIMD_CLONE_ARG_TYPE_VECTOR:
2963 atype = bestn->simdclone->args[i].vector_type;
2964 o = nunits / TYPE_VECTOR_SUBPARTS (atype);
2965 for (m = j * o; m < (j + 1) * o; m++)
2967 if (TYPE_VECTOR_SUBPARTS (atype)
2968 < TYPE_VECTOR_SUBPARTS (arginfo[i].vectype))
2970 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
2971 k = (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
2972 / TYPE_VECTOR_SUBPARTS (atype));
2973 gcc_assert ((k & (k - 1)) == 0);
2974 if (m == 0)
2975 vec_oprnd0
2976 = vect_get_vec_def_for_operand (op, stmt, NULL);
2977 else
2979 vec_oprnd0 = arginfo[i].op;
2980 if ((m & (k - 1)) == 0)
2981 vec_oprnd0
2982 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
2983 vec_oprnd0);
2985 arginfo[i].op = vec_oprnd0;
2986 vec_oprnd0
2987 = build3 (BIT_FIELD_REF, atype, vec_oprnd0,
2988 size_int (prec),
2989 bitsize_int ((m & (k - 1)) * prec));
2990 new_stmt
2991 = gimple_build_assign (make_ssa_name (atype),
2992 vec_oprnd0);
2993 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2994 vargs.safe_push (gimple_assign_lhs (new_stmt));
2996 else
2998 k = (TYPE_VECTOR_SUBPARTS (atype)
2999 / TYPE_VECTOR_SUBPARTS (arginfo[i].vectype));
3000 gcc_assert ((k & (k - 1)) == 0);
3001 vec<constructor_elt, va_gc> *ctor_elts;
3002 if (k != 1)
3003 vec_alloc (ctor_elts, k);
3004 else
3005 ctor_elts = NULL;
3006 for (l = 0; l < k; l++)
3008 if (m == 0 && l == 0)
3009 vec_oprnd0
3010 = vect_get_vec_def_for_operand (op, stmt, NULL);
3011 else
3012 vec_oprnd0
3013 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
3014 arginfo[i].op);
3015 arginfo[i].op = vec_oprnd0;
3016 if (k == 1)
3017 break;
3018 CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
3019 vec_oprnd0);
3021 if (k == 1)
3022 vargs.safe_push (vec_oprnd0);
3023 else
3025 vec_oprnd0 = build_constructor (atype, ctor_elts);
3026 new_stmt
3027 = gimple_build_assign (make_ssa_name (atype),
3028 vec_oprnd0);
3029 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3030 vargs.safe_push (gimple_assign_lhs (new_stmt));
3034 break;
3035 case SIMD_CLONE_ARG_TYPE_UNIFORM:
3036 vargs.safe_push (op);
3037 break;
3038 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
3039 if (j == 0)
3041 gimple_seq stmts;
3042 arginfo[i].op
3043 = force_gimple_operand (arginfo[i].op, &stmts, true,
3044 NULL_TREE);
3045 if (stmts != NULL)
3047 basic_block new_bb;
3048 edge pe = loop_preheader_edge (loop);
3049 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
3050 gcc_assert (!new_bb);
3052 tree phi_res = copy_ssa_name (op);
3053 gphi *new_phi = create_phi_node (phi_res, loop->header);
3054 set_vinfo_for_stmt (new_phi,
3055 new_stmt_vec_info (new_phi, loop_vinfo,
3056 NULL));
3057 add_phi_arg (new_phi, arginfo[i].op,
3058 loop_preheader_edge (loop), UNKNOWN_LOCATION);
3059 enum tree_code code
3060 = POINTER_TYPE_P (TREE_TYPE (op))
3061 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3062 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3063 ? sizetype : TREE_TYPE (op);
3064 widest_int cst
3065 = wi::mul (bestn->simdclone->args[i].linear_step,
3066 ncopies * nunits);
3067 tree tcst = wide_int_to_tree (type, cst);
3068 tree phi_arg = copy_ssa_name (op);
3069 new_stmt
3070 = gimple_build_assign (phi_arg, code, phi_res, tcst);
3071 gimple_stmt_iterator si = gsi_after_labels (loop->header);
3072 gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
3073 set_vinfo_for_stmt (new_stmt,
3074 new_stmt_vec_info (new_stmt, loop_vinfo,
3075 NULL));
3076 add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
3077 UNKNOWN_LOCATION);
3078 arginfo[i].op = phi_res;
3079 vargs.safe_push (phi_res);
3081 else
3083 enum tree_code code
3084 = POINTER_TYPE_P (TREE_TYPE (op))
3085 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3086 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3087 ? sizetype : TREE_TYPE (op);
3088 widest_int cst
3089 = wi::mul (bestn->simdclone->args[i].linear_step,
3090 j * nunits);
3091 tree tcst = wide_int_to_tree (type, cst);
3092 new_temp = make_ssa_name (TREE_TYPE (op));
3093 new_stmt = gimple_build_assign (new_temp, code,
3094 arginfo[i].op, tcst);
3095 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3096 vargs.safe_push (new_temp);
3098 break;
3099 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
3100 default:
3101 gcc_unreachable ();
3105 new_stmt = gimple_build_call_vec (fndecl, vargs);
3106 if (vec_dest)
3108 gcc_assert (ratype || TYPE_VECTOR_SUBPARTS (rtype) == nunits);
3109 if (ratype)
3110 new_temp = create_tmp_var (ratype);
3111 else if (TYPE_VECTOR_SUBPARTS (vectype)
3112 == TYPE_VECTOR_SUBPARTS (rtype))
3113 new_temp = make_ssa_name (vec_dest, new_stmt);
3114 else
3115 new_temp = make_ssa_name (rtype, new_stmt);
3116 gimple_call_set_lhs (new_stmt, new_temp);
3118 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3120 if (vec_dest)
3122 if (TYPE_VECTOR_SUBPARTS (vectype) < nunits)
3124 unsigned int k, l;
3125 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
3126 k = nunits / TYPE_VECTOR_SUBPARTS (vectype);
3127 gcc_assert ((k & (k - 1)) == 0);
3128 for (l = 0; l < k; l++)
3130 tree t;
3131 if (ratype)
3133 t = build_fold_addr_expr (new_temp);
3134 t = build2 (MEM_REF, vectype, t,
3135 build_int_cst (TREE_TYPE (t),
3136 l * prec / BITS_PER_UNIT));
3138 else
3139 t = build3 (BIT_FIELD_REF, vectype, new_temp,
3140 size_int (prec), bitsize_int (l * prec));
3141 new_stmt
3142 = gimple_build_assign (make_ssa_name (vectype), t);
3143 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3144 if (j == 0 && l == 0)
3145 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3146 else
3147 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3149 prev_stmt_info = vinfo_for_stmt (new_stmt);
3152 if (ratype)
3154 tree clobber = build_constructor (ratype, NULL);
3155 TREE_THIS_VOLATILE (clobber) = 1;
3156 new_stmt = gimple_build_assign (new_temp, clobber);
3157 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3159 continue;
3161 else if (TYPE_VECTOR_SUBPARTS (vectype) > nunits)
3163 unsigned int k = (TYPE_VECTOR_SUBPARTS (vectype)
3164 / TYPE_VECTOR_SUBPARTS (rtype));
3165 gcc_assert ((k & (k - 1)) == 0);
3166 if ((j & (k - 1)) == 0)
3167 vec_alloc (ret_ctor_elts, k);
3168 if (ratype)
3170 unsigned int m, o = nunits / TYPE_VECTOR_SUBPARTS (rtype);
3171 for (m = 0; m < o; m++)
3173 tree tem = build4 (ARRAY_REF, rtype, new_temp,
3174 size_int (m), NULL_TREE, NULL_TREE);
3175 new_stmt
3176 = gimple_build_assign (make_ssa_name (rtype), tem);
3177 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3178 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
3179 gimple_assign_lhs (new_stmt));
3181 tree clobber = build_constructor (ratype, NULL);
3182 TREE_THIS_VOLATILE (clobber) = 1;
3183 new_stmt = gimple_build_assign (new_temp, clobber);
3184 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3186 else
3187 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
3188 if ((j & (k - 1)) != k - 1)
3189 continue;
3190 vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
3191 new_stmt
3192 = gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
3193 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3195 if ((unsigned) j == k - 1)
3196 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3197 else
3198 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3200 prev_stmt_info = vinfo_for_stmt (new_stmt);
3201 continue;
3203 else if (ratype)
3205 tree t = build_fold_addr_expr (new_temp);
3206 t = build2 (MEM_REF, vectype, t,
3207 build_int_cst (TREE_TYPE (t), 0));
3208 new_stmt
3209 = gimple_build_assign (make_ssa_name (vec_dest), t);
3210 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3211 tree clobber = build_constructor (ratype, NULL);
3212 TREE_THIS_VOLATILE (clobber) = 1;
3213 vect_finish_stmt_generation (stmt,
3214 gimple_build_assign (new_temp,
3215 clobber), gsi);
3219 if (j == 0)
3220 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3221 else
3222 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3224 prev_stmt_info = vinfo_for_stmt (new_stmt);
3227 vargs.release ();
3229 /* The call in STMT might prevent it from being removed in dce.
3230 We, however, cannot remove it here because of the way the ssa name
3231 it defines is mapped to the new definition.  So just replace the
3232 rhs of the statement with something harmless. */
3234 if (slp_node)
3235 return true;
3237 if (scalar_dest)
3239 type = TREE_TYPE (scalar_dest);
3240 if (is_pattern_stmt_p (stmt_info))
3241 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
3242 else
3243 lhs = gimple_call_lhs (stmt);
3244 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
3246 else
3247 new_stmt = gimple_build_nop ();
3248 set_vinfo_for_stmt (new_stmt, stmt_info);
3249 set_vinfo_for_stmt (stmt, NULL);
3250 STMT_VINFO_STMT (stmt_info) = new_stmt;
3251 gsi_replace (gsi, new_stmt, true);
3252 unlink_stmt_vdef (stmt);
3254 return true;
3258 /* Function vect_gen_widened_results_half
3260 Create a vector stmt whose code is CODE, whose number of operands is
3261 OP_TYPE (unary_op or binary_op), whose result variable is VEC_DEST and
3262 whose arguments are VEC_OPRND0 and VEC_OPRND1.  Insert the new stmt at GSI.
3263 In the case that CODE is a CALL_EXPR, this means that a call to DECL
3264 needs to be created (DECL is a function-decl of a target-builtin).
3265 STMT is the original scalar stmt that we are vectorizing. */
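/* For instance (a sketch), a widening operation on a V8HI operand yields
   its result as two V4SI halves; the caller invokes this helper twice, once
   with the "lo" code or builtin and once with the "hi" one, and each call
   emits the stmt computing one half.  */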
3267 static gimple
3268 vect_gen_widened_results_half (enum tree_code code,
3269 tree decl,
3270 tree vec_oprnd0, tree vec_oprnd1, int op_type,
3271 tree vec_dest, gimple_stmt_iterator *gsi,
3272 gimple stmt)
3274 gimple new_stmt;
3275 tree new_temp;
3277 /* Generate half of the widened result: */
3278 if (code == CALL_EXPR)
3280 /* Target specific support */
3281 if (op_type == binary_op)
3282 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
3283 else
3284 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
3285 new_temp = make_ssa_name (vec_dest, new_stmt);
3286 gimple_call_set_lhs (new_stmt, new_temp);
3288 else
3290 /* Generic support */
3291 gcc_assert (op_type == TREE_CODE_LENGTH (code));
3292 if (op_type != binary_op)
3293 vec_oprnd1 = NULL;
3294 new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
3295 new_temp = make_ssa_name (vec_dest, new_stmt);
3296 gimple_assign_set_lhs (new_stmt, new_temp);
3298 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3300 return new_stmt;
3304 /* Get vectorized definitions for loop-based vectorization. For the first
3305 operand we call vect_get_vec_def_for_operand() (with OPRND containing
3306 scalar operand), and for the rest we get a copy with
3307 vect_get_vec_def_for_stmt_copy() using the previous vector definition
3308 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
3309 The vectors are collected into VEC_OPRNDS. */
3311 static void
3312 vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
3313 vec<tree> *vec_oprnds, int multi_step_cvt)
3315 tree vec_oprnd;
3317 /* Get first vector operand. */
3318 /* All the vector operands except the very first one (that is the scalar oprnd)
3319 are stmt copies. */
3320 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
3321 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
3322 else
3323 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
3325 vec_oprnds->quick_push (vec_oprnd);
3327 /* Get second vector operand. */
3328 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
3329 vec_oprnds->quick_push (vec_oprnd);
3331 *oprnd = vec_oprnd;
3333 /* For conversion in multiple steps, continue to get operands
3334 recursively. */
3335 if (multi_step_cvt)
3336 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
3340 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
3341 For multi-step conversions store the resulting vectors and call the function
3342 recursively. */
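/* For instance (a sketch), demoting int elements to short combines each
   pair of V4SI operand vectors into one V8HI result with
   VEC_PACK_TRUNC_EXPR; a multi-step demotion such as int -> char packs
   twice, going through an intermediate short vector at the recursive
   call.  */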
3344 static void
3345 vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
3346 int multi_step_cvt, gimple stmt,
3347 vec<tree> vec_dsts,
3348 gimple_stmt_iterator *gsi,
3349 slp_tree slp_node, enum tree_code code,
3350 stmt_vec_info *prev_stmt_info)
3352 unsigned int i;
3353 tree vop0, vop1, new_tmp, vec_dest;
3354 gimple new_stmt;
3355 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3357 vec_dest = vec_dsts.pop ();
3359 for (i = 0; i < vec_oprnds->length (); i += 2)
3361 /* Create demotion operation. */
3362 vop0 = (*vec_oprnds)[i];
3363 vop1 = (*vec_oprnds)[i + 1];
3364 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
3365 new_tmp = make_ssa_name (vec_dest, new_stmt);
3366 gimple_assign_set_lhs (new_stmt, new_tmp);
3367 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3369 if (multi_step_cvt)
3370 /* Store the resulting vector for next recursive call. */
3371 (*vec_oprnds)[i/2] = new_tmp;
3372 else
3374 /* This is the last step of the conversion sequence. Store the
3375 vectors in SLP_NODE or in vector info of the scalar statement
3376 (or in STMT_VINFO_RELATED_STMT chain). */
3377 if (slp_node)
3378 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3379 else
3381 if (!*prev_stmt_info)
3382 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3383 else
3384 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
3386 *prev_stmt_info = vinfo_for_stmt (new_stmt);
3391 /* For multi-step demotion operations we first generate demotion operations
3392 from the source type to the intermediate types, and then combine the
3393 results (stored in VEC_OPRNDS) in demotion operation to the destination
3394 type. */
3395 if (multi_step_cvt)
3397 /* At each level of recursion we have half of the operands we had at the
3398 previous level. */
3399 vec_oprnds->truncate ((i+1)/2);
3400 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
3401 stmt, vec_dsts, gsi, slp_node,
3402 VEC_PACK_TRUNC_EXPR,
3403 prev_stmt_info);
3406 vec_dsts.quick_push (vec_dest);
3410 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
3411 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
3412 the resulting vectors and call the function recursively. */
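/* For instance (a sketch), promoting V8HI operands to V4SI results emits
   two stmts per input vector, one using CODE1/DECL1 for the low half and
   one using CODE2/DECL2 for the high half; the 2 * N results are collected
   back into VEC_OPRNDS0 for the next step.  */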
3414 static void
3415 vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
3416 vec<tree> *vec_oprnds1,
3417 gimple stmt, tree vec_dest,
3418 gimple_stmt_iterator *gsi,
3419 enum tree_code code1,
3420 enum tree_code code2, tree decl1,
3421 tree decl2, int op_type)
3423 int i;
3424 tree vop0, vop1, new_tmp1, new_tmp2;
3425 gimple new_stmt1, new_stmt2;
3426 vec<tree> vec_tmp = vNULL;
3428 vec_tmp.create (vec_oprnds0->length () * 2);
3429 FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
3431 if (op_type == binary_op)
3432 vop1 = (*vec_oprnds1)[i];
3433 else
3434 vop1 = NULL_TREE;
3436 /* Generate the two halves of promotion operation. */
3437 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
3438 op_type, vec_dest, gsi, stmt);
3439 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
3440 op_type, vec_dest, gsi, stmt);
3441 if (is_gimple_call (new_stmt1))
3443 new_tmp1 = gimple_call_lhs (new_stmt1);
3444 new_tmp2 = gimple_call_lhs (new_stmt2);
3446 else
3448 new_tmp1 = gimple_assign_lhs (new_stmt1);
3449 new_tmp2 = gimple_assign_lhs (new_stmt2);
3452 /* Store the results for the next step. */
3453 vec_tmp.quick_push (new_tmp1);
3454 vec_tmp.quick_push (new_tmp2);
3457 vec_oprnds0->release ();
3458 *vec_oprnds0 = vec_tmp;
3462 /* Check if STMT performs a conversion operation that can be vectorized.
3463 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3464 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3465 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
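/* A sketch of the conversions handled here: widening such as float -> double
   (one input vector produces two output vectors), narrowing such as
   int -> short (two input vectors produce one output vector), and same-size
   conversions such as int <-> float, possibly through an intermediate type
   when the target has no single instruction for the conversion.  */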
3467 static bool
3468 vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
3469 gimple *vec_stmt, slp_tree slp_node)
3471 tree vec_dest;
3472 tree scalar_dest;
3473 tree op0, op1 = NULL_TREE;
3474 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
3475 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3476 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3477 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
3478 enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
3479 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
3480 tree new_temp;
3481 tree def;
3482 gimple def_stmt;
3483 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
3484 gimple new_stmt = NULL;
3485 stmt_vec_info prev_stmt_info;
3486 int nunits_in;
3487 int nunits_out;
3488 tree vectype_out, vectype_in;
3489 int ncopies, i, j;
3490 tree lhs_type, rhs_type;
3491 enum { NARROW, NONE, WIDEN } modifier;
3492 vec<tree> vec_oprnds0 = vNULL;
3493 vec<tree> vec_oprnds1 = vNULL;
3494 tree vop0;
3495 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3496 int multi_step_cvt = 0;
3497 vec<tree> vec_dsts = vNULL;
3498 vec<tree> interm_types = vNULL;
3499 tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
3500 int op_type;
3501 machine_mode rhs_mode;
3502 unsigned short fltsz;
3504 /* Is STMT a vectorizable conversion? */
3506 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3507 return false;
3509 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3510 return false;
3512 if (!is_gimple_assign (stmt))
3513 return false;
3515 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
3516 return false;
3518 code = gimple_assign_rhs_code (stmt);
3519 if (!CONVERT_EXPR_CODE_P (code)
3520 && code != FIX_TRUNC_EXPR
3521 && code != FLOAT_EXPR
3522 && code != WIDEN_MULT_EXPR
3523 && code != WIDEN_LSHIFT_EXPR)
3524 return false;
3526 op_type = TREE_CODE_LENGTH (code);
3528 /* Check types of lhs and rhs. */
3529 scalar_dest = gimple_assign_lhs (stmt);
3530 lhs_type = TREE_TYPE (scalar_dest);
3531 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3533 op0 = gimple_assign_rhs1 (stmt);
3534 rhs_type = TREE_TYPE (op0);
3536 if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
3537 && !((INTEGRAL_TYPE_P (lhs_type)
3538 && INTEGRAL_TYPE_P (rhs_type))
3539 || (SCALAR_FLOAT_TYPE_P (lhs_type)
3540 && SCALAR_FLOAT_TYPE_P (rhs_type))))
3541 return false;
3543 if ((INTEGRAL_TYPE_P (lhs_type)
3544 && (TYPE_PRECISION (lhs_type)
3545 != GET_MODE_PRECISION (TYPE_MODE (lhs_type))))
3546 || (INTEGRAL_TYPE_P (rhs_type)
3547 && (TYPE_PRECISION (rhs_type)
3548 != GET_MODE_PRECISION (TYPE_MODE (rhs_type)))))
3550 if (dump_enabled_p ())
3551 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3552 "type conversion to/from bit-precision unsupported."
3553 "\n");
3554 return false;
3557 /* Check the operands of the operation. */
3558 if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
3559 &def_stmt, &def, &dt[0], &vectype_in))
3561 if (dump_enabled_p ())
3562 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3563 "use not simple.\n");
3564 return false;
3566 if (op_type == binary_op)
3568 bool ok;
3570 op1 = gimple_assign_rhs2 (stmt);
3571 gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
3572 /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
3573 OP1. */
3574 if (CONSTANT_CLASS_P (op0))
3575 ok = vect_is_simple_use_1 (op1, stmt, loop_vinfo, bb_vinfo,
3576 &def_stmt, &def, &dt[1], &vectype_in);
3577 else
3578 ok = vect_is_simple_use (op1, stmt, loop_vinfo, bb_vinfo, &def_stmt,
3579 &def, &dt[1]);
3581 if (!ok)
3583 if (dump_enabled_p ())
3584 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3585 "use not simple.\n");
3586 return false;
3590 /* If op0 is an external or constant def, use a vector type of
3591 the same size as the output vector type. */
3592 if (!vectype_in)
3593 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
3594 if (vec_stmt)
3595 gcc_assert (vectype_in);
3596 if (!vectype_in)
3598 if (dump_enabled_p ())
3600 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3601 "no vectype for scalar type ");
3602 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
3603 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3606 return false;
3609 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
3610 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
3611 if (nunits_in < nunits_out)
3612 modifier = NARROW;
3613 else if (nunits_out == nunits_in)
3614 modifier = NONE;
3615 else
3616 modifier = WIDEN;
3618 /* Multiple types in SLP are handled by creating the appropriate number of
3619 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3620 case of SLP. */
3621 if (slp_node || PURE_SLP_STMT (stmt_info))
3622 ncopies = 1;
3623 else if (modifier == NARROW)
3624 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
3625 else
3626 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
3628 /* Sanity check: make sure that at least one copy of the vectorized stmt
3629 needs to be generated. */
3630 gcc_assert (ncopies >= 1);
3632 /* Supportable by target? */
3633 switch (modifier)
3635 case NONE:
3636 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
3637 return false;
3638 if (supportable_convert_operation (code, vectype_out, vectype_in,
3639 &decl1, &code1))
3640 break;
3641 /* FALLTHRU */
3642 unsupported:
3643 if (dump_enabled_p ())
3644 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3645 "conversion not supported by target.\n");
3646 return false;
3648 case WIDEN:
3649 if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
3650 &code1, &code2, &multi_step_cvt,
3651 &interm_types))
3653 /* Binary widening operation can only be supported directly by the
3654 architecture. */
3655 gcc_assert (!(multi_step_cvt && op_type == binary_op));
3656 break;
3659 if (code != FLOAT_EXPR
3660 || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
3661 <= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
3662 goto unsupported;
3664 rhs_mode = TYPE_MODE (rhs_type);
3665 fltsz = GET_MODE_SIZE (TYPE_MODE (lhs_type));
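      /* A FLOAT_EXPR from a narrow integer type can be done in two steps:
	 widen the integer to an intermediate integer mode that is no wider
	 than the float type, then convert that intermediate type to the
	 float type (e.g. char -> int -> float, as a sketch).  The loop below
	 searches for a usable intermediate mode.  */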
3666 for (rhs_mode = GET_MODE_2XWIDER_MODE (TYPE_MODE (rhs_type));
3667 rhs_mode != VOIDmode && GET_MODE_SIZE (rhs_mode) <= fltsz;
3668 rhs_mode = GET_MODE_2XWIDER_MODE (rhs_mode))
3670 cvt_type
3671 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
3672 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
3673 if (cvt_type == NULL_TREE)
3674 goto unsupported;
3676 if (GET_MODE_SIZE (rhs_mode) == fltsz)
3678 if (!supportable_convert_operation (code, vectype_out,
3679 cvt_type, &decl1, &codecvt1))
3680 goto unsupported;
3682 else if (!supportable_widening_operation (code, stmt, vectype_out,
3683 cvt_type, &codecvt1,
3684 &codecvt2, &multi_step_cvt,
3685 &interm_types))
3686 continue;
3687 else
3688 gcc_assert (multi_step_cvt == 0);
3690 if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
3691 vectype_in, &code1, &code2,
3692 &multi_step_cvt, &interm_types))
3693 break;
3696 if (rhs_mode == VOIDmode || GET_MODE_SIZE (rhs_mode) > fltsz)
3697 goto unsupported;
3699 if (GET_MODE_SIZE (rhs_mode) == fltsz)
3700 codecvt2 = ERROR_MARK;
3701 else
3703 multi_step_cvt++;
3704 interm_types.safe_push (cvt_type);
3705 cvt_type = NULL_TREE;
3707 break;
3709 case NARROW:
3710 gcc_assert (op_type == unary_op);
3711 if (supportable_narrowing_operation (code, vectype_out, vectype_in,
3712 &code1, &multi_step_cvt,
3713 &interm_types))
3714 break;
3716 if (code != FIX_TRUNC_EXPR
3717 || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
3718 >= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
3719 goto unsupported;
3721 rhs_mode = TYPE_MODE (rhs_type);
3722 cvt_type
3723 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
3724 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
3725 if (cvt_type == NULL_TREE)
3726 goto unsupported;
3727 if (!supportable_convert_operation (code, cvt_type, vectype_in,
3728 &decl1, &codecvt1))
3729 goto unsupported;
3730 if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
3731 &code1, &multi_step_cvt,
3732 &interm_types))
3733 break;
3734 goto unsupported;
3736 default:
3737 gcc_unreachable ();
3740 if (!vec_stmt) /* transformation not required. */
3742 if (dump_enabled_p ())
3743 dump_printf_loc (MSG_NOTE, vect_location,
3744 "=== vectorizable_conversion ===\n");
3745 if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
3747 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
3748 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
3750 else if (modifier == NARROW)
3752 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
3753 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
3755 else
3757 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
3758 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
3760 interm_types.release ();
3761 return true;
3764 /** Transform. **/
3765 if (dump_enabled_p ())
3766 dump_printf_loc (MSG_NOTE, vect_location,
3767 "transform conversion. ncopies = %d.\n", ncopies);
3769 if (op_type == binary_op)
3771 if (CONSTANT_CLASS_P (op0))
3772 op0 = fold_convert (TREE_TYPE (op1), op0);
3773 else if (CONSTANT_CLASS_P (op1))
3774 op1 = fold_convert (TREE_TYPE (op0), op1);
3777 /* In case of multi-step conversion, we first generate conversion operations
3778 to the intermediate types, and then from those types to the final one.
3779 We create vector destinations for the intermediate type (TYPES) received
3780 from supportable_*_operation, and store them in the correct order
3781 for future use in vect_create_vectorized_*_stmts (). */
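   /* For example, a char -> int widening that the target can only do as
      char -> short -> int needs an intermediate destination of the short
      vector type in addition to the final int vector destination.  */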
3782 vec_dsts.create (multi_step_cvt + 1);
3783 vec_dest = vect_create_destination_var (scalar_dest,
3784 (cvt_type && modifier == WIDEN)
3785 ? cvt_type : vectype_out);
3786 vec_dsts.quick_push (vec_dest);
3788 if (multi_step_cvt)
3790 for (i = interm_types.length () - 1;
3791 interm_types.iterate (i, &intermediate_type); i--)
3793 vec_dest = vect_create_destination_var (scalar_dest,
3794 intermediate_type);
3795 vec_dsts.quick_push (vec_dest);
3799 if (cvt_type)
3800 vec_dest = vect_create_destination_var (scalar_dest,
3801 modifier == WIDEN
3802 ? vectype_out : cvt_type);
3804 if (!slp_node)
3806 if (modifier == WIDEN)
3808 vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
3809 if (op_type == binary_op)
3810 vec_oprnds1.create (1);
3812 else if (modifier == NARROW)
3813 vec_oprnds0.create (
3814 2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
3816 else if (code == WIDEN_LSHIFT_EXPR)
3817 vec_oprnds1.create (slp_node->vec_stmts_size);
3819 last_oprnd = op0;
3820 prev_stmt_info = NULL;
3821 switch (modifier)
3823 case NONE:
3824 for (j = 0; j < ncopies; j++)
3826 if (j == 0)
3827 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node,
3828 -1);
3829 else
3830 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
3832 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
3834 /* Arguments are ready. Create the new vector stmt. */
3835 if (code1 == CALL_EXPR)
3837 new_stmt = gimple_build_call (decl1, 1, vop0);
3838 new_temp = make_ssa_name (vec_dest, new_stmt);
3839 gimple_call_set_lhs (new_stmt, new_temp);
3841 else
3843 gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
3844 new_stmt = gimple_build_assign (vec_dest, code1, vop0);
3845 new_temp = make_ssa_name (vec_dest, new_stmt);
3846 gimple_assign_set_lhs (new_stmt, new_temp);
3849 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3850 if (slp_node)
3851 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3854 if (j == 0)
3855 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3856 else
3857 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3858 prev_stmt_info = vinfo_for_stmt (new_stmt);
3860 break;
3862 case WIDEN:
3863 /* In case the vectorization factor (VF) is bigger than the number
3864 of elements that we can fit in a vectype (nunits), we have to
3865 generate more than one vector stmt - i.e - we need to "unroll"
3866 the vector stmt by a factor VF/nunits. */
3867 for (j = 0; j < ncopies; j++)
3869 /* Handle uses. */
3870 if (j == 0)
3872 if (slp_node)
3874 if (code == WIDEN_LSHIFT_EXPR)
3876 unsigned int k;
3878 vec_oprnd1 = op1;
3879 /* Store vec_oprnd1 for every vector stmt to be created
3880 for SLP_NODE. We check during the analysis that all
3881 the shift arguments are the same. */
3882 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
3883 vec_oprnds1.quick_push (vec_oprnd1);
3885 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
3886 slp_node, -1);
3888 else
3889 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
3890 &vec_oprnds1, slp_node, -1);
3892 else
3894 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
3895 vec_oprnds0.quick_push (vec_oprnd0);
3896 if (op_type == binary_op)
3898 if (code == WIDEN_LSHIFT_EXPR)
3899 vec_oprnd1 = op1;
3900 else
3901 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt,
3902 NULL);
3903 vec_oprnds1.quick_push (vec_oprnd1);
3907 else
3909 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
3910 vec_oprnds0.truncate (0);
3911 vec_oprnds0.quick_push (vec_oprnd0);
3912 if (op_type == binary_op)
3914 if (code == WIDEN_LSHIFT_EXPR)
3915 vec_oprnd1 = op1;
3916 else
3917 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
3918 vec_oprnd1);
3919 vec_oprnds1.truncate (0);
3920 vec_oprnds1.quick_push (vec_oprnd1);
3924 /* Arguments are ready. Create the new vector stmts. */
3925 for (i = multi_step_cvt; i >= 0; i--)
3927 tree this_dest = vec_dsts[i];
3928 enum tree_code c1 = code1, c2 = code2;
3929 if (i == 0 && codecvt2 != ERROR_MARK)
3931 c1 = codecvt1;
3932 c2 = codecvt2;
3934 vect_create_vectorized_promotion_stmts (&vec_oprnds0,
3935 &vec_oprnds1,
3936 stmt, this_dest, gsi,
3937 c1, c2, decl1, decl2,
3938 op_type);
3941 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
3943 if (cvt_type)
3945 if (codecvt1 == CALL_EXPR)
3947 new_stmt = gimple_build_call (decl1, 1, vop0);
3948 new_temp = make_ssa_name (vec_dest, new_stmt);
3949 gimple_call_set_lhs (new_stmt, new_temp);
3951 else
3953 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
3954 new_temp = make_ssa_name (vec_dest);
3955 new_stmt = gimple_build_assign (new_temp, codecvt1,
3956 vop0);
3959 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3961 else
3962 new_stmt = SSA_NAME_DEF_STMT (vop0);
3964 if (slp_node)
3965 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3966 else
3968 if (!prev_stmt_info)
3969 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3970 else
3971 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3972 prev_stmt_info = vinfo_for_stmt (new_stmt);
3977 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3978 break;
3980 case NARROW:
3981 /* In case the vectorization factor (VF) is bigger than the number
3982 of elements that we can fit in a vectype (nunits), we have to
3983 generate more than one vector stmt - i.e - we need to "unroll"
3984 the vector stmt by a factor VF/nunits. */
3985 for (j = 0; j < ncopies; j++)
3987 /* Handle uses. */
3988 if (slp_node)
3989 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
3990 slp_node, -1);
3991 else
3993 vec_oprnds0.truncate (0);
3994 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
3995 vect_pow2 (multi_step_cvt) - 1);
3998 /* Arguments are ready. Create the new vector stmts. */
3999 if (cvt_type)
4000 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4002 if (codecvt1 == CALL_EXPR)
4004 new_stmt = gimple_build_call (decl1, 1, vop0);
4005 new_temp = make_ssa_name (vec_dest, new_stmt);
4006 gimple_call_set_lhs (new_stmt, new_temp);
4008 else
4010 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
4011 new_temp = make_ssa_name (vec_dest);
4012 new_stmt = gimple_build_assign (new_temp, codecvt1,
4013 vop0);
4016 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4017 vec_oprnds0[i] = new_temp;
4020 vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
4021 stmt, vec_dsts, gsi,
4022 slp_node, code1,
4023 &prev_stmt_info);
4026 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4027 break;
4030 vec_oprnds0.release ();
4031 vec_oprnds1.release ();
4032 vec_dsts.release ();
4033 interm_types.release ();
4035 return true;
4039 /* Function vectorizable_assignment.
4041 Check if STMT performs an assignment (copy) that can be vectorized.
4042 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4043 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4044 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
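/* Besides plain copies this also covers PAREN_EXPR and NOP/VIEW_CONVERT
   conversions that change neither the number of vector elements nor the
   vector size, e.g. a cast between int and unsigned int.  */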
4046 static bool
4047 vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
4048 gimple *vec_stmt, slp_tree slp_node)
4050 tree vec_dest;
4051 tree scalar_dest;
4052 tree op;
4053 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4054 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4055 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4056 tree new_temp;
4057 tree def;
4058 gimple def_stmt;
4059 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4060 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4061 int ncopies;
4062 int i, j;
4063 vec<tree> vec_oprnds = vNULL;
4064 tree vop;
4065 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4066 gimple new_stmt = NULL;
4067 stmt_vec_info prev_stmt_info = NULL;
4068 enum tree_code code;
4069 tree vectype_in;
4071 /* Multiple types in SLP are handled by creating the appropriate number of
4072 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4073 case of SLP. */
4074 if (slp_node || PURE_SLP_STMT (stmt_info))
4075 ncopies = 1;
4076 else
4077 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4079 gcc_assert (ncopies >= 1);
4081 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4082 return false;
4084 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4085 return false;
4087 /* Is vectorizable assignment? */
4088 if (!is_gimple_assign (stmt))
4089 return false;
4091 scalar_dest = gimple_assign_lhs (stmt);
4092 if (TREE_CODE (scalar_dest) != SSA_NAME)
4093 return false;
4095 code = gimple_assign_rhs_code (stmt);
4096 if (gimple_assign_single_p (stmt)
4097 || code == PAREN_EXPR
4098 || CONVERT_EXPR_CODE_P (code))
4099 op = gimple_assign_rhs1 (stmt);
4100 else
4101 return false;
4103 if (code == VIEW_CONVERT_EXPR)
4104 op = TREE_OPERAND (op, 0);
4106 if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
4107 &def_stmt, &def, &dt[0], &vectype_in))
4109 if (dump_enabled_p ())
4110 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4111 "use not simple.\n");
4112 return false;
4115 /* We can handle NOP_EXPR conversions that do not change the number
4116 of elements or the vector size. */
4117 if ((CONVERT_EXPR_CODE_P (code)
4118 || code == VIEW_CONVERT_EXPR)
4119 && (!vectype_in
4120 || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
4121 || (GET_MODE_SIZE (TYPE_MODE (vectype))
4122 != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
4123 return false;
4125 /* We do not handle bit-precision changes. */
4126 if ((CONVERT_EXPR_CODE_P (code)
4127 || code == VIEW_CONVERT_EXPR)
4128 && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
4129 && ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4130 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4131 || ((TYPE_PRECISION (TREE_TYPE (op))
4132 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op))))))
4133 /* But a conversion that does not change the bit-pattern is ok. */
4134 && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4135 > TYPE_PRECISION (TREE_TYPE (op)))
4136 && TYPE_UNSIGNED (TREE_TYPE (op))))
4138 if (dump_enabled_p ())
4139 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4140 "type conversion to/from bit-precision "
4141 "unsupported.\n");
4142 return false;
4145 if (!vec_stmt) /* transformation not required. */
4147 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
4148 if (dump_enabled_p ())
4149 dump_printf_loc (MSG_NOTE, vect_location,
4150 "=== vectorizable_assignment ===\n");
4151 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4152 return true;
4155 /** Transform. **/
4156 if (dump_enabled_p ())
4157 dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");
4159 /* Handle def. */
4160 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4162 /* Handle use. */
4163 for (j = 0; j < ncopies; j++)
4165 /* Handle uses. */
4166 if (j == 0)
4167 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node, -1);
4168 else
4169 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
4171 /* Arguments are ready. Create the new vector stmt. */
4172 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
4174 if (CONVERT_EXPR_CODE_P (code)
4175 || code == VIEW_CONVERT_EXPR)
4176 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
4177 new_stmt = gimple_build_assign (vec_dest, vop);
4178 new_temp = make_ssa_name (vec_dest, new_stmt);
4179 gimple_assign_set_lhs (new_stmt, new_temp);
4180 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4181 if (slp_node)
4182 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4185 if (slp_node)
4186 continue;
4188 if (j == 0)
4189 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4190 else
4191 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4193 prev_stmt_info = vinfo_for_stmt (new_stmt);
4196 vec_oprnds.release ();
4197 return true;
4201 /* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
4202 either as shift by a scalar or by a vector. */
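/* "By a scalar" means all lanes are shifted by the same scalar amount
   (optab_scalar); "by a vector" means each lane uses its own count taken
   from a second vector operand (optab_vector).  */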
4204 bool
4205 vect_supportable_shift (enum tree_code code, tree scalar_type)
4208 machine_mode vec_mode;
4209 optab optab;
4210 int icode;
4211 tree vectype;
4213 vectype = get_vectype_for_scalar_type (scalar_type);
4214 if (!vectype)
4215 return false;
4217 optab = optab_for_tree_code (code, vectype, optab_scalar);
4218 if (!optab
4219 || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
4221 optab = optab_for_tree_code (code, vectype, optab_vector);
4222 if (!optab
4223 || (optab_handler (optab, TYPE_MODE (vectype))
4224 == CODE_FOR_nothing))
4225 return false;
4228 vec_mode = TYPE_MODE (vectype);
4229 icode = (int) optab_handler (optab, vec_mode);
4230 if (icode == CODE_FOR_nothing)
4231 return false;
4233 return true;
4237 /* Function vectorizable_shift.
4239 Check if STMT performs a shift operation that can be vectorized.
4240 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4241 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4242 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4244 static bool
4245 vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
4246 gimple *vec_stmt, slp_tree slp_node)
4248 tree vec_dest;
4249 tree scalar_dest;
4250 tree op0, op1 = NULL;
4251 tree vec_oprnd1 = NULL_TREE;
4252 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4253 tree vectype;
4254 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4255 enum tree_code code;
4256 machine_mode vec_mode;
4257 tree new_temp;
4258 optab optab;
4259 int icode;
4260 machine_mode optab_op2_mode;
4261 tree def;
4262 gimple def_stmt;
4263 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4264 gimple new_stmt = NULL;
4265 stmt_vec_info prev_stmt_info;
4266 int nunits_in;
4267 int nunits_out;
4268 tree vectype_out;
4269 tree op1_vectype;
4270 int ncopies;
4271 int j, i;
4272 vec<tree> vec_oprnds0 = vNULL;
4273 vec<tree> vec_oprnds1 = vNULL;
4274 tree vop0, vop1;
4275 unsigned int k;
4276 bool scalar_shift_arg = true;
4277 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4278 int vf;
4280 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4281 return false;
4283 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4284 return false;
4286 /* Is STMT a vectorizable binary/unary operation? */
4287 if (!is_gimple_assign (stmt))
4288 return false;
4290 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4291 return false;
4293 code = gimple_assign_rhs_code (stmt);
4295 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
4296 || code == RROTATE_EXPR))
4297 return false;
4299 scalar_dest = gimple_assign_lhs (stmt);
4300 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4301 if (TYPE_PRECISION (TREE_TYPE (scalar_dest))
4302 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4304 if (dump_enabled_p ())
4305 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4306 "bit-precision shifts not supported.\n");
4307 return false;
4310 op0 = gimple_assign_rhs1 (stmt);
4311 if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
4312 &def_stmt, &def, &dt[0], &vectype))
4314 if (dump_enabled_p ())
4315 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4316 "use not simple.\n");
4317 return false;
4319 /* If op0 is an external or constant def, use a vector type with
4320 the same size as the output vector type. */
4321 if (!vectype)
4322 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
4323 if (vec_stmt)
4324 gcc_assert (vectype);
4325 if (!vectype)
4327 if (dump_enabled_p ())
4328 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4329 "no vectype for scalar type\n");
4330 return false;
4333 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4334 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
4335 if (nunits_out != nunits_in)
4336 return false;
4338 op1 = gimple_assign_rhs2 (stmt);
4339 if (!vect_is_simple_use_1 (op1, stmt, loop_vinfo, bb_vinfo, &def_stmt,
4340 &def, &dt[1], &op1_vectype))
4342 if (dump_enabled_p ())
4343 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4344 "use not simple.\n");
4345 return false;
4348 if (loop_vinfo)
4349 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4350 else
4351 vf = 1;
4353 /* Multiple types in SLP are handled by creating the appropriate number of
4354 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4355 case of SLP. */
4356 if (slp_node || PURE_SLP_STMT (stmt_info))
4357 ncopies = 1;
4358 else
4359 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
4361 gcc_assert (ncopies >= 1);
4363 /* Determine whether the shift amount is a vector, or scalar. If the
4364 shift/rotate amount is a vector, use the vector/vector shift optabs. */
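   /* E.g. 'x[i] << s' with a loop-invariant S shifts all lanes by one
      common count, whereas 'x[i] << y[i]' needs a per-lane count and
      therefore the vector/vector form.  */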
4366 if (dt[1] == vect_internal_def && !slp_node)
4367 scalar_shift_arg = false;
4368 else if (dt[1] == vect_constant_def
4369 || dt[1] == vect_external_def
4370 || dt[1] == vect_internal_def)
4372 /* In SLP, we need to check whether the shift count is the same for
4373 all statements; in loops, a constant or invariant count is always
4374 a scalar shift. */
4375 if (slp_node)
4377 vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
4378 gimple slpstmt;
4380 FOR_EACH_VEC_ELT (stmts, k, slpstmt)
4381 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
4382 scalar_shift_arg = false;
4385 else
4387 if (dump_enabled_p ())
4388 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4389 "operand mode requires invariant argument.\n");
4390 return false;
4393 /* Vector shifted by vector. */
4394 if (!scalar_shift_arg)
4396 optab = optab_for_tree_code (code, vectype, optab_vector);
4397 if (dump_enabled_p ())
4398 dump_printf_loc (MSG_NOTE, vect_location,
4399 "vector/vector shift/rotate found.\n");
4401 if (!op1_vectype)
4402 op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
4403 if (op1_vectype == NULL_TREE
4404 || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
4406 if (dump_enabled_p ())
4407 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4408 "unusable type for last operand in"
4409 " vector/vector shift/rotate.\n");
4410 return false;
4413 /* See if the machine has a vector shifted by scalar insn and if not
4414 then see if it has a vector shifted by vector insn. */
4415 else
4417 optab = optab_for_tree_code (code, vectype, optab_scalar);
4418 if (optab
4419 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
4421 if (dump_enabled_p ())
4422 dump_printf_loc (MSG_NOTE, vect_location,
4423 "vector/scalar shift/rotate found.\n");
4425 else
4427 optab = optab_for_tree_code (code, vectype, optab_vector);
4428 if (optab
4429 && (optab_handler (optab, TYPE_MODE (vectype))
4430 != CODE_FOR_nothing))
4432 scalar_shift_arg = false;
4434 if (dump_enabled_p ())
4435 dump_printf_loc (MSG_NOTE, vect_location,
4436 "vector/vector shift/rotate found.\n");
4438 /* Unlike the other binary operators, shifts/rotates have
4439 the rhs being int, instead of the same type as the lhs,
4440 so make sure the scalar is the right type if we are
4441 dealing with vectors of long long/long/short/char. */
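	  /* E.g. for a vector of long long, an int shift count is converted
	     to long long below so that it matches the vector's element
	     type before the vector shift is built.  */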
4442 if (dt[1] == vect_constant_def)
4443 op1 = fold_convert (TREE_TYPE (vectype), op1);
4444 else if (!useless_type_conversion_p (TREE_TYPE (vectype),
4445 TREE_TYPE (op1)))
4447 if (slp_node
4448 && TYPE_MODE (TREE_TYPE (vectype))
4449 != TYPE_MODE (TREE_TYPE (op1)))
4451 if (dump_enabled_p ())
4452 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4453 "unusable type for last operand in"
4454 " vector/vector shift/rotate.\n");
4455 return false;
4457 if (vec_stmt && !slp_node)
4459 op1 = fold_convert (TREE_TYPE (vectype), op1);
4460 op1 = vect_init_vector (stmt, op1,
4461 TREE_TYPE (vectype), NULL);
4468 /* Supportable by target? */
4469 if (!optab)
4471 if (dump_enabled_p ())
4472 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4473 "no optab.\n");
4474 return false;
4476 vec_mode = TYPE_MODE (vectype);
4477 icode = (int) optab_handler (optab, vec_mode);
4478 if (icode == CODE_FOR_nothing)
4480 if (dump_enabled_p ())
4481 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4482 "op not supported by target.\n");
4483 /* Check only during analysis. */
4484 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
4485 || (vf < vect_min_worthwhile_factor (code)
4486 && !vec_stmt))
4487 return false;
4488 if (dump_enabled_p ())
4489 dump_printf_loc (MSG_NOTE, vect_location,
4490 "proceeding using word mode.\n");
4493 /* Worthwhile without SIMD support? Check only during analysis. */
4494 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
4495 && vf < vect_min_worthwhile_factor (code)
4496 && !vec_stmt)
4498 if (dump_enabled_p ())
4499 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4500 "not worthwhile without SIMD support.\n");
4501 return false;
4504 if (!vec_stmt) /* transformation not required. */
4506 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
4507 if (dump_enabled_p ())
4508 dump_printf_loc (MSG_NOTE, vect_location,
4509 "=== vectorizable_shift ===\n");
4510 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4511 return true;
4514 /** Transform. **/
4516 if (dump_enabled_p ())
4517 dump_printf_loc (MSG_NOTE, vect_location,
4518 "transform binary/unary operation.\n");
4520 /* Handle def. */
4521 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4523 prev_stmt_info = NULL;
4524 for (j = 0; j < ncopies; j++)
4526 /* Handle uses. */
4527 if (j == 0)
4529 if (scalar_shift_arg)
4531 /* Vector shl and shr insn patterns can be defined with scalar
4532 operand 2 (shift operand). In this case, use constant or loop
4533 invariant op1 directly, without extending it to vector mode
4534 first. */
4535 optab_op2_mode = insn_data[icode].operand[2].mode;
4536 if (!VECTOR_MODE_P (optab_op2_mode))
4538 if (dump_enabled_p ())
4539 dump_printf_loc (MSG_NOTE, vect_location,
4540 "operand 1 using scalar mode.\n");
4541 vec_oprnd1 = op1;
4542 vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
4543 vec_oprnds1.quick_push (vec_oprnd1);
4544 if (slp_node)
4546 /* Store vec_oprnd1 for every vector stmt to be created
4547 for SLP_NODE. We check during the analysis that all
4548 the shift arguments are the same.
4549 TODO: Allow different constants for different vector
4550 stmts generated for an SLP instance. */
4551 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
4552 vec_oprnds1.quick_push (vec_oprnd1);
4557 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
4558 (a special case for certain kinds of vector shifts); otherwise,
4559 operand 1 should be of a vector type (the usual case). */
4560 if (vec_oprnd1)
4561 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4562 slp_node, -1);
4563 else
4564 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
4565 slp_node, -1);
4567 else
4568 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
4570 /* Arguments are ready. Create the new vector stmt. */
4571 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4573 vop1 = vec_oprnds1[i];
4574 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
4575 new_temp = make_ssa_name (vec_dest, new_stmt);
4576 gimple_assign_set_lhs (new_stmt, new_temp);
4577 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4578 if (slp_node)
4579 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4582 if (slp_node)
4583 continue;
4585 if (j == 0)
4586 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4587 else
4588 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4589 prev_stmt_info = vinfo_for_stmt (new_stmt);
4592 vec_oprnds0.release ();
4593 vec_oprnds1.release ();
4595 return true;
4599 /* Function vectorizable_operation.
4601 Check if STMT performs a binary, unary or ternary operation that can
4602 be vectorized.
4603 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4604 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4605 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4607 static bool
4608 vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
4609 gimple *vec_stmt, slp_tree slp_node)
4611 tree vec_dest;
4612 tree scalar_dest;
4613 tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
4614 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4615 tree vectype;
4616 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4617 enum tree_code code;
4618 machine_mode vec_mode;
4619 tree new_temp;
4620 int op_type;
4621 optab optab;
4622 int icode;
4623 tree def;
4624 gimple def_stmt;
4625 enum vect_def_type dt[3]
4626 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
4627 gimple new_stmt = NULL;
4628 stmt_vec_info prev_stmt_info;
4629 int nunits_in;
4630 int nunits_out;
4631 tree vectype_out;
4632 int ncopies;
4633 int j, i;
4634 vec<tree> vec_oprnds0 = vNULL;
4635 vec<tree> vec_oprnds1 = vNULL;
4636 vec<tree> vec_oprnds2 = vNULL;
4637 tree vop0, vop1, vop2;
4638 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4639 int vf;
4641 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4642 return false;
4644 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4645 return false;
4647 /* Is STMT a vectorizable binary/unary operation? */
4648 if (!is_gimple_assign (stmt))
4649 return false;
4651 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4652 return false;
4654 code = gimple_assign_rhs_code (stmt);
4656 /* For pointer addition, we should use the normal plus for
4657 the vector addition. */
4658 if (code == POINTER_PLUS_EXPR)
4659 code = PLUS_EXPR;
4661 /* Support only unary, binary, or ternary operations. */
4662 op_type = TREE_CODE_LENGTH (code);
4663 if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
4665 if (dump_enabled_p ())
4666 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4667 "num. args = %d (not unary/binary/ternary op).\n",
4668 op_type);
4669 return false;
4672 scalar_dest = gimple_assign_lhs (stmt);
4673 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4675 /* Most operations cannot handle bit-precision types without extra
4676 truncations. */
4677 if ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4678 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4679 /* Exceptions are bitwise binary operations. */
4680 && code != BIT_IOR_EXPR
4681 && code != BIT_XOR_EXPR
4682 && code != BIT_AND_EXPR)
4684 if (dump_enabled_p ())
4685 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4686 "bit-precision arithmetic not supported.\n");
4687 return false;
4690 op0 = gimple_assign_rhs1 (stmt);
4691 if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
4692 &def_stmt, &def, &dt[0], &vectype))
4694 if (dump_enabled_p ())
4695 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4696 "use not simple.\n");
4697 return false;
4699 /* If op0 is an external or constant def, use a vector type with
4700 the same size as the output vector type. */
4701 if (!vectype)
4702 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
4703 if (vec_stmt)
4704 gcc_assert (vectype);
4705 if (!vectype)
4707 if (dump_enabled_p ())
4709 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4710 "no vectype for scalar type ");
4711 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
4712 TREE_TYPE (op0));
4713 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4716 return false;
4719 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4720 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
4721 if (nunits_out != nunits_in)
4722 return false;
4724 if (op_type == binary_op || op_type == ternary_op)
4726 op1 = gimple_assign_rhs2 (stmt);
4727 if (!vect_is_simple_use (op1, stmt, loop_vinfo, bb_vinfo, &def_stmt,
4728 &def, &dt[1]))
4730 if (dump_enabled_p ())
4731 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4732 "use not simple.\n");
4733 return false;
4736 if (op_type == ternary_op)
4738 op2 = gimple_assign_rhs3 (stmt);
4739 if (!vect_is_simple_use (op2, stmt, loop_vinfo, bb_vinfo, &def_stmt,
4740 &def, &dt[2]))
4742 if (dump_enabled_p ())
4743 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4744 "use not simple.\n");
4745 return false;
4749 if (loop_vinfo)
4750 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4751 else
4752 vf = 1;
4754 /* Multiple types in SLP are handled by creating the appropriate number of
4755 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4756 case of SLP. */
4757 if (slp_node || PURE_SLP_STMT (stmt_info))
4758 ncopies = 1;
4759 else
4760 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
4762 gcc_assert (ncopies >= 1);
4764 /* Shifts are handled in vectorizable_shift (). */
4765 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
4766 || code == RROTATE_EXPR)
4767 return false;
4769 /* Supportable by target? */
4771 vec_mode = TYPE_MODE (vectype);
4772 if (code == MULT_HIGHPART_EXPR)
4774 if (can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype)))
4775 icode = LAST_INSN_CODE;
4776 else
4777 icode = CODE_FOR_nothing;
4779 else
4781 optab = optab_for_tree_code (code, vectype, optab_default);
4782 if (!optab)
4784 if (dump_enabled_p ())
4785 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4786 "no optab.\n");
4787 return false;
4789 icode = (int) optab_handler (optab, vec_mode);
4792 if (icode == CODE_FOR_nothing)
4794 if (dump_enabled_p ())
4795 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4796 "op not supported by target.\n");
4797 /* Check only during analysis. */
4798 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
4799 || (!vec_stmt && vf < vect_min_worthwhile_factor (code)))
4800 return false;
4801 if (dump_enabled_p ())
4802 dump_printf_loc (MSG_NOTE, vect_location,
4803 "proceeding using word mode.\n");
4806 /* Worthwhile without SIMD support? Check only during analysis. */
4807 if (!VECTOR_MODE_P (vec_mode)
4808 && !vec_stmt
4809 && vf < vect_min_worthwhile_factor (code))
4811 if (dump_enabled_p ())
4812 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4813 "not worthwhile without SIMD support.\n");
4814 return false;
4817 if (!vec_stmt) /* transformation not required. */
4819 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
4820 if (dump_enabled_p ())
4821 dump_printf_loc (MSG_NOTE, vect_location,
4822 "=== vectorizable_operation ===\n");
4823 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4824 return true;
4827 /** Transform. **/
4829 if (dump_enabled_p ())
4830 dump_printf_loc (MSG_NOTE, vect_location,
4831 "transform binary/unary operation.\n");
4833 /* Handle def. */
4834 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4836 /* In case the vectorization factor (VF) is bigger than the number
4837 of elements that we can fit in a vectype (nunits), we have to generate
4838 more than one vector stmt - i.e - we need to "unroll" the
4839 vector stmt by a factor VF/nunits. In doing so, we record a pointer
4840 from one copy of the vector stmt to the next, in the field
4841 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
4842 stages to find the correct vector defs to be used when vectorizing
4843 stmts that use the defs of the current stmt. The example below
4844 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
4845 we need to create 4 vectorized stmts):
4847 before vectorization:
4848 RELATED_STMT VEC_STMT
4849 S1: x = memref - -
4850 S2: z = x + 1 - -
4852 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
4853 there):
4854 RELATED_STMT VEC_STMT
4855 VS1_0: vx0 = memref0 VS1_1 -
4856 VS1_1: vx1 = memref1 VS1_2 -
4857 VS1_2: vx2 = memref2 VS1_3 -
4858 VS1_3: vx3 = memref3 - -
4859 S1: x = load - VS1_0
4860 S2: z = x + 1 - -
4862 step2: vectorize stmt S2 (done here):
4863 To vectorize stmt S2 we first need to find the relevant vector
4864 def for the first operand 'x'. This is, as usual, obtained from
4865 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
4866 that defines 'x' (S1). This way we find the stmt VS1_0, and the
4867 relevant vector def 'vx0'. Having found 'vx0' we can generate
4868 the vector stmt VS2_0, and as usual, record it in the
4869 STMT_VINFO_VEC_STMT of stmt S2.
4870 When creating the second copy (VS2_1), we obtain the relevant vector
4871 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
4872 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
4873 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
4874 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
4875 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
4876 chain of stmts and pointers:
4877 RELATED_STMT VEC_STMT
4878 VS1_0: vx0 = memref0 VS1_1 -
4879 VS1_1: vx1 = memref1 VS1_2 -
4880 VS1_2: vx2 = memref2 VS1_3 -
4881 VS1_3: vx3 = memref3 - -
4882 S1: x = load - VS1_0
4883 VS2_0: vz0 = vx0 + v1 VS2_1 -
4884 VS2_1: vz1 = vx1 + v1 VS2_2 -
4885 VS2_2: vz2 = vx2 + v1 VS2_3 -
4886 VS2_3: vz3 = vx3 + v1 - -
4887 S2: z = x + 1 - VS2_0 */
4889 prev_stmt_info = NULL;
4890 for (j = 0; j < ncopies; j++)
4892 /* Handle uses. */
4893 if (j == 0)
4895 if (op_type == binary_op || op_type == ternary_op)
4896 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
4897 slp_node, -1);
4898 else
4899 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4900 slp_node, -1);
4901 if (op_type == ternary_op)
4903 vec_oprnds2.create (1);
4904 vec_oprnds2.quick_push (vect_get_vec_def_for_operand (op2,
4905 stmt,
4906 NULL));
4909 else
4911 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
4912 if (op_type == ternary_op)
4914 tree vec_oprnd = vec_oprnds2.pop ();
4915 vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
4916 vec_oprnd));
4920 /* Arguments are ready. Create the new vector stmt. */
4921 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4923 vop1 = ((op_type == binary_op || op_type == ternary_op)
4924 ? vec_oprnds1[i] : NULL_TREE);
4925 vop2 = ((op_type == ternary_op)
4926 ? vec_oprnds2[i] : NULL_TREE);
4927 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2);
4928 new_temp = make_ssa_name (vec_dest, new_stmt);
4929 gimple_assign_set_lhs (new_stmt, new_temp);
4930 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4931 if (slp_node)
4932 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4935 if (slp_node)
4936 continue;
4938 if (j == 0)
4939 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4940 else
4941 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4942 prev_stmt_info = vinfo_for_stmt (new_stmt);
4945 vec_oprnds0.release ();
4946 vec_oprnds1.release ();
4947 vec_oprnds2.release ();
4949 return true;
4952 /* A helper function to ensure data reference DR's base alignment
4953 for STMT_INFO. */
4955 static void
4956 ensure_base_align (stmt_vec_info stmt_info, struct data_reference *dr)
4958 if (!dr->aux)
4959 return;
4961 if (((dataref_aux *)dr->aux)->base_misaligned)
4963 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4964 tree base_decl = ((dataref_aux *)dr->aux)->base_decl;
4966 if (decl_in_symtab_p (base_decl))
4967 symtab_node::get (base_decl)->increase_alignment (TYPE_ALIGN (vectype));
4968 else
4970 DECL_ALIGN (base_decl) = TYPE_ALIGN (vectype);
4971 DECL_USER_ALIGN (base_decl) = 1;
4973 ((dataref_aux *)dr->aux)->base_misaligned = false;
4978 /* Given a vector type VECTYPE returns the VECTOR_CST mask that implements
4979 reversal of the vector elements. If that is impossible to do,
4980 returns NULL. */
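/* E.g. for a 4-element vector the mask selects elements { 3, 2, 1, 0 }.  */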
4982 static tree
4983 perm_mask_for_reverse (tree vectype)
4985 int i, nunits;
4986 unsigned char *sel;
4988 nunits = TYPE_VECTOR_SUBPARTS (vectype);
4989 sel = XALLOCAVEC (unsigned char, nunits);
4991 for (i = 0; i < nunits; ++i)
4992 sel[i] = nunits - 1 - i;
4994 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
4995 return NULL_TREE;
4996 return vect_gen_perm_mask_checked (vectype, sel);
4999 /* Function vectorizable_store.
5001 Check if STMT defines a non-scalar data-ref (array/pointer/structure) that
5002 can be vectorized.
5003 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5004 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
5005 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5007 static bool
5008 vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
5009 slp_tree slp_node)
5011 tree scalar_dest;
5012 tree data_ref;
5013 tree op;
5014 tree vec_oprnd = NULL_TREE;
5015 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5016 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
5017 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5018 tree elem_type;
5019 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5020 struct loop *loop = NULL;
5021 machine_mode vec_mode;
5022 tree dummy;
5023 enum dr_alignment_support alignment_support_scheme;
5024 tree def;
5025 gimple def_stmt;
5026 enum vect_def_type dt;
5027 stmt_vec_info prev_stmt_info = NULL;
5028 tree dataref_ptr = NULL_TREE;
5029 tree dataref_offset = NULL_TREE;
5030 gimple ptr_incr = NULL;
5031 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
5032 int ncopies;
5033 int j;
5034 gimple next_stmt, first_stmt = NULL;
5035 bool grouped_store = false;
5036 bool store_lanes_p = false;
5037 unsigned int group_size, i;
5038 vec<tree> dr_chain = vNULL;
5039 vec<tree> oprnds = vNULL;
5040 vec<tree> result_chain = vNULL;
5041 bool inv_p;
5042 bool negative = false;
5043 tree offset = NULL_TREE;
5044 vec<tree> vec_oprnds = vNULL;
5045 bool slp = (slp_node != NULL);
5046 unsigned int vec_num;
5047 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5048 tree aggr_type;
5050 if (loop_vinfo)
5051 loop = LOOP_VINFO_LOOP (loop_vinfo);
5053 /* Multiple types in SLP are handled by creating the appropriate number of
5054 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5055 case of SLP. */
5056 if (slp || PURE_SLP_STMT (stmt_info))
5057 ncopies = 1;
5058 else
5059 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
5061 gcc_assert (ncopies >= 1);
5063 /* FORNOW. This restriction should be relaxed. */
5064 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
5066 if (dump_enabled_p ())
5067 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5068 "multiple types in nested loop.\n");
5069 return false;
5072 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5073 return false;
5075 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
5076 return false;
5078 /* Is vectorizable store? */
5080 if (!is_gimple_assign (stmt))
5081 return false;
5083 scalar_dest = gimple_assign_lhs (stmt);
5084 if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
5085 && is_pattern_stmt_p (stmt_info))
5086 scalar_dest = TREE_OPERAND (scalar_dest, 0);
5087 if (TREE_CODE (scalar_dest) != ARRAY_REF
5088 && TREE_CODE (scalar_dest) != BIT_FIELD_REF
5089 && TREE_CODE (scalar_dest) != INDIRECT_REF
5090 && TREE_CODE (scalar_dest) != COMPONENT_REF
5091 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
5092 && TREE_CODE (scalar_dest) != REALPART_EXPR
5093 && TREE_CODE (scalar_dest) != MEM_REF)
5094 return false;
5096 gcc_assert (gimple_assign_single_p (stmt));
5097 op = gimple_assign_rhs1 (stmt);
5098 if (!vect_is_simple_use (op, stmt, loop_vinfo, bb_vinfo, &def_stmt,
5099 &def, &dt))
5101 if (dump_enabled_p ())
5102 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5103 "use not simple.\n");
5104 return false;
5107 elem_type = TREE_TYPE (vectype);
5108 vec_mode = TYPE_MODE (vectype);
5110 /* FORNOW. In some cases can vectorize even if data-type not supported
5111 (e.g. - array initialization with 0). */
5112 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
5113 return false;
5115 if (!STMT_VINFO_DATA_REF (stmt_info))
5116 return false;
5118 if (!STMT_VINFO_STRIDED_P (stmt_info))
5120 negative =
5121 tree_int_cst_compare (loop && nested_in_vect_loop_p (loop, stmt)
5122 ? STMT_VINFO_DR_STEP (stmt_info) : DR_STEP (dr),
5123 size_zero_node) < 0;
5124 if (negative && ncopies > 1)
5126 if (dump_enabled_p ())
5127 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5128 "multiple types with negative step.\n");
5129 return false;
5131 if (negative)
5133 gcc_assert (!grouped_store);
5134 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
5135 if (alignment_support_scheme != dr_aligned
5136 && alignment_support_scheme != dr_unaligned_supported)
5138 if (dump_enabled_p ())
5139 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5140 "negative step but alignment required.\n");
5141 return false;
5143 if (dt != vect_constant_def
5144 && dt != vect_external_def
5145 && !perm_mask_for_reverse (vectype))
5147 if (dump_enabled_p ())
5148 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5149 "negative step and reversing not supported.\n");
5150 return false;
5155 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
5157 grouped_store = true;
5158 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
5159 if (!slp && !PURE_SLP_STMT (stmt_info))
5161 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
5162 if (vect_store_lanes_supported (vectype, group_size))
5163 store_lanes_p = true;
5164 else if (!vect_grouped_store_supported (vectype, group_size))
5165 return false;
5168 if (first_stmt == stmt)
5170 /* STMT is the leader of the group. Check the operands of all the
5171 stmts of the group. */
5172 next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
5173 while (next_stmt)
5175 gcc_assert (gimple_assign_single_p (next_stmt));
5176 op = gimple_assign_rhs1 (next_stmt);
5177 if (!vect_is_simple_use (op, next_stmt, loop_vinfo, bb_vinfo,
5178 &def_stmt, &def, &dt))
5180 if (dump_enabled_p ())
5181 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5182 "use not simple.\n");
5183 return false;
5185 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5190 if (!vec_stmt) /* transformation not required. */
5192 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
5193 /* The SLP costs are calculated during SLP analysis. */
5194 if (!PURE_SLP_STMT (stmt_info))
5195 vect_model_store_cost (stmt_info, ncopies, store_lanes_p, dt,
5196 NULL, NULL, NULL);
5197 return true;
5200 /** Transform. **/
5202 ensure_base_align (stmt_info, dr);
5204 if (grouped_store)
5206 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5207 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
5209 GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
5211 /* FORNOW */
5212 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
5214 /* We vectorize all the stmts of the interleaving group when we
5215 reach the last stmt in the group. */
5216 if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
5217 < GROUP_SIZE (vinfo_for_stmt (first_stmt))
5218 && !slp)
5220 *vec_stmt = NULL;
5221 return true;
5224 if (slp)
5226 grouped_store = false;
5227 /* VEC_NUM is the number of vect stmts to be created for this
5228 group. */
5229 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
5230 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
5231 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5232 op = gimple_assign_rhs1 (first_stmt);
5234 else
5235 /* VEC_NUM is the number of vect stmts to be created for this
5236 group. */
5237 vec_num = group_size;
5239 else
5241 first_stmt = stmt;
5242 first_dr = dr;
5243 group_size = vec_num = 1;
5246 if (dump_enabled_p ())
5247 dump_printf_loc (MSG_NOTE, vect_location,
5248 "transform store. ncopies = %d\n", ncopies);
5250 if (STMT_VINFO_STRIDED_P (stmt_info))
5252 gimple_stmt_iterator incr_gsi;
5253 bool insert_after;
5254 gimple incr;
5255 tree offvar;
5256 tree ivstep;
5257 tree running_off;
5258 gimple_seq stmts = NULL;
5259 tree stride_base, stride_step, alias_off;
5260 tree vec_oprnd;
5262 gcc_assert (!nested_in_vect_loop_p (loop, stmt));
5264 stride_base
5265 = fold_build_pointer_plus
5266 (unshare_expr (DR_BASE_ADDRESS (dr)),
5267 size_binop (PLUS_EXPR,
5268 convert_to_ptrofftype (unshare_expr (DR_OFFSET (dr))),
5269 convert_to_ptrofftype (DR_INIT(dr))));
5270 stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (dr)));
5272 /* For a store with loop-invariant (but other than power-of-2)
5273 stride (i.e. not a grouped access) like so:
5275 for (i = 0; i < n; i += stride)
5276 array[i] = ...;
5278 we generate a new induction variable and new stores from
5279 the components of the (vectorized) rhs:
5281 for (j = 0; ; j += VF*stride)
5282 vectemp = ...;
5283 tmp1 = vectemp[0];
5284 array[j] = tmp1;
5285 tmp2 = vectemp[1];
5286 array[j + stride] = tmp2;
5290 ivstep = stride_step;
5291 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
5292 build_int_cst (TREE_TYPE (ivstep),
5293 ncopies * nunits));
5295 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
5297 create_iv (stride_base, ivstep, NULL,
5298 loop, &incr_gsi, insert_after,
5299 &offvar, NULL);
5300 incr = gsi_stmt (incr_gsi);
5301 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
5303 stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
5304 if (stmts)
5305 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
5307 prev_stmt_info = NULL;
5308 running_off = offvar;
5309 alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (dr)), 0);
5310 for (j = 0; j < ncopies; j++)
5312 /* We've set op and dt above, from gimple_assign_rhs1(stmt),
5313 and first_stmt == stmt. */
5314 if (j == 0)
5315 vec_oprnd = vect_get_vec_def_for_operand (op, first_stmt, NULL);
5316 else
5317 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
5319 for (i = 0; i < nunits; i++)
5321 tree newref, newoff;
5322 gimple incr, assign;
5323 tree size = TYPE_SIZE (elem_type);
5324 /* Extract the i'th component. */
5325 tree pos = fold_build2 (MULT_EXPR, bitsizetype, bitsize_int (i),
5326 size);
5327 tree elem = fold_build3 (BIT_FIELD_REF, elem_type, vec_oprnd,
5328 size, pos);
5330 elem = force_gimple_operand_gsi (gsi, elem, true,
5331 NULL_TREE, true,
5332 GSI_SAME_STMT);
5334 newref = build2 (MEM_REF, TREE_TYPE (vectype),
5335 running_off, alias_off);
5337 /* And store it to *running_off. */
5338 assign = gimple_build_assign (newref, elem);
5339 vect_finish_stmt_generation (stmt, assign, gsi);
5341 newoff = copy_ssa_name (running_off, NULL);
5342 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
5343 running_off, stride_step);
5344 vect_finish_stmt_generation (stmt, incr, gsi);
5346 running_off = newoff;
5347 if (j == 0 && i == 0)
5348 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = assign;
5349 else
5350 STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign;
5351 prev_stmt_info = vinfo_for_stmt (assign);
5354 return true;
5357 dr_chain.create (group_size);
5358 oprnds.create (group_size);
5360 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
5361 gcc_assert (alignment_support_scheme);
5362 /* Targets with store-lane instructions must not require explicit
5363 realignment. */
5364 gcc_assert (!store_lanes_p
5365 || alignment_support_scheme == dr_aligned
5366 || alignment_support_scheme == dr_unaligned_supported);
5368 if (negative)
5369 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
5371 if (store_lanes_p)
5372 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
5373 else
5374 aggr_type = vectype;
5376 /* In case the vectorization factor (VF) is bigger than the number
5377 of elements that we can fit in a vectype (nunits), we have to generate
5378 more than one vector stmt - i.e - we need to "unroll" the
5379 vector stmt by a factor VF/nunits. For more details see documentation in
5380 vect_get_vec_def_for_copy_stmt. */
5382 /* In case of interleaving (non-unit grouped access):
5384 S1: &base + 2 = x2
5385 S2: &base = x0
5386 S3: &base + 1 = x1
5387 S4: &base + 3 = x3
5389 We create vectorized stores starting from base address (the access of the
5390 first stmt in the chain (S2 in the above example), when the last store stmt
5391 of the chain (S4) is reached:
5393 VS1: &base = vx2
5394 VS2: &base + vec_size*1 = vx0
5395 VS3: &base + vec_size*2 = vx1
5396 VS4: &base + vec_size*3 = vx3
5398 Then permutation statements are generated:
5400 VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
5401 VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
5404 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
5405 (the order of the data-refs in the output of vect_permute_store_chain
5406 corresponds to the order of scalar stmts in the interleaving chain - see
5407 the documentation of vect_permute_store_chain()).
5409 In case of both multiple types and interleaving, above vector stores and
5410 permutation stmts are created for every copy. The result vector stmts are
5411 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
5412 STMT_VINFO_RELATED_STMT for the next copies.
5415 prev_stmt_info = NULL;
5416 for (j = 0; j < ncopies; j++)
5418 gimple new_stmt;
5420 if (j == 0)
5422 if (slp)
5424 /* Get vectorized arguments for SLP_NODE. */
5425 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
5426 NULL, slp_node, -1);
5428 vec_oprnd = vec_oprnds[0];
5430 else
5432 /* For interleaved stores we collect vectorized defs for all the
5433 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
5434 used as an input to vect_permute_store_chain(), and OPRNDS as
5435 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
5437 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
5438 OPRNDS are of size 1. */
5439 next_stmt = first_stmt;
5440 for (i = 0; i < group_size; i++)
5442 /* Since gaps are not supported for interleaved stores,
5443 GROUP_SIZE is the exact number of stmts in the chain.
5444 Therefore, NEXT_STMT can't be NULL_TREE. In case that
5445 there is no interleaving, GROUP_SIZE is 1, and only one
5446 iteration of the loop will be executed. */
5447 gcc_assert (next_stmt
5448 && gimple_assign_single_p (next_stmt));
5449 op = gimple_assign_rhs1 (next_stmt);
5451 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
5452 NULL);
5453 dr_chain.quick_push (vec_oprnd);
5454 oprnds.quick_push (vec_oprnd);
5455 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5459 /* We should have caught mismatched types earlier. */
5460 gcc_assert (useless_type_conversion_p (vectype,
5461 TREE_TYPE (vec_oprnd)));
5462 bool simd_lane_access_p
5463 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
5464 if (simd_lane_access_p
5465 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
5466 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
5467 && integer_zerop (DR_OFFSET (first_dr))
5468 && integer_zerop (DR_INIT (first_dr))
5469 && alias_sets_conflict_p (get_alias_set (aggr_type),
5470 get_alias_set (DR_REF (first_dr))))
5472 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
5473 dataref_offset = build_int_cst (reference_alias_ptr_type
5474 (DR_REF (first_dr)), 0);
5475 inv_p = false;
5477 else
5478 dataref_ptr
5479 = vect_create_data_ref_ptr (first_stmt, aggr_type,
5480 simd_lane_access_p ? loop : NULL,
5481 offset, &dummy, gsi, &ptr_incr,
5482 simd_lane_access_p, &inv_p);
5483 gcc_assert (bb_vinfo || !inv_p);
5485 else
5487 /* For interleaved stores we created vectorized defs for all the
5488 defs stored in OPRNDS in the previous iteration (previous copy).
5489 DR_CHAIN is then used as an input to vect_permute_store_chain(),
5490 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
5491 next copy.
5492 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
5493 OPRNDS are of size 1. */
5494 for (i = 0; i < group_size; i++)
5496 op = oprnds[i];
5497 vect_is_simple_use (op, NULL, loop_vinfo, bb_vinfo, &def_stmt,
5498 &def, &dt);
5499 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
5500 dr_chain[i] = vec_oprnd;
5501 oprnds[i] = vec_oprnd;
5503 if (dataref_offset)
5504 dataref_offset
5505 = int_const_binop (PLUS_EXPR, dataref_offset,
5506 TYPE_SIZE_UNIT (aggr_type));
5507 else
5508 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
5509 TYPE_SIZE_UNIT (aggr_type));
5512 if (store_lanes_p)
5514 tree vec_array;
5516 /* Combine all the vectors into an array. */
5517 vec_array = create_vector_array (vectype, vec_num);
5518 for (i = 0; i < vec_num; i++)
5520 vec_oprnd = dr_chain[i];
5521 write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
5524 /* Emit:
5525 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
5526 data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
5527 new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array);
5528 gimple_call_set_lhs (new_stmt, data_ref);
5529 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5531 else
5533 new_stmt = NULL;
5534 if (grouped_store)
5536 if (j == 0)
5537 result_chain.create (group_size);
5538 /* Permute. */
5539 vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
5540 &result_chain);
5543 next_stmt = first_stmt;
5544 for (i = 0; i < vec_num; i++)
5546 unsigned align, misalign;
5548 if (i > 0)
5549 /* Bump the vector pointer. */
5550 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
5551 stmt, NULL_TREE);
5553 if (slp)
5554 vec_oprnd = vec_oprnds[i];
5555 else if (grouped_store)
5556 /* For grouped stores vectorized defs are interleaved in
5557 vect_permute_store_chain(). */
5558 vec_oprnd = result_chain[i];
5560 data_ref = build2 (MEM_REF, TREE_TYPE (vec_oprnd), dataref_ptr,
5561 dataref_offset
5562 ? dataref_offset
5563 : build_int_cst (reference_alias_ptr_type
5564 (DR_REF (first_dr)), 0));
5565 align = TYPE_ALIGN_UNIT (vectype);
5566 if (aligned_access_p (first_dr))
5567 misalign = 0;
5568 else if (DR_MISALIGNMENT (first_dr) == -1)
5570 TREE_TYPE (data_ref)
5571 = build_aligned_type (TREE_TYPE (data_ref),
5572 TYPE_ALIGN (elem_type));
5573 align = TYPE_ALIGN_UNIT (elem_type);
5574 misalign = 0;
5576 else
5578 TREE_TYPE (data_ref)
5579 = build_aligned_type (TREE_TYPE (data_ref),
5580 TYPE_ALIGN (elem_type));
5581 misalign = DR_MISALIGNMENT (first_dr);
5583 if (dataref_offset == NULL_TREE)
5584 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
5585 misalign);
5587 if (negative
5588 && dt != vect_constant_def
5589 && dt != vect_external_def)
5591 tree perm_mask = perm_mask_for_reverse (vectype);
5592 tree perm_dest
5593 = vect_create_destination_var (gimple_assign_rhs1 (stmt),
5594 vectype);
5595 tree new_temp = make_ssa_name (perm_dest);
5597 /* Generate the permute statement. */
5598 gimple perm_stmt
5599 = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
5600 vec_oprnd, perm_mask);
5601 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5603 perm_stmt = SSA_NAME_DEF_STMT (new_temp);
5604 vec_oprnd = new_temp;
5607 /* Arguments are ready. Create the new vector stmt. */
5608 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
5609 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5611 if (slp)
5612 continue;
5614 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5615 if (!next_stmt)
5616 break;
5619 if (!slp)
5621 if (j == 0)
5622 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5623 else
5624 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5625 prev_stmt_info = vinfo_for_stmt (new_stmt);
5629 dr_chain.release ();
5630 oprnds.release ();
5631 result_chain.release ();
5632 vec_oprnds.release ();
5634 return true;
5637 /* Given a vector type VECTYPE, turns permutation SEL into the equivalent
5638 VECTOR_CST mask. No checks are made that the target platform supports the
5639 mask, so callers may wish to test can_vec_perm_p separately, or use
5640 vect_gen_perm_mask_checked. */
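/* E.g. for a 4-element VECTYPE and SEL = { 3, 2, 1, 0 } the result is the
   constant mask { 3, 2, 1, 0 } that reverses a vector.  */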
5642 tree
5643 vect_gen_perm_mask_any (tree vectype, const unsigned char *sel)
5645 tree mask_elt_type, mask_type, mask_vec, *mask_elts;
5646 int i, nunits;
5648 nunits = TYPE_VECTOR_SUBPARTS (vectype);
5650 mask_elt_type = lang_hooks.types.type_for_mode
5651 (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
5652 mask_type = get_vectype_for_scalar_type (mask_elt_type);
5654 mask_elts = XALLOCAVEC (tree, nunits);
5655 for (i = nunits - 1; i >= 0; i--)
5656 mask_elts[i] = build_int_cst (mask_elt_type, sel[i]);
5657 mask_vec = build_vector (mask_type, mask_elts);
5659 return mask_vec;
5662 /* Checked version of vect_gen_perm_mask_any. Asserts can_vec_perm_p,
5663 i.e. that the target supports the pattern _for arbitrary input vectors_. */
5665 tree
5666 vect_gen_perm_mask_checked (tree vectype, const unsigned char *sel)
5668 gcc_assert (can_vec_perm_p (TYPE_MODE (vectype), false, sel));
5669 return vect_gen_perm_mask_any (vectype, sel);
5672 /* Given vector variables X and Y that were generated for the scalar
5673 STMT, generate instructions to permute the vector elements of X and Y
5674 using permutation mask MASK_VEC, insert them at *GSI and return the
5675 permuted vector variable. */
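/* A minimal sketch of the GIMPLE that is emitted (names illustrative only):

     perm_dest_1 = VEC_PERM_EXPR <x, y, mask_vec>;

   element I of the result is taken from the concatenation of X and Y at
   the position given by element I of MASK_VEC.  */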
5677 static tree
5678 permute_vec_elements (tree x, tree y, tree mask_vec, gimple stmt,
5679 gimple_stmt_iterator *gsi)
5681 tree vectype = TREE_TYPE (x);
5682 tree perm_dest, data_ref;
5683 gimple perm_stmt;
5685 perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
5686 data_ref = make_ssa_name (perm_dest);
5688 /* Generate the permute statement. */
5689 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
5690 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5692 return data_ref;
5695 /* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
5696 inserting them on the loop's preheader edge. Returns true if we
5697 were successful in doing so (and thus STMT can then be moved),
5698 otherwise returns false. */
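/* Informally (descriptive note, not part of the original contract): a use
   whose definition is a PHI node, or whose definition itself has operands
   defined inside LOOP, makes the hoist fail; otherwise each in-loop
   definition is moved onto the preheader edge.  */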
5700 static bool
5701 hoist_defs_of_uses (gimple stmt, struct loop *loop)
5703 ssa_op_iter i;
5704 tree op;
5705 bool any = false;
5707 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
5709 gimple def_stmt = SSA_NAME_DEF_STMT (op);
5710 if (!gimple_nop_p (def_stmt)
5711 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
5713 /* Make sure we don't need to recurse. While we could do
5714 so in simple cases, when there are more complex use webs
5715 we don't have an easy way to preserve stmt order to fulfil
5716 dependencies within them. */
5717 tree op2;
5718 ssa_op_iter i2;
5719 if (gimple_code (def_stmt) == GIMPLE_PHI)
5720 return false;
5721 FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
5723 gimple def_stmt2 = SSA_NAME_DEF_STMT (op2);
5724 if (!gimple_nop_p (def_stmt2)
5725 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
5726 return false;
5728 any = true;
5732 if (!any)
5733 return true;
5735 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
5737 gimple def_stmt = SSA_NAME_DEF_STMT (op);
5738 if (!gimple_nop_p (def_stmt)
5739 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
5741 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
5742 gsi_remove (&gsi, false);
5743 gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
5747 return true;
5750 /* vectorizable_load.
5752 Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
5753 can be vectorized.
5754 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5755 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
5756 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
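/* Overview (descriptive note): the transformation below covers several
   access schemes - gather loads through a target builtin, strided loads
   assembled element-wise into a CONSTRUCTOR, grouped loads using either
   LOAD_LANES or load-plus-permute, contiguous loads with optional explicit
   realignment, and invariant loads hoisted to the preheader.  */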
5758 static bool
5759 vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
5760 slp_tree slp_node, slp_instance slp_node_instance)
5762 tree scalar_dest;
5763 tree vec_dest = NULL;
5764 tree data_ref = NULL;
5765 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5766 stmt_vec_info prev_stmt_info;
5767 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5768 struct loop *loop = NULL;
5769 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
5770 bool nested_in_vect_loop = false;
5771 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
5772 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5773 tree elem_type;
5774 tree new_temp;
5775 machine_mode mode;
5776 gimple new_stmt = NULL;
5777 tree dummy;
5778 enum dr_alignment_support alignment_support_scheme;
5779 tree dataref_ptr = NULL_TREE;
5780 tree dataref_offset = NULL_TREE;
5781 gimple ptr_incr = NULL;
5782 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
5783 int ncopies;
5784 int i, j, group_size = -1, group_gap;
5785 tree msq = NULL_TREE, lsq;
5786 tree offset = NULL_TREE;
5787 tree byte_offset = NULL_TREE;
5788 tree realignment_token = NULL_TREE;
5789 gphi *phi = NULL;
5790 vec<tree> dr_chain = vNULL;
5791 bool grouped_load = false;
5792 bool load_lanes_p = false;
5793 gimple first_stmt;
5794 bool inv_p;
5795 bool negative = false;
5796 bool compute_in_loop = false;
5797 struct loop *at_loop;
5798 int vec_num;
5799 bool slp = (slp_node != NULL);
5800 bool slp_perm = false;
5801 enum tree_code code;
5802 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5803 int vf;
5804 tree aggr_type;
5805 tree gather_base = NULL_TREE, gather_off = NULL_TREE;
5806 tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
5807 int gather_scale = 1;
5808 enum vect_def_type gather_dt = vect_unknown_def_type;
5810 if (loop_vinfo)
5812 loop = LOOP_VINFO_LOOP (loop_vinfo);
5813 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
5814 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
5816 else
5817 vf = 1;
5819 /* Multiple types in SLP are handled by creating the appropriate number of
5820 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5821 case of SLP. */
5822 if (slp || PURE_SLP_STMT (stmt_info))
5823 ncopies = 1;
5824 else
5825 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
5827 gcc_assert (ncopies >= 1);
5829 /* FORNOW. This restriction should be relaxed. */
5830 if (nested_in_vect_loop && ncopies > 1)
5832 if (dump_enabled_p ())
5833 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5834 "multiple types in nested loop.\n");
5835 return false;
5838 /* Invalidate assumptions made by dependence analysis when vectorization
5839 on the unrolled body effectively re-orders stmts. */
5840 if (ncopies > 1
5841 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
5842 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
5843 > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
5845 if (dump_enabled_p ())
5846 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5847 "cannot perform implicit CSE when unrolling "
5848 "with negative dependence distance\n");
5849 return false;
5852 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5853 return false;
5855 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
5856 return false;
5858 /* Is vectorizable load? */
5859 if (!is_gimple_assign (stmt))
5860 return false;
5862 scalar_dest = gimple_assign_lhs (stmt);
5863 if (TREE_CODE (scalar_dest) != SSA_NAME)
5864 return false;
5866 code = gimple_assign_rhs_code (stmt);
5867 if (code != ARRAY_REF
5868 && code != BIT_FIELD_REF
5869 && code != INDIRECT_REF
5870 && code != COMPONENT_REF
5871 && code != IMAGPART_EXPR
5872 && code != REALPART_EXPR
5873 && code != MEM_REF
5874 && TREE_CODE_CLASS (code) != tcc_declaration)
5875 return false;
5877 if (!STMT_VINFO_DATA_REF (stmt_info))
5878 return false;
5880 elem_type = TREE_TYPE (vectype);
5881 mode = TYPE_MODE (vectype);
5883 /* FORNOW. In some cases can vectorize even if data-type not supported
5884 (e.g. - data copies). */
5885 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
5887 if (dump_enabled_p ())
5888 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5889 "Aligned load, but unsupported type.\n");
5890 return false;
5893 /* Check if the load is a part of an interleaving chain. */
5894 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
5896 grouped_load = true;
5897 /* FORNOW */
5898 gcc_assert (! nested_in_vect_loop && !STMT_VINFO_GATHER_P (stmt_info));
5900 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
5902 /* If this is single-element interleaving with an element distance
5903 that leaves unused vector loads around, punt: we at least create
5904 very sub-optimal code in that case (and blow up memory,
5905 see PR65518). */
5906 if (first_stmt == stmt
5907 && !GROUP_NEXT_ELEMENT (stmt_info)
5908 && GROUP_SIZE (stmt_info) > TYPE_VECTOR_SUBPARTS (vectype))
5910 if (dump_enabled_p ())
5911 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5912 "single-element interleaving not supported "
5913 "for not adjacent vector loads\n");
5914 return false;
5917 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
5918 if (!slp
5919 && !PURE_SLP_STMT (stmt_info)
5920 && !STMT_VINFO_STRIDED_P (stmt_info))
5922 if (vect_load_lanes_supported (vectype, group_size))
5923 load_lanes_p = true;
5924 else if (!vect_grouped_load_supported (vectype, group_size))
5925 return false;
5928 /* Invalidate assumptions made by dependence analysis when vectorization
5929 on the unrolled body effectively re-orders stmts. */
5930 if (!PURE_SLP_STMT (stmt_info)
5931 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
5932 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
5933 > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
5935 if (dump_enabled_p ())
5936 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5937 "cannot perform implicit CSE when performing "
5938 "group loads with negative dependence distance\n");
5939 return false;
5942 /* Similarly when the stmt is a load that is both part of a SLP
5943 instance and a loop vectorized stmt via the same-dr mechanism
5944 we have to give up. */
5945 if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)
5946 && (STMT_SLP_TYPE (stmt_info)
5947 != STMT_SLP_TYPE (vinfo_for_stmt
5948 (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)))))
5950 if (dump_enabled_p ())
5951 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5952 "conflicting SLP types for CSEd load\n");
5953 return false;
5958 if (STMT_VINFO_GATHER_P (stmt_info))
5960 gimple def_stmt;
5961 tree def;
5962 gather_decl = vect_check_gather (stmt, loop_vinfo, &gather_base,
5963 &gather_off, &gather_scale);
5964 gcc_assert (gather_decl);
5965 if (!vect_is_simple_use_1 (gather_off, NULL, loop_vinfo, bb_vinfo,
5966 &def_stmt, &def, &gather_dt,
5967 &gather_off_vectype))
5969 if (dump_enabled_p ())
5970 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5971 "gather index use not simple.\n");
5972 return false;
5975 else if (STMT_VINFO_STRIDED_P (stmt_info))
5977 if ((grouped_load
5978 && (slp || PURE_SLP_STMT (stmt_info)))
5979 && (group_size > nunits
5980 || nunits % group_size != 0
5981 /* ??? During the analysis phase we are not called with the
5982 SLP node/instance we are in, so we do not know whether
5983 we will end up with a permutation. Still, we do not
5984 support load permutations. */
5985 || slp_perm))
5987 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5988 "unhandled strided group load\n");
5989 return false;
5992 else
5994 negative = tree_int_cst_compare (nested_in_vect_loop
5995 ? STMT_VINFO_DR_STEP (stmt_info)
5996 : DR_STEP (dr),
5997 size_zero_node) < 0;
5998 if (negative && ncopies > 1)
6000 if (dump_enabled_p ())
6001 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6002 "multiple types with negative step.\n");
6003 return false;
6006 if (negative)
6008 if (grouped_load)
6010 if (dump_enabled_p ())
6011 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6012 "negative step for group load not supported"
6013 "\n");
6014 return false;
6016 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
6017 if (alignment_support_scheme != dr_aligned
6018 && alignment_support_scheme != dr_unaligned_supported)
6020 if (dump_enabled_p ())
6021 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6022 "negative step but alignment required.\n");
6023 return false;
6025 if (!perm_mask_for_reverse (vectype))
6027 if (dump_enabled_p ())
6028 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6029 "negative step and reversing not supported."
6030 "\n");
6031 return false;
6036 if (!vec_stmt) /* transformation not required. */
6038 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
6039 /* The SLP costs are calculated during SLP analysis. */
6040 if (!PURE_SLP_STMT (stmt_info))
6041 vect_model_load_cost (stmt_info, ncopies, load_lanes_p,
6042 NULL, NULL, NULL);
6043 return true;
6046 if (dump_enabled_p ())
6047 dump_printf_loc (MSG_NOTE, vect_location,
6048 "transform load. ncopies = %d\n", ncopies);
6050 /** Transform. **/
6052 ensure_base_align (stmt_info, dr);
6054 if (STMT_VINFO_GATHER_P (stmt_info))
6056 tree vec_oprnd0 = NULL_TREE, op;
6057 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
6058 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
6059 tree ptr, mask, var, scale, merge, perm_mask = NULL_TREE, prev_res = NULL_TREE;
6060 edge pe = loop_preheader_edge (loop);
6061 gimple_seq seq;
6062 basic_block new_bb;
6063 enum { NARROW, NONE, WIDEN } modifier;
6064 int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);
6066 if (nunits == gather_off_nunits)
6067 modifier = NONE;
6068 else if (nunits == gather_off_nunits / 2)
6070 unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
6071 modifier = WIDEN;
6073 for (i = 0; i < gather_off_nunits; ++i)
6074 sel[i] = i | nunits;
6076 perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
6078 else if (nunits == gather_off_nunits * 2)
6080 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
6081 modifier = NARROW;
6083 for (i = 0; i < nunits; ++i)
6084 sel[i] = i < gather_off_nunits
6085 ? i : i + nunits - gather_off_nunits;
6087 perm_mask = vect_gen_perm_mask_checked (vectype, sel);
6088 ncopies *= 2;
6090 else
6091 gcc_unreachable ();
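      /* For instance (counts illustrative only): with nunits == 4 and
         gather_off_nunits == 8 we take the WIDEN path and sel becomes
         { 4, 5, 6, 7, 4, 5, 6, 7 }, selecting the high half of the
         offset vector for the odd copies; with nunits == 8 and
         gather_off_nunits == 4 we take the NARROW path and sel becomes
         { 0, 1, 2, 3, 8, 9, 10, 11 }, concatenating the first four
         elements of two successive gather results.  */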
6093 rettype = TREE_TYPE (TREE_TYPE (gather_decl));
6094 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6095 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6096 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6097 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6098 scaletype = TREE_VALUE (arglist);
6099 gcc_checking_assert (types_compatible_p (srctype, rettype));
6101 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6103 ptr = fold_convert (ptrtype, gather_base);
6104 if (!is_gimple_min_invariant (ptr))
6106 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
6107 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
6108 gcc_assert (!new_bb);
6111 /* Currently we support only unconditional gather loads,
6112 so mask should be all ones. */
6113 if (TREE_CODE (masktype) == INTEGER_TYPE)
6114 mask = build_int_cst (masktype, -1);
6115 else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
6117 mask = build_int_cst (TREE_TYPE (masktype), -1);
6118 mask = build_vector_from_val (masktype, mask);
6119 mask = vect_init_vector (stmt, mask, masktype, NULL);
6121 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
6123 REAL_VALUE_TYPE r;
6124 long tmp[6];
6125 for (j = 0; j < 6; ++j)
6126 tmp[j] = -1;
6127 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
6128 mask = build_real (TREE_TYPE (masktype), r);
6129 mask = build_vector_from_val (masktype, mask);
6130 mask = vect_init_vector (stmt, mask, masktype, NULL);
6132 else
6133 gcc_unreachable ();
6135 scale = build_int_cst (scaletype, gather_scale);
6137 if (TREE_CODE (TREE_TYPE (rettype)) == INTEGER_TYPE)
6138 merge = build_int_cst (TREE_TYPE (rettype), 0);
6139 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype)))
6141 REAL_VALUE_TYPE r;
6142 long tmp[6];
6143 for (j = 0; j < 6; ++j)
6144 tmp[j] = 0;
6145 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (rettype)));
6146 merge = build_real (TREE_TYPE (rettype), r);
6148 else
6149 gcc_unreachable ();
6150 merge = build_vector_from_val (rettype, merge);
6151 merge = vect_init_vector (stmt, merge, rettype, NULL);
6153 prev_stmt_info = NULL;
6154 for (j = 0; j < ncopies; ++j)
6156 if (modifier == WIDEN && (j & 1))
6157 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
6158 perm_mask, stmt, gsi);
6159 else if (j == 0)
6160 op = vec_oprnd0
6161 = vect_get_vec_def_for_operand (gather_off, stmt, NULL);
6162 else
6163 op = vec_oprnd0
6164 = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);
6166 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
6168 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
6169 == TYPE_VECTOR_SUBPARTS (idxtype));
6170 var = vect_get_new_vect_var (idxtype, vect_simple_var, NULL);
6171 var = make_ssa_name (var);
6172 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
6173 new_stmt
6174 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
6175 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6176 op = var;
6179 new_stmt
6180 = gimple_build_call (gather_decl, 5, merge, ptr, op, mask, scale);
6182 if (!useless_type_conversion_p (vectype, rettype))
6184 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
6185 == TYPE_VECTOR_SUBPARTS (rettype));
6186 var = vect_get_new_vect_var (rettype, vect_simple_var, NULL);
6187 op = make_ssa_name (var, new_stmt);
6188 gimple_call_set_lhs (new_stmt, op);
6189 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6190 var = make_ssa_name (vec_dest);
6191 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
6192 new_stmt
6193 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
6195 else
6197 var = make_ssa_name (vec_dest, new_stmt);
6198 gimple_call_set_lhs (new_stmt, var);
6201 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6203 if (modifier == NARROW)
6205 if ((j & 1) == 0)
6207 prev_res = var;
6208 continue;
6210 var = permute_vec_elements (prev_res, var,
6211 perm_mask, stmt, gsi);
6212 new_stmt = SSA_NAME_DEF_STMT (var);
6215 if (prev_stmt_info == NULL)
6216 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6217 else
6218 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6219 prev_stmt_info = vinfo_for_stmt (new_stmt);
6221 return true;
6223 else if (STMT_VINFO_STRIDED_P (stmt_info))
6225 gimple_stmt_iterator incr_gsi;
6226 bool insert_after;
6227 gimple incr;
6228 tree offvar;
6229 tree ivstep;
6230 tree running_off;
6231 vec<constructor_elt, va_gc> *v = NULL;
6232 gimple_seq stmts = NULL;
6233 tree stride_base, stride_step, alias_off;
6235 gcc_assert (!nested_in_vect_loop);
6237 stride_base
6238 = fold_build_pointer_plus
6239 (unshare_expr (DR_BASE_ADDRESS (dr)),
6240 size_binop (PLUS_EXPR,
6241 convert_to_ptrofftype (unshare_expr (DR_OFFSET (dr))),
6242 convert_to_ptrofftype (DR_INIT (dr))));
6243 stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (dr)));
6245 /* For a load with loop-invariant (but other than power-of-2)
6246 stride (i.e. not a grouped access) like so:
6248 for (i = 0; i < n; i += stride)
6249 ... = array[i];
6251 we generate a new induction variable and new accesses to
6252 form a new vector (or vectors, depending on ncopies):
6254 for (j = 0; ; j += VF*stride)
6255 tmp1 = array[j];
6256 tmp2 = array[j + stride];
6258 vectemp = {tmp1, tmp2, ...}
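         For instance, with VF == 4 (an illustrative value) the new body
         loads array[j], array[j + stride], array[j + 2*stride] and
         array[j + 3*stride], builds one vector from them, and advances j
         by 4*stride per iteration.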
6261 ivstep = stride_step;
6262 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
6263 build_int_cst (TREE_TYPE (ivstep), vf));
6265 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
6267 create_iv (stride_base, ivstep, NULL,
6268 loop, &incr_gsi, insert_after,
6269 &offvar, NULL);
6270 incr = gsi_stmt (incr_gsi);
6271 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
6273 stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
6274 if (stmts)
6275 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
6277 prev_stmt_info = NULL;
6278 running_off = offvar;
6279 alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (dr)), 0);
6280 int nloads = nunits;
6281 tree ltype = TREE_TYPE (vectype);
6282 if (slp)
6284 nloads = nunits / group_size;
6285 if (group_size < nunits)
6286 ltype = build_vector_type (TREE_TYPE (vectype), group_size);
6287 else
6288 ltype = vectype;
6289 ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
6290 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6291 gcc_assert (!slp_perm);
6293 for (j = 0; j < ncopies; j++)
6295 tree vec_inv;
6297 if (nloads > 1)
6299 vec_alloc (v, nloads);
6300 for (i = 0; i < nloads; i++)
6302 tree newref, newoff;
6303 gimple incr;
6304 newref = build2 (MEM_REF, ltype, running_off, alias_off);
6306 newref = force_gimple_operand_gsi (gsi, newref, true,
6307 NULL_TREE, true,
6308 GSI_SAME_STMT);
6309 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, newref);
6310 newoff = copy_ssa_name (running_off);
6311 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6312 running_off, stride_step);
6313 vect_finish_stmt_generation (stmt, incr, gsi);
6315 running_off = newoff;
6318 vec_inv = build_constructor (vectype, v);
6319 new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
6320 new_stmt = SSA_NAME_DEF_STMT (new_temp);
6322 else
6324 new_stmt = gimple_build_assign (make_ssa_name (ltype),
6325 build2 (MEM_REF, ltype,
6326 running_off, alias_off));
6327 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6329 tree newoff = copy_ssa_name (running_off);
6330 gimple incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6331 running_off, stride_step);
6332 vect_finish_stmt_generation (stmt, incr, gsi);
6334 running_off = newoff;
6337 if (slp)
6338 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
6339 if (j == 0)
6340 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6341 else
6342 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6343 prev_stmt_info = vinfo_for_stmt (new_stmt);
6345 return true;
6348 if (grouped_load)
6350 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
6351 if (slp
6352 && !SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()
6353 && first_stmt != SLP_TREE_SCALAR_STMTS (slp_node)[0])
6354 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
6356 /* Check if the chain of loads is already vectorized. */
6357 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
6358 /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
6359 ??? But we can only do so if there is exactly one
6360 as we have no way to get at the rest. Leave the CSE
6361 opportunity alone.
6362 ??? With the group load eventually participating
6363 in multiple different permutations (having multiple
6364 slp nodes which refer to the same group) the CSE
6365 would even produce wrong code. See PR56270. */
6366 && !slp)
6368 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
6369 return true;
6371 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
6372 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
6374 /* VEC_NUM is the number of vect stmts to be created for this group. */
6375 if (slp)
6377 grouped_load = false;
6378 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6379 if (SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
6380 slp_perm = true;
6381 group_gap = GROUP_GAP (vinfo_for_stmt (first_stmt));
6383 else
6385 vec_num = group_size;
6386 group_gap = 0;
6389 else
6391 first_stmt = stmt;
6392 first_dr = dr;
6393 group_size = vec_num = 1;
6394 group_gap = 0;
6397 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
6398 gcc_assert (alignment_support_scheme);
6399 /* Targets with load-lane instructions must not require explicit
6400 realignment. */
6401 gcc_assert (!load_lanes_p
6402 || alignment_support_scheme == dr_aligned
6403 || alignment_support_scheme == dr_unaligned_supported);
6405 /* In case the vectorization factor (VF) is bigger than the number
6406 of elements that we can fit in a vectype (nunits), we have to generate
6407 more than one vector stmt - i.e., we need to "unroll" the
6408 vector stmt by a factor VF/nunits. In doing so, we record a pointer
6409 from one copy of the vector stmt to the next, in the field
6410 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
6411 stages to find the correct vector defs to be used when vectorizing
6412 stmts that use the defs of the current stmt. The example below
6413 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
6414 need to create 4 vectorized stmts):
6416 before vectorization:
6417 RELATED_STMT VEC_STMT
6418 S1: x = memref - -
6419 S2: z = x + 1 - -
6421 step 1: vectorize stmt S1:
6422 We first create the vector stmt VS1_0, and, as usual, record a
6423 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
6424 Next, we create the vector stmt VS1_1, and record a pointer to
6425 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
6426 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
6427 stmts and pointers:
6428 RELATED_STMT VEC_STMT
6429 VS1_0: vx0 = memref0 VS1_1 -
6430 VS1_1: vx1 = memref1 VS1_2 -
6431 VS1_2: vx2 = memref2 VS1_3 -
6432 VS1_3: vx3 = memref3 - -
6433 S1: x = load - VS1_0
6434 S2: z = x + 1 - -
6436 See the documentation of vect_get_vec_def_for_stmt_copy for how the
6437 information we recorded in RELATED_STMT field is used to vectorize
6438 stmt S2. */
6440 /* In case of interleaving (non-unit grouped access):
6442 S1: x2 = &base + 2
6443 S2: x0 = &base
6444 S3: x1 = &base + 1
6445 S4: x3 = &base + 3
6447 Vectorized loads are created in the order of memory accesses
6448 starting from the access of the first stmt of the chain:
6450 VS1: vx0 = &base
6451 VS2: vx1 = &base + vec_size*1
6452 VS3: vx2 = &base + vec_size*2
6453 VS4: vx3 = &base + vec_size*3
6455 Then permutation statements are generated:
6457 VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
6458 VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
6461 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
6462 (the order of the data-refs in the output of vect_permute_load_chain
6463 corresponds to the order of scalar stmts in the interleaving chain - see
6464 the documentation of vect_permute_load_chain()).
6465 The generation of permutation stmts and recording them in
6466 STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().
6468 In case of both multiple types and interleaving, the vector loads and
6469 permutation stmts above are created for every copy. The result vector
6470 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
6471 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
6473 /* If the data reference is aligned (dr_aligned) or potentially unaligned
6474 on a target that supports unaligned accesses (dr_unaligned_supported)
6475 we generate the following code:
6476 p = initial_addr;
6477 indx = 0;
6478 loop {
6479 p = p + indx * vectype_size;
6480 vec_dest = *(p);
6481 indx = indx + 1;
6484 Otherwise, the data reference is potentially unaligned on a target that
6485 does not support unaligned accesses (dr_explicit_realign_optimized) -
6486 then generate the following code, in which the data in each iteration is
6487 obtained by two vector loads, one from the previous iteration, and one
6488 from the current iteration:
6489 p1 = initial_addr;
6490 msq_init = *(floor(p1))
6491 p2 = initial_addr + VS - 1;
6492 realignment_token = call target_builtin;
6493 indx = 0;
6494 loop {
6495 p2 = p2 + indx * vectype_size
6496 lsq = *(floor(p2))
6497 vec_dest = realign_load (msq, lsq, realignment_token)
6498 indx = indx + 1;
6499 msq = lsq;
6500 } */
6502 /* If the misalignment remains the same throughout the execution of the
6503 loop, we can create the init_addr and permutation mask at the loop
6504 preheader. Otherwise, it needs to be created inside the loop.
6505 This can only occur when vectorizing memory accesses in the inner-loop
6506 nested within an outer-loop that is being vectorized. */
6508 if (nested_in_vect_loop
6509 && (TREE_INT_CST_LOW (DR_STEP (dr))
6510 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
6512 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
6513 compute_in_loop = true;
6516 if ((alignment_support_scheme == dr_explicit_realign_optimized
6517 || alignment_support_scheme == dr_explicit_realign)
6518 && !compute_in_loop)
6520 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
6521 alignment_support_scheme, NULL_TREE,
6522 &at_loop);
6523 if (alignment_support_scheme == dr_explicit_realign_optimized)
6525 phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
6526 byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
6527 size_one_node);
6530 else
6531 at_loop = loop;
6533 if (negative)
6534 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
6536 if (load_lanes_p)
6537 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
6538 else
6539 aggr_type = vectype;
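  /* Note (descriptive only): with LOAD_LANES the data pointer is stepped
     over the whole group at once, so the aggregate type spans
     vec_num * nunits elements; otherwise it is bumped one vector at a
     time.  */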
6541 prev_stmt_info = NULL;
6542 for (j = 0; j < ncopies; j++)
6544 /* 1. Create the vector or array pointer update chain. */
6545 if (j == 0)
6547 bool simd_lane_access_p
6548 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
6549 if (simd_lane_access_p
6550 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
6551 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
6552 && integer_zerop (DR_OFFSET (first_dr))
6553 && integer_zerop (DR_INIT (first_dr))
6554 && alias_sets_conflict_p (get_alias_set (aggr_type),
6555 get_alias_set (DR_REF (first_dr)))
6556 && (alignment_support_scheme == dr_aligned
6557 || alignment_support_scheme == dr_unaligned_supported))
6559 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
6560 dataref_offset = build_int_cst (reference_alias_ptr_type
6561 (DR_REF (first_dr)), 0);
6562 inv_p = false;
6564 else
6565 dataref_ptr
6566 = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
6567 offset, &dummy, gsi, &ptr_incr,
6568 simd_lane_access_p, &inv_p,
6569 byte_offset);
6571 else if (dataref_offset)
6572 dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
6573 TYPE_SIZE_UNIT (aggr_type));
6574 else
6575 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
6576 TYPE_SIZE_UNIT (aggr_type));
6578 if (grouped_load || slp_perm)
6579 dr_chain.create (vec_num);
6581 if (load_lanes_p)
6583 tree vec_array;
6585 vec_array = create_vector_array (vectype, vec_num);
6587 /* Emit:
6588 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
6589 data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
6590 new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
6591 gimple_call_set_lhs (new_stmt, vec_array);
6592 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6594 /* Extract each vector into an SSA_NAME. */
6595 for (i = 0; i < vec_num; i++)
6597 new_temp = read_vector_array (stmt, gsi, scalar_dest,
6598 vec_array, i);
6599 dr_chain.quick_push (new_temp);
6602 /* Record the mapping between SSA_NAMEs and statements. */
6603 vect_record_grouped_load_vectors (stmt, dr_chain);
6605 else
6607 for (i = 0; i < vec_num; i++)
6609 if (i > 0)
6610 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
6611 stmt, NULL_TREE);
6613 /* 2. Create the vector-load in the loop. */
6614 switch (alignment_support_scheme)
6616 case dr_aligned:
6617 case dr_unaligned_supported:
6619 unsigned int align, misalign;
6621 data_ref
6622 = build2 (MEM_REF, vectype, dataref_ptr,
6623 dataref_offset
6624 ? dataref_offset
6625 : build_int_cst (reference_alias_ptr_type
6626 (DR_REF (first_dr)), 0));
6627 align = TYPE_ALIGN_UNIT (vectype);
6628 if (alignment_support_scheme == dr_aligned)
6630 gcc_assert (aligned_access_p (first_dr));
6631 misalign = 0;
6633 else if (DR_MISALIGNMENT (first_dr) == -1)
6635 TREE_TYPE (data_ref)
6636 = build_aligned_type (TREE_TYPE (data_ref),
6637 TYPE_ALIGN (elem_type));
6638 align = TYPE_ALIGN_UNIT (elem_type);
6639 misalign = 0;
6641 else
6643 TREE_TYPE (data_ref)
6644 = build_aligned_type (TREE_TYPE (data_ref),
6645 TYPE_ALIGN (elem_type));
6646 misalign = DR_MISALIGNMENT (first_dr);
6648 if (dataref_offset == NULL_TREE)
6649 set_ptr_info_alignment (get_ptr_info (dataref_ptr),
6650 align, misalign);
6651 break;
6653 case dr_explicit_realign:
6655 tree ptr, bump;
6657 tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));
6659 if (compute_in_loop)
6660 msq = vect_setup_realignment (first_stmt, gsi,
6661 &realignment_token,
6662 dr_explicit_realign,
6663 dataref_ptr, NULL);
6665 ptr = copy_ssa_name (dataref_ptr);
6666 new_stmt = gimple_build_assign
6667 (ptr, BIT_AND_EXPR, dataref_ptr,
6668 build_int_cst
6669 (TREE_TYPE (dataref_ptr),
6670 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
6671 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6672 data_ref
6673 = build2 (MEM_REF, vectype, ptr,
6674 build_int_cst (reference_alias_ptr_type
6675 (DR_REF (first_dr)), 0));
6676 vec_dest = vect_create_destination_var (scalar_dest,
6677 vectype);
6678 new_stmt = gimple_build_assign (vec_dest, data_ref);
6679 new_temp = make_ssa_name (vec_dest, new_stmt);
6680 gimple_assign_set_lhs (new_stmt, new_temp);
6681 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
6682 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
6683 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6684 msq = new_temp;
6686 bump = size_binop (MULT_EXPR, vs,
6687 TYPE_SIZE_UNIT (elem_type));
6688 bump = size_binop (MINUS_EXPR, bump, size_one_node);
6689 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
6690 new_stmt = gimple_build_assign
6691 (NULL_TREE, BIT_AND_EXPR, ptr,
6692 build_int_cst
6693 (TREE_TYPE (ptr),
6694 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
6695 ptr = copy_ssa_name (dataref_ptr, new_stmt);
6696 gimple_assign_set_lhs (new_stmt, ptr);
6697 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6698 data_ref
6699 = build2 (MEM_REF, vectype, ptr,
6700 build_int_cst (reference_alias_ptr_type
6701 (DR_REF (first_dr)), 0));
6702 break;
6704 case dr_explicit_realign_optimized:
6705 new_temp = copy_ssa_name (dataref_ptr);
6706 new_stmt = gimple_build_assign
6707 (new_temp, BIT_AND_EXPR, dataref_ptr,
6708 build_int_cst
6709 (TREE_TYPE (dataref_ptr),
6710 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
6711 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6712 data_ref
6713 = build2 (MEM_REF, vectype, new_temp,
6714 build_int_cst (reference_alias_ptr_type
6715 (DR_REF (first_dr)), 0));
6716 break;
6717 default:
6718 gcc_unreachable ();
6720 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6721 new_stmt = gimple_build_assign (vec_dest, data_ref);
6722 new_temp = make_ssa_name (vec_dest, new_stmt);
6723 gimple_assign_set_lhs (new_stmt, new_temp);
6724 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6726 /* 3. Handle explicit realignment if necessary/supported.
6727 Create in loop:
6728 vec_dest = realign_load (msq, lsq, realignment_token) */
6729 if (alignment_support_scheme == dr_explicit_realign_optimized
6730 || alignment_support_scheme == dr_explicit_realign)
6732 lsq = gimple_assign_lhs (new_stmt);
6733 if (!realignment_token)
6734 realignment_token = dataref_ptr;
6735 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6736 new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
6737 msq, lsq, realignment_token);
6738 new_temp = make_ssa_name (vec_dest, new_stmt);
6739 gimple_assign_set_lhs (new_stmt, new_temp);
6740 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6742 if (alignment_support_scheme == dr_explicit_realign_optimized)
6744 gcc_assert (phi);
6745 if (i == vec_num - 1 && j == ncopies - 1)
6746 add_phi_arg (phi, lsq,
6747 loop_latch_edge (containing_loop),
6748 UNKNOWN_LOCATION);
6749 msq = lsq;
6753 /* 4. Handle invariant-load. */
6754 if (inv_p && !bb_vinfo)
6756 gcc_assert (!grouped_load);
6757 /* If we have versioned for aliasing or the loop doesn't
6758 have any data dependencies that would preclude this,
6759 then we are sure this is a loop invariant load and
6760 thus we can insert it on the preheader edge. */
6761 if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
6762 && !nested_in_vect_loop
6763 && hoist_defs_of_uses (stmt, loop))
6765 if (dump_enabled_p ())
6767 dump_printf_loc (MSG_NOTE, vect_location,
6768 "hoisting out of the vectorized "
6769 "loop: ");
6770 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
6772 tree tem = copy_ssa_name (scalar_dest);
6773 gsi_insert_on_edge_immediate
6774 (loop_preheader_edge (loop),
6775 gimple_build_assign (tem,
6776 unshare_expr
6777 (gimple_assign_rhs1 (stmt))));
6778 new_temp = vect_init_vector (stmt, tem, vectype, NULL);
6780 else
6782 gimple_stmt_iterator gsi2 = *gsi;
6783 gsi_next (&gsi2);
6784 new_temp = vect_init_vector (stmt, scalar_dest,
6785 vectype, &gsi2);
6787 new_stmt = SSA_NAME_DEF_STMT (new_temp);
6788 set_vinfo_for_stmt (new_stmt,
6789 new_stmt_vec_info (new_stmt, loop_vinfo,
6790 bb_vinfo));
6793 if (negative)
6795 tree perm_mask = perm_mask_for_reverse (vectype);
6796 new_temp = permute_vec_elements (new_temp, new_temp,
6797 perm_mask, stmt, gsi);
6798 new_stmt = SSA_NAME_DEF_STMT (new_temp);
6801 /* Collect vector loads and later create their permutation in
6802 vect_transform_grouped_load (). */
6803 if (grouped_load || slp_perm)
6804 dr_chain.quick_push (new_temp);
6806 /* Store vector loads in the corresponding SLP_NODE. */
6807 if (slp && !slp_perm)
6808 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
6810 /* Bump the vector pointer to account for a gap. */
6811 if (slp && group_gap != 0)
6813 tree bump = size_binop (MULT_EXPR,
6814 TYPE_SIZE_UNIT (elem_type),
6815 size_int (group_gap));
6816 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
6817 stmt, bump);
6821 if (slp && !slp_perm)
6822 continue;
6824 if (slp_perm)
6826 if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
6827 slp_node_instance, false))
6829 dr_chain.release ();
6830 return false;
6833 else
6835 if (grouped_load)
6837 if (!load_lanes_p)
6838 vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
6839 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
6841 else
6843 if (j == 0)
6844 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6845 else
6846 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6847 prev_stmt_info = vinfo_for_stmt (new_stmt);
6850 dr_chain.release ();
6853 return true;
6856 /* Function vect_is_simple_cond.
6858 Input:
6859 LOOP - the loop that is being vectorized.
6860 COND - Condition that is checked for simple use.
6862 Output:
6863 *COMP_VECTYPE - the vector type for the comparison.
6865 Returns whether a COND can be vectorized. Checks whether
6866 condition operands are supportable using vect_is_simple_use. */
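/* For example (illustrative GIMPLE): a condition such as  a_1 < b_2  is
   simple when both operands are SSA names with vectorizable definitions,
   or constants; *COMP_VECTYPE is then the vector type of the compared
   operands.  */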
6868 static bool
6869 vect_is_simple_cond (tree cond, gimple stmt, loop_vec_info loop_vinfo,
6870 bb_vec_info bb_vinfo, tree *comp_vectype)
6872 tree lhs, rhs;
6873 tree def;
6874 enum vect_def_type dt;
6875 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
6877 if (!COMPARISON_CLASS_P (cond))
6878 return false;
6880 lhs = TREE_OPERAND (cond, 0);
6881 rhs = TREE_OPERAND (cond, 1);
6883 if (TREE_CODE (lhs) == SSA_NAME)
6885 gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
6886 if (!vect_is_simple_use_1 (lhs, stmt, loop_vinfo, bb_vinfo,
6887 &lhs_def_stmt, &def, &dt, &vectype1))
6888 return false;
6890 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
6891 && TREE_CODE (lhs) != FIXED_CST)
6892 return false;
6894 if (TREE_CODE (rhs) == SSA_NAME)
6896 gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
6897 if (!vect_is_simple_use_1 (rhs, stmt, loop_vinfo, bb_vinfo,
6898 &rhs_def_stmt, &def, &dt, &vectype2))
6899 return false;
6901 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
6902 && TREE_CODE (rhs) != FIXED_CST)
6903 return false;
6905 *comp_vectype = vectype1 ? vectype1 : vectype2;
6906 return true;
6909 /* vectorizable_condition.
6911 Check if STMT is conditional modify expression that can be vectorized.
6912 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
6913 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
6914 at GSI.
6916 When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
6917 to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
6918 the else clause if it is 2).
6920 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
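/* A minimal sketch of the transformation (vector names illustrative only):

     scalar:  x = a < b ? c : d;
     vector:  vx = VEC_COND_EXPR <va < vb, vc, vd>;

   where the comparison is carried out in a signed integer vector type of
   the same size as VECTYPE.  */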
6922 bool
6923 vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
6924 gimple *vec_stmt, tree reduc_def, int reduc_index,
6925 slp_tree slp_node)
6927 tree scalar_dest = NULL_TREE;
6928 tree vec_dest = NULL_TREE;
6929 tree cond_expr, then_clause, else_clause;
6930 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
6931 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
6932 tree comp_vectype = NULL_TREE;
6933 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
6934 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
6935 tree vec_compare, vec_cond_expr;
6936 tree new_temp;
6937 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6938 tree def;
6939 enum vect_def_type dt, dts[4];
6940 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
6941 int ncopies;
6942 enum tree_code code;
6943 stmt_vec_info prev_stmt_info = NULL;
6944 int i, j;
6945 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
6946 vec<tree> vec_oprnds0 = vNULL;
6947 vec<tree> vec_oprnds1 = vNULL;
6948 vec<tree> vec_oprnds2 = vNULL;
6949 vec<tree> vec_oprnds3 = vNULL;
6950 tree vec_cmp_type;
6952 if (slp_node || PURE_SLP_STMT (stmt_info))
6953 ncopies = 1;
6954 else
6955 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
6957 gcc_assert (ncopies >= 1);
6958 if (reduc_index && ncopies > 1)
6959 return false; /* FORNOW */
6961 if (reduc_index && STMT_SLP_TYPE (stmt_info))
6962 return false;
6964 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
6965 return false;
6967 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
6968 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
6969 && reduc_def))
6970 return false;
6972 /* FORNOW: not yet supported. */
6973 if (STMT_VINFO_LIVE_P (stmt_info))
6975 if (dump_enabled_p ())
6976 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6977 "value used after loop.\n");
6978 return false;
6981 /* Is vectorizable conditional operation? */
6982 if (!is_gimple_assign (stmt))
6983 return false;
6985 code = gimple_assign_rhs_code (stmt);
6987 if (code != COND_EXPR)
6988 return false;
6990 cond_expr = gimple_assign_rhs1 (stmt);
6991 then_clause = gimple_assign_rhs2 (stmt);
6992 else_clause = gimple_assign_rhs3 (stmt);
6994 if (!vect_is_simple_cond (cond_expr, stmt, loop_vinfo, bb_vinfo,
6995 &comp_vectype)
6996 || !comp_vectype)
6997 return false;
6999 if (TREE_CODE (then_clause) == SSA_NAME)
7001 gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
7002 if (!vect_is_simple_use (then_clause, stmt, loop_vinfo, bb_vinfo,
7003 &then_def_stmt, &def, &dt))
7004 return false;
7006 else if (TREE_CODE (then_clause) != INTEGER_CST
7007 && TREE_CODE (then_clause) != REAL_CST
7008 && TREE_CODE (then_clause) != FIXED_CST)
7009 return false;
7011 if (TREE_CODE (else_clause) == SSA_NAME)
7013 gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
7014 if (!vect_is_simple_use (else_clause, stmt, loop_vinfo, bb_vinfo,
7015 &else_def_stmt, &def, &dt))
7016 return false;
7018 else if (TREE_CODE (else_clause) != INTEGER_CST
7019 && TREE_CODE (else_clause) != REAL_CST
7020 && TREE_CODE (else_clause) != FIXED_CST)
7021 return false;
7023 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype)));
7024 /* The result of a vector comparison should be of signed integer type. */
7025 tree cmp_type = build_nonstandard_integer_type (prec, 0);
7026 vec_cmp_type = get_same_sized_vectype (cmp_type, vectype);
7027 if (vec_cmp_type == NULL_TREE)
7028 return false;
7030 if (!vec_stmt)
7032 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
7033 return expand_vec_cond_expr_p (vectype, comp_vectype);
7036 /* Transform. */
7038 if (!slp_node)
7040 vec_oprnds0.create (1);
7041 vec_oprnds1.create (1);
7042 vec_oprnds2.create (1);
7043 vec_oprnds3.create (1);
7046 /* Handle def. */
7047 scalar_dest = gimple_assign_lhs (stmt);
7048 vec_dest = vect_create_destination_var (scalar_dest, vectype);
7050 /* Handle cond expr. */
7051 for (j = 0; j < ncopies; j++)
7053 gassign *new_stmt = NULL;
7054 if (j == 0)
7056 if (slp_node)
7058 auto_vec<tree, 4> ops;
7059 auto_vec<vec<tree>, 4> vec_defs;
7061 ops.safe_push (TREE_OPERAND (cond_expr, 0));
7062 ops.safe_push (TREE_OPERAND (cond_expr, 1));
7063 ops.safe_push (then_clause);
7064 ops.safe_push (else_clause);
7065 vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
7066 vec_oprnds3 = vec_defs.pop ();
7067 vec_oprnds2 = vec_defs.pop ();
7068 vec_oprnds1 = vec_defs.pop ();
7069 vec_oprnds0 = vec_defs.pop ();
7071 ops.release ();
7072 vec_defs.release ();
7074 else
7076 gimple gtemp;
7077 vec_cond_lhs =
7078 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0),
7079 stmt, NULL);
7080 vect_is_simple_use (TREE_OPERAND (cond_expr, 0), stmt,
7081 loop_vinfo, NULL, &gtemp, &def, &dts[0]);
7083 vec_cond_rhs =
7084 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
7085 stmt, NULL);
7086 vect_is_simple_use (TREE_OPERAND (cond_expr, 1), stmt,
7087 loop_vinfo, NULL, &gtemp, &def, &dts[1]);
7088 if (reduc_index == 1)
7089 vec_then_clause = reduc_def;
7090 else
7092 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
7093 stmt, NULL);
7094 vect_is_simple_use (then_clause, stmt, loop_vinfo,
7095 NULL, &gtemp, &def, &dts[2]);
7097 if (reduc_index == 2)
7098 vec_else_clause = reduc_def;
7099 else
7101 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
7102 stmt, NULL);
7103 vect_is_simple_use (else_clause, stmt, loop_vinfo,
7104 NULL, &gtemp, &def, &dts[3]);
7108 else
7110 vec_cond_lhs = vect_get_vec_def_for_stmt_copy (dts[0],
7111 vec_oprnds0.pop ());
7112 vec_cond_rhs = vect_get_vec_def_for_stmt_copy (dts[1],
7113 vec_oprnds1.pop ());
7114 vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
7115 vec_oprnds2.pop ());
7116 vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
7117 vec_oprnds3.pop ());
7120 if (!slp_node)
7122 vec_oprnds0.quick_push (vec_cond_lhs);
7123 vec_oprnds1.quick_push (vec_cond_rhs);
7124 vec_oprnds2.quick_push (vec_then_clause);
7125 vec_oprnds3.quick_push (vec_else_clause);
7128 /* Arguments are ready. Create the new vector stmt. */
7129 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
7131 vec_cond_rhs = vec_oprnds1[i];
7132 vec_then_clause = vec_oprnds2[i];
7133 vec_else_clause = vec_oprnds3[i];
7135 vec_compare = build2 (TREE_CODE (cond_expr), vec_cmp_type,
7136 vec_cond_lhs, vec_cond_rhs);
7137 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
7138 vec_compare, vec_then_clause, vec_else_clause);
7140 new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
7141 new_temp = make_ssa_name (vec_dest, new_stmt);
7142 gimple_assign_set_lhs (new_stmt, new_temp);
7143 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7144 if (slp_node)
7145 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7148 if (slp_node)
7149 continue;
7151 if (j == 0)
7152 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7153 else
7154 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7156 prev_stmt_info = vinfo_for_stmt (new_stmt);
7159 vec_oprnds0.release ();
7160 vec_oprnds1.release ();
7161 vec_oprnds2.release ();
7162 vec_oprnds3.release ();
7164 return true;
7168 /* Make sure the statement is vectorizable. */
7170 bool
7171 vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
7173 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7174 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7175 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
7176 bool ok;
7177 tree scalar_type, vectype;
7178 gimple pattern_stmt;
7179 gimple_seq pattern_def_seq;
7181 if (dump_enabled_p ())
7183 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
7184 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7187 if (gimple_has_volatile_ops (stmt))
7189 if (dump_enabled_p ())
7190 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7191 "not vectorized: stmt has volatile operands\n");
7193 return false;
7196 /* Skip stmts that do not need to be vectorized. In loops this is expected
7197 to include:
7198 - the COND_EXPR which is the loop exit condition
7199 - any LABEL_EXPRs in the loop
7200 - computations that are used only for array indexing or loop control.
7201 In basic blocks we only analyze statements that are a part of some SLP
7202 instance, therefore, all the statements are relevant.
7204 Pattern statement needs to be analyzed instead of the original statement
7205 if the original statement is not relevant. Otherwise, we analyze both
7206 statements. In basic blocks we are called from some SLP instance
7207 traversal; there we don't analyze pattern stmts separately, since the
7208 pattern stmts will already be part of the SLP instance. */
7210 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
7211 if (!STMT_VINFO_RELEVANT_P (stmt_info)
7212 && !STMT_VINFO_LIVE_P (stmt_info))
7214 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
7215 && pattern_stmt
7216 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
7217 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
7219 /* Analyze PATTERN_STMT instead of the original stmt. */
7220 stmt = pattern_stmt;
7221 stmt_info = vinfo_for_stmt (pattern_stmt);
7222 if (dump_enabled_p ())
7224 dump_printf_loc (MSG_NOTE, vect_location,
7225 "==> examining pattern statement: ");
7226 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7229 else
7231 if (dump_enabled_p ())
7232 dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");
7234 return true;
7237 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
7238 && node == NULL
7239 && pattern_stmt
7240 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
7241 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
7243 /* Analyze PATTERN_STMT too. */
7244 if (dump_enabled_p ())
7246 dump_printf_loc (MSG_NOTE, vect_location,
7247 "==> examining pattern statement: ");
7248 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7251 if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node))
7252 return false;
7255 if (is_pattern_stmt_p (stmt_info)
7256 && node == NULL
7257 && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
7259 gimple_stmt_iterator si;
7261 for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
7263 gimple pattern_def_stmt = gsi_stmt (si);
7264 if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
7265 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
7267 /* Analyze def stmt of STMT if it's a pattern stmt. */
7268 if (dump_enabled_p ())
7270 dump_printf_loc (MSG_NOTE, vect_location,
7271 "==> examining pattern def statement: ");
7272 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
7275 if (!vect_analyze_stmt (pattern_def_stmt,
7276 need_to_vectorize, node))
7277 return false;
7282 switch (STMT_VINFO_DEF_TYPE (stmt_info))
7284 case vect_internal_def:
7285 break;
7287 case vect_reduction_def:
7288 case vect_nested_cycle:
7289 gcc_assert (!bb_vinfo && (relevance == vect_used_in_outer
7290 || relevance == vect_used_in_outer_by_reduction
7291 || relevance == vect_unused_in_scope));
7292 break;
7294 case vect_induction_def:
7295 case vect_constant_def:
7296 case vect_external_def:
7297 case vect_unknown_def_type:
7298 default:
7299 gcc_unreachable ();
7302 if (bb_vinfo)
7304 gcc_assert (PURE_SLP_STMT (stmt_info));
7306 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
7307 if (dump_enabled_p ())
7309 dump_printf_loc (MSG_NOTE, vect_location,
7310 "get vectype for scalar type: ");
7311 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
7312 dump_printf (MSG_NOTE, "\n");
7315 vectype = get_vectype_for_scalar_type (scalar_type);
7316 if (!vectype)
7318 if (dump_enabled_p ())
7320 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7321 "not SLPed: unsupported data-type ");
7322 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
7323 scalar_type);
7324 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
7326 return false;
7329 if (dump_enabled_p ())
7331 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
7332 dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
7333 dump_printf (MSG_NOTE, "\n");
7336 STMT_VINFO_VECTYPE (stmt_info) = vectype;
7339 if (STMT_VINFO_RELEVANT_P (stmt_info))
7341 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
7342 gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
7343 || (is_gimple_call (stmt)
7344 && gimple_call_lhs (stmt) == NULL_TREE));
7345 *need_to_vectorize = true;
7348 ok = true;
7349 if (!bb_vinfo
7350 && (STMT_VINFO_RELEVANT_P (stmt_info)
7351 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
7352 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, NULL)
7353 || vectorizable_conversion (stmt, NULL, NULL, NULL)
7354 || vectorizable_shift (stmt, NULL, NULL, NULL)
7355 || vectorizable_operation (stmt, NULL, NULL, NULL)
7356 || vectorizable_assignment (stmt, NULL, NULL, NULL)
7357 || vectorizable_load (stmt, NULL, NULL, NULL, NULL)
7358 || vectorizable_call (stmt, NULL, NULL, NULL)
7359 || vectorizable_store (stmt, NULL, NULL, NULL)
7360 || vectorizable_reduction (stmt, NULL, NULL, NULL)
7361 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, NULL));
7362 else
7364 if (bb_vinfo)
7365 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
7366 || vectorizable_conversion (stmt, NULL, NULL, node)
7367 || vectorizable_shift (stmt, NULL, NULL, node)
7368 || vectorizable_operation (stmt, NULL, NULL, node)
7369 || vectorizable_assignment (stmt, NULL, NULL, node)
7370 || vectorizable_load (stmt, NULL, NULL, node, NULL)
7371 || vectorizable_call (stmt, NULL, NULL, node)
7372 || vectorizable_store (stmt, NULL, NULL, node)
7373 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node));
7376 if (!ok)
7378 if (dump_enabled_p ())
7380 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7381 "not vectorized: relevant stmt not ");
7382 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
7383 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
7386 return false;
7389 if (bb_vinfo)
7390 return true;
7392 /* Stmts that are (also) "live" (i.e. used outside the loop)
7393 need extra handling, except for vectorizable reductions. */
7394 if (STMT_VINFO_LIVE_P (stmt_info)
7395 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
7396 ok = vectorizable_live_operation (stmt, NULL, NULL);
7398 if (!ok)
7400 if (dump_enabled_p ())
7402 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7403 "not vectorized: live stmt not ");
7404 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
7405 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
7408 return false;
7411 return true;
7415 /* Function vect_transform_stmt.
7417 Create a vectorized stmt to replace STMT, and insert it at GSI. */
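/* Descriptive note: the return value says whether STMT is a store, so the
   caller knows the scalar store can be removed once its interleaving
   group, if any, has been fully vectorized; *GROUPED_STORE is set when
   STMT belongs to such a group.  */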
7419 bool
7420 vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
7421 bool *grouped_store, slp_tree slp_node,
7422 slp_instance slp_node_instance)
7424 bool is_store = false;
7425 gimple vec_stmt = NULL;
7426 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7427 bool done;
7429 switch (STMT_VINFO_TYPE (stmt_info))
7431 case type_demotion_vec_info_type:
7432 case type_promotion_vec_info_type:
7433 case type_conversion_vec_info_type:
7434 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
7435 gcc_assert (done);
7436 break;
7438 case induc_vec_info_type:
7439 gcc_assert (!slp_node);
7440 done = vectorizable_induction (stmt, gsi, &vec_stmt);
7441 gcc_assert (done);
7442 break;
7444 case shift_vec_info_type:
7445 done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
7446 gcc_assert (done);
7447 break;
7449 case op_vec_info_type:
7450 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
7451 gcc_assert (done);
7452 break;
7454 case assignment_vec_info_type:
7455 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
7456 gcc_assert (done);
7457 break;
7459 case load_vec_info_type:
7460 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
7461 slp_node_instance);
7462 gcc_assert (done);
7463 break;
7465 case store_vec_info_type:
7466 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
7467 gcc_assert (done);
7468 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
7470 /* In case of interleaving, the whole chain is vectorized when the
7471 last store in the chain is reached. Store stmts before the last
7472 one are skipped, and their vec_stmt_info shouldn't be freed
7473 meanwhile. */
7474 *grouped_store = true;
7475 if (STMT_VINFO_VEC_STMT (stmt_info))
7476 is_store = true;
7478 else
7479 is_store = true;
7480 break;
7482 case condition_vec_info_type:
7483 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
7484 gcc_assert (done);
7485 break;
7487 case call_vec_info_type:
7488 done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
7489 stmt = gsi_stmt (*gsi);
7490 if (is_gimple_call (stmt)
7491 && gimple_call_internal_p (stmt)
7492 && gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
7493 is_store = true;
7494 break;
7496 case call_simd_clone_vec_info_type:
7497 done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
7498 stmt = gsi_stmt (*gsi);
7499 break;
7501 case reduc_vec_info_type:
7502 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
7503 gcc_assert (done);
7504 break;
7506 default:
7507 if (!STMT_VINFO_LIVE_P (stmt_info))
7509 if (dump_enabled_p ())
7510 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7511 "stmt not supported.\n");
7512 gcc_unreachable ();
7516 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
7517 is being vectorized, but outside the immediately enclosing loop. */
7518 if (vec_stmt
7519 && STMT_VINFO_LOOP_VINFO (stmt_info)
7520 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
7521 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
7522 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
7523 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
7524 || STMT_VINFO_RELEVANT (stmt_info) ==
7525 vect_used_in_outer_by_reduction))
7527 struct loop *innerloop = LOOP_VINFO_LOOP (
7528 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
7529 imm_use_iterator imm_iter;
7530 use_operand_p use_p;
7531 tree scalar_dest;
7532 gimple exit_phi;
7534 if (dump_enabled_p ())
7535 dump_printf_loc (MSG_NOTE, vect_location,
7536 "Record the vdef for outer-loop vectorization.\n");
7538 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
7539 (to be used when vectorizing outer-loop stmts that use the DEF of
7540 STMT). */
7541 if (gimple_code (stmt) == GIMPLE_PHI)
7542 scalar_dest = PHI_RESULT (stmt);
7543 else
7544 scalar_dest = gimple_assign_lhs (stmt);
7546 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
7548 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
7550 exit_phi = USE_STMT (use_p);
7551 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
7556 /* Handle stmts whose DEF is used outside the loop-nest that is
7557 being vectorized. */
7558 if (STMT_VINFO_LIVE_P (stmt_info)
7559 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
7561 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
7562 gcc_assert (done);
7565 if (vec_stmt)
7566 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
7568 return is_store;
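/* A minimal sketch (not code from this file) of how a transform driver
   might consume the IS_STORE/GROUPED_STORE results of vect_transform_stmt;
   the real drivers live in tree-vect-loop.c and tree-vect-slp.c and also
   handle pattern stmts and SLP bookkeeping omitted here:

     bool grouped_store = false;
     bool is_store = vect_transform_stmt (stmt, &si, &grouped_store,
                                          NULL, NULL);
     if (is_store && grouped_store)
       vect_remove_stores (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));
     else if (is_store)
       gsi_remove (&si, true);
     else
       gsi_next (&si);

   A grouped (interleaved) store is thus only cleaned up once the whole
   chain has been emitted, via vect_remove_stores below, while an ordinary
   scalar store is simply deleted in place.  */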
7572 /* Remove a group of stores (for SLP or interleaving), free their
7573 stmt_vec_info. */
7575 void
7576 vect_remove_stores (gimple first_stmt)
7578 gimple next = first_stmt;
7579 gimple tmp;
7580 gimple_stmt_iterator next_si;
7582 while (next)
7584 stmt_vec_info stmt_info = vinfo_for_stmt (next);
7586 tmp = GROUP_NEXT_ELEMENT (stmt_info);
7587 if (is_pattern_stmt_p (stmt_info))
7588 next = STMT_VINFO_RELATED_STMT (stmt_info);
7589 /* Free the attached stmt_vec_info and remove the stmt. */
7590 next_si = gsi_for_stmt (next);
7591 unlink_stmt_vdef (next);
7592 gsi_remove (&next_si, true);
7593 release_defs (next);
7594 free_stmt_vec_info (next);
7595 next = tmp;
7600 /* Function new_stmt_vec_info.
7602 Create and initialize a new stmt_vec_info struct for STMT. */
7604 stmt_vec_info
7605 new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
7606 bb_vec_info bb_vinfo)
7608 stmt_vec_info res;
7609 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
7611 STMT_VINFO_TYPE (res) = undef_vec_info_type;
7612 STMT_VINFO_STMT (res) = stmt;
7613 STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
7614 STMT_VINFO_BB_VINFO (res) = bb_vinfo;
7615 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
7616 STMT_VINFO_LIVE_P (res) = false;
7617 STMT_VINFO_VECTYPE (res) = NULL;
7618 STMT_VINFO_VEC_STMT (res) = NULL;
7619 STMT_VINFO_VECTORIZABLE (res) = true;
7620 STMT_VINFO_IN_PATTERN_P (res) = false;
7621 STMT_VINFO_RELATED_STMT (res) = NULL;
7622 STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
7623 STMT_VINFO_DATA_REF (res) = NULL;
7625 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
7626 STMT_VINFO_DR_OFFSET (res) = NULL;
7627 STMT_VINFO_DR_INIT (res) = NULL;
7628 STMT_VINFO_DR_STEP (res) = NULL;
7629 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
7631 if (gimple_code (stmt) == GIMPLE_PHI
7632 && is_loop_header_bb_p (gimple_bb (stmt)))
7633 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
7634 else
7635 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
7637 STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
7638 STMT_SLP_TYPE (res) = loop_vect;
7639 GROUP_FIRST_ELEMENT (res) = NULL;
7640 GROUP_NEXT_ELEMENT (res) = NULL;
7641 GROUP_SIZE (res) = 0;
7642 GROUP_STORE_COUNT (res) = 0;
7643 GROUP_GAP (res) = 0;
7644 GROUP_SAME_DR_STMT (res) = NULL;
7646 return res;
7650 /* Create the vector holding stmt_vec_info structs. */
7652 void
7653 init_stmt_vec_info_vec (void)
7655 gcc_assert (!stmt_vec_info_vec.exists ());
7656 stmt_vec_info_vec.create (50);
7660 /* Free the vector holding stmt_vec_info structs. */
7662 void
7663 free_stmt_vec_info_vec (void)
7665 unsigned int i;
7666 vec_void_p info;
7667 FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
7668 if (info != NULL)
7669 free_stmt_vec_info (STMT_VINFO_STMT ((stmt_vec_info) info));
7670 gcc_assert (stmt_vec_info_vec.exists ());
7671 stmt_vec_info_vec.release ();
7675 /* Free stmt vectorization related info. */
7677 void
7678 free_stmt_vec_info (gimple stmt)
7680 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7682 if (!stmt_info)
7683 return;
7685 /* Check if this statement has a related "pattern stmt"
7686 (introduced by the vectorizer during the pattern recognition
7687 pass). Free the pattern's stmt_vec_info and the def stmt's stmt_vec_info
7688 too. */
7689 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
7691 stmt_vec_info patt_info
7692 = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
7693 if (patt_info)
7695 gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
7696 gimple patt_stmt = STMT_VINFO_STMT (patt_info);
7697 gimple_set_bb (patt_stmt, NULL);
7698 tree lhs = gimple_get_lhs (patt_stmt);
7699 if (TREE_CODE (lhs) == SSA_NAME)
7700 release_ssa_name (lhs);
7701 if (seq)
7703 gimple_stmt_iterator si;
7704 for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
7706 gimple seq_stmt = gsi_stmt (si);
7707 gimple_set_bb (seq_stmt, NULL);
7708 lhs = gimple_get_lhs (seq_stmt);
7709 if (TREE_CODE (lhs) == SSA_NAME)
7710 release_ssa_name (lhs);
7711 free_stmt_vec_info (seq_stmt);
7714 free_stmt_vec_info (patt_stmt);
7718 STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
7719 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
7720 set_vinfo_for_stmt (stmt, NULL);
7721 free (stmt_info);
7725 /* Function get_vectype_for_scalar_type_and_size.
7727 Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
7728 by the target. */
7730 static tree
7731 get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
7733 machine_mode inner_mode = TYPE_MODE (scalar_type);
7734 machine_mode simd_mode;
7735 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
7736 int nunits;
7737 tree vectype;
7739 if (nbytes == 0)
7740 return NULL_TREE;
7742 if (GET_MODE_CLASS (inner_mode) != MODE_INT
7743 && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
7744 return NULL_TREE;
7746 /* For vector types of elements whose mode precision doesn't
7747 match their type's precision we use an element type of mode
7748 precision. The vectorization routines will have to make sure
7749 they support the proper result truncation/extension.
7750 We also make sure to build vector types with INTEGER_TYPE
7751 component type only. */
7752 if (INTEGRAL_TYPE_P (scalar_type)
7753 && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
7754 || TREE_CODE (scalar_type) != INTEGER_TYPE))
7755 scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
7756 TYPE_UNSIGNED (scalar_type));
7758 /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
7759 When the component mode passes the above test simply use a type
7760 corresponding to that mode. The theory is that any use that
7761 would cause problems with this will disable vectorization anyway. */
7762 else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
7763 && !INTEGRAL_TYPE_P (scalar_type))
7764 scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);
7766 /* We can't build a vector type of elements with alignment bigger than
7767 their size. */
7768 else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
7769 scalar_type = lang_hooks.types.type_for_mode (inner_mode,
7770 TYPE_UNSIGNED (scalar_type));
7772 /* If we fell back to using the mode, fail if there was
7773 no scalar type for it. */
7774 if (scalar_type == NULL_TREE)
7775 return NULL_TREE;
7777 /* If no size was supplied, use the mode the target prefers. Otherwise
7778 look up a vector mode of the specified size. */
7779 if (size == 0)
7780 simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
7781 else
7782 simd_mode = mode_for_vector (inner_mode, size / nbytes);
7783 nunits = GET_MODE_SIZE (simd_mode) / nbytes;
7784 if (nunits <= 1)
7785 return NULL_TREE;
7787 vectype = build_vector_type (scalar_type, nunits);
7789 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
7790 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
7791 return NULL_TREE;
7793 return vectype;
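/* Illustrative use of the routine above (a sketch, not code from this file;
   the concrete result is target-dependent, and the 16-byte width below merely
   assumes a target whose vectors are 16 bytes wide, e.g. one with SSE2):

     tree v16 = get_vectype_for_scalar_type_and_size (integer_type_node, 16);
     tree vpref = get_vectype_for_scalar_type_and_size (integer_type_node, 0);

   V16 would then be a four-element vector of 32-bit ints (or NULL_TREE if
   the target has no such vector mode), while a SIZE of 0 lets
   targetm.vectorize.preferred_simd_mode choose the width.  */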
7796 unsigned int current_vector_size;
7798 /* Function get_vectype_for_scalar_type.
7800 Returns the vector type corresponding to SCALAR_TYPE as supported
7801 by the target. */
7803 tree
7804 get_vectype_for_scalar_type (tree scalar_type)
7806 tree vectype;
7807 vectype = get_vectype_for_scalar_type_and_size (scalar_type,
7808 current_vector_size);
7809 if (vectype
7810 && current_vector_size == 0)
7811 current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
7812 return vectype;
7815 /* Function get_same_sized_vectype
7817 Returns a vector type corresponding to SCALAR_TYPE, of the same size as
7818 VECTOR_TYPE, if supported by the target. */
7820 tree
7821 get_same_sized_vectype (tree scalar_type, tree vector_type)
7823 return get_vectype_for_scalar_type_and_size
7824 (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
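/* Illustrative use (a sketch, not code from this file; assumes 32-bit int
   and float, and that an earlier call has already latched current_vector_size
   as get_vectype_for_scalar_type above does):

     tree vint = get_vectype_for_scalar_type (integer_type_node);
     tree vflt = get_same_sized_vectype (float_type_node, vint);

   VFLT would then be a float vector occupying the same number of bytes as
   VINT, which is what statements mixing int and float operands need so that
   one vector of one element type corresponds to one vector of the other.  */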
7827 /* Function vect_is_simple_use.
7829 Input:
7830 LOOP_VINFO - the vect info of the loop that is being vectorized.
7831 BB_VINFO - the vect info of the basic block that is being vectorized.
7832 OPERAND - operand of STMT in the loop or bb.
7833 DEF_STMT/DEF - the defining stmt and the defined value in case OPERAND is an SSA_NAME.
7835 Returns whether a stmt with OPERAND can be vectorized.
7836 For loops, supportable operands are constants, loop invariants, and operands
7837 that are defined by the current iteration of the loop. Unsupportable
7838 operands are those that are defined by a previous iteration of the loop (as
7839 is the case in reduction/induction computations).
7840 For basic blocks, supportable operands are constants and bb invariants.
7841 For now, operands defined outside the basic block are not supported. */
7843 bool
7844 vect_is_simple_use (tree operand, gimple stmt, loop_vec_info loop_vinfo,
7845 bb_vec_info bb_vinfo, gimple *def_stmt,
7846 tree *def, enum vect_def_type *dt)
7848 basic_block bb;
7849 stmt_vec_info stmt_vinfo;
7850 struct loop *loop = NULL;
7852 if (loop_vinfo)
7853 loop = LOOP_VINFO_LOOP (loop_vinfo);
7855 *def_stmt = NULL;
7856 *def = NULL_TREE;
7858 if (dump_enabled_p ())
7860 dump_printf_loc (MSG_NOTE, vect_location,
7861 "vect_is_simple_use: operand ");
7862 dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
7863 dump_printf (MSG_NOTE, "\n");
7866 if (CONSTANT_CLASS_P (operand))
7868 *dt = vect_constant_def;
7869 return true;
7872 if (is_gimple_min_invariant (operand))
7874 *def = operand;
7875 *dt = vect_external_def;
7876 return true;
7879 if (TREE_CODE (operand) == PAREN_EXPR)
7881 if (dump_enabled_p ())
7882 dump_printf_loc (MSG_NOTE, vect_location, "non-associatable copy.\n");
7883 operand = TREE_OPERAND (operand, 0);
7886 if (TREE_CODE (operand) != SSA_NAME)
7888 if (dump_enabled_p ())
7889 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7890 "not ssa-name.\n");
7891 return false;
7894 *def_stmt = SSA_NAME_DEF_STMT (operand);
7895 if (*def_stmt == NULL)
7897 if (dump_enabled_p ())
7898 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7899 "no def_stmt.\n");
7900 return false;
7903 if (dump_enabled_p ())
7905 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
7906 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
7909 /* An empty stmt is expected only in case of a function argument.
7910 (Otherwise we expect a PHI node or a GIMPLE_ASSIGN.) */
7911 if (gimple_nop_p (*def_stmt))
7913 *def = operand;
7914 *dt = vect_external_def;
7915 return true;
7918 bb = gimple_bb (*def_stmt);
7920 if ((loop && !flow_bb_inside_loop_p (loop, bb))
7921 || (!loop && bb != BB_VINFO_BB (bb_vinfo))
7922 || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
7923 *dt = vect_external_def;
7924 else
7926 stmt_vinfo = vinfo_for_stmt (*def_stmt);
7927 if (!loop && !STMT_VINFO_VECTORIZABLE (stmt_vinfo))
7928 *dt = vect_external_def;
7929 else
7930 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
7933 if (dump_enabled_p ())
7935 dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
7936 switch (*dt)
7938 case vect_uninitialized_def:
7939 dump_printf (MSG_NOTE, "uninitialized\n");
7940 break;
7941 case vect_constant_def:
7942 dump_printf (MSG_NOTE, "constant\n");
7943 break;
7944 case vect_external_def:
7945 dump_printf (MSG_NOTE, "external\n");
7946 break;
7947 case vect_internal_def:
7948 dump_printf (MSG_NOTE, "internal\n");
7949 break;
7950 case vect_induction_def:
7951 dump_printf (MSG_NOTE, "induction\n");
7952 break;
7953 case vect_reduction_def:
7954 dump_printf (MSG_NOTE, "reduction\n");
7955 break;
7956 case vect_double_reduction_def:
7957 dump_printf (MSG_NOTE, "double reduction\n");
7958 break;
7959 case vect_nested_cycle:
7960 dump_printf (MSG_NOTE, "nested cycle\n");
7961 break;
7962 case vect_unknown_def_type:
7963 dump_printf (MSG_NOTE, "unknown\n");
7964 break;
7968 if (*dt == vect_unknown_def_type
7969 || (stmt
7970 && *dt == vect_double_reduction_def
7971 && gimple_code (stmt) != GIMPLE_PHI))
7973 if (dump_enabled_p ())
7974 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7975 "Unsupported pattern.\n");
7976 return false;
7979 switch (gimple_code (*def_stmt))
7981 case GIMPLE_PHI:
7982 *def = gimple_phi_result (*def_stmt);
7983 break;
7985 case GIMPLE_ASSIGN:
7986 *def = gimple_assign_lhs (*def_stmt);
7987 break;
7989 case GIMPLE_CALL:
7990 *def = gimple_call_lhs (*def_stmt);
7991 if (*def != NULL)
7992 break;
7993 /* FALLTHRU */
7994 default:
7995 if (dump_enabled_p ())
7996 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7997 "unsupported defining stmt:\n");
7998 return false;
8001 return true;
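/* Illustrative call of the routine above (a sketch, not code from this
   file; USE stands for a hypothetical operand of STMT in the loop):

     gimple def_stmt;
     tree def;
     enum vect_def_type dt;
     if (!vect_is_simple_use (use, stmt, loop_vinfo, NULL,
                              &def_stmt, &def, &dt))
       return false;

   On success DT classifies the operand: vect_constant_def for literals,
   vect_external_def for invariants and defs outside the loop,
   vect_internal_def for defs inside the loop, and the induction/reduction
   kinds for the respective cycles, as listed in the dump switch above.  */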
8004 /* Function vect_is_simple_use_1.
8006 Same as vect_is_simple_use but also determines the vector operand
8007 type of OPERAND and stores it to *VECTYPE. If the definition of
8008 OPERAND is vect_uninitialized_def, vect_constant_def or
8009 vect_external_def, *VECTYPE will be set to NULL_TREE and the caller
8010 is responsible for computing the best suited vector type for the
8011 scalar operand. */
8013 bool
8014 vect_is_simple_use_1 (tree operand, gimple stmt, loop_vec_info loop_vinfo,
8015 bb_vec_info bb_vinfo, gimple *def_stmt,
8016 tree *def, enum vect_def_type *dt, tree *vectype)
8018 if (!vect_is_simple_use (operand, stmt, loop_vinfo, bb_vinfo, def_stmt,
8019 def, dt))
8020 return false;
8022 /* Now get a vector type if the def is internal, otherwise supply
8023 NULL_TREE and leave it up to the caller to figure out a proper
8024 type for the use stmt. */
8025 if (*dt == vect_internal_def
8026 || *dt == vect_induction_def
8027 || *dt == vect_reduction_def
8028 || *dt == vect_double_reduction_def
8029 || *dt == vect_nested_cycle)
8031 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
8033 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
8034 && !STMT_VINFO_RELEVANT (stmt_info)
8035 && !STMT_VINFO_LIVE_P (stmt_info))
8036 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
8038 *vectype = STMT_VINFO_VECTYPE (stmt_info);
8039 gcc_assert (*vectype != NULL_TREE);
8041 else if (*dt == vect_uninitialized_def
8042 || *dt == vect_constant_def
8043 || *dt == vect_external_def)
8044 *vectype = NULL_TREE;
8045 else
8046 gcc_unreachable ();
8048 return true;
8052 /* Function supportable_widening_operation
8054 Check whether an operation represented by the code CODE is a
8055 widening operation that is supported by the target platform in
8056 vector form (i.e., when operating on arguments of type VECTYPE_IN
8057 producing a result of type VECTYPE_OUT).
8059 Widening operations we currently support are NOP (CONVERT), FLOAT,
8060 WIDEN_MULT and WIDEN_LSHIFT. This function checks if they are supported
8061 by the target platform either directly (via vector tree-codes), or via
8062 target builtins.
8064 Output:
8065 - CODE1 and CODE2 are codes of vector operations to be used when
8066 vectorizing the operation, if available.
8067 - MULTI_STEP_CVT determines the number of required intermediate steps in
8068 case of multi-step conversion (like char->short->int - in that case
8069 MULTI_STEP_CVT will be 1).
8070 - INTERM_TYPES contains the intermediate type required to perform the
8071 widening operation (short in the above example). */
8073 bool
8074 supportable_widening_operation (enum tree_code code, gimple stmt,
8075 tree vectype_out, tree vectype_in,
8076 enum tree_code *code1, enum tree_code *code2,
8077 int *multi_step_cvt,
8078 vec<tree> *interm_types)
8080 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8081 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
8082 struct loop *vect_loop = NULL;
8083 machine_mode vec_mode;
8084 enum insn_code icode1, icode2;
8085 optab optab1, optab2;
8086 tree vectype = vectype_in;
8087 tree wide_vectype = vectype_out;
8088 enum tree_code c1, c2;
8089 int i;
8090 tree prev_type, intermediate_type;
8091 machine_mode intermediate_mode, prev_mode;
8092 optab optab3, optab4;
8094 *multi_step_cvt = 0;
8095 if (loop_info)
8096 vect_loop = LOOP_VINFO_LOOP (loop_info);
8098 switch (code)
8100 case WIDEN_MULT_EXPR:
8101 /* The result of a vectorized widening operation usually requires
8102 two vectors (because the widened results do not fit into one vector).
8103 The generated vector results would normally be expected to be
8104 generated in the same order as in the original scalar computation,
8105 i.e. if 8 results are generated in each vector iteration, they are
8106 to be organized as follows:
8107 vect1: [res1,res2,res3,res4],
8108 vect2: [res5,res6,res7,res8].
8110 However, in the special case that the result of the widening
8111 operation is used in a reduction computation only, the order doesn't
8112 matter (because when vectorizing a reduction we change the order of
8113 the computation). Some targets can take advantage of this and
8114 generate more efficient code. For example, targets like Altivec,
8115 that support widen_mult using a sequence of {mult_even,mult_odd}
8116 generate the following vectors:
8117 vect1: [res1,res3,res5,res7],
8118 vect2: [res2,res4,res6,res8].
8120 When vectorizing outer-loops, we execute the inner-loop sequentially
8121 (each vectorized inner-loop iteration contributes to VF outer-loop
8122 iterations in parallel). We therefore don't allow changing the
8123 order of the computation in the inner-loop during outer-loop
8124 vectorization. */
8125 /* TODO: Another case in which order doesn't *really* matter is when we
8126 widen and then contract again, e.g. (short)((int)x * y >> 8).
8127 Normally, pack_trunc performs an even/odd permute, whereas the
8128 repack from an even/odd expansion would be an interleave, which
8129 would be significantly simpler for e.g. AVX2. */
8130 /* In any case, in order to avoid duplicating the code below, recurse
8131 on VEC_WIDEN_MULT_EVEN_EXPR. If it succeeds, all the return values
8132 are properly set up for the caller. If we fail, we'll continue with
8133 a VEC_WIDEN_MULT_LO/HI_EXPR check. */
8134 if (vect_loop
8135 && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
8136 && !nested_in_vect_loop_p (vect_loop, stmt)
8137 && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
8138 stmt, vectype_out, vectype_in,
8139 code1, code2, multi_step_cvt,
8140 interm_types))
8142 /* Elements in a vector with vect_used_by_reduction property cannot
8143 be reordered if the use chain with this property does not have the
8144 same operation. One such example is s += a * b, where elements
8145 in a and b cannot be reordered. Here we check if the vector defined
8146 by STMT is only directly used in the reduction statement. */
8147 tree lhs = gimple_assign_lhs (stmt);
8148 use_operand_p dummy;
8149 gimple use_stmt;
8150 stmt_vec_info use_stmt_info = NULL;
8151 if (single_imm_use (lhs, &dummy, &use_stmt)
8152 && (use_stmt_info = vinfo_for_stmt (use_stmt))
8153 && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
8154 return true;
8156 c1 = VEC_WIDEN_MULT_LO_EXPR;
8157 c2 = VEC_WIDEN_MULT_HI_EXPR;
8158 break;
8160 case VEC_WIDEN_MULT_EVEN_EXPR:
8161 /* Support the recursion induced just above. */
8162 c1 = VEC_WIDEN_MULT_EVEN_EXPR;
8163 c2 = VEC_WIDEN_MULT_ODD_EXPR;
8164 break;
8166 case WIDEN_LSHIFT_EXPR:
8167 c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
8168 c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
8169 break;
8171 CASE_CONVERT:
8172 c1 = VEC_UNPACK_LO_EXPR;
8173 c2 = VEC_UNPACK_HI_EXPR;
8174 break;
8176 case FLOAT_EXPR:
8177 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
8178 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
8179 break;
8181 case FIX_TRUNC_EXPR:
8182 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
8183 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
8184 computing the operation. */
8185 return false;
8187 default:
8188 gcc_unreachable ();
8191 if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
8193 enum tree_code ctmp = c1;
8194 c1 = c2;
8195 c2 = ctmp;
8198 if (code == FIX_TRUNC_EXPR)
8200 /* The signedness is determined from the output operand. */
8201 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
8202 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
8204 else
8206 optab1 = optab_for_tree_code (c1, vectype, optab_default);
8207 optab2 = optab_for_tree_code (c2, vectype, optab_default);
8210 if (!optab1 || !optab2)
8211 return false;
8213 vec_mode = TYPE_MODE (vectype);
8214 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
8215 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
8216 return false;
8218 *code1 = c1;
8219 *code2 = c2;
8221 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
8222 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
8223 return true;
8225 /* Check if it's a multi-step conversion that can be done using intermediate
8226 types. */
8228 prev_type = vectype;
8229 prev_mode = vec_mode;
8231 if (!CONVERT_EXPR_CODE_P (code))
8232 return false;
8234 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
8235 intermediate steps in the promotion sequence. We try
8236 MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
8237 not. */
8238 interm_types->create (MAX_INTERM_CVT_STEPS);
8239 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
8241 intermediate_mode = insn_data[icode1].operand[0].mode;
8242 intermediate_type
8243 = lang_hooks.types.type_for_mode (intermediate_mode,
8244 TYPE_UNSIGNED (prev_type));
8245 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
8246 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
8248 if (!optab3 || !optab4
8249 || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
8250 || insn_data[icode1].operand[0].mode != intermediate_mode
8251 || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
8252 || insn_data[icode2].operand[0].mode != intermediate_mode
8253 || ((icode1 = optab_handler (optab3, intermediate_mode))
8254 == CODE_FOR_nothing)
8255 || ((icode2 = optab_handler (optab4, intermediate_mode))
8256 == CODE_FOR_nothing))
8257 break;
8259 interm_types->quick_push (intermediate_type);
8260 (*multi_step_cvt)++;
8262 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
8263 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
8264 return true;
8266 prev_type = intermediate_type;
8267 prev_mode = intermediate_mode;
8270 interm_types->release ();
8271 return false;
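/* Illustrative query of the routine above (a sketch, not code from this
   file; V4SI_TYPE and V16QI_TYPE are placeholders for the output and input
   vectypes of a char -> int widening, and success depends on the target's
   vec_unpack optabs):

     enum tree_code c1, c2;
     int steps;
     auto_vec<tree> itypes;
     bool ok = supportable_widening_operation (NOP_EXPR, stmt,
                                               v4si_type, v16qi_type,
                                               &c1, &c2, &steps, &itypes);

   If OK, C1/C2 would be VEC_UNPACK_LO_EXPR/VEC_UNPACK_HI_EXPR (swapped on
   big-endian targets), STEPS would be 1 and ITYPES would hold the
   intermediate short vector type, matching the char->short->int example in
   the header comment.  */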
8275 /* Function supportable_narrowing_operation
8277 Check whether an operation represented by the code CODE is a
8278 narrowing operation that is supported by the target platform in
8279 vector form (i.e., when operating on arguments of type VECTYPE_IN
8280 and producing a result of type VECTYPE_OUT).
8282 Narrowing operations we currently support are NOP (CONVERT) and
8283 FIX_TRUNC. This function checks if these operations are supported by
8284 the target platform directly via vector tree-codes.
8286 Output:
8287 - CODE1 is the code of a vector operation to be used when
8288 vectorizing the operation, if available.
8289 - MULTI_STEP_CVT determines the number of required intermediate steps in
8290 case of multi-step conversion (like int->short->char - in that case
8291 MULTI_STEP_CVT will be 1).
8292 - INTERM_TYPES contains the intermediate type required to perform the
8293 narrowing operation (short in the above example). */
8295 bool
8296 supportable_narrowing_operation (enum tree_code code,
8297 tree vectype_out, tree vectype_in,
8298 enum tree_code *code1, int *multi_step_cvt,
8299 vec<tree> *interm_types)
8301 machine_mode vec_mode;
8302 enum insn_code icode1;
8303 optab optab1, interm_optab;
8304 tree vectype = vectype_in;
8305 tree narrow_vectype = vectype_out;
8306 enum tree_code c1;
8307 tree intermediate_type;
8308 machine_mode intermediate_mode, prev_mode;
8309 int i;
8310 bool uns;
8312 *multi_step_cvt = 0;
8313 switch (code)
8315 CASE_CONVERT:
8316 c1 = VEC_PACK_TRUNC_EXPR;
8317 break;
8319 case FIX_TRUNC_EXPR:
8320 c1 = VEC_PACK_FIX_TRUNC_EXPR;
8321 break;
8323 case FLOAT_EXPR:
8324 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
8325 tree code and optabs used for computing the operation. */
8326 return false;
8328 default:
8329 gcc_unreachable ();
8332 if (code == FIX_TRUNC_EXPR)
8333 /* The signedness is determined from the output operand. */
8334 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
8335 else
8336 optab1 = optab_for_tree_code (c1, vectype, optab_default);
8338 if (!optab1)
8339 return false;
8341 vec_mode = TYPE_MODE (vectype);
8342 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
8343 return false;
8345 *code1 = c1;
8347 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
8348 return true;
8350 /* Check if it's a multi-step conversion that can be done using intermediate
8351 types. */
8352 prev_mode = vec_mode;
8353 if (code == FIX_TRUNC_EXPR)
8354 uns = TYPE_UNSIGNED (vectype_out);
8355 else
8356 uns = TYPE_UNSIGNED (vectype);
8358 /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
8359 conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
8360 costly than signed. */
8361 if (code == FIX_TRUNC_EXPR && uns)
8363 enum insn_code icode2;
8365 intermediate_type
8366 = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
8367 interm_optab
8368 = optab_for_tree_code (c1, intermediate_type, optab_default);
8369 if (interm_optab != unknown_optab
8370 && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
8371 && insn_data[icode1].operand[0].mode
8372 == insn_data[icode2].operand[0].mode)
8374 uns = false;
8375 optab1 = interm_optab;
8376 icode1 = icode2;
8380 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
8381 intermediate steps in the demotion sequence. We try
8382 MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not. */
8383 interm_types->create (MAX_INTERM_CVT_STEPS);
8384 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
8386 intermediate_mode = insn_data[icode1].operand[0].mode;
8387 intermediate_type
8388 = lang_hooks.types.type_for_mode (intermediate_mode, uns);
8389 interm_optab
8390 = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
8391 optab_default);
8392 if (!interm_optab
8393 || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
8394 || insn_data[icode1].operand[0].mode != intermediate_mode
8395 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
8396 == CODE_FOR_nothing))
8397 break;
8399 interm_types->quick_push (intermediate_type);
8400 (*multi_step_cvt)++;
8402 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
8403 return true;
8405 prev_mode = intermediate_mode;
8406 optab1 = interm_optab;
8409 interm_types->release ();
8410 return false;
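/* Illustrative query of the routine above (a sketch, not code from this
   file; V16QI_TYPE and V4SI_TYPE are placeholders for the output and input
   vectypes of an int -> char narrowing, and success depends on the target's
   vec_pack_trunc optabs):

     enum tree_code c1;
     int steps;
     auto_vec<tree> itypes;
     bool ok = supportable_narrowing_operation (NOP_EXPR,
                                                v16qi_type, v4si_type,
                                                &c1, &steps, &itypes);

   If OK, C1 would be VEC_PACK_TRUNC_EXPR, STEPS would be 1 and ITYPES would
   hold the intermediate short vector type, matching the int->short->char
   example in the header comment.  */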