gcc/tree-vect-stmts.c
1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003-2019 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
4 and Ira Rosen <irar@il.ibm.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "ssa.h"
31 #include "optabs-tree.h"
32 #include "insn-config.h"
33 #include "recog.h" /* FIXME: for insn_data */
34 #include "cgraph.h"
35 #include "dumpfile.h"
36 #include "alias.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
39 #include "tree-eh.h"
40 #include "gimplify.h"
41 #include "gimple-iterator.h"
42 #include "gimplify-me.h"
43 #include "tree-cfg.h"
44 #include "tree-ssa-loop-manip.h"
45 #include "cfgloop.h"
46 #include "tree-ssa-loop.h"
47 #include "tree-scalar-evolution.h"
48 #include "tree-vectorizer.h"
49 #include "builtins.h"
50 #include "internal-fn.h"
51 #include "tree-vector-builder.h"
52 #include "vec-perm-indices.h"
53 #include "tree-ssa-loop-niter.h"
54 #include "gimple-fold.h"
56 /* For lang_hooks.types.type_for_mode. */
57 #include "langhooks.h"
59 /* Return the vectorized type for the given statement. */
61 tree
62 stmt_vectype (struct _stmt_vec_info *stmt_info)
64 return STMT_VINFO_VECTYPE (stmt_info);
67 /* Return TRUE iff the given statement is in an inner loop relative to
68 the loop being vectorized. */
69 bool
70 stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
72 gimple *stmt = STMT_VINFO_STMT (stmt_info);
73 basic_block bb = gimple_bb (stmt);
74 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
75 struct loop* loop;
77 if (!loop_vinfo)
78 return false;
80 loop = LOOP_VINFO_LOOP (loop_vinfo);
82 return (bb->loop_father == loop->inner);
85 /* Record the cost of a statement, either by directly informing the
86 target model or by saving it in a vector for later processing.
87 Return a preliminary estimate of the statement's cost. */
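/* As an illustration (this mirrors the calls made in vect_get_load_cost
   and vect_get_store_cost below), costing one unaligned vector load in
   the loop body looks like:

     unsigned cost
       = record_stmt_cost (cost_vec, 1, unaligned_load, stmt_info,
                           DR_MISALIGNMENT (dr_info), vect_body);

   which pushes a stmt_info_for_cost entry onto the cost vector and
   returns the target's builtin_vectorization_cost estimate scaled by
   the count.  */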
89 unsigned
90 record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
91 enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
92 int misalign, enum vect_cost_model_location where)
94 if ((kind == vector_load || kind == unaligned_load)
95 && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
96 kind = vector_gather_load;
97 if ((kind == vector_store || kind == unaligned_store)
98 && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
99 kind = vector_scatter_store;
101 stmt_info_for_cost si = { count, kind, where, stmt_info, misalign };
102 body_cost_vec->safe_push (si);
104 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
105 return (unsigned)
106 (builtin_vectorization_cost (kind, vectype, misalign) * count);
109 /* Return a variable of type ELEM_TYPE[NELEMS]. */
111 static tree
112 create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
114 return create_tmp_var (build_array_type_nelts (elem_type, nelems),
115 "vect_array");
118 /* ARRAY is an array of vectors created by create_vector_array.
119 Return an SSA_NAME for the vector in index N. The reference
120 is part of the vectorization of STMT_INFO and the vector is associated
121 with scalar destination SCALAR_DEST. */
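/* For illustration (hypothetical names): with an ARRAY of four vectors
   and N == 2, this emits something like

     vect_x.7 = ARRAY[2];

   where vect_x.7 is a fresh SSA name created from SCALAR_DEST's vector
   destination variable, and returns that SSA name.  */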
123 static tree
124 read_vector_array (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
125 tree scalar_dest, tree array, unsigned HOST_WIDE_INT n)
127 tree vect_type, vect, vect_name, array_ref;
128 gimple *new_stmt;
130 gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
131 vect_type = TREE_TYPE (TREE_TYPE (array));
132 vect = vect_create_destination_var (scalar_dest, vect_type);
133 array_ref = build4 (ARRAY_REF, vect_type, array,
134 build_int_cst (size_type_node, n),
135 NULL_TREE, NULL_TREE);
137 new_stmt = gimple_build_assign (vect, array_ref);
138 vect_name = make_ssa_name (vect, new_stmt);
139 gimple_assign_set_lhs (new_stmt, vect_name);
140 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
142 return vect_name;
145 /* ARRAY is an array of vectors created by create_vector_array.
146 Emit code to store SSA_NAME VECT in index N of the array.
147 The store is part of the vectorization of STMT_INFO. */
149 static void
150 write_vector_array (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
151 tree vect, tree array, unsigned HOST_WIDE_INT n)
153 tree array_ref;
154 gimple *new_stmt;
156 array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
157 build_int_cst (size_type_node, n),
158 NULL_TREE, NULL_TREE);
160 new_stmt = gimple_build_assign (array_ref, vect);
161 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
164 /* PTR is a pointer to an array of type TYPE. Return a representation
165 of *PTR. ALIAS_PTR_TYPE gives the alias pointer type of the new
166 reference, which replaces those in the original data-ref group. */
168 static tree
169 create_array_ref (tree type, tree ptr, tree alias_ptr_type)
171 tree mem_ref;
173 mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
174 /* Arrays have the same alignment as their type. */
175 set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
176 return mem_ref;
179 /* Add a clobber of variable VAR to the vectorization of STMT_INFO.
180 Emit the clobber before *GSI. */
182 static void
183 vect_clobber_variable (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
184 tree var)
186 tree clobber = build_clobber (TREE_TYPE (var));
187 gimple *new_stmt = gimple_build_assign (var, clobber);
188 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
191 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
193 /* Function vect_mark_relevant.
195 Mark STMT_INFO as "relevant for vectorization" and add it to WORKLIST. */
197 static void
198 vect_mark_relevant (vec<stmt_vec_info> *worklist, stmt_vec_info stmt_info,
199 enum vect_relevant relevant, bool live_p)
201 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
202 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
204 if (dump_enabled_p ())
205 dump_printf_loc (MSG_NOTE, vect_location,
206 "mark relevant %d, live %d: %G", relevant, live_p,
207 stmt_info->stmt);
209 /* If this stmt is an original stmt in a pattern, we might need to mark its
210 related pattern stmt instead of the original stmt. However, such stmts
211 may have their own uses that are not in any pattern; in such cases the
212 stmt itself should be marked. */
213 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
215 /* This is the last stmt in a sequence that was detected as a
216 pattern that can potentially be vectorized. Don't mark the stmt
217 as relevant/live because it's not going to be vectorized.
218 Instead mark the pattern-stmt that replaces it. */
220 if (dump_enabled_p ())
221 dump_printf_loc (MSG_NOTE, vect_location,
222 "last stmt in pattern. don't mark"
223 " relevant/live.\n");
224 stmt_vec_info old_stmt_info = stmt_info;
225 stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
226 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == old_stmt_info);
227 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
228 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
231 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
232 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
233 STMT_VINFO_RELEVANT (stmt_info) = relevant;
235 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
236 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
238 if (dump_enabled_p ())
239 dump_printf_loc (MSG_NOTE, vect_location,
240 "already marked relevant/live.\n");
241 return;
244 worklist->safe_push (stmt_info);
248 /* Function is_simple_and_all_uses_invariant
250 Return true if STMT_INFO is simple and all of its uses are invariant. */
252 bool
253 is_simple_and_all_uses_invariant (stmt_vec_info stmt_info,
254 loop_vec_info loop_vinfo)
256 tree op;
257 ssa_op_iter iter;
259 gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
260 if (!stmt)
261 return false;
263 FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
265 enum vect_def_type dt = vect_uninitialized_def;
267 if (!vect_is_simple_use (op, loop_vinfo, &dt))
269 if (dump_enabled_p ())
270 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
271 "use not simple.\n");
272 return false;
275 if (dt != vect_external_def && dt != vect_constant_def)
276 return false;
278 return true;
281 /* Function vect_stmt_relevant_p.
283 Return true if STMT_INFO, in the loop that is represented by LOOP_VINFO,
284 is "relevant for vectorization".
286 A stmt is considered "relevant for vectorization" if:
287 - it has uses outside the loop.
288 - it has vdefs (it alters memory).
289 - it is a control stmt in the loop (other than the loop exit condition).
291 CHECKME: what other side effects would the vectorizer allow? */
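/* For example (hypothetical scalar loop, for illustration only):

     for (i = 0; i < n; i++)
       {
         a[i] = b[i] + 1;      <-- has a vdef, so vect_used_in_scope
         t_1 = b[i] * 2;       <-- no vdef; live because of the use below
       }
     last = t_1;               <-- use outside the loop

   The def of t_1 that reaches the loop exit makes the stmt live; since
   it is not invariant it ends up marked vect_used_only_live.  */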
293 static bool
294 vect_stmt_relevant_p (stmt_vec_info stmt_info, loop_vec_info loop_vinfo,
295 enum vect_relevant *relevant, bool *live_p)
297 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
298 ssa_op_iter op_iter;
299 imm_use_iterator imm_iter;
300 use_operand_p use_p;
301 def_operand_p def_p;
303 *relevant = vect_unused_in_scope;
304 *live_p = false;
306 /* cond stmt other than loop exit cond. */
307 if (is_ctrl_stmt (stmt_info->stmt)
308 && STMT_VINFO_TYPE (stmt_info) != loop_exit_ctrl_vec_info_type)
309 *relevant = vect_used_in_scope;
311 /* changing memory. */
312 if (gimple_code (stmt_info->stmt) != GIMPLE_PHI)
313 if (gimple_vdef (stmt_info->stmt)
314 && !gimple_clobber_p (stmt_info->stmt))
316 if (dump_enabled_p ())
317 dump_printf_loc (MSG_NOTE, vect_location,
318 "vec_stmt_relevant_p: stmt has vdefs.\n");
319 *relevant = vect_used_in_scope;
322 /* uses outside the loop. */
323 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt_info->stmt, op_iter, SSA_OP_DEF)
325 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
327 basic_block bb = gimple_bb (USE_STMT (use_p));
328 if (!flow_bb_inside_loop_p (loop, bb))
330 if (dump_enabled_p ())
331 dump_printf_loc (MSG_NOTE, vect_location,
332 "vec_stmt_relevant_p: used out of loop.\n");
334 if (is_gimple_debug (USE_STMT (use_p)))
335 continue;
337 /* We expect all such uses to be in the loop exit phis
338 (because of loop-closed SSA form). */
339 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
340 gcc_assert (bb == single_exit (loop)->dest);
342 *live_p = true;
347 if (*live_p && *relevant == vect_unused_in_scope
348 && !is_simple_and_all_uses_invariant (stmt_info, loop_vinfo))
350 if (dump_enabled_p ())
351 dump_printf_loc (MSG_NOTE, vect_location,
352 "vec_stmt_relevant_p: stmt live but not relevant.\n");
353 *relevant = vect_used_only_live;
356 return (*live_p || *relevant);
360 /* Function exist_non_indexing_operands_for_use_p
362 USE is one of the uses attached to STMT_INFO. Check if USE is
363 used in STMT_INFO for anything other than indexing an array. */
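/* For example (hypothetical gimple):

     a[i_2] = x_1;    for USE == x_1 this returns true (x_1 is the
                      stored value), whereas for USE == i_2 it returns
                      false (i_2 only indexes the array).
     x_3 = a[i_4];    all uses here are for address computation, so
                      this returns false.  */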
365 static bool
366 exist_non_indexing_operands_for_use_p (tree use, stmt_vec_info stmt_info)
368 tree operand;
370 /* USE corresponds to some operand in STMT. If there is no data
371 reference in STMT, then any operand that corresponds to USE
372 is not indexing an array. */
373 if (!STMT_VINFO_DATA_REF (stmt_info))
374 return true;
376 /* STMT has a data_ref. FORNOW this means that it is one of
377 the following forms:
378 -1- ARRAY_REF = var
379 -2- var = ARRAY_REF
380 (This should have been verified in analyze_data_refs).
382 'var' in the second case corresponds to a def, not a use,
383 so USE cannot correspond to any operands that are not used
384 for array indexing.
386 Therefore, all we need to check is if STMT falls into the
387 first case, and whether var corresponds to USE. */
389 gassign *assign = dyn_cast <gassign *> (stmt_info->stmt);
390 if (!assign || !gimple_assign_copy_p (assign))
392 gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
393 if (call && gimple_call_internal_p (call))
395 internal_fn ifn = gimple_call_internal_fn (call);
396 int mask_index = internal_fn_mask_index (ifn);
397 if (mask_index >= 0
398 && use == gimple_call_arg (call, mask_index))
399 return true;
400 int stored_value_index = internal_fn_stored_value_index (ifn);
401 if (stored_value_index >= 0
402 && use == gimple_call_arg (call, stored_value_index))
403 return true;
404 if (internal_gather_scatter_fn_p (ifn)
405 && use == gimple_call_arg (call, 1))
406 return true;
408 return false;
411 if (TREE_CODE (gimple_assign_lhs (assign)) == SSA_NAME)
412 return false;
413 operand = gimple_assign_rhs1 (assign);
414 if (TREE_CODE (operand) != SSA_NAME)
415 return false;
417 if (operand == use)
418 return true;
420 return false;
425 Function process_use.
427 Inputs:
428 - a USE in STMT_VINFO in a loop represented by LOOP_VINFO
429 - RELEVANT - enum value to be set in the STMT_VINFO of the stmt
430 that defined USE. This is done by calling mark_relevant and passing it
431 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
432 - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
433 be performed.
435 Outputs:
436 Generally, LIVE_P and RELEVANT are used to define the liveness and
437 relevance info of the DEF_STMT of this USE:
438 STMT_VINFO_LIVE_P (DEF_stmt_vinfo) <-- live_p
439 STMT_VINFO_RELEVANT (DEF_stmt_vinfo) <-- relevant
440 Exceptions:
441 - case 1: If USE is used only for address computations (e.g. array indexing),
442 which does not need to be directly vectorized, then the liveness/relevance
443 of the respective DEF_STMT is left unchanged.
444 - case 2: If STMT_VINFO is a reduction phi and DEF_STMT is a reduction stmt,
445 we skip DEF_STMT because it has already been processed.
446 - case 3: If DEF_STMT and STMT_VINFO are in different nests, then
447 "relevant" will be modified accordingly.
449 Return opt_result::success () if everything is as expected, and a failure result otherwise. */
451 static opt_result
452 process_use (stmt_vec_info stmt_vinfo, tree use, loop_vec_info loop_vinfo,
453 enum vect_relevant relevant, vec<stmt_vec_info> *worklist,
454 bool force)
456 stmt_vec_info dstmt_vinfo;
457 basic_block bb, def_bb;
458 enum vect_def_type dt;
460 /* case 1: we are only interested in uses that need to be vectorized. Uses
461 that are used for address computation are not considered relevant. */
462 if (!force && !exist_non_indexing_operands_for_use_p (use, stmt_vinfo))
463 return opt_result::success ();
465 if (!vect_is_simple_use (use, loop_vinfo, &dt, &dstmt_vinfo))
466 return opt_result::failure_at (stmt_vinfo->stmt,
467 "not vectorized:"
468 " unsupported use in stmt.\n");
470 if (!dstmt_vinfo)
471 return opt_result::success ();
473 def_bb = gimple_bb (dstmt_vinfo->stmt);
475 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DSTMT_VINFO).
476 DSTMT_VINFO must have already been processed, because this should be the
477 only way that STMT, which is a reduction-phi, was put in the worklist,
478 as there should be no other uses for DSTMT_VINFO in the loop. So we just
479 check that everything is as expected, and we are done. */
480 bb = gimple_bb (stmt_vinfo->stmt);
481 if (gimple_code (stmt_vinfo->stmt) == GIMPLE_PHI
482 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
483 && gimple_code (dstmt_vinfo->stmt) != GIMPLE_PHI
484 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
485 && bb->loop_father == def_bb->loop_father)
487 if (dump_enabled_p ())
488 dump_printf_loc (MSG_NOTE, vect_location,
489 "reduc-stmt defining reduc-phi in the same nest.\n");
490 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
491 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
492 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
493 return opt_result::success ();
496 /* case 3a: outer-loop stmt defining an inner-loop stmt:
497 outer-loop-header-bb:
498 d = dstmt_vinfo
499 inner-loop:
500 stmt # use (d)
501 outer-loop-tail-bb:
502 ... */
503 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
505 if (dump_enabled_p ())
506 dump_printf_loc (MSG_NOTE, vect_location,
507 "outer-loop def-stmt defining inner-loop stmt.\n");
509 switch (relevant)
511 case vect_unused_in_scope:
512 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
513 vect_used_in_scope : vect_unused_in_scope;
514 break;
516 case vect_used_in_outer_by_reduction:
517 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
518 relevant = vect_used_by_reduction;
519 break;
521 case vect_used_in_outer:
522 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
523 relevant = vect_used_in_scope;
524 break;
526 case vect_used_in_scope:
527 break;
529 default:
530 gcc_unreachable ();
534 /* case 3b: inner-loop stmt defining an outer-loop stmt:
535 outer-loop-header-bb:
537 inner-loop:
538 d = dstmt_vinfo
539 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
540 stmt # use (d) */
541 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
543 if (dump_enabled_p ())
544 dump_printf_loc (MSG_NOTE, vect_location,
545 "inner-loop def-stmt defining outer-loop stmt.\n");
547 switch (relevant)
549 case vect_unused_in_scope:
550 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
551 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
552 vect_used_in_outer_by_reduction : vect_unused_in_scope;
553 break;
555 case vect_used_by_reduction:
556 case vect_used_only_live:
557 relevant = vect_used_in_outer_by_reduction;
558 break;
560 case vect_used_in_scope:
561 relevant = vect_used_in_outer;
562 break;
564 default:
565 gcc_unreachable ();
568 /* We are also not interested in uses on loop PHI backedges that are
569 inductions. Otherwise we'll needlessly vectorize the IV increment
570 and cause hybrid SLP for SLP inductions. Unless the PHI is live
571 of course. */
572 else if (gimple_code (stmt_vinfo->stmt) == GIMPLE_PHI
573 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_induction_def
574 && ! STMT_VINFO_LIVE_P (stmt_vinfo)
575 && (PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt,
576 loop_latch_edge (bb->loop_father))
577 == use))
579 if (dump_enabled_p ())
580 dump_printf_loc (MSG_NOTE, vect_location,
581 "induction value on backedge.\n");
582 return opt_result::success ();
586 vect_mark_relevant (worklist, dstmt_vinfo, relevant, false);
587 return opt_result::success ();
591 /* Function vect_mark_stmts_to_be_vectorized.
593 Not all stmts in the loop need to be vectorized. For example:
595 for i...
596 for j...
597 1. T0 = i + j
598 2. T1 = a[T0]
600 3. j = j + 1
602 Stmts 1 and 3 do not need to be vectorized, because loop control and
603 addressing of vectorized data-refs are handled differently.
605 This pass detects such stmts. */
607 opt_result
608 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
610 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
611 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
612 unsigned int nbbs = loop->num_nodes;
613 gimple_stmt_iterator si;
614 unsigned int i;
615 basic_block bb;
616 bool live_p;
617 enum vect_relevant relevant;
619 DUMP_VECT_SCOPE ("vect_mark_stmts_to_be_vectorized");
621 auto_vec<stmt_vec_info, 64> worklist;
623 /* 1. Init worklist. */
624 for (i = 0; i < nbbs; i++)
626 bb = bbs[i];
627 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
629 stmt_vec_info phi_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
630 if (dump_enabled_p ())
631 dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? %G",
632 phi_info->stmt);
634 if (vect_stmt_relevant_p (phi_info, loop_vinfo, &relevant, &live_p))
635 vect_mark_relevant (&worklist, phi_info, relevant, live_p);
637 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
639 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
640 if (dump_enabled_p ())
641 dump_printf_loc (MSG_NOTE, vect_location,
642 "init: stmt relevant? %G", stmt_info->stmt);
644 if (vect_stmt_relevant_p (stmt_info, loop_vinfo, &relevant, &live_p))
645 vect_mark_relevant (&worklist, stmt_info, relevant, live_p);
649 /* 2. Process_worklist */
650 while (worklist.length () > 0)
652 use_operand_p use_p;
653 ssa_op_iter iter;
655 stmt_vec_info stmt_vinfo = worklist.pop ();
656 if (dump_enabled_p ())
657 dump_printf_loc (MSG_NOTE, vect_location,
658 "worklist: examine stmt: %G", stmt_vinfo->stmt);
660 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
661 (DEF_STMT) as relevant/irrelevant according to the relevance property
662 of STMT. */
663 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
665 /* Generally, the relevance property of STMT (in STMT_VINFO_RELEVANT) is
666 propagated as is to the DEF_STMTs of its USEs.
668 One exception is when STMT has been identified as defining a reduction
669 variable; in this case we set the relevance to vect_used_by_reduction.
670 This is because we distinguish between two kinds of relevant stmts -
671 those that are used by a reduction computation, and those that are
672 (also) used by a regular computation. This allows us later on to
673 identify stmts that are used solely by a reduction, and therefore the
674 order of the results that they produce does not have to be kept. */
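/* For example (hypothetical reduction):

     sum_1 = PHI <sum_0 (preheader), sum_2 (latch)>
     ...
     t_3 = a[i] * b[i];      <-- used only by the reduction below
     sum_2 = sum_1 + t_3;    <-- reduction stmt

   The multiply is then relevant only as an input to the reduction, so
   the order of the results it produces does not have to be kept.  */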
676 switch (STMT_VINFO_DEF_TYPE (stmt_vinfo))
678 case vect_reduction_def:
679 gcc_assert (relevant != vect_unused_in_scope);
680 if (relevant != vect_unused_in_scope
681 && relevant != vect_used_in_scope
682 && relevant != vect_used_by_reduction
683 && relevant != vect_used_only_live)
684 return opt_result::failure_at
685 (stmt_vinfo->stmt, "unsupported use of reduction.\n");
686 break;
688 case vect_nested_cycle:
689 if (relevant != vect_unused_in_scope
690 && relevant != vect_used_in_outer_by_reduction
691 && relevant != vect_used_in_outer)
692 return opt_result::failure_at
693 (stmt_vinfo->stmt, "unsupported use of nested cycle.\n");
694 break;
696 case vect_double_reduction_def:
697 if (relevant != vect_unused_in_scope
698 && relevant != vect_used_by_reduction
699 && relevant != vect_used_only_live)
700 return opt_result::failure_at
701 (stmt_vinfo->stmt, "unsupported use of double reduction.\n");
702 break;
704 default:
705 break;
708 if (is_pattern_stmt_p (stmt_vinfo))
710 /* Pattern statements are not inserted into the code, so
711 FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
712 have to scan the RHS or function arguments instead. */
713 if (gassign *assign = dyn_cast <gassign *> (stmt_vinfo->stmt))
715 enum tree_code rhs_code = gimple_assign_rhs_code (assign);
716 tree op = gimple_assign_rhs1 (assign);
718 i = 1;
719 if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
721 opt_result res
722 = process_use (stmt_vinfo, TREE_OPERAND (op, 0),
723 loop_vinfo, relevant, &worklist, false);
724 if (!res)
725 return res;
726 res = process_use (stmt_vinfo, TREE_OPERAND (op, 1),
727 loop_vinfo, relevant, &worklist, false);
728 if (!res)
729 return res;
730 i = 2;
732 for (; i < gimple_num_ops (assign); i++)
734 op = gimple_op (assign, i);
735 if (TREE_CODE (op) == SSA_NAME)
737 opt_result res
738 = process_use (stmt_vinfo, op, loop_vinfo, relevant,
739 &worklist, false);
740 if (!res)
741 return res;
745 else if (gcall *call = dyn_cast <gcall *> (stmt_vinfo->stmt))
747 for (i = 0; i < gimple_call_num_args (call); i++)
749 tree arg = gimple_call_arg (call, i);
750 opt_result res
751 = process_use (stmt_vinfo, arg, loop_vinfo, relevant,
752 &worklist, false);
753 if (!res)
754 return res;
758 else
759 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt_vinfo->stmt, iter, SSA_OP_USE)
761 tree op = USE_FROM_PTR (use_p);
762 opt_result res
763 = process_use (stmt_vinfo, op, loop_vinfo, relevant,
764 &worklist, false);
765 if (!res)
766 return res;
769 if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
771 gather_scatter_info gs_info;
772 if (!vect_check_gather_scatter (stmt_vinfo, loop_vinfo, &gs_info))
773 gcc_unreachable ();
774 opt_result res
775 = process_use (stmt_vinfo, gs_info.offset, loop_vinfo, relevant,
776 &worklist, true);
777 if (!res)
778 return res;
780 } /* while worklist */
782 return opt_result::success ();
785 /* Compute the prologue cost for invariant or constant operands. */
787 static unsigned
788 vect_prologue_cost_for_slp_op (slp_tree node, stmt_vec_info stmt_info,
789 unsigned opno, enum vect_def_type dt,
790 stmt_vector_for_cost *cost_vec)
792 gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[0]->stmt;
793 tree op = gimple_op (stmt, opno);
794 unsigned prologue_cost = 0;
796 /* Without looking at the actual initializer a vector of
797 constants can be implemented as load from the constant pool.
798 When all elements are the same we can use a splat. */
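/* E.g. (hypothetical operands): a constant initializer such as
   { 1, 2, 3, 4 } is costed below as a vector_load from the constant
   pool; an external (loop-invariant) operand that is the same in every
   lane is costed as a scalar_to_vec splat; a mix of different external
   SSA names is costed as a vec_construct.  */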
799 tree vectype = get_vectype_for_scalar_type (TREE_TYPE (op));
800 unsigned group_size = SLP_TREE_SCALAR_STMTS (node).length ();
801 unsigned num_vects_to_check;
802 unsigned HOST_WIDE_INT const_nunits;
803 unsigned nelt_limit;
804 if (TYPE_VECTOR_SUBPARTS (vectype).is_constant (&const_nunits)
805 && ! multiple_p (const_nunits, group_size))
807 num_vects_to_check = SLP_TREE_NUMBER_OF_VEC_STMTS (node);
808 nelt_limit = const_nunits;
810 else
812 /* If either the vector has variable length or the vectors
813 are composed of repeated whole groups we only need to
814 cost construction once. All vectors will be the same. */
815 num_vects_to_check = 1;
816 nelt_limit = group_size;
818 tree elt = NULL_TREE;
819 unsigned nelt = 0;
820 for (unsigned j = 0; j < num_vects_to_check * nelt_limit; ++j)
822 unsigned si = j % group_size;
823 if (nelt == 0)
824 elt = gimple_op (SLP_TREE_SCALAR_STMTS (node)[si]->stmt, opno);
825 /* ??? We're just tracking whether all operands of a single
826 vector initializer are the same, ideally we'd check if
827 we emitted the same one already. */
828 else if (elt != gimple_op (SLP_TREE_SCALAR_STMTS (node)[si]->stmt,
829 opno))
830 elt = NULL_TREE;
831 nelt++;
832 if (nelt == nelt_limit)
834 /* ??? We need to pass down stmt_info for a vector type
835 even if it points to the wrong stmt. */
836 prologue_cost += record_stmt_cost
837 (cost_vec, 1,
838 dt == vect_external_def
839 ? (elt ? scalar_to_vec : vec_construct)
840 : vector_load,
841 stmt_info, 0, vect_prologue);
842 nelt = 0;
846 return prologue_cost;
849 /* Function vect_model_simple_cost.
851 Models cost for simple operations, i.e. those that only emit ncopies of a
852 single op. Right now, this does not account for multiple insns that could
853 be generated for the single vector op. We will handle that shortly. */
855 static void
856 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
857 enum vect_def_type *dt,
858 int ndts,
859 slp_tree node,
860 stmt_vector_for_cost *cost_vec)
862 int inside_cost = 0, prologue_cost = 0;
864 gcc_assert (cost_vec != NULL);
866 /* ??? Somehow we need to fix this at the callers. */
867 if (node)
868 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (node);
870 if (node)
872 /* Scan operands and account for prologue cost of constants/externals.
873 ??? This over-estimates cost for multiple uses and should be
874 re-engineered. */
875 gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[0]->stmt;
876 tree lhs = gimple_get_lhs (stmt);
877 for (unsigned i = 0; i < gimple_num_ops (stmt); ++i)
879 tree op = gimple_op (stmt, i);
880 enum vect_def_type dt;
881 if (!op || op == lhs)
882 continue;
883 if (vect_is_simple_use (op, stmt_info->vinfo, &dt)
884 && (dt == vect_constant_def || dt == vect_external_def))
885 prologue_cost += vect_prologue_cost_for_slp_op (node, stmt_info,
886 i, dt, cost_vec);
889 else
890 /* Cost the "broadcast" of a scalar operand into a vector operand.
891 Use scalar_to_vec to cost the broadcast, as elsewhere in the vector
892 cost model. */
893 for (int i = 0; i < ndts; i++)
894 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
895 prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec,
896 stmt_info, 0, vect_prologue);
898 /* Adjust for two-operator SLP nodes. */
899 if (node && SLP_TREE_TWO_OPERATORS (node))
901 ncopies *= 2;
902 inside_cost += record_stmt_cost (cost_vec, ncopies, vec_perm,
903 stmt_info, 0, vect_body);
906 /* Pass the inside-of-loop statements to the target-specific cost model. */
907 inside_cost += record_stmt_cost (cost_vec, ncopies, vector_stmt,
908 stmt_info, 0, vect_body);
910 if (dump_enabled_p ())
911 dump_printf_loc (MSG_NOTE, vect_location,
912 "vect_model_simple_cost: inside_cost = %d, "
913 "prologue_cost = %d .\n", inside_cost, prologue_cost);
917 /* Model cost for type demotion and promotion operations. PWR is normally
918 zero for single-step promotions and demotions. It will be one if
919 two-step promotion/demotion is required, and so on. Each additional
920 step doubles the number of instructions required. */
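/* For example (hypothetical types), promoting vectors of chars to
   vectors of ints goes via shorts, so PWR is 1: the char->short step is
   costed as vect_pow2 (1) == 2 vec_promote_demote stmts and the
   short->int step as vect_pow2 (2) == 4, since each step doubles the
   number of result vectors.  */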
922 static void
923 vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
924 enum vect_def_type *dt, int pwr,
925 stmt_vector_for_cost *cost_vec)
927 int i, tmp;
928 int inside_cost = 0, prologue_cost = 0;
930 for (i = 0; i < pwr + 1; i++)
932 tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
933 (i + 1) : i;
934 inside_cost += record_stmt_cost (cost_vec, vect_pow2 (tmp),
935 vec_promote_demote, stmt_info, 0,
936 vect_body);
939 /* FORNOW: Assuming maximum 2 args per stmts. */
940 for (i = 0; i < 2; i++)
941 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
942 prologue_cost += record_stmt_cost (cost_vec, 1, vector_stmt,
943 stmt_info, 0, vect_prologue);
945 if (dump_enabled_p ())
946 dump_printf_loc (MSG_NOTE, vect_location,
947 "vect_model_promotion_demotion_cost: inside_cost = %d, "
948 "prologue_cost = %d .\n", inside_cost, prologue_cost);
951 /* Function vect_model_store_cost
953 Models cost for stores. In the case of grouped accesses, one access
954 has the overhead of the grouped access attributed to it. */
956 static void
957 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
958 enum vect_def_type dt,
959 vect_memory_access_type memory_access_type,
960 vec_load_store_type vls_type, slp_tree slp_node,
961 stmt_vector_for_cost *cost_vec)
963 unsigned int inside_cost = 0, prologue_cost = 0;
964 stmt_vec_info first_stmt_info = stmt_info;
965 bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);
967 /* ??? Somehow we need to fix this at the callers. */
968 if (slp_node)
969 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
971 if (vls_type == VLS_STORE_INVARIANT)
973 if (slp_node)
974 prologue_cost += vect_prologue_cost_for_slp_op (slp_node, stmt_info,
975 1, dt, cost_vec);
976 else
977 prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec,
978 stmt_info, 0, vect_prologue);
981 /* Grouped stores update all elements in the group at once,
982 so we want the DR for the first statement. */
983 if (!slp_node && grouped_access_p)
984 first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
986 /* True if we should include any once-per-group costs as well as
987 the cost of the statement itself. For SLP we only get called
988 once per group anyhow. */
989 bool first_stmt_p = (first_stmt_info == stmt_info);
991 /* We assume that the cost of a single store-lanes instruction is
992 equivalent to the cost of DR_GROUP_SIZE separate stores. If a grouped
993 access is instead being provided by a permute-and-store operation,
994 include the cost of the permutes. */
995 if (first_stmt_p
996 && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
998 /* Uses high and low interleave or shuffle operations for each
999 needed permute. */
1000 int group_size = DR_GROUP_SIZE (first_stmt_info);
1001 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
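/* For example (hypothetical numbers): with DR_GROUP_SIZE == 4 and
   ncopies == 1 this models 1 * ceil_log2 (4) * 4 == 8 vec_perm
   operations to interleave the group before the stores.  */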
1002 inside_cost = record_stmt_cost (cost_vec, nstmts, vec_perm,
1003 stmt_info, 0, vect_body);
1005 if (dump_enabled_p ())
1006 dump_printf_loc (MSG_NOTE, vect_location,
1007 "vect_model_store_cost: strided group_size = %d .\n",
1008 group_size);
1011 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1012 /* Costs of the stores. */
1013 if (memory_access_type == VMAT_ELEMENTWISE
1014 || memory_access_type == VMAT_GATHER_SCATTER)
1016 /* N scalar stores plus extracting the elements. */
1017 unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
1018 inside_cost += record_stmt_cost (cost_vec,
1019 ncopies * assumed_nunits,
1020 scalar_store, stmt_info, 0, vect_body);
1022 else
1023 vect_get_store_cost (stmt_info, ncopies, &inside_cost, cost_vec);
1025 if (memory_access_type == VMAT_ELEMENTWISE
1026 || memory_access_type == VMAT_STRIDED_SLP)
1028 /* N scalar stores plus extracting the elements. */
1029 unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
1030 inside_cost += record_stmt_cost (cost_vec,
1031 ncopies * assumed_nunits,
1032 vec_to_scalar, stmt_info, 0, vect_body);
1035 if (dump_enabled_p ())
1036 dump_printf_loc (MSG_NOTE, vect_location,
1037 "vect_model_store_cost: inside_cost = %d, "
1038 "prologue_cost = %d .\n", inside_cost, prologue_cost);
1042 /* Calculate cost of DR's memory access. */
1043 void
1044 vect_get_store_cost (stmt_vec_info stmt_info, int ncopies,
1045 unsigned int *inside_cost,
1046 stmt_vector_for_cost *body_cost_vec)
1048 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
1049 int alignment_support_scheme
1050 = vect_supportable_dr_alignment (dr_info, false);
1052 switch (alignment_support_scheme)
1054 case dr_aligned:
1056 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1057 vector_store, stmt_info, 0,
1058 vect_body);
1060 if (dump_enabled_p ())
1061 dump_printf_loc (MSG_NOTE, vect_location,
1062 "vect_model_store_cost: aligned.\n");
1063 break;
1066 case dr_unaligned_supported:
1068 /* Here, we assign an additional cost for the unaligned store. */
1069 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1070 unaligned_store, stmt_info,
1071 DR_MISALIGNMENT (dr_info),
1072 vect_body);
1073 if (dump_enabled_p ())
1074 dump_printf_loc (MSG_NOTE, vect_location,
1075 "vect_model_store_cost: unaligned supported by "
1076 "hardware.\n");
1077 break;
1080 case dr_unaligned_unsupported:
1082 *inside_cost = VECT_MAX_COST;
1084 if (dump_enabled_p ())
1085 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1086 "vect_model_store_cost: unsupported access.\n");
1087 break;
1090 default:
1091 gcc_unreachable ();
1096 /* Function vect_model_load_cost
1098 Models cost for loads. In the case of grouped accesses, one access has
1099 the overhead of the grouped access attributed to it. Since unaligned
1100 accesses are supported for loads, we also account for the costs of the
1101 access scheme chosen. */
1103 static void
1104 vect_model_load_cost (stmt_vec_info stmt_info, unsigned ncopies,
1105 vect_memory_access_type memory_access_type,
1106 slp_instance instance,
1107 slp_tree slp_node,
1108 stmt_vector_for_cost *cost_vec)
1110 unsigned int inside_cost = 0, prologue_cost = 0;
1111 bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);
1113 gcc_assert (cost_vec);
1115 /* ??? Somehow we need to fix this at the callers. */
1116 if (slp_node)
1117 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
1119 if (slp_node && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
1121 /* If the load is permuted then the alignment is determined by
1122 the first group element not by the first scalar stmt DR. */
1123 stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
1124 /* Record the cost for the permutation. */
1125 unsigned n_perms;
1126 unsigned assumed_nunits
1127 = vect_nunits_for_cost (STMT_VINFO_VECTYPE (first_stmt_info));
1128 unsigned slp_vf = (ncopies * assumed_nunits) / instance->group_size;
1129 vect_transform_slp_perm_load (slp_node, vNULL, NULL,
1130 slp_vf, instance, true,
1131 &n_perms);
1132 inside_cost += record_stmt_cost (cost_vec, n_perms, vec_perm,
1133 first_stmt_info, 0, vect_body);
1134 /* And adjust the number of loads performed. This handles
1135 redundancies as well as loads that are later dead. */
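/* For example (hypothetical): with DR_GROUP_SIZE == 4, assumed_nunits
   == 2 and a load permutation using only group elements 0 and 1, only
   the first of the two per-group vector loads is needed, so ncopies is
   recomputed as 1 rather than 2.  */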
1136 auto_sbitmap perm (DR_GROUP_SIZE (first_stmt_info));
1137 bitmap_clear (perm);
1138 for (unsigned i = 0;
1139 i < SLP_TREE_LOAD_PERMUTATION (slp_node).length (); ++i)
1140 bitmap_set_bit (perm, SLP_TREE_LOAD_PERMUTATION (slp_node)[i]);
1141 ncopies = 0;
1142 bool load_seen = false;
1143 for (unsigned i = 0; i < DR_GROUP_SIZE (first_stmt_info); ++i)
1145 if (i % assumed_nunits == 0)
1147 if (load_seen)
1148 ncopies++;
1149 load_seen = false;
1151 if (bitmap_bit_p (perm, i))
1152 load_seen = true;
1154 if (load_seen)
1155 ncopies++;
1156 gcc_assert (ncopies
1157 <= (DR_GROUP_SIZE (first_stmt_info)
1158 - DR_GROUP_GAP (first_stmt_info)
1159 + assumed_nunits - 1) / assumed_nunits);
1162 /* Grouped loads read all elements in the group at once,
1163 so we want the DR for the first statement. */
1164 stmt_vec_info first_stmt_info = stmt_info;
1165 if (!slp_node && grouped_access_p)
1166 first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
1168 /* True if we should include any once-per-group costs as well as
1169 the cost of the statement itself. For SLP we only get called
1170 once per group anyhow. */
1171 bool first_stmt_p = (first_stmt_info == stmt_info);
1173 /* We assume that the cost of a single load-lanes instruction is
1174 equivalent to the cost of DR_GROUP_SIZE separate loads. If a grouped
1175 access is instead being provided by a load-and-permute operation,
1176 include the cost of the permutes. */
1177 if (first_stmt_p
1178 && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
1180 /* Uses even and odd extract operations or shuffle operations
1181 for each needed permute. */
1182 int group_size = DR_GROUP_SIZE (first_stmt_info);
1183 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
1184 inside_cost += record_stmt_cost (cost_vec, nstmts, vec_perm,
1185 stmt_info, 0, vect_body);
1187 if (dump_enabled_p ())
1188 dump_printf_loc (MSG_NOTE, vect_location,
1189 "vect_model_load_cost: strided group_size = %d .\n",
1190 group_size);
1193 /* The loads themselves. */
1194 if (memory_access_type == VMAT_ELEMENTWISE
1195 || memory_access_type == VMAT_GATHER_SCATTER)
1197 /* N scalar loads plus gathering them into a vector. */
1198 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1199 unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
1200 inside_cost += record_stmt_cost (cost_vec,
1201 ncopies * assumed_nunits,
1202 scalar_load, stmt_info, 0, vect_body);
1204 else
1205 vect_get_load_cost (stmt_info, ncopies, first_stmt_p,
1206 &inside_cost, &prologue_cost,
1207 cost_vec, cost_vec, true);
1208 if (memory_access_type == VMAT_ELEMENTWISE
1209 || memory_access_type == VMAT_STRIDED_SLP)
1210 inside_cost += record_stmt_cost (cost_vec, ncopies, vec_construct,
1211 stmt_info, 0, vect_body);
1213 if (dump_enabled_p ())
1214 dump_printf_loc (MSG_NOTE, vect_location,
1215 "vect_model_load_cost: inside_cost = %d, "
1216 "prologue_cost = %d .\n", inside_cost, prologue_cost);
1220 /* Calculate cost of DR's memory access. */
1221 void
1222 vect_get_load_cost (stmt_vec_info stmt_info, int ncopies,
1223 bool add_realign_cost, unsigned int *inside_cost,
1224 unsigned int *prologue_cost,
1225 stmt_vector_for_cost *prologue_cost_vec,
1226 stmt_vector_for_cost *body_cost_vec,
1227 bool record_prologue_costs)
1229 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
1230 int alignment_support_scheme
1231 = vect_supportable_dr_alignment (dr_info, false);
1233 switch (alignment_support_scheme)
1235 case dr_aligned:
1237 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1238 stmt_info, 0, vect_body);
1240 if (dump_enabled_p ())
1241 dump_printf_loc (MSG_NOTE, vect_location,
1242 "vect_model_load_cost: aligned.\n");
1244 break;
1246 case dr_unaligned_supported:
1248 /* Here, we assign an additional cost for the unaligned load. */
1249 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1250 unaligned_load, stmt_info,
1251 DR_MISALIGNMENT (dr_info),
1252 vect_body);
1254 if (dump_enabled_p ())
1255 dump_printf_loc (MSG_NOTE, vect_location,
1256 "vect_model_load_cost: unaligned supported by "
1257 "hardware.\n");
1259 break;
1261 case dr_explicit_realign:
1263 *inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
1264 vector_load, stmt_info, 0, vect_body);
1265 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1266 vec_perm, stmt_info, 0, vect_body);
1268 /* FIXME: If the misalignment remains fixed across the iterations of
1269 the containing loop, the following cost should be added to the
1270 prologue costs. */
1271 if (targetm.vectorize.builtin_mask_for_load)
1272 *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
1273 stmt_info, 0, vect_body);
1275 if (dump_enabled_p ())
1276 dump_printf_loc (MSG_NOTE, vect_location,
1277 "vect_model_load_cost: explicit realign\n");
1279 break;
1281 case dr_explicit_realign_optimized:
1283 if (dump_enabled_p ())
1284 dump_printf_loc (MSG_NOTE, vect_location,
1285 "vect_model_load_cost: unaligned software "
1286 "pipelined.\n");
1288 /* Unaligned software pipeline has a load of an address, an initial
1289 load, and possibly a mask operation to "prime" the loop. However,
1290 if this is an access in a group of loads, which provide grouped
1291 access, then the above cost should only be considered for one
1292 access in the group. Inside the loop, there is a load op
1293 and a realignment op. */
1295 if (add_realign_cost && record_prologue_costs)
1297 *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
1298 vector_stmt, stmt_info,
1299 0, vect_prologue);
1300 if (targetm.vectorize.builtin_mask_for_load)
1301 *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
1302 vector_stmt, stmt_info,
1303 0, vect_prologue);
1306 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1307 stmt_info, 0, vect_body);
1308 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
1309 stmt_info, 0, vect_body);
1311 if (dump_enabled_p ())
1312 dump_printf_loc (MSG_NOTE, vect_location,
1313 "vect_model_load_cost: explicit realign optimized"
1314 "\n");
1316 break;
1319 case dr_unaligned_unsupported:
1321 *inside_cost = VECT_MAX_COST;
1323 if (dump_enabled_p ())
1324 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1325 "vect_model_load_cost: unsupported access.\n");
1326 break;
1329 default:
1330 gcc_unreachable ();
1334 /* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
1335 the loop preheader for the vectorized stmt STMT_VINFO. */
1337 static void
1338 vect_init_vector_1 (stmt_vec_info stmt_vinfo, gimple *new_stmt,
1339 gimple_stmt_iterator *gsi)
1341 if (gsi)
1342 vect_finish_stmt_generation (stmt_vinfo, new_stmt, gsi);
1343 else
1345 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1347 if (loop_vinfo)
1349 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1350 basic_block new_bb;
1351 edge pe;
1353 if (nested_in_vect_loop_p (loop, stmt_vinfo))
1354 loop = loop->inner;
1356 pe = loop_preheader_edge (loop);
1357 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
1358 gcc_assert (!new_bb);
1360 else
1362 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
1363 basic_block bb;
1364 gimple_stmt_iterator gsi_bb_start;
1366 gcc_assert (bb_vinfo);
1367 bb = BB_VINFO_BB (bb_vinfo);
1368 gsi_bb_start = gsi_after_labels (bb);
1369 gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
1373 if (dump_enabled_p ())
1374 dump_printf_loc (MSG_NOTE, vect_location,
1375 "created new init_stmt: %G", new_stmt);
1378 /* Function vect_init_vector.
1380 Insert a new stmt (INIT_STMT) that initializes a new variable of type
1381 TYPE with the value VAL. If TYPE is a vector type and VAL does not have
1382 vector type, a vector with all elements equal to VAL is created first.
1383 Place the initialization at GSI if it is not NULL. Otherwise, place the
1384 initialization at the loop preheader.
1385 Return the DEF of INIT_STMT.
1386 It will be used in the vectorization of STMT_INFO. */
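/* For example (hypothetical call): passing VAL == 3 and TYPE == a V4SI
   vector type with GSI == NULL emits something like

     cst_5 = { 3, 3, 3, 3 };

   on the loop preheader edge and returns cst_5.  */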
1388 tree
1389 vect_init_vector (stmt_vec_info stmt_info, tree val, tree type,
1390 gimple_stmt_iterator *gsi)
1392 gimple *init_stmt;
1393 tree new_temp;
1395 /* We abuse this function to create an SSA name with initial value 'val'. */
1396 if (! useless_type_conversion_p (type, TREE_TYPE (val)))
1398 gcc_assert (TREE_CODE (type) == VECTOR_TYPE);
1399 if (! types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
1401 /* Scalar boolean value should be transformed into
1402 all zeros or all ones value before building a vector. */
1403 if (VECTOR_BOOLEAN_TYPE_P (type))
1405 tree true_val = build_all_ones_cst (TREE_TYPE (type));
1406 tree false_val = build_zero_cst (TREE_TYPE (type));
1408 if (CONSTANT_CLASS_P (val))
1409 val = integer_zerop (val) ? false_val : true_val;
1410 else
1412 new_temp = make_ssa_name (TREE_TYPE (type));
1413 init_stmt = gimple_build_assign (new_temp, COND_EXPR,
1414 val, true_val, false_val);
1415 vect_init_vector_1 (stmt_info, init_stmt, gsi);
1416 val = new_temp;
1419 else if (CONSTANT_CLASS_P (val))
1420 val = fold_convert (TREE_TYPE (type), val);
1421 else
1423 new_temp = make_ssa_name (TREE_TYPE (type));
1424 if (! INTEGRAL_TYPE_P (TREE_TYPE (val)))
1425 init_stmt = gimple_build_assign (new_temp,
1426 fold_build1 (VIEW_CONVERT_EXPR,
1427 TREE_TYPE (type),
1428 val));
1429 else
1430 init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
1431 vect_init_vector_1 (stmt_info, init_stmt, gsi);
1432 val = new_temp;
1435 val = build_vector_from_val (type, val);
1438 new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
1439 init_stmt = gimple_build_assign (new_temp, val);
1440 vect_init_vector_1 (stmt_info, init_stmt, gsi);
1441 return new_temp;
1444 /* Function vect_get_vec_def_for_operand_1.
1446 For a defining stmt DEF_STMT_INFO of a scalar stmt, return a vector def
1447 with type DT that will be used in the vectorized stmt. */
1449 tree
1450 vect_get_vec_def_for_operand_1 (stmt_vec_info def_stmt_info,
1451 enum vect_def_type dt)
1453 tree vec_oprnd;
1454 stmt_vec_info vec_stmt_info;
1456 switch (dt)
1458 /* operand is a constant or a loop invariant. */
1459 case vect_constant_def:
1460 case vect_external_def:
1461 /* Code should use vect_get_vec_def_for_operand. */
1462 gcc_unreachable ();
1464 /* Operand is defined by a loop header phi. In case of nested
1465 cycles we also may have uses of the backedge def. */
1466 case vect_reduction_def:
1467 case vect_double_reduction_def:
1468 case vect_nested_cycle:
1469 case vect_induction_def:
1470 gcc_assert (gimple_code (def_stmt_info->stmt) == GIMPLE_PHI
1471 || dt == vect_nested_cycle);
1472 /* Fallthru. */
1474 /* operand is defined inside the loop. */
1475 case vect_internal_def:
1477 /* Get the def from the vectorized stmt. */
1478 vec_stmt_info = STMT_VINFO_VEC_STMT (def_stmt_info);
1479 /* Get vectorized pattern statement. */
1480 if (!vec_stmt_info
1481 && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
1482 && !STMT_VINFO_RELEVANT (def_stmt_info))
1483 vec_stmt_info = (STMT_VINFO_VEC_STMT
1484 (STMT_VINFO_RELATED_STMT (def_stmt_info)));
1485 gcc_assert (vec_stmt_info);
1486 if (gphi *phi = dyn_cast <gphi *> (vec_stmt_info->stmt))
1487 vec_oprnd = PHI_RESULT (phi);
1488 else
1489 vec_oprnd = gimple_get_lhs (vec_stmt_info->stmt);
1490 return vec_oprnd;
1493 default:
1494 gcc_unreachable ();
1499 /* Function vect_get_vec_def_for_operand.
1501 OP is an operand in STMT_VINFO. This function returns a (vector) def
1502 that will be used in the vectorized stmt for STMT_VINFO.
1504 In the case that OP is an SSA_NAME which is defined in the loop, then
1505 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
1507 In case OP is an invariant or constant, a new stmt that creates a vector def
1508 needs to be introduced. VECTYPE may be used to specify a required type for
1509 vector invariant. */
1511 tree
1512 vect_get_vec_def_for_operand (tree op, stmt_vec_info stmt_vinfo, tree vectype)
1514 gimple *def_stmt;
1515 enum vect_def_type dt;
1516 bool is_simple_use;
1517 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1519 if (dump_enabled_p ())
1520 dump_printf_loc (MSG_NOTE, vect_location,
1521 "vect_get_vec_def_for_operand: %T\n", op);
1523 stmt_vec_info def_stmt_info;
1524 is_simple_use = vect_is_simple_use (op, loop_vinfo, &dt,
1525 &def_stmt_info, &def_stmt);
1526 gcc_assert (is_simple_use);
1527 if (def_stmt && dump_enabled_p ())
1528 dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = %G", def_stmt);
1530 if (dt == vect_constant_def || dt == vect_external_def)
1532 tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
1533 tree vector_type;
1535 if (vectype)
1536 vector_type = vectype;
1537 else if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op))
1538 && VECTOR_BOOLEAN_TYPE_P (stmt_vectype))
1539 vector_type = build_same_sized_truth_vector_type (stmt_vectype);
1540 else
1541 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
1543 gcc_assert (vector_type);
1544 return vect_init_vector (stmt_vinfo, op, vector_type, NULL);
1546 else
1547 return vect_get_vec_def_for_operand_1 (def_stmt_info, dt);
1551 /* Function vect_get_vec_def_for_stmt_copy
1553 Return a vector-def for an operand. This function is used when the
1554 vectorized stmt to be created (by the caller to this function) is a "copy"
1555 created in case the vectorized result cannot fit in one vector, and several
1556 copies of the vector-stmt are required. In this case the vector-def is
1557 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1558 of the stmt that defines VEC_OPRND. VINFO describes the vectorization.
1560 Context:
1561 In case the vectorization factor (VF) is bigger than the number
1562 of elements that can fit in a vectype (nunits), we have to generate
1563 more than one vector stmt to vectorize the scalar stmt. This situation
1564 arises when there are multiple data-types operated upon in the loop; the
1565 smallest data-type determines the VF, and as a result, when vectorizing
1566 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1567 vector stmt (each computing a vector of 'nunits' results, and together
1568 computing 'VF' results in each iteration). This function is called when
1569 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1570 which VF=16 and nunits=4, so the number of copies required is 4):
1572 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1574 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1575 VS1.1: vx.1 = memref1 VS1.2
1576 VS1.2: vx.2 = memref2 VS1.3
1577 VS1.3: vx.3 = memref3
1579 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1580 VSnew.1: vz1 = vx.1 + ... VSnew.2
1581 VSnew.2: vz2 = vx.2 + ... VSnew.3
1582 VSnew.3: vz3 = vx.3 + ...
1584 The vectorization of S1 is explained in vectorizable_load.
1585 The vectorization of S2:
1586 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1587 the function 'vect_get_vec_def_for_operand' is called to
1588 get the relevant vector-def for each operand of S2. For operand x it
1589 returns the vector-def 'vx.0'.
1591 To create the remaining copies of the vector-stmt (VSnew.j), this
1592 function is called to get the relevant vector-def for each operand. It is
1593 obtained from the respective VS1.j stmt, which is recorded in the
1594 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1596 For example, to obtain the vector-def 'vx.1' in order to create the
1597 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1598 Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
1599 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1600 and return its def ('vx.1').
1601 Overall, to create the above sequence this function will be called 3 times:
1602 vx.1 = vect_get_vec_def_for_stmt_copy (vinfo, vx.0);
1603 vx.2 = vect_get_vec_def_for_stmt_copy (vinfo, vx.1);
1604 vx.3 = vect_get_vec_def_for_stmt_copy (vinfo, vx.2); */
1606 tree
1607 vect_get_vec_def_for_stmt_copy (vec_info *vinfo, tree vec_oprnd)
1609 stmt_vec_info def_stmt_info = vinfo->lookup_def (vec_oprnd);
1610 if (!def_stmt_info)
1611 /* Do nothing; can reuse same def. */
1612 return vec_oprnd;
1614 def_stmt_info = STMT_VINFO_RELATED_STMT (def_stmt_info);
1615 gcc_assert (def_stmt_info);
1616 if (gphi *phi = dyn_cast <gphi *> (def_stmt_info->stmt))
1617 vec_oprnd = PHI_RESULT (phi);
1618 else
1619 vec_oprnd = gimple_get_lhs (def_stmt_info->stmt);
1620 return vec_oprnd;
1624 /* Get vectorized definitions for the operands to create a copy of an original
1625 stmt. See vect_get_vec_def_for_stmt_copy () for details. */
1627 void
1628 vect_get_vec_defs_for_stmt_copy (vec_info *vinfo,
1629 vec<tree> *vec_oprnds0,
1630 vec<tree> *vec_oprnds1)
1632 tree vec_oprnd = vec_oprnds0->pop ();
1634 vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd);
1635 vec_oprnds0->quick_push (vec_oprnd);
1637 if (vec_oprnds1 && vec_oprnds1->length ())
1639 vec_oprnd = vec_oprnds1->pop ();
1640 vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd);
1641 vec_oprnds1->quick_push (vec_oprnd);
1646 /* Get vectorized definitions for OP0 and OP1. */
1648 void
1649 vect_get_vec_defs (tree op0, tree op1, stmt_vec_info stmt_info,
1650 vec<tree> *vec_oprnds0,
1651 vec<tree> *vec_oprnds1,
1652 slp_tree slp_node)
1654 if (slp_node)
1656 int nops = (op1 == NULL_TREE) ? 1 : 2;
1657 auto_vec<tree> ops (nops);
1658 auto_vec<vec<tree> > vec_defs (nops);
1660 ops.quick_push (op0);
1661 if (op1)
1662 ops.quick_push (op1);
1664 vect_get_slp_defs (ops, slp_node, &vec_defs);
1666 *vec_oprnds0 = vec_defs[0];
1667 if (op1)
1668 *vec_oprnds1 = vec_defs[1];
1670 else
1672 tree vec_oprnd;
1674 vec_oprnds0->create (1);
1675 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt_info);
1676 vec_oprnds0->quick_push (vec_oprnd);
1678 if (op1)
1680 vec_oprnds1->create (1);
1681 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt_info);
1682 vec_oprnds1->quick_push (vec_oprnd);
1687 /* Helper function called by vect_finish_replace_stmt and
1688 vect_finish_stmt_generation. Set the location of the new
1689 statement and create and return a stmt_vec_info for it. */
1691 static stmt_vec_info
1692 vect_finish_stmt_generation_1 (stmt_vec_info stmt_info, gimple *vec_stmt)
1694 vec_info *vinfo = stmt_info->vinfo;
1696 stmt_vec_info vec_stmt_info = vinfo->add_stmt (vec_stmt);
1698 if (dump_enabled_p ())
1699 dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: %G", vec_stmt);
1701 gimple_set_location (vec_stmt, gimple_location (stmt_info->stmt));
1703 /* While EH edges will generally prevent vectorization, stmt might
1704 e.g. be in a must-not-throw region. Ensure newly created stmts
1705 that could throw are part of the same region. */
1706 int lp_nr = lookup_stmt_eh_lp (stmt_info->stmt);
1707 if (lp_nr != 0 && stmt_could_throw_p (cfun, vec_stmt))
1708 add_stmt_to_eh_lp (vec_stmt, lp_nr);
1710 return vec_stmt_info;
1713 /* Replace the scalar statement STMT_INFO with a new vector statement VEC_STMT,
1714 which sets the same scalar result as STMT_INFO did. Create and return a
1715 stmt_vec_info for VEC_STMT. */
1717 stmt_vec_info
1718 vect_finish_replace_stmt (stmt_vec_info stmt_info, gimple *vec_stmt)
1720 gcc_assert (gimple_get_lhs (stmt_info->stmt) == gimple_get_lhs (vec_stmt));
1722 gimple_stmt_iterator gsi = gsi_for_stmt (stmt_info->stmt);
1723 gsi_replace (&gsi, vec_stmt, true);
1725 return vect_finish_stmt_generation_1 (stmt_info, vec_stmt);
1728 /* Add VEC_STMT to the vectorized implementation of STMT_INFO and insert it
1729 before *GSI. Create and return a stmt_vec_info for VEC_STMT. */
1731 stmt_vec_info
1732 vect_finish_stmt_generation (stmt_vec_info stmt_info, gimple *vec_stmt,
1733 gimple_stmt_iterator *gsi)
1735 gcc_assert (gimple_code (stmt_info->stmt) != GIMPLE_LABEL);
1737 if (!gsi_end_p (*gsi)
1738 && gimple_has_mem_ops (vec_stmt))
1740 gimple *at_stmt = gsi_stmt (*gsi);
1741 tree vuse = gimple_vuse (at_stmt);
1742 if (vuse && TREE_CODE (vuse) == SSA_NAME)
1744 tree vdef = gimple_vdef (at_stmt);
1745 gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
1746 /* If we have an SSA vuse and insert a store, update virtual
1747 SSA form to avoid triggering the renamer. Do so only
1748 if we can easily see all uses - which is what almost always
1749 happens with the way vectorized stmts are inserted. */
1750 if ((vdef && TREE_CODE (vdef) == SSA_NAME)
1751 && ((is_gimple_assign (vec_stmt)
1752 && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
1753 || (is_gimple_call (vec_stmt)
1754 && !(gimple_call_flags (vec_stmt)
1755 & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
1757 tree new_vdef = copy_ssa_name (vuse, vec_stmt);
1758 gimple_set_vdef (vec_stmt, new_vdef);
1759 SET_USE (gimple_vuse_op (at_stmt), new_vdef);
1763 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1764 return vect_finish_stmt_generation_1 (stmt_info, vec_stmt);
1767 /* We want to vectorize a call to combined function CFN with function
1768 decl FNDECL, using VECTYPE_OUT as the type of the output and VECTYPE_IN
1769 as the types of all inputs. Check whether this is possible using
1770 an internal function, returning its code if so or IFN_LAST if not. */
1772 static internal_fn
1773 vectorizable_internal_function (combined_fn cfn, tree fndecl,
1774 tree vectype_out, tree vectype_in)
1776 internal_fn ifn;
1777 if (internal_fn_p (cfn))
1778 ifn = as_internal_fn (cfn);
1779 else
1780 ifn = associated_internal_fn (fndecl);
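/* For example, a call to sqrt maps to IFN_SQRT; the check below accepts
   it only if it is a directly-mapped internal function whose optab the
   target implements for the vector types chosen here.  */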
1781 if (ifn != IFN_LAST && direct_internal_fn_p (ifn))
1783 const direct_internal_fn_info &info = direct_internal_fn (ifn);
1784 if (info.vectorizable)
1786 tree type0 = (info.type0 < 0 ? vectype_out : vectype_in);
1787 tree type1 = (info.type1 < 0 ? vectype_out : vectype_in);
1788 if (direct_internal_fn_supported_p (ifn, tree_pair (type0, type1),
1789 OPTIMIZE_FOR_SPEED))
1790 return ifn;
1793 return IFN_LAST;
1797 static tree permute_vec_elements (tree, tree, tree, stmt_vec_info,
1798 gimple_stmt_iterator *);
1800 /* Check whether a load or store statement in the loop described by
1801 LOOP_VINFO is possible in a fully-masked loop. This is testing
1802 whether the vectorizer pass has the appropriate support, as well as
1803 whether the target does.
1805 VLS_TYPE says whether the statement is a load or store and VECTYPE
1806 is the type of the vector being loaded or stored. MEMORY_ACCESS_TYPE
1807 says how the load or store is going to be implemented and GROUP_SIZE
1808 is the number of load or store statements in the containing group.
1809 If the access is a gather load or scatter store, GS_INFO describes
1810 its arguments.
1812 Clear LOOP_VINFO_CAN_FULLY_MASK_P if a fully-masked loop is not
1813 supported, otherwise record the required mask types. */
1815 static void
1816 check_load_store_masking (loop_vec_info loop_vinfo, tree vectype,
1817 vec_load_store_type vls_type, int group_size,
1818 vect_memory_access_type memory_access_type,
1819 gather_scatter_info *gs_info)
1821 /* Invariant loads need no special support. */
1822 if (memory_access_type == VMAT_INVARIANT)
1823 return;
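/* Each of the remaining cases either records the loop masks needed for
   this access (load/store-lanes, gather/scatter, or contiguous accesses
   with a target-supported masked load/store) or clears
   LOOP_VINFO_CAN_FULLY_MASK_P when no masked form is available.  */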
1825 vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
1826 machine_mode vecmode = TYPE_MODE (vectype);
1827 bool is_load = (vls_type == VLS_LOAD);
1828 if (memory_access_type == VMAT_LOAD_STORE_LANES)
1830 if (is_load
1831 ? !vect_load_lanes_supported (vectype, group_size, true)
1832 : !vect_store_lanes_supported (vectype, group_size, true))
1834 if (dump_enabled_p ())
1835 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1836 "can't use a fully-masked loop because the"
1837 " target doesn't have an appropriate masked"
1838 " load/store-lanes instruction.\n");
1839 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
1840 return;
1842 unsigned int ncopies = vect_get_num_copies (loop_vinfo, vectype);
1843 vect_record_loop_mask (loop_vinfo, masks, ncopies, vectype);
1844 return;
1847 if (memory_access_type == VMAT_GATHER_SCATTER)
1849 internal_fn ifn = (is_load
1850 ? IFN_MASK_GATHER_LOAD
1851 : IFN_MASK_SCATTER_STORE);
1852 tree offset_type = TREE_TYPE (gs_info->offset);
1853 if (!internal_gather_scatter_fn_supported_p (ifn, vectype,
1854 gs_info->memory_type,
1855 TYPE_SIGN (offset_type),
1856 gs_info->scale))
1858 if (dump_enabled_p ())
1859 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1860 "can't use a fully-masked loop because the"
1861 " target doesn't have an appropriate masked"
1862 " gather load or scatter store instruction.\n");
1863 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
1864 return;
1866 unsigned int ncopies = vect_get_num_copies (loop_vinfo, vectype);
1867 vect_record_loop_mask (loop_vinfo, masks, ncopies, vectype);
1868 return;
1871 if (memory_access_type != VMAT_CONTIGUOUS
1872 && memory_access_type != VMAT_CONTIGUOUS_PERMUTE)
1874 /* Element X of the data must come from iteration i * VF + X of the
1875 scalar loop. We need more work to support other mappings. */
1876 if (dump_enabled_p ())
1877 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1878 "can't use a fully-masked loop because an access"
1879 " isn't contiguous.\n");
1880 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
1881 return;
1884 machine_mode mask_mode;
1885 if (!(targetm.vectorize.get_mask_mode
1886 (GET_MODE_NUNITS (vecmode),
1887 GET_MODE_SIZE (vecmode)).exists (&mask_mode))
1888 || !can_vec_mask_load_store_p (vecmode, mask_mode, is_load))
1890 if (dump_enabled_p ())
1891 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1892 "can't use a fully-masked loop because the target"
1893 " doesn't have the appropriate masked load or"
1894 " store.\n");
1895 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
1896 return;
1898 /* We might load more scalars than we need for permuting SLP loads.
1899 We checked in get_group_load_store_type that the extra elements
1900 don't leak into a new vector. */
1901 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
1902 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1903 unsigned int nvectors;
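/* For example, a group of 3 accesses with VF 4 and 8-element vectors
   needs ceil (3 * 4 / 8) == 2 mask vectors.  */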
1904 if (can_div_away_from_zero_p (group_size * vf, nunits, &nvectors))
1905 vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype);
1906 else
1907 gcc_unreachable ();
1910 /* Return the mask input to a masked load or store. VEC_MASK is the vectorized
1911 form of the scalar mask condition and LOOP_MASK, if nonnull, is the mask
1912 that needs to be applied to all loads and stores in a vectorized loop.
1913 Return VEC_MASK if LOOP_MASK is null, otherwise return VEC_MASK & LOOP_MASK.
1915 MASK_TYPE is the type of both masks. If new statements are needed,
1916 insert them before GSI. */
1918 static tree
1919 prepare_load_store_mask (tree mask_type, tree loop_mask, tree vec_mask,
1920 gimple_stmt_iterator *gsi)
1922 gcc_assert (useless_type_conversion_p (mask_type, TREE_TYPE (vec_mask)));
1923 if (!loop_mask)
1924 return vec_mask;
1926 gcc_assert (TREE_TYPE (loop_mask) == mask_type);
1927 tree and_res = make_temp_ssa_name (mask_type, NULL, "vec_mask_and");
1928 gimple *and_stmt = gimple_build_assign (and_res, BIT_AND_EXPR,
1929 vec_mask, loop_mask);
1930 gsi_insert_before (gsi, and_stmt, GSI_SAME_STMT);
1931 return and_res;
1934 /* Determine whether we can use a gather load or scatter store to vectorize
1935 strided load or store STMT_INFO by truncating the current offset to a
1936 smaller width. We need to be able to construct an offset vector:
1938 { 0, X, X*2, X*3, ... }
1940 without loss of precision, where X is STMT_INFO's DR_STEP.
1942 Return true if this is possible, describing the gather load or scatter
1943 store in GS_INFO. MASKED_P is true if the load or store is conditional. */
1945 static bool
1946 vect_truncate_gather_scatter_offset (stmt_vec_info stmt_info,
1947 loop_vec_info loop_vinfo, bool masked_p,
1948 gather_scatter_info *gs_info)
1950 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
1951 data_reference *dr = dr_info->dr;
1952 tree step = DR_STEP (dr);
1953 if (TREE_CODE (step) != INTEGER_CST)
1955 /* ??? Perhaps we could use range information here? */
1956 if (dump_enabled_p ())
1957 dump_printf_loc (MSG_NOTE, vect_location,
1958 "cannot truncate variable step.\n");
1959 return false;
1962 /* Get the number of bits in an element. */
1963 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1964 scalar_mode element_mode = SCALAR_TYPE_MODE (TREE_TYPE (vectype));
1965 unsigned int element_bits = GET_MODE_BITSIZE (element_mode);
1967 /* Set COUNT to the upper limit on the number of elements - 1.
1968 Start with the maximum vectorization factor. */
1969 unsigned HOST_WIDE_INT count = vect_max_vf (loop_vinfo) - 1;
1971 /* Try lowering COUNT to the number of scalar latch iterations. */
1972 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1973 widest_int max_iters;
1974 if (max_loop_iterations (loop, &max_iters)
1975 && max_iters < count)
1976 count = max_iters.to_shwi ();
1978 /* Try scales of 1 and the element size. */
1979 int scales[] = { 1, vect_get_scalar_dr_size (dr_info) };
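/* With a scale of 1 the offsets are in bytes (STEP itself); with a scale
   equal to the scalar element size they are in elements, which may fit
   in fewer bits.  */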
1980 wi::overflow_type overflow = wi::OVF_NONE;
1981 for (int i = 0; i < 2; ++i)
1983 int scale = scales[i];
1984 widest_int factor;
1985 if (!wi::multiple_of_p (wi::to_widest (step), scale, SIGNED, &factor))
1986 continue;
1988 /* See whether we can calculate COUNT * STEP / SCALE
1989 in ELEMENT_BITS bits. */
1990 widest_int range = wi::mul (count, factor, SIGNED, &overflow);
1991 if (overflow)
1992 continue;
1993 signop sign = range >= 0 ? UNSIGNED : SIGNED;
1994 if (wi::min_precision (range, sign) > element_bits)
1996 overflow = wi::OVF_UNKNOWN;
1997 continue;
2000 /* See whether the target supports the operation. */
2001 tree memory_type = TREE_TYPE (DR_REF (dr));
2002 if (!vect_gather_scatter_fn_p (DR_IS_READ (dr), masked_p, vectype,
2003 memory_type, element_bits, sign, scale,
2004 &gs_info->ifn, &gs_info->element_type))
2005 continue;
2007 tree offset_type = build_nonstandard_integer_type (element_bits,
2008 sign == UNSIGNED);
2010 gs_info->decl = NULL_TREE;
2011 /* Logically the sum of DR_BASE_ADDRESS, DR_INIT and DR_OFFSET,
2012 but we don't need to store that here. */
2013 gs_info->base = NULL_TREE;
2014 gs_info->offset = fold_convert (offset_type, step);
2015 gs_info->offset_dt = vect_constant_def;
2016 gs_info->offset_vectype = NULL_TREE;
2017 gs_info->scale = scale;
2018 gs_info->memory_type = memory_type;
2019 return true;
2022 if (overflow && dump_enabled_p ())
2023 dump_printf_loc (MSG_NOTE, vect_location,
2024 "truncating gather/scatter offset to %d bits"
2025 " might change its value.\n", element_bits);
2027 return false;
2030 /* Return true if we can use gather/scatter internal functions to
2031 vectorize STMT_INFO, which is a grouped or strided load or store.
2032 MASKED_P is true if load or store is conditional. When returning
2033 true, fill in GS_INFO with the information required to perform the
2034 operation. */
2036 static bool
2037 vect_use_strided_gather_scatters_p (stmt_vec_info stmt_info,
2038 loop_vec_info loop_vinfo, bool masked_p,
2039 gather_scatter_info *gs_info)
2041 if (!vect_check_gather_scatter (stmt_info, loop_vinfo, gs_info)
2042 || gs_info->decl)
2043 return vect_truncate_gather_scatter_offset (stmt_info, loop_vinfo,
2044 masked_p, gs_info);
2046 scalar_mode element_mode = SCALAR_TYPE_MODE (gs_info->element_type);
2047 unsigned int element_bits = GET_MODE_BITSIZE (element_mode);
2048 tree offset_type = TREE_TYPE (gs_info->offset);
2049 unsigned int offset_bits = TYPE_PRECISION (offset_type);
2051 /* Enforced by vect_check_gather_scatter. */
2052 gcc_assert (element_bits >= offset_bits);
2054 /* If the elements are wider than the offset, convert the offset to the
2055 same width, without changing its sign. */
2056 if (element_bits > offset_bits)
2058 bool unsigned_p = TYPE_UNSIGNED (offset_type);
2059 offset_type = build_nonstandard_integer_type (element_bits, unsigned_p);
2060 gs_info->offset = fold_convert (offset_type, gs_info->offset);
2063 if (dump_enabled_p ())
2064 dump_printf_loc (MSG_NOTE, vect_location,
2065 "using gather/scatter for strided/grouped access,"
2066 " scale = %d\n", gs_info->scale);
2068 return true;
2071 /* STMT_INFO is a non-strided load or store, meaning that it accesses
2072 elements with a known constant step. Return -1 if that step
2073 is negative, 0 if it is zero, and 1 if it is greater than zero. */
2075 static int
2076 compare_step_with_zero (stmt_vec_info stmt_info)
2078 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
2079 return tree_int_cst_compare (vect_dr_behavior (dr_info)->step,
2080 size_zero_node);
2083 /* If the target supports a permute mask that reverses the elements in
2084 a vector of type VECTYPE, return that mask, otherwise return null. */
2086 static tree
2087 perm_mask_for_reverse (tree vectype)
2089 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2091 /* The encoding has a single stepped pattern. */
2092 vec_perm_builder sel (nunits, 1, 3);
2093 for (int i = 0; i < 3; ++i)
2094 sel.quick_push (nunits - 1 - i);
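/* For a 4-element vector the encoded pattern expands to the selector
   { 3, 2, 1, 0 }.  */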
2096 vec_perm_indices indices (sel, 1, nunits);
2097 if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
2098 return NULL_TREE;
2099 return vect_gen_perm_mask_checked (vectype, indices);
2102 /* STMT_INFO is either a masked or unconditional store. Return the value
2103 being stored. */
2105 tree
2106 vect_get_store_rhs (stmt_vec_info stmt_info)
2108 if (gassign *assign = dyn_cast <gassign *> (stmt_info->stmt))
2110 gcc_assert (gimple_assign_single_p (assign));
2111 return gimple_assign_rhs1 (assign);
2113 if (gcall *call = dyn_cast <gcall *> (stmt_info->stmt))
2115 internal_fn ifn = gimple_call_internal_fn (call);
2116 int index = internal_fn_stored_value_index (ifn);
2117 gcc_assert (index >= 0);
2118 return gimple_call_arg (call, index);
2120 gcc_unreachable ();
2123 /* A subroutine of get_load_store_type, with a subset of the same
2124 arguments. Handle the case where STMT_INFO is part of a grouped load
2125 or store.
2127 For stores, the statements in the group are all consecutive
2128 and there is no gap at the end. For loads, the statements in the
2129 group might not be consecutive; there can be gaps between statements
2130 as well as at the end. */
2132 static bool
2133 get_group_load_store_type (stmt_vec_info stmt_info, tree vectype, bool slp,
2134 bool masked_p, vec_load_store_type vls_type,
2135 vect_memory_access_type *memory_access_type,
2136 gather_scatter_info *gs_info)
2138 vec_info *vinfo = stmt_info->vinfo;
2139 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2140 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
2141 stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
2142 dr_vec_info *first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
2143 unsigned int group_size = DR_GROUP_SIZE (first_stmt_info);
2144 bool single_element_p = (stmt_info == first_stmt_info
2145 && !DR_GROUP_NEXT_ELEMENT (stmt_info));
2146 unsigned HOST_WIDE_INT gap = DR_GROUP_GAP (first_stmt_info);
2147 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2149 /* True if the vectorized statements would access beyond the last
2150 statement in the group. */
2151 bool overrun_p = false;
2153 /* True if we can cope with such overrun by peeling for gaps, so that
2154 there is at least one final scalar iteration after the vector loop. */
2155 bool can_overrun_p = (!masked_p
2156 && vls_type == VLS_LOAD
2157 && loop_vinfo
2158 && !loop->inner);
2160 /* There can only be a gap at the end of the group if the stride is
2161 known at compile time. */
2162 gcc_assert (!STMT_VINFO_STRIDED_P (first_stmt_info) || gap == 0);
2164 /* Stores can't yet have gaps. */
2165 gcc_assert (slp || vls_type == VLS_LOAD || gap == 0);
2167 if (slp)
2169 if (STMT_VINFO_STRIDED_P (first_stmt_info))
2171 /* Try to use consecutive accesses of DR_GROUP_SIZE elements,
2172 separated by the stride, until we have a complete vector.
2173 Fall back to scalar accesses if that isn't possible. */
2174 if (multiple_p (nunits, group_size))
2175 *memory_access_type = VMAT_STRIDED_SLP;
2176 else
2177 *memory_access_type = VMAT_ELEMENTWISE;
2179 else
2181 overrun_p = loop_vinfo && gap != 0;
2182 if (overrun_p && vls_type != VLS_LOAD)
2184 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2185 "Grouped store with gaps requires"
2186 " non-consecutive accesses\n");
2187 return false;
2189 /* An overrun is fine if the trailing elements are smaller
2190 than the alignment boundary B. Every vector access will
2191 be a multiple of B and so we are guaranteed to access a
2192 non-gap element in the same B-sized block. */
2193 if (overrun_p
2194 && gap < (vect_known_alignment_in_bytes (first_dr_info)
2195 / vect_get_scalar_dr_size (first_dr_info)))
2196 overrun_p = false;
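/* For example, with a known alignment of 16 bytes and 4-byte elements,
   a trailing gap of at most 3 elements is harmless: every vector access
   still touches a real group element within the same 16-byte block.  */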
2197 if (overrun_p && !can_overrun_p)
2199 if (dump_enabled_p ())
2200 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2201 "Peeling for outer loop is not supported\n");
2202 return false;
2204 *memory_access_type = VMAT_CONTIGUOUS;
2207 else
2209 /* We can always handle this case using elementwise accesses,
2210 but see if something more efficient is available. */
2211 *memory_access_type = VMAT_ELEMENTWISE;
2213 /* If there is a gap at the end of the group then these optimizations
2214 would access excess elements in the last iteration. */
2215 bool would_overrun_p = (gap != 0);
2216 /* An overrun is fine if the trailing elements are smaller than the
2217 alignment boundary B. Every vector access will be a multiple of B
2218 and so we are guaranteed to access a non-gap element in the
2219 same B-sized block. */
2220 if (would_overrun_p
2221 && !masked_p
2222 && gap < (vect_known_alignment_in_bytes (first_dr_info)
2223 / vect_get_scalar_dr_size (first_dr_info)))
2224 would_overrun_p = false;
2226 if (!STMT_VINFO_STRIDED_P (first_stmt_info)
2227 && (can_overrun_p || !would_overrun_p)
2228 && compare_step_with_zero (stmt_info) > 0)
2230 /* First cope with the degenerate case of a single-element
2231 vector. */
2232 if (known_eq (TYPE_VECTOR_SUBPARTS (vectype), 1U))
2233 *memory_access_type = VMAT_CONTIGUOUS;
2235 /* Otherwise try using LOAD/STORE_LANES. */
2236 if (*memory_access_type == VMAT_ELEMENTWISE
2237 && (vls_type == VLS_LOAD
2238 ? vect_load_lanes_supported (vectype, group_size, masked_p)
2239 : vect_store_lanes_supported (vectype, group_size,
2240 masked_p)))
2242 *memory_access_type = VMAT_LOAD_STORE_LANES;
2243 overrun_p = would_overrun_p;
2246 /* If that fails, try using permuting loads. */
2247 if (*memory_access_type == VMAT_ELEMENTWISE
2248 && (vls_type == VLS_LOAD
2249 ? vect_grouped_load_supported (vectype, single_element_p,
2250 group_size)
2251 : vect_grouped_store_supported (vectype, group_size)))
2253 *memory_access_type = VMAT_CONTIGUOUS_PERMUTE;
2254 overrun_p = would_overrun_p;
2258 /* As a last resort, try using a gather load or scatter store.
2260 ??? Although the code can handle all group sizes correctly,
2261 it probably isn't a win to use separate strided accesses based
2262 on nearby locations. Or, even if it's a win over scalar code,
2263 it might not be a win over vectorizing at a lower VF, if that
2264 allows us to use contiguous accesses. */
2265 if (*memory_access_type == VMAT_ELEMENTWISE
2266 && single_element_p
2267 && loop_vinfo
2268 && vect_use_strided_gather_scatters_p (stmt_info, loop_vinfo,
2269 masked_p, gs_info))
2270 *memory_access_type = VMAT_GATHER_SCATTER;
2273 if (vls_type != VLS_LOAD && first_stmt_info == stmt_info)
2275 /* STMT_INFO is the leader of the group. Check the operands of all the
2276 stmts of the group. */
2277 stmt_vec_info next_stmt_info = DR_GROUP_NEXT_ELEMENT (stmt_info);
2278 while (next_stmt_info)
2280 tree op = vect_get_store_rhs (next_stmt_info);
2281 enum vect_def_type dt;
2282 if (!vect_is_simple_use (op, vinfo, &dt))
2284 if (dump_enabled_p ())
2285 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2286 "use not simple.\n");
2287 return false;
2289 next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
2293 if (overrun_p)
2295 gcc_assert (can_overrun_p);
2296 if (dump_enabled_p ())
2297 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2298 "Data access with gaps requires scalar "
2299 "epilogue loop\n");
2300 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
2303 return true;
2306 /* A subroutine of get_load_store_type, with a subset of the same
2307 arguments. Handle the case where STMT_INFO is a load or store that
2308 accesses consecutive elements with a negative step. */
2310 static vect_memory_access_type
2311 get_negative_load_store_type (stmt_vec_info stmt_info, tree vectype,
2312 vec_load_store_type vls_type,
2313 unsigned int ncopies)
2315 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
2316 dr_alignment_support alignment_support_scheme;
2318 if (ncopies > 1)
2320 if (dump_enabled_p ())
2321 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2322 "multiple types with negative step.\n");
2323 return VMAT_ELEMENTWISE;
2326 alignment_support_scheme = vect_supportable_dr_alignment (dr_info, false);
2327 if (alignment_support_scheme != dr_aligned
2328 && alignment_support_scheme != dr_unaligned_supported)
2330 if (dump_enabled_p ())
2331 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2332 "negative step but alignment required.\n");
2333 return VMAT_ELEMENTWISE;
2336 if (vls_type == VLS_STORE_INVARIANT)
2338 if (dump_enabled_p ())
2339 dump_printf_loc (MSG_NOTE, vect_location,
2340 "negative step with invariant source;"
2341 " no permute needed.\n");
2342 return VMAT_CONTIGUOUS_DOWN;
2345 if (!perm_mask_for_reverse (vectype))
2347 if (dump_enabled_p ())
2348 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2349 "negative step and reversing not supported.\n");
2350 return VMAT_ELEMENTWISE;
2353 return VMAT_CONTIGUOUS_REVERSE;
2356 /* Analyze load or store statement STMT_INFO of type VLS_TYPE. Return true
2357 if there is a memory access type that the vectorized form can use,
2358 storing it in *MEMORY_ACCESS_TYPE if so. If we decide to use gathers
2359 or scatters, fill in GS_INFO accordingly.
2361 SLP says whether we're performing SLP rather than loop vectorization.
2362 MASKED_P is true if the statement is conditional on a vectorized mask.
2363 VECTYPE is the vector type that the vectorized statements will use.
2364 NCOPIES is the number of vector statements that will be needed. */
2366 static bool
2367 get_load_store_type (stmt_vec_info stmt_info, tree vectype, bool slp,
2368 bool masked_p, vec_load_store_type vls_type,
2369 unsigned int ncopies,
2370 vect_memory_access_type *memory_access_type,
2371 gather_scatter_info *gs_info)
2373 vec_info *vinfo = stmt_info->vinfo;
2374 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2375 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2376 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
2378 *memory_access_type = VMAT_GATHER_SCATTER;
2379 if (!vect_check_gather_scatter (stmt_info, loop_vinfo, gs_info))
2380 gcc_unreachable ();
2381 else if (!vect_is_simple_use (gs_info->offset, vinfo,
2382 &gs_info->offset_dt,
2383 &gs_info->offset_vectype))
2385 if (dump_enabled_p ())
2386 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2387 "%s index use not simple.\n",
2388 vls_type == VLS_LOAD ? "gather" : "scatter");
2389 return false;
2392 else if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
2394 if (!get_group_load_store_type (stmt_info, vectype, slp, masked_p,
2395 vls_type, memory_access_type, gs_info))
2396 return false;
2398 else if (STMT_VINFO_STRIDED_P (stmt_info))
2400 gcc_assert (!slp);
2401 if (loop_vinfo
2402 && vect_use_strided_gather_scatters_p (stmt_info, loop_vinfo,
2403 masked_p, gs_info))
2404 *memory_access_type = VMAT_GATHER_SCATTER;
2405 else
2406 *memory_access_type = VMAT_ELEMENTWISE;
2408 else
2410 int cmp = compare_step_with_zero (stmt_info);
2411 if (cmp < 0)
2412 *memory_access_type = get_negative_load_store_type
2413 (stmt_info, vectype, vls_type, ncopies);
2414 else if (cmp == 0)
2416 gcc_assert (vls_type == VLS_LOAD);
2417 *memory_access_type = VMAT_INVARIANT;
2419 else
2420 *memory_access_type = VMAT_CONTIGUOUS;
2423 if ((*memory_access_type == VMAT_ELEMENTWISE
2424 || *memory_access_type == VMAT_STRIDED_SLP)
2425 && !nunits.is_constant ())
2427 if (dump_enabled_p ())
2428 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2429 "Not using elementwise accesses due to variable "
2430 "vectorization factor.\n");
2431 return false;
2434 /* FIXME: At the moment the cost model seems to underestimate the
2435 cost of using elementwise accesses. This check preserves the
2436 traditional behavior until that can be fixed. */
2437 stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
2438 if (!first_stmt_info)
2439 first_stmt_info = stmt_info;
2440 if (*memory_access_type == VMAT_ELEMENTWISE
2441 && !STMT_VINFO_STRIDED_P (first_stmt_info)
2442 && !(stmt_info == DR_GROUP_FIRST_ELEMENT (stmt_info)
2443 && !DR_GROUP_NEXT_ELEMENT (stmt_info)
2444 && !pow2p_hwi (DR_GROUP_SIZE (stmt_info))))
2446 if (dump_enabled_p ())
2447 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2448 "not falling back to elementwise accesses\n");
2449 return false;
2451 return true;
2454 /* Return true if boolean argument MASK is suitable for vectorizing
2455 conditional load or store STMT_INFO. When returning true, store the type
2456 of the definition in *MASK_DT_OUT and the type of the vectorized mask
2457 in *MASK_VECTYPE_OUT. */
2459 static bool
2460 vect_check_load_store_mask (stmt_vec_info stmt_info, tree mask,
2461 vect_def_type *mask_dt_out,
2462 tree *mask_vectype_out)
2464 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (mask)))
2466 if (dump_enabled_p ())
2467 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2468 "mask argument is not a boolean.\n");
2469 return false;
2472 if (TREE_CODE (mask) != SSA_NAME)
2474 if (dump_enabled_p ())
2475 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2476 "mask argument is not an SSA name.\n");
2477 return false;
2480 enum vect_def_type mask_dt;
2481 tree mask_vectype;
2482 if (!vect_is_simple_use (mask, stmt_info->vinfo, &mask_dt, &mask_vectype))
2484 if (dump_enabled_p ())
2485 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2486 "mask use not simple.\n");
2487 return false;
2490 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2491 if (!mask_vectype)
2492 mask_vectype = get_mask_type_for_scalar_type (TREE_TYPE (vectype));
2494 if (!mask_vectype || !VECTOR_BOOLEAN_TYPE_P (mask_vectype))
2496 if (dump_enabled_p ())
2497 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2498 "could not find an appropriate vector mask type.\n");
2499 return false;
2502 if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_vectype),
2503 TYPE_VECTOR_SUBPARTS (vectype)))
2505 if (dump_enabled_p ())
2506 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2507 "vector mask type %T",
2508 " does not match vector data type %T.\n",
2509 mask_vectype, vectype);
2511 return false;
2514 *mask_dt_out = mask_dt;
2515 *mask_vectype_out = mask_vectype;
2516 return true;
2519 /* Return true if stored value RHS is suitable for vectorizing store
2520 statement STMT_INFO. When returning true, store the type of the
2521 definition in *RHS_DT_OUT, the type of the vectorized store value in
2522 *RHS_VECTYPE_OUT and the type of the store in *VLS_TYPE_OUT. */
2524 static bool
2525 vect_check_store_rhs (stmt_vec_info stmt_info, tree rhs,
2526 vect_def_type *rhs_dt_out, tree *rhs_vectype_out,
2527 vec_load_store_type *vls_type_out)
2529 /* If this is a store from a constant, make sure
2530 native_encode_expr can handle it. */
2531 if (CONSTANT_CLASS_P (rhs) && native_encode_expr (rhs, NULL, 64) == 0)
2533 if (dump_enabled_p ())
2534 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2535 "cannot encode constant as a byte sequence.\n");
2536 return false;
2539 enum vect_def_type rhs_dt;
2540 tree rhs_vectype;
2541 if (!vect_is_simple_use (rhs, stmt_info->vinfo, &rhs_dt, &rhs_vectype))
2543 if (dump_enabled_p ())
2544 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2545 "use not simple.\n");
2546 return false;
2549 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2550 if (rhs_vectype && !useless_type_conversion_p (vectype, rhs_vectype))
2552 if (dump_enabled_p ())
2553 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2554 "incompatible vector types.\n");
2555 return false;
2558 *rhs_dt_out = rhs_dt;
2559 *rhs_vectype_out = rhs_vectype;
2560 if (rhs_dt == vect_constant_def || rhs_dt == vect_external_def)
2561 *vls_type_out = VLS_STORE_INVARIANT;
2562 else
2563 *vls_type_out = VLS_STORE;
2564 return true;
2567 /* Build an all-ones vector mask of type MASKTYPE while vectorizing STMT_INFO.
2568 Note that we support masks with floating-point type, in which case the
2569 floats are interpreted as a bitmask. */
2571 static tree
2572 vect_build_all_ones_mask (stmt_vec_info stmt_info, tree masktype)
2574 if (TREE_CODE (masktype) == INTEGER_TYPE)
2575 return build_int_cst (masktype, -1);
2576 else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
2578 tree mask = build_int_cst (TREE_TYPE (masktype), -1);
2579 mask = build_vector_from_val (masktype, mask);
2580 return vect_init_vector (stmt_info, mask, masktype, NULL);
2582 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
2584 REAL_VALUE_TYPE r;
2585 long tmp[6];
2586 for (int j = 0; j < 6; ++j)
2587 tmp[j] = -1;
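/* Reinterpret the all-ones bit pattern in the scalar float format (a NaN
   for IEEE modes); as noted above, only the bits matter here.  */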
2588 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
2589 tree mask = build_real (TREE_TYPE (masktype), r);
2590 mask = build_vector_from_val (masktype, mask);
2591 return vect_init_vector (stmt_info, mask, masktype, NULL);
2593 gcc_unreachable ();
2596 /* Build an all-zero merge value of type VECTYPE while vectorizing
2597 STMT_INFO as a gather load. */
2599 static tree
2600 vect_build_zero_merge_argument (stmt_vec_info stmt_info, tree vectype)
2602 tree merge;
2603 if (TREE_CODE (TREE_TYPE (vectype)) == INTEGER_TYPE)
2604 merge = build_int_cst (TREE_TYPE (vectype), 0);
2605 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (vectype)))
2607 REAL_VALUE_TYPE r;
2608 long tmp[6];
2609 for (int j = 0; j < 6; ++j)
2610 tmp[j] = 0;
2611 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (vectype)));
2612 merge = build_real (TREE_TYPE (vectype), r);
2614 else
2615 gcc_unreachable ();
2616 merge = build_vector_from_val (vectype, merge);
2617 return vect_init_vector (stmt_info, merge, vectype, NULL);
2620 /* Build a gather load call while vectorizing STMT_INFO. Insert new
2621 instructions before GSI and add them to VEC_STMT. GS_INFO describes
2622 the gather load operation. If the load is conditional, MASK is the
2623 unvectorized condition and MASK_DT is its definition type, otherwise
2624 MASK is null. */
2626 static void
2627 vect_build_gather_load_calls (stmt_vec_info stmt_info,
2628 gimple_stmt_iterator *gsi,
2629 stmt_vec_info *vec_stmt,
2630 gather_scatter_info *gs_info,
2631 tree mask)
2633 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2634 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2635 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2636 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2637 int ncopies = vect_get_num_copies (loop_vinfo, vectype);
2638 edge pe = loop_preheader_edge (loop);
2639 enum { NARROW, NONE, WIDEN } modifier;
2640 poly_uint64 gather_off_nunits
2641 = TYPE_VECTOR_SUBPARTS (gs_info->offset_vectype);
2643 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info->decl));
2644 tree rettype = TREE_TYPE (TREE_TYPE (gs_info->decl));
2645 tree srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2646 tree ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2647 tree idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2648 tree masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2649 tree scaletype = TREE_VALUE (arglist);
2650 tree real_masktype = masktype;
2651 gcc_checking_assert (types_compatible_p (srctype, rettype)
2652 && (!mask
2653 || TREE_CODE (masktype) == INTEGER_TYPE
2654 || types_compatible_p (srctype, masktype)));
2655 if (mask && TREE_CODE (masktype) == INTEGER_TYPE)
2656 masktype = build_same_sized_truth_vector_type (srctype);
2658 tree mask_halftype = masktype;
2659 tree perm_mask = NULL_TREE;
2660 tree mask_perm_mask = NULL_TREE;
2661 if (known_eq (nunits, gather_off_nunits))
2662 modifier = NONE;
2663 else if (known_eq (nunits * 2, gather_off_nunits))
2665 modifier = WIDEN;
2667 /* Currently widening gathers and scatters are only supported for
2668 fixed-length vectors. */
2669 int count = gather_off_nunits.to_constant ();
2670 vec_perm_builder sel (count, count, 1);
2671 for (int i = 0; i < count; ++i)
2672 sel.quick_push (i | (count / 2));
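/* E.g. for COUNT == 8 this expands to { 4, 5, 6, 7, 4, 5, 6, 7 }: the
   high half of the offsets is moved into the low lanes so that the
   odd-numbered copies below gather from the remaining addresses.  */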
2674 vec_perm_indices indices (sel, 1, count);
2675 perm_mask = vect_gen_perm_mask_checked (gs_info->offset_vectype,
2676 indices);
2678 else if (known_eq (nunits, gather_off_nunits * 2))
2680 modifier = NARROW;
2682 /* Currently narrowing gathers and scatters are only supported for
2683 fixed-length vectors. */
2684 int count = nunits.to_constant ();
2685 vec_perm_builder sel (count, count, 1);
2686 sel.quick_grow (count);
2687 for (int i = 0; i < count; ++i)
2688 sel[i] = i < count / 2 ? i : i + count / 2;
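/* E.g. for COUNT == 8 the two-input selector is { 0, 1, 2, 3, 8, 9, 10,
   11 }: the low halves of two consecutive gather results (each of which
   fills only half its lanes) are concatenated to form one full vector.  */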
2689 vec_perm_indices indices (sel, 2, count);
2690 perm_mask = vect_gen_perm_mask_checked (vectype, indices);
2692 ncopies *= 2;
2694 if (mask && masktype == real_masktype)
2696 for (int i = 0; i < count; ++i)
2697 sel[i] = i | (count / 2);
2698 indices.new_vector (sel, 2, count);
2699 mask_perm_mask = vect_gen_perm_mask_checked (masktype, indices);
2701 else if (mask)
2702 mask_halftype
2703 = build_same_sized_truth_vector_type (gs_info->offset_vectype);
2705 else
2706 gcc_unreachable ();
2708 tree scalar_dest = gimple_get_lhs (stmt_info->stmt);
2709 tree vec_dest = vect_create_destination_var (scalar_dest, vectype);
2711 tree ptr = fold_convert (ptrtype, gs_info->base);
2712 if (!is_gimple_min_invariant (ptr))
2714 gimple_seq seq;
2715 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
2716 basic_block new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
2717 gcc_assert (!new_bb);
2720 tree scale = build_int_cst (scaletype, gs_info->scale);
2722 tree vec_oprnd0 = NULL_TREE;
2723 tree vec_mask = NULL_TREE;
2724 tree src_op = NULL_TREE;
2725 tree mask_op = NULL_TREE;
2726 tree prev_res = NULL_TREE;
2727 stmt_vec_info prev_stmt_info = NULL;
2729 if (!mask)
2731 src_op = vect_build_zero_merge_argument (stmt_info, rettype);
2732 mask_op = vect_build_all_ones_mask (stmt_info, masktype);
2735 for (int j = 0; j < ncopies; ++j)
2737 tree op, var;
2738 if (modifier == WIDEN && (j & 1))
2739 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
2740 perm_mask, stmt_info, gsi);
2741 else if (j == 0)
2742 op = vec_oprnd0
2743 = vect_get_vec_def_for_operand (gs_info->offset, stmt_info);
2744 else
2745 op = vec_oprnd0 = vect_get_vec_def_for_stmt_copy (loop_vinfo,
2746 vec_oprnd0);
2748 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
2750 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
2751 TYPE_VECTOR_SUBPARTS (idxtype)));
2752 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
2753 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
2754 gassign *new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
2755 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
2756 op = var;
2759 if (mask)
2761 if (mask_perm_mask && (j & 1))
2762 mask_op = permute_vec_elements (mask_op, mask_op,
2763 mask_perm_mask, stmt_info, gsi);
2764 else
2766 if (j == 0)
2767 vec_mask = vect_get_vec_def_for_operand (mask, stmt_info);
2768 else if (modifier != NARROW || (j & 1) == 0)
2769 vec_mask = vect_get_vec_def_for_stmt_copy (loop_vinfo,
2770 vec_mask);
2772 mask_op = vec_mask;
2773 if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
2775 poly_uint64 sub1 = TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op));
2776 poly_uint64 sub2 = TYPE_VECTOR_SUBPARTS (masktype);
2777 gcc_assert (known_eq (sub1, sub2));
2778 var = vect_get_new_ssa_name (masktype, vect_simple_var);
2779 mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
2780 gassign *new_stmt
2781 = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op);
2782 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
2783 mask_op = var;
2786 if (modifier == NARROW && masktype != real_masktype)
2788 var = vect_get_new_ssa_name (mask_halftype, vect_simple_var);
2789 gassign *new_stmt
2790 = gimple_build_assign (var, (j & 1) ? VEC_UNPACK_HI_EXPR
2791 : VEC_UNPACK_LO_EXPR,
2792 mask_op);
2793 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
2794 mask_op = var;
2796 src_op = mask_op;
2799 tree mask_arg = mask_op;
2800 if (masktype != real_masktype)
2802 tree utype, optype = TREE_TYPE (mask_op);
2803 if (TYPE_MODE (real_masktype) == TYPE_MODE (optype))
2804 utype = real_masktype;
2805 else
2806 utype = lang_hooks.types.type_for_mode (TYPE_MODE (optype), 1);
2807 var = vect_get_new_ssa_name (utype, vect_scalar_var);
2808 mask_arg = build1 (VIEW_CONVERT_EXPR, utype, mask_op);
2809 gassign *new_stmt
2810 = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_arg);
2811 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
2812 mask_arg = var;
2813 if (!useless_type_conversion_p (real_masktype, utype))
2815 gcc_assert (TYPE_PRECISION (utype)
2816 <= TYPE_PRECISION (real_masktype));
2817 var = vect_get_new_ssa_name (real_masktype, vect_scalar_var);
2818 new_stmt = gimple_build_assign (var, NOP_EXPR, mask_arg);
2819 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
2820 mask_arg = var;
2822 src_op = build_zero_cst (srctype);
2824 gcall *new_call = gimple_build_call (gs_info->decl, 5, src_op, ptr, op,
2825 mask_arg, scale);
2827 stmt_vec_info new_stmt_info;
2828 if (!useless_type_conversion_p (vectype, rettype))
2830 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype),
2831 TYPE_VECTOR_SUBPARTS (rettype)));
2832 op = vect_get_new_ssa_name (rettype, vect_simple_var);
2833 gimple_call_set_lhs (new_call, op);
2834 vect_finish_stmt_generation (stmt_info, new_call, gsi);
2835 var = make_ssa_name (vec_dest);
2836 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
2837 gassign *new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
2838 new_stmt_info
2839 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
2841 else
2843 var = make_ssa_name (vec_dest, new_call);
2844 gimple_call_set_lhs (new_call, var);
2845 new_stmt_info
2846 = vect_finish_stmt_generation (stmt_info, new_call, gsi);
2849 if (modifier == NARROW)
2851 if ((j & 1) == 0)
2853 prev_res = var;
2854 continue;
2856 var = permute_vec_elements (prev_res, var, perm_mask,
2857 stmt_info, gsi);
2858 new_stmt_info = loop_vinfo->lookup_def (var);
2861 if (prev_stmt_info == NULL)
2862 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
2863 else
2864 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
2865 prev_stmt_info = new_stmt_info;
2869 /* Prepare the base and offset in GS_INFO for vectorization.
2870 Set *DATAREF_PTR to the loop-invariant base address and *VEC_OFFSET
2871 to the vectorized offset argument for the first copy of STMT_INFO.
2872 STMT_INFO is the statement described by GS_INFO and LOOP is the
2873 containing loop. */
2875 static void
2876 vect_get_gather_scatter_ops (struct loop *loop, stmt_vec_info stmt_info,
2877 gather_scatter_info *gs_info,
2878 tree *dataref_ptr, tree *vec_offset)
2880 gimple_seq stmts = NULL;
2881 *dataref_ptr = force_gimple_operand (gs_info->base, &stmts, true, NULL_TREE);
2882 if (stmts != NULL)
2884 basic_block new_bb;
2885 edge pe = loop_preheader_edge (loop);
2886 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
2887 gcc_assert (!new_bb);
2889 tree offset_type = TREE_TYPE (gs_info->offset);
2890 tree offset_vectype = get_vectype_for_scalar_type (offset_type);
2891 *vec_offset = vect_get_vec_def_for_operand (gs_info->offset, stmt_info,
2892 offset_vectype);
2895 /* Prepare to implement a grouped or strided load or store using
2896 the gather load or scatter store operation described by GS_INFO.
2897 STMT_INFO is the load or store statement.
2899 Set *DATAREF_BUMP to the amount that should be added to the base
2900 address after each copy of the vectorized statement. Set *VEC_OFFSET
2901 to an invariant offset vector in which element I has the value
2902 I * DR_STEP / SCALE. */
2904 static void
2905 vect_get_strided_load_store_ops (stmt_vec_info stmt_info,
2906 loop_vec_info loop_vinfo,
2907 gather_scatter_info *gs_info,
2908 tree *dataref_bump, tree *vec_offset)
2910 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
2911 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2912 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2913 gimple_seq stmts;
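/* The bump is one vector's worth of scalar steps, i.e.
   DR_STEP * TYPE_VECTOR_SUBPARTS (VECTYPE) bytes, applied after each
   copy of the vectorized statement.  */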
2915 tree bump = size_binop (MULT_EXPR,
2916 fold_convert (sizetype, DR_STEP (dr)),
2917 size_int (TYPE_VECTOR_SUBPARTS (vectype)));
2918 *dataref_bump = force_gimple_operand (bump, &stmts, true, NULL_TREE);
2919 if (stmts)
2920 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
2922 /* The offset given in GS_INFO can have pointer type, so use the element
2923 type of the vector instead. */
2924 tree offset_type = TREE_TYPE (gs_info->offset);
2925 tree offset_vectype = get_vectype_for_scalar_type (offset_type);
2926 offset_type = TREE_TYPE (offset_vectype);
2928 /* Calculate X = DR_STEP / SCALE and convert it to the appropriate type. */
2929 tree step = size_binop (EXACT_DIV_EXPR, DR_STEP (dr),
2930 ssize_int (gs_info->scale));
2931 step = fold_convert (offset_type, step);
2932 step = force_gimple_operand (step, &stmts, true, NULL_TREE);
2934 /* Create {0, X, X*2, X*3, ...}. */
2935 *vec_offset = gimple_build (&stmts, VEC_SERIES_EXPR, offset_vectype,
2936 build_zero_cst (offset_type), step);
2937 if (stmts)
2938 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
2941 /* Return the amount that should be added to a vector pointer to move
2942 to the next or previous copy of AGGR_TYPE. DR_INFO is the data reference
2943 being vectorized and MEMORY_ACCESS_TYPE describes the type of
2944 vectorization. */
2946 static tree
2947 vect_get_data_ptr_increment (dr_vec_info *dr_info, tree aggr_type,
2948 vect_memory_access_type memory_access_type)
2950 if (memory_access_type == VMAT_INVARIANT)
2951 return size_zero_node;
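/* Otherwise the pointer moves by the size of AGGR_TYPE per copy, negated
   below when the scalar step is negative (e.g. VMAT_CONTIGUOUS_REVERSE
   or VMAT_CONTIGUOUS_DOWN accesses).  */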
2953 tree iv_step = TYPE_SIZE_UNIT (aggr_type);
2954 tree step = vect_dr_behavior (dr_info)->step;
2955 if (tree_int_cst_sgn (step) == -1)
2956 iv_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv_step), iv_step);
2957 return iv_step;
2960 /* Check and perform vectorization of BUILT_IN_BSWAP{16,32,64}. */
2962 static bool
2963 vectorizable_bswap (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
2964 stmt_vec_info *vec_stmt, slp_tree slp_node,
2965 tree vectype_in, stmt_vector_for_cost *cost_vec)
2967 tree op, vectype;
2968 gcall *stmt = as_a <gcall *> (stmt_info->stmt);
2969 vec_info *vinfo = stmt_info->vinfo;
2970 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2971 unsigned ncopies;
2973 op = gimple_call_arg (stmt, 0);
2974 vectype = STMT_VINFO_VECTYPE (stmt_info);
2975 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2977 /* Multiple types in SLP are handled by creating the appropriate number of
2978 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2979 case of SLP. */
2980 if (slp_node)
2981 ncopies = 1;
2982 else
2983 ncopies = vect_get_num_copies (loop_vinfo, vectype);
2985 gcc_assert (ncopies >= 1);
2987 tree char_vectype = get_same_sized_vectype (char_type_node, vectype_in);
2988 if (! char_vectype)
2989 return false;
2991 poly_uint64 num_bytes = TYPE_VECTOR_SUBPARTS (char_vectype);
2992 unsigned word_bytes;
2993 if (!constant_multiple_p (num_bytes, nunits, &word_bytes))
2994 return false;
2996 /* The encoding uses one stepped pattern for each byte in the word. */
2997 vec_perm_builder elts (num_bytes, word_bytes, 3);
2998 for (unsigned i = 0; i < 3; ++i)
2999 for (unsigned j = 0; j < word_bytes; ++j)
3000 elts.quick_push ((i + 1) * word_bytes - j - 1);
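/* For a 32-bit bswap on a 16-byte vector (WORD_BYTES == 4) the selector
   expands to { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 }.  */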
3002 vec_perm_indices indices (elts, 1, num_bytes);
3003 if (!can_vec_perm_const_p (TYPE_MODE (char_vectype), indices))
3004 return false;
3006 if (! vec_stmt)
3008 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
3009 DUMP_VECT_SCOPE ("vectorizable_bswap");
3010 if (! slp_node)
3012 record_stmt_cost (cost_vec,
3013 1, vector_stmt, stmt_info, 0, vect_prologue);
3014 record_stmt_cost (cost_vec,
3015 ncopies, vec_perm, stmt_info, 0, vect_body);
3017 return true;
3020 tree bswap_vconst = vec_perm_indices_to_tree (char_vectype, indices);
3022 /* Transform. */
3023 vec<tree> vec_oprnds = vNULL;
3024 stmt_vec_info new_stmt_info = NULL;
3025 stmt_vec_info prev_stmt_info = NULL;
3026 for (unsigned j = 0; j < ncopies; j++)
3028 /* Handle uses. */
3029 if (j == 0)
3030 vect_get_vec_defs (op, NULL, stmt_info, &vec_oprnds, NULL, slp_node);
3031 else
3032 vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds, NULL);
3034 /* Arguments are ready. Create the new vector stmt. */
3035 unsigned i;
3036 tree vop;
3037 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
3039 gimple *new_stmt;
3040 tree tem = make_ssa_name (char_vectype);
3041 new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
3042 char_vectype, vop));
3043 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
3044 tree tem2 = make_ssa_name (char_vectype);
3045 new_stmt = gimple_build_assign (tem2, VEC_PERM_EXPR,
3046 tem, tem, bswap_vconst);
3047 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
3048 tem = make_ssa_name (vectype);
3049 new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
3050 vectype, tem2));
3051 new_stmt_info
3052 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
3053 if (slp_node)
3054 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
3057 if (slp_node)
3058 continue;
3060 if (j == 0)
3061 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
3062 else
3063 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
3065 prev_stmt_info = new_stmt_info;
3068 vec_oprnds.release ();
3069 return true;
3072 /* Return true if vector types VECTYPE_IN and VECTYPE_OUT have
3073 integer elements and if we can narrow VECTYPE_IN to VECTYPE_OUT
3074 in a single step. On success, store the binary pack code in
3075 *CONVERT_CODE. */
3077 static bool
3078 simple_integer_narrowing (tree vectype_out, tree vectype_in,
3079 tree_code *convert_code)
3081 if (!INTEGRAL_TYPE_P (TREE_TYPE (vectype_out))
3082 || !INTEGRAL_TYPE_P (TREE_TYPE (vectype_in)))
3083 return false;
3085 tree_code code;
3086 int multi_step_cvt = 0;
3087 auto_vec <tree, 8> interm_types;
3088 if (!supportable_narrowing_operation (NOP_EXPR, vectype_out, vectype_in,
3089 &code, &multi_step_cvt,
3090 &interm_types)
3091 || multi_step_cvt)
3092 return false;
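/* CODE is now a single-step pack operation (typically
   VEC_PACK_TRUNC_EXPR) that combines two VECTYPE_IN vectors into one
   VECTYPE_OUT vector.  */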
3094 *convert_code = code;
3095 return true;
3098 /* Function vectorizable_call.
3100 Check if STMT_INFO performs a function call that can be vectorized.
3101 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
3102 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3103 Return true if STMT_INFO is vectorizable in this way. */
3105 static bool
3106 vectorizable_call (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
3107 stmt_vec_info *vec_stmt, slp_tree slp_node,
3108 stmt_vector_for_cost *cost_vec)
3110 gcall *stmt;
3111 tree vec_dest;
3112 tree scalar_dest;
3113 tree op;
3114 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
3115 stmt_vec_info prev_stmt_info;
3116 tree vectype_out, vectype_in;
3117 poly_uint64 nunits_in;
3118 poly_uint64 nunits_out;
3119 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3120 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3121 vec_info *vinfo = stmt_info->vinfo;
3122 tree fndecl, new_temp, rhs_type;
3123 enum vect_def_type dt[4]
3124 = { vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type,
3125 vect_unknown_def_type };
3126 int ndts = ARRAY_SIZE (dt);
3127 int ncopies, j;
3128 auto_vec<tree, 8> vargs;
3129 auto_vec<tree, 8> orig_vargs;
3130 enum { NARROW, NONE, WIDEN } modifier;
3131 size_t i, nargs;
3132 tree lhs;
3134 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3135 return false;
3137 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
3138 && ! vec_stmt)
3139 return false;
3141 /* Is STMT_INFO a vectorizable call? */
3142 stmt = dyn_cast <gcall *> (stmt_info->stmt);
3143 if (!stmt)
3144 return false;
3146 if (gimple_call_internal_p (stmt)
3147 && (internal_load_fn_p (gimple_call_internal_fn (stmt))
3148 || internal_store_fn_p (gimple_call_internal_fn (stmt))))
3149 /* Handled by vectorizable_load and vectorizable_store. */
3150 return false;
3152 if (gimple_call_lhs (stmt) == NULL_TREE
3153 || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
3154 return false;
3156 gcc_checking_assert (!stmt_can_throw_internal (cfun, stmt));
3158 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3160 /* Process function arguments. */
3161 rhs_type = NULL_TREE;
3162 vectype_in = NULL_TREE;
3163 nargs = gimple_call_num_args (stmt);
3165 /* Bail out if the function has more than four arguments; we do not have
3166 interesting builtin functions to vectorize with more than two arguments
3167 except for fma. No arguments is also not good. */
3168 if (nargs == 0 || nargs > 4)
3169 return false;
3171 /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic. */
3172 combined_fn cfn = gimple_call_combined_fn (stmt);
3173 if (cfn == CFN_GOMP_SIMD_LANE)
3175 nargs = 0;
3176 rhs_type = unsigned_type_node;
3179 int mask_opno = -1;
3180 if (internal_fn_p (cfn))
3181 mask_opno = internal_fn_mask_index (as_internal_fn (cfn));
3183 for (i = 0; i < nargs; i++)
3185 tree opvectype;
3187 op = gimple_call_arg (stmt, i);
3188 if (!vect_is_simple_use (op, vinfo, &dt[i], &opvectype))
3190 if (dump_enabled_p ())
3191 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3192 "use not simple.\n");
3193 return false;
3196 /* Skip the mask argument to an internal function. This operand
3197 has been converted via a pattern if necessary. */
3198 if ((int) i == mask_opno)
3199 continue;
3201 /* We can only handle calls with arguments of the same type. */
3202 if (rhs_type
3203 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
3205 if (dump_enabled_p ())
3206 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3207 "argument types differ.\n");
3208 return false;
3210 if (!rhs_type)
3211 rhs_type = TREE_TYPE (op);
3213 if (!vectype_in)
3214 vectype_in = opvectype;
3215 else if (opvectype
3216 && opvectype != vectype_in)
3218 if (dump_enabled_p ())
3219 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3220 "argument vector types differ.\n");
3221 return false;
3224 /* If all arguments are external or constant defs use a vector type with
3225 the same size as the output vector type. */
3226 if (!vectype_in)
3227 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
3228 if (vec_stmt)
3229 gcc_assert (vectype_in);
3230 if (!vectype_in)
3232 if (dump_enabled_p ())
3233 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3234 "no vectype for scalar type %T\n", rhs_type);
3236 return false;
3239 /* FORNOW */
3240 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
3241 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
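/* Classify the call by how the lane counts relate: twice as many output
   lanes as input lanes means the result must be narrowed (two calls per
   output vector), equal counts need no modifier, and half as many output
   lanes means widening.  Other ratios are not handled.  */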
3242 if (known_eq (nunits_in * 2, nunits_out))
3243 modifier = NARROW;
3244 else if (known_eq (nunits_out, nunits_in))
3245 modifier = NONE;
3246 else if (known_eq (nunits_out * 2, nunits_in))
3247 modifier = WIDEN;
3248 else
3249 return false;
3251 /* We only handle functions that do not read or clobber memory. */
3252 if (gimple_vuse (stmt))
3254 if (dump_enabled_p ())
3255 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3256 "function reads from or writes to memory.\n");
3257 return false;
3260 /* For now, we only vectorize functions if a target specific builtin
3261 is available. TODO -- in some cases, it might be profitable to
3262 insert the calls for pieces of the vector, in order to be able
3263 to vectorize other operations in the loop. */
3264 fndecl = NULL_TREE;
3265 internal_fn ifn = IFN_LAST;
3266 tree callee = gimple_call_fndecl (stmt);
3268 /* First try using an internal function. */
3269 tree_code convert_code = ERROR_MARK;
3270 if (cfn != CFN_LAST
3271 && (modifier == NONE
3272 || (modifier == NARROW
3273 && simple_integer_narrowing (vectype_out, vectype_in,
3274 &convert_code))))
3275 ifn = vectorizable_internal_function (cfn, callee, vectype_out,
3276 vectype_in);
3278 /* If that fails, try asking for a target-specific built-in function. */
3279 if (ifn == IFN_LAST)
3281 if (cfn != CFN_LAST)
3282 fndecl = targetm.vectorize.builtin_vectorized_function
3283 (cfn, vectype_out, vectype_in);
3284 else if (callee)
3285 fndecl = targetm.vectorize.builtin_md_vectorized_function
3286 (callee, vectype_out, vectype_in);
3289 if (ifn == IFN_LAST && !fndecl)
3291 if (cfn == CFN_GOMP_SIMD_LANE
3292 && !slp_node
3293 && loop_vinfo
3294 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
3295 && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
3296 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
3297 == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
3299 /* We can handle IFN_GOMP_SIMD_LANE by returning a
3300 { 0, 1, 2, ... vf - 1 } vector. */
3301 gcc_assert (nargs == 0);
3303 else if (modifier == NONE
3304 && (gimple_call_builtin_p (stmt, BUILT_IN_BSWAP16)
3305 || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP32)
3306 || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP64)))
3307 return vectorizable_bswap (stmt_info, gsi, vec_stmt, slp_node,
3308 vectype_in, cost_vec);
3309 else
3311 if (dump_enabled_p ())
3312 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3313 "function is not vectorizable.\n");
3314 return false;
3318 if (slp_node)
3319 ncopies = 1;
3320 else if (modifier == NARROW && ifn == IFN_LAST)
3321 ncopies = vect_get_num_copies (loop_vinfo, vectype_out);
3322 else
3323 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
3325 /* Sanity check: make sure that at least one copy of the vectorized stmt
3326 needs to be generated. */
3327 gcc_assert (ncopies >= 1);
3329 vec_loop_masks *masks = (loop_vinfo ? &LOOP_VINFO_MASKS (loop_vinfo) : NULL);
3330 if (!vec_stmt) /* transformation not required. */
3332 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
3333 DUMP_VECT_SCOPE ("vectorizable_call");
3334 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec);
3335 if (ifn != IFN_LAST && modifier == NARROW && !slp_node)
3336 record_stmt_cost (cost_vec, ncopies / 2,
3337 vec_promote_demote, stmt_info, 0, vect_body);
3339 if (loop_vinfo && mask_opno >= 0)
3341 unsigned int nvectors = (slp_node
3342 ? SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node)
3343 : ncopies);
3344 vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype_out);
3346 return true;
3349 /* Transform. */
3351 if (dump_enabled_p ())
3352 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
3354 /* Handle def. */
3355 scalar_dest = gimple_call_lhs (stmt);
3356 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
3358 bool masked_loop_p = loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
3360 stmt_vec_info new_stmt_info = NULL;
3361 prev_stmt_info = NULL;
3362 if (modifier == NONE || ifn != IFN_LAST)
3364 tree prev_res = NULL_TREE;
3365 vargs.safe_grow (nargs);
3366 orig_vargs.safe_grow (nargs);
3367 for (j = 0; j < ncopies; ++j)
3369 /* Build argument list for the vectorized call. */
3370 if (slp_node)
3372 auto_vec<vec<tree> > vec_defs (nargs);
3373 vec<tree> vec_oprnds0;
3375 for (i = 0; i < nargs; i++)
3376 vargs[i] = gimple_call_arg (stmt, i);
3377 vect_get_slp_defs (vargs, slp_node, &vec_defs);
3378 vec_oprnds0 = vec_defs[0];
3380 /* Arguments are ready. Create the new vector stmt. */
3381 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
3383 size_t k;
3384 for (k = 0; k < nargs; k++)
3386 vec<tree> vec_oprndsk = vec_defs[k];
3387 vargs[k] = vec_oprndsk[i];
3389 if (modifier == NARROW)
3391 /* We don't define any narrowing conditional functions
3392 at present. */
3393 gcc_assert (mask_opno < 0);
3394 tree half_res = make_ssa_name (vectype_in);
3395 gcall *call
3396 = gimple_build_call_internal_vec (ifn, vargs);
3397 gimple_call_set_lhs (call, half_res);
3398 gimple_call_set_nothrow (call, true);
3399 new_stmt_info
3400 = vect_finish_stmt_generation (stmt_info, call, gsi);
3401 if ((i & 1) == 0)
3403 prev_res = half_res;
3404 continue;
3406 new_temp = make_ssa_name (vec_dest);
3407 gimple *new_stmt
3408 = gimple_build_assign (new_temp, convert_code,
3409 prev_res, half_res);
3410 new_stmt_info
3411 = vect_finish_stmt_generation (stmt_info, new_stmt,
3412 gsi);
3414 else
3416 if (mask_opno >= 0 && masked_loop_p)
3418 unsigned int vec_num = vec_oprnds0.length ();
3419 /* Always true for SLP. */
3420 gcc_assert (ncopies == 1);
3421 tree mask = vect_get_loop_mask (gsi, masks, vec_num,
3422 vectype_out, i);
3423 vargs[mask_opno] = prepare_load_store_mask
3424 (TREE_TYPE (mask), mask, vargs[mask_opno], gsi);
3427 gcall *call;
3428 if (ifn != IFN_LAST)
3429 call = gimple_build_call_internal_vec (ifn, vargs);
3430 else
3431 call = gimple_build_call_vec (fndecl, vargs);
3432 new_temp = make_ssa_name (vec_dest, call);
3433 gimple_call_set_lhs (call, new_temp);
3434 gimple_call_set_nothrow (call, true);
3435 new_stmt_info
3436 = vect_finish_stmt_generation (stmt_info, call, gsi);
3438 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
3441 for (i = 0; i < nargs; i++)
3443 vec<tree> vec_oprndsi = vec_defs[i];
3444 vec_oprndsi.release ();
3446 continue;
3449 for (i = 0; i < nargs; i++)
3451 op = gimple_call_arg (stmt, i);
3452 if (j == 0)
3453 vec_oprnd0
3454 = vect_get_vec_def_for_operand (op, stmt_info);
3455 else
3456 vec_oprnd0
3457 = vect_get_vec_def_for_stmt_copy (vinfo, orig_vargs[i]);
3459 orig_vargs[i] = vargs[i] = vec_oprnd0;
3462 if (mask_opno >= 0 && masked_loop_p)
3464 tree mask = vect_get_loop_mask (gsi, masks, ncopies,
3465 vectype_out, j);
3466 vargs[mask_opno]
3467 = prepare_load_store_mask (TREE_TYPE (mask), mask,
3468 vargs[mask_opno], gsi);
3471 if (cfn == CFN_GOMP_SIMD_LANE)
3473 tree cst = build_index_vector (vectype_out, j * nunits_out, 1);
3474 tree new_var
3475 = vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_");
3476 gimple *init_stmt = gimple_build_assign (new_var, cst);
3477 vect_init_vector_1 (stmt_info, init_stmt, NULL);
3478 new_temp = make_ssa_name (vec_dest);
3479 gimple *new_stmt = gimple_build_assign (new_temp, new_var);
3480 new_stmt_info
3481 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
3483 else if (modifier == NARROW)
3485 /* We don't define any narrowing conditional functions at
3486 present. */
3487 gcc_assert (mask_opno < 0);
3488 tree half_res = make_ssa_name (vectype_in);
3489 gcall *call = gimple_build_call_internal_vec (ifn, vargs);
3490 gimple_call_set_lhs (call, half_res);
3491 gimple_call_set_nothrow (call, true);
3492 new_stmt_info
3493 = vect_finish_stmt_generation (stmt_info, call, gsi);
3494 if ((j & 1) == 0)
3496 prev_res = half_res;
3497 continue;
3499 new_temp = make_ssa_name (vec_dest);
3500 gassign *new_stmt = gimple_build_assign (new_temp, convert_code,
3501 prev_res, half_res);
3502 new_stmt_info
3503 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
3505 else
3507 gcall *call;
3508 if (ifn != IFN_LAST)
3509 call = gimple_build_call_internal_vec (ifn, vargs);
3510 else
3511 call = gimple_build_call_vec (fndecl, vargs);
3512 new_temp = make_ssa_name (vec_dest, call);
3513 gimple_call_set_lhs (call, new_temp);
3514 gimple_call_set_nothrow (call, true);
3515 new_stmt_info
3516 = vect_finish_stmt_generation (stmt_info, call, gsi);
3519 if (j == (modifier == NARROW ? 1 : 0))
3520 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
3521 else
3522 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
3524 prev_stmt_info = new_stmt_info;
3527 else if (modifier == NARROW)
3529 /* We don't define any narrowing conditional functions at present. */
3530 gcc_assert (mask_opno < 0);
3531 for (j = 0; j < ncopies; ++j)
3533 /* Build argument list for the vectorized call. */
3534 if (j == 0)
3535 vargs.create (nargs * 2);
3536 else
3537 vargs.truncate (0);
3539 if (slp_node)
3541 auto_vec<vec<tree> > vec_defs (nargs);
3542 vec<tree> vec_oprnds0;
3544 for (i = 0; i < nargs; i++)
3545 vargs.quick_push (gimple_call_arg (stmt, i));
3546 vect_get_slp_defs (vargs, slp_node, &vec_defs);
3547 vec_oprnds0 = vec_defs[0];
3549 /* Arguments are ready. Create the new vector stmt. */
3550 for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
3552 size_t k;
3553 vargs.truncate (0);
3554 for (k = 0; k < nargs; k++)
3556 vec<tree> vec_oprndsk = vec_defs[k];
3557 vargs.quick_push (vec_oprndsk[i]);
3558 vargs.quick_push (vec_oprndsk[i + 1]);
3560 gcall *call;
3561 if (ifn != IFN_LAST)
3562 call = gimple_build_call_internal_vec (ifn, vargs);
3563 else
3564 call = gimple_build_call_vec (fndecl, vargs);
3565 new_temp = make_ssa_name (vec_dest, call);
3566 gimple_call_set_lhs (call, new_temp);
3567 gimple_call_set_nothrow (call, true);
3568 new_stmt_info
3569 = vect_finish_stmt_generation (stmt_info, call, gsi);
3570 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
3573 for (i = 0; i < nargs; i++)
3575 vec<tree> vec_oprndsi = vec_defs[i];
3576 vec_oprndsi.release ();
3578 continue;
3581 for (i = 0; i < nargs; i++)
3583 op = gimple_call_arg (stmt, i);
3584 if (j == 0)
3586 vec_oprnd0
3587 = vect_get_vec_def_for_operand (op, stmt_info);
3588 vec_oprnd1
3589 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd0);
3591 else
3593 vec_oprnd1 = gimple_call_arg (new_stmt_info->stmt,
3594 2 * i + 1);
3595 vec_oprnd0
3596 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd1);
3597 vec_oprnd1
3598 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd0);
3601 vargs.quick_push (vec_oprnd0);
3602 vargs.quick_push (vec_oprnd1);
3605 gcall *new_stmt = gimple_build_call_vec (fndecl, vargs);
3606 new_temp = make_ssa_name (vec_dest, new_stmt);
3607 gimple_call_set_lhs (new_stmt, new_temp);
3608 new_stmt_info
3609 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
3611 if (j == 0)
3612 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt_info;
3613 else
3614 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
3616 prev_stmt_info = new_stmt_info;
3619 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3621 else
3622 /* No current target implements this case. */
3623 return false;
3625 vargs.release ();
3627 /* The call in STMT might prevent it from being removed in DCE.
3628 However, we cannot remove it here because of the way the SSA name
3629 it defines is mapped to the new definition, so just replace the
3630 RHS of the statement with something harmless. */
3632 if (slp_node)
3633 return true;
3635 stmt_info = vect_orig_stmt (stmt_info);
3636 lhs = gimple_get_lhs (stmt_info->stmt);
3638 gassign *new_stmt
3639 = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
3640 vinfo->replace_stmt (gsi, stmt_info, new_stmt);
3642 return true;
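/* A hypothetical source-level sketch (not part of this file; the function
   name is made up) of a loop that the call-vectorization path above handles,
   assuming the target provides a vector square root and errno-setting is
   disabled (e.g. -fno-math-errno) so the call maps to an internal function
   rather than needing a library fndecl.  Each group of nunits lanes becomes
   one vector call, after which the scalar call's RHS is replaced by a
   harmless constant as described above.  */

void
example_sqrt_loop (float *__restrict out, const float *__restrict in, int n)
{
  for (int i = 0; i < n; i++)
    out[i] = __builtin_sqrtf (in[i]);
}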
3646 struct simd_call_arg_info
3648 tree vectype;
3649 tree op;
3650 HOST_WIDE_INT linear_step;
3651 enum vect_def_type dt;
3652 unsigned int align;
3653 bool simd_lane_linear;
3656 /* Helper function of vectorizable_simd_clone_call. If OP, an SSA_NAME,
3657 is linear within a simd lane (but not within the whole loop), note it
3658 in *ARGINFO. */
3660 static void
3661 vect_simd_lane_linear (tree op, struct loop *loop,
3662 struct simd_call_arg_info *arginfo)
3664 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
3666 if (!is_gimple_assign (def_stmt)
3667 || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
3668 || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
3669 return;
3671 tree base = gimple_assign_rhs1 (def_stmt);
3672 HOST_WIDE_INT linear_step = 0;
3673 tree v = gimple_assign_rhs2 (def_stmt);
3674 while (TREE_CODE (v) == SSA_NAME)
3676 tree t;
3677 def_stmt = SSA_NAME_DEF_STMT (v);
3678 if (is_gimple_assign (def_stmt))
3679 switch (gimple_assign_rhs_code (def_stmt))
3681 case PLUS_EXPR:
3682 t = gimple_assign_rhs2 (def_stmt);
3683 if (linear_step || TREE_CODE (t) != INTEGER_CST)
3684 return;
3685 base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
3686 v = gimple_assign_rhs1 (def_stmt);
3687 continue;
3688 case MULT_EXPR:
3689 t = gimple_assign_rhs2 (def_stmt);
3690 if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
3691 return;
3692 linear_step = tree_to_shwi (t);
3693 v = gimple_assign_rhs1 (def_stmt);
3694 continue;
3695 CASE_CONVERT:
3696 t = gimple_assign_rhs1 (def_stmt);
3697 if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
3698 || (TYPE_PRECISION (TREE_TYPE (v))
3699 < TYPE_PRECISION (TREE_TYPE (t))))
3700 return;
3701 if (!linear_step)
3702 linear_step = 1;
3703 v = t;
3704 continue;
3705 default:
3706 return;
3708 else if (gimple_call_internal_p (def_stmt, IFN_GOMP_SIMD_LANE)
3709 && loop->simduid
3710 && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
3711 && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
3712 == loop->simduid))
3714 if (!linear_step)
3715 linear_step = 1;
3716 arginfo->linear_step = linear_step;
3717 arginfo->op = base;
3718 arginfo->simd_lane_linear = true;
3719 return;
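/* For illustration only (a hypothetical GIMPLE shape; the SSA names and the
   step value are made up), the chain recognized above looks roughly like

     _1 = GOMP_SIMD_LANE (simduid.0);
     _2 = (sizetype) _1;
     _3 = _2 * 8;                          <- MULT_EXPR, linear_step = 8
     op_4 = &base_object + _3;             <- POINTER_PLUS_EXPR, OP

   in which case ARGINFO->op records the invariant base and
   ARGINFO->linear_step the accumulated step.  */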
3724 /* Return the number of elements in vector type VECTYPE, which is associated
3725 with a SIMD clone. At present these vectors always have a constant
3726 length. */
3728 static unsigned HOST_WIDE_INT
3729 simd_clone_subparts (tree vectype)
3731 return TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
3734 /* Function vectorizable_simd_clone_call.
3736 Check if STMT_INFO performs a function call that can be vectorized
3737 by calling a simd clone of the function.
3738 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
3739 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3740 Return true if STMT_INFO is vectorizable in this way. */
3742 static bool
3743 vectorizable_simd_clone_call (stmt_vec_info stmt_info,
3744 gimple_stmt_iterator *gsi,
3745 stmt_vec_info *vec_stmt, slp_tree slp_node,
3746 stmt_vector_for_cost *)
3748 tree vec_dest;
3749 tree scalar_dest;
3750 tree op, type;
3751 tree vec_oprnd0 = NULL_TREE;
3752 stmt_vec_info prev_stmt_info;
3753 tree vectype;
3754 unsigned int nunits;
3755 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3756 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3757 vec_info *vinfo = stmt_info->vinfo;
3758 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
3759 tree fndecl, new_temp;
3760 int ncopies, j;
3761 auto_vec<simd_call_arg_info> arginfo;
3762 vec<tree> vargs = vNULL;
3763 size_t i, nargs;
3764 tree lhs, rtype, ratype;
3765 vec<constructor_elt, va_gc> *ret_ctor_elts = NULL;
3767 /* Is STMT a vectorizable call? */
3768 gcall *stmt = dyn_cast <gcall *> (stmt_info->stmt);
3769 if (!stmt)
3770 return false;
3772 fndecl = gimple_call_fndecl (stmt);
3773 if (fndecl == NULL_TREE)
3774 return false;
3776 struct cgraph_node *node = cgraph_node::get (fndecl);
3777 if (node == NULL || node->simd_clones == NULL)
3778 return false;
3780 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3781 return false;
3783 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
3784 && ! vec_stmt)
3785 return false;
3787 if (gimple_call_lhs (stmt)
3788 && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
3789 return false;
3791 gcc_checking_assert (!stmt_can_throw_internal (cfun, stmt));
3793 vectype = STMT_VINFO_VECTYPE (stmt_info);
3795 if (loop_vinfo && nested_in_vect_loop_p (loop, stmt_info))
3796 return false;
3798 /* FORNOW */
3799 if (slp_node)
3800 return false;
3802 /* Process function arguments. */
3803 nargs = gimple_call_num_args (stmt);
3805 /* Bail out if the function has zero arguments. */
3806 if (nargs == 0)
3807 return false;
3809 arginfo.reserve (nargs, true);
3811 for (i = 0; i < nargs; i++)
3813 simd_call_arg_info thisarginfo;
3814 affine_iv iv;
3816 thisarginfo.linear_step = 0;
3817 thisarginfo.align = 0;
3818 thisarginfo.op = NULL_TREE;
3819 thisarginfo.simd_lane_linear = false;
3821 op = gimple_call_arg (stmt, i);
3822 if (!vect_is_simple_use (op, vinfo, &thisarginfo.dt,
3823 &thisarginfo.vectype)
3824 || thisarginfo.dt == vect_uninitialized_def)
3826 if (dump_enabled_p ())
3827 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3828 "use not simple.\n");
3829 return false;
3832 if (thisarginfo.dt == vect_constant_def
3833 || thisarginfo.dt == vect_external_def)
3834 gcc_assert (thisarginfo.vectype == NULL_TREE);
3835 else
3836 gcc_assert (thisarginfo.vectype != NULL_TREE);
3838 /* For linear arguments, the analysis phase should have saved
3839 the base and step in STMT_VINFO_SIMD_CLONE_INFO. */
3840 if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
3841 && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
3843 gcc_assert (vec_stmt);
3844 thisarginfo.linear_step
3845 = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
3846 thisarginfo.op
3847 = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
3848 thisarginfo.simd_lane_linear
3849 = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
3850 == boolean_true_node);
3851 /* If the loop has been peeled for alignment, we need to adjust it. */
3852 tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
3853 tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
3854 if (n1 != n2 && !thisarginfo.simd_lane_linear)
3856 tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
3857 tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
3858 tree opt = TREE_TYPE (thisarginfo.op);
3859 bias = fold_convert (TREE_TYPE (step), bias);
3860 bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
3861 thisarginfo.op
3862 = fold_build2 (POINTER_TYPE_P (opt)
3863 ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
3864 thisarginfo.op, bias);
3867 else if (!vec_stmt
3868 && thisarginfo.dt != vect_constant_def
3869 && thisarginfo.dt != vect_external_def
3870 && loop_vinfo
3871 && TREE_CODE (op) == SSA_NAME
3872 && simple_iv (loop, loop_containing_stmt (stmt), op,
3873 &iv, false)
3874 && tree_fits_shwi_p (iv.step))
3876 thisarginfo.linear_step = tree_to_shwi (iv.step);
3877 thisarginfo.op = iv.base;
3879 else if ((thisarginfo.dt == vect_constant_def
3880 || thisarginfo.dt == vect_external_def)
3881 && POINTER_TYPE_P (TREE_TYPE (op)))
3882 thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
3883 /* Addresses of array elements indexed by GOMP_SIMD_LANE are
3884 linear too. */
3885 if (POINTER_TYPE_P (TREE_TYPE (op))
3886 && !thisarginfo.linear_step
3887 && !vec_stmt
3888 && thisarginfo.dt != vect_constant_def
3889 && thisarginfo.dt != vect_external_def
3890 && loop_vinfo
3891 && !slp_node
3892 && TREE_CODE (op) == SSA_NAME)
3893 vect_simd_lane_linear (op, loop, &thisarginfo);
3895 arginfo.quick_push (thisarginfo);
3898 unsigned HOST_WIDE_INT vf;
3899 if (!LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
3901 if (dump_enabled_p ())
3902 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3903 "not considering SIMD clones; not yet supported"
3904 " for variable-width vectors.\n");
3905 return false;
3908 unsigned int badness = 0;
3909 struct cgraph_node *bestn = NULL;
3910 if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
3911 bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
3912 else
3913 for (struct cgraph_node *n = node->simd_clones; n != NULL;
3914 n = n->simdclone->next_clone)
3916 unsigned int this_badness = 0;
3917 if (n->simdclone->simdlen > vf
3918 || n->simdclone->nargs != nargs)
3919 continue;
3920 if (n->simdclone->simdlen < vf)
3921 this_badness += (exact_log2 (vf)
3922 - exact_log2 (n->simdclone->simdlen)) * 1024;
3923 if (n->simdclone->inbranch)
3924 this_badness += 2048;
3925 int target_badness = targetm.simd_clone.usable (n);
3926 if (target_badness < 0)
3927 continue;
3928 this_badness += target_badness * 512;
3929 /* FORNOW: We still have to add code to pass the mask argument. */
3930 if (n->simdclone->inbranch)
3931 continue;
3932 for (i = 0; i < nargs; i++)
3934 switch (n->simdclone->args[i].arg_type)
3936 case SIMD_CLONE_ARG_TYPE_VECTOR:
3937 if (!useless_type_conversion_p
3938 (n->simdclone->args[i].orig_type,
3939 TREE_TYPE (gimple_call_arg (stmt, i))))
3940 i = -1;
3941 else if (arginfo[i].dt == vect_constant_def
3942 || arginfo[i].dt == vect_external_def
3943 || arginfo[i].linear_step)
3944 this_badness += 64;
3945 break;
3946 case SIMD_CLONE_ARG_TYPE_UNIFORM:
3947 if (arginfo[i].dt != vect_constant_def
3948 && arginfo[i].dt != vect_external_def)
3949 i = -1;
3950 break;
3951 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
3952 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
3953 if (arginfo[i].dt == vect_constant_def
3954 || arginfo[i].dt == vect_external_def
3955 || (arginfo[i].linear_step
3956 != n->simdclone->args[i].linear_step))
3957 i = -1;
3958 break;
3959 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
3960 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
3961 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
3962 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
3963 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
3964 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
3965 /* FORNOW */
3966 i = -1;
3967 break;
3968 case SIMD_CLONE_ARG_TYPE_MASK:
3969 gcc_unreachable ();
3971 if (i == (size_t) -1)
3972 break;
3973 if (n->simdclone->args[i].alignment > arginfo[i].align)
3975 i = -1;
3976 break;
3978 if (arginfo[i].align)
3979 this_badness += (exact_log2 (arginfo[i].align)
3980 - exact_log2 (n->simdclone->args[i].alignment));
3982 if (i == (size_t) -1)
3983 continue;
3984 if (bestn == NULL || this_badness < badness)
3986 bestn = n;
3987 badness = this_badness;
3991 if (bestn == NULL)
3992 return false;
3994 for (i = 0; i < nargs; i++)
3995 if ((arginfo[i].dt == vect_constant_def
3996 || arginfo[i].dt == vect_external_def)
3997 && bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
3999 arginfo[i].vectype
4000 = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
4001 i)));
4002 if (arginfo[i].vectype == NULL
4003 || (simd_clone_subparts (arginfo[i].vectype)
4004 > bestn->simdclone->simdlen))
4005 return false;
4008 fndecl = bestn->decl;
4009 nunits = bestn->simdclone->simdlen;
4010 ncopies = vf / nunits;
4012 /* If the function isn't const, only allow it in simd loops where the
4013 user has asserted that at least nunits consecutive iterations can be
4014 performed using SIMD instructions. */
4015 if ((loop == NULL || (unsigned) loop->safelen < nunits)
4016 && gimple_vuse (stmt))
4017 return false;
4019 /* Sanity check: make sure that at least one copy of the vectorized stmt
4020 needs to be generated. */
4021 gcc_assert (ncopies >= 1);
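/* A worked example of the clone selection and unrolling above (all numbers
   hypothetical): with vf == 16 and two usable clones of simdlen 8 and 4, the
   simdlen-8 clone scores (exact_log2 (16) - exact_log2 (8)) * 1024 == 1024
   while the simdlen-4 clone scores 2048, so the wider clone wins; with
   nunits == 8 the transformation then emits ncopies == vf / nunits == 2
   clone calls per scalar call.  */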
4023 if (!vec_stmt) /* transformation not required. */
4025 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
4026 for (i = 0; i < nargs; i++)
4027 if ((bestn->simdclone->args[i].arg_type
4028 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
4029 || (bestn->simdclone->args[i].arg_type
4030 == SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP))
4032 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
4033 + 1);
4034 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
4035 tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
4036 ? size_type_node : TREE_TYPE (arginfo[i].op);
4037 tree ls = build_int_cst (lst, arginfo[i].linear_step);
4038 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
4039 tree sll = arginfo[i].simd_lane_linear
4040 ? boolean_true_node : boolean_false_node;
4041 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
4043 STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
4044 DUMP_VECT_SCOPE ("vectorizable_simd_clone_call");
4045 /* vect_model_simple_cost (stmt_info, ncopies, dt, slp_node, cost_vec); */
4046 return true;
4049 /* Transform. */
4051 if (dump_enabled_p ())
4052 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
4054 /* Handle def. */
4055 scalar_dest = gimple_call_lhs (stmt);
4056 vec_dest = NULL_TREE;
4057 rtype = NULL_TREE;
4058 ratype = NULL_TREE;
4059 if (scalar_dest)
4061 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4062 rtype = TREE_TYPE (TREE_TYPE (fndecl));
4063 if (TREE_CODE (rtype) == ARRAY_TYPE)
4065 ratype = rtype;
4066 rtype = TREE_TYPE (ratype);
4070 prev_stmt_info = NULL;
4071 for (j = 0; j < ncopies; ++j)
4073 /* Build argument list for the vectorized call. */
4074 if (j == 0)
4075 vargs.create (nargs);
4076 else
4077 vargs.truncate (0);
4079 for (i = 0; i < nargs; i++)
4081 unsigned int k, l, m, o;
4082 tree atype;
4083 op = gimple_call_arg (stmt, i);
4084 switch (bestn->simdclone->args[i].arg_type)
4086 case SIMD_CLONE_ARG_TYPE_VECTOR:
4087 atype = bestn->simdclone->args[i].vector_type;
4088 o = nunits / simd_clone_subparts (atype);
4089 for (m = j * o; m < (j + 1) * o; m++)
4091 if (simd_clone_subparts (atype)
4092 < simd_clone_subparts (arginfo[i].vectype))
4094 poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
4095 k = (simd_clone_subparts (arginfo[i].vectype)
4096 / simd_clone_subparts (atype));
4097 gcc_assert ((k & (k - 1)) == 0);
4098 if (m == 0)
4099 vec_oprnd0
4100 = vect_get_vec_def_for_operand (op, stmt_info);
4101 else
4103 vec_oprnd0 = arginfo[i].op;
4104 if ((m & (k - 1)) == 0)
4105 vec_oprnd0
4106 = vect_get_vec_def_for_stmt_copy (vinfo,
4107 vec_oprnd0);
4109 arginfo[i].op = vec_oprnd0;
4110 vec_oprnd0
4111 = build3 (BIT_FIELD_REF, atype, vec_oprnd0,
4112 bitsize_int (prec),
4113 bitsize_int ((m & (k - 1)) * prec));
4114 gassign *new_stmt
4115 = gimple_build_assign (make_ssa_name (atype),
4116 vec_oprnd0);
4117 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
4118 vargs.safe_push (gimple_assign_lhs (new_stmt));
4120 else
4122 k = (simd_clone_subparts (atype)
4123 / simd_clone_subparts (arginfo[i].vectype));
4124 gcc_assert ((k & (k - 1)) == 0);
4125 vec<constructor_elt, va_gc> *ctor_elts;
4126 if (k != 1)
4127 vec_alloc (ctor_elts, k);
4128 else
4129 ctor_elts = NULL;
4130 for (l = 0; l < k; l++)
4132 if (m == 0 && l == 0)
4133 vec_oprnd0
4134 = vect_get_vec_def_for_operand (op, stmt_info);
4135 else
4136 vec_oprnd0
4137 = vect_get_vec_def_for_stmt_copy (vinfo,
4138 arginfo[i].op);
4139 arginfo[i].op = vec_oprnd0;
4140 if (k == 1)
4141 break;
4142 CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
4143 vec_oprnd0);
4145 if (k == 1)
4146 vargs.safe_push (vec_oprnd0);
4147 else
4149 vec_oprnd0 = build_constructor (atype, ctor_elts);
4150 gassign *new_stmt
4151 = gimple_build_assign (make_ssa_name (atype),
4152 vec_oprnd0);
4153 vect_finish_stmt_generation (stmt_info, new_stmt,
4154 gsi);
4155 vargs.safe_push (gimple_assign_lhs (new_stmt));
4159 break;
4160 case SIMD_CLONE_ARG_TYPE_UNIFORM:
4161 vargs.safe_push (op);
4162 break;
4163 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
4164 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
4165 if (j == 0)
4167 gimple_seq stmts;
4168 arginfo[i].op
4169 = force_gimple_operand (arginfo[i].op, &stmts, true,
4170 NULL_TREE);
4171 if (stmts != NULL)
4173 basic_block new_bb;
4174 edge pe = loop_preheader_edge (loop);
4175 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
4176 gcc_assert (!new_bb);
4178 if (arginfo[i].simd_lane_linear)
4180 vargs.safe_push (arginfo[i].op);
4181 break;
4183 tree phi_res = copy_ssa_name (op);
4184 gphi *new_phi = create_phi_node (phi_res, loop->header);
4185 loop_vinfo->add_stmt (new_phi);
4186 add_phi_arg (new_phi, arginfo[i].op,
4187 loop_preheader_edge (loop), UNKNOWN_LOCATION);
4188 enum tree_code code
4189 = POINTER_TYPE_P (TREE_TYPE (op))
4190 ? POINTER_PLUS_EXPR : PLUS_EXPR;
4191 tree type = POINTER_TYPE_P (TREE_TYPE (op))
4192 ? sizetype : TREE_TYPE (op);
4193 widest_int cst
4194 = wi::mul (bestn->simdclone->args[i].linear_step,
4195 ncopies * nunits);
4196 tree tcst = wide_int_to_tree (type, cst);
4197 tree phi_arg = copy_ssa_name (op);
4198 gassign *new_stmt
4199 = gimple_build_assign (phi_arg, code, phi_res, tcst);
4200 gimple_stmt_iterator si = gsi_after_labels (loop->header);
4201 gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
4202 loop_vinfo->add_stmt (new_stmt);
4203 add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
4204 UNKNOWN_LOCATION);
4205 arginfo[i].op = phi_res;
4206 vargs.safe_push (phi_res);
4208 else
4210 enum tree_code code
4211 = POINTER_TYPE_P (TREE_TYPE (op))
4212 ? POINTER_PLUS_EXPR : PLUS_EXPR;
4213 tree type = POINTER_TYPE_P (TREE_TYPE (op))
4214 ? sizetype : TREE_TYPE (op);
4215 widest_int cst
4216 = wi::mul (bestn->simdclone->args[i].linear_step,
4217 j * nunits);
4218 tree tcst = wide_int_to_tree (type, cst);
4219 new_temp = make_ssa_name (TREE_TYPE (op));
4220 gassign *new_stmt
4221 = gimple_build_assign (new_temp, code,
4222 arginfo[i].op, tcst);
4223 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
4224 vargs.safe_push (new_temp);
4226 break;
4227 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
4228 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
4229 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
4230 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
4231 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
4232 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
4233 default:
4234 gcc_unreachable ();
4238 gcall *new_call = gimple_build_call_vec (fndecl, vargs);
4239 if (vec_dest)
4241 gcc_assert (ratype || simd_clone_subparts (rtype) == nunits);
4242 if (ratype)
4243 new_temp = create_tmp_var (ratype);
4244 else if (simd_clone_subparts (vectype)
4245 == simd_clone_subparts (rtype))
4246 new_temp = make_ssa_name (vec_dest, new_call);
4247 else
4248 new_temp = make_ssa_name (rtype, new_call);
4249 gimple_call_set_lhs (new_call, new_temp);
4251 stmt_vec_info new_stmt_info
4252 = vect_finish_stmt_generation (stmt_info, new_call, gsi);
4254 if (vec_dest)
4256 if (simd_clone_subparts (vectype) < nunits)
4258 unsigned int k, l;
4259 poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
4260 poly_uint64 bytes = GET_MODE_SIZE (TYPE_MODE (vectype));
4261 k = nunits / simd_clone_subparts (vectype);
4262 gcc_assert ((k & (k - 1)) == 0);
4263 for (l = 0; l < k; l++)
4265 tree t;
4266 if (ratype)
4268 t = build_fold_addr_expr (new_temp);
4269 t = build2 (MEM_REF, vectype, t,
4270 build_int_cst (TREE_TYPE (t), l * bytes));
4272 else
4273 t = build3 (BIT_FIELD_REF, vectype, new_temp,
4274 bitsize_int (prec), bitsize_int (l * prec));
4275 gimple *new_stmt
4276 = gimple_build_assign (make_ssa_name (vectype), t);
4277 new_stmt_info
4278 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
4280 if (j == 0 && l == 0)
4281 STMT_VINFO_VEC_STMT (stmt_info)
4282 = *vec_stmt = new_stmt_info;
4283 else
4284 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
4286 prev_stmt_info = new_stmt_info;
4289 if (ratype)
4290 vect_clobber_variable (stmt_info, gsi, new_temp);
4291 continue;
4293 else if (simd_clone_subparts (vectype) > nunits)
4295 unsigned int k = (simd_clone_subparts (vectype)
4296 / simd_clone_subparts (rtype));
4297 gcc_assert ((k & (k - 1)) == 0);
4298 if ((j & (k - 1)) == 0)
4299 vec_alloc (ret_ctor_elts, k);
4300 if (ratype)
4302 unsigned int m, o = nunits / simd_clone_subparts (rtype);
4303 for (m = 0; m < o; m++)
4305 tree tem = build4 (ARRAY_REF, rtype, new_temp,
4306 size_int (m), NULL_TREE, NULL_TREE);
4307 gimple *new_stmt
4308 = gimple_build_assign (make_ssa_name (rtype), tem);
4309 new_stmt_info
4310 = vect_finish_stmt_generation (stmt_info, new_stmt,
4311 gsi);
4312 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
4313 gimple_assign_lhs (new_stmt));
4315 vect_clobber_variable (stmt_info, gsi, new_temp);
4317 else
4318 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
4319 if ((j & (k - 1)) != k - 1)
4320 continue;
4321 vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
4322 gimple *new_stmt
4323 = gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
4324 new_stmt_info
4325 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
4327 if ((unsigned) j == k - 1)
4328 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
4329 else
4330 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
4332 prev_stmt_info = new_stmt_info;
4333 continue;
4335 else if (ratype)
4337 tree t = build_fold_addr_expr (new_temp);
4338 t = build2 (MEM_REF, vectype, t,
4339 build_int_cst (TREE_TYPE (t), 0));
4340 gimple *new_stmt
4341 = gimple_build_assign (make_ssa_name (vec_dest), t);
4342 new_stmt_info
4343 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
4344 vect_clobber_variable (stmt_info, gsi, new_temp);
4348 if (j == 0)
4349 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
4350 else
4351 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
4353 prev_stmt_info = new_stmt_info;
4356 vargs.release ();
4358 /* The call in STMT might prevent it from being removed in DCE.
4359 However, we cannot remove it here because of the way the SSA name
4360 it defines is mapped to the new definition, so just replace the
4361 RHS of the statement with something harmless. */
4363 if (slp_node)
4364 return true;
4366 gimple *new_stmt;
4367 if (scalar_dest)
4369 type = TREE_TYPE (scalar_dest);
4370 lhs = gimple_call_lhs (vect_orig_stmt (stmt_info)->stmt);
4371 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
4373 else
4374 new_stmt = gimple_build_nop ();
4375 vinfo->replace_stmt (gsi, vect_orig_stmt (stmt_info), new_stmt);
4376 unlink_stmt_vdef (stmt);
4378 return true;
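/* A hypothetical sketch (function names are made up) of user code handled by
   the simd-clone path above: with an OpenMP declare simd declaration (and
   -fopenmp or -fopenmp-simd) the callee gets SIMD clones, and the scalar
   call below is replaced by a call to the clone whose simdlen best matches
   the vectorization factor; the clone's symbol name follows the target's
   vector function ABI.  */

#pragma omp declare simd notinbranch
extern float example_simd_fn (float x);

void
example_simd_clone_loop (float *__restrict out, const float *__restrict in,
			 int n)
{
  for (int i = 0; i < n; i++)
    out[i] = example_simd_fn (in[i]);
}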
4382 /* Function vect_gen_widened_results_half
4384 Create a vector stmt whose code is CODE, whose operand arity is OP_TYPE,
4385 and whose result variable is VEC_DEST; its arguments are VEC_OPRND0 and
4386 VEC_OPRND1. The new vector stmt is to be inserted at GSI.
4387 In the case that CODE is a CALL_EXPR, this means that a call to DECL
4388 needs to be created (DECL is the function decl of a target builtin).
4389 STMT_INFO is the original scalar stmt that we are vectorizing. */
4391 static gimple *
4392 vect_gen_widened_results_half (enum tree_code code,
4393 tree decl,
4394 tree vec_oprnd0, tree vec_oprnd1, int op_type,
4395 tree vec_dest, gimple_stmt_iterator *gsi,
4396 stmt_vec_info stmt_info)
4398 gimple *new_stmt;
4399 tree new_temp;
4401 /* Generate half of the widened result: */
4402 if (code == CALL_EXPR)
4404 /* Target specific support */
4405 if (op_type == binary_op)
4406 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
4407 else
4408 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
4409 new_temp = make_ssa_name (vec_dest, new_stmt);
4410 gimple_call_set_lhs (new_stmt, new_temp);
4412 else
4414 /* Generic support */
4415 gcc_assert (op_type == TREE_CODE_LENGTH (code));
4416 if (op_type != binary_op)
4417 vec_oprnd1 = NULL;
4418 new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
4419 new_temp = make_ssa_name (vec_dest, new_stmt);
4420 gimple_assign_set_lhs (new_stmt, new_temp);
4422 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
4424 return new_stmt;
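/* A hypothetical example (assuming 128-bit vectors and a target with
   widening-multiply support) of a statement vectorized with the two halves
   generated above: for each pair of V8HI operands the caller emits one
   VEC_WIDEN_MULT_LO_EXPR and one VEC_WIDEN_MULT_HI_EXPR statement, each
   producing a V4SI result.  */

void
example_widen_mult (int *__restrict out, const short *__restrict a,
		    const short *__restrict b, int n)
{
  for (int i = 0; i < n; i++)
    out[i] = a[i] * b[i];
}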
4428 /* Get vectorized definitions for loop-based vectorization of STMT_INFO.
4429 For the first operand we call vect_get_vec_def_for_operand (with OPRND
4430 containing scalar operand), and for the rest we get a copy with
4431 vect_get_vec_def_for_stmt_copy() using the previous vector definition
4432 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
4433 The vectors are collected into VEC_OPRNDS. */
4435 static void
4436 vect_get_loop_based_defs (tree *oprnd, stmt_vec_info stmt_info,
4437 vec<tree> *vec_oprnds, int multi_step_cvt)
4439 vec_info *vinfo = stmt_info->vinfo;
4440 tree vec_oprnd;
4442 /* Get the first vector operand. */
4443 /* All the vector operands except the very first one (that is, the scalar
4444 operand) are stmt copies. */
4445 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
4446 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt_info);
4447 else
4448 vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, *oprnd);
4450 vec_oprnds->quick_push (vec_oprnd);
4452 /* Get second vector operand. */
4453 vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd);
4454 vec_oprnds->quick_push (vec_oprnd);
4456 *oprnd = vec_oprnd;
4458 /* For conversion in multiple steps, continue to get operands
4459 recursively. */
4460 if (multi_step_cvt)
4461 vect_get_loop_based_defs (oprnd, stmt_info, vec_oprnds,
4462 multi_step_cvt - 1);
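/* For example (hypothetical numbers): a NARROW conversion with
   multi_step_cvt == 1 calls this with vect_pow2 (1) - 1 == 1, so the two
   levels of recursion collect 2 * 2 == 4 vector defs, which the demotion
   code below reduces to 2 and finally to 1 result vector.  */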
4466 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
4467 For multi-step conversions store the resulting vectors and call the function
4468 recursively. */
4470 static void
4471 vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
4472 int multi_step_cvt,
4473 stmt_vec_info stmt_info,
4474 vec<tree> vec_dsts,
4475 gimple_stmt_iterator *gsi,
4476 slp_tree slp_node, enum tree_code code,
4477 stmt_vec_info *prev_stmt_info)
4479 unsigned int i;
4480 tree vop0, vop1, new_tmp, vec_dest;
4482 vec_dest = vec_dsts.pop ();
4484 for (i = 0; i < vec_oprnds->length (); i += 2)
4486 /* Create demotion operation. */
4487 vop0 = (*vec_oprnds)[i];
4488 vop1 = (*vec_oprnds)[i + 1];
4489 gassign *new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
4490 new_tmp = make_ssa_name (vec_dest, new_stmt);
4491 gimple_assign_set_lhs (new_stmt, new_tmp);
4492 stmt_vec_info new_stmt_info
4493 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
4495 if (multi_step_cvt)
4496 /* Store the resulting vector for next recursive call. */
4497 (*vec_oprnds)[i/2] = new_tmp;
4498 else
4500 /* This is the last step of the conversion sequence. Store the
4501 vectors in SLP_NODE or in the vector info of the scalar statement
4502 (or in the STMT_VINFO_RELATED_STMT chain). */
4503 if (slp_node)
4504 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
4505 else
4507 if (!*prev_stmt_info)
4508 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt_info;
4509 else
4510 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt_info;
4512 *prev_stmt_info = new_stmt_info;
4517 /* For multi-step demotion operations we first generate demotion operations
4518 from the source type to the intermediate types, and then combine the
4519 results (stored in VEC_OPRNDS) with a demotion operation to the
4520 destination type. */
4521 if (multi_step_cvt)
4523 /* At each level of recursion we have half of the operands we had at the
4524 previous level. */
4525 vec_oprnds->truncate ((i+1)/2);
4526 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
4527 stmt_info, vec_dsts, gsi,
4528 slp_node, VEC_PACK_TRUNC_EXPR,
4529 prev_stmt_info);
4532 vec_dsts.quick_push (vec_dest);
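/* A hypothetical narrowing example for the demotion code above (assuming
   128-bit vectors): each pair of V4SI operands is combined by a
   VEC_PACK_TRUNC_EXPR (or whatever CODE the caller passed) into a single
   V8HI result.  */

void
example_demote (short *__restrict out, const int *__restrict in, int n)
{
  for (int i = 0; i < n; i++)
    out[i] = (short) in[i];
}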
4536 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
4537 and VEC_OPRNDS1, for a binary operation associated with scalar statement
4538 STMT_INFO. For multi-step conversions store the resulting vectors and
4539 call the function recursively. */
4541 static void
4542 vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
4543 vec<tree> *vec_oprnds1,
4544 stmt_vec_info stmt_info, tree vec_dest,
4545 gimple_stmt_iterator *gsi,
4546 enum tree_code code1,
4547 enum tree_code code2, tree decl1,
4548 tree decl2, int op_type)
4550 int i;
4551 tree vop0, vop1, new_tmp1, new_tmp2;
4552 gimple *new_stmt1, *new_stmt2;
4553 vec<tree> vec_tmp = vNULL;
4555 vec_tmp.create (vec_oprnds0->length () * 2);
4556 FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
4558 if (op_type == binary_op)
4559 vop1 = (*vec_oprnds1)[i];
4560 else
4561 vop1 = NULL_TREE;
4563 /* Generate the two halves of the promotion operation. */
4564 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
4565 op_type, vec_dest, gsi,
4566 stmt_info);
4567 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
4568 op_type, vec_dest, gsi,
4569 stmt_info);
4570 if (is_gimple_call (new_stmt1))
4572 new_tmp1 = gimple_call_lhs (new_stmt1);
4573 new_tmp2 = gimple_call_lhs (new_stmt2);
4575 else
4577 new_tmp1 = gimple_assign_lhs (new_stmt1);
4578 new_tmp2 = gimple_assign_lhs (new_stmt2);
4581 /* Store the results for the next step. */
4582 vec_tmp.quick_push (new_tmp1);
4583 vec_tmp.quick_push (new_tmp2);
4586 vec_oprnds0->release ();
4587 *vec_oprnds0 = vec_tmp;
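/* A hypothetical widening example for the promotion code above (assuming
   128-bit vectors): each V8HI operand is expanded into two V4SI halves,
   typically via the VEC_UNPACK_LO_EXPR / VEC_UNPACK_HI_EXPR pair chosen by
   supportable_widening_operation.  */

void
example_promote (int *__restrict out, const short *__restrict in, int n)
{
  for (int i = 0; i < n; i++)
    out[i] = in[i];
}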
4591 /* Check if STMT_INFO performs a conversion operation that can be vectorized.
4592 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
4593 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4594 Return true if STMT_INFO is vectorizable in this way. */
4596 static bool
4597 vectorizable_conversion (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
4598 stmt_vec_info *vec_stmt, slp_tree slp_node,
4599 stmt_vector_for_cost *cost_vec)
4601 tree vec_dest;
4602 tree scalar_dest;
4603 tree op0, op1 = NULL_TREE;
4604 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
4605 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4606 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
4607 enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
4608 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
4609 tree new_temp;
4610 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4611 int ndts = 2;
4612 stmt_vec_info prev_stmt_info;
4613 poly_uint64 nunits_in;
4614 poly_uint64 nunits_out;
4615 tree vectype_out, vectype_in;
4616 int ncopies, i, j;
4617 tree lhs_type, rhs_type;
4618 enum { NARROW, NONE, WIDEN } modifier;
4619 vec<tree> vec_oprnds0 = vNULL;
4620 vec<tree> vec_oprnds1 = vNULL;
4621 tree vop0;
4622 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4623 vec_info *vinfo = stmt_info->vinfo;
4624 int multi_step_cvt = 0;
4625 vec<tree> interm_types = vNULL;
4626 tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
4627 int op_type;
4628 unsigned short fltsz;
4630 /* Is STMT a vectorizable conversion? */
4632 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4633 return false;
4635 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4636 && ! vec_stmt)
4637 return false;
4639 gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
4640 if (!stmt)
4641 return false;
4643 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4644 return false;
4646 code = gimple_assign_rhs_code (stmt);
4647 if (!CONVERT_EXPR_CODE_P (code)
4648 && code != FIX_TRUNC_EXPR
4649 && code != FLOAT_EXPR
4650 && code != WIDEN_MULT_EXPR
4651 && code != WIDEN_LSHIFT_EXPR)
4652 return false;
4654 op_type = TREE_CODE_LENGTH (code);
4656 /* Check types of lhs and rhs. */
4657 scalar_dest = gimple_assign_lhs (stmt);
4658 lhs_type = TREE_TYPE (scalar_dest);
4659 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4661 op0 = gimple_assign_rhs1 (stmt);
4662 rhs_type = TREE_TYPE (op0);
4664 if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
4665 && !((INTEGRAL_TYPE_P (lhs_type)
4666 && INTEGRAL_TYPE_P (rhs_type))
4667 || (SCALAR_FLOAT_TYPE_P (lhs_type)
4668 && SCALAR_FLOAT_TYPE_P (rhs_type))))
4669 return false;
4671 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
4672 && ((INTEGRAL_TYPE_P (lhs_type)
4673 && !type_has_mode_precision_p (lhs_type))
4674 || (INTEGRAL_TYPE_P (rhs_type)
4675 && !type_has_mode_precision_p (rhs_type))))
4677 if (dump_enabled_p ())
4678 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4679 "type conversion to/from bit-precision unsupported."
4680 "\n");
4681 return false;
4684 /* Check the operands of the operation. */
4685 if (!vect_is_simple_use (op0, vinfo, &dt[0], &vectype_in))
4687 if (dump_enabled_p ())
4688 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4689 "use not simple.\n");
4690 return false;
4692 if (op_type == binary_op)
4694 bool ok;
4696 op1 = gimple_assign_rhs2 (stmt);
4697 gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
4698 /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
4699 OP1. */
4700 if (CONSTANT_CLASS_P (op0))
4701 ok = vect_is_simple_use (op1, vinfo, &dt[1], &vectype_in);
4702 else
4703 ok = vect_is_simple_use (op1, vinfo, &dt[1]);
4705 if (!ok)
4707 if (dump_enabled_p ())
4708 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4709 "use not simple.\n");
4710 return false;
4714 /* If op0 is an external or constant def, use a vector type of
4715 the same size as the output vector type. */
4716 if (!vectype_in)
4717 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
4718 if (vec_stmt)
4719 gcc_assert (vectype_in);
4720 if (!vectype_in)
4722 if (dump_enabled_p ())
4723 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4724 "no vectype for scalar type %T\n", rhs_type);
4726 return false;
4729 if (VECTOR_BOOLEAN_TYPE_P (vectype_out)
4730 && !VECTOR_BOOLEAN_TYPE_P (vectype_in))
4732 if (dump_enabled_p ())
4733 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4734 "can't convert between boolean and non "
4735 "boolean vectors %T\n", rhs_type);
4737 return false;
4740 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
4741 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4742 if (known_eq (nunits_out, nunits_in))
4743 modifier = NONE;
4744 else if (multiple_p (nunits_out, nunits_in))
4745 modifier = NARROW;
4746 else
4748 gcc_checking_assert (multiple_p (nunits_in, nunits_out));
4749 modifier = WIDEN;
4752 /* Multiple types in SLP are handled by creating the appropriate number of
4753 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4754 case of SLP. */
4755 if (slp_node)
4756 ncopies = 1;
4757 else if (modifier == NARROW)
4758 ncopies = vect_get_num_copies (loop_vinfo, vectype_out);
4759 else
4760 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
4762 /* Sanity check: make sure that at least one copy of the vectorized stmt
4763 needs to be generated. */
4764 gcc_assert (ncopies >= 1);
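/* Worked example (hypothetical 128-bit vectors): converting int to short
   gives nunits_in == 4 (V4SI) and nunits_out == 8 (V8HI), so nunits_out is
   a multiple of nunits_in and modifier == NARROW; with vf == 8 that needs
   ncopies == vf / nunits_out == 1 copy, each consuming two input vectors.
   Converting short to int is the reverse case (modifier == WIDEN, ncopies
   computed from nunits_in).  */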
4766 bool found_mode = false;
4767 scalar_mode lhs_mode = SCALAR_TYPE_MODE (lhs_type);
4768 scalar_mode rhs_mode = SCALAR_TYPE_MODE (rhs_type);
4769 opt_scalar_mode rhs_mode_iter;
4771 /* Supportable by target? */
4772 switch (modifier)
4774 case NONE:
4775 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
4776 return false;
4777 if (supportable_convert_operation (code, vectype_out, vectype_in,
4778 &decl1, &code1))
4779 break;
4780 /* FALLTHRU */
4781 unsupported:
4782 if (dump_enabled_p ())
4783 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4784 "conversion not supported by target.\n");
4785 return false;
4787 case WIDEN:
4788 if (supportable_widening_operation (code, stmt_info, vectype_out,
4789 vectype_in, &code1, &code2,
4790 &multi_step_cvt, &interm_types))
4792 /* Binary widening operation can only be supported directly by the
4793 architecture. */
4794 gcc_assert (!(multi_step_cvt && op_type == binary_op));
4795 break;
4798 if (code != FLOAT_EXPR
4799 || GET_MODE_SIZE (lhs_mode) <= GET_MODE_SIZE (rhs_mode))
4800 goto unsupported;
4802 fltsz = GET_MODE_SIZE (lhs_mode);
4803 FOR_EACH_2XWIDER_MODE (rhs_mode_iter, rhs_mode)
4805 rhs_mode = rhs_mode_iter.require ();
4806 if (GET_MODE_SIZE (rhs_mode) > fltsz)
4807 break;
4809 cvt_type
4810 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
4811 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
4812 if (cvt_type == NULL_TREE)
4813 goto unsupported;
4815 if (GET_MODE_SIZE (rhs_mode) == fltsz)
4817 if (!supportable_convert_operation (code, vectype_out,
4818 cvt_type, &decl1, &codecvt1))
4819 goto unsupported;
4821 else if (!supportable_widening_operation (code, stmt_info,
4822 vectype_out, cvt_type,
4823 &codecvt1, &codecvt2,
4824 &multi_step_cvt,
4825 &interm_types))
4826 continue;
4827 else
4828 gcc_assert (multi_step_cvt == 0);
4830 if (supportable_widening_operation (NOP_EXPR, stmt_info, cvt_type,
4831 vectype_in, &code1, &code2,
4832 &multi_step_cvt, &interm_types))
4834 found_mode = true;
4835 break;
4839 if (!found_mode)
4840 goto unsupported;
4842 if (GET_MODE_SIZE (rhs_mode) == fltsz)
4843 codecvt2 = ERROR_MARK;
4844 else
4846 multi_step_cvt++;
4847 interm_types.safe_push (cvt_type);
4848 cvt_type = NULL_TREE;
4850 break;
4852 case NARROW:
4853 gcc_assert (op_type == unary_op);
4854 if (supportable_narrowing_operation (code, vectype_out, vectype_in,
4855 &code1, &multi_step_cvt,
4856 &interm_types))
4857 break;
4859 if (code != FIX_TRUNC_EXPR
4860 || GET_MODE_SIZE (lhs_mode) >= GET_MODE_SIZE (rhs_mode))
4861 goto unsupported;
4863 cvt_type
4864 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
4865 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
4866 if (cvt_type == NULL_TREE)
4867 goto unsupported;
4868 if (!supportable_convert_operation (code, cvt_type, vectype_in,
4869 &decl1, &codecvt1))
4870 goto unsupported;
4871 if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
4872 &code1, &multi_step_cvt,
4873 &interm_types))
4874 break;
4875 goto unsupported;
4877 default:
4878 gcc_unreachable ();
4881 if (!vec_stmt) /* transformation not required. */
4883 DUMP_VECT_SCOPE ("vectorizable_conversion");
4884 if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
4886 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
4887 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node,
4888 cost_vec);
4890 else if (modifier == NARROW)
4892 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
4893 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt,
4894 cost_vec);
4896 else
4898 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
4899 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt,
4900 cost_vec);
4902 interm_types.release ();
4903 return true;
4906 /* Transform. */
4907 if (dump_enabled_p ())
4908 dump_printf_loc (MSG_NOTE, vect_location,
4909 "transform conversion. ncopies = %d.\n", ncopies);
4911 if (op_type == binary_op)
4913 if (CONSTANT_CLASS_P (op0))
4914 op0 = fold_convert (TREE_TYPE (op1), op0);
4915 else if (CONSTANT_CLASS_P (op1))
4916 op1 = fold_convert (TREE_TYPE (op0), op1);
4919 /* In case of a multi-step conversion, we first generate conversion
4920 operations to the intermediate types, and then from those types to the
4921 final one. We create vector destinations for the intermediate types
4922 (TYPES) received from supportable_*_operation, and store them in the
4923 correct order for future use in vect_create_vectorized_*_stmts (). */
4924 auto_vec<tree> vec_dsts (multi_step_cvt + 1);
4925 vec_dest = vect_create_destination_var (scalar_dest,
4926 (cvt_type && modifier == WIDEN)
4927 ? cvt_type : vectype_out);
4928 vec_dsts.quick_push (vec_dest);
4930 if (multi_step_cvt)
4932 for (i = interm_types.length () - 1;
4933 interm_types.iterate (i, &intermediate_type); i--)
4935 vec_dest = vect_create_destination_var (scalar_dest,
4936 intermediate_type);
4937 vec_dsts.quick_push (vec_dest);
4941 if (cvt_type)
4942 vec_dest = vect_create_destination_var (scalar_dest,
4943 modifier == WIDEN
4944 ? vectype_out : cvt_type);
4946 if (!slp_node)
4948 if (modifier == WIDEN)
4950 vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
4951 if (op_type == binary_op)
4952 vec_oprnds1.create (1);
4954 else if (modifier == NARROW)
4955 vec_oprnds0.create (
4956 2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
4958 else if (code == WIDEN_LSHIFT_EXPR)
4959 vec_oprnds1.create (slp_node->vec_stmts_size);
4961 last_oprnd = op0;
4962 prev_stmt_info = NULL;
4963 switch (modifier)
4965 case NONE:
4966 for (j = 0; j < ncopies; j++)
4968 if (j == 0)
4969 vect_get_vec_defs (op0, NULL, stmt_info, &vec_oprnds0,
4970 NULL, slp_node);
4971 else
4972 vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds0, NULL);
4974 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4976 stmt_vec_info new_stmt_info;
4977 /* Arguments are ready, create the new vector stmt. */
4978 if (code1 == CALL_EXPR)
4980 gcall *new_stmt = gimple_build_call (decl1, 1, vop0);
4981 new_temp = make_ssa_name (vec_dest, new_stmt);
4982 gimple_call_set_lhs (new_stmt, new_temp);
4983 new_stmt_info
4984 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
4986 else
4988 gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
4989 gassign *new_stmt
4990 = gimple_build_assign (vec_dest, code1, vop0);
4991 new_temp = make_ssa_name (vec_dest, new_stmt);
4992 gimple_assign_set_lhs (new_stmt, new_temp);
4993 new_stmt_info
4994 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
4997 if (slp_node)
4998 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
4999 else
5001 if (!prev_stmt_info)
5002 STMT_VINFO_VEC_STMT (stmt_info)
5003 = *vec_stmt = new_stmt_info;
5004 else
5005 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
5006 prev_stmt_info = new_stmt_info;
5010 break;
5012 case WIDEN:
5013 /* In case the vectorization factor (VF) is bigger than the number
5014 of elements that we can fit in a vectype (nunits), we have to
5015 generate more than one vector stmt, i.e., we need to "unroll"
5016 the vector stmt by a factor of VF/nunits. */
5017 for (j = 0; j < ncopies; j++)
5019 /* Handle uses. */
5020 if (j == 0)
5022 if (slp_node)
5024 if (code == WIDEN_LSHIFT_EXPR)
5026 unsigned int k;
5028 vec_oprnd1 = op1;
5029 /* Store vec_oprnd1 for every vector stmt to be created
5030 for SLP_NODE. We check during the analysis that all
5031 the shift arguments are the same. */
5032 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
5033 vec_oprnds1.quick_push (vec_oprnd1);
5035 vect_get_vec_defs (op0, NULL_TREE, stmt_info,
5036 &vec_oprnds0, NULL, slp_node);
5038 else
5039 vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0,
5040 &vec_oprnds1, slp_node);
5042 else
5044 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt_info);
5045 vec_oprnds0.quick_push (vec_oprnd0);
5046 if (op_type == binary_op)
5048 if (code == WIDEN_LSHIFT_EXPR)
5049 vec_oprnd1 = op1;
5050 else
5051 vec_oprnd1
5052 = vect_get_vec_def_for_operand (op1, stmt_info);
5053 vec_oprnds1.quick_push (vec_oprnd1);
5057 else
5059 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd0);
5060 vec_oprnds0.truncate (0);
5061 vec_oprnds0.quick_push (vec_oprnd0);
5062 if (op_type == binary_op)
5064 if (code == WIDEN_LSHIFT_EXPR)
5065 vec_oprnd1 = op1;
5066 else
5067 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (vinfo,
5068 vec_oprnd1);
5069 vec_oprnds1.truncate (0);
5070 vec_oprnds1.quick_push (vec_oprnd1);
5074 /* Arguments are ready. Create the new vector stmts. */
5075 for (i = multi_step_cvt; i >= 0; i--)
5077 tree this_dest = vec_dsts[i];
5078 enum tree_code c1 = code1, c2 = code2;
5079 if (i == 0 && codecvt2 != ERROR_MARK)
5081 c1 = codecvt1;
5082 c2 = codecvt2;
5084 vect_create_vectorized_promotion_stmts (&vec_oprnds0,
5085 &vec_oprnds1, stmt_info,
5086 this_dest, gsi,
5087 c1, c2, decl1, decl2,
5088 op_type);
5091 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
5093 stmt_vec_info new_stmt_info;
5094 if (cvt_type)
5096 if (codecvt1 == CALL_EXPR)
5098 gcall *new_stmt = gimple_build_call (decl1, 1, vop0);
5099 new_temp = make_ssa_name (vec_dest, new_stmt);
5100 gimple_call_set_lhs (new_stmt, new_temp);
5101 new_stmt_info
5102 = vect_finish_stmt_generation (stmt_info, new_stmt,
5103 gsi);
5105 else
5107 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
5108 new_temp = make_ssa_name (vec_dest);
5109 gassign *new_stmt
5110 = gimple_build_assign (new_temp, codecvt1, vop0);
5111 new_stmt_info
5112 = vect_finish_stmt_generation (stmt_info, new_stmt,
5113 gsi);
5116 else
5117 new_stmt_info = vinfo->lookup_def (vop0);
5119 if (slp_node)
5120 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
5121 else
5123 if (!prev_stmt_info)
5124 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt_info;
5125 else
5126 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
5127 prev_stmt_info = new_stmt_info;
5132 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
5133 break;
5135 case NARROW:
5136 /* In case the vectorization factor (VF) is bigger than the number
5137 of elements that we can fit in a vectype (nunits), we have to
5138 generate more than one vector stmt, i.e., we need to "unroll"
5139 the vector stmt by a factor of VF/nunits. */
5140 for (j = 0; j < ncopies; j++)
5142 /* Handle uses. */
5143 if (slp_node)
5144 vect_get_vec_defs (op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL,
5145 slp_node);
5146 else
5148 vec_oprnds0.truncate (0);
5149 vect_get_loop_based_defs (&last_oprnd, stmt_info, &vec_oprnds0,
5150 vect_pow2 (multi_step_cvt) - 1);
5153 /* Arguments are ready. Create the new vector stmts. */
5154 if (cvt_type)
5155 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
5157 if (codecvt1 == CALL_EXPR)
5159 gcall *new_stmt = gimple_build_call (decl1, 1, vop0);
5160 new_temp = make_ssa_name (vec_dest, new_stmt);
5161 gimple_call_set_lhs (new_stmt, new_temp);
5162 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
5164 else
5166 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
5167 new_temp = make_ssa_name (vec_dest);
5168 gassign *new_stmt
5169 = gimple_build_assign (new_temp, codecvt1, vop0);
5170 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
5173 vec_oprnds0[i] = new_temp;
5176 vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
5177 stmt_info, vec_dsts, gsi,
5178 slp_node, code1,
5179 &prev_stmt_info);
5182 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
5183 break;
5186 vec_oprnds0.release ();
5187 vec_oprnds1.release ();
5188 interm_types.release ();
5190 return true;
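/* A hypothetical same-width conversion (modifier == NONE, assuming 128-bit
   vectors): int to float keeps the lane count, so each copy becomes a single
   FLOAT_EXPR from V4SI to V4SF, or a call to the target builtin DECL1 when
   supportable_convert_operation returns one.  */

void
example_convert (float *__restrict out, const int *__restrict in, int n)
{
  for (int i = 0; i < n; i++)
    out[i] = (float) in[i];
}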
5194 /* Function vectorizable_assignment.
5196 Check if STMT_INFO performs an assignment (copy) that can be vectorized.
5197 If VEC_STMT is also passed, vectorize the STMT_INFO: create a vectorized
5198 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
5199 Return true if STMT_INFO is vectorizable in this way. */
5201 static bool
5202 vectorizable_assignment (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
5203 stmt_vec_info *vec_stmt, slp_tree slp_node,
5204 stmt_vector_for_cost *cost_vec)
5206 tree vec_dest;
5207 tree scalar_dest;
5208 tree op;
5209 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5210 tree new_temp;
5211 enum vect_def_type dt[1] = {vect_unknown_def_type};
5212 int ndts = 1;
5213 int ncopies;
5214 int i, j;
5215 vec<tree> vec_oprnds = vNULL;
5216 tree vop;
5217 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5218 vec_info *vinfo = stmt_info->vinfo;
5219 stmt_vec_info prev_stmt_info = NULL;
5220 enum tree_code code;
5221 tree vectype_in;
5223 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5224 return false;
5226 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5227 && ! vec_stmt)
5228 return false;
5230 /* Is this a vectorizable assignment? */
5231 gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
5232 if (!stmt)
5233 return false;
5235 scalar_dest = gimple_assign_lhs (stmt);
5236 if (TREE_CODE (scalar_dest) != SSA_NAME)
5237 return false;
5239 code = gimple_assign_rhs_code (stmt);
5240 if (gimple_assign_single_p (stmt)
5241 || code == PAREN_EXPR
5242 || CONVERT_EXPR_CODE_P (code))
5243 op = gimple_assign_rhs1 (stmt);
5244 else
5245 return false;
5247 if (code == VIEW_CONVERT_EXPR)
5248 op = TREE_OPERAND (op, 0);
5250 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5251 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
5253 /* Multiple types in SLP are handled by creating the appropriate number of
5254 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5255 case of SLP. */
5256 if (slp_node)
5257 ncopies = 1;
5258 else
5259 ncopies = vect_get_num_copies (loop_vinfo, vectype);
5261 gcc_assert (ncopies >= 1);
5263 if (!vect_is_simple_use (op, vinfo, &dt[0], &vectype_in))
5265 if (dump_enabled_p ())
5266 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5267 "use not simple.\n");
5268 return false;
5271 /* We can handle NOP_EXPR conversions that do not change the number
5272 of elements or the vector size. */
5273 if ((CONVERT_EXPR_CODE_P (code)
5274 || code == VIEW_CONVERT_EXPR)
5275 && (!vectype_in
5276 || maybe_ne (TYPE_VECTOR_SUBPARTS (vectype_in), nunits)
5277 || maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype)),
5278 GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
5279 return false;
5281 /* We do not handle bit-precision changes. */
5282 if ((CONVERT_EXPR_CODE_P (code)
5283 || code == VIEW_CONVERT_EXPR)
5284 && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
5285 && (!type_has_mode_precision_p (TREE_TYPE (scalar_dest))
5286 || !type_has_mode_precision_p (TREE_TYPE (op)))
5287 /* But a conversion that does not change the bit-pattern is ok. */
5288 && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
5289 > TYPE_PRECISION (TREE_TYPE (op)))
5290 && TYPE_UNSIGNED (TREE_TYPE (op)))
5291 /* A conversion between boolean types of different sizes is
5292 a simple assignment when their vectypes are the same
5293 boolean vector type. */
5294 && (!VECTOR_BOOLEAN_TYPE_P (vectype)
5295 || !VECTOR_BOOLEAN_TYPE_P (vectype_in)))
5297 if (dump_enabled_p ())
5298 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5299 "type conversion to/from bit-precision "
5300 "unsupported.\n");
5301 return false;
5304 if (!vec_stmt) /* transformation not required. */
5306 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
5307 DUMP_VECT_SCOPE ("vectorizable_assignment");
5308 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec);
5309 return true;
5312 /* Transform. */
5313 if (dump_enabled_p ())
5314 dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");
5316 /* Handle def. */
5317 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5319 /* Handle use. */
5320 for (j = 0; j < ncopies; j++)
5322 /* Handle uses. */
5323 if (j == 0)
5324 vect_get_vec_defs (op, NULL, stmt_info, &vec_oprnds, NULL, slp_node);
5325 else
5326 vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds, NULL);
5328 /* Arguments are ready. Create the new vector stmt. */
5329 stmt_vec_info new_stmt_info = NULL;
5330 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
5332 if (CONVERT_EXPR_CODE_P (code)
5333 || code == VIEW_CONVERT_EXPR)
5334 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
5335 gassign *new_stmt = gimple_build_assign (vec_dest, vop);
5336 new_temp = make_ssa_name (vec_dest, new_stmt);
5337 gimple_assign_set_lhs (new_stmt, new_temp);
5338 new_stmt_info
5339 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
5340 if (slp_node)
5341 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
5344 if (slp_node)
5345 continue;
5347 if (j == 0)
5348 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
5349 else
5350 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
5352 prev_stmt_info = new_stmt_info;
5355 vec_oprnds.release ();
5356 return true;
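/* A hypothetical example for the assignment path above: a conversion that
   changes neither the number of lanes nor the vector size, such as int to
   unsigned int, is vectorized as a plain copy with a VIEW_CONVERT_EXPR
   wrapped around the vector operand.  */

void
example_assign (unsigned int *__restrict out, const int *__restrict in,
		int n)
{
  for (int i = 0; i < n; i++)
    out[i] = (unsigned int) in[i];
}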
5360 /* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
5361 either as a shift by a scalar or as a shift by a vector. */
5363 bool
5364 vect_supportable_shift (enum tree_code code, tree scalar_type)
5367 machine_mode vec_mode;
5368 optab optab;
5369 int icode;
5370 tree vectype;
5372 vectype = get_vectype_for_scalar_type (scalar_type);
5373 if (!vectype)
5374 return false;
5376 optab = optab_for_tree_code (code, vectype, optab_scalar);
5377 if (!optab
5378 || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
5380 optab = optab_for_tree_code (code, vectype, optab_vector);
5381 if (!optab
5382 || (optab_handler (optab, TYPE_MODE (vectype))
5383 == CODE_FOR_nothing))
5384 return false;
5387 vec_mode = TYPE_MODE (vectype);
5388 icode = (int) optab_handler (optab, vec_mode);
5389 if (icode == CODE_FOR_nothing)
5390 return false;
5392 return true;
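/* Usage sketch (illustrative, not from the upstream sources): a caller
   that wants to know whether a synthesized "x >> 3" on shorts could be
   vectorized at all might ask

     if (vect_supportable_shift (RSHIFT_EXPR, short_integer_type_node))
       ... emit the shift-based replacement ...

   and choose a different expansion otherwise; either the scalar-amount
   or the vector-amount optab being available is enough.  */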
5396 /* Function vectorizable_shift.
5398 Check if STMT_INFO performs a shift operation that can be vectorized.
5399 If VEC_STMT is also passed, vectorize the STMT_INFO: create a vectorized
5400 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
5401 Return true if STMT_INFO is vectorizable in this way. */
5403 bool
5404 vectorizable_shift (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
5405 stmt_vec_info *vec_stmt, slp_tree slp_node,
5406 stmt_vector_for_cost *cost_vec)
5408 tree vec_dest;
5409 tree scalar_dest;
5410 tree op0, op1 = NULL;
5411 tree vec_oprnd1 = NULL_TREE;
5412 tree vectype;
5413 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5414 enum tree_code code;
5415 machine_mode vec_mode;
5416 tree new_temp;
5417 optab optab;
5418 int icode;
5419 machine_mode optab_op2_mode;
5420 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
5421 int ndts = 2;
5422 stmt_vec_info prev_stmt_info;
5423 poly_uint64 nunits_in;
5424 poly_uint64 nunits_out;
5425 tree vectype_out;
5426 tree op1_vectype;
5427 int ncopies;
5428 int j, i;
5429 vec<tree> vec_oprnds0 = vNULL;
5430 vec<tree> vec_oprnds1 = vNULL;
5431 tree vop0, vop1;
5432 unsigned int k;
5433 bool scalar_shift_arg = true;
5434 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5435 vec_info *vinfo = stmt_info->vinfo;
5437 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5438 return false;
5440 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5441 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_nested_cycle
5442 && ! vec_stmt)
5443 return false;
5445 /* Is STMT a vectorizable shift or rotate operation? */
5446 gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
5447 if (!stmt)
5448 return false;
5450 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
5451 return false;
5453 code = gimple_assign_rhs_code (stmt);
5455 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
5456 || code == RROTATE_EXPR))
5457 return false;
5459 scalar_dest = gimple_assign_lhs (stmt);
5460 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
5461 if (!type_has_mode_precision_p (TREE_TYPE (scalar_dest)))
5463 if (dump_enabled_p ())
5464 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5465 "bit-precision shifts not supported.\n");
5466 return false;
5469 op0 = gimple_assign_rhs1 (stmt);
5470 if (!vect_is_simple_use (op0, vinfo, &dt[0], &vectype))
5472 if (dump_enabled_p ())
5473 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5474 "use not simple.\n");
5475 return false;
5477 /* If op0 is an external or constant def use a vector type with
5478 the same size as the output vector type. */
5479 if (!vectype)
5480 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
5481 if (vec_stmt)
5482 gcc_assert (vectype);
5483 if (!vectype)
5485 if (dump_enabled_p ())
5486 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5487 "no vectype for scalar type\n");
5488 return false;
5491 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
5492 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
5493 if (maybe_ne (nunits_out, nunits_in))
5494 return false;
5496 op1 = gimple_assign_rhs2 (stmt);
5497 stmt_vec_info op1_def_stmt_info;
5498 if (!vect_is_simple_use (op1, vinfo, &dt[1], &op1_vectype,
5499 &op1_def_stmt_info))
5501 if (dump_enabled_p ())
5502 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5503 "use not simple.\n");
5504 return false;
5507 /* Multiple types in SLP are handled by creating the appropriate number of
5508 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5509 case of SLP. */
5510 if (slp_node)
5511 ncopies = 1;
5512 else
5513 ncopies = vect_get_num_copies (loop_vinfo, vectype);
5515 gcc_assert (ncopies >= 1);
5517 /* Determine whether the shift amount is a vector or a scalar. If the
5518 shift/rotate amount is a vector, use the vector/vector shift optabs. */
5520 if ((dt[1] == vect_internal_def
5521 || dt[1] == vect_induction_def
5522 || dt[1] == vect_nested_cycle)
5523 && !slp_node)
5524 scalar_shift_arg = false;
5525 else if (dt[1] == vect_constant_def
5526 || dt[1] == vect_external_def
5527 || dt[1] == vect_internal_def)
5529 /* In SLP, we need to check whether the shift count is the same for
5530 all the statements; in loops, if it is a constant or invariant,
5531 it is always a scalar shift. */
5532 if (slp_node)
5534 vec<stmt_vec_info> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
5535 stmt_vec_info slpstmt_info;
5537 FOR_EACH_VEC_ELT (stmts, k, slpstmt_info)
5539 gassign *slpstmt = as_a <gassign *> (slpstmt_info->stmt);
5540 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
5541 scalar_shift_arg = false;
5545 /* If the shift amount is computed by a pattern stmt we cannot
5546 use the scalar amount directly, so give up and use a vector
5547 shift. */
5548 if (op1_def_stmt_info && is_pattern_stmt_p (op1_def_stmt_info))
5549 scalar_shift_arg = false;
5551 else
5553 if (dump_enabled_p ())
5554 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5555 "operand mode requires invariant argument.\n");
5556 return false;
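/* To make the classification above concrete (an informal example): in

     for (i = 0; i < n; i++)
       a[i] = b[i] << s;        (s loop-invariant)

   the shift amount is an external/constant def, so SCALAR_SHIFT_ARG stays
   true and the vector-by-scalar optab can be tried first, while in

     for (i = 0; i < n; i++)
       a[i] = b[i] << c[i];

   the amount is an internal def and the vector/vector optab is required.  */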
5559 /* Vector shifted by vector. */
5560 if (!scalar_shift_arg)
5562 optab = optab_for_tree_code (code, vectype, optab_vector);
5563 if (dump_enabled_p ())
5564 dump_printf_loc (MSG_NOTE, vect_location,
5565 "vector/vector shift/rotate found.\n");
5567 if (!op1_vectype)
5568 op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
5569 if (op1_vectype == NULL_TREE
5570 || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
5572 if (dump_enabled_p ())
5573 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5574 "unusable type for last operand in"
5575 " vector/vector shift/rotate.\n");
5576 return false;
5579 /* See if the machine has a vector-shifted-by-scalar insn and, if not,
5580 whether it has a vector-shifted-by-vector insn. */
5581 else
5583 optab = optab_for_tree_code (code, vectype, optab_scalar);
5584 if (optab
5585 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
5587 if (dump_enabled_p ())
5588 dump_printf_loc (MSG_NOTE, vect_location,
5589 "vector/scalar shift/rotate found.\n");
5591 else
5593 optab = optab_for_tree_code (code, vectype, optab_vector);
5594 if (optab
5595 && (optab_handler (optab, TYPE_MODE (vectype))
5596 != CODE_FOR_nothing))
5598 scalar_shift_arg = false;
5600 if (dump_enabled_p ())
5601 dump_printf_loc (MSG_NOTE, vect_location,
5602 "vector/vector shift/rotate found.\n");
5604 /* Unlike the other binary operators, shifts/rotates have
5605 an rhs of type int rather than of the same type as the lhs,
5606 so make sure the scalar amount has the right type when we
5607 are dealing with vectors of long long/long/short/char. */
5608 if (dt[1] == vect_constant_def)
5609 op1 = fold_convert (TREE_TYPE (vectype), op1);
5610 else if (!useless_type_conversion_p (TREE_TYPE (vectype),
5611 TREE_TYPE (op1)))
5613 if (slp_node
5614 && TYPE_MODE (TREE_TYPE (vectype))
5615 != TYPE_MODE (TREE_TYPE (op1)))
5617 if (dump_enabled_p ())
5618 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5619 "unusable type for last operand in"
5620 " vector/vector shift/rotate.\n");
5621 return false;
5623 if (vec_stmt && !slp_node)
5625 op1 = fold_convert (TREE_TYPE (vectype), op1);
5626 op1 = vect_init_vector (stmt_info, op1,
5627 TREE_TYPE (vectype), NULL);
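/* For example (illustrative): in "v << 2" where v is a vector of long
   long, the literal 2 has type int; the folds above convert it to the
   element type of VECTYPE so that vect_init_vector can build a uniform
   shift-amount vector of the right mode.  */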
5634 /* Supportable by target? */
5635 if (!optab)
5637 if (dump_enabled_p ())
5638 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5639 "no optab.\n");
5640 return false;
5642 vec_mode = TYPE_MODE (vectype);
5643 icode = (int) optab_handler (optab, vec_mode);
5644 if (icode == CODE_FOR_nothing)
5646 if (dump_enabled_p ())
5647 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5648 "op not supported by target.\n");
5649 /* Check only during analysis. */
5650 if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
5651 || (!vec_stmt
5652 && !vect_worthwhile_without_simd_p (vinfo, code)))
5653 return false;
5654 if (dump_enabled_p ())
5655 dump_printf_loc (MSG_NOTE, vect_location,
5656 "proceeding using word mode.\n");
5659 /* Worthwhile without SIMD support? Check only during analysis. */
5660 if (!vec_stmt
5661 && !VECTOR_MODE_P (TYPE_MODE (vectype))
5662 && !vect_worthwhile_without_simd_p (vinfo, code))
5664 if (dump_enabled_p ())
5665 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5666 "not worthwhile without SIMD support.\n");
5667 return false;
5670 if (!vec_stmt) /* transformation not required. */
5672 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
5673 DUMP_VECT_SCOPE ("vectorizable_shift");
5674 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec);
5675 return true;
5678 /* Transform. */
5680 if (dump_enabled_p ())
5681 dump_printf_loc (MSG_NOTE, vect_location,
5682 "transform binary/unary operation.\n");
5684 /* Handle def. */
5685 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5687 prev_stmt_info = NULL;
5688 for (j = 0; j < ncopies; j++)
5690 /* Handle uses. */
5691 if (j == 0)
5693 if (scalar_shift_arg)
5695 /* Vector shl and shr insn patterns can be defined with scalar
5696 operand 2 (shift operand). In this case, use constant or loop
5697 invariant op1 directly, without extending it to vector mode
5698 first. */
5699 optab_op2_mode = insn_data[icode].operand[2].mode;
5700 if (!VECTOR_MODE_P (optab_op2_mode))
5702 if (dump_enabled_p ())
5703 dump_printf_loc (MSG_NOTE, vect_location,
5704 "operand 1 using scalar mode.\n");
5705 vec_oprnd1 = op1;
5706 vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
5707 vec_oprnds1.quick_push (vec_oprnd1);
5708 if (slp_node)
5710 /* Store vec_oprnd1 for every vector stmt to be created
5711 for SLP_NODE. We check during the analysis that all
5712 the shift arguments are the same.
5713 TODO: Allow different constants for different vector
5714 stmts generated for an SLP instance. */
5715 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
5716 vec_oprnds1.quick_push (vec_oprnd1);
5721 /* vec_oprnd1 is available if operand 1 should be of a scalar type
5722 (a special case for certain kinds of vector shifts); otherwise,
5723 operand 1 should be of a vector type (the usual case). */
5724 if (vec_oprnd1)
5725 vect_get_vec_defs (op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL,
5726 slp_node);
5727 else
5728 vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0, &vec_oprnds1,
5729 slp_node);
5731 else
5732 vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds0, &vec_oprnds1);
5734 /* Arguments are ready. Create the new vector stmt. */
5735 stmt_vec_info new_stmt_info = NULL;
5736 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
5738 vop1 = vec_oprnds1[i];
5739 gassign *new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
5740 new_temp = make_ssa_name (vec_dest, new_stmt);
5741 gimple_assign_set_lhs (new_stmt, new_temp);
5742 new_stmt_info
5743 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
5744 if (slp_node)
5745 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
5748 if (slp_node)
5749 continue;
5751 if (j == 0)
5752 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
5753 else
5754 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
5755 prev_stmt_info = new_stmt_info;
5758 vec_oprnds0.release ();
5759 vec_oprnds1.release ();
5761 return true;
5765 /* Function vectorizable_operation.
5767 Check if STMT_INFO performs a binary, unary or ternary operation that can
5768 be vectorized.
5769 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
5770 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
5771 Return true if STMT_INFO is vectorizable in this way. */
5773 static bool
5774 vectorizable_operation (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
5775 stmt_vec_info *vec_stmt, slp_tree slp_node,
5776 stmt_vector_for_cost *cost_vec)
5778 tree vec_dest;
5779 tree scalar_dest;
5780 tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
5781 tree vectype;
5782 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5783 enum tree_code code, orig_code;
5784 machine_mode vec_mode;
5785 tree new_temp;
5786 int op_type;
5787 optab optab;
5788 bool target_support_p;
5789 enum vect_def_type dt[3]
5790 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
5791 int ndts = 3;
5792 stmt_vec_info prev_stmt_info;
5793 poly_uint64 nunits_in;
5794 poly_uint64 nunits_out;
5795 tree vectype_out;
5796 int ncopies;
5797 int j, i;
5798 vec<tree> vec_oprnds0 = vNULL;
5799 vec<tree> vec_oprnds1 = vNULL;
5800 vec<tree> vec_oprnds2 = vNULL;
5801 tree vop0, vop1, vop2;
5802 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5803 vec_info *vinfo = stmt_info->vinfo;
5805 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5806 return false;
5808 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5809 && ! vec_stmt)
5810 return false;
5812 /* Is STMT a vectorizable binary/unary operation? */
5813 gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
5814 if (!stmt)
5815 return false;
5817 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
5818 return false;
5820 orig_code = code = gimple_assign_rhs_code (stmt);
5822 /* For pointer addition and subtraction, we should use the normal
5823 plus and minus for the vector operation. */
5824 if (code == POINTER_PLUS_EXPR)
5825 code = PLUS_EXPR;
5826 if (code == POINTER_DIFF_EXPR)
5827 code = MINUS_EXPR;
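/* Illustrative example (not from the sources): an address computation
   like "q + i" appears in GIMPLE as POINTER_PLUS_EXPR and a pointer
   difference "p - q" as POINTER_DIFF_EXPR; both are vectorized with the
   ordinary PLUS_EXPR/MINUS_EXPR on the element representation of the
   pointers, which is why the codes are canonicalized here.  */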
5829 /* Support only unary, binary and ternary operations. */
5830 op_type = TREE_CODE_LENGTH (code);
5831 if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
5833 if (dump_enabled_p ())
5834 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5835 "num. args = %d (not unary/binary/ternary op).\n",
5836 op_type);
5837 return false;
5840 scalar_dest = gimple_assign_lhs (stmt);
5841 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
5843 /* Most operations cannot handle bit-precision types without extra
5844 truncations. */
5845 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
5846 && !type_has_mode_precision_p (TREE_TYPE (scalar_dest))
5848 /* Exceptions are the bitwise binary operations. */
5848 && code != BIT_IOR_EXPR
5849 && code != BIT_XOR_EXPR
5850 && code != BIT_AND_EXPR)
5852 if (dump_enabled_p ())
5853 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5854 "bit-precision arithmetic not supported.\n");
5855 return false;
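/* For instance (a sketch): with a bit-field

     struct { unsigned int x : 3; } *s;

   an addition on s[i].x would need an extra truncation to 3 bits after
   every vector add and is rejected here, whereas BIT_AND_EXPR/BIT_IOR_EXPR/
   BIT_XOR_EXPR never produce bits outside the precision and are let
   through.  */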
5858 op0 = gimple_assign_rhs1 (stmt);
5859 if (!vect_is_simple_use (op0, vinfo, &dt[0], &vectype))
5861 if (dump_enabled_p ())
5862 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5863 "use not simple.\n");
5864 return false;
5866 /* If op0 is an external or constant def use a vector type with
5867 the same size as the output vector type. */
5868 if (!vectype)
5870 /* For a boolean type we cannot determine the vectype from an
5871 invariant value (we don't know whether it is a vector
5872 of booleans or a vector of integers). We use the output
5873 vectype because operations on booleans don't change the
5874 type. */
5875 if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op0)))
5877 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (scalar_dest)))
5879 if (dump_enabled_p ())
5880 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5881 "not supported operation on bool value.\n");
5882 return false;
5884 vectype = vectype_out;
5886 else
5887 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
5889 if (vec_stmt)
5890 gcc_assert (vectype);
5891 if (!vectype)
5893 if (dump_enabled_p ())
5894 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5895 "no vectype for scalar type %T\n",
5896 TREE_TYPE (op0));
5898 return false;
5901 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
5902 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
5903 if (maybe_ne (nunits_out, nunits_in))
5904 return false;
5906 if (op_type == binary_op || op_type == ternary_op)
5908 op1 = gimple_assign_rhs2 (stmt);
5909 if (!vect_is_simple_use (op1, vinfo, &dt[1]))
5911 if (dump_enabled_p ())
5912 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5913 "use not simple.\n");
5914 return false;
5917 if (op_type == ternary_op)
5919 op2 = gimple_assign_rhs3 (stmt);
5920 if (!vect_is_simple_use (op2, vinfo, &dt[2]))
5922 if (dump_enabled_p ())
5923 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5924 "use not simple.\n");
5925 return false;
5929 /* Multiple types in SLP are handled by creating the appropriate number of
5930 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5931 case of SLP. */
5932 if (slp_node)
5933 ncopies = 1;
5934 else
5935 ncopies = vect_get_num_copies (loop_vinfo, vectype);
5937 gcc_assert (ncopies >= 1);
5939 /* Shifts are handled in vectorizable_shift (). */
5940 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
5941 || code == RROTATE_EXPR)
5942 return false;
5944 /* Supportable by target? */
5946 vec_mode = TYPE_MODE (vectype);
5947 if (code == MULT_HIGHPART_EXPR)
5948 target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
5949 else
5951 optab = optab_for_tree_code (code, vectype, optab_default);
5952 if (!optab)
5954 if (dump_enabled_p ())
5955 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5956 "no optab.\n");
5957 return false;
5959 target_support_p = (optab_handler (optab, vec_mode)
5960 != CODE_FOR_nothing);
5963 if (!target_support_p)
5965 if (dump_enabled_p ())
5966 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5967 "op not supported by target.\n");
5968 /* Check only during analysis. */
5969 if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
5970 || (!vec_stmt && !vect_worthwhile_without_simd_p (vinfo, code)))
5971 return false;
5972 if (dump_enabled_p ())
5973 dump_printf_loc (MSG_NOTE, vect_location,
5974 "proceeding using word mode.\n");
5977 /* Worthwhile without SIMD support? Check only during analysis. */
5978 if (!VECTOR_MODE_P (vec_mode)
5979 && !vec_stmt
5980 && !vect_worthwhile_without_simd_p (vinfo, code))
5982 if (dump_enabled_p ())
5983 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5984 "not worthwhile without SIMD support.\n");
5985 return false;
5988 if (!vec_stmt) /* transformation not required. */
5990 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
5991 DUMP_VECT_SCOPE ("vectorizable_operation");
5992 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec);
5993 return true;
5996 /* Transform. */
5998 if (dump_enabled_p ())
5999 dump_printf_loc (MSG_NOTE, vect_location,
6000 "transform binary/unary operation.\n");
6002 /* POINTER_DIFF_EXPR has pointer arguments which are vectorized as
6003 vectors with unsigned elements, but the result is signed. So, we
6004 need to compute the MINUS_EXPR into a vectype temporary and
6005 VIEW_CONVERT_EXPR it into the final vectype_out result. */
6006 tree vec_cvt_dest = NULL_TREE;
6007 if (orig_code == POINTER_DIFF_EXPR)
6009 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6010 vec_cvt_dest = vect_create_destination_var (scalar_dest, vectype_out);
6012 /* Handle def. */
6013 else
6014 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
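/* E.g. (illustrative): for a pointer difference "p - q" the element-wise
   MINUS_EXPR is computed in the unsigned pointer-sized VECTYPE and the
   result is then VIEW_CONVERT_EXPRed into the signed VECTYPE_OUT, which
   is what the extra VEC_CVT_DEST created above is for.  */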
6016 /* In case the vectorization factor (VF) is bigger than the number
6017 of elements that we can fit in a vectype (nunits), we have to generate
6018 more than one vector stmt - i.e., we need to "unroll" the
6019 vector stmt by a factor VF/nunits. In doing so, we record a pointer
6020 from one copy of the vector stmt to the next, in the field
6021 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
6022 stages to find the correct vector defs to be used when vectorizing
6023 stmts that use the defs of the current stmt. The example below
6024 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
6025 we need to create 4 vectorized stmts):
6027 before vectorization:
6028 RELATED_STMT VEC_STMT
6029 S1: x = memref - -
6030 S2: z = x + 1 - -
6032 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
6033 there):
6034 RELATED_STMT VEC_STMT
6035 VS1_0: vx0 = memref0 VS1_1 -
6036 VS1_1: vx1 = memref1 VS1_2 -
6037 VS1_2: vx2 = memref2 VS1_3 -
6038 VS1_3: vx3 = memref3 - -
6039 S1: x = load - VS1_0
6040 S2: z = x + 1 - -
6042 step2: vectorize stmt S2 (done here):
6043 To vectorize stmt S2 we first need to find the relevant vector
6044 def for the first operand 'x'. This is, as usual, obtained from
6045 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
6046 that defines 'x' (S1). This way we find the stmt VS1_0, and the
6047 relevant vector def 'vx0'. Having found 'vx0' we can generate
6048 the vector stmt VS2_0, and as usual, record it in the
6049 STMT_VINFO_VEC_STMT of stmt S2.
6050 When creating the second copy (VS2_1), we obtain the relevant vector
6051 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
6052 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
6053 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
6054 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
6055 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
6056 chain of stmts and pointers:
6057 RELATED_STMT VEC_STMT
6058 VS1_0: vx0 = memref0 VS1_1 -
6059 VS1_1: vx1 = memref1 VS1_2 -
6060 VS1_2: vx2 = memref2 VS1_3 -
6061 VS1_3: vx3 = memref3 - -
6062 S1: x = load - VS1_0
6063 VS2_0: vz0 = vx0 + v1 VS2_1 -
6064 VS2_1: vz1 = vx1 + v1 VS2_2 -
6065 VS2_2: vz2 = vx2 + v1 VS2_3 -
6066 VS2_3: vz3 = vx3 + v1 - -
6067 S2: z = x + 1 - VS2_0 */
6069 prev_stmt_info = NULL;
6070 for (j = 0; j < ncopies; j++)
6072 /* Handle uses. */
6073 if (j == 0)
6075 if (op_type == binary_op)
6076 vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0, &vec_oprnds1,
6077 slp_node);
6078 else if (op_type == ternary_op)
6080 if (slp_node)
6082 auto_vec<tree> ops(3);
6083 ops.quick_push (op0);
6084 ops.quick_push (op1);
6085 ops.quick_push (op2);
6086 auto_vec<vec<tree> > vec_defs(3);
6087 vect_get_slp_defs (ops, slp_node, &vec_defs);
6088 vec_oprnds0 = vec_defs[0];
6089 vec_oprnds1 = vec_defs[1];
6090 vec_oprnds2 = vec_defs[2];
6092 else
6094 vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0,
6095 &vec_oprnds1, NULL);
6096 vect_get_vec_defs (op2, NULL_TREE, stmt_info, &vec_oprnds2,
6097 NULL, NULL);
6100 else
6101 vect_get_vec_defs (op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL,
6102 slp_node);
6104 else
6106 vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds0, &vec_oprnds1);
6107 if (op_type == ternary_op)
6109 tree vec_oprnd = vec_oprnds2.pop ();
6110 vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (vinfo,
6111 vec_oprnd));
6115 /* Arguments are ready. Create the new vector stmt. */
6116 stmt_vec_info new_stmt_info = NULL;
6117 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
6119 vop1 = ((op_type == binary_op || op_type == ternary_op)
6120 ? vec_oprnds1[i] : NULL_TREE);
6121 vop2 = ((op_type == ternary_op)
6122 ? vec_oprnds2[i] : NULL_TREE);
6123 gassign *new_stmt = gimple_build_assign (vec_dest, code,
6124 vop0, vop1, vop2);
6125 new_temp = make_ssa_name (vec_dest, new_stmt);
6126 gimple_assign_set_lhs (new_stmt, new_temp);
6127 new_stmt_info
6128 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
6129 if (vec_cvt_dest)
6131 new_temp = build1 (VIEW_CONVERT_EXPR, vectype_out, new_temp);
6132 gassign *new_stmt
6133 = gimple_build_assign (vec_cvt_dest, VIEW_CONVERT_EXPR,
6134 new_temp);
6135 new_temp = make_ssa_name (vec_cvt_dest, new_stmt);
6136 gimple_assign_set_lhs (new_stmt, new_temp);
6137 new_stmt_info
6138 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
6140 if (slp_node)
6141 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
6144 if (slp_node)
6145 continue;
6147 if (j == 0)
6148 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
6149 else
6150 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
6151 prev_stmt_info = new_stmt_info;
6154 vec_oprnds0.release ();
6155 vec_oprnds1.release ();
6156 vec_oprnds2.release ();
6158 return true;
6161 /* A helper function to ensure data reference DR_INFO's base alignment. */
6163 static void
6164 ensure_base_align (dr_vec_info *dr_info)
6166 if (dr_info->misalignment == DR_MISALIGNMENT_UNINITIALIZED)
6167 return;
6169 if (dr_info->base_misaligned)
6171 tree base_decl = dr_info->base_decl;
6173 // We should only be able to increase the alignment of a base object if
6174 // we know what its new alignment should be at compile time.
6175 unsigned HOST_WIDE_INT align_base_to =
6176 DR_TARGET_ALIGNMENT (dr_info).to_constant () * BITS_PER_UNIT;
6178 if (decl_in_symtab_p (base_decl))
6179 symtab_node::get (base_decl)->increase_alignment (align_base_to);
6180 else
6182 SET_DECL_ALIGN (base_decl, align_base_to);
6183 DECL_USER_ALIGN (base_decl) = 1;
6185 dr_info->base_misaligned = false;
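/* For example (a sketch): if the vectorized loop stores to a file-scope
   "static double a[256];" that is only 8-byte aligned while the target
   prefers 16-byte aligned vector accesses, this raises DECL_ALIGN of "a"
   (via the symbol table for symtab decls) so the data reference can be
   treated as aligned.  */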
6190 /* Function get_group_alias_ptr_type.
6192 Return the alias type for the group starting at FIRST_STMT_INFO. */
6194 static tree
6195 get_group_alias_ptr_type (stmt_vec_info first_stmt_info)
6197 struct data_reference *first_dr, *next_dr;
6199 first_dr = STMT_VINFO_DATA_REF (first_stmt_info);
6200 stmt_vec_info next_stmt_info = DR_GROUP_NEXT_ELEMENT (first_stmt_info);
6201 while (next_stmt_info)
6203 next_dr = STMT_VINFO_DATA_REF (next_stmt_info);
6204 if (get_alias_set (DR_REF (first_dr))
6205 != get_alias_set (DR_REF (next_dr)))
6207 if (dump_enabled_p ())
6208 dump_printf_loc (MSG_NOTE, vect_location,
6209 "conflicting alias set types.\n");
6210 return ptr_type_node;
6212 next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
6214 return reference_alias_ptr_type (DR_REF (first_dr));
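/* Illustrative case (not from the sources): if one store of an interleaved
   group writes through an "int *" reference and a neighbouring member of
   the group writes through a differently-typed union member, the alias
   sets conflict and the whole group conservatively gets ptr_type_node,
   i.e. alias set 0, so the combined vector store may alias anything.  */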
6218 /* Function vectorizable_store.
6220 Check if STMT_INFO defines a non-scalar data-ref (array/pointer/structure)
6221 that can be vectorized.
6222 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
6223 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
6224 Return true if STMT_INFO is vectorizable in this way. */
6226 static bool
6227 vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
6228 stmt_vec_info *vec_stmt, slp_tree slp_node,
6229 stmt_vector_for_cost *cost_vec)
6231 tree data_ref;
6232 tree op;
6233 tree vec_oprnd = NULL_TREE;
6234 tree elem_type;
6235 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6236 struct loop *loop = NULL;
6237 machine_mode vec_mode;
6238 tree dummy;
6239 enum dr_alignment_support alignment_support_scheme;
6240 enum vect_def_type rhs_dt = vect_unknown_def_type;
6241 enum vect_def_type mask_dt = vect_unknown_def_type;
6242 stmt_vec_info prev_stmt_info = NULL;
6243 tree dataref_ptr = NULL_TREE;
6244 tree dataref_offset = NULL_TREE;
6245 gimple *ptr_incr = NULL;
6246 int ncopies;
6247 int j;
6248 stmt_vec_info first_stmt_info;
6249 bool grouped_store;
6250 unsigned int group_size, i;
6251 vec<tree> oprnds = vNULL;
6252 vec<tree> result_chain = vNULL;
6253 tree offset = NULL_TREE;
6254 vec<tree> vec_oprnds = vNULL;
6255 bool slp = (slp_node != NULL);
6256 unsigned int vec_num;
6257 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
6258 vec_info *vinfo = stmt_info->vinfo;
6259 tree aggr_type;
6260 gather_scatter_info gs_info;
6261 poly_uint64 vf;
6262 vec_load_store_type vls_type;
6263 tree ref_type;
6265 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
6266 return false;
6268 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
6269 && ! vec_stmt)
6270 return false;
6272 /* Is vectorizable store? */
6274 tree mask = NULL_TREE, mask_vectype = NULL_TREE;
6275 if (gassign *assign = dyn_cast <gassign *> (stmt_info->stmt))
6277 tree scalar_dest = gimple_assign_lhs (assign);
6278 if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
6279 && is_pattern_stmt_p (stmt_info))
6280 scalar_dest = TREE_OPERAND (scalar_dest, 0);
6281 if (TREE_CODE (scalar_dest) != ARRAY_REF
6282 && TREE_CODE (scalar_dest) != BIT_FIELD_REF
6283 && TREE_CODE (scalar_dest) != INDIRECT_REF
6284 && TREE_CODE (scalar_dest) != COMPONENT_REF
6285 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
6286 && TREE_CODE (scalar_dest) != REALPART_EXPR
6287 && TREE_CODE (scalar_dest) != MEM_REF)
6288 return false;
6290 else
6292 gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
6293 if (!call || !gimple_call_internal_p (call))
6294 return false;
6296 internal_fn ifn = gimple_call_internal_fn (call);
6297 if (!internal_store_fn_p (ifn))
6298 return false;
6300 if (slp_node != NULL)
6302 if (dump_enabled_p ())
6303 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6304 "SLP of masked stores not supported.\n");
6305 return false;
6308 int mask_index = internal_fn_mask_index (ifn);
6309 if (mask_index >= 0)
6311 mask = gimple_call_arg (call, mask_index);
6312 if (!vect_check_load_store_mask (stmt_info, mask, &mask_dt,
6313 &mask_vectype))
6314 return false;
6318 op = vect_get_store_rhs (stmt_info);
6320 /* Cannot have hybrid store SLP -- that would mean storing to the
6321 same location twice. */
6322 gcc_assert (slp == PURE_SLP_STMT (stmt_info));
6324 tree vectype = STMT_VINFO_VECTYPE (stmt_info), rhs_vectype = NULL_TREE;
6325 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
6327 if (loop_vinfo)
6329 loop = LOOP_VINFO_LOOP (loop_vinfo);
6330 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
6332 else
6333 vf = 1;
6335 /* Multiple types in SLP are handled by creating the appropriate number of
6336 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
6337 case of SLP. */
6338 if (slp)
6339 ncopies = 1;
6340 else
6341 ncopies = vect_get_num_copies (loop_vinfo, vectype);
6343 gcc_assert (ncopies >= 1);
6345 /* FORNOW. This restriction should be relaxed. */
6346 if (loop && nested_in_vect_loop_p (loop, stmt_info) && ncopies > 1)
6348 if (dump_enabled_p ())
6349 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6350 "multiple types in nested loop.\n");
6351 return false;
6354 if (!vect_check_store_rhs (stmt_info, op, &rhs_dt, &rhs_vectype, &vls_type))
6355 return false;
6357 elem_type = TREE_TYPE (vectype);
6358 vec_mode = TYPE_MODE (vectype);
6360 if (!STMT_VINFO_DATA_REF (stmt_info))
6361 return false;
6363 vect_memory_access_type memory_access_type;
6364 if (!get_load_store_type (stmt_info, vectype, slp, mask, vls_type, ncopies,
6365 &memory_access_type, &gs_info))
6366 return false;
6368 if (mask)
6370 if (memory_access_type == VMAT_CONTIGUOUS)
6372 if (!VECTOR_MODE_P (vec_mode)
6373 || !can_vec_mask_load_store_p (vec_mode,
6374 TYPE_MODE (mask_vectype), false))
6375 return false;
6377 else if (memory_access_type != VMAT_LOAD_STORE_LANES
6378 && (memory_access_type != VMAT_GATHER_SCATTER
6379 || (gs_info.decl && !VECTOR_BOOLEAN_TYPE_P (mask_vectype))))
6381 if (dump_enabled_p ())
6382 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6383 "unsupported access type for masked store.\n");
6384 return false;
6387 else
6389 /* FORNOW. In some cases we can vectorize even if the data-type is not
6390 supported (e.g. array initialization with 0). */
6391 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
6392 return false;
6395 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info), *first_dr_info = NULL;
6396 grouped_store = (STMT_VINFO_GROUPED_ACCESS (stmt_info)
6397 && memory_access_type != VMAT_GATHER_SCATTER
6398 && (slp || memory_access_type != VMAT_CONTIGUOUS));
6399 if (grouped_store)
6401 first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
6402 first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
6403 group_size = DR_GROUP_SIZE (first_stmt_info);
6405 else
6407 first_stmt_info = stmt_info;
6408 first_dr_info = dr_info;
6409 group_size = vec_num = 1;
6412 if (!vec_stmt) /* transformation not required. */
6414 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
6416 if (loop_vinfo
6417 && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
6418 check_load_store_masking (loop_vinfo, vectype, vls_type, group_size,
6419 memory_access_type, &gs_info);
6421 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
6422 vect_model_store_cost (stmt_info, ncopies, rhs_dt, memory_access_type,
6423 vls_type, slp_node, cost_vec);
6424 return true;
6426 gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
6428 /* Transform. */
6430 ensure_base_align (dr_info);
6432 if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl)
6434 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, src;
6435 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
6436 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
6437 tree ptr, var, scale, vec_mask;
6438 tree mask_arg = NULL_TREE, mask_op = NULL_TREE, perm_mask = NULL_TREE;
6439 tree mask_halfvectype = mask_vectype;
6440 edge pe = loop_preheader_edge (loop);
6441 gimple_seq seq;
6442 basic_block new_bb;
6443 enum { NARROW, NONE, WIDEN } modifier;
6444 poly_uint64 scatter_off_nunits
6445 = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);
6447 if (known_eq (nunits, scatter_off_nunits))
6448 modifier = NONE;
6449 else if (known_eq (nunits * 2, scatter_off_nunits))
6451 modifier = WIDEN;
6453 /* Currently gathers and scatters are only supported for
6454 fixed-length vectors. */
6455 unsigned int count = scatter_off_nunits.to_constant ();
6456 vec_perm_builder sel (count, count, 1);
6457 for (i = 0; i < (unsigned int) count; ++i)
6458 sel.quick_push (i | (count / 2));
6460 vec_perm_indices indices (sel, 1, count);
6461 perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype,
6462 indices);
6463 gcc_assert (perm_mask != NULL_TREE);
6465 else if (known_eq (nunits, scatter_off_nunits * 2))
6467 modifier = NARROW;
6469 /* Currently gathers and scatters are only supported for
6470 fixed-length vectors. */
6471 unsigned int count = nunits.to_constant ();
6472 vec_perm_builder sel (count, count, 1);
6473 for (i = 0; i < (unsigned int) count; ++i)
6474 sel.quick_push (i | (count / 2));
6476 vec_perm_indices indices (sel, 2, count);
6477 perm_mask = vect_gen_perm_mask_checked (vectype, indices);
6478 gcc_assert (perm_mask != NULL_TREE);
6479 ncopies *= 2;
6481 if (mask)
6482 mask_halfvectype
6483 = build_same_sized_truth_vector_type (gs_info.offset_vectype);
6485 else
6486 gcc_unreachable ();
6488 rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
6489 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6490 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6491 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6492 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6493 scaletype = TREE_VALUE (arglist);
6495 gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
6496 && TREE_CODE (rettype) == VOID_TYPE);
6498 ptr = fold_convert (ptrtype, gs_info.base);
6499 if (!is_gimple_min_invariant (ptr))
6501 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
6502 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
6503 gcc_assert (!new_bb);
6506 if (mask == NULL_TREE)
6508 mask_arg = build_int_cst (masktype, -1);
6509 mask_arg = vect_init_vector (stmt_info, mask_arg, masktype, NULL);
6512 scale = build_int_cst (scaletype, gs_info.scale);
6514 prev_stmt_info = NULL;
6515 for (j = 0; j < ncopies; ++j)
6517 if (j == 0)
6519 src = vec_oprnd1 = vect_get_vec_def_for_operand (op, stmt_info);
6520 op = vec_oprnd0 = vect_get_vec_def_for_operand (gs_info.offset,
6521 stmt_info);
6522 if (mask)
6523 mask_op = vec_mask = vect_get_vec_def_for_operand (mask,
6524 stmt_info);
6526 else if (modifier != NONE && (j & 1))
6528 if (modifier == WIDEN)
6531 = vec_oprnd1 = vect_get_vec_def_for_stmt_copy (vinfo,
6532 vec_oprnd1);
6533 op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
6534 stmt_info, gsi);
6535 if (mask)
6536 mask_op
6537 = vec_mask = vect_get_vec_def_for_stmt_copy (vinfo,
6538 vec_mask);
6540 else if (modifier == NARROW)
6542 src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask,
6543 stmt_info, gsi);
6544 op = vec_oprnd0 = vect_get_vec_def_for_stmt_copy (vinfo,
6545 vec_oprnd0);
6547 else
6548 gcc_unreachable ();
6550 else
6552 src = vec_oprnd1 = vect_get_vec_def_for_stmt_copy (vinfo,
6553 vec_oprnd1);
6554 op = vec_oprnd0 = vect_get_vec_def_for_stmt_copy (vinfo,
6555 vec_oprnd0);
6556 if (mask)
6557 mask_op = vec_mask = vect_get_vec_def_for_stmt_copy (vinfo,
6558 vec_mask);
6561 if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
6563 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src)),
6564 TYPE_VECTOR_SUBPARTS (srctype)));
6565 var = vect_get_new_ssa_name (srctype, vect_simple_var);
6566 src = build1 (VIEW_CONVERT_EXPR, srctype, src);
6567 gassign *new_stmt
6568 = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
6569 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
6570 src = var;
6573 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
6575 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
6576 TYPE_VECTOR_SUBPARTS (idxtype)));
6577 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
6578 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
6579 gassign *new_stmt
6580 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
6581 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
6582 op = var;
6585 if (mask)
6587 tree utype;
6588 mask_arg = mask_op;
6589 if (modifier == NARROW)
6591 var = vect_get_new_ssa_name (mask_halfvectype,
6592 vect_simple_var);
6593 gassign *new_stmt
6594 = gimple_build_assign (var, (j & 1) ? VEC_UNPACK_HI_EXPR
6595 : VEC_UNPACK_LO_EXPR,
6596 mask_op);
6597 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
6598 mask_arg = var;
6600 tree optype = TREE_TYPE (mask_arg);
6601 if (TYPE_MODE (masktype) == TYPE_MODE (optype))
6602 utype = masktype;
6603 else
6604 utype = lang_hooks.types.type_for_mode (TYPE_MODE (optype), 1);
6605 var = vect_get_new_ssa_name (utype, vect_scalar_var);
6606 mask_arg = build1 (VIEW_CONVERT_EXPR, utype, mask_arg);
6607 gassign *new_stmt
6608 = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_arg);
6609 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
6610 mask_arg = var;
6611 if (!useless_type_conversion_p (masktype, utype))
6613 gcc_assert (TYPE_PRECISION (utype)
6614 <= TYPE_PRECISION (masktype));
6615 var = vect_get_new_ssa_name (masktype, vect_scalar_var);
6616 new_stmt = gimple_build_assign (var, NOP_EXPR, mask_arg);
6617 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
6618 mask_arg = var;
6622 gcall *new_stmt
6623 = gimple_build_call (gs_info.decl, 5, ptr, mask_arg, op, src, scale);
6624 stmt_vec_info new_stmt_info
6625 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
6627 if (prev_stmt_info == NULL)
6628 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
6629 else
6630 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
6631 prev_stmt_info = new_stmt_info;
6633 return true;
6636 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
6637 DR_GROUP_STORE_COUNT (DR_GROUP_FIRST_ELEMENT (stmt_info))++;
6639 if (grouped_store)
6641 /* FORNOW */
6642 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt_info));
6644 /* We vectorize all the stmts of the interleaving group when we
6645 reach the last stmt in the group. */
6646 if (DR_GROUP_STORE_COUNT (first_stmt_info)
6647 < DR_GROUP_SIZE (first_stmt_info)
6648 && !slp)
6650 *vec_stmt = NULL;
6651 return true;
6654 if (slp)
6656 grouped_store = false;
6657 /* VEC_NUM is the number of vect stmts to be created for this
6658 group. */
6659 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6660 first_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[0];
6661 gcc_assert (DR_GROUP_FIRST_ELEMENT (first_stmt_info)
6662 == first_stmt_info);
6663 first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
6664 op = vect_get_store_rhs (first_stmt_info);
6666 else
6667 /* VEC_NUM is the number of vect stmts to be created for this
6668 group. */
6669 vec_num = group_size;
6671 ref_type = get_group_alias_ptr_type (first_stmt_info);
6673 else
6674 ref_type = reference_alias_ptr_type (DR_REF (first_dr_info->dr));
6676 if (dump_enabled_p ())
6677 dump_printf_loc (MSG_NOTE, vect_location,
6678 "transform store. ncopies = %d\n", ncopies);
6680 if (memory_access_type == VMAT_ELEMENTWISE
6681 || memory_access_type == VMAT_STRIDED_SLP)
6683 gimple_stmt_iterator incr_gsi;
6684 bool insert_after;
6685 gimple *incr;
6686 tree offvar;
6687 tree ivstep;
6688 tree running_off;
6689 tree stride_base, stride_step, alias_off;
6690 tree vec_oprnd;
6691 unsigned int g;
6692 /* Checked by get_load_store_type. */
6693 unsigned int const_nunits = nunits.to_constant ();
6695 gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
6696 gcc_assert (!nested_in_vect_loop_p (loop, stmt_info));
6698 stride_base
6699 = fold_build_pointer_plus
6700 (DR_BASE_ADDRESS (first_dr_info->dr),
6701 size_binop (PLUS_EXPR,
6702 convert_to_ptrofftype (DR_OFFSET (first_dr_info->dr)),
6703 convert_to_ptrofftype (DR_INIT (first_dr_info->dr))));
6704 stride_step = fold_convert (sizetype, DR_STEP (first_dr_info->dr));
6706 /* For a store with loop-invariant (but other than power-of-2)
6707 stride (i.e. not a grouped access) like so:
6709 for (i = 0; i < n; i += stride)
6710 array[i] = ...;
6712 we generate a new induction variable and new stores from
6713 the components of the (vectorized) rhs:
6715 for (j = 0; ; j += VF*stride)
6716 vectemp = ...;
6717 tmp1 = vectemp[0];
6718 array[j] = tmp1;
6719 tmp2 = vectemp[1];
6720 array[j + stride] = tmp2;
6724 unsigned nstores = const_nunits;
6725 unsigned lnel = 1;
6726 tree ltype = elem_type;
6727 tree lvectype = vectype;
6728 if (slp)
6730 if (group_size < const_nunits
6731 && const_nunits % group_size == 0)
6733 nstores = const_nunits / group_size;
6734 lnel = group_size;
6735 ltype = build_vector_type (elem_type, group_size);
6736 lvectype = vectype;
6738 /* First check if vec_extract optab doesn't support extraction
6739 of vector elts directly. */
6740 scalar_mode elmode = SCALAR_TYPE_MODE (elem_type);
6741 machine_mode vmode;
6742 if (!mode_for_vector (elmode, group_size).exists (&vmode)
6743 || !VECTOR_MODE_P (vmode)
6744 || !targetm.vector_mode_supported_p (vmode)
6745 || (convert_optab_handler (vec_extract_optab,
6746 TYPE_MODE (vectype), vmode)
6747 == CODE_FOR_nothing))
6749 /* Try to avoid emitting an extract of vector elements
6750 by performing the extracts using an integer type of the
6751 same size, extracting from a vector of those and then
6752 re-interpreting it as the original vector type if
6753 supported. */
6754 unsigned lsize
6755 = group_size * GET_MODE_BITSIZE (elmode);
6756 unsigned int lnunits = const_nunits / group_size;
6757 /* If we can't construct such a vector fall back to
6758 element extracts from the original vector type and
6759 element size stores. */
6760 if (int_mode_for_size (lsize, 0).exists (&elmode)
6761 && mode_for_vector (elmode, lnunits).exists (&vmode)
6762 && VECTOR_MODE_P (vmode)
6763 && targetm.vector_mode_supported_p (vmode)
6764 && (convert_optab_handler (vec_extract_optab,
6765 vmode, elmode)
6766 != CODE_FOR_nothing))
6768 nstores = lnunits;
6769 lnel = group_size;
6770 ltype = build_nonstandard_integer_type (lsize, 1);
6771 lvectype = build_vector_type (ltype, nstores);
6773 /* Else fall back to vector extraction anyway.
6774 Fewer stores are more important than avoiding spilling
6775 of the vector we extract from. Compared to the
6776 construction case in vectorizable_load no store-forwarding
6777 issue exists here for reasonable archs. */
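/* Concrete illustration of the fallback above (assuming a 128-bit vector
   target): storing groups of 4 chars from a V16QI vector, extracting V4QI
   sub-vectors directly is typically unsupported, so the code retries with
   a 32-bit integer element type: the V16QI value is punned to V4SI, four
   SImode lanes are extracted, and each is stored as one 32-bit chunk.  */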
6780 else if (group_size >= const_nunits
6781 && group_size % const_nunits == 0)
6783 nstores = 1;
6784 lnel = const_nunits;
6785 ltype = vectype;
6786 lvectype = vectype;
6788 ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
6789 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6792 ivstep = stride_step;
6793 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
6794 build_int_cst (TREE_TYPE (ivstep), vf));
6796 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
6798 stride_base = cse_and_gimplify_to_preheader (loop_vinfo, stride_base);
6799 ivstep = cse_and_gimplify_to_preheader (loop_vinfo, ivstep);
6800 create_iv (stride_base, ivstep, NULL,
6801 loop, &incr_gsi, insert_after,
6802 &offvar, NULL);
6803 incr = gsi_stmt (incr_gsi);
6804 loop_vinfo->add_stmt (incr);
6806 stride_step = cse_and_gimplify_to_preheader (loop_vinfo, stride_step);
6808 prev_stmt_info = NULL;
6809 alias_off = build_int_cst (ref_type, 0);
6810 stmt_vec_info next_stmt_info = first_stmt_info;
6811 for (g = 0; g < group_size; g++)
6813 running_off = offvar;
6814 if (g)
6816 tree size = TYPE_SIZE_UNIT (ltype);
6817 tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
6818 size);
6819 tree newoff = copy_ssa_name (running_off, NULL);
6820 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6821 running_off, pos);
6822 vect_finish_stmt_generation (stmt_info, incr, gsi);
6823 running_off = newoff;
6825 unsigned int group_el = 0;
6826 unsigned HOST_WIDE_INT
6827 elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
6828 for (j = 0; j < ncopies; j++)
6830 /* We've set op and dt above, from vect_get_store_rhs,
6831 and first_stmt_info == stmt_info. */
6832 if (j == 0)
6834 if (slp)
6836 vect_get_vec_defs (op, NULL_TREE, stmt_info,
6837 &vec_oprnds, NULL, slp_node);
6838 vec_oprnd = vec_oprnds[0];
6840 else
6842 op = vect_get_store_rhs (next_stmt_info);
6843 vec_oprnd = vect_get_vec_def_for_operand
6844 (op, next_stmt_info);
6847 else
6849 if (slp)
6850 vec_oprnd = vec_oprnds[j];
6851 else
6852 vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo,
6853 vec_oprnd);
6855 /* Pun the vector to extract from if necessary. */
6856 if (lvectype != vectype)
6858 tree tem = make_ssa_name (lvectype);
6859 gimple *pun
6860 = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
6861 lvectype, vec_oprnd));
6862 vect_finish_stmt_generation (stmt_info, pun, gsi);
6863 vec_oprnd = tem;
6865 for (i = 0; i < nstores; i++)
6867 tree newref, newoff;
6868 gimple *incr, *assign;
6869 tree size = TYPE_SIZE (ltype);
6870 /* Extract the i'th component. */
6871 tree pos = fold_build2 (MULT_EXPR, bitsizetype,
6872 bitsize_int (i), size);
6873 tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
6874 size, pos);
6876 elem = force_gimple_operand_gsi (gsi, elem, true,
6877 NULL_TREE, true,
6878 GSI_SAME_STMT);
6880 tree this_off = build_int_cst (TREE_TYPE (alias_off),
6881 group_el * elsz);
6882 newref = build2 (MEM_REF, ltype,
6883 running_off, this_off);
6884 vect_copy_ref_info (newref, DR_REF (first_dr_info->dr));
6886 /* And store it to *running_off. */
6887 assign = gimple_build_assign (newref, elem);
6888 stmt_vec_info assign_info
6889 = vect_finish_stmt_generation (stmt_info, assign, gsi);
6891 group_el += lnel;
6892 if (! slp
6893 || group_el == group_size)
6895 newoff = copy_ssa_name (running_off, NULL);
6896 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6897 running_off, stride_step);
6898 vect_finish_stmt_generation (stmt_info, incr, gsi);
6900 running_off = newoff;
6901 group_el = 0;
6903 if (g == group_size - 1
6904 && !slp)
6906 if (j == 0 && i == 0)
6907 STMT_VINFO_VEC_STMT (stmt_info)
6908 = *vec_stmt = assign_info;
6909 else
6910 STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign_info;
6911 prev_stmt_info = assign_info;
6915 next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
6916 if (slp)
6917 break;
6920 vec_oprnds.release ();
6921 return true;
6924 auto_vec<tree> dr_chain (group_size);
6925 oprnds.create (group_size);
6927 alignment_support_scheme
6928 = vect_supportable_dr_alignment (first_dr_info, false);
6929 gcc_assert (alignment_support_scheme);
6930 vec_loop_masks *loop_masks
6931 = (loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
6932 ? &LOOP_VINFO_MASKS (loop_vinfo)
6933 : NULL);
6934 /* Targets with store-lane instructions must not require explicit
6935 realignment. vect_supportable_dr_alignment always returns either
6936 dr_aligned or dr_unaligned_supported for masked operations. */
6937 gcc_assert ((memory_access_type != VMAT_LOAD_STORE_LANES
6938 && !mask
6939 && !loop_masks)
6940 || alignment_support_scheme == dr_aligned
6941 || alignment_support_scheme == dr_unaligned_supported);
6943 if (memory_access_type == VMAT_CONTIGUOUS_DOWN
6944 || memory_access_type == VMAT_CONTIGUOUS_REVERSE)
6945 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
6947 tree bump;
6948 tree vec_offset = NULL_TREE;
6949 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
6951 aggr_type = NULL_TREE;
6952 bump = NULL_TREE;
6954 else if (memory_access_type == VMAT_GATHER_SCATTER)
6956 aggr_type = elem_type;
6957 vect_get_strided_load_store_ops (stmt_info, loop_vinfo, &gs_info,
6958 &bump, &vec_offset);
6960 else
6962 if (memory_access_type == VMAT_LOAD_STORE_LANES)
6963 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
6964 else
6965 aggr_type = vectype;
6966 bump = vect_get_data_ptr_increment (dr_info, aggr_type,
6967 memory_access_type);
6970 if (mask)
6971 LOOP_VINFO_HAS_MASK_STORE (loop_vinfo) = true;
6973 /* In case the vectorization factor (VF) is bigger than the number
6974 of elements that we can fit in a vectype (nunits), we have to generate
6975 more than one vector stmt - i.e., we need to "unroll" the
6976 vector stmt by a factor VF/nunits. For more details see the documentation
6977 of vect_get_vec_def_for_stmt_copy. */
6979 /* In case of interleaving (non-unit grouped access):
6981 S1: &base + 2 = x2
6982 S2: &base = x0
6983 S3: &base + 1 = x1
6984 S4: &base + 3 = x3
6986 We create vectorized stores starting from base address (the access of the
6987 first stmt in the chain (S2 in the above example), when the last store stmt
6988 of the chain (S4) is reached:
6990 VS1: &base = vx2
6991 VS2: &base + vec_size*1 = vx0
6992 VS3: &base + vec_size*2 = vx1
6993 VS4: &base + vec_size*3 = vx3
6995 Then permutation statements are generated:
6997 VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
6998 VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
7001 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
7002 (the order of the data-refs in the output of vect_permute_store_chain
7003 corresponds to the order of scalar stmts in the interleaving chain - see
7004 the documentation of vect_permute_store_chain()).
7006 In case of both multiple types and interleaving, above vector stores and
7007 permutation stmts are created for every copy. The result vector stmts are
7008 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
7009 STMT_VINFO_RELATED_STMT for the next copies.
7012 prev_stmt_info = NULL;
7013 tree vec_mask = NULL_TREE;
7014 for (j = 0; j < ncopies; j++)
7016 stmt_vec_info new_stmt_info;
7017 if (j == 0)
7019 if (slp)
7021 /* Get vectorized arguments for SLP_NODE. */
7022 vect_get_vec_defs (op, NULL_TREE, stmt_info, &vec_oprnds,
7023 NULL, slp_node);
7025 vec_oprnd = vec_oprnds[0];
7027 else
7029 /* For interleaved stores we collect vectorized defs for all the
7030 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
7031 used as an input to vect_permute_store_chain(), and OPRNDS as
7032 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
7034 If the store is not grouped, DR_GROUP_SIZE is 1, and DR_CHAIN and
7035 OPRNDS are of size 1. */
7036 stmt_vec_info next_stmt_info = first_stmt_info;
7037 for (i = 0; i < group_size; i++)
7039 /* Since gaps are not supported for interleaved stores,
7040 DR_GROUP_SIZE is the exact number of stmts in the chain.
7041 Therefore, NEXT_STMT_INFO can't be NULL. In case
7042 there is no interleaving, DR_GROUP_SIZE is 1,
7043 and only one iteration of the loop will be executed. */
7044 op = vect_get_store_rhs (next_stmt_info);
7045 vec_oprnd = vect_get_vec_def_for_operand
7046 (op, next_stmt_info);
7047 dr_chain.quick_push (vec_oprnd);
7048 oprnds.quick_push (vec_oprnd);
7049 next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
7051 if (mask)
7052 vec_mask = vect_get_vec_def_for_operand (mask, stmt_info,
7053 mask_vectype);
7056 /* We should have caught mismatched types earlier. */
7057 gcc_assert (useless_type_conversion_p (vectype,
7058 TREE_TYPE (vec_oprnd)));
7059 bool simd_lane_access_p
7060 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
7061 if (simd_lane_access_p
7062 && !loop_masks
7063 && TREE_CODE (DR_BASE_ADDRESS (first_dr_info->dr)) == ADDR_EXPR
7064 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr_info->dr), 0))
7065 && integer_zerop (DR_OFFSET (first_dr_info->dr))
7066 && integer_zerop (DR_INIT (first_dr_info->dr))
7067 && alias_sets_conflict_p (get_alias_set (aggr_type),
7068 get_alias_set (TREE_TYPE (ref_type))))
7070 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr_info->dr));
7071 dataref_offset = build_int_cst (ref_type, 0);
7073 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
7074 vect_get_gather_scatter_ops (loop, stmt_info, &gs_info,
7075 &dataref_ptr, &vec_offset);
7076 else
7077 dataref_ptr
7078 = vect_create_data_ref_ptr (first_stmt_info, aggr_type,
7079 simd_lane_access_p ? loop : NULL,
7080 offset, &dummy, gsi, &ptr_incr,
7081 simd_lane_access_p, NULL_TREE, bump);
7083 else
7085 /* For interleaved stores we created vectorized defs for all the
7086 defs stored in OPRNDS in the previous iteration (previous copy).
7087 DR_CHAIN is then used as an input to vect_permute_store_chain(),
7088 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
7089 next copy.
7090 If the store is not grouped, DR_GROUP_SIZE is 1, and DR_CHAIN and
7091 OPRNDS are of size 1. */
7092 for (i = 0; i < group_size; i++)
7094 op = oprnds[i];
7095 vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, op);
7096 dr_chain[i] = vec_oprnd;
7097 oprnds[i] = vec_oprnd;
7099 if (mask)
7100 vec_mask = vect_get_vec_def_for_stmt_copy (vinfo, vec_mask);
7101 if (dataref_offset)
7102 dataref_offset
7103 = int_const_binop (PLUS_EXPR, dataref_offset, bump);
7104 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
7105 vec_offset = vect_get_vec_def_for_stmt_copy (vinfo, vec_offset);
7106 else
7107 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7108 stmt_info, bump);
7111 if (memory_access_type == VMAT_LOAD_STORE_LANES)
7113 tree vec_array;
7115 /* Get an array into which we can store the individual vectors. */
7116 vec_array = create_vector_array (vectype, vec_num);
7118 /* Invalidate the current contents of VEC_ARRAY. This should
7119 become an RTL clobber too, which prevents the vector registers
7120 from being upward-exposed. */
7121 vect_clobber_variable (stmt_info, gsi, vec_array);
7123 /* Store the individual vectors into the array. */
7124 for (i = 0; i < vec_num; i++)
7126 vec_oprnd = dr_chain[i];
7127 write_vector_array (stmt_info, gsi, vec_oprnd, vec_array, i);
7130 tree final_mask = NULL;
7131 if (loop_masks)
7132 final_mask = vect_get_loop_mask (gsi, loop_masks, ncopies,
7133 vectype, j);
7134 if (vec_mask)
7135 final_mask = prepare_load_store_mask (mask_vectype, final_mask,
7136 vec_mask, gsi);
7138 gcall *call;
7139 if (final_mask)
7141 /* Emit:
7142 MASK_STORE_LANES (DATAREF_PTR, ALIAS_PTR, VEC_MASK,
7143 VEC_ARRAY). */
7144 unsigned int align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
7145 tree alias_ptr = build_int_cst (ref_type, align);
7146 call = gimple_build_call_internal (IFN_MASK_STORE_LANES, 4,
7147 dataref_ptr, alias_ptr,
7148 final_mask, vec_array);
7150 else
7152 /* Emit:
7153 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
7154 data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
7155 call = gimple_build_call_internal (IFN_STORE_LANES, 1,
7156 vec_array);
7157 gimple_call_set_lhs (call, data_ref);
7159 gimple_call_set_nothrow (call, true);
7160 new_stmt_info = vect_finish_stmt_generation (stmt_info, call, gsi);
7162 /* Record that VEC_ARRAY is now dead. */
7163 vect_clobber_variable (stmt_info, gsi, vec_array);
7165 else
7167 new_stmt_info = NULL;
7168 if (grouped_store)
7170 if (j == 0)
7171 result_chain.create (group_size);
7172 /* Permute. */
7173 vect_permute_store_chain (dr_chain, group_size, stmt_info, gsi,
7174 &result_chain);
7177 stmt_vec_info next_stmt_info = first_stmt_info;
7178 for (i = 0; i < vec_num; i++)
7180 unsigned misalign;
7181 unsigned HOST_WIDE_INT align;
7183 tree final_mask = NULL_TREE;
7184 if (loop_masks)
7185 final_mask = vect_get_loop_mask (gsi, loop_masks,
7186 vec_num * ncopies,
7187 vectype, vec_num * j + i);
7188 if (vec_mask)
7189 final_mask = prepare_load_store_mask (mask_vectype, final_mask,
7190 vec_mask, gsi);
7192 if (memory_access_type == VMAT_GATHER_SCATTER)
7194 tree scale = size_int (gs_info.scale);
7195 gcall *call;
7196 if (loop_masks)
7197 call = gimple_build_call_internal
7198 (IFN_MASK_SCATTER_STORE, 5, dataref_ptr, vec_offset,
7199 scale, vec_oprnd, final_mask);
7200 else
7201 call = gimple_build_call_internal
7202 (IFN_SCATTER_STORE, 4, dataref_ptr, vec_offset,
7203 scale, vec_oprnd);
7204 gimple_call_set_nothrow (call, true);
7205 new_stmt_info
7206 = vect_finish_stmt_generation (stmt_info, call, gsi);
7207 break;
7210 if (i > 0)
7211 /* Bump the vector pointer. */
7212 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7213 stmt_info, bump);
7215 if (slp)
7216 vec_oprnd = vec_oprnds[i];
7217 else if (grouped_store)
7218 /* For grouped stores vectorized defs are interleaved in
7219 vect_permute_store_chain(). */
7220 vec_oprnd = result_chain[i];
7222 align = known_alignment (DR_TARGET_ALIGNMENT (first_dr_info));
7223 if (aligned_access_p (first_dr_info))
7224 misalign = 0;
7225 else if (DR_MISALIGNMENT (first_dr_info) == -1)
7227 align = dr_alignment (vect_dr_behavior (first_dr_info));
7228 misalign = 0;
7230 else
7231 misalign = DR_MISALIGNMENT (first_dr_info);
7232 if (dataref_offset == NULL_TREE
7233 && TREE_CODE (dataref_ptr) == SSA_NAME)
7234 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
7235 misalign);
7237 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
7239 tree perm_mask = perm_mask_for_reverse (vectype);
7240 tree perm_dest = vect_create_destination_var
7241 (vect_get_store_rhs (stmt_info), vectype);
7242 tree new_temp = make_ssa_name (perm_dest);
7244 /* Generate the permute statement. */
7245 gimple *perm_stmt
7246 = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
7247 vec_oprnd, perm_mask);
7248 vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
7250 perm_stmt = SSA_NAME_DEF_STMT (new_temp);
7251 vec_oprnd = new_temp;
7254 /* Arguments are ready. Create the new vector stmt. */
7255 if (final_mask)
7257 align = least_bit_hwi (misalign | align);
7258 tree ptr = build_int_cst (ref_type, align);
7259 gcall *call
7260 = gimple_build_call_internal (IFN_MASK_STORE, 4,
7261 dataref_ptr, ptr,
7262 final_mask, vec_oprnd);
7263 gimple_call_set_nothrow (call, true);
7264 new_stmt_info
7265 = vect_finish_stmt_generation (stmt_info, call, gsi);
7267 else
7269 data_ref = fold_build2 (MEM_REF, vectype,
7270 dataref_ptr,
7271 dataref_offset
7272 ? dataref_offset
7273 : build_int_cst (ref_type, 0));
7274 if (aligned_access_p (first_dr_info))
7276 else if (DR_MISALIGNMENT (first_dr_info) == -1)
7277 TREE_TYPE (data_ref)
7278 = build_aligned_type (TREE_TYPE (data_ref),
7279 align * BITS_PER_UNIT);
7280 else
7281 TREE_TYPE (data_ref)
7282 = build_aligned_type (TREE_TYPE (data_ref),
7283 TYPE_ALIGN (elem_type));
7284 vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr));
7285 gassign *new_stmt
7286 = gimple_build_assign (data_ref, vec_oprnd);
7287 new_stmt_info
7288 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
7291 if (slp)
7292 continue;
7294 next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
7295 if (!next_stmt_info)
7296 break;
7299 if (!slp)
7301 if (j == 0)
7302 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
7303 else
7304 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
7305 prev_stmt_info = new_stmt_info;
7309 oprnds.release ();
7310 result_chain.release ();
7311 vec_oprnds.release ();
7313 return true;
7316 /* Given a vector type VECTYPE, turns permutation SEL into the equivalent
7317 VECTOR_CST mask. No checks are made that the target platform supports the
7318 mask, so callers may wish to test can_vec_perm_const_p separately, or use
7319 vect_gen_perm_mask_checked. */
7321 tree
7322 vect_gen_perm_mask_any (tree vectype, const vec_perm_indices &sel)
7324 tree mask_type;
7326 poly_uint64 nunits = sel.length ();
7327 gcc_assert (known_eq (nunits, TYPE_VECTOR_SUBPARTS (vectype)));
7329 mask_type = build_vector_type (ssizetype, nunits);
7330 return vec_perm_indices_to_tree (mask_type, sel);
7333 /* Checked version of vect_gen_perm_mask_any. Asserts can_vec_perm_const_p,
7334 i.e. that the target supports the pattern _for arbitrary input vectors_. */
7336 tree
7337 vect_gen_perm_mask_checked (tree vectype, const vec_perm_indices &sel)
7339 gcc_assert (can_vec_perm_const_p (TYPE_MODE (vectype), sel));
7340 return vect_gen_perm_mask_any (vectype, sel);
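/* A typical use of the two helpers above (sketch only, mirroring what
   perm_mask_for_reverse does elsewhere in this file; MASK is a
   hypothetical local): build the indices with a vec_perm_builder, wrap
   them in vec_perm_indices, check can_vec_perm_const_p and then ask for
   the VECTOR_CST mask:

     poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
     vec_perm_builder sel (nunits, 1, 3);
     for (int i = 0; i < 3; ++i)
       sel.quick_push (nunits - 1 - i);
     vec_perm_indices indices (sel, 1, nunits);
     if (can_vec_perm_const_p (TYPE_MODE (vectype), indices))
       mask = vect_gen_perm_mask_checked (vectype, indices);  */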
7343 /* Given vector variables X and Y that were generated for the scalar
7344 statement STMT_INFO, generate instructions to permute the vector elements
7345 of X and Y using permutation mask MASK_VEC, insert them at *GSI and
7346 return the permuted vector variable. */
7348 static tree
7349 permute_vec_elements (tree x, tree y, tree mask_vec, stmt_vec_info stmt_info,
7350 gimple_stmt_iterator *gsi)
7352 tree vectype = TREE_TYPE (x);
7353 tree perm_dest, data_ref;
7354 gimple *perm_stmt;
7356 tree scalar_dest = gimple_get_lhs (stmt_info->stmt);
7357 if (scalar_dest && TREE_CODE (scalar_dest) == SSA_NAME)
7358 perm_dest = vect_create_destination_var (scalar_dest, vectype);
7359 else
7360 perm_dest = vect_get_new_vect_var (vectype, vect_simple_var, NULL);
7361 data_ref = make_ssa_name (perm_dest);
7363 /* Generate the permute statement. */
7364 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
7365 vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
7367 return data_ref;
7370 /* Hoist the definitions of all SSA uses on STMT_INFO out of the loop LOOP,
7371 inserting them on the loop's preheader edge. Returns true if we
7372 were successful in doing so (and thus STMT_INFO can then be moved),
7373 otherwise returns false. */
7375 static bool
7376 hoist_defs_of_uses (stmt_vec_info stmt_info, struct loop *loop)
7378 ssa_op_iter i;
7379 tree op;
7380 bool any = false;
7382 FOR_EACH_SSA_TREE_OPERAND (op, stmt_info->stmt, i, SSA_OP_USE)
7384 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
7385 if (!gimple_nop_p (def_stmt)
7386 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
7388 /* Make sure we don't need to recurse. While we could do
7389 so in simple cases, for more complex use webs we don't
7390 have an easy way to preserve stmt order to fulfil
7391 dependencies within them. */
7392 tree op2;
7393 ssa_op_iter i2;
7394 if (gimple_code (def_stmt) == GIMPLE_PHI)
7395 return false;
7396 FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
7398 gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
7399 if (!gimple_nop_p (def_stmt2)
7400 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
7401 return false;
7403 any = true;
7407 if (!any)
7408 return true;
7410 FOR_EACH_SSA_TREE_OPERAND (op, stmt_info->stmt, i, SSA_OP_USE)
7412 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
7413 if (!gimple_nop_p (def_stmt)
7414 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
7416 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
7417 gsi_remove (&gsi, false);
7418 gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
7422 return true;
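/* For example (illustrative source-level sketch), for an invariant load in

     for (i = 0; i < n; i++)
       {
	 t_1 = k_2 + 1;
	 x_3 = a[t_1];
	 ...
       }

   the in-loop definition t_1 = k_2 + 1 only uses values defined outside
   the loop, so it is moved onto the preheader edge, after which the load
   itself can be hoisted by the caller.  */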
7425 /* vectorizable_load.
7427 Check if STMT_INFO reads a non-scalar data-ref (array/pointer/structure)
7428 that can be vectorized.
7429 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
7430 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
7431 Return true if STMT_INFO is vectorizable in this way. */
7433 static bool
7434 vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
7435 stmt_vec_info *vec_stmt, slp_tree slp_node,
7436 slp_instance slp_node_instance,
7437 stmt_vector_for_cost *cost_vec)
7439 tree scalar_dest;
7440 tree vec_dest = NULL;
7441 tree data_ref = NULL;
7442 stmt_vec_info prev_stmt_info;
7443 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7444 struct loop *loop = NULL;
7445 struct loop *containing_loop = gimple_bb (stmt_info->stmt)->loop_father;
7446 bool nested_in_vect_loop = false;
7447 tree elem_type;
7448 tree new_temp;
7449 machine_mode mode;
7450 tree dummy;
7451 enum dr_alignment_support alignment_support_scheme;
7452 tree dataref_ptr = NULL_TREE;
7453 tree dataref_offset = NULL_TREE;
7454 gimple *ptr_incr = NULL;
7455 int ncopies;
7456 int i, j;
7457 unsigned int group_size;
7458 poly_uint64 group_gap_adj;
7459 tree msq = NULL_TREE, lsq;
7460 tree offset = NULL_TREE;
7461 tree byte_offset = NULL_TREE;
7462 tree realignment_token = NULL_TREE;
7463 gphi *phi = NULL;
7464 vec<tree> dr_chain = vNULL;
7465 bool grouped_load = false;
7466 stmt_vec_info first_stmt_info;
7467 stmt_vec_info first_stmt_info_for_drptr = NULL;
7468 bool compute_in_loop = false;
7469 struct loop *at_loop;
7470 int vec_num;
7471 bool slp = (slp_node != NULL);
7472 bool slp_perm = false;
7473 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7474 poly_uint64 vf;
7475 tree aggr_type;
7476 gather_scatter_info gs_info;
7477 vec_info *vinfo = stmt_info->vinfo;
7478 tree ref_type;
7479 enum vect_def_type mask_dt = vect_unknown_def_type;
7481 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
7482 return false;
7484 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
7485 && ! vec_stmt)
7486 return false;
7488 tree mask = NULL_TREE, mask_vectype = NULL_TREE;
7489 if (gassign *assign = dyn_cast <gassign *> (stmt_info->stmt))
7491 scalar_dest = gimple_assign_lhs (assign);
7492 if (TREE_CODE (scalar_dest) != SSA_NAME)
7493 return false;
7495 tree_code code = gimple_assign_rhs_code (assign);
7496 if (code != ARRAY_REF
7497 && code != BIT_FIELD_REF
7498 && code != INDIRECT_REF
7499 && code != COMPONENT_REF
7500 && code != IMAGPART_EXPR
7501 && code != REALPART_EXPR
7502 && code != MEM_REF
7503 && TREE_CODE_CLASS (code) != tcc_declaration)
7504 return false;
7506 else
7508 gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
7509 if (!call || !gimple_call_internal_p (call))
7510 return false;
7512 internal_fn ifn = gimple_call_internal_fn (call);
7513 if (!internal_load_fn_p (ifn))
7514 return false;
7516 scalar_dest = gimple_call_lhs (call);
7517 if (!scalar_dest)
7518 return false;
7520 if (slp_node != NULL)
7522 if (dump_enabled_p ())
7523 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7524 "SLP of masked loads not supported.\n");
7525 return false;
7528 int mask_index = internal_fn_mask_index (ifn);
7529 if (mask_index >= 0)
7531 mask = gimple_call_arg (call, mask_index);
7532 if (!vect_check_load_store_mask (stmt_info, mask, &mask_dt,
7533 &mask_vectype))
7534 return false;
7538 if (!STMT_VINFO_DATA_REF (stmt_info))
7539 return false;
7541 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7542 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
7544 if (loop_vinfo)
7546 loop = LOOP_VINFO_LOOP (loop_vinfo);
7547 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt_info);
7548 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
7550 else
7551 vf = 1;
7553 /* Multiple types in SLP are handled by creating the appropriate number of
7554 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
7555 case of SLP. */
7556 if (slp)
7557 ncopies = 1;
7558 else
7559 ncopies = vect_get_num_copies (loop_vinfo, vectype);
7561 gcc_assert (ncopies >= 1);
7563 /* FORNOW. This restriction should be relaxed. */
7564 if (nested_in_vect_loop && ncopies > 1)
7566 if (dump_enabled_p ())
7567 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7568 "multiple types in nested loop.\n");
7569 return false;
7572 /* Invalidate assumptions made by dependence analysis when vectorization
7573 on the unrolled body effectively re-orders stmts. */
7574 if (ncopies > 1
7575 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
7576 && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
7577 STMT_VINFO_MIN_NEG_DIST (stmt_info)))
7579 if (dump_enabled_p ())
7580 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7581 "cannot perform implicit CSE when unrolling "
7582 "with negative dependence distance\n");
7583 return false;
7586 elem_type = TREE_TYPE (vectype);
7587 mode = TYPE_MODE (vectype);
7589 /* FORNOW. In some cases can vectorize even if data-type not supported
7590 (e.g. - data copies). */
7591 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
7593 if (dump_enabled_p ())
7594 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7595 "Aligned load, but unsupported type.\n");
7596 return false;
7599 /* Check if the load is a part of an interleaving chain. */
7600 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
7602 grouped_load = true;
7603 /* FORNOW */
7604 gcc_assert (!nested_in_vect_loop);
7605 gcc_assert (!STMT_VINFO_GATHER_SCATTER_P (stmt_info));
7607 first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
7608 group_size = DR_GROUP_SIZE (first_stmt_info);
7610 if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
7611 slp_perm = true;
7613 /* Invalidate assumptions made by dependence analysis when vectorization
7614 on the unrolled body effectively re-orders stmts. */
7615 if (!PURE_SLP_STMT (stmt_info)
7616 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
7617 && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
7618 STMT_VINFO_MIN_NEG_DIST (stmt_info)))
7620 if (dump_enabled_p ())
7621 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7622 "cannot perform implicit CSE when performing "
7623 "group loads with negative dependence distance\n");
7624 return false;
7627 /* Similarly, when the stmt is a load that is both part of an SLP
7628 instance and a loop-vectorized stmt via the same-dr mechanism,
7629 we have to give up. */
7630 if (DR_GROUP_SAME_DR_STMT (stmt_info)
7631 && (STMT_SLP_TYPE (stmt_info)
7632 != STMT_SLP_TYPE (DR_GROUP_SAME_DR_STMT (stmt_info))))
7634 if (dump_enabled_p ())
7635 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7636 "conflicting SLP types for CSEd load\n");
7637 return false;
7640 else
7641 group_size = 1;
7643 vect_memory_access_type memory_access_type;
7644 if (!get_load_store_type (stmt_info, vectype, slp, mask, VLS_LOAD, ncopies,
7645 &memory_access_type, &gs_info))
7646 return false;
7648 if (mask)
7650 if (memory_access_type == VMAT_CONTIGUOUS)
7652 machine_mode vec_mode = TYPE_MODE (vectype);
7653 if (!VECTOR_MODE_P (vec_mode)
7654 || !can_vec_mask_load_store_p (vec_mode,
7655 TYPE_MODE (mask_vectype), true))
7656 return false;
7658 else if (memory_access_type != VMAT_LOAD_STORE_LANES
7659 && memory_access_type != VMAT_GATHER_SCATTER)
7661 if (dump_enabled_p ())
7662 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7663 "unsupported access type for masked load.\n");
7664 return false;
7668 if (!vec_stmt) /* transformation not required. */
7670 if (!slp)
7671 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
7673 if (loop_vinfo
7674 && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
7675 check_load_store_masking (loop_vinfo, vectype, VLS_LOAD, group_size,
7676 memory_access_type, &gs_info);
7678 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
7679 vect_model_load_cost (stmt_info, ncopies, memory_access_type,
7680 slp_node_instance, slp_node, cost_vec);
7681 return true;
7684 if (!slp)
7685 gcc_assert (memory_access_type
7686 == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
7688 if (dump_enabled_p ())
7689 dump_printf_loc (MSG_NOTE, vect_location,
7690 "transform load. ncopies = %d\n", ncopies);
7692 /* Transform. */
7694 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info), *first_dr_info = NULL;
7695 ensure_base_align (dr_info);
7697 if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl)
7699 vect_build_gather_load_calls (stmt_info, gsi, vec_stmt, &gs_info, mask);
7700 return true;
7703 if (memory_access_type == VMAT_INVARIANT)
7705 gcc_assert (!grouped_load && !mask && !bb_vinfo);
7706 /* If we have versioned for aliasing or the loop doesn't
7707 have any data dependencies that would preclude this,
7708 then we are sure this is a loop invariant load and
7709 thus we can insert it on the preheader edge. */
7710 bool hoist_p = (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
7711 && !nested_in_vect_loop
7712 && hoist_defs_of_uses (stmt_info, loop));
7713 if (hoist_p)
7715 gassign *stmt = as_a <gassign *> (stmt_info->stmt);
7716 if (dump_enabled_p ())
7717 dump_printf_loc (MSG_NOTE, vect_location,
7718 "hoisting out of the vectorized loop: %G", stmt);
7719 scalar_dest = copy_ssa_name (scalar_dest);
7720 tree rhs = unshare_expr (gimple_assign_rhs1 (stmt));
7721 gsi_insert_on_edge_immediate
7722 (loop_preheader_edge (loop),
7723 gimple_build_assign (scalar_dest, rhs));
7725 /* These copies are all equivalent, but currently the representation
7726 requires a separate STMT_VINFO_VEC_STMT for each one. */
7727 prev_stmt_info = NULL;
7728 gimple_stmt_iterator gsi2 = *gsi;
7729 gsi_next (&gsi2);
7730 for (j = 0; j < ncopies; j++)
7732 stmt_vec_info new_stmt_info;
7733 if (hoist_p)
7735 new_temp = vect_init_vector (stmt_info, scalar_dest,
7736 vectype, NULL);
7737 gimple *new_stmt = SSA_NAME_DEF_STMT (new_temp);
7738 new_stmt_info = vinfo->add_stmt (new_stmt);
7740 else
7742 new_temp = vect_init_vector (stmt_info, scalar_dest,
7743 vectype, &gsi2);
7744 new_stmt_info = vinfo->lookup_def (new_temp);
7746 if (slp)
7747 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
7748 else if (j == 0)
7749 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
7750 else
7751 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
7752 prev_stmt_info = new_stmt_info;
7754 return true;
7757 if (memory_access_type == VMAT_ELEMENTWISE
7758 || memory_access_type == VMAT_STRIDED_SLP)
7760 gimple_stmt_iterator incr_gsi;
7761 bool insert_after;
7762 gimple *incr;
7763 tree offvar;
7764 tree ivstep;
7765 tree running_off;
7766 vec<constructor_elt, va_gc> *v = NULL;
7767 tree stride_base, stride_step, alias_off;
7768 /* Checked by get_load_store_type. */
7769 unsigned int const_nunits = nunits.to_constant ();
7770 unsigned HOST_WIDE_INT cst_offset = 0;
7772 gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
7773 gcc_assert (!nested_in_vect_loop);
7775 if (grouped_load)
7777 first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
7778 first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
7780 else
7782 first_stmt_info = stmt_info;
7783 first_dr_info = dr_info;
7785 if (slp && grouped_load)
7787 group_size = DR_GROUP_SIZE (first_stmt_info);
7788 ref_type = get_group_alias_ptr_type (first_stmt_info);
7790 else
7792 if (grouped_load)
7793 cst_offset
7794 = (tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)))
7795 * vect_get_place_in_interleaving_chain (stmt_info,
7796 first_stmt_info));
7797 group_size = 1;
7798 ref_type = reference_alias_ptr_type (DR_REF (dr_info->dr));
7801 stride_base
7802 = fold_build_pointer_plus
7803 (DR_BASE_ADDRESS (first_dr_info->dr),
7804 size_binop (PLUS_EXPR,
7805 convert_to_ptrofftype (DR_OFFSET (first_dr_info->dr)),
7806 convert_to_ptrofftype (DR_INIT (first_dr_info->dr))));
7807 stride_step = fold_convert (sizetype, DR_STEP (first_dr_info->dr));
7809 /* For a load with a loop-invariant (but non-power-of-2) stride
7810 (i.e. not a grouped access) like so:
7812 for (i = 0; i < n; i += stride)
7813 ... = array[i];
7815 we generate a new induction variable and new accesses to
7816 form a new vector (or vectors, depending on ncopies):
7818 for (j = 0; ; j += VF*stride)
7819 tmp1 = array[j];
7820 tmp2 = array[j + stride];
7822 vectemp = {tmp1, tmp2, ...}
7825 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
7826 build_int_cst (TREE_TYPE (stride_step), vf));
7828 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
7830 stride_base = cse_and_gimplify_to_preheader (loop_vinfo, stride_base);
7831 ivstep = cse_and_gimplify_to_preheader (loop_vinfo, ivstep);
7832 create_iv (stride_base, ivstep, NULL,
7833 loop, &incr_gsi, insert_after,
7834 &offvar, NULL);
7835 incr = gsi_stmt (incr_gsi);
7836 loop_vinfo->add_stmt (incr);
7838 stride_step = cse_and_gimplify_to_preheader (loop_vinfo, stride_step);
7840 prev_stmt_info = NULL;
7841 running_off = offvar;
7842 alias_off = build_int_cst (ref_type, 0);
7843 int nloads = const_nunits;
7844 int lnel = 1;
7845 tree ltype = TREE_TYPE (vectype);
7846 tree lvectype = vectype;
7847 auto_vec<tree> dr_chain;
7848 if (memory_access_type == VMAT_STRIDED_SLP)
7850 if (group_size < const_nunits)
7852 /* First check if vec_init optab supports construction from
7853 vector elts directly. */
7854 scalar_mode elmode = SCALAR_TYPE_MODE (TREE_TYPE (vectype));
7855 machine_mode vmode;
7856 if (mode_for_vector (elmode, group_size).exists (&vmode)
7857 && VECTOR_MODE_P (vmode)
7858 && targetm.vector_mode_supported_p (vmode)
7859 && (convert_optab_handler (vec_init_optab,
7860 TYPE_MODE (vectype), vmode)
7861 != CODE_FOR_nothing))
7863 nloads = const_nunits / group_size;
7864 lnel = group_size;
7865 ltype = build_vector_type (TREE_TYPE (vectype), group_size);
7867 else
7869 /* Otherwise avoid emitting a constructor of vector elements
7870 by performing the loads using an integer type of the same
7871 size, constructing a vector of those and then
7872 re-interpreting it as the original vector type.
7873 This avoids a huge runtime penalty due to the general
7874 inability to perform store forwarding from smaller stores
7875 to a larger load. */
7876 unsigned lsize
7877 = group_size * TYPE_PRECISION (TREE_TYPE (vectype));
7878 unsigned int lnunits = const_nunits / group_size;
7879 /* If we can't construct such a vector fall back to
7880 element loads of the original vector type. */
7881 if (int_mode_for_size (lsize, 0).exists (&elmode)
7882 && mode_for_vector (elmode, lnunits).exists (&vmode)
7883 && VECTOR_MODE_P (vmode)
7884 && targetm.vector_mode_supported_p (vmode)
7885 && (convert_optab_handler (vec_init_optab, vmode, elmode)
7886 != CODE_FOR_nothing))
7888 nloads = lnunits;
7889 lnel = group_size;
7890 ltype = build_nonstandard_integer_type (lsize, 1);
7891 lvectype = build_vector_type (ltype, nloads);
7895 else
7897 nloads = 1;
7898 lnel = const_nunits;
7899 ltype = vectype;
7901 ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
7903 /* Load vector(1) scalar_type directly if the vectype has just one element. */
7904 else if (nloads == 1)
7905 ltype = vectype;
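	  /* As a concrete (illustrative) example of the group_size < nunits
	     case above: for a group of 2 floats and a V4SF vectype, the
	     first strategy loads vector(2) float chunks and builds the
	     V4SF with vec_init; if that is unsupported, each 2-float chunk
	     is instead loaded as one 64-bit integer, a 2-element integer
	     vector is built from those, and the result is VIEW_CONVERTed
	     back to V4SF.  */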
7907 if (slp)
7909 /* For SLP permutation support we need to load the whole group,
7910 not only the number of vector stmts the permutation result
7911 fits in. */
7912 if (slp_perm)
7914 /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for
7915 variable VF. */
7916 unsigned int const_vf = vf.to_constant ();
7917 ncopies = CEIL (group_size * const_vf, const_nunits);
7918 dr_chain.create (ncopies);
7920 else
7921 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7923 unsigned int group_el = 0;
7924 unsigned HOST_WIDE_INT
7925 elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
7926 for (j = 0; j < ncopies; j++)
7928 if (nloads > 1)
7929 vec_alloc (v, nloads);
7930 stmt_vec_info new_stmt_info = NULL;
7931 for (i = 0; i < nloads; i++)
7933 tree this_off = build_int_cst (TREE_TYPE (alias_off),
7934 group_el * elsz + cst_offset);
7935 tree data_ref = build2 (MEM_REF, ltype, running_off, this_off);
7936 vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr));
7937 gassign *new_stmt
7938 = gimple_build_assign (make_ssa_name (ltype), data_ref);
7939 new_stmt_info
7940 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
7941 if (nloads > 1)
7942 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE,
7943 gimple_assign_lhs (new_stmt));
7945 group_el += lnel;
7946 if (! slp
7947 || group_el == group_size)
7949 tree newoff = copy_ssa_name (running_off);
7950 gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
7951 running_off, stride_step);
7952 vect_finish_stmt_generation (stmt_info, incr, gsi);
7954 running_off = newoff;
7955 group_el = 0;
7958 if (nloads > 1)
7960 tree vec_inv = build_constructor (lvectype, v);
7961 new_temp = vect_init_vector (stmt_info, vec_inv, lvectype, gsi);
7962 new_stmt_info = vinfo->lookup_def (new_temp);
7963 if (lvectype != vectype)
7965 gassign *new_stmt
7966 = gimple_build_assign (make_ssa_name (vectype),
7967 VIEW_CONVERT_EXPR,
7968 build1 (VIEW_CONVERT_EXPR,
7969 vectype, new_temp));
7970 new_stmt_info
7971 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
7975 if (slp)
7977 if (slp_perm)
7978 dr_chain.quick_push (gimple_assign_lhs (new_stmt_info->stmt));
7979 else
7980 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
7982 else
7984 if (j == 0)
7985 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
7986 else
7987 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
7988 prev_stmt_info = new_stmt_info;
7991 if (slp_perm)
7993 unsigned n_perms;
7994 vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
7995 slp_node_instance, false, &n_perms);
7997 return true;
8000 if (memory_access_type == VMAT_GATHER_SCATTER
8001 || (!slp && memory_access_type == VMAT_CONTIGUOUS))
8002 grouped_load = false;
8004 if (grouped_load)
8006 first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
8007 group_size = DR_GROUP_SIZE (first_stmt_info);
8008 /* For SLP vectorization we directly vectorize a subchain
8009 without permutation. */
8010 if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
8011 first_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[0];
8012 /* For BB vectorization always use the first stmt to base
8013 the data ref pointer on. */
8014 if (bb_vinfo)
8015 first_stmt_info_for_drptr = SLP_TREE_SCALAR_STMTS (slp_node)[0];
8017 /* Check if the chain of loads is already vectorized. */
8018 if (STMT_VINFO_VEC_STMT (first_stmt_info)
8019 /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
8020 ??? But we can only do so if there is exactly one
8021 as we have no way to get at the rest. Leave the CSE
8022 opportunity alone.
8023 ??? With the group load eventually participating
8024 in multiple different permutations (having multiple
8025 slp nodes which refer to the same group) the CSE
8026 is even wrong code. See PR56270. */
8027 && !slp)
8029 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
8030 return true;
8032 first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
8033 group_gap_adj = 0;
8035 /* VEC_NUM is the number of vect stmts to be created for this group. */
8036 if (slp)
8038 grouped_load = false;
8039 /* If an SLP permutation is from N elements to N elements,
8040 and if one vector holds a whole number of N-element groups, we can
8041 load the inputs to the permutation in the same way as an
8042 unpermuted sequence. In other cases we need to load the
8043 whole group, not only the number of vector stmts the
8044 permutation result fits in. */
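      /* For instance (illustrative): with a V4SI vectype, a permutation of
	 a 2-element group onto 2-element groups lets every vector cover
	 two whole groups, so the unpermuted loads can be reused; with a
	 3-element group nunits is not a multiple of the group size and the
	 whole group has to be loaded.  */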
8045 if (slp_perm
8046 && (group_size != SLP_INSTANCE_GROUP_SIZE (slp_node_instance)
8047 || !multiple_p (nunits, group_size)))
8049 /* We don't yet generate such SLP_TREE_LOAD_PERMUTATIONs for
8050 variable VF; see vect_transform_slp_perm_load. */
8051 unsigned int const_vf = vf.to_constant ();
8052 unsigned int const_nunits = nunits.to_constant ();
8053 vec_num = CEIL (group_size * const_vf, const_nunits);
8054 group_gap_adj = vf * group_size - nunits * vec_num;
8056 else
8058 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
8059 group_gap_adj
8060 = group_size - SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
8063 else
8064 vec_num = group_size;
8066 ref_type = get_group_alias_ptr_type (first_stmt_info);
8068 else
8070 first_stmt_info = stmt_info;
8071 first_dr_info = dr_info;
8072 group_size = vec_num = 1;
8073 group_gap_adj = 0;
8074 ref_type = reference_alias_ptr_type (DR_REF (first_dr_info->dr));
8077 alignment_support_scheme
8078 = vect_supportable_dr_alignment (first_dr_info, false);
8079 gcc_assert (alignment_support_scheme);
8080 vec_loop_masks *loop_masks
8081 = (loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
8082 ? &LOOP_VINFO_MASKS (loop_vinfo)
8083 : NULL);
8084 /* Targets with load-lane instructions must not require explicit
8085 realignment. vect_supportable_dr_alignment always returns either
8086 dr_aligned or dr_unaligned_supported for masked operations. */
8087 gcc_assert ((memory_access_type != VMAT_LOAD_STORE_LANES
8088 && !mask
8089 && !loop_masks)
8090 || alignment_support_scheme == dr_aligned
8091 || alignment_support_scheme == dr_unaligned_supported);
8093 /* In case the vectorization factor (VF) is bigger than the number
8094 of elements that we can fit in a vectype (nunits), we have to generate
8095 more than one vector stmt - i.e. - we need to "unroll" the
8096 vector stmt by a factor VF/nunits. In doing so, we record a pointer
8097 from one copy of the vector stmt to the next, in the field
8098 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
8099 stages to find the correct vector defs to be used when vectorizing
8100 stmts that use the defs of the current stmt. The example below
8101 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
8102 need to create 4 vectorized stmts):
8104 before vectorization:
8105 RELATED_STMT VEC_STMT
8106 S1: x = memref - -
8107 S2: z = x + 1 - -
8109 step 1: vectorize stmt S1:
8110 We first create the vector stmt VS1_0, and, as usual, record a
8111 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
8112 Next, we create the vector stmt VS1_1, and record a pointer to
8113 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
8114 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
8115 stmts and pointers:
8116 RELATED_STMT VEC_STMT
8117 VS1_0: vx0 = memref0 VS1_1 -
8118 VS1_1: vx1 = memref1 VS1_2 -
8119 VS1_2: vx2 = memref2 VS1_3 -
8120 VS1_3: vx3 = memref3 - -
8121 S1: x = load - VS1_0
8122 S2: z = x + 1 - -
8124 See in documentation in vect_get_vec_def_for_stmt_copy for how the
8125 information we recorded in RELATED_STMT field is used to vectorize
8126 stmt S2. */
8128 /* In case of interleaving (non-unit grouped access):
8130 S1: x2 = &base + 2
8131 S2: x0 = &base
8132 S3: x1 = &base + 1
8133 S4: x3 = &base + 3
8135 Vectorized loads are created in the order of memory accesses
8136 starting from the access of the first stmt of the chain:
8138 VS1: vx0 = &base
8139 VS2: vx1 = &base + vec_size*1
8140 VS3: vx3 = &base + vec_size*2
8141 VS4: vx4 = &base + vec_size*3
8143 Then permutation statements are generated:
8145 VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
8146 VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
8149 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
8150 (the order of the data-refs in the output of vect_permute_load_chain
8151 corresponds to the order of scalar stmts in the interleaving chain - see
8152 the documentation of vect_permute_load_chain()).
8153 The generation of permutation stmts and recording them in
8154 STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().
8156 In case of both multiple types and interleaving, the vector loads and
8157 permutation stmts above are created for every copy. The result vector
8158 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
8159 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
8161 /* If the data reference is aligned (dr_aligned) or potentially unaligned
8162 on a target that supports unaligned accesses (dr_unaligned_supported)
8163 we generate the following code:
8164 p = initial_addr;
8165 indx = 0;
8166 loop {
8167 p = p + indx * vectype_size;
8168 vec_dest = *(p);
8169 indx = indx + 1;
8172 Otherwise, the data reference is potentially unaligned on a target that
8173 does not support unaligned accesses (dr_explicit_realign_optimized) -
8174 then generate the following code, in which the data in each iteration is
8175 obtained by two vector loads, one from the previous iteration, and one
8176 from the current iteration:
8177 p1 = initial_addr;
8178 msq_init = *(floor(p1))
8179 p2 = initial_addr + VS - 1;
8180 realignment_token = call target_builtin;
8181 indx = 0;
8182 loop {
8183 p2 = p2 + indx * vectype_size
8184 lsq = *(floor(p2))
8185 vec_dest = realign_load (msq, lsq, realignment_token)
8186 indx = indx + 1;
8187 msq = lsq;
8188 } */
8190 /* If the misalignment remains the same throughout the execution of the
8191 loop, we can create the init_addr and permutation mask at the loop
8192 preheader. Otherwise, it needs to be created inside the loop.
8193 This can only occur when vectorizing memory accesses in the inner-loop
8194 nested within an outer-loop that is being vectorized. */
8196 if (nested_in_vect_loop
8197 && !multiple_p (DR_STEP_ALIGNMENT (dr_info->dr),
8198 GET_MODE_SIZE (TYPE_MODE (vectype))))
8200 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
8201 compute_in_loop = true;
8204 if ((alignment_support_scheme == dr_explicit_realign_optimized
8205 || alignment_support_scheme == dr_explicit_realign)
8206 && !compute_in_loop)
8208 msq = vect_setup_realignment (first_stmt_info, gsi, &realignment_token,
8209 alignment_support_scheme, NULL_TREE,
8210 &at_loop);
8211 if (alignment_support_scheme == dr_explicit_realign_optimized)
8213 phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
8214 byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
8215 size_one_node);
8218 else
8219 at_loop = loop;
8221 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
8222 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
8224 tree bump;
8225 tree vec_offset = NULL_TREE;
8226 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
8228 aggr_type = NULL_TREE;
8229 bump = NULL_TREE;
8231 else if (memory_access_type == VMAT_GATHER_SCATTER)
8233 aggr_type = elem_type;
8234 vect_get_strided_load_store_ops (stmt_info, loop_vinfo, &gs_info,
8235 &bump, &vec_offset);
8237 else
8239 if (memory_access_type == VMAT_LOAD_STORE_LANES)
8240 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
8241 else
8242 aggr_type = vectype;
8243 bump = vect_get_data_ptr_increment (dr_info, aggr_type,
8244 memory_access_type);
8247 tree vec_mask = NULL_TREE;
8248 prev_stmt_info = NULL;
8249 poly_uint64 group_elt = 0;
8250 for (j = 0; j < ncopies; j++)
8252 stmt_vec_info new_stmt_info = NULL;
8253 /* 1. Create the vector or array pointer update chain. */
8254 if (j == 0)
8256 bool simd_lane_access_p
8257 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
8258 if (simd_lane_access_p
8259 && TREE_CODE (DR_BASE_ADDRESS (first_dr_info->dr)) == ADDR_EXPR
8260 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr_info->dr), 0))
8261 && integer_zerop (DR_OFFSET (first_dr_info->dr))
8262 && integer_zerop (DR_INIT (first_dr_info->dr))
8263 && alias_sets_conflict_p (get_alias_set (aggr_type),
8264 get_alias_set (TREE_TYPE (ref_type)))
8265 && (alignment_support_scheme == dr_aligned
8266 || alignment_support_scheme == dr_unaligned_supported))
8268 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr_info->dr));
8269 dataref_offset = build_int_cst (ref_type, 0);
8271 else if (first_stmt_info_for_drptr
8272 && first_stmt_info != first_stmt_info_for_drptr)
8274 dataref_ptr
8275 = vect_create_data_ref_ptr (first_stmt_info_for_drptr,
8276 aggr_type, at_loop, offset, &dummy,
8277 gsi, &ptr_incr, simd_lane_access_p,
8278 byte_offset, bump);
8279 /* Adjust the pointer by the difference to first_stmt. */
8280 data_reference_p ptrdr
8281 = STMT_VINFO_DATA_REF (first_stmt_info_for_drptr);
8282 tree diff
8283 = fold_convert (sizetype,
8284 size_binop (MINUS_EXPR,
8285 DR_INIT (first_dr_info->dr),
8286 DR_INIT (ptrdr)));
8287 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
8288 stmt_info, diff);
8290 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
8291 vect_get_gather_scatter_ops (loop, stmt_info, &gs_info,
8292 &dataref_ptr, &vec_offset);
8293 else
8294 dataref_ptr
8295 = vect_create_data_ref_ptr (first_stmt_info, aggr_type, at_loop,
8296 offset, &dummy, gsi, &ptr_incr,
8297 simd_lane_access_p,
8298 byte_offset, bump);
8299 if (mask)
8300 vec_mask = vect_get_vec_def_for_operand (mask, stmt_info,
8301 mask_vectype);
8303 else
8305 if (dataref_offset)
8306 dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
8307 bump);
8308 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
8309 vec_offset = vect_get_vec_def_for_stmt_copy (vinfo, vec_offset);
8310 else
8311 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
8312 stmt_info, bump);
8313 if (mask)
8314 vec_mask = vect_get_vec_def_for_stmt_copy (vinfo, vec_mask);
8317 if (grouped_load || slp_perm)
8318 dr_chain.create (vec_num);
8320 if (memory_access_type == VMAT_LOAD_STORE_LANES)
8322 tree vec_array;
8324 vec_array = create_vector_array (vectype, vec_num);
8326 tree final_mask = NULL_TREE;
8327 if (loop_masks)
8328 final_mask = vect_get_loop_mask (gsi, loop_masks, ncopies,
8329 vectype, j);
8330 if (vec_mask)
8331 final_mask = prepare_load_store_mask (mask_vectype, final_mask,
8332 vec_mask, gsi);
8334 gcall *call;
8335 if (final_mask)
8337 /* Emit:
8338 VEC_ARRAY = MASK_LOAD_LANES (DATAREF_PTR, ALIAS_PTR,
8339 VEC_MASK). */
8340 unsigned int align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
8341 tree alias_ptr = build_int_cst (ref_type, align);
8342 call = gimple_build_call_internal (IFN_MASK_LOAD_LANES, 3,
8343 dataref_ptr, alias_ptr,
8344 final_mask);
8346 else
8348 /* Emit:
8349 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
8350 data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
8351 call = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
8353 gimple_call_set_lhs (call, vec_array);
8354 gimple_call_set_nothrow (call, true);
8355 new_stmt_info = vect_finish_stmt_generation (stmt_info, call, gsi);
8357 /* Extract each vector into an SSA_NAME. */
8358 for (i = 0; i < vec_num; i++)
8360 new_temp = read_vector_array (stmt_info, gsi, scalar_dest,
8361 vec_array, i);
8362 dr_chain.quick_push (new_temp);
8365 /* Record the mapping between SSA_NAMEs and statements. */
8366 vect_record_grouped_load_vectors (stmt_info, dr_chain);
8368 /* Record that VEC_ARRAY is now dead. */
8369 vect_clobber_variable (stmt_info, gsi, vec_array);
8371 else
8373 for (i = 0; i < vec_num; i++)
8375 tree final_mask = NULL_TREE;
8376 if (loop_masks
8377 && memory_access_type != VMAT_INVARIANT)
8378 final_mask = vect_get_loop_mask (gsi, loop_masks,
8379 vec_num * ncopies,
8380 vectype, vec_num * j + i);
8381 if (vec_mask)
8382 final_mask = prepare_load_store_mask (mask_vectype, final_mask,
8383 vec_mask, gsi);
8385 if (i > 0)
8386 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
8387 stmt_info, bump);
8389 /* 2. Create the vector-load in the loop. */
8390 gimple *new_stmt = NULL;
8391 switch (alignment_support_scheme)
8393 case dr_aligned:
8394 case dr_unaligned_supported:
8396 unsigned int misalign;
8397 unsigned HOST_WIDE_INT align;
8399 if (memory_access_type == VMAT_GATHER_SCATTER)
8401 tree scale = size_int (gs_info.scale);
8402 gcall *call;
8403 if (loop_masks)
8404 call = gimple_build_call_internal
8405 (IFN_MASK_GATHER_LOAD, 4, dataref_ptr,
8406 vec_offset, scale, final_mask);
8407 else
8408 call = gimple_build_call_internal
8409 (IFN_GATHER_LOAD, 3, dataref_ptr,
8410 vec_offset, scale);
8411 gimple_call_set_nothrow (call, true);
8412 new_stmt = call;
8413 data_ref = NULL_TREE;
8414 break;
8417 align =
8418 known_alignment (DR_TARGET_ALIGNMENT (first_dr_info));
8419 if (alignment_support_scheme == dr_aligned)
8421 gcc_assert (aligned_access_p (first_dr_info));
8422 misalign = 0;
8424 else if (DR_MISALIGNMENT (first_dr_info) == -1)
8426 align = dr_alignment
8427 (vect_dr_behavior (first_dr_info));
8428 misalign = 0;
8430 else
8431 misalign = DR_MISALIGNMENT (first_dr_info);
8432 if (dataref_offset == NULL_TREE
8433 && TREE_CODE (dataref_ptr) == SSA_NAME)
8434 set_ptr_info_alignment (get_ptr_info (dataref_ptr),
8435 align, misalign);
8437 if (final_mask)
8439 align = least_bit_hwi (misalign | align);
8440 tree ptr = build_int_cst (ref_type, align);
8441 gcall *call
8442 = gimple_build_call_internal (IFN_MASK_LOAD, 3,
8443 dataref_ptr, ptr,
8444 final_mask);
8445 gimple_call_set_nothrow (call, true);
8446 new_stmt = call;
8447 data_ref = NULL_TREE;
8449 else
8451 data_ref
8452 = fold_build2 (MEM_REF, vectype, dataref_ptr,
8453 dataref_offset
8454 ? dataref_offset
8455 : build_int_cst (ref_type, 0));
8456 if (alignment_support_scheme == dr_aligned)
8458 else if (DR_MISALIGNMENT (first_dr_info) == -1)
8459 TREE_TYPE (data_ref)
8460 = build_aligned_type (TREE_TYPE (data_ref),
8461 align * BITS_PER_UNIT);
8462 else
8463 TREE_TYPE (data_ref)
8464 = build_aligned_type (TREE_TYPE (data_ref),
8465 TYPE_ALIGN (elem_type));
8467 break;
8469 case dr_explicit_realign:
8471 tree ptr, bump;
8473 tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));
8475 if (compute_in_loop)
8476 msq = vect_setup_realignment (first_stmt_info, gsi,
8477 &realignment_token,
8478 dr_explicit_realign,
8479 dataref_ptr, NULL);
8481 if (TREE_CODE (dataref_ptr) == SSA_NAME)
8482 ptr = copy_ssa_name (dataref_ptr);
8483 else
8484 ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
8485 // For explicit realign the target alignment should be
8486 // known at compile time.
8487 unsigned HOST_WIDE_INT align =
8488 DR_TARGET_ALIGNMENT (first_dr_info).to_constant ();
8489 new_stmt = gimple_build_assign
8490 (ptr, BIT_AND_EXPR, dataref_ptr,
8491 build_int_cst
8492 (TREE_TYPE (dataref_ptr),
8493 -(HOST_WIDE_INT) align));
8494 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
8495 data_ref
8496 = build2 (MEM_REF, vectype, ptr,
8497 build_int_cst (ref_type, 0));
8498 vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr));
8499 vec_dest = vect_create_destination_var (scalar_dest,
8500 vectype);
8501 new_stmt = gimple_build_assign (vec_dest, data_ref);
8502 new_temp = make_ssa_name (vec_dest, new_stmt);
8503 gimple_assign_set_lhs (new_stmt, new_temp);
8504 gimple_set_vdef (new_stmt, gimple_vdef (stmt_info->stmt));
8505 gimple_set_vuse (new_stmt, gimple_vuse (stmt_info->stmt));
8506 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
8507 msq = new_temp;
8509 bump = size_binop (MULT_EXPR, vs,
8510 TYPE_SIZE_UNIT (elem_type));
8511 bump = size_binop (MINUS_EXPR, bump, size_one_node);
8512 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi,
8513 stmt_info, bump);
8514 new_stmt = gimple_build_assign
8515 (NULL_TREE, BIT_AND_EXPR, ptr,
8516 build_int_cst
8517 (TREE_TYPE (ptr), -(HOST_WIDE_INT) align));
8518 ptr = copy_ssa_name (ptr, new_stmt);
8519 gimple_assign_set_lhs (new_stmt, ptr);
8520 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
8521 data_ref
8522 = build2 (MEM_REF, vectype, ptr,
8523 build_int_cst (ref_type, 0));
8524 break;
8526 case dr_explicit_realign_optimized:
8528 if (TREE_CODE (dataref_ptr) == SSA_NAME)
8529 new_temp = copy_ssa_name (dataref_ptr);
8530 else
8531 new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
8532 // We should only be doing this if we know the target
8533 // alignment at compile time.
8534 unsigned HOST_WIDE_INT align =
8535 DR_TARGET_ALIGNMENT (first_dr_info).to_constant ();
8536 new_stmt = gimple_build_assign
8537 (new_temp, BIT_AND_EXPR, dataref_ptr,
8538 build_int_cst (TREE_TYPE (dataref_ptr),
8539 -(HOST_WIDE_INT) align));
8540 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
8541 data_ref
8542 = build2 (MEM_REF, vectype, new_temp,
8543 build_int_cst (ref_type, 0));
8544 break;
8546 default:
8547 gcc_unreachable ();
8549 vec_dest = vect_create_destination_var (scalar_dest, vectype);
8550 /* DATA_REF is null if we've already built the statement. */
8551 if (data_ref)
8553 vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr));
8554 new_stmt = gimple_build_assign (vec_dest, data_ref);
8556 new_temp = make_ssa_name (vec_dest, new_stmt);
8557 gimple_set_lhs (new_stmt, new_temp);
8558 new_stmt_info
8559 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
8561 /* 3. Handle explicit realignment if necessary/supported.
8562 Create in loop:
8563 vec_dest = realign_load (msq, lsq, realignment_token) */
8564 if (alignment_support_scheme == dr_explicit_realign_optimized
8565 || alignment_support_scheme == dr_explicit_realign)
8567 lsq = gimple_assign_lhs (new_stmt);
8568 if (!realignment_token)
8569 realignment_token = dataref_ptr;
8570 vec_dest = vect_create_destination_var (scalar_dest, vectype);
8571 new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
8572 msq, lsq, realignment_token);
8573 new_temp = make_ssa_name (vec_dest, new_stmt);
8574 gimple_assign_set_lhs (new_stmt, new_temp);
8575 new_stmt_info
8576 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
8578 if (alignment_support_scheme == dr_explicit_realign_optimized)
8580 gcc_assert (phi);
8581 if (i == vec_num - 1 && j == ncopies - 1)
8582 add_phi_arg (phi, lsq,
8583 loop_latch_edge (containing_loop),
8584 UNKNOWN_LOCATION);
8585 msq = lsq;
8589 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
8591 tree perm_mask = perm_mask_for_reverse (vectype);
8592 new_temp = permute_vec_elements (new_temp, new_temp,
8593 perm_mask, stmt_info, gsi);
8594 new_stmt_info = vinfo->lookup_def (new_temp);
8597 /* Collect vector loads and later create their permutation in
8598 vect_transform_grouped_load (). */
8599 if (grouped_load || slp_perm)
8600 dr_chain.quick_push (new_temp);
8602 /* Store vector loads in the corresponding SLP_NODE. */
8603 if (slp && !slp_perm)
8604 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
8606 /* With an SLP permutation we load the gaps as well; without
8607 one we need to skip the gaps after we manage to fully load
8608 all elements. group_gap_adj is DR_GROUP_SIZE here. */
8609 group_elt += nunits;
8610 if (maybe_ne (group_gap_adj, 0U)
8611 && !slp_perm
8612 && known_eq (group_elt, group_size - group_gap_adj))
8614 poly_wide_int bump_val
8615 = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
8616 * group_gap_adj);
8617 tree bump = wide_int_to_tree (sizetype, bump_val);
8618 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
8619 stmt_info, bump);
8620 group_elt = 0;
8623 /* Bump the vector pointer to account for a gap or for excess
8624 elements loaded for a permuted SLP load. */
8625 if (maybe_ne (group_gap_adj, 0U) && slp_perm)
8627 poly_wide_int bump_val
8628 = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
8629 * group_gap_adj);
8630 tree bump = wide_int_to_tree (sizetype, bump_val);
8631 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
8632 stmt_info, bump);
8636 if (slp && !slp_perm)
8637 continue;
8639 if (slp_perm)
8641 unsigned n_perms;
8642 if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
8643 slp_node_instance, false,
8644 &n_perms))
8646 dr_chain.release ();
8647 return false;
8650 else
8652 if (grouped_load)
8654 if (memory_access_type != VMAT_LOAD_STORE_LANES)
8655 vect_transform_grouped_load (stmt_info, dr_chain,
8656 group_size, gsi);
8657 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
8659 else
8661 if (j == 0)
8662 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
8663 else
8664 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
8665 prev_stmt_info = new_stmt_info;
8668 dr_chain.release ();
8671 return true;
8674 /* Function vect_is_simple_cond.
8676 Input:
8677 VINFO - the vec_info of the loop or basic block being vectorized.
8678 COND - Condition that is checked for simple use.
8680 Output:
8681 *COMP_VECTYPE - the vector type for the comparison.
8682 *DTS - The def types for the arguments of the comparison.
8684 Returns whether a COND can be vectorized. Checks whether the
8685 condition operands are supportable using vect_is_simple_use. */
8687 static bool
8688 vect_is_simple_cond (tree cond, vec_info *vinfo,
8689 tree *comp_vectype, enum vect_def_type *dts,
8690 tree vectype)
8692 tree lhs, rhs;
8693 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
8695 /* Mask case. */
8696 if (TREE_CODE (cond) == SSA_NAME
8697 && VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (cond)))
8699 if (!vect_is_simple_use (cond, vinfo, &dts[0], comp_vectype)
8700 || !*comp_vectype
8701 || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype))
8702 return false;
8703 return true;
8706 if (!COMPARISON_CLASS_P (cond))
8707 return false;
8709 lhs = TREE_OPERAND (cond, 0);
8710 rhs = TREE_OPERAND (cond, 1);
8712 if (TREE_CODE (lhs) == SSA_NAME)
8714 if (!vect_is_simple_use (lhs, vinfo, &dts[0], &vectype1))
8715 return false;
8717 else if (TREE_CODE (lhs) == INTEGER_CST || TREE_CODE (lhs) == REAL_CST
8718 || TREE_CODE (lhs) == FIXED_CST)
8719 dts[0] = vect_constant_def;
8720 else
8721 return false;
8723 if (TREE_CODE (rhs) == SSA_NAME)
8725 if (!vect_is_simple_use (rhs, vinfo, &dts[1], &vectype2))
8726 return false;
8728 else if (TREE_CODE (rhs) == INTEGER_CST || TREE_CODE (rhs) == REAL_CST
8729 || TREE_CODE (rhs) == FIXED_CST)
8730 dts[1] = vect_constant_def;
8731 else
8732 return false;
8734 if (vectype1 && vectype2
8735 && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1),
8736 TYPE_VECTOR_SUBPARTS (vectype2)))
8737 return false;
8739 *comp_vectype = vectype1 ? vectype1 : vectype2;
8740 /* Invariant comparison. */
8741 if (! *comp_vectype && vectype)
8743 tree scalar_type = TREE_TYPE (lhs);
8744 /* If we can widen the comparison to match vectype do so. */
8745 if (INTEGRAL_TYPE_P (scalar_type)
8746 && tree_int_cst_lt (TYPE_SIZE (scalar_type),
8747 TYPE_SIZE (TREE_TYPE (vectype))))
8748 scalar_type = build_nonstandard_integer_type
8749 (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (vectype))),
8750 TYPE_UNSIGNED (scalar_type));
8751 *comp_vectype = get_vectype_for_scalar_type (scalar_type);
8754 return true;
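/* Illustrative examples of conditions the above accepts, assuming the
   operands pass vect_is_simple_use:

     _1 < _2		comparison of two SSA names
     _1 != 0		SSA name against an integer constant
     mask_3		a scalar-boolean SSA name used directly as a mask

   A comparison whose two vector operand types have a different number of
   subparts is rejected.  */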
8757 /* vectorizable_condition.
8759 Check if STMT_INFO is a conditional modify expression that can be vectorized.
8760 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
8761 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
8762 at GSI.
8764 When STMT_INFO is vectorized as a nested cycle, for_reduction is true.
8766 Return true if STMT_INFO is vectorizable in this way. */
8768 bool
8769 vectorizable_condition (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
8770 stmt_vec_info *vec_stmt, bool for_reduction,
8771 slp_tree slp_node, stmt_vector_for_cost *cost_vec)
8773 vec_info *vinfo = stmt_info->vinfo;
8774 tree scalar_dest = NULL_TREE;
8775 tree vec_dest = NULL_TREE;
8776 tree cond_expr, cond_expr0 = NULL_TREE, cond_expr1 = NULL_TREE;
8777 tree then_clause, else_clause;
8778 tree comp_vectype = NULL_TREE;
8779 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
8780 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
8781 tree vec_compare;
8782 tree new_temp;
8783 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
8784 enum vect_def_type dts[4]
8785 = {vect_unknown_def_type, vect_unknown_def_type,
8786 vect_unknown_def_type, vect_unknown_def_type};
8787 int ndts = 4;
8788 int ncopies;
8789 enum tree_code code, cond_code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
8790 stmt_vec_info prev_stmt_info = NULL;
8791 int i, j;
8792 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
8793 vec<tree> vec_oprnds0 = vNULL;
8794 vec<tree> vec_oprnds1 = vNULL;
8795 vec<tree> vec_oprnds2 = vNULL;
8796 vec<tree> vec_oprnds3 = vNULL;
8797 tree vec_cmp_type;
8798 bool masked = false;
8800 if (for_reduction && STMT_SLP_TYPE (stmt_info))
8801 return false;
8803 vect_reduction_type reduction_type
8804 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info);
8805 if (reduction_type == TREE_CODE_REDUCTION)
8807 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
8808 return false;
8810 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
8811 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
8812 && for_reduction))
8813 return false;
8815 /* FORNOW: not yet supported. */
8816 if (STMT_VINFO_LIVE_P (stmt_info))
8818 if (dump_enabled_p ())
8819 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8820 "value used after loop.\n");
8821 return false;
8825 /* Is vectorizable conditional operation? */
8826 gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
8827 if (!stmt)
8828 return false;
8830 code = gimple_assign_rhs_code (stmt);
8832 if (code != COND_EXPR)
8833 return false;
8835 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
8836 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
8838 if (slp_node)
8839 ncopies = 1;
8840 else
8841 ncopies = vect_get_num_copies (loop_vinfo, vectype);
8843 gcc_assert (ncopies >= 1);
8844 if (for_reduction && ncopies > 1)
8845 return false; /* FORNOW */
8847 cond_expr = gimple_assign_rhs1 (stmt);
8848 then_clause = gimple_assign_rhs2 (stmt);
8849 else_clause = gimple_assign_rhs3 (stmt);
8851 if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo,
8852 &comp_vectype, &dts[0], slp_node ? NULL : vectype)
8853 || !comp_vectype)
8854 return false;
8856 if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &dts[2], &vectype1))
8857 return false;
8858 if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &dts[3], &vectype2))
8859 return false;
8861 if (vectype1 && !useless_type_conversion_p (vectype, vectype1))
8862 return false;
8864 if (vectype2 && !useless_type_conversion_p (vectype, vectype2))
8865 return false;
8867 masked = !COMPARISON_CLASS_P (cond_expr);
8868 vec_cmp_type = build_same_sized_truth_vector_type (comp_vectype);
8870 if (vec_cmp_type == NULL_TREE)
8871 return false;
8873 cond_code = TREE_CODE (cond_expr);
8874 if (!masked)
8876 cond_expr0 = TREE_OPERAND (cond_expr, 0);
8877 cond_expr1 = TREE_OPERAND (cond_expr, 1);
8880 if (!masked && VECTOR_BOOLEAN_TYPE_P (comp_vectype))
8882 /* Boolean values may have another representation in vectors
8883 and therefore we prefer bit operations over comparison for
8884 them (which also works for scalar masks). We store opcodes
8885 to use in bitop1 and bitop2. The statement is vectorized as
8886 BITOP2 (rhs1 BITOP1 rhs2) or rhs1 BITOP2 (BITOP1 rhs2)
8887 depending on bitop1 and bitop2 arity. */
8888 switch (cond_code)
8890 case GT_EXPR:
8891 bitop1 = BIT_NOT_EXPR;
8892 bitop2 = BIT_AND_EXPR;
8893 break;
8894 case GE_EXPR:
8895 bitop1 = BIT_NOT_EXPR;
8896 bitop2 = BIT_IOR_EXPR;
8897 break;
8898 case LT_EXPR:
8899 bitop1 = BIT_NOT_EXPR;
8900 bitop2 = BIT_AND_EXPR;
8901 std::swap (cond_expr0, cond_expr1);
8902 break;
8903 case LE_EXPR:
8904 bitop1 = BIT_NOT_EXPR;
8905 bitop2 = BIT_IOR_EXPR;
8906 std::swap (cond_expr0, cond_expr1);
8907 break;
8908 case NE_EXPR:
8909 bitop1 = BIT_XOR_EXPR;
8910 break;
8911 case EQ_EXPR:
8912 bitop1 = BIT_XOR_EXPR;
8913 bitop2 = BIT_NOT_EXPR;
8914 break;
8915 default:
8916 return false;
8918 cond_code = SSA_NAME;
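      /* For example, with boolean operands a_1 > b_2 becomes a_1 & ~b_2,
	 a_1 >= b_2 becomes a_1 | ~b_2 and a_1 == b_2 becomes
	 ~(a_1 ^ b_2), matching the bitop1/bitop2 pairs above.  */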
8921 if (!vec_stmt)
8923 if (bitop1 != NOP_EXPR)
8925 machine_mode mode = TYPE_MODE (comp_vectype);
8926 optab optab;
8928 optab = optab_for_tree_code (bitop1, comp_vectype, optab_default);
8929 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8930 return false;
8932 if (bitop2 != NOP_EXPR)
8934 optab = optab_for_tree_code (bitop2, comp_vectype,
8935 optab_default);
8936 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8937 return false;
8940 if (expand_vec_cond_expr_p (vectype, comp_vectype,
8941 cond_code))
8943 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
8944 vect_model_simple_cost (stmt_info, ncopies, dts, ndts, slp_node,
8945 cost_vec);
8946 return true;
8948 return false;
8951 /* Transform. */
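  /* For a scalar statement like x_1 = a_2 < b_3 ? c_4 : d_5 (illustrative
     names) the code below emits, in the plain case,

       vx_6 = VEC_COND_EXPR <va_7 < vb_8, vc_9, vd_10>;

     while EXTRACT_LAST_REDUCTION instead emits an IFN_FOLD_EXTRACT_LAST
     call and boolean comparisons are first lowered to bit operations.  */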
8953 if (!slp_node)
8955 vec_oprnds0.create (1);
8956 vec_oprnds1.create (1);
8957 vec_oprnds2.create (1);
8958 vec_oprnds3.create (1);
8961 /* Handle def. */
8962 scalar_dest = gimple_assign_lhs (stmt);
8963 if (reduction_type != EXTRACT_LAST_REDUCTION)
8964 vec_dest = vect_create_destination_var (scalar_dest, vectype);
8966 /* Handle cond expr. */
8967 for (j = 0; j < ncopies; j++)
8969 stmt_vec_info new_stmt_info = NULL;
8970 if (j == 0)
8972 if (slp_node)
8974 auto_vec<tree, 4> ops;
8975 auto_vec<vec<tree>, 4> vec_defs;
8977 if (masked)
8978 ops.safe_push (cond_expr);
8979 else
8981 ops.safe_push (cond_expr0);
8982 ops.safe_push (cond_expr1);
8984 ops.safe_push (then_clause);
8985 ops.safe_push (else_clause);
8986 vect_get_slp_defs (ops, slp_node, &vec_defs);
8987 vec_oprnds3 = vec_defs.pop ();
8988 vec_oprnds2 = vec_defs.pop ();
8989 if (!masked)
8990 vec_oprnds1 = vec_defs.pop ();
8991 vec_oprnds0 = vec_defs.pop ();
8993 else
8995 if (masked)
8997 vec_cond_lhs
8998 = vect_get_vec_def_for_operand (cond_expr, stmt_info,
8999 comp_vectype);
9001 else
9003 vec_cond_lhs
9004 = vect_get_vec_def_for_operand (cond_expr0,
9005 stmt_info, comp_vectype);
9006 vec_cond_rhs
9007 = vect_get_vec_def_for_operand (cond_expr1,
9008 stmt_info, comp_vectype);
9010 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
9011 stmt_info);
9012 if (reduction_type != EXTRACT_LAST_REDUCTION)
9013 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
9014 stmt_info);
9017 else
9019 vec_cond_lhs
9020 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnds0.pop ());
9021 if (!masked)
9022 vec_cond_rhs
9023 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnds1.pop ());
9025 vec_then_clause = vect_get_vec_def_for_stmt_copy (vinfo,
9026 vec_oprnds2.pop ());
9027 vec_else_clause = vect_get_vec_def_for_stmt_copy (vinfo,
9028 vec_oprnds3.pop ());
9031 if (!slp_node)
9033 vec_oprnds0.quick_push (vec_cond_lhs);
9034 if (!masked)
9035 vec_oprnds1.quick_push (vec_cond_rhs);
9036 vec_oprnds2.quick_push (vec_then_clause);
9037 vec_oprnds3.quick_push (vec_else_clause);
9040 /* Arguments are ready. Create the new vector stmt. */
9041 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
9043 vec_then_clause = vec_oprnds2[i];
9044 vec_else_clause = vec_oprnds3[i];
9046 if (masked)
9047 vec_compare = vec_cond_lhs;
9048 else
9050 vec_cond_rhs = vec_oprnds1[i];
9051 if (bitop1 == NOP_EXPR)
9052 vec_compare = build2 (cond_code, vec_cmp_type,
9053 vec_cond_lhs, vec_cond_rhs);
9054 else
9056 new_temp = make_ssa_name (vec_cmp_type);
9057 gassign *new_stmt;
9058 if (bitop1 == BIT_NOT_EXPR)
9059 new_stmt = gimple_build_assign (new_temp, bitop1,
9060 vec_cond_rhs);
9061 else
9062 new_stmt
9063 = gimple_build_assign (new_temp, bitop1, vec_cond_lhs,
9064 vec_cond_rhs);
9065 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
9066 if (bitop2 == NOP_EXPR)
9067 vec_compare = new_temp;
9068 else if (bitop2 == BIT_NOT_EXPR)
9070 /* Instead of doing ~x ? y : z do x ? z : y. */
9071 vec_compare = new_temp;
9072 std::swap (vec_then_clause, vec_else_clause);
9074 else
9076 vec_compare = make_ssa_name (vec_cmp_type);
9077 new_stmt
9078 = gimple_build_assign (vec_compare, bitop2,
9079 vec_cond_lhs, new_temp);
9080 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
9084 if (reduction_type == EXTRACT_LAST_REDUCTION)
9086 if (!is_gimple_val (vec_compare))
9088 tree vec_compare_name = make_ssa_name (vec_cmp_type);
9089 gassign *new_stmt = gimple_build_assign (vec_compare_name,
9090 vec_compare);
9091 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
9092 vec_compare = vec_compare_name;
9094 gcall *new_stmt = gimple_build_call_internal
9095 (IFN_FOLD_EXTRACT_LAST, 3, else_clause, vec_compare,
9096 vec_then_clause);
9097 gimple_call_set_lhs (new_stmt, scalar_dest);
9098 SSA_NAME_DEF_STMT (scalar_dest) = new_stmt;
9099 if (stmt_info->stmt == gsi_stmt (*gsi))
9100 new_stmt_info = vect_finish_replace_stmt (stmt_info, new_stmt);
9101 else
9103 /* In this case we're moving the definition to later in the
9104 block. That doesn't matter because the only uses of the
9105 lhs are in phi statements. */
9106 gimple_stmt_iterator old_gsi
9107 = gsi_for_stmt (stmt_info->stmt);
9108 gsi_remove (&old_gsi, true);
9109 new_stmt_info
9110 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
9113 else
9115 new_temp = make_ssa_name (vec_dest);
9116 gassign *new_stmt
9117 = gimple_build_assign (new_temp, VEC_COND_EXPR, vec_compare,
9118 vec_then_clause, vec_else_clause);
9119 new_stmt_info
9120 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
9122 if (slp_node)
9123 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
9126 if (slp_node)
9127 continue;
9129 if (j == 0)
9130 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
9131 else
9132 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
9134 prev_stmt_info = new_stmt_info;
9137 vec_oprnds0.release ();
9138 vec_oprnds1.release ();
9139 vec_oprnds2.release ();
9140 vec_oprnds3.release ();
9142 return true;
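/* A minimal standalone sketch (editorial illustration, kept under #if 0 so
   it is never compiled) of two rewrites used above for boolean conditions:
   for EQ_EXPR the code computes x = a ^ b and, because bitop2 is
   BIT_NOT_EXPR, swaps the then/else clauses rather than negating x
   ("instead of doing ~x ? y : z do x ? z : y"); for LE_EXPR it swaps the
   compared operands and uses BIT_NOT/BIT_IOR, i.e. b | ~a.  */
#if 0
#include <assert.h>

int
main (void)
{
  int y = 10, z = 20;
  for (int a = 0; a <= 1; a++)
    for (int b = 0; b <= 1; b++)
      {
        /* EQ_EXPR: (a == b) ? y : z  ==  (a ^ b) ? z : y.  */
        assert (((a == b) ? y : z) == ((a ^ b) ? z : y));
        /* LE_EXPR: for single-bit a and b, a <= b is b | ~a.  */
        assert (((a <= b) ? y : z) == (((b | (~a & 1)) != 0) ? y : z));
      }
  return 0;
}
#endif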
9145 /* vectorizable_comparison.
9147 Check if STMT_INFO is a comparison expression that can be vectorized.
9148 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
9149 comparison, put it in VEC_STMT, and insert it at GSI.
9151 Return true if STMT_INFO is vectorizable in this way. */
9153 static bool
9154 vectorizable_comparison (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
9155 stmt_vec_info *vec_stmt,
9156 slp_tree slp_node, stmt_vector_for_cost *cost_vec)
9158 vec_info *vinfo = stmt_info->vinfo;
9159 tree lhs, rhs1, rhs2;
9160 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
9161 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
9162 tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE;
9163 tree new_temp;
9164 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
9165 enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type};
9166 int ndts = 2;
9167 poly_uint64 nunits;
9168 int ncopies;
9169 enum tree_code code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
9170 stmt_vec_info prev_stmt_info = NULL;
9171 int i, j;
9172 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
9173 vec<tree> vec_oprnds0 = vNULL;
9174 vec<tree> vec_oprnds1 = vNULL;
9175 tree mask_type;
9176 tree mask;
9178 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
9179 return false;
9181 if (!vectype || !VECTOR_BOOLEAN_TYPE_P (vectype))
9182 return false;
9184 mask_type = vectype;
9185 nunits = TYPE_VECTOR_SUBPARTS (vectype);
9187 if (slp_node)
9188 ncopies = 1;
9189 else
9190 ncopies = vect_get_num_copies (loop_vinfo, vectype);
9192 gcc_assert (ncopies >= 1);
9193 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
9194 return false;
9196 if (STMT_VINFO_LIVE_P (stmt_info))
9198 if (dump_enabled_p ())
9199 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9200 "value used after loop.\n");
9201 return false;
9204 gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
9205 if (!stmt)
9206 return false;
9208 code = gimple_assign_rhs_code (stmt);
9210 if (TREE_CODE_CLASS (code) != tcc_comparison)
9211 return false;
9213 rhs1 = gimple_assign_rhs1 (stmt);
9214 rhs2 = gimple_assign_rhs2 (stmt);
9216 if (!vect_is_simple_use (rhs1, stmt_info->vinfo, &dts[0], &vectype1))
9217 return false;
9219 if (!vect_is_simple_use (rhs2, stmt_info->vinfo, &dts[1], &vectype2))
9220 return false;
9222 if (vectype1 && vectype2
9223 && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1),
9224 TYPE_VECTOR_SUBPARTS (vectype2)))
9225 return false;
9227 vectype = vectype1 ? vectype1 : vectype2;
9229 /* Invariant comparison. */
9230 if (!vectype)
9232 vectype = get_vectype_for_scalar_type (TREE_TYPE (rhs1));
9233 if (maybe_ne (TYPE_VECTOR_SUBPARTS (vectype), nunits))
9234 return false;
9236 else if (maybe_ne (nunits, TYPE_VECTOR_SUBPARTS (vectype)))
9237 return false;
9239 /* Can't compare mask and non-mask types. */
9240 if (vectype1 && vectype2
9241 && (VECTOR_BOOLEAN_TYPE_P (vectype1) ^ VECTOR_BOOLEAN_TYPE_P (vectype2)))
9242 return false;
9244 /* Boolean values may have another representation in vectors
9245 and therefore we prefer bit operations over comparison for
9246 them (which also works for scalar masks). We store opcodes
9247 to use in bitop1 and bitop2. The statement is vectorized as
9248 BITOP2 (rhs1 BITOP1 rhs2) or
9249 rhs1 BITOP2 (BITOP1 rhs2)
9250 depending on the arity of bitop1 and bitop2. */
9251 if (VECTOR_BOOLEAN_TYPE_P (vectype))
9253 if (code == GT_EXPR)
9255 bitop1 = BIT_NOT_EXPR;
9256 bitop2 = BIT_AND_EXPR;
9258 else if (code == GE_EXPR)
9260 bitop1 = BIT_NOT_EXPR;
9261 bitop2 = BIT_IOR_EXPR;
9263 else if (code == LT_EXPR)
9265 bitop1 = BIT_NOT_EXPR;
9266 bitop2 = BIT_AND_EXPR;
9267 std::swap (rhs1, rhs2);
9268 std::swap (dts[0], dts[1]);
9270 else if (code == LE_EXPR)
9272 bitop1 = BIT_NOT_EXPR;
9273 bitop2 = BIT_IOR_EXPR;
9274 std::swap (rhs1, rhs2);
9275 std::swap (dts[0], dts[1]);
9277 else
9279 bitop1 = BIT_XOR_EXPR;
9280 if (code == EQ_EXPR)
9281 bitop2 = BIT_NOT_EXPR;
9285 if (!vec_stmt)
9287 if (bitop1 == NOP_EXPR)
9289 if (!expand_vec_cmp_expr_p (vectype, mask_type, code))
9290 return false;
9292 else
9294 machine_mode mode = TYPE_MODE (vectype);
9295 optab optab;
9297 optab = optab_for_tree_code (bitop1, vectype, optab_default);
9298 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
9299 return false;
9301 if (bitop2 != NOP_EXPR)
9303 optab = optab_for_tree_code (bitop2, vectype, optab_default);
9304 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
9305 return false;
9309 STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type;
9310 vect_model_simple_cost (stmt_info, ncopies * (1 + (bitop2 != NOP_EXPR)),
9311 dts, ndts, slp_node, cost_vec);
9312 return true;
9315 /* Transform. */
9316 if (!slp_node)
9318 vec_oprnds0.create (1);
9319 vec_oprnds1.create (1);
9322 /* Handle def. */
9323 lhs = gimple_assign_lhs (stmt);
9324 mask = vect_create_destination_var (lhs, mask_type);
9326 /* Handle cmp expr. */
9327 for (j = 0; j < ncopies; j++)
9329 stmt_vec_info new_stmt_info = NULL;
9330 if (j == 0)
9332 if (slp_node)
9334 auto_vec<tree, 2> ops;
9335 auto_vec<vec<tree>, 2> vec_defs;
9337 ops.safe_push (rhs1);
9338 ops.safe_push (rhs2);
9339 vect_get_slp_defs (ops, slp_node, &vec_defs);
9340 vec_oprnds1 = vec_defs.pop ();
9341 vec_oprnds0 = vec_defs.pop ();
9343 else
9345 vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt_info,
9346 vectype);
9347 vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt_info,
9348 vectype);
9351 else
9353 vec_rhs1 = vect_get_vec_def_for_stmt_copy (vinfo,
9354 vec_oprnds0.pop ());
9355 vec_rhs2 = vect_get_vec_def_for_stmt_copy (vinfo,
9356 vec_oprnds1.pop ());
9359 if (!slp_node)
9361 vec_oprnds0.quick_push (vec_rhs1);
9362 vec_oprnds1.quick_push (vec_rhs2);
9365 /* Arguments are ready. Create the new vector stmt. */
9366 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_rhs1)
9368 vec_rhs2 = vec_oprnds1[i];
9370 new_temp = make_ssa_name (mask);
9371 if (bitop1 == NOP_EXPR)
9373 gassign *new_stmt = gimple_build_assign (new_temp, code,
9374 vec_rhs1, vec_rhs2);
9375 new_stmt_info
9376 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
9378 else
9380 gassign *new_stmt;
9381 if (bitop1 == BIT_NOT_EXPR)
9382 new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs2);
9383 else
9384 new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs1,
9385 vec_rhs2);
9386 new_stmt_info
9387 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
9388 if (bitop2 != NOP_EXPR)
9390 tree res = make_ssa_name (mask);
9391 if (bitop2 == BIT_NOT_EXPR)
9392 new_stmt = gimple_build_assign (res, bitop2, new_temp);
9393 else
9394 new_stmt = gimple_build_assign (res, bitop2, vec_rhs1,
9395 new_temp);
9396 new_stmt_info
9397 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
9400 if (slp_node)
9401 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
9404 if (slp_node)
9405 continue;
9407 if (j == 0)
9408 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
9409 else
9410 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
9412 prev_stmt_info = new_stmt_info;
9415 vec_oprnds0.release ();
9416 vec_oprnds1.release ();
9418 return true;
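/* A minimal standalone sketch (editorial illustration, kept under #if 0 so
   it is never compiled) checking the single-bit identities behind the
   bitop1/bitop2 lowering above for comparisons on boolean vectors:
   GT is a & ~b, GE is a | ~b, LT and LE are the same with the operands
   swapped, NE is a ^ b and EQ is ~(a ^ b).  */
#if 0
#include <assert.h>

int
main (void)
{
  for (int a = 0; a <= 1; a++)
    for (int b = 0; b <= 1; b++)
      {
        assert ((a > b)  == ((a & (~b & 1)) != 0));  /* BIT_NOT + BIT_AND */
        assert ((a >= b) == ((a | (~b & 1)) != 0));  /* BIT_NOT + BIT_IOR */
        assert ((a < b)  == ((b & (~a & 1)) != 0));  /* swapped operands */
        assert ((a <= b) == ((b | (~a & 1)) != 0));  /* swapped operands */
        assert ((a != b) == ((a ^ b) != 0));         /* BIT_XOR */
        assert ((a == b) == ((~(a ^ b) & 1) != 0));  /* BIT_XOR + BIT_NOT */
      }
  return 0;
}
#endif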
9421 /* If SLP_NODE is nonnull, return true if vectorizable_live_operation
9422 can handle all live statements in the node. Otherwise return true
9423 if STMT_INFO is not live or if vectorizable_live_operation can handle it.
9424 GSI and VEC_STMT are as for vectorizable_live_operation. */
9426 static bool
9427 can_vectorize_live_stmts (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
9428 slp_tree slp_node, stmt_vec_info *vec_stmt,
9429 stmt_vector_for_cost *cost_vec)
9431 if (slp_node)
9433 stmt_vec_info slp_stmt_info;
9434 unsigned int i;
9435 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node), i, slp_stmt_info)
9437 if (STMT_VINFO_LIVE_P (slp_stmt_info)
9438 && !vectorizable_live_operation (slp_stmt_info, gsi, slp_node, i,
9439 vec_stmt, cost_vec))
9440 return false;
9443 else if (STMT_VINFO_LIVE_P (stmt_info)
9444 && !vectorizable_live_operation (stmt_info, gsi, slp_node, -1,
9445 vec_stmt, cost_vec))
9446 return false;
9448 return true;
9451 /* Make sure the statement is vectorizable. */
9453 opt_result
9454 vect_analyze_stmt (stmt_vec_info stmt_info, bool *need_to_vectorize,
9455 slp_tree node, slp_instance node_instance,
9456 stmt_vector_for_cost *cost_vec)
9458 vec_info *vinfo = stmt_info->vinfo;
9459 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
9460 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
9461 bool ok;
9462 gimple_seq pattern_def_seq;
9464 if (dump_enabled_p ())
9465 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: %G",
9466 stmt_info->stmt);
9468 if (gimple_has_volatile_ops (stmt_info->stmt))
9469 return opt_result::failure_at (stmt_info->stmt,
9470 "not vectorized:"
9471 " stmt has volatile operands: %G\n",
9472 stmt_info->stmt);
9474 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
9475 && node == NULL
9476 && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
9478 gimple_stmt_iterator si;
9480 for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
9482 stmt_vec_info pattern_def_stmt_info
9483 = vinfo->lookup_stmt (gsi_stmt (si));
9484 if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
9485 || STMT_VINFO_LIVE_P (pattern_def_stmt_info))
9487 /* Analyze def stmt of STMT if it's a pattern stmt. */
9488 if (dump_enabled_p ())
9489 dump_printf_loc (MSG_NOTE, vect_location,
9490 "==> examining pattern def statement: %G",
9491 pattern_def_stmt_info->stmt);
9493 opt_result res
9494 = vect_analyze_stmt (pattern_def_stmt_info,
9495 need_to_vectorize, node, node_instance,
9496 cost_vec);
9497 if (!res)
9498 return res;
9503 /* Skip stmts that do not need to be vectorized. In loops this is expected
9504 to include:
9505 - the COND_EXPR which is the loop exit condition
9506 - any LABEL_EXPRs in the loop
9507 - computations that are used only for array indexing or loop control.
9508 In basic blocks we only analyze statements that are a part of some SLP
9509 instance, therefore, all the statements are relevant.
9511 A pattern statement needs to be analyzed instead of the original statement
9512 if the original statement is not relevant. Otherwise, we analyze both
9513 statements. In basic blocks we are called from some SLP instance
9514 traversal, so don't analyze pattern stmts here; the pattern stmts
9515 will already be part of the SLP instance. */
9517 stmt_vec_info pattern_stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
9518 if (!STMT_VINFO_RELEVANT_P (stmt_info)
9519 && !STMT_VINFO_LIVE_P (stmt_info))
9521 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
9522 && pattern_stmt_info
9523 && (STMT_VINFO_RELEVANT_P (pattern_stmt_info)
9524 || STMT_VINFO_LIVE_P (pattern_stmt_info)))
9526 /* Analyze PATTERN_STMT instead of the original stmt. */
9527 stmt_info = pattern_stmt_info;
9528 if (dump_enabled_p ())
9529 dump_printf_loc (MSG_NOTE, vect_location,
9530 "==> examining pattern statement: %G",
9531 stmt_info->stmt);
9533 else
9535 if (dump_enabled_p ())
9536 dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");
9538 return opt_result::success ();
9541 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
9542 && node == NULL
9543 && pattern_stmt_info
9544 && (STMT_VINFO_RELEVANT_P (pattern_stmt_info)
9545 || STMT_VINFO_LIVE_P (pattern_stmt_info)))
9547 /* Analyze PATTERN_STMT too. */
9548 if (dump_enabled_p ())
9549 dump_printf_loc (MSG_NOTE, vect_location,
9550 "==> examining pattern statement: %G",
9551 pattern_stmt_info->stmt);
9553 opt_result res
9554 = vect_analyze_stmt (pattern_stmt_info, need_to_vectorize, node,
9555 node_instance, cost_vec);
9556 if (!res)
9557 return res;
9560 switch (STMT_VINFO_DEF_TYPE (stmt_info))
9562 case vect_internal_def:
9563 break;
9565 case vect_reduction_def:
9566 case vect_nested_cycle:
9567 gcc_assert (!bb_vinfo
9568 && (relevance == vect_used_in_outer
9569 || relevance == vect_used_in_outer_by_reduction
9570 || relevance == vect_used_by_reduction
9571 || relevance == vect_unused_in_scope
9572 || relevance == vect_used_only_live));
9573 break;
9575 case vect_induction_def:
9576 gcc_assert (!bb_vinfo);
9577 break;
9579 case vect_constant_def:
9580 case vect_external_def:
9581 case vect_unknown_def_type:
9582 default:
9583 gcc_unreachable ();
9586 if (STMT_VINFO_RELEVANT_P (stmt_info))
9588 tree type = gimple_expr_type (stmt_info->stmt);
9589 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (type)));
9590 gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
9591 gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
9592 || (call && gimple_call_lhs (call) == NULL_TREE));
9593 *need_to_vectorize = true;
9596 if (PURE_SLP_STMT (stmt_info) && !node)
9598 if (dump_enabled_p ())
9599 dump_printf_loc (MSG_NOTE, vect_location,
9600 "handled only by SLP analysis\n");
9601 return opt_result::success ();
9604 ok = true;
9605 if (!bb_vinfo
9606 && (STMT_VINFO_RELEVANT_P (stmt_info)
9607 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
9608 /* Prefer vectorizable_call over vectorizable_simd_clone_call so
9609 -mveclibabi= takes preference over library functions with
9610 the simd attribute. */
9611 ok = (vectorizable_call (stmt_info, NULL, NULL, node, cost_vec)
9612 || vectorizable_simd_clone_call (stmt_info, NULL, NULL, node,
9613 cost_vec)
9614 || vectorizable_conversion (stmt_info, NULL, NULL, node, cost_vec)
9615 || vectorizable_operation (stmt_info, NULL, NULL, node, cost_vec)
9616 || vectorizable_assignment (stmt_info, NULL, NULL, node, cost_vec)
9617 || vectorizable_load (stmt_info, NULL, NULL, node, node_instance,
9618 cost_vec)
9619 || vectorizable_store (stmt_info, NULL, NULL, node, cost_vec)
9620 || vectorizable_reduction (stmt_info, NULL, NULL, node,
9621 node_instance, cost_vec)
9622 || vectorizable_induction (stmt_info, NULL, NULL, node, cost_vec)
9623 || vectorizable_shift (stmt_info, NULL, NULL, node, cost_vec)
9624 || vectorizable_condition (stmt_info, NULL, NULL, false, node,
9625 cost_vec)
9626 || vectorizable_comparison (stmt_info, NULL, NULL, node,
9627 cost_vec));
9628 else
9630 if (bb_vinfo)
9631 ok = (vectorizable_call (stmt_info, NULL, NULL, node, cost_vec)
9632 || vectorizable_simd_clone_call (stmt_info, NULL, NULL, node,
9633 cost_vec)
9634 || vectorizable_conversion (stmt_info, NULL, NULL, node,
9635 cost_vec)
9636 || vectorizable_shift (stmt_info, NULL, NULL, node, cost_vec)
9637 || vectorizable_operation (stmt_info, NULL, NULL, node, cost_vec)
9638 || vectorizable_assignment (stmt_info, NULL, NULL, node,
9639 cost_vec)
9640 || vectorizable_load (stmt_info, NULL, NULL, node, node_instance,
9641 cost_vec)
9642 || vectorizable_store (stmt_info, NULL, NULL, node, cost_vec)
9643 || vectorizable_condition (stmt_info, NULL, NULL, false, node,
9644 cost_vec)
9645 || vectorizable_comparison (stmt_info, NULL, NULL, node,
9646 cost_vec));
9649 if (!ok)
9650 return opt_result::failure_at (stmt_info->stmt,
9651 "not vectorized:"
9652 " relevant stmt not supported: %G",
9653 stmt_info->stmt);
9655 /* Stmts that are (also) "live" (i.e., that are used outside the loop)
9656 need extra handling, except for vectorizable reductions. */
9657 if (!bb_vinfo
9658 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
9659 && !can_vectorize_live_stmts (stmt_info, NULL, node, NULL, cost_vec))
9660 return opt_result::failure_at (stmt_info->stmt,
9661 "not vectorized:"
9662 " live stmt not supported: %G",
9663 stmt_info->stmt);
9665 return opt_result::success ();
9669 /* Function vect_transform_stmt.
9671 Create a vectorized stmt to replace STMT_INFO, and insert it at BSI. */
9673 bool
9674 vect_transform_stmt (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
9675 slp_tree slp_node, slp_instance slp_node_instance)
9677 vec_info *vinfo = stmt_info->vinfo;
9678 bool is_store = false;
9679 stmt_vec_info vec_stmt = NULL;
9680 bool done;
9682 gcc_assert (slp_node || !PURE_SLP_STMT (stmt_info));
9683 stmt_vec_info old_vec_stmt_info = STMT_VINFO_VEC_STMT (stmt_info);
9685 bool nested_p = (STMT_VINFO_LOOP_VINFO (stmt_info)
9686 && nested_in_vect_loop_p
9687 (LOOP_VINFO_LOOP (STMT_VINFO_LOOP_VINFO (stmt_info)),
9688 stmt_info));
9690 gimple *stmt = stmt_info->stmt;
9691 switch (STMT_VINFO_TYPE (stmt_info))
9693 case type_demotion_vec_info_type:
9694 case type_promotion_vec_info_type:
9695 case type_conversion_vec_info_type:
9696 done = vectorizable_conversion (stmt_info, gsi, &vec_stmt, slp_node,
9697 NULL);
9698 gcc_assert (done);
9699 break;
9701 case induc_vec_info_type:
9702 done = vectorizable_induction (stmt_info, gsi, &vec_stmt, slp_node,
9703 NULL);
9704 gcc_assert (done);
9705 break;
9707 case shift_vec_info_type:
9708 done = vectorizable_shift (stmt_info, gsi, &vec_stmt, slp_node, NULL);
9709 gcc_assert (done);
9710 break;
9712 case op_vec_info_type:
9713 done = vectorizable_operation (stmt_info, gsi, &vec_stmt, slp_node,
9714 NULL);
9715 gcc_assert (done);
9716 break;
9718 case assignment_vec_info_type:
9719 done = vectorizable_assignment (stmt_info, gsi, &vec_stmt, slp_node,
9720 NULL);
9721 gcc_assert (done);
9722 break;
9724 case load_vec_info_type:
9725 done = vectorizable_load (stmt_info, gsi, &vec_stmt, slp_node,
9726 slp_node_instance, NULL);
9727 gcc_assert (done);
9728 break;
9730 case store_vec_info_type:
9731 done = vectorizable_store (stmt_info, gsi, &vec_stmt, slp_node, NULL);
9732 gcc_assert (done);
9733 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
9735 /* In case of interleaving, the whole chain is vectorized when the
9736 last store in the chain is reached. Store stmts before the last
9737 one are skipped, and their vec_stmt_info shouldn't be freed
9738 meanwhile. */
9739 stmt_vec_info group_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
9740 if (DR_GROUP_STORE_COUNT (group_info) == DR_GROUP_SIZE (group_info))
9741 is_store = true;
9743 else
9744 is_store = true;
9745 break;
9747 case condition_vec_info_type:
9748 done = vectorizable_condition (stmt_info, gsi, &vec_stmt, false,
9749 slp_node, NULL);
9750 gcc_assert (done);
9751 break;
9753 case comparison_vec_info_type:
9754 done = vectorizable_comparison (stmt_info, gsi, &vec_stmt,
9755 slp_node, NULL);
9756 gcc_assert (done);
9757 break;
9759 case call_vec_info_type:
9760 done = vectorizable_call (stmt_info, gsi, &vec_stmt, slp_node, NULL);
9761 stmt = gsi_stmt (*gsi);
9762 break;
9764 case call_simd_clone_vec_info_type:
9765 done = vectorizable_simd_clone_call (stmt_info, gsi, &vec_stmt,
9766 slp_node, NULL);
9767 stmt = gsi_stmt (*gsi);
9768 break;
9770 case reduc_vec_info_type:
9771 done = vectorizable_reduction (stmt_info, gsi, &vec_stmt, slp_node,
9772 slp_node_instance, NULL);
9773 gcc_assert (done);
9774 break;
9776 default:
9777 if (!STMT_VINFO_LIVE_P (stmt_info))
9779 if (dump_enabled_p ())
9780 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9781 "stmt not supported.\n");
9782 gcc_unreachable ();
9786 /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
9787 This would break hybrid SLP vectorization. */
9788 if (slp_node)
9789 gcc_assert (!vec_stmt
9790 && STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt_info);
9792 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
9793 is being vectorized, but outside the immediately enclosing loop. */
9794 if (vec_stmt
9795 && nested_p
9796 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
9797 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
9798 || STMT_VINFO_RELEVANT (stmt_info) ==
9799 vect_used_in_outer_by_reduction))
9801 struct loop *innerloop = LOOP_VINFO_LOOP (
9802 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
9803 imm_use_iterator imm_iter;
9804 use_operand_p use_p;
9805 tree scalar_dest;
9807 if (dump_enabled_p ())
9808 dump_printf_loc (MSG_NOTE, vect_location,
9809 "Record the vdef for outer-loop vectorization.\n");
9811 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
9812 (to be used when vectorizing outer-loop stmts that use the DEF of
9813 STMT). */
9814 if (gimple_code (stmt) == GIMPLE_PHI)
9815 scalar_dest = PHI_RESULT (stmt);
9816 else
9817 scalar_dest = gimple_get_lhs (stmt);
9819 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
9820 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
9822 stmt_vec_info exit_phi_info
9823 = vinfo->lookup_stmt (USE_STMT (use_p));
9824 STMT_VINFO_VEC_STMT (exit_phi_info) = vec_stmt;
9828 /* Handle stmts whose DEF is used outside the loop-nest that is
9829 being vectorized. */
9830 if (STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
9832 done = can_vectorize_live_stmts (stmt_info, gsi, slp_node, &vec_stmt,
9833 NULL);
9834 gcc_assert (done);
9837 if (vec_stmt)
9838 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
9840 return is_store;
9844 /* Remove a group of stores (for SLP or interleaving), free their
9845 stmt_vec_info. */
9847 void
9848 vect_remove_stores (stmt_vec_info first_stmt_info)
9850 vec_info *vinfo = first_stmt_info->vinfo;
9851 stmt_vec_info next_stmt_info = first_stmt_info;
9853 while (next_stmt_info)
9855 stmt_vec_info tmp = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
9856 next_stmt_info = vect_orig_stmt (next_stmt_info);
9857 /* Free the attached stmt_vec_info and remove the stmt. */
9858 vinfo->remove_stmt (next_stmt_info);
9859 next_stmt_info = tmp;
9863 /* Function get_vectype_for_scalar_type_and_size.
9865 Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
9866 by the target. */
9868 tree
9869 get_vectype_for_scalar_type_and_size (tree scalar_type, poly_uint64 size)
9871 tree orig_scalar_type = scalar_type;
9872 scalar_mode inner_mode;
9873 machine_mode simd_mode;
9874 poly_uint64 nunits;
9875 tree vectype;
9877 if (!is_int_mode (TYPE_MODE (scalar_type), &inner_mode)
9878 && !is_float_mode (TYPE_MODE (scalar_type), &inner_mode))
9879 return NULL_TREE;
9881 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
9883 /* For vector types of elements whose mode precision doesn't
9884 match their type's precision we use an element type of mode
9885 precision. The vectorization routines will have to make sure
9886 they support the proper result truncation/extension.
9887 We also make sure to build vector types with INTEGER_TYPE
9888 component type only. */
9889 if (INTEGRAL_TYPE_P (scalar_type)
9890 && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
9891 || TREE_CODE (scalar_type) != INTEGER_TYPE))
9892 scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
9893 TYPE_UNSIGNED (scalar_type));
9895 /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
9896 When the component mode passes the above test simply use a type
9897 corresponding to that mode. The theory is that any use that
9898 would cause problems with this will disable vectorization anyway. */
9899 else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
9900 && !INTEGRAL_TYPE_P (scalar_type))
9901 scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);
9903 /* We can't build a vector type of elements with alignment bigger than
9904 their size. */
9905 else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
9906 scalar_type = lang_hooks.types.type_for_mode (inner_mode,
9907 TYPE_UNSIGNED (scalar_type));
9909 /* If we fell back to using the mode, fail if there was
9910 no scalar type for it. */
9911 if (scalar_type == NULL_TREE)
9912 return NULL_TREE;
9914 /* If no size was supplied use the mode the target prefers. Otherwise
9915 look up a vector mode of the specified size. */
9916 if (known_eq (size, 0U))
9917 simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
9918 else if (!multiple_p (size, nbytes, &nunits)
9919 || !mode_for_vector (inner_mode, nunits).exists (&simd_mode))
9920 return NULL_TREE;
9921 /* NOTE: nunits == 1 is allowed to support single element vector types. */
9922 if (!multiple_p (GET_MODE_SIZE (simd_mode), nbytes, &nunits))
9923 return NULL_TREE;
9925 vectype = build_vector_type (scalar_type, nunits);
9927 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
9928 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
9929 return NULL_TREE;
9931 /* Re-attach the address-space qualifier if we canonicalized the scalar
9932 type. */
9933 if (TYPE_ADDR_SPACE (orig_scalar_type) != TYPE_ADDR_SPACE (vectype))
9934 return build_qualified_type
9935 (vectype, KEEP_QUAL_ADDR_SPACE (TYPE_QUALS (orig_scalar_type)));
9937 return vectype;
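/* A toy sketch (editorial illustration, kept under #if 0 so it is never
   compiled) of the nunits computation above: the number of units is the
   vector size divided by the element size, and a size that is not an
   exact multiple of the element size is rejected (multiple_p fails and
   NULL_TREE is returned).  The zero-size case, which asks the target for
   its preferred SIMD mode, and poly-int sizes are not modelled here.  */
#if 0
#include <assert.h>

/* Return the unit count, or 0 where the real code would give up.  */
static int
toy_nunits (int vector_size, int element_size)
{
  if (vector_size % element_size != 0)
    return 0;
  return vector_size / element_size;
}

int
main (void)
{
  assert (toy_nunits (16, 4) == 4);  /* e.g. a 16-byte vector of 4-byte ints */
  assert (toy_nunits (16, 8) == 2);  /* e.g. a 16-byte vector of doubles */
  assert (toy_nunits (12, 8) == 0);  /* not a multiple: rejected */
  return 0;
}
#endif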
9940 poly_uint64 current_vector_size;
9942 /* Function get_vectype_for_scalar_type.
9944 Returns the vector type corresponding to SCALAR_TYPE as supported
9945 by the target. */
9947 tree
9948 get_vectype_for_scalar_type (tree scalar_type)
9950 tree vectype;
9951 vectype = get_vectype_for_scalar_type_and_size (scalar_type,
9952 current_vector_size);
9953 if (vectype
9954 && known_eq (current_vector_size, 0U))
9955 current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
9956 return vectype;
9959 /* Function get_mask_type_for_scalar_type.
9961 Returns the mask type corresponding to a result of comparison
9962 of vectors of specified SCALAR_TYPE as supported by target. */
9964 tree
9965 get_mask_type_for_scalar_type (tree scalar_type)
9967 tree vectype = get_vectype_for_scalar_type (scalar_type);
9969 if (!vectype)
9970 return NULL;
9972 return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype),
9973 current_vector_size);
9976 /* Function get_same_sized_vectype
9978 Returns a vector type corresponding to SCALAR_TYPE of size
9979 VECTOR_TYPE if supported by the target. */
9981 tree
9982 get_same_sized_vectype (tree scalar_type, tree vector_type)
9984 if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type))
9985 return build_same_sized_truth_vector_type (vector_type);
9987 return get_vectype_for_scalar_type_and_size
9988 (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
9991 /* Function vect_is_simple_use.
9993 Input:
9994 VINFO - the vect info of the loop or basic block that is being vectorized.
9995 OPERAND - operand in the loop or bb.
9996 Output:
9997 DEF_STMT_INFO_OUT (optional) - information about the defining stmt in
9998 case OPERAND is an SSA_NAME that is defined in the vectorizable region
9999 DEF_STMT_OUT (optional) - the defining stmt in case OPERAND is an SSA_NAME;
10000 the definition could be anywhere in the function
10001 DT - the type of definition
10003 Returns whether a stmt with OPERAND can be vectorized.
10004 For loops, supportable operands are constants, loop invariants, and operands
10005 that are defined by the current iteration of the loop. Unsupportable
10006 operands are those that are defined by a previous iteration of the loop (as
10007 is the case in reduction/induction computations).
10008 For basic blocks, supportable operands are constants and bb invariants.
10009 For now, operands defined outside the basic block are not supported. */
10011 bool
10012 vect_is_simple_use (tree operand, vec_info *vinfo, enum vect_def_type *dt,
10013 stmt_vec_info *def_stmt_info_out, gimple **def_stmt_out)
10015 if (def_stmt_info_out)
10016 *def_stmt_info_out = NULL;
10017 if (def_stmt_out)
10018 *def_stmt_out = NULL;
10019 *dt = vect_unknown_def_type;
10021 if (dump_enabled_p ())
10023 dump_printf_loc (MSG_NOTE, vect_location,
10024 "vect_is_simple_use: operand ");
10025 if (TREE_CODE (operand) == SSA_NAME
10026 && !SSA_NAME_IS_DEFAULT_DEF (operand))
10027 dump_gimple_expr (MSG_NOTE, TDF_SLIM, SSA_NAME_DEF_STMT (operand), 0);
10028 else
10029 dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
10032 if (CONSTANT_CLASS_P (operand))
10033 *dt = vect_constant_def;
10034 else if (is_gimple_min_invariant (operand))
10035 *dt = vect_external_def;
10036 else if (TREE_CODE (operand) != SSA_NAME)
10037 *dt = vect_unknown_def_type;
10038 else if (SSA_NAME_IS_DEFAULT_DEF (operand))
10039 *dt = vect_external_def;
10040 else
10042 gimple *def_stmt = SSA_NAME_DEF_STMT (operand);
10043 stmt_vec_info stmt_vinfo = vinfo->lookup_def (operand);
10044 if (!stmt_vinfo)
10045 *dt = vect_external_def;
10046 else
10048 stmt_vinfo = vect_stmt_to_vectorize (stmt_vinfo);
10049 def_stmt = stmt_vinfo->stmt;
10050 switch (gimple_code (def_stmt))
10052 case GIMPLE_PHI:
10053 case GIMPLE_ASSIGN:
10054 case GIMPLE_CALL:
10055 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
10056 break;
10057 default:
10058 *dt = vect_unknown_def_type;
10059 break;
10061 if (def_stmt_info_out)
10062 *def_stmt_info_out = stmt_vinfo;
10064 if (def_stmt_out)
10065 *def_stmt_out = def_stmt;
10068 if (dump_enabled_p ())
10070 dump_printf (MSG_NOTE, ", type of def: ");
10071 switch (*dt)
10073 case vect_uninitialized_def:
10074 dump_printf (MSG_NOTE, "uninitialized\n");
10075 break;
10076 case vect_constant_def:
10077 dump_printf (MSG_NOTE, "constant\n");
10078 break;
10079 case vect_external_def:
10080 dump_printf (MSG_NOTE, "external\n");
10081 break;
10082 case vect_internal_def:
10083 dump_printf (MSG_NOTE, "internal\n");
10084 break;
10085 case vect_induction_def:
10086 dump_printf (MSG_NOTE, "induction\n");
10087 break;
10088 case vect_reduction_def:
10089 dump_printf (MSG_NOTE, "reduction\n");
10090 break;
10091 case vect_double_reduction_def:
10092 dump_printf (MSG_NOTE, "double reduction\n");
10093 break;
10094 case vect_nested_cycle:
10095 dump_printf (MSG_NOTE, "nested cycle\n");
10096 break;
10097 case vect_unknown_def_type:
10098 dump_printf (MSG_NOTE, "unknown\n");
10099 break;
10103 if (*dt == vect_unknown_def_type)
10105 if (dump_enabled_p ())
10106 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10107 "Unsupported pattern.\n");
10108 return false;
10111 return true;
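/* A toy model (editorial illustration, kept under #if 0 so it is never
   compiled) of the classification order used above: constants first,
   then gimple invariants, then non-SSA operands, then default
   definitions, and finally a lookup of the defining statement.  The
   toy_* names are made up for this sketch and do not exist in GCC;
   TOY_INTERNAL stands in for whatever STMT_VINFO_DEF_TYPE the defining
   statement has.  */
#if 0
#include <assert.h>

enum toy_def_type { TOY_CONSTANT, TOY_EXTERNAL, TOY_UNKNOWN, TOY_INTERNAL };

struct toy_operand
{
  int is_constant;        /* CONSTANT_CLASS_P  */
  int is_invariant;       /* is_gimple_min_invariant  */
  int is_ssa_name;        /* TREE_CODE (operand) == SSA_NAME  */
  int is_default_def;     /* SSA_NAME_IS_DEFAULT_DEF  */
  int defined_in_region;  /* vinfo->lookup_def found a stmt_vec_info  */
};

static enum toy_def_type
toy_classify (struct toy_operand op)
{
  if (op.is_constant)
    return TOY_CONSTANT;
  if (op.is_invariant)
    return TOY_EXTERNAL;
  if (!op.is_ssa_name)
    return TOY_UNKNOWN;
  if (op.is_default_def)
    return TOY_EXTERNAL;
  if (!op.defined_in_region)
    return TOY_EXTERNAL;
  return TOY_INTERNAL;
}

int
main (void)
{
  struct toy_operand cst = { 1, 1, 0, 0, 0 };      /* literal constant */
  struct toy_operand parm = { 0, 0, 1, 1, 0 };     /* incoming parameter */
  struct toy_operand in_loop = { 0, 0, 1, 0, 1 };  /* defined in the region */
  assert (toy_classify (cst) == TOY_CONSTANT);
  assert (toy_classify (parm) == TOY_EXTERNAL);
  assert (toy_classify (in_loop) == TOY_INTERNAL);
  return 0;
}
#endif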
10114 /* Function vect_is_simple_use.
10116 Same as vect_is_simple_use but also determines the vector operand
10117 type of OPERAND and stores it to *VECTYPE. If the definition of
10118 OPERAND is vect_uninitialized_def, vect_constant_def or
10119 vect_external_def, *VECTYPE will be set to NULL_TREE and the caller
10120 is responsible for computing the best-suited vector type for the
10121 scalar operand. */
10123 bool
10124 vect_is_simple_use (tree operand, vec_info *vinfo, enum vect_def_type *dt,
10125 tree *vectype, stmt_vec_info *def_stmt_info_out,
10126 gimple **def_stmt_out)
10128 stmt_vec_info def_stmt_info;
10129 gimple *def_stmt;
10130 if (!vect_is_simple_use (operand, vinfo, dt, &def_stmt_info, &def_stmt))
10131 return false;
10133 if (def_stmt_out)
10134 *def_stmt_out = def_stmt;
10135 if (def_stmt_info_out)
10136 *def_stmt_info_out = def_stmt_info;
10138 /* Now get a vector type if the def is internal, otherwise supply
10139 NULL_TREE and leave it up to the caller to figure out a proper
10140 type for the use stmt. */
10141 if (*dt == vect_internal_def
10142 || *dt == vect_induction_def
10143 || *dt == vect_reduction_def
10144 || *dt == vect_double_reduction_def
10145 || *dt == vect_nested_cycle)
10147 *vectype = STMT_VINFO_VECTYPE (def_stmt_info);
10148 gcc_assert (*vectype != NULL_TREE);
10149 if (dump_enabled_p ())
10150 dump_printf_loc (MSG_NOTE, vect_location,
10151 "vect_is_simple_use: vectype %T\n", *vectype);
10153 else if (*dt == vect_uninitialized_def
10154 || *dt == vect_constant_def
10155 || *dt == vect_external_def)
10156 *vectype = NULL_TREE;
10157 else
10158 gcc_unreachable ();
10160 return true;
10164 /* Function supportable_widening_operation
10166 Check whether an operation represented by the code CODE is a
10167 widening operation that is supported by the target platform in
10168 vector form (i.e., when operating on arguments of type VECTYPE_IN
10169 producing a result of type VECTYPE_OUT).
10171 Widening operations we currently support are NOP (CONVERT), FLOAT,
10172 FIX_TRUNC and WIDEN_MULT. This function checks if these operations
10173 are supported by the target platform either directly (via vector
10174 tree-codes), or via target builtins.
10176 Output:
10177 - CODE1 and CODE2 are codes of vector operations to be used when
10178 vectorizing the operation, if available.
10179 - MULTI_STEP_CVT determines the number of required intermediate steps in
10180 case of multi-step conversion (like char->short->int - in that case
10181 MULTI_STEP_CVT will be 1).
10182 - INTERM_TYPES contains the intermediate type required to perform the
10183 widening operation (short in the above example). */
10185 bool
10186 supportable_widening_operation (enum tree_code code, stmt_vec_info stmt_info,
10187 tree vectype_out, tree vectype_in,
10188 enum tree_code *code1, enum tree_code *code2,
10189 int *multi_step_cvt,
10190 vec<tree> *interm_types)
10192 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
10193 struct loop *vect_loop = NULL;
10194 machine_mode vec_mode;
10195 enum insn_code icode1, icode2;
10196 optab optab1, optab2;
10197 tree vectype = vectype_in;
10198 tree wide_vectype = vectype_out;
10199 enum tree_code c1, c2;
10200 int i;
10201 tree prev_type, intermediate_type;
10202 machine_mode intermediate_mode, prev_mode;
10203 optab optab3, optab4;
10205 *multi_step_cvt = 0;
10206 if (loop_info)
10207 vect_loop = LOOP_VINFO_LOOP (loop_info);
10209 switch (code)
10211 case WIDEN_MULT_EXPR:
10212 /* The result of a vectorized widening operation usually requires
10213 two vectors (because the widened results do not fit into one vector).
10214 The generated vector results would normally be expected to be
10215 generated in the same order as in the original scalar computation,
10216 i.e. if 8 results are generated in each vector iteration, they are
10217 to be organized as follows:
10218 vect1: [res1,res2,res3,res4],
10219 vect2: [res5,res6,res7,res8].
10221 However, in the special case that the result of the widening
10222 operation is used in a reduction computation only, the order doesn't
10223 matter (because when vectorizing a reduction we change the order of
10224 the computation). Some targets can take advantage of this and
10225 generate more efficient code. For example, targets like Altivec,
10226 that support widen_mult using a sequence of {mult_even,mult_odd}
10227 generate the following vectors:
10228 vect1: [res1,res3,res5,res7],
10229 vect2: [res2,res4,res6,res8].
10231 When vectorizing outer-loops, we execute the inner-loop sequentially
10232 (each vectorized inner-loop iteration contributes to VF outer-loop
10233 iterations in parallel). We therefore don't allow changing the
10234 order of the computation in the inner-loop during outer-loop
10235 vectorization. */
10236 /* TODO: Another case in which order doesn't *really* matter is when we
10237 widen and then contract again, e.g. (short)((int)x * y >> 8).
10238 Normally, pack_trunc performs an even/odd permute, whereas the
10239 repack from an even/odd expansion would be an interleave, which
10240 would be significantly simpler for e.g. AVX2. */
10241 /* In any case, in order to avoid duplicating the code below, recurse
10242 on VEC_WIDEN_MULT_EVEN_EXPR. If it succeeds, all the return values
10243 are properly set up for the caller. If we fail, we'll continue with
10244 a VEC_WIDEN_MULT_LO/HI_EXPR check. */
10245 if (vect_loop
10246 && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
10247 && !nested_in_vect_loop_p (vect_loop, stmt_info)
10248 && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
10249 stmt_info, vectype_out,
10250 vectype_in, code1, code2,
10251 multi_step_cvt, interm_types))
10253 /* Elements in a vector with vect_used_by_reduction property cannot
10254 be reordered if the use chain with this property does not have the
10255 same operation. One such example is s += a * b, where elements
10256 in a and b cannot be reordered. Here we check if the vector defined
10257 by STMT is only directly used in the reduction statement. */
10258 tree lhs = gimple_assign_lhs (stmt_info->stmt);
10259 stmt_vec_info use_stmt_info = loop_info->lookup_single_use (lhs);
10260 if (use_stmt_info
10261 && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
10262 return true;
10264 c1 = VEC_WIDEN_MULT_LO_EXPR;
10265 c2 = VEC_WIDEN_MULT_HI_EXPR;
10266 break;
10268 case DOT_PROD_EXPR:
10269 c1 = DOT_PROD_EXPR;
10270 c2 = DOT_PROD_EXPR;
10271 break;
10273 case SAD_EXPR:
10274 c1 = SAD_EXPR;
10275 c2 = SAD_EXPR;
10276 break;
10278 case VEC_WIDEN_MULT_EVEN_EXPR:
10279 /* Support the recursion induced just above. */
10280 c1 = VEC_WIDEN_MULT_EVEN_EXPR;
10281 c2 = VEC_WIDEN_MULT_ODD_EXPR;
10282 break;
10284 case WIDEN_LSHIFT_EXPR:
10285 c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
10286 c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
10287 break;
10289 CASE_CONVERT:
10290 c1 = VEC_UNPACK_LO_EXPR;
10291 c2 = VEC_UNPACK_HI_EXPR;
10292 break;
10294 case FLOAT_EXPR:
10295 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
10296 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
10297 break;
10299 case FIX_TRUNC_EXPR:
10300 c1 = VEC_UNPACK_FIX_TRUNC_LO_EXPR;
10301 c2 = VEC_UNPACK_FIX_TRUNC_HI_EXPR;
10302 break;
10304 default:
10305 gcc_unreachable ();
10308 if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
10309 std::swap (c1, c2);
10311 if (code == FIX_TRUNC_EXPR)
10313 /* The signedness is determined from the output operand. */
10314 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
10315 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
10317 else if (CONVERT_EXPR_CODE_P (code)
10318 && VECTOR_BOOLEAN_TYPE_P (wide_vectype)
10319 && VECTOR_BOOLEAN_TYPE_P (vectype)
10320 && TYPE_MODE (wide_vectype) == TYPE_MODE (vectype)
10321 && SCALAR_INT_MODE_P (TYPE_MODE (vectype)))
10323 /* If the input and result modes are the same, a different optab
10324 is needed where we pass in the number of units in vectype. */
10325 optab1 = vec_unpacks_sbool_lo_optab;
10326 optab2 = vec_unpacks_sbool_hi_optab;
10328 else
10330 optab1 = optab_for_tree_code (c1, vectype, optab_default);
10331 optab2 = optab_for_tree_code (c2, vectype, optab_default);
10334 if (!optab1 || !optab2)
10335 return false;
10337 vec_mode = TYPE_MODE (vectype);
10338 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
10339 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
10340 return false;
10342 *code1 = c1;
10343 *code2 = c2;
10345 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
10346 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
10348 if (!VECTOR_BOOLEAN_TYPE_P (vectype))
10349 return true;
10350 /* For scalar masks we may have different boolean
10351 vector types having the same QImode. Thus we
10352 add an additional check on the number of elements. */
10353 if (known_eq (TYPE_VECTOR_SUBPARTS (vectype),
10354 TYPE_VECTOR_SUBPARTS (wide_vectype) * 2))
10355 return true;
10358 /* Check if it's a multi-step conversion that can be done using intermediate
10359 types. */
10361 prev_type = vectype;
10362 prev_mode = vec_mode;
10364 if (!CONVERT_EXPR_CODE_P (code))
10365 return false;
10367 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
10368 intermediate steps in the promotion sequence. We try
10369 MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
10370 not. */
10371 interm_types->create (MAX_INTERM_CVT_STEPS);
10372 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
10374 intermediate_mode = insn_data[icode1].operand[0].mode;
10375 if (VECTOR_BOOLEAN_TYPE_P (prev_type))
10377 intermediate_type = vect_halve_mask_nunits (prev_type);
10378 if (intermediate_mode != TYPE_MODE (intermediate_type))
10379 return false;
10381 else
10382 intermediate_type
10383 = lang_hooks.types.type_for_mode (intermediate_mode,
10384 TYPE_UNSIGNED (prev_type));
10386 if (VECTOR_BOOLEAN_TYPE_P (intermediate_type)
10387 && VECTOR_BOOLEAN_TYPE_P (prev_type)
10388 && intermediate_mode == prev_mode
10389 && SCALAR_INT_MODE_P (prev_mode))
10391 /* If the input and result modes are the same, a different optab
10392 is needed where we pass in the number of units in vectype. */
10393 optab3 = vec_unpacks_sbool_lo_optab;
10394 optab4 = vec_unpacks_sbool_hi_optab;
10396 else
10398 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
10399 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
10402 if (!optab3 || !optab4
10403 || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
10404 || insn_data[icode1].operand[0].mode != intermediate_mode
10405 || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
10406 || insn_data[icode2].operand[0].mode != intermediate_mode
10407 || ((icode1 = optab_handler (optab3, intermediate_mode))
10408 == CODE_FOR_nothing)
10409 || ((icode2 = optab_handler (optab4, intermediate_mode))
10410 == CODE_FOR_nothing))
10411 break;
10413 interm_types->quick_push (intermediate_type);
10414 (*multi_step_cvt)++;
10416 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
10417 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
10419 if (!VECTOR_BOOLEAN_TYPE_P (vectype))
10420 return true;
10421 if (known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type),
10422 TYPE_VECTOR_SUBPARTS (wide_vectype) * 2))
10423 return true;
10426 prev_type = intermediate_type;
10427 prev_mode = intermediate_mode;
10430 interm_types->release ();
10431 return false;
10435 /* Function supportable_narrowing_operation
10437 Check whether an operation represented by the code CODE is a
10438 narrowing operation that is supported by the target platform in
10439 vector form (i.e., when operating on arguments of type VECTYPE_IN
10440 and producing a result of type VECTYPE_OUT).
10442 Narrowing operations we currently support are NOP (CONVERT), FIX_TRUNC
10443 and FLOAT. This function checks if these operations are supported by
10444 the target platform directly via vector tree-codes.
10446 Output:
10447 - CODE1 is the code of a vector operation to be used when
10448 vectorizing the operation, if available.
10449 - MULTI_STEP_CVT determines the number of required intermediate steps in
10450 case of multi-step conversion (like int->short->char - in that case
10451 MULTI_STEP_CVT will be 1).
10452 - INTERM_TYPES contains the intermediate type required to perform the
10453 narrowing operation (short in the above example). */
10455 bool
10456 supportable_narrowing_operation (enum tree_code code,
10457 tree vectype_out, tree vectype_in,
10458 enum tree_code *code1, int *multi_step_cvt,
10459 vec<tree> *interm_types)
10461 machine_mode vec_mode;
10462 enum insn_code icode1;
10463 optab optab1, interm_optab;
10464 tree vectype = vectype_in;
10465 tree narrow_vectype = vectype_out;
10466 enum tree_code c1;
10467 tree intermediate_type, prev_type;
10468 machine_mode intermediate_mode, prev_mode;
10469 int i;
10470 bool uns;
10472 *multi_step_cvt = 0;
10473 switch (code)
10475 CASE_CONVERT:
10476 c1 = VEC_PACK_TRUNC_EXPR;
10477 if (VECTOR_BOOLEAN_TYPE_P (narrow_vectype)
10478 && VECTOR_BOOLEAN_TYPE_P (vectype)
10479 && TYPE_MODE (narrow_vectype) == TYPE_MODE (vectype)
10480 && SCALAR_INT_MODE_P (TYPE_MODE (vectype)))
10481 optab1 = vec_pack_sbool_trunc_optab;
10482 else
10483 optab1 = optab_for_tree_code (c1, vectype, optab_default);
10484 break;
10486 case FIX_TRUNC_EXPR:
10487 c1 = VEC_PACK_FIX_TRUNC_EXPR;
10488 /* The signedness is determined from the output operand. */
10489 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
10490 break;
10492 case FLOAT_EXPR:
10493 c1 = VEC_PACK_FLOAT_EXPR;
10494 optab1 = optab_for_tree_code (c1, vectype, optab_default);
10495 break;
10497 default:
10498 gcc_unreachable ();
10501 if (!optab1)
10502 return false;
10504 vec_mode = TYPE_MODE (vectype);
10505 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
10506 return false;
10508 *code1 = c1;
10510 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
10512 if (!VECTOR_BOOLEAN_TYPE_P (vectype))
10513 return true;
10514 /* For scalar masks we may have different boolean
10515 vector types having the same QImode. Thus we
10516 add an additional check on the number of elements. */
10517 if (known_eq (TYPE_VECTOR_SUBPARTS (vectype) * 2,
10518 TYPE_VECTOR_SUBPARTS (narrow_vectype)))
10519 return true;
10522 if (code == FLOAT_EXPR)
10523 return false;
10525 /* Check if it's a multi-step conversion that can be done using intermediate
10526 types. */
10527 prev_mode = vec_mode;
10528 prev_type = vectype;
10529 if (code == FIX_TRUNC_EXPR)
10530 uns = TYPE_UNSIGNED (vectype_out);
10531 else
10532 uns = TYPE_UNSIGNED (vectype);
10534 /* For multi-step FIX_TRUNC_EXPR prefer a signed float-to-integer
10535 conversion over an unsigned one, as unsigned FIX_TRUNC_EXPR is often
10536 more costly than signed. */
10537 if (code == FIX_TRUNC_EXPR && uns)
10539 enum insn_code icode2;
10541 intermediate_type
10542 = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
10543 interm_optab
10544 = optab_for_tree_code (c1, intermediate_type, optab_default);
10545 if (interm_optab != unknown_optab
10546 && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
10547 && insn_data[icode1].operand[0].mode
10548 == insn_data[icode2].operand[0].mode)
10550 uns = false;
10551 optab1 = interm_optab;
10552 icode1 = icode2;
10556 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
10557 intermediate steps in the demotion sequence. We try
10558 MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not. */
10559 interm_types->create (MAX_INTERM_CVT_STEPS);
10560 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
10562 intermediate_mode = insn_data[icode1].operand[0].mode;
10563 if (VECTOR_BOOLEAN_TYPE_P (prev_type))
10565 intermediate_type = vect_double_mask_nunits (prev_type);
10566 if (intermediate_mode != TYPE_MODE (intermediate_type))
10567 return false;
10569 else
10570 intermediate_type
10571 = lang_hooks.types.type_for_mode (intermediate_mode, uns);
10572 if (VECTOR_BOOLEAN_TYPE_P (intermediate_type)
10573 && VECTOR_BOOLEAN_TYPE_P (prev_type)
10574 && intermediate_mode == prev_mode
10575 && SCALAR_INT_MODE_P (prev_mode))
10576 interm_optab = vec_pack_sbool_trunc_optab;
10577 else
10578 interm_optab
10579 = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
10580 optab_default);
10581 if (!interm_optab
10582 || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
10583 || insn_data[icode1].operand[0].mode != intermediate_mode
10584 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
10585 == CODE_FOR_nothing))
10586 break;
10588 interm_types->quick_push (intermediate_type);
10589 (*multi_step_cvt)++;
10591 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
10593 if (!VECTOR_BOOLEAN_TYPE_P (vectype))
10594 return true;
10595 if (known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type) * 2,
10596 TYPE_VECTOR_SUBPARTS (narrow_vectype)))
10597 return true;
10600 prev_mode = intermediate_mode;
10601 prev_type = intermediate_type;
10602 optab1 = interm_optab;
10605 interm_types->release ();
10606 return false;
10609 /* Generate and return a statement that sets vector mask MASK such that
10610 MASK[I] is true iff J + START_INDEX < END_INDEX for all J <= I. */
10612 gcall *
10613 vect_gen_while (tree mask, tree start_index, tree end_index)
10615 tree cmp_type = TREE_TYPE (start_index);
10616 tree mask_type = TREE_TYPE (mask);
10617 gcc_checking_assert (direct_internal_fn_supported_p (IFN_WHILE_ULT,
10618 cmp_type, mask_type,
10619 OPTIMIZE_FOR_SPEED));
10620 gcall *call = gimple_build_call_internal (IFN_WHILE_ULT, 3,
10621 start_index, end_index,
10622 build_zero_cst (mask_type));
10623 gimple_call_set_lhs (call, mask);
10624 return call;
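/* A scalar model (editorial illustration, kept under #if 0 so it is never
   compiled) of the mask produced above: MASK[I] is true iff
   J + START_INDEX < END_INDEX for all J <= I, which for an unsigned,
   non-wrapping index is the same as START_INDEX + I < END_INDEX.  */
#if 0
#include <assert.h>
#include <stdbool.h>

int
main (void)
{
  unsigned int start_index = 8, end_index = 11;
  bool mask[8];
  for (unsigned int i = 0; i < 8; i++)
    mask[i] = start_index + i < end_index;
  /* Only lanes 0, 1 and 2 (indices 8, 9 and 10) are still in range.  */
  assert (mask[0] && mask[1] && mask[2]);
  for (unsigned int i = 3; i < 8; i++)
    assert (!mask[i]);
  return 0;
}
#endif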
10627 /* Generate a vector mask of type MASK_TYPE for which index I is false iff
10628 J + START_INDEX < END_INDEX for all J <= I. Add the statements to SEQ. */
10630 tree
10631 vect_gen_while_not (gimple_seq *seq, tree mask_type, tree start_index,
10632 tree end_index)
10634 tree tmp = make_ssa_name (mask_type);
10635 gcall *call = vect_gen_while (tmp, start_index, end_index);
10636 gimple_seq_add_stmt (seq, call);
10637 return gimple_build (seq, BIT_NOT_EXPR, mask_type, tmp);
10640 /* Try to compute the vector types required to vectorize STMT_INFO,
10641 returning true on success and false if vectorization isn't possible.
10643 On success:
10645 - Set *STMT_VECTYPE_OUT to:
10646 - NULL_TREE if the statement doesn't need to be vectorized;
10647 - boolean_type_node if the statement is a boolean operation whose
10648 vector type can only be determined once all the other vector types
10649 are known; and
10650 - the equivalent of STMT_VINFO_VECTYPE otherwise.
10652 - Set *NUNITS_VECTYPE_OUT to the vector type that contains the maximum
10653 number of units needed to vectorize STMT_INFO, or NULL_TREE if the
10654 statement does not help to determine the overall number of units. */
10656 opt_result
10657 vect_get_vector_types_for_stmt (stmt_vec_info stmt_info,
10658 tree *stmt_vectype_out,
10659 tree *nunits_vectype_out)
10661 gimple *stmt = stmt_info->stmt;
10663 *stmt_vectype_out = NULL_TREE;
10664 *nunits_vectype_out = NULL_TREE;
10666 if (gimple_get_lhs (stmt) == NULL_TREE
10667 /* MASK_STORE has no lhs, but is ok. */
10668 && !gimple_call_internal_p (stmt, IFN_MASK_STORE))
10670 if (is_a <gcall *> (stmt))
10672 /* Ignore calls with no lhs. These must be calls to
10673 #pragma omp simd functions, and the vectorization factor
10674 they really need can't be determined until
10675 vectorizable_simd_clone_call. */
10676 if (dump_enabled_p ())
10677 dump_printf_loc (MSG_NOTE, vect_location,
10678 "defer to SIMD clone analysis.\n");
10679 return opt_result::success ();
10682 return opt_result::failure_at (stmt,
10683 "not vectorized: irregular stmt.%G", stmt);
10686 if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
10687 return opt_result::failure_at (stmt,
10688 "not vectorized: vector stmt in loop:%G",
10689 stmt);
10691 tree vectype;
10692 tree scalar_type = NULL_TREE;
10693 if (STMT_VINFO_VECTYPE (stmt_info))
10694 *stmt_vectype_out = vectype = STMT_VINFO_VECTYPE (stmt_info);
10695 else
10697 gcc_assert (!STMT_VINFO_DATA_REF (stmt_info));
10698 if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
10699 scalar_type = TREE_TYPE (gimple_call_arg (stmt, 3));
10700 else
10701 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
10703 /* Pure bool ops don't participate in number-of-units computation.
10704 For comparisons use the types being compared. */
10705 if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type)
10706 && is_gimple_assign (stmt)
10707 && gimple_assign_rhs_code (stmt) != COND_EXPR)
10709 *stmt_vectype_out = boolean_type_node;
10711 tree rhs1 = gimple_assign_rhs1 (stmt);
10712 if (TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison
10713 && !VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (rhs1)))
10714 scalar_type = TREE_TYPE (rhs1);
10715 else
10717 if (dump_enabled_p ())
10718 dump_printf_loc (MSG_NOTE, vect_location,
10719 "pure bool operation.\n");
10720 return opt_result::success ();
10724 if (dump_enabled_p ())
10725 dump_printf_loc (MSG_NOTE, vect_location,
10726 "get vectype for scalar type: %T\n", scalar_type);
10727 vectype = get_vectype_for_scalar_type (scalar_type);
10728 if (!vectype)
10729 return opt_result::failure_at (stmt,
10730 "not vectorized:"
10731 " unsupported data-type %T\n",
10732 scalar_type);
10734 if (!*stmt_vectype_out)
10735 *stmt_vectype_out = vectype;
10737 if (dump_enabled_p ())
10738 dump_printf_loc (MSG_NOTE, vect_location, "vectype: %T\n", vectype);
10741 /* Don't try to compute scalar types if the stmt produces a boolean
10742 vector; use the existing vector type instead. */
10743 tree nunits_vectype;
10744 if (VECTOR_BOOLEAN_TYPE_P (vectype))
10745 nunits_vectype = vectype;
10746 else
10748 /* The number of units is set according to the smallest scalar
10749 type (or the largest vector size, but we only support one
10750 vector size per vectorization). */
10751 if (*stmt_vectype_out != boolean_type_node)
10753 HOST_WIDE_INT dummy;
10754 scalar_type = vect_get_smallest_scalar_type (stmt_info,
10755 &dummy, &dummy);
10757 if (dump_enabled_p ())
10758 dump_printf_loc (MSG_NOTE, vect_location,
10759 "get vectype for scalar type: %T\n", scalar_type);
10760 nunits_vectype = get_vectype_for_scalar_type (scalar_type);
10762 if (!nunits_vectype)
10763 return opt_result::failure_at (stmt,
10764 "not vectorized: unsupported data-type %T\n",
10765 scalar_type);
10767 if (maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype)),
10768 GET_MODE_SIZE (TYPE_MODE (nunits_vectype))))
10769 return opt_result::failure_at (stmt,
10770 "not vectorized: different sized vector "
10771 "types in statement, %T and %T\n",
10772 vectype, nunits_vectype);
10774 if (dump_enabled_p ())
10776 dump_printf_loc (MSG_NOTE, vect_location, "vectype: %T\n",
10777 nunits_vectype);
10779 dump_printf_loc (MSG_NOTE, vect_location, "nunits = ");
10780 dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (nunits_vectype));
10781 dump_printf (MSG_NOTE, "\n");
10784 *nunits_vectype_out = nunits_vectype;
10785 return opt_result::success ();
10788 /* Try to determine the correct vector type for STMT_INFO, which is a
10789 statement that produces a scalar boolean result. Return the vector
10790 type on success, otherwise return NULL_TREE. */
10792 opt_tree
10793 vect_get_mask_type_for_stmt (stmt_vec_info stmt_info)
10795 gimple *stmt = stmt_info->stmt;
10796 tree mask_type = NULL;
10797 tree vectype, scalar_type;
10799 if (is_gimple_assign (stmt)
10800 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison
10801 && !VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt))))
10803 scalar_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
10804 mask_type = get_mask_type_for_scalar_type (scalar_type);
10806 if (!mask_type)
10807 return opt_tree::failure_at (stmt,
10808 "not vectorized: unsupported mask\n");
10810 else
10812 tree rhs;
10813 ssa_op_iter iter;
10814 enum vect_def_type dt;
10816 FOR_EACH_SSA_TREE_OPERAND (rhs, stmt, iter, SSA_OP_USE)
10818 if (!vect_is_simple_use (rhs, stmt_info->vinfo, &dt, &vectype))
10819 return opt_tree::failure_at (stmt,
10820 "not vectorized:can't compute mask"
10821 " type for statement, %G", stmt);
10823 /* No vectype probably means external definition.
10824 Allow it in case there is another operand which
10825 allows the mask type to be determined. */
10826 if (!vectype)
10827 continue;
10829 if (!mask_type)
10830 mask_type = vectype;
10831 else if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_type),
10832 TYPE_VECTOR_SUBPARTS (vectype)))
10833 return opt_tree::failure_at (stmt,
10834 "not vectorized: different sized mask"
10835 " types in statement, %T and %T\n",
10836 mask_type, vectype);
10837 else if (VECTOR_BOOLEAN_TYPE_P (mask_type)
10838 != VECTOR_BOOLEAN_TYPE_P (vectype))
10839 return opt_tree::failure_at (stmt,
10840 "not vectorized: mixed mask and "
10841 "nonmask vector types in statement, "
10842 "%T and %T\n",
10843 mask_type, vectype);
10846 /* We may compare a boolean value loaded as a vector of integers.
10847 Fix mask_type in that case. */
10848 if (mask_type
10849 && !VECTOR_BOOLEAN_TYPE_P (mask_type)
10850 && gimple_code (stmt) == GIMPLE_ASSIGN
10851 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
10852 mask_type = build_same_sized_truth_vector_type (mask_type);
10855 /* No mask_type should mean a loop-invariant predicate.
10856 This is probably a subject for optimization in if-conversion. */
10857 if (!mask_type)
10858 return opt_tree::failure_at (stmt,
10859 "not vectorized: can't compute mask type "
10860 "for statement: %G", stmt);
10862 return opt_tree::success (mask_type);