gcc/tree-vect-stmts.c (official-gcc.git)
1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003-2018 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
4 and Ira Rosen <irar@il.ibm.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "ssa.h"
31 #include "optabs-tree.h"
32 #include "insn-config.h"
33 #include "recog.h" /* FIXME: for insn_data */
34 #include "cgraph.h"
35 #include "dumpfile.h"
36 #include "alias.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
39 #include "tree-eh.h"
40 #include "gimplify.h"
41 #include "gimple-iterator.h"
42 #include "gimplify-me.h"
43 #include "tree-cfg.h"
44 #include "tree-ssa-loop-manip.h"
45 #include "cfgloop.h"
46 #include "tree-ssa-loop.h"
47 #include "tree-scalar-evolution.h"
48 #include "tree-vectorizer.h"
49 #include "builtins.h"
50 #include "internal-fn.h"
51 #include "tree-vector-builder.h"
52 #include "vec-perm-indices.h"
53 #include "tree-ssa-loop-niter.h"
54 #include "gimple-fold.h"
56 /* For lang_hooks.types.type_for_mode. */
57 #include "langhooks.h"
59 /* Return the vectorized type for the given statement. */
61 tree
62 stmt_vectype (struct _stmt_vec_info *stmt_info)
64 return STMT_VINFO_VECTYPE (stmt_info);
67 /* Return TRUE iff the given statement is in an inner loop relative to
68 the loop being vectorized. */
69 bool
70 stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
72 gimple *stmt = STMT_VINFO_STMT (stmt_info);
73 basic_block bb = gimple_bb (stmt);
74 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
75 struct loop* loop;
77 if (!loop_vinfo)
78 return false;
80 loop = LOOP_VINFO_LOOP (loop_vinfo);
82 return (bb->loop_father == loop->inner);
85 /* Record the cost of a statement, either by directly informing the
86 target model or by saving it in a vector for later processing.
87 Return a preliminary estimate of the statement's cost. */
89 unsigned
90 record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
91 enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
92 int misalign, enum vect_cost_model_location where)
94 if ((kind == vector_load || kind == unaligned_load)
95 && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
96 kind = vector_gather_load;
97 if ((kind == vector_store || kind == unaligned_store)
98 && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
99 kind = vector_scatter_store;
101 stmt_info_for_cost si = { count, kind, where, stmt_info, misalign };
102 body_cost_vec->safe_push (si);
104 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
105 return (unsigned)
106 (builtin_vectorization_cost (kind, vectype, misalign) * count);
109 /* Return a variable of type ELEM_TYPE[NELEMS]. */
111 static tree
112 create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
114 return create_tmp_var (build_array_type_nelts (elem_type, nelems),
115 "vect_array");
118 /* ARRAY is an array of vectors created by create_vector_array.
119 Return an SSA_NAME for the vector in index N. The reference
120 is part of the vectorization of STMT_INFO and the vector is associated
121 with scalar destination SCALAR_DEST. */
123 static tree
124 read_vector_array (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
125 tree scalar_dest, tree array, unsigned HOST_WIDE_INT n)
127 tree vect_type, vect, vect_name, array_ref;
128 gimple *new_stmt;
130 gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
131 vect_type = TREE_TYPE (TREE_TYPE (array));
132 vect = vect_create_destination_var (scalar_dest, vect_type);
133 array_ref = build4 (ARRAY_REF, vect_type, array,
134 build_int_cst (size_type_node, n),
135 NULL_TREE, NULL_TREE);
137 new_stmt = gimple_build_assign (vect, array_ref);
138 vect_name = make_ssa_name (vect, new_stmt);
139 gimple_assign_set_lhs (new_stmt, vect_name);
140 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
142 return vect_name;
145 /* ARRAY is an array of vectors created by create_vector_array.
146 Emit code to store SSA_NAME VECT in index N of the array.
147 The store is part of the vectorization of STMT_INFO. */
149 static void
150 write_vector_array (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
151 tree vect, tree array, unsigned HOST_WIDE_INT n)
153 tree array_ref;
154 gimple *new_stmt;
156 array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
157 build_int_cst (size_type_node, n),
158 NULL_TREE, NULL_TREE);
160 new_stmt = gimple_build_assign (array_ref, vect);
161 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
 164 /* PTR is a pointer to an array of type TYPE. Return a representation
 165 of *PTR, using ALIAS_PTR_TYPE as the alias pointer type (and hence
 166 the alias set) of the access. */
168 static tree
169 create_array_ref (tree type, tree ptr, tree alias_ptr_type)
171 tree mem_ref;
173 mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
174 /* Arrays have the same alignment as their type. */
175 set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
176 return mem_ref;
179 /* Add a clobber of variable VAR to the vectorization of STMT_INFO.
180 Emit the clobber before *GSI. */
182 static void
183 vect_clobber_variable (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
184 tree var)
186 tree clobber = build_clobber (TREE_TYPE (var));
187 gimple *new_stmt = gimple_build_assign (var, clobber);
188 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
191 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
193 /* Function vect_mark_relevant.
195 Mark STMT_INFO as "relevant for vectorization" and add it to WORKLIST. */
197 static void
198 vect_mark_relevant (vec<stmt_vec_info> *worklist, stmt_vec_info stmt_info,
199 enum vect_relevant relevant, bool live_p)
201 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
202 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
204 if (dump_enabled_p ())
206 dump_printf_loc (MSG_NOTE, vect_location,
207 "mark relevant %d, live %d: ", relevant, live_p);
208 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
211 /* If this stmt is an original stmt in a pattern, we might need to mark its
212 related pattern stmt instead of the original stmt. However, such stmts
 213 may have their own uses that are not in any pattern; in such cases the
214 stmt itself should be marked. */
215 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
217 /* This is the last stmt in a sequence that was detected as a
218 pattern that can potentially be vectorized. Don't mark the stmt
219 as relevant/live because it's not going to be vectorized.
220 Instead mark the pattern-stmt that replaces it. */
222 if (dump_enabled_p ())
223 dump_printf_loc (MSG_NOTE, vect_location,
224 "last stmt in pattern. don't mark"
225 " relevant/live.\n");
226 stmt_vec_info old_stmt_info = stmt_info;
227 stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
228 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == old_stmt_info);
229 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
230 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
233 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
234 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
235 STMT_VINFO_RELEVANT (stmt_info) = relevant;
237 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
238 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
240 if (dump_enabled_p ())
241 dump_printf_loc (MSG_NOTE, vect_location,
242 "already marked relevant/live.\n");
243 return;
246 worklist->safe_push (stmt_info);
250 /* Function is_simple_and_all_uses_invariant
252 Return true if STMT_INFO is simple and all uses of it are invariant. */
254 bool
255 is_simple_and_all_uses_invariant (stmt_vec_info stmt_info,
256 loop_vec_info loop_vinfo)
258 tree op;
259 ssa_op_iter iter;
261 gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
262 if (!stmt)
263 return false;
265 FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
267 enum vect_def_type dt = vect_uninitialized_def;
269 if (!vect_is_simple_use (op, loop_vinfo, &dt))
271 if (dump_enabled_p ())
272 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
273 "use not simple.\n");
274 return false;
277 if (dt != vect_external_def && dt != vect_constant_def)
278 return false;
280 return true;
283 /* Function vect_stmt_relevant_p.
285 Return true if STMT_INFO, in the loop that is represented by LOOP_VINFO,
286 is "relevant for vectorization".
288 A stmt is considered "relevant for vectorization" if:
289 - it has uses outside the loop.
290 - it has vdefs (it alters memory).
 291 - it is a control stmt in the loop (other than the loop exit condition).
293 CHECKME: what other side effects would the vectorizer allow? */
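/* For instance, a store "MEM[p_1] = x_2" is relevant because it has a vdef,
   while a computation "sum_3 = sum_2 + x_4" whose result is only used by a
   loop-exit phi is marked live; the SSA names here are purely
   illustrative.  */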
295 static bool
296 vect_stmt_relevant_p (stmt_vec_info stmt_info, loop_vec_info loop_vinfo,
297 enum vect_relevant *relevant, bool *live_p)
299 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
300 ssa_op_iter op_iter;
301 imm_use_iterator imm_iter;
302 use_operand_p use_p;
303 def_operand_p def_p;
305 *relevant = vect_unused_in_scope;
306 *live_p = false;
308 /* cond stmt other than loop exit cond. */
309 if (is_ctrl_stmt (stmt_info->stmt)
310 && STMT_VINFO_TYPE (stmt_info) != loop_exit_ctrl_vec_info_type)
311 *relevant = vect_used_in_scope;
313 /* changing memory. */
314 if (gimple_code (stmt_info->stmt) != GIMPLE_PHI)
315 if (gimple_vdef (stmt_info->stmt)
316 && !gimple_clobber_p (stmt_info->stmt))
318 if (dump_enabled_p ())
319 dump_printf_loc (MSG_NOTE, vect_location,
320 "vec_stmt_relevant_p: stmt has vdefs.\n");
321 *relevant = vect_used_in_scope;
324 /* uses outside the loop. */
325 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt_info->stmt, op_iter, SSA_OP_DEF)
327 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
329 basic_block bb = gimple_bb (USE_STMT (use_p));
330 if (!flow_bb_inside_loop_p (loop, bb))
332 if (dump_enabled_p ())
333 dump_printf_loc (MSG_NOTE, vect_location,
334 "vec_stmt_relevant_p: used out of loop.\n");
336 if (is_gimple_debug (USE_STMT (use_p)))
337 continue;
339 /* We expect all such uses to be in the loop exit phis
340 (because of loop closed form) */
341 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
342 gcc_assert (bb == single_exit (loop)->dest);
344 *live_p = true;
349 if (*live_p && *relevant == vect_unused_in_scope
350 && !is_simple_and_all_uses_invariant (stmt_info, loop_vinfo))
352 if (dump_enabled_p ())
353 dump_printf_loc (MSG_NOTE, vect_location,
354 "vec_stmt_relevant_p: stmt live but not relevant.\n");
355 *relevant = vect_used_only_live;
358 return (*live_p || *relevant);
362 /* Function exist_non_indexing_operands_for_use_p
364 USE is one of the uses attached to STMT_INFO. Check if USE is
365 used in STMT_INFO for anything other than indexing an array. */
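/* For example, in "a[i_1] = x_2" (illustrative names) the use of x_2 is the
   stored value, a non-indexing use, whereas i_1 only feeds the address
   computation and therefore does not by itself make its defining statement
   relevant.  */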
367 static bool
368 exist_non_indexing_operands_for_use_p (tree use, stmt_vec_info stmt_info)
370 tree operand;
372 /* USE corresponds to some operand in STMT. If there is no data
373 reference in STMT, then any operand that corresponds to USE
374 is not indexing an array. */
375 if (!STMT_VINFO_DATA_REF (stmt_info))
376 return true;
 378 /* STMT has a data_ref. FORNOW this means that it's of one of
379 the following forms:
380 -1- ARRAY_REF = var
381 -2- var = ARRAY_REF
382 (This should have been verified in analyze_data_refs).
384 'var' in the second case corresponds to a def, not a use,
385 so USE cannot correspond to any operands that are not used
386 for array indexing.
388 Therefore, all we need to check is if STMT falls into the
389 first case, and whether var corresponds to USE. */
391 gassign *assign = dyn_cast <gassign *> (stmt_info->stmt);
392 if (!assign || !gimple_assign_copy_p (assign))
394 gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
395 if (call && gimple_call_internal_p (call))
397 internal_fn ifn = gimple_call_internal_fn (call);
398 int mask_index = internal_fn_mask_index (ifn);
399 if (mask_index >= 0
400 && use == gimple_call_arg (call, mask_index))
401 return true;
402 int stored_value_index = internal_fn_stored_value_index (ifn);
403 if (stored_value_index >= 0
404 && use == gimple_call_arg (call, stored_value_index))
405 return true;
406 if (internal_gather_scatter_fn_p (ifn)
407 && use == gimple_call_arg (call, 1))
408 return true;
410 return false;
413 if (TREE_CODE (gimple_assign_lhs (assign)) == SSA_NAME)
414 return false;
415 operand = gimple_assign_rhs1 (assign);
416 if (TREE_CODE (operand) != SSA_NAME)
417 return false;
419 if (operand == use)
420 return true;
422 return false;
427 Function process_use.
429 Inputs:
430 - a USE in STMT_VINFO in a loop represented by LOOP_VINFO
431 - RELEVANT - enum value to be set in the STMT_VINFO of the stmt
432 that defined USE. This is done by calling mark_relevant and passing it
433 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
434 - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
435 be performed.
437 Outputs:
438 Generally, LIVE_P and RELEVANT are used to define the liveness and
439 relevance info of the DEF_STMT of this USE:
440 STMT_VINFO_LIVE_P (DEF_stmt_vinfo) <-- live_p
441 STMT_VINFO_RELEVANT (DEF_stmt_vinfo) <-- relevant
442 Exceptions:
443 - case 1: If USE is used only for address computations (e.g. array indexing),
444 which does not need to be directly vectorized, then the liveness/relevance
445 of the respective DEF_STMT is left unchanged.
446 - case 2: If STMT_VINFO is a reduction phi and DEF_STMT is a reduction stmt,
 447 we skip DEF_STMT because it has already been processed.
448 - case 3: If DEF_STMT and STMT_VINFO are in different nests, then
449 "relevant" will be modified accordingly.
451 Return true if everything is as expected. Return false otherwise. */
453 static bool
454 process_use (stmt_vec_info stmt_vinfo, tree use, loop_vec_info loop_vinfo,
455 enum vect_relevant relevant, vec<stmt_vec_info> *worklist,
456 bool force)
458 stmt_vec_info dstmt_vinfo;
459 basic_block bb, def_bb;
460 enum vect_def_type dt;
462 /* case 1: we are only interested in uses that need to be vectorized. Uses
463 that are used for address computation are not considered relevant. */
464 if (!force && !exist_non_indexing_operands_for_use_p (use, stmt_vinfo))
465 return true;
467 if (!vect_is_simple_use (use, loop_vinfo, &dt, &dstmt_vinfo))
469 if (dump_enabled_p ())
470 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
471 "not vectorized: unsupported use in stmt.\n");
472 return false;
475 if (!dstmt_vinfo)
476 return true;
478 def_bb = gimple_bb (dstmt_vinfo->stmt);
480 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DSTMT_VINFO).
481 DSTMT_VINFO must have already been processed, because this should be the
482 only way that STMT, which is a reduction-phi, was put in the worklist,
483 as there should be no other uses for DSTMT_VINFO in the loop. So we just
484 check that everything is as expected, and we are done. */
485 bb = gimple_bb (stmt_vinfo->stmt);
486 if (gimple_code (stmt_vinfo->stmt) == GIMPLE_PHI
487 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
488 && gimple_code (dstmt_vinfo->stmt) != GIMPLE_PHI
489 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
490 && bb->loop_father == def_bb->loop_father)
492 if (dump_enabled_p ())
493 dump_printf_loc (MSG_NOTE, vect_location,
494 "reduc-stmt defining reduc-phi in the same nest.\n");
495 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
496 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
497 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
498 return true;
501 /* case 3a: outer-loop stmt defining an inner-loop stmt:
502 outer-loop-header-bb:
503 d = dstmt_vinfo
504 inner-loop:
505 stmt # use (d)
506 outer-loop-tail-bb:
507 ... */
508 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
510 if (dump_enabled_p ())
511 dump_printf_loc (MSG_NOTE, vect_location,
512 "outer-loop def-stmt defining inner-loop stmt.\n");
514 switch (relevant)
516 case vect_unused_in_scope:
517 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
518 vect_used_in_scope : vect_unused_in_scope;
519 break;
521 case vect_used_in_outer_by_reduction:
522 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
523 relevant = vect_used_by_reduction;
524 break;
526 case vect_used_in_outer:
527 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
528 relevant = vect_used_in_scope;
529 break;
531 case vect_used_in_scope:
532 break;
534 default:
535 gcc_unreachable ();
539 /* case 3b: inner-loop stmt defining an outer-loop stmt:
540 outer-loop-header-bb:
542 inner-loop:
543 d = dstmt_vinfo
544 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
545 stmt # use (d) */
546 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
548 if (dump_enabled_p ())
549 dump_printf_loc (MSG_NOTE, vect_location,
550 "inner-loop def-stmt defining outer-loop stmt.\n");
552 switch (relevant)
554 case vect_unused_in_scope:
555 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
556 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
557 vect_used_in_outer_by_reduction : vect_unused_in_scope;
558 break;
560 case vect_used_by_reduction:
561 case vect_used_only_live:
562 relevant = vect_used_in_outer_by_reduction;
563 break;
565 case vect_used_in_scope:
566 relevant = vect_used_in_outer;
567 break;
569 default:
570 gcc_unreachable ();
573 /* We are also not interested in uses on loop PHI backedges that are
574 inductions. Otherwise we'll needlessly vectorize the IV increment
575 and cause hybrid SLP for SLP inductions. Unless the PHI is live
576 of course. */
577 else if (gimple_code (stmt_vinfo->stmt) == GIMPLE_PHI
578 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_induction_def
579 && ! STMT_VINFO_LIVE_P (stmt_vinfo)
580 && (PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt,
581 loop_latch_edge (bb->loop_father))
582 == use))
584 if (dump_enabled_p ())
585 dump_printf_loc (MSG_NOTE, vect_location,
586 "induction value on backedge.\n");
587 return true;
591 vect_mark_relevant (worklist, dstmt_vinfo, relevant, false);
592 return true;
596 /* Function vect_mark_stmts_to_be_vectorized.
598 Not all stmts in the loop need to be vectorized. For example:
600 for i...
601 for j...
602 1. T0 = i + j
603 2. T1 = a[T0]
605 3. j = j + 1
 607 Stmts 1 and 3 do not need to be vectorized, because loop control and
608 addressing of vectorized data-refs are handled differently.
610 This pass detects such stmts. */
612 bool
613 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
615 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
616 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
617 unsigned int nbbs = loop->num_nodes;
618 gimple_stmt_iterator si;
619 unsigned int i;
620 basic_block bb;
621 bool live_p;
622 enum vect_relevant relevant;
624 DUMP_VECT_SCOPE ("vect_mark_stmts_to_be_vectorized");
626 auto_vec<stmt_vec_info, 64> worklist;
628 /* 1. Init worklist. */
629 for (i = 0; i < nbbs; i++)
631 bb = bbs[i];
632 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
634 stmt_vec_info phi_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
635 if (dump_enabled_p ())
637 dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
638 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi_info->stmt, 0);
641 if (vect_stmt_relevant_p (phi_info, loop_vinfo, &relevant, &live_p))
642 vect_mark_relevant (&worklist, phi_info, relevant, live_p);
644 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
646 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
647 if (dump_enabled_p ())
649 dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
650 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
653 if (vect_stmt_relevant_p (stmt_info, loop_vinfo, &relevant, &live_p))
654 vect_mark_relevant (&worklist, stmt_info, relevant, live_p);
658 /* 2. Process_worklist */
659 while (worklist.length () > 0)
661 use_operand_p use_p;
662 ssa_op_iter iter;
664 stmt_vec_info stmt_vinfo = worklist.pop ();
665 if (dump_enabled_p ())
667 dump_printf_loc (MSG_NOTE, vect_location,
668 "worklist: examine stmt: ");
669 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_vinfo->stmt, 0);
672 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
673 (DEF_STMT) as relevant/irrelevant according to the relevance property
674 of STMT. */
675 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
677 /* Generally, the relevance property of STMT (in STMT_VINFO_RELEVANT) is
678 propagated as is to the DEF_STMTs of its USEs.
680 One exception is when STMT has been identified as defining a reduction
681 variable; in this case we set the relevance to vect_used_by_reduction.
682 This is because we distinguish between two kinds of relevant stmts -
683 those that are used by a reduction computation, and those that are
684 (also) used by a regular computation. This allows us later on to
685 identify stmts that are used solely by a reduction, and therefore the
686 order of the results that they produce does not have to be kept. */
688 switch (STMT_VINFO_DEF_TYPE (stmt_vinfo))
690 case vect_reduction_def:
691 gcc_assert (relevant != vect_unused_in_scope);
692 if (relevant != vect_unused_in_scope
693 && relevant != vect_used_in_scope
694 && relevant != vect_used_by_reduction
695 && relevant != vect_used_only_live)
697 if (dump_enabled_p ())
698 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
699 "unsupported use of reduction.\n");
700 return false;
702 break;
704 case vect_nested_cycle:
705 if (relevant != vect_unused_in_scope
706 && relevant != vect_used_in_outer_by_reduction
707 && relevant != vect_used_in_outer)
709 if (dump_enabled_p ())
710 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
711 "unsupported use of nested cycle.\n");
713 return false;
715 break;
717 case vect_double_reduction_def:
718 if (relevant != vect_unused_in_scope
719 && relevant != vect_used_by_reduction
720 && relevant != vect_used_only_live)
722 if (dump_enabled_p ())
723 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
724 "unsupported use of double reduction.\n");
726 return false;
728 break;
730 default:
731 break;
734 if (is_pattern_stmt_p (stmt_vinfo))
736 /* Pattern statements are not inserted into the code, so
737 FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
738 have to scan the RHS or function arguments instead. */
739 if (gassign *assign = dyn_cast <gassign *> (stmt_vinfo->stmt))
741 enum tree_code rhs_code = gimple_assign_rhs_code (assign);
742 tree op = gimple_assign_rhs1 (assign);
744 i = 1;
745 if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
747 if (!process_use (stmt_vinfo, TREE_OPERAND (op, 0),
748 loop_vinfo, relevant, &worklist, false)
749 || !process_use (stmt_vinfo, TREE_OPERAND (op, 1),
750 loop_vinfo, relevant, &worklist, false))
751 return false;
752 i = 2;
754 for (; i < gimple_num_ops (assign); i++)
756 op = gimple_op (assign, i);
757 if (TREE_CODE (op) == SSA_NAME
758 && !process_use (stmt_vinfo, op, loop_vinfo, relevant,
759 &worklist, false))
760 return false;
763 else if (gcall *call = dyn_cast <gcall *> (stmt_vinfo->stmt))
765 for (i = 0; i < gimple_call_num_args (call); i++)
767 tree arg = gimple_call_arg (call, i);
768 if (!process_use (stmt_vinfo, arg, loop_vinfo, relevant,
769 &worklist, false))
770 return false;
774 else
775 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt_vinfo->stmt, iter, SSA_OP_USE)
777 tree op = USE_FROM_PTR (use_p);
778 if (!process_use (stmt_vinfo, op, loop_vinfo, relevant,
779 &worklist, false))
780 return false;
783 if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
785 gather_scatter_info gs_info;
786 if (!vect_check_gather_scatter (stmt_vinfo, loop_vinfo, &gs_info))
787 gcc_unreachable ();
788 if (!process_use (stmt_vinfo, gs_info.offset, loop_vinfo, relevant,
789 &worklist, true))
790 return false;
792 } /* while worklist */
794 return true;
797 /* Compute the prologue cost for invariant or constant operands. */
799 static unsigned
800 vect_prologue_cost_for_slp_op (slp_tree node, stmt_vec_info stmt_info,
801 unsigned opno, enum vect_def_type dt,
802 stmt_vector_for_cost *cost_vec)
804 gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[0]->stmt;
805 tree op = gimple_op (stmt, opno);
806 unsigned prologue_cost = 0;
808 /* Without looking at the actual initializer a vector of
 809 constants can be implemented as a load from the constant pool.
810 When all elements are the same we can use a splat. */
811 tree vectype = get_vectype_for_scalar_type (TREE_TYPE (op));
812 unsigned group_size = SLP_TREE_SCALAR_STMTS (node).length ();
813 unsigned num_vects_to_check;
814 unsigned HOST_WIDE_INT const_nunits;
815 unsigned nelt_limit;
816 if (TYPE_VECTOR_SUBPARTS (vectype).is_constant (&const_nunits)
817 && ! multiple_p (const_nunits, group_size))
819 num_vects_to_check = SLP_TREE_NUMBER_OF_VEC_STMTS (node);
820 nelt_limit = const_nunits;
822 else
824 /* If either the vector has variable length or the vectors
825 are composed of repeated whole groups we only need to
826 cost construction once. All vectors will be the same. */
827 num_vects_to_check = 1;
828 nelt_limit = group_size;
830 tree elt = NULL_TREE;
831 unsigned nelt = 0;
832 for (unsigned j = 0; j < num_vects_to_check * nelt_limit; ++j)
834 unsigned si = j % group_size;
835 if (nelt == 0)
836 elt = gimple_op (SLP_TREE_SCALAR_STMTS (node)[si]->stmt, opno);
837 /* ??? We're just tracking whether all operands of a single
838 vector initializer are the same, ideally we'd check if
839 we emitted the same one already. */
840 else if (elt != gimple_op (SLP_TREE_SCALAR_STMTS (node)[si]->stmt,
841 opno))
842 elt = NULL_TREE;
843 nelt++;
844 if (nelt == nelt_limit)
846 /* ??? We need to pass down stmt_info for a vector type
847 even if it points to the wrong stmt. */
848 prologue_cost += record_stmt_cost
849 (cost_vec, 1,
850 dt == vect_external_def
851 ? (elt ? scalar_to_vec : vec_construct)
852 : vector_load,
853 stmt_info, 0, vect_prologue);
854 nelt = 0;
858 return prologue_cost;
861 /* Function vect_model_simple_cost.
863 Models cost for simple operations, i.e. those that only emit ncopies of a
864 single op. Right now, this does not account for multiple insns that could
865 be generated for the single vector op. We will handle that shortly. */
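/* Roughly, the body cost below is NCOPIES vector_stmt operations (doubled
   for two-operator SLP nodes, which also need a permute per copy), plus one
   prologue broadcast (scalar_to_vec) or constructor per constant or
   external operand.  */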
867 static void
868 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
869 enum vect_def_type *dt,
870 int ndts,
871 slp_tree node,
872 stmt_vector_for_cost *cost_vec)
874 int inside_cost = 0, prologue_cost = 0;
876 gcc_assert (cost_vec != NULL);
878 /* ??? Somehow we need to fix this at the callers. */
879 if (node)
880 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (node);
882 if (node)
884 /* Scan operands and account for prologue cost of constants/externals.
885 ??? This over-estimates cost for multiple uses and should be
886 re-engineered. */
887 gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[0]->stmt;
888 tree lhs = gimple_get_lhs (stmt);
889 for (unsigned i = 0; i < gimple_num_ops (stmt); ++i)
891 tree op = gimple_op (stmt, i);
892 enum vect_def_type dt;
893 if (!op || op == lhs)
894 continue;
895 if (vect_is_simple_use (op, stmt_info->vinfo, &dt)
896 && (dt == vect_constant_def || dt == vect_external_def))
897 prologue_cost += vect_prologue_cost_for_slp_op (node, stmt_info,
898 i, dt, cost_vec);
901 else
 902 /* Cost the "broadcast" of a scalar operand into a vector operand.
903 Use scalar_to_vec to cost the broadcast, as elsewhere in the vector
904 cost model. */
905 for (int i = 0; i < ndts; i++)
906 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
907 prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec,
908 stmt_info, 0, vect_prologue);
910 /* Adjust for two-operator SLP nodes. */
911 if (node && SLP_TREE_TWO_OPERATORS (node))
913 ncopies *= 2;
914 inside_cost += record_stmt_cost (cost_vec, ncopies, vec_perm,
915 stmt_info, 0, vect_body);
918 /* Pass the inside-of-loop statements to the target-specific cost model. */
919 inside_cost += record_stmt_cost (cost_vec, ncopies, vector_stmt,
920 stmt_info, 0, vect_body);
922 if (dump_enabled_p ())
923 dump_printf_loc (MSG_NOTE, vect_location,
924 "vect_model_simple_cost: inside_cost = %d, "
925 "prologue_cost = %d .\n", inside_cost, prologue_cost);
929 /* Model cost for type demotion and promotion operations. PWR is normally
930 zero for single-step promotions and demotions. It will be one if
931 two-step promotion/demotion is required, and so on. Each additional
932 step doubles the number of instructions required. */
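/* For example, with PWR == 1 a two-step promotion is costed as
   vect_pow2 (1) + vect_pow2 (2) == 2 + 4 vec_promote_demote operations,
   whereas the corresponding demotion is vect_pow2 (0) + vect_pow2 (1)
   == 1 + 2.  */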
934 static void
935 vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
936 enum vect_def_type *dt, int pwr,
937 stmt_vector_for_cost *cost_vec)
939 int i, tmp;
940 int inside_cost = 0, prologue_cost = 0;
942 for (i = 0; i < pwr + 1; i++)
944 tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
945 (i + 1) : i;
946 inside_cost += record_stmt_cost (cost_vec, vect_pow2 (tmp),
947 vec_promote_demote, stmt_info, 0,
948 vect_body);
 951 /* FORNOW: Assuming maximum 2 args per stmt. */
952 for (i = 0; i < 2; i++)
953 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
954 prologue_cost += record_stmt_cost (cost_vec, 1, vector_stmt,
955 stmt_info, 0, vect_prologue);
957 if (dump_enabled_p ())
958 dump_printf_loc (MSG_NOTE, vect_location,
959 "vect_model_promotion_demotion_cost: inside_cost = %d, "
960 "prologue_cost = %d .\n", inside_cost, prologue_cost);
963 /* Function vect_model_store_cost
965 Models cost for stores. In the case of grouped accesses, one access
966 has the overhead of the grouped access attributed to it. */
968 static void
969 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
970 enum vect_def_type dt,
971 vect_memory_access_type memory_access_type,
972 vec_load_store_type vls_type, slp_tree slp_node,
973 stmt_vector_for_cost *cost_vec)
975 unsigned int inside_cost = 0, prologue_cost = 0;
976 stmt_vec_info first_stmt_info = stmt_info;
977 bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);
979 /* ??? Somehow we need to fix this at the callers. */
980 if (slp_node)
981 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
983 if (vls_type == VLS_STORE_INVARIANT)
985 if (slp_node)
986 prologue_cost += vect_prologue_cost_for_slp_op (slp_node, stmt_info,
987 1, dt, cost_vec);
988 else
989 prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec,
990 stmt_info, 0, vect_prologue);
993 /* Grouped stores update all elements in the group at once,
994 so we want the DR for the first statement. */
995 if (!slp_node && grouped_access_p)
996 first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
998 /* True if we should include any once-per-group costs as well as
999 the cost of the statement itself. For SLP we only get called
1000 once per group anyhow. */
1001 bool first_stmt_p = (first_stmt_info == stmt_info);
1003 /* We assume that the cost of a single store-lanes instruction is
1004 equivalent to the cost of DR_GROUP_SIZE separate stores. If a grouped
1005 access is instead being provided by a permute-and-store operation,
1006 include the cost of the permutes. */
1007 if (first_stmt_p
1008 && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
 1010 /* Uses high and low interleave or shuffle operations for each
1011 needed permute. */
1012 int group_size = DR_GROUP_SIZE (first_stmt_info);
1013 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
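/* For example, for a group of size 4 this is ncopies * ceil_log2 (4) * 4
   == 8 * ncopies permute statements.  */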
1014 inside_cost = record_stmt_cost (cost_vec, nstmts, vec_perm,
1015 stmt_info, 0, vect_body);
1017 if (dump_enabled_p ())
1018 dump_printf_loc (MSG_NOTE, vect_location,
1019 "vect_model_store_cost: strided group_size = %d .\n",
1020 group_size);
1023 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1024 /* Costs of the stores. */
1025 if (memory_access_type == VMAT_ELEMENTWISE
1026 || memory_access_type == VMAT_GATHER_SCATTER)
1028 /* N scalar stores plus extracting the elements. */
1029 unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
1030 inside_cost += record_stmt_cost (cost_vec,
1031 ncopies * assumed_nunits,
1032 scalar_store, stmt_info, 0, vect_body);
1034 else
1035 vect_get_store_cost (stmt_info, ncopies, &inside_cost, cost_vec);
1037 if (memory_access_type == VMAT_ELEMENTWISE
1038 || memory_access_type == VMAT_STRIDED_SLP)
1040 /* N scalar stores plus extracting the elements. */
1041 unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
1042 inside_cost += record_stmt_cost (cost_vec,
1043 ncopies * assumed_nunits,
1044 vec_to_scalar, stmt_info, 0, vect_body);
1047 if (dump_enabled_p ())
1048 dump_printf_loc (MSG_NOTE, vect_location,
1049 "vect_model_store_cost: inside_cost = %d, "
1050 "prologue_cost = %d .\n", inside_cost, prologue_cost);
1054 /* Calculate cost of DR's memory access. */
1055 void
1056 vect_get_store_cost (stmt_vec_info stmt_info, int ncopies,
1057 unsigned int *inside_cost,
1058 stmt_vector_for_cost *body_cost_vec)
1060 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
1061 int alignment_support_scheme
1062 = vect_supportable_dr_alignment (dr_info, false);
1064 switch (alignment_support_scheme)
1066 case dr_aligned:
1068 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1069 vector_store, stmt_info, 0,
1070 vect_body);
1072 if (dump_enabled_p ())
1073 dump_printf_loc (MSG_NOTE, vect_location,
1074 "vect_model_store_cost: aligned.\n");
1075 break;
1078 case dr_unaligned_supported:
1080 /* Here, we assign an additional cost for the unaligned store. */
1081 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1082 unaligned_store, stmt_info,
1083 DR_MISALIGNMENT (dr_info),
1084 vect_body);
1085 if (dump_enabled_p ())
1086 dump_printf_loc (MSG_NOTE, vect_location,
1087 "vect_model_store_cost: unaligned supported by "
1088 "hardware.\n");
1089 break;
1092 case dr_unaligned_unsupported:
1094 *inside_cost = VECT_MAX_COST;
1096 if (dump_enabled_p ())
1097 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1098 "vect_model_store_cost: unsupported access.\n");
1099 break;
1102 default:
1103 gcc_unreachable ();
1108 /* Function vect_model_load_cost
1110 Models cost for loads. In the case of grouped accesses, one access has
1111 the overhead of the grouped access attributed to it. Since unaligned
1112 accesses are supported for loads, we also account for the costs of the
1113 access scheme chosen. */
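/* For permuted SLP loads the permutation statements are costed explicitly
   and the number of vector loads is reduced to account for elements that
   the permutation shows to be redundant or unused (see below).  */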
1115 static void
1116 vect_model_load_cost (stmt_vec_info stmt_info, unsigned ncopies,
1117 vect_memory_access_type memory_access_type,
1118 slp_instance instance,
1119 slp_tree slp_node,
1120 stmt_vector_for_cost *cost_vec)
1122 unsigned int inside_cost = 0, prologue_cost = 0;
1123 bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);
1125 gcc_assert (cost_vec);
1127 /* ??? Somehow we need to fix this at the callers. */
1128 if (slp_node)
1129 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
1131 if (slp_node && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
1133 /* If the load is permuted then the alignment is determined by
1134 the first group element not by the first scalar stmt DR. */
1135 stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
1136 /* Record the cost for the permutation. */
1137 unsigned n_perms;
1138 unsigned assumed_nunits
1139 = vect_nunits_for_cost (STMT_VINFO_VECTYPE (first_stmt_info));
1140 unsigned slp_vf = (ncopies * assumed_nunits) / instance->group_size;
1141 vect_transform_slp_perm_load (slp_node, vNULL, NULL,
1142 slp_vf, instance, true,
1143 &n_perms);
1144 inside_cost += record_stmt_cost (cost_vec, n_perms, vec_perm,
1145 first_stmt_info, 0, vect_body);
1146 /* And adjust the number of loads performed. This handles
1147 redundancies as well as loads that are later dead. */
1148 auto_sbitmap perm (DR_GROUP_SIZE (first_stmt_info));
1149 bitmap_clear (perm);
1150 for (unsigned i = 0;
1151 i < SLP_TREE_LOAD_PERMUTATION (slp_node).length (); ++i)
1152 bitmap_set_bit (perm, SLP_TREE_LOAD_PERMUTATION (slp_node)[i]);
1153 ncopies = 0;
1154 bool load_seen = false;
1155 for (unsigned i = 0; i < DR_GROUP_SIZE (first_stmt_info); ++i)
1157 if (i % assumed_nunits == 0)
1159 if (load_seen)
1160 ncopies++;
1161 load_seen = false;
1163 if (bitmap_bit_p (perm, i))
1164 load_seen = true;
1166 if (load_seen)
1167 ncopies++;
1168 gcc_assert (ncopies
1169 <= (DR_GROUP_SIZE (first_stmt_info)
1170 - DR_GROUP_GAP (first_stmt_info)
1171 + assumed_nunits - 1) / assumed_nunits);
1174 /* Grouped loads read all elements in the group at once,
1175 so we want the DR for the first statement. */
1176 stmt_vec_info first_stmt_info = stmt_info;
1177 if (!slp_node && grouped_access_p)
1178 first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
1180 /* True if we should include any once-per-group costs as well as
1181 the cost of the statement itself. For SLP we only get called
1182 once per group anyhow. */
1183 bool first_stmt_p = (first_stmt_info == stmt_info);
1185 /* We assume that the cost of a single load-lanes instruction is
1186 equivalent to the cost of DR_GROUP_SIZE separate loads. If a grouped
1187 access is instead being provided by a load-and-permute operation,
1188 include the cost of the permutes. */
1189 if (first_stmt_p
1190 && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
 1192 /* Uses even and odd extract operations or shuffle operations
1193 for each needed permute. */
1194 int group_size = DR_GROUP_SIZE (first_stmt_info);
1195 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
1196 inside_cost += record_stmt_cost (cost_vec, nstmts, vec_perm,
1197 stmt_info, 0, vect_body);
1199 if (dump_enabled_p ())
1200 dump_printf_loc (MSG_NOTE, vect_location,
1201 "vect_model_load_cost: strided group_size = %d .\n",
1202 group_size);
1205 /* The loads themselves. */
1206 if (memory_access_type == VMAT_ELEMENTWISE
1207 || memory_access_type == VMAT_GATHER_SCATTER)
1209 /* N scalar loads plus gathering them into a vector. */
1210 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1211 unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
1212 inside_cost += record_stmt_cost (cost_vec,
1213 ncopies * assumed_nunits,
1214 scalar_load, stmt_info, 0, vect_body);
1216 else
1217 vect_get_load_cost (stmt_info, ncopies, first_stmt_p,
1218 &inside_cost, &prologue_cost,
1219 cost_vec, cost_vec, true);
1220 if (memory_access_type == VMAT_ELEMENTWISE
1221 || memory_access_type == VMAT_STRIDED_SLP)
1222 inside_cost += record_stmt_cost (cost_vec, ncopies, vec_construct,
1223 stmt_info, 0, vect_body);
1225 if (dump_enabled_p ())
1226 dump_printf_loc (MSG_NOTE, vect_location,
1227 "vect_model_load_cost: inside_cost = %d, "
1228 "prologue_cost = %d .\n", inside_cost, prologue_cost);
1232 /* Calculate cost of DR's memory access. */
1233 void
1234 vect_get_load_cost (stmt_vec_info stmt_info, int ncopies,
1235 bool add_realign_cost, unsigned int *inside_cost,
1236 unsigned int *prologue_cost,
1237 stmt_vector_for_cost *prologue_cost_vec,
1238 stmt_vector_for_cost *body_cost_vec,
1239 bool record_prologue_costs)
1241 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
1242 int alignment_support_scheme
1243 = vect_supportable_dr_alignment (dr_info, false);
1245 switch (alignment_support_scheme)
1247 case dr_aligned:
1249 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1250 stmt_info, 0, vect_body);
1252 if (dump_enabled_p ())
1253 dump_printf_loc (MSG_NOTE, vect_location,
1254 "vect_model_load_cost: aligned.\n");
1256 break;
1258 case dr_unaligned_supported:
1260 /* Here, we assign an additional cost for the unaligned load. */
1261 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1262 unaligned_load, stmt_info,
1263 DR_MISALIGNMENT (dr_info),
1264 vect_body);
1266 if (dump_enabled_p ())
1267 dump_printf_loc (MSG_NOTE, vect_location,
1268 "vect_model_load_cost: unaligned supported by "
1269 "hardware.\n");
1271 break;
1273 case dr_explicit_realign:
1275 *inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
1276 vector_load, stmt_info, 0, vect_body);
1277 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1278 vec_perm, stmt_info, 0, vect_body);
1280 /* FIXME: If the misalignment remains fixed across the iterations of
1281 the containing loop, the following cost should be added to the
1282 prologue costs. */
1283 if (targetm.vectorize.builtin_mask_for_load)
1284 *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
1285 stmt_info, 0, vect_body);
1287 if (dump_enabled_p ())
1288 dump_printf_loc (MSG_NOTE, vect_location,
1289 "vect_model_load_cost: explicit realign\n");
1291 break;
1293 case dr_explicit_realign_optimized:
1295 if (dump_enabled_p ())
1296 dump_printf_loc (MSG_NOTE, vect_location,
1297 "vect_model_load_cost: unaligned software "
1298 "pipelined.\n");
1300 /* Unaligned software pipeline has a load of an address, an initial
1301 load, and possibly a mask operation to "prime" the loop. However,
1302 if this is an access in a group of loads, which provide grouped
1303 access, then the above cost should only be considered for one
1304 access in the group. Inside the loop, there is a load op
1305 and a realignment op. */
1307 if (add_realign_cost && record_prologue_costs)
1309 *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
1310 vector_stmt, stmt_info,
1311 0, vect_prologue);
1312 if (targetm.vectorize.builtin_mask_for_load)
1313 *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
1314 vector_stmt, stmt_info,
1315 0, vect_prologue);
1318 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1319 stmt_info, 0, vect_body);
1320 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
1321 stmt_info, 0, vect_body);
1323 if (dump_enabled_p ())
1324 dump_printf_loc (MSG_NOTE, vect_location,
1325 "vect_model_load_cost: explicit realign optimized"
1326 "\n");
1328 break;
1331 case dr_unaligned_unsupported:
1333 *inside_cost = VECT_MAX_COST;
1335 if (dump_enabled_p ())
1336 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1337 "vect_model_load_cost: unsupported access.\n");
1338 break;
1341 default:
1342 gcc_unreachable ();
1346 /* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
1347 the loop preheader for the vectorized stmt STMT_VINFO. */
1349 static void
1350 vect_init_vector_1 (stmt_vec_info stmt_vinfo, gimple *new_stmt,
1351 gimple_stmt_iterator *gsi)
1353 if (gsi)
1354 vect_finish_stmt_generation (stmt_vinfo, new_stmt, gsi);
1355 else
1357 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1359 if (loop_vinfo)
1361 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1362 basic_block new_bb;
1363 edge pe;
1365 if (nested_in_vect_loop_p (loop, stmt_vinfo))
1366 loop = loop->inner;
1368 pe = loop_preheader_edge (loop);
1369 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
1370 gcc_assert (!new_bb);
1372 else
1374 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
1375 basic_block bb;
1376 gimple_stmt_iterator gsi_bb_start;
1378 gcc_assert (bb_vinfo);
1379 bb = BB_VINFO_BB (bb_vinfo);
1380 gsi_bb_start = gsi_after_labels (bb);
1381 gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
1385 if (dump_enabled_p ())
1387 dump_printf_loc (MSG_NOTE, vect_location,
1388 "created new init_stmt: ");
1389 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
1393 /* Function vect_init_vector.
1395 Insert a new stmt (INIT_STMT) that initializes a new variable of type
1396 TYPE with the value VAL. If TYPE is a vector type and VAL does not have
 1397 a vector type, a vector with all elements equal to VAL is created first.
1398 Place the initialization at BSI if it is not NULL. Otherwise, place the
1399 initialization at the loop preheader.
1400 Return the DEF of INIT_STMT.
1401 It will be used in the vectorization of STMT_INFO. */
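/* For instance (illustrative values and names), calling this with VAL == 5,
   TYPE == a 4-element integer vector type and GSI == NULL emits something
   like "cst_1 = { 5, 5, 5, 5 };" in the loop preheader and returns
   cst_1.  */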
1403 tree
1404 vect_init_vector (stmt_vec_info stmt_info, tree val, tree type,
1405 gimple_stmt_iterator *gsi)
1407 gimple *init_stmt;
1408 tree new_temp;
 1410 /* We abuse this function to push something to an SSA name with initial 'val'. */
1411 if (! useless_type_conversion_p (type, TREE_TYPE (val)))
1413 gcc_assert (TREE_CODE (type) == VECTOR_TYPE);
1414 if (! types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
 1416 /* A scalar boolean value should be transformed into an all-zeros
 1417 or all-ones value before building a vector from it. */
1418 if (VECTOR_BOOLEAN_TYPE_P (type))
1420 tree true_val = build_all_ones_cst (TREE_TYPE (type));
1421 tree false_val = build_zero_cst (TREE_TYPE (type));
1423 if (CONSTANT_CLASS_P (val))
1424 val = integer_zerop (val) ? false_val : true_val;
1425 else
1427 new_temp = make_ssa_name (TREE_TYPE (type));
1428 init_stmt = gimple_build_assign (new_temp, COND_EXPR,
1429 val, true_val, false_val);
1430 vect_init_vector_1 (stmt_info, init_stmt, gsi);
1431 val = new_temp;
1434 else if (CONSTANT_CLASS_P (val))
1435 val = fold_convert (TREE_TYPE (type), val);
1436 else
1438 new_temp = make_ssa_name (TREE_TYPE (type));
1439 if (! INTEGRAL_TYPE_P (TREE_TYPE (val)))
1440 init_stmt = gimple_build_assign (new_temp,
1441 fold_build1 (VIEW_CONVERT_EXPR,
1442 TREE_TYPE (type),
1443 val));
1444 else
1445 init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
1446 vect_init_vector_1 (stmt_info, init_stmt, gsi);
1447 val = new_temp;
1450 val = build_vector_from_val (type, val);
1453 new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
1454 init_stmt = gimple_build_assign (new_temp, val);
1455 vect_init_vector_1 (stmt_info, init_stmt, gsi);
1456 return new_temp;
1459 /* Function vect_get_vec_def_for_operand_1.
1461 For a defining stmt DEF_STMT_INFO of a scalar stmt, return a vector def
1462 with type DT that will be used in the vectorized stmt. */
1464 tree
1465 vect_get_vec_def_for_operand_1 (stmt_vec_info def_stmt_info,
1466 enum vect_def_type dt)
1468 tree vec_oprnd;
1469 stmt_vec_info vec_stmt_info;
1471 switch (dt)
1473 /* operand is a constant or a loop invariant. */
1474 case vect_constant_def:
1475 case vect_external_def:
1476 /* Code should use vect_get_vec_def_for_operand. */
1477 gcc_unreachable ();
1479 /* operand is defined inside the loop. */
1480 case vect_internal_def:
1482 /* Get the def from the vectorized stmt. */
1483 vec_stmt_info = STMT_VINFO_VEC_STMT (def_stmt_info);
1484 /* Get vectorized pattern statement. */
1485 if (!vec_stmt_info
1486 && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
1487 && !STMT_VINFO_RELEVANT (def_stmt_info))
1488 vec_stmt_info = (STMT_VINFO_VEC_STMT
1489 (STMT_VINFO_RELATED_STMT (def_stmt_info)));
1490 gcc_assert (vec_stmt_info);
1491 if (gphi *phi = dyn_cast <gphi *> (vec_stmt_info->stmt))
1492 vec_oprnd = PHI_RESULT (phi);
1493 else
1494 vec_oprnd = gimple_get_lhs (vec_stmt_info->stmt);
1495 return vec_oprnd;
1498 /* operand is defined by a loop header phi. */
1499 case vect_reduction_def:
1500 case vect_double_reduction_def:
1501 case vect_nested_cycle:
1502 case vect_induction_def:
1504 gcc_assert (gimple_code (def_stmt_info->stmt) == GIMPLE_PHI);
1506 /* Get the def from the vectorized stmt. */
1507 vec_stmt_info = STMT_VINFO_VEC_STMT (def_stmt_info);
1508 if (gphi *phi = dyn_cast <gphi *> (vec_stmt_info->stmt))
1509 vec_oprnd = PHI_RESULT (phi);
1510 else
1511 vec_oprnd = gimple_get_lhs (vec_stmt_info->stmt);
1512 return vec_oprnd;
1515 default:
1516 gcc_unreachable ();
1521 /* Function vect_get_vec_def_for_operand.
1523 OP is an operand in STMT_VINFO. This function returns a (vector) def
1524 that will be used in the vectorized stmt for STMT_VINFO.
1526 In the case that OP is an SSA_NAME which is defined in the loop, then
1527 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
1529 In case OP is an invariant or constant, a new stmt that creates a vector def
1530 needs to be introduced. VECTYPE may be used to specify a required type for
1531 vector invariant. */
1533 tree
1534 vect_get_vec_def_for_operand (tree op, stmt_vec_info stmt_vinfo, tree vectype)
1536 gimple *def_stmt;
1537 enum vect_def_type dt;
1538 bool is_simple_use;
1539 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1541 if (dump_enabled_p ())
1543 dump_printf_loc (MSG_NOTE, vect_location,
1544 "vect_get_vec_def_for_operand: ");
1545 dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
1546 dump_printf (MSG_NOTE, "\n");
1549 stmt_vec_info def_stmt_info;
1550 is_simple_use = vect_is_simple_use (op, loop_vinfo, &dt,
1551 &def_stmt_info, &def_stmt);
1552 gcc_assert (is_simple_use);
1553 if (def_stmt && dump_enabled_p ())
1555 dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = ");
1556 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
1559 if (dt == vect_constant_def || dt == vect_external_def)
1561 tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
1562 tree vector_type;
1564 if (vectype)
1565 vector_type = vectype;
1566 else if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op))
1567 && VECTOR_BOOLEAN_TYPE_P (stmt_vectype))
1568 vector_type = build_same_sized_truth_vector_type (stmt_vectype);
1569 else
1570 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
1572 gcc_assert (vector_type);
1573 return vect_init_vector (stmt_vinfo, op, vector_type, NULL);
1575 else
1576 return vect_get_vec_def_for_operand_1 (def_stmt_info, dt);
1580 /* Function vect_get_vec_def_for_stmt_copy
1582 Return a vector-def for an operand. This function is used when the
1583 vectorized stmt to be created (by the caller to this function) is a "copy"
1584 created in case the vectorized result cannot fit in one vector, and several
1585 copies of the vector-stmt are required. In this case the vector-def is
1586 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1587 of the stmt that defines VEC_OPRND. VINFO describes the vectorization.
1589 Context:
1590 In case the vectorization factor (VF) is bigger than the number
1591 of elements that can fit in a vectype (nunits), we have to generate
1592 more than one vector stmt to vectorize the scalar stmt. This situation
1593 arises when there are multiple data-types operated upon in the loop; the
1594 smallest data-type determines the VF, and as a result, when vectorizing
1595 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1596 vector stmt (each computing a vector of 'nunits' results, and together
1597 computing 'VF' results in each iteration). This function is called when
1598 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1599 which VF=16 and nunits=4, so the number of copies required is 4):
1601 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1603 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1604 VS1.1: vx.1 = memref1 VS1.2
1605 VS1.2: vx.2 = memref2 VS1.3
1606 VS1.3: vx.3 = memref3
1608 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1609 VSnew.1: vz1 = vx.1 + ... VSnew.2
1610 VSnew.2: vz2 = vx.2 + ... VSnew.3
1611 VSnew.3: vz3 = vx.3 + ...
1613 The vectorization of S1 is explained in vectorizable_load.
1614 The vectorization of S2:
1615 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1616 the function 'vect_get_vec_def_for_operand' is called to
1617 get the relevant vector-def for each operand of S2. For operand x it
1618 returns the vector-def 'vx.0'.
1620 To create the remaining copies of the vector-stmt (VSnew.j), this
1621 function is called to get the relevant vector-def for each operand. It is
1622 obtained from the respective VS1.j stmt, which is recorded in the
1623 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1625 For example, to obtain the vector-def 'vx.1' in order to create the
1626 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
 1627 Given 'vx.0' we obtain the stmt that defines it ('VS1.0'); from the
1628 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1629 and return its def ('vx.1').
1630 Overall, to create the above sequence this function will be called 3 times:
1631 vx.1 = vect_get_vec_def_for_stmt_copy (vinfo, vx.0);
1632 vx.2 = vect_get_vec_def_for_stmt_copy (vinfo, vx.1);
1633 vx.3 = vect_get_vec_def_for_stmt_copy (vinfo, vx.2); */
1635 tree
1636 vect_get_vec_def_for_stmt_copy (vec_info *vinfo, tree vec_oprnd)
1638 stmt_vec_info def_stmt_info = vinfo->lookup_def (vec_oprnd);
1639 if (!def_stmt_info)
1640 /* Do nothing; can reuse same def. */
1641 return vec_oprnd;
1643 def_stmt_info = STMT_VINFO_RELATED_STMT (def_stmt_info);
1644 gcc_assert (def_stmt_info);
1645 if (gphi *phi = dyn_cast <gphi *> (def_stmt_info->stmt))
1646 vec_oprnd = PHI_RESULT (phi);
1647 else
1648 vec_oprnd = gimple_get_lhs (def_stmt_info->stmt);
1649 return vec_oprnd;
1653 /* Get vectorized definitions for the operands to create a copy of an original
1654 stmt. See vect_get_vec_def_for_stmt_copy () for details. */
1656 void
1657 vect_get_vec_defs_for_stmt_copy (vec_info *vinfo,
1658 vec<tree> *vec_oprnds0,
1659 vec<tree> *vec_oprnds1)
1661 tree vec_oprnd = vec_oprnds0->pop ();
1663 vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd);
1664 vec_oprnds0->quick_push (vec_oprnd);
1666 if (vec_oprnds1 && vec_oprnds1->length ())
1668 vec_oprnd = vec_oprnds1->pop ();
1669 vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd);
1670 vec_oprnds1->quick_push (vec_oprnd);
1675 /* Get vectorized definitions for OP0 and OP1. */
1677 void
1678 vect_get_vec_defs (tree op0, tree op1, stmt_vec_info stmt_info,
1679 vec<tree> *vec_oprnds0,
1680 vec<tree> *vec_oprnds1,
1681 slp_tree slp_node)
1683 if (slp_node)
1685 int nops = (op1 == NULL_TREE) ? 1 : 2;
1686 auto_vec<tree> ops (nops);
1687 auto_vec<vec<tree> > vec_defs (nops);
1689 ops.quick_push (op0);
1690 if (op1)
1691 ops.quick_push (op1);
1693 vect_get_slp_defs (ops, slp_node, &vec_defs);
1695 *vec_oprnds0 = vec_defs[0];
1696 if (op1)
1697 *vec_oprnds1 = vec_defs[1];
1699 else
1701 tree vec_oprnd;
1703 vec_oprnds0->create (1);
1704 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt_info);
1705 vec_oprnds0->quick_push (vec_oprnd);
1707 if (op1)
1709 vec_oprnds1->create (1);
1710 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt_info);
1711 vec_oprnds1->quick_push (vec_oprnd);
1716 /* Helper function called by vect_finish_replace_stmt and
1717 vect_finish_stmt_generation. Set the location of the new
1718 statement and create and return a stmt_vec_info for it. */
1720 static stmt_vec_info
1721 vect_finish_stmt_generation_1 (stmt_vec_info stmt_info, gimple *vec_stmt)
1723 vec_info *vinfo = stmt_info->vinfo;
1725 stmt_vec_info vec_stmt_info = vinfo->add_stmt (vec_stmt);
1727 if (dump_enabled_p ())
1729 dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
1730 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
1733 gimple_set_location (vec_stmt, gimple_location (stmt_info->stmt));
1735 /* While EH edges will generally prevent vectorization, stmt might
1736 e.g. be in a must-not-throw region. Ensure newly created stmts
1737 that could throw are part of the same region. */
1738 int lp_nr = lookup_stmt_eh_lp (stmt_info->stmt);
1739 if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
1740 add_stmt_to_eh_lp (vec_stmt, lp_nr);
1742 return vec_stmt_info;
1745 /* Replace the scalar statement STMT_INFO with a new vector statement VEC_STMT,
1746 which sets the same scalar result as STMT_INFO did. Create and return a
1747 stmt_vec_info for VEC_STMT. */
1749 stmt_vec_info
1750 vect_finish_replace_stmt (stmt_vec_info stmt_info, gimple *vec_stmt)
1752 gcc_assert (gimple_get_lhs (stmt_info->stmt) == gimple_get_lhs (vec_stmt));
1754 gimple_stmt_iterator gsi = gsi_for_stmt (stmt_info->stmt);
1755 gsi_replace (&gsi, vec_stmt, false);
1757 return vect_finish_stmt_generation_1 (stmt_info, vec_stmt);
1760 /* Add VEC_STMT to the vectorized implementation of STMT_INFO and insert it
1761 before *GSI. Create and return a stmt_vec_info for VEC_STMT. */
1763 stmt_vec_info
1764 vect_finish_stmt_generation (stmt_vec_info stmt_info, gimple *vec_stmt,
1765 gimple_stmt_iterator *gsi)
1767 gcc_assert (gimple_code (stmt_info->stmt) != GIMPLE_LABEL);
1769 if (!gsi_end_p (*gsi)
1770 && gimple_has_mem_ops (vec_stmt))
1772 gimple *at_stmt = gsi_stmt (*gsi);
1773 tree vuse = gimple_vuse (at_stmt);
1774 if (vuse && TREE_CODE (vuse) == SSA_NAME)
1776 tree vdef = gimple_vdef (at_stmt);
1777 gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
1778 /* If we have an SSA vuse and insert a store, update virtual
1779 SSA form to avoid triggering the renamer. Do so only
1780 if we can easily see all uses - which is what almost always
1781 happens with the way vectorized stmts are inserted. */
1782 if ((vdef && TREE_CODE (vdef) == SSA_NAME)
1783 && ((is_gimple_assign (vec_stmt)
1784 && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
1785 || (is_gimple_call (vec_stmt)
1786 && !(gimple_call_flags (vec_stmt)
1787 & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
1789 tree new_vdef = copy_ssa_name (vuse, vec_stmt);
1790 gimple_set_vdef (vec_stmt, new_vdef);
1791 SET_USE (gimple_vuse_op (at_stmt), new_vdef);
1795 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1796 return vect_finish_stmt_generation_1 (stmt_info, vec_stmt);
1799 /* We want to vectorize a call to combined function CFN with function
1800 decl FNDECL, using VECTYPE_OUT as the type of the output and VECTYPE_IN
1801 as the types of all inputs. Check whether this is possible using
1802 an internal function, returning its code if so or IFN_LAST if not. */
1804 static internal_fn
1805 vectorizable_internal_function (combined_fn cfn, tree fndecl,
1806 tree vectype_out, tree vectype_in)
1808 internal_fn ifn;
1809 if (internal_fn_p (cfn))
1810 ifn = as_internal_fn (cfn);
1811 else
1812 ifn = associated_internal_fn (fndecl);
1813 if (ifn != IFN_LAST && direct_internal_fn_p (ifn))
1815 const direct_internal_fn_info &info = direct_internal_fn (ifn);
1816 if (info.vectorizable)
1818 tree type0 = (info.type0 < 0 ? vectype_out : vectype_in);
1819 tree type1 = (info.type1 < 0 ? vectype_out : vectype_in);
1820 if (direct_internal_fn_supported_p (ifn, tree_pair (type0, type1),
1821 OPTIMIZE_FOR_SPEED))
1822 return ifn;
1825 return IFN_LAST;
1829 static tree permute_vec_elements (tree, tree, tree, stmt_vec_info,
1830 gimple_stmt_iterator *);
1832 /* Check whether a load or store statement in the loop described by
1833 LOOP_VINFO is possible in a fully-masked loop. This is testing
1834 whether the vectorizer pass has the appropriate support, as well as
1835 whether the target does.
1837 VLS_TYPE says whether the statement is a load or store and VECTYPE
1838 is the type of the vector being loaded or stored. MEMORY_ACCESS_TYPE
1839 says how the load or store is going to be implemented and GROUP_SIZE
1840 is the number of load or store statements in the containing group.
1841 If the access is a gather load or scatter store, GS_INFO describes
1842 its arguments.
1844 Clear LOOP_VINFO_CAN_FULLY_MASK_P if a fully-masked loop is not
1845 supported, otherwise record the required mask types. */
1847 static void
1848 check_load_store_masking (loop_vec_info loop_vinfo, tree vectype,
1849 vec_load_store_type vls_type, int group_size,
1850 vect_memory_access_type memory_access_type,
1851 gather_scatter_info *gs_info)
1853 /* Invariant loads need no special support. */
1854 if (memory_access_type == VMAT_INVARIANT)
1855 return;
1857 vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
1858 machine_mode vecmode = TYPE_MODE (vectype);
1859 bool is_load = (vls_type == VLS_LOAD);
1860 if (memory_access_type == VMAT_LOAD_STORE_LANES)
1862 if (is_load
1863 ? !vect_load_lanes_supported (vectype, group_size, true)
1864 : !vect_store_lanes_supported (vectype, group_size, true))
1866 if (dump_enabled_p ())
1867 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1868 "can't use a fully-masked loop because the"
1869 " target doesn't have an appropriate masked"
1870 " load/store-lanes instruction.\n");
1871 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
1872 return;
1874 unsigned int ncopies = vect_get_num_copies (loop_vinfo, vectype);
1875 vect_record_loop_mask (loop_vinfo, masks, ncopies, vectype);
1876 return;
1879 if (memory_access_type == VMAT_GATHER_SCATTER)
1881 internal_fn ifn = (is_load
1882 ? IFN_MASK_GATHER_LOAD
1883 : IFN_MASK_SCATTER_STORE);
1884 tree offset_type = TREE_TYPE (gs_info->offset);
1885 if (!internal_gather_scatter_fn_supported_p (ifn, vectype,
1886 gs_info->memory_type,
1887 TYPE_SIGN (offset_type),
1888 gs_info->scale))
1890 if (dump_enabled_p ())
1891 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1892 "can't use a fully-masked loop because the"
1893 " target doesn't have an appropriate masked"
1894 " gather load or scatter store instruction.\n");
1895 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
1896 return;
1898 unsigned int ncopies = vect_get_num_copies (loop_vinfo, vectype);
1899 vect_record_loop_mask (loop_vinfo, masks, ncopies, vectype);
1900 return;
1903 if (memory_access_type != VMAT_CONTIGUOUS
1904 && memory_access_type != VMAT_CONTIGUOUS_PERMUTE)
1906 /* Element X of the data must come from iteration i * VF + X of the
1907 scalar loop. We need more work to support other mappings. */
1908 if (dump_enabled_p ())
1909 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1910 "can't use a fully-masked loop because an access"
1911 " isn't contiguous.\n");
1912 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
1913 return;
1916 machine_mode mask_mode;
1917 if (!(targetm.vectorize.get_mask_mode
1918 (GET_MODE_NUNITS (vecmode),
1919 GET_MODE_SIZE (vecmode)).exists (&mask_mode))
1920 || !can_vec_mask_load_store_p (vecmode, mask_mode, is_load))
1922 if (dump_enabled_p ())
1923 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1924 "can't use a fully-masked loop because the target"
1925 " doesn't have the appropriate masked load or"
1926 " store.\n");
1927 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
1928 return;
1930 /* We might load more scalars than we need for permuting SLP loads.
1931 We checked in get_group_load_store_type that the extra elements
1932 don't leak into a new vector. */
1933 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
1934 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1935 unsigned int nvectors;
1936 if (can_div_away_from_zero_p (group_size * vf, nunits, &nvectors))
1937 vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype);
1938 else
1939 gcc_unreachable ();
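/* Editorial sketch (not part of the original source): the mask count
   computed above is just the number of vectors needed to cover
   GROUP_SIZE * VF scalar elements, rounded away from zero.  For a
   hypothetical group of 3 accesses, VF = 4 and 8-element vectors:

     nvectors = CEIL (3 * 4, 8) = 2

   so two loop masks of VECTYPE's mask type would be recorded.  */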
1942 /* Return the mask input to a masked load or store. VEC_MASK is the vectorized
1943 form of the scalar mask condition and LOOP_MASK, if nonnull, is the mask
1944 that needs to be applied to all loads and stores in a vectorized loop.
1945 Return VEC_MASK if LOOP_MASK is null, otherwise return VEC_MASK & LOOP_MASK.
1947 MASK_TYPE is the type of both masks. If new statements are needed,
1948 insert them before GSI. */
1950 static tree
1951 prepare_load_store_mask (tree mask_type, tree loop_mask, tree vec_mask,
1952 gimple_stmt_iterator *gsi)
1954 gcc_assert (useless_type_conversion_p (mask_type, TREE_TYPE (vec_mask)));
1955 if (!loop_mask)
1956 return vec_mask;
1958 gcc_assert (TREE_TYPE (loop_mask) == mask_type);
1959 tree and_res = make_temp_ssa_name (mask_type, NULL, "vec_mask_and");
1960 gimple *and_stmt = gimple_build_assign (and_res, BIT_AND_EXPR,
1961 vec_mask, loop_mask);
1962 gsi_insert_before (gsi, and_stmt, GSI_SAME_STMT);
1963 return and_res;
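/* Illustrative example (editorial note, hypothetical 4-lane masks):
   VEC_MASK = { 1, 1, 0, 1 } and LOOP_MASK = { 1, 1, 1, 0 } give

     vec_mask_and = VEC_MASK & LOOP_MASK = { 1, 1, 0, 0 }

   so a lane is active only when both the scalar condition and the
   loop mask enable it.  */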
1966 /* Determine whether we can use a gather load or scatter store to vectorize
1967 strided load or store STMT_INFO by truncating the current offset to a
1968 smaller width. We need to be able to construct an offset vector:
1970 { 0, X, X*2, X*3, ... }
1972 without loss of precision, where X is STMT_INFO's DR_STEP.
1974 Return true if this is possible, describing the gather load or scatter
1975 store in GS_INFO. MASKED_P is true if the load or store is conditional. */
1977 static bool
1978 vect_truncate_gather_scatter_offset (stmt_vec_info stmt_info,
1979 loop_vec_info loop_vinfo, bool masked_p,
1980 gather_scatter_info *gs_info)
1982 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
1983 data_reference *dr = dr_info->dr;
1984 tree step = DR_STEP (dr);
1985 if (TREE_CODE (step) != INTEGER_CST)
1987 /* ??? Perhaps we could use range information here? */
1988 if (dump_enabled_p ())
1989 dump_printf_loc (MSG_NOTE, vect_location,
1990 "cannot truncate variable step.\n");
1991 return false;
1994 /* Get the number of bits in an element. */
1995 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1996 scalar_mode element_mode = SCALAR_TYPE_MODE (TREE_TYPE (vectype));
1997 unsigned int element_bits = GET_MODE_BITSIZE (element_mode);
1999 /* Set COUNT to one less than the upper limit on the number of elements.
2000 Start with the maximum vectorization factor. */
2001 unsigned HOST_WIDE_INT count = vect_max_vf (loop_vinfo) - 1;
2003 /* Try lowering COUNT to the number of scalar latch iterations. */
2004 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2005 widest_int max_iters;
2006 if (max_loop_iterations (loop, &max_iters)
2007 && max_iters < count)
2008 count = max_iters.to_shwi ();
2010 /* Try scales of 1 and the element size. */
2011 int scales[] = { 1, vect_get_scalar_dr_size (dr_info) };
2012 wi::overflow_type overflow = wi::OVF_NONE;
2013 for (int i = 0; i < 2; ++i)
2015 int scale = scales[i];
2016 widest_int factor;
2017 if (!wi::multiple_of_p (wi::to_widest (step), scale, SIGNED, &factor))
2018 continue;
2020 /* See whether we can calculate COUNT * STEP / SCALE
2021 in ELEMENT_BITS bits. */
2022 widest_int range = wi::mul (count, factor, SIGNED, &overflow);
2023 if (overflow)
2024 continue;
2025 signop sign = range >= 0 ? UNSIGNED : SIGNED;
2026 if (wi::min_precision (range, sign) > element_bits)
2028 overflow = wi::OVF_UNKNOWN;
2029 continue;
2032 /* See whether the target supports the operation. */
2033 tree memory_type = TREE_TYPE (DR_REF (dr));
2034 if (!vect_gather_scatter_fn_p (DR_IS_READ (dr), masked_p, vectype,
2035 memory_type, element_bits, sign, scale,
2036 &gs_info->ifn, &gs_info->element_type))
2037 continue;
2039 tree offset_type = build_nonstandard_integer_type (element_bits,
2040 sign == UNSIGNED);
2042 gs_info->decl = NULL_TREE;
2043 /* Logically the sum of DR_BASE_ADDRESS, DR_INIT and DR_OFFSET,
2044 but we don't need to store that here. */
2045 gs_info->base = NULL_TREE;
2046 gs_info->offset = fold_convert (offset_type, step);
2047 gs_info->offset_dt = vect_constant_def;
2048 gs_info->offset_vectype = NULL_TREE;
2049 gs_info->scale = scale;
2050 gs_info->memory_type = memory_type;
2051 return true;
2054 if (overflow && dump_enabled_p ())
2055 dump_printf_loc (MSG_NOTE, vect_location,
2056 "truncating gather/scatter offset to %d bits"
2057 " might change its value.\n", element_bits);
2059 return false;
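/* Worked example (editorial addition, made-up numbers): with 32-bit
   elements, DR_STEP = 24 and SCALE = 4 (the scalar access size),
   STEP / SCALE = 6, so the offset vector would be { 0, 6, 12, 18, ... }
   in a 32-bit offset type and the gather/scatter applies SCALE to
   recover byte offsets 0, 24, 48, ...  The truncation is only accepted
   when COUNT * 6 is representable in ELEMENT_BITS (here 32) bits.  */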
2062 /* Return true if we can use gather/scatter internal functions to
2063 vectorize STMT_INFO, which is a grouped or strided load or store.
2064 MASKED_P is true if load or store is conditional. When returning
2065 true, fill in GS_INFO with the information required to perform the
2066 operation. */
2068 static bool
2069 vect_use_strided_gather_scatters_p (stmt_vec_info stmt_info,
2070 loop_vec_info loop_vinfo, bool masked_p,
2071 gather_scatter_info *gs_info)
2073 if (!vect_check_gather_scatter (stmt_info, loop_vinfo, gs_info)
2074 || gs_info->decl)
2075 return vect_truncate_gather_scatter_offset (stmt_info, loop_vinfo,
2076 masked_p, gs_info);
2078 scalar_mode element_mode = SCALAR_TYPE_MODE (gs_info->element_type);
2079 unsigned int element_bits = GET_MODE_BITSIZE (element_mode);
2080 tree offset_type = TREE_TYPE (gs_info->offset);
2081 unsigned int offset_bits = TYPE_PRECISION (offset_type);
2083 /* Enforced by vect_check_gather_scatter. */
2084 gcc_assert (element_bits >= offset_bits);
2086 /* If the elements are wider than the offset, convert the offset to the
2087 same width, without changing its sign. */
2088 if (element_bits > offset_bits)
2090 bool unsigned_p = TYPE_UNSIGNED (offset_type);
2091 offset_type = build_nonstandard_integer_type (element_bits, unsigned_p);
2092 gs_info->offset = fold_convert (offset_type, gs_info->offset);
2095 if (dump_enabled_p ())
2096 dump_printf_loc (MSG_NOTE, vect_location,
2097 "using gather/scatter for strided/grouped access,"
2098 " scale = %d\n", gs_info->scale);
2100 return true;
2103 /* STMT_INFO is a non-strided load or store, meaning that it accesses
2104 elements with a known constant step. Return -1 if that step
2105 is negative, 0 if it is zero, and 1 if it is greater than zero. */
2107 static int
2108 compare_step_with_zero (stmt_vec_info stmt_info)
2110 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
2111 return tree_int_cst_compare (vect_dr_behavior (dr_info)->step,
2112 size_zero_node);
2115 /* If the target supports a permute mask that reverses the elements in
2116 a vector of type VECTYPE, return that mask, otherwise return null. */
2118 static tree
2119 perm_mask_for_reverse (tree vectype)
2121 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2123 /* The encoding has a single stepped pattern. */
2124 vec_perm_builder sel (nunits, 1, 3);
2125 for (int i = 0; i < 3; ++i)
2126 sel.quick_push (nunits - 1 - i);
2128 vec_perm_indices indices (sel, 1, nunits);
2129 if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
2130 return NULL_TREE;
2131 return vect_gen_perm_mask_checked (vectype, indices);
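/* Editorial illustration: for a hypothetical 4-element vector the loop
   above pushes { 3, 2, 1 }; since the encoding is a single pattern of
   three stepped elements, vec_perm_indices extends it to the full
   reversal { 3, 2, 1, 0 }, and the same encoding stays valid when
   NUNITS is not a compile-time constant.  */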
2134 /* STMT_INFO is either a masked or unconditional store. Return the value
2135 being stored. */
2137 tree
2138 vect_get_store_rhs (stmt_vec_info stmt_info)
2140 if (gassign *assign = dyn_cast <gassign *> (stmt_info->stmt))
2142 gcc_assert (gimple_assign_single_p (assign));
2143 return gimple_assign_rhs1 (assign);
2145 if (gcall *call = dyn_cast <gcall *> (stmt_info->stmt))
2147 internal_fn ifn = gimple_call_internal_fn (call);
2148 int index = internal_fn_stored_value_index (ifn);
2149 gcc_assert (index >= 0);
2150 return gimple_call_arg (call, index);
2152 gcc_unreachable ();
2155 /* A subroutine of get_load_store_type, with a subset of the same
2156 arguments. Handle the case where STMT_INFO is part of a grouped load
2157 or store.
2159 For stores, the statements in the group are all consecutive
2160 and there is no gap at the end. For loads, the statements in the
2161 group might not be consecutive; there can be gaps between statements
2162 as well as at the end. */
2164 static bool
2165 get_group_load_store_type (stmt_vec_info stmt_info, tree vectype, bool slp,
2166 bool masked_p, vec_load_store_type vls_type,
2167 vect_memory_access_type *memory_access_type,
2168 gather_scatter_info *gs_info)
2170 vec_info *vinfo = stmt_info->vinfo;
2171 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2172 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
2173 stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
2174 dr_vec_info *first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
2175 unsigned int group_size = DR_GROUP_SIZE (first_stmt_info);
2176 bool single_element_p = (stmt_info == first_stmt_info
2177 && !DR_GROUP_NEXT_ELEMENT (stmt_info));
2178 unsigned HOST_WIDE_INT gap = DR_GROUP_GAP (first_stmt_info);
2179 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2181 /* True if the vectorized statements would access beyond the last
2182 statement in the group. */
2183 bool overrun_p = false;
2185 /* True if we can cope with such overrun by peeling for gaps, so that
2186 there is at least one final scalar iteration after the vector loop. */
2187 bool can_overrun_p = (!masked_p
2188 && vls_type == VLS_LOAD
2189 && loop_vinfo
2190 && !loop->inner);
2192 /* There can only be a gap at the end of the group if the stride is
2193 known at compile time. */
2194 gcc_assert (!STMT_VINFO_STRIDED_P (first_stmt_info) || gap == 0);
2196 /* Stores can't yet have gaps. */
2197 gcc_assert (slp || vls_type == VLS_LOAD || gap == 0);
2199 if (slp)
2201 if (STMT_VINFO_STRIDED_P (first_stmt_info))
2203 /* Try to use consecutive accesses of DR_GROUP_SIZE elements,
2204 separated by the stride, until we have a complete vector.
2205 Fall back to scalar accesses if that isn't possible. */
2206 if (multiple_p (nunits, group_size))
2207 *memory_access_type = VMAT_STRIDED_SLP;
2208 else
2209 *memory_access_type = VMAT_ELEMENTWISE;
2211 else
2213 overrun_p = loop_vinfo && gap != 0;
2214 if (overrun_p && vls_type != VLS_LOAD)
2216 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2217 "Grouped store with gaps requires"
2218 " non-consecutive accesses\n");
2219 return false;
2221 /* An overrun is fine if the trailing elements are smaller
2222 than the alignment boundary B. Every vector access will
2223 be a multiple of B and so we are guaranteed to access a
2224 non-gap element in the same B-sized block. */
2225 if (overrun_p
2226 && gap < (vect_known_alignment_in_bytes (first_dr_info)
2227 / vect_get_scalar_dr_size (first_dr_info)))
2228 overrun_p = false;
2229 if (overrun_p && !can_overrun_p)
2231 if (dump_enabled_p ())
2232 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2233 "Peeling for outer loop is not supported\n");
2234 return false;
2236 *memory_access_type = VMAT_CONTIGUOUS;
2239 else
2241 /* We can always handle this case using elementwise accesses,
2242 but see if something more efficient is available. */
2243 *memory_access_type = VMAT_ELEMENTWISE;
2245 /* If there is a gap at the end of the group then these optimizations
2246 would access excess elements in the last iteration. */
2247 bool would_overrun_p = (gap != 0);
2248 /* An overrun is fine if the trailing elements are smaller than the
2249 alignment boundary B. Every vector access will be a multiple of B
2250 and so we are guaranteed to access a non-gap element in the
2251 same B-sized block. */
2252 if (would_overrun_p
2253 && !masked_p
2254 && gap < (vect_known_alignment_in_bytes (first_dr_info)
2255 / vect_get_scalar_dr_size (first_dr_info)))
2256 would_overrun_p = false;
2258 if (!STMT_VINFO_STRIDED_P (first_stmt_info)
2259 && (can_overrun_p || !would_overrun_p)
2260 && compare_step_with_zero (stmt_info) > 0)
2262 /* First cope with the degenerate case of a single-element
2263 vector. */
2264 if (known_eq (TYPE_VECTOR_SUBPARTS (vectype), 1U))
2265 *memory_access_type = VMAT_CONTIGUOUS;
2267 /* Otherwise try using LOAD/STORE_LANES. */
2268 if (*memory_access_type == VMAT_ELEMENTWISE
2269 && (vls_type == VLS_LOAD
2270 ? vect_load_lanes_supported (vectype, group_size, masked_p)
2271 : vect_store_lanes_supported (vectype, group_size,
2272 masked_p)))
2274 *memory_access_type = VMAT_LOAD_STORE_LANES;
2275 overrun_p = would_overrun_p;
2278 /* If that fails, try using permuting loads. */
2279 if (*memory_access_type == VMAT_ELEMENTWISE
2280 && (vls_type == VLS_LOAD
2281 ? vect_grouped_load_supported (vectype, single_element_p,
2282 group_size)
2283 : vect_grouped_store_supported (vectype, group_size)))
2285 *memory_access_type = VMAT_CONTIGUOUS_PERMUTE;
2286 overrun_p = would_overrun_p;
2290 /* As a last resort, try using a gather load or scatter store.
2292 ??? Although the code can handle all group sizes correctly,
2293 it probably isn't a win to use separate strided accesses based
2294 on nearby locations. Or, even if it's a win over scalar code,
2295 it might not be a win over vectorizing at a lower VF, if that
2296 allows us to use contiguous accesses. */
2297 if (*memory_access_type == VMAT_ELEMENTWISE
2298 && single_element_p
2299 && loop_vinfo
2300 && vect_use_strided_gather_scatters_p (stmt_info, loop_vinfo,
2301 masked_p, gs_info))
2302 *memory_access_type = VMAT_GATHER_SCATTER;
2305 if (vls_type != VLS_LOAD && first_stmt_info == stmt_info)
2307 /* STMT_INFO is the leader of the group. Check the operands of all the
2308 stmts of the group. */
2309 stmt_vec_info next_stmt_info = DR_GROUP_NEXT_ELEMENT (stmt_info);
2310 while (next_stmt_info)
2312 tree op = vect_get_store_rhs (next_stmt_info);
2313 enum vect_def_type dt;
2314 if (!vect_is_simple_use (op, vinfo, &dt))
2316 if (dump_enabled_p ())
2317 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2318 "use not simple.\n");
2319 return false;
2321 next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
2325 if (overrun_p)
2327 gcc_assert (can_overrun_p);
2328 if (dump_enabled_p ())
2329 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2330 "Data access with gaps requires scalar "
2331 "epilogue loop\n");
2332 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
2335 return true;
2338 /* A subroutine of get_load_store_type, with a subset of the same
2339 arguments. Handle the case where STMT_INFO is a load or store that
2340 accesses consecutive elements with a negative step. */
2342 static vect_memory_access_type
2343 get_negative_load_store_type (stmt_vec_info stmt_info, tree vectype,
2344 vec_load_store_type vls_type,
2345 unsigned int ncopies)
2347 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
2348 dr_alignment_support alignment_support_scheme;
2350 if (ncopies > 1)
2352 if (dump_enabled_p ())
2353 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2354 "multiple types with negative step.\n");
2355 return VMAT_ELEMENTWISE;
2358 alignment_support_scheme = vect_supportable_dr_alignment (dr_info, false);
2359 if (alignment_support_scheme != dr_aligned
2360 && alignment_support_scheme != dr_unaligned_supported)
2362 if (dump_enabled_p ())
2363 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2364 "negative step but alignment required.\n");
2365 return VMAT_ELEMENTWISE;
2368 if (vls_type == VLS_STORE_INVARIANT)
2370 if (dump_enabled_p ())
2371 dump_printf_loc (MSG_NOTE, vect_location,
2372 "negative step with invariant source;"
2373 " no permute needed.\n");
2374 return VMAT_CONTIGUOUS_DOWN;
2377 if (!perm_mask_for_reverse (vectype))
2379 if (dump_enabled_p ())
2380 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2381 "negative step and reversing not supported.\n");
2382 return VMAT_ELEMENTWISE;
2385 return VMAT_CONTIGUOUS_REVERSE;
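/* Editorial sketch of the VMAT_CONTIGUOUS_REVERSE case: for a scalar
   loop such as

     for (i = n - 1; i >= 0; i--)
       x += a[i];

   the step is negative, so (roughly) the vectorized code loads a
   contiguous vector and then applies the reversing permute from
   perm_mask_for_reverse so that lane order matches the scalar
   iteration order.  */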
2388 /* Analyze load or store statement STMT_INFO of type VLS_TYPE. Return true
2389 if there is a memory access type that the vectorized form can use,
2390 storing it in *MEMORY_ACCESS_TYPE if so. If we decide to use gathers
2391 or scatters, fill in GS_INFO accordingly.
2393 SLP says whether we're performing SLP rather than loop vectorization.
2394 MASKED_P is true if the statement is conditional on a vectorized mask.
2395 VECTYPE is the vector type that the vectorized statements will use.
2396 NCOPIES is the number of vector statements that will be needed. */
2398 static bool
2399 get_load_store_type (stmt_vec_info stmt_info, tree vectype, bool slp,
2400 bool masked_p, vec_load_store_type vls_type,
2401 unsigned int ncopies,
2402 vect_memory_access_type *memory_access_type,
2403 gather_scatter_info *gs_info)
2405 vec_info *vinfo = stmt_info->vinfo;
2406 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2407 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2408 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
2410 *memory_access_type = VMAT_GATHER_SCATTER;
2411 if (!vect_check_gather_scatter (stmt_info, loop_vinfo, gs_info))
2412 gcc_unreachable ();
2413 else if (!vect_is_simple_use (gs_info->offset, vinfo,
2414 &gs_info->offset_dt,
2415 &gs_info->offset_vectype))
2417 if (dump_enabled_p ())
2418 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2419 "%s index use not simple.\n",
2420 vls_type == VLS_LOAD ? "gather" : "scatter");
2421 return false;
2424 else if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
2426 if (!get_group_load_store_type (stmt_info, vectype, slp, masked_p,
2427 vls_type, memory_access_type, gs_info))
2428 return false;
2430 else if (STMT_VINFO_STRIDED_P (stmt_info))
2432 gcc_assert (!slp);
2433 if (loop_vinfo
2434 && vect_use_strided_gather_scatters_p (stmt_info, loop_vinfo,
2435 masked_p, gs_info))
2436 *memory_access_type = VMAT_GATHER_SCATTER;
2437 else
2438 *memory_access_type = VMAT_ELEMENTWISE;
2440 else
2442 int cmp = compare_step_with_zero (stmt_info);
2443 if (cmp < 0)
2444 *memory_access_type = get_negative_load_store_type
2445 (stmt_info, vectype, vls_type, ncopies);
2446 else if (cmp == 0)
2448 gcc_assert (vls_type == VLS_LOAD);
2449 *memory_access_type = VMAT_INVARIANT;
2451 else
2452 *memory_access_type = VMAT_CONTIGUOUS;
2455 if ((*memory_access_type == VMAT_ELEMENTWISE
2456 || *memory_access_type == VMAT_STRIDED_SLP)
2457 && !nunits.is_constant ())
2459 if (dump_enabled_p ())
2460 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2461 "Not using elementwise accesses due to variable "
2462 "vectorization factor.\n");
2463 return false;
2466 /* FIXME: At the moment the cost model seems to underestimate the
2467 cost of using elementwise accesses. This check preserves the
2468 traditional behavior until that can be fixed. */
2469 stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
2470 if (!first_stmt_info)
2471 first_stmt_info = stmt_info;
2472 if (*memory_access_type == VMAT_ELEMENTWISE
2473 && !STMT_VINFO_STRIDED_P (first_stmt_info)
2474 && !(stmt_info == DR_GROUP_FIRST_ELEMENT (stmt_info)
2475 && !DR_GROUP_NEXT_ELEMENT (stmt_info)
2476 && !pow2p_hwi (DR_GROUP_SIZE (stmt_info))))
2478 if (dump_enabled_p ())
2479 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2480 "not falling back to elementwise accesses\n");
2481 return false;
2483 return true;
2486 /* Return true if boolean argument MASK is suitable for vectorizing
2487 conditional load or store STMT_INFO. When returning true, store the type
2488 of the definition in *MASK_DT_OUT and the type of the vectorized mask
2489 in *MASK_VECTYPE_OUT. */
2491 static bool
2492 vect_check_load_store_mask (stmt_vec_info stmt_info, tree mask,
2493 vect_def_type *mask_dt_out,
2494 tree *mask_vectype_out)
2496 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (mask)))
2498 if (dump_enabled_p ())
2499 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2500 "mask argument is not a boolean.\n");
2501 return false;
2504 if (TREE_CODE (mask) != SSA_NAME)
2506 if (dump_enabled_p ())
2507 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2508 "mask argument is not an SSA name.\n");
2509 return false;
2512 enum vect_def_type mask_dt;
2513 tree mask_vectype;
2514 if (!vect_is_simple_use (mask, stmt_info->vinfo, &mask_dt, &mask_vectype))
2516 if (dump_enabled_p ())
2517 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2518 "mask use not simple.\n");
2519 return false;
2522 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2523 if (!mask_vectype)
2524 mask_vectype = get_mask_type_for_scalar_type (TREE_TYPE (vectype));
2526 if (!mask_vectype || !VECTOR_BOOLEAN_TYPE_P (mask_vectype))
2528 if (dump_enabled_p ())
2529 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2530 "could not find an appropriate vector mask type.\n");
2531 return false;
2534 if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_vectype),
2535 TYPE_VECTOR_SUBPARTS (vectype)))
2537 if (dump_enabled_p ())
2539 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2540 "vector mask type ");
2541 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, mask_vectype);
2542 dump_printf (MSG_MISSED_OPTIMIZATION,
2543 " does not match vector data type ");
2544 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, vectype);
2545 dump_printf (MSG_MISSED_OPTIMIZATION, ".\n");
2547 return false;
2550 *mask_dt_out = mask_dt;
2551 *mask_vectype_out = mask_vectype;
2552 return true;
2555 /* Return true if stored value RHS is suitable for vectorizing store
2556 statement STMT_INFO. When returning true, store the type of the
2557 definition in *RHS_DT_OUT, the type of the vectorized store value in
2558 *RHS_VECTYPE_OUT and the type of the store in *VLS_TYPE_OUT. */
2560 static bool
2561 vect_check_store_rhs (stmt_vec_info stmt_info, tree rhs,
2562 vect_def_type *rhs_dt_out, tree *rhs_vectype_out,
2563 vec_load_store_type *vls_type_out)
2565 /* If this is a store from a constant, make sure
2566 native_encode_expr can handle it. */
2567 if (CONSTANT_CLASS_P (rhs) && native_encode_expr (rhs, NULL, 64) == 0)
2569 if (dump_enabled_p ())
2570 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2571 "cannot encode constant as a byte sequence.\n");
2572 return false;
2575 enum vect_def_type rhs_dt;
2576 tree rhs_vectype;
2577 if (!vect_is_simple_use (rhs, stmt_info->vinfo, &rhs_dt, &rhs_vectype))
2579 if (dump_enabled_p ())
2580 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2581 "use not simple.\n");
2582 return false;
2585 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2586 if (rhs_vectype && !useless_type_conversion_p (vectype, rhs_vectype))
2588 if (dump_enabled_p ())
2589 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2590 "incompatible vector types.\n");
2591 return false;
2594 *rhs_dt_out = rhs_dt;
2595 *rhs_vectype_out = rhs_vectype;
2596 if (rhs_dt == vect_constant_def || rhs_dt == vect_external_def)
2597 *vls_type_out = VLS_STORE_INVARIANT;
2598 else
2599 *vls_type_out = VLS_STORE;
2600 return true;
2603 /* Build an all-ones vector mask of type MASKTYPE while vectorizing STMT_INFO.
2604 Note that we support masks with floating-point type, in which case the
2605 floats are interpreted as a bitmask. */
2607 static tree
2608 vect_build_all_ones_mask (stmt_vec_info stmt_info, tree masktype)
2610 if (TREE_CODE (masktype) == INTEGER_TYPE)
2611 return build_int_cst (masktype, -1);
2612 else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
2614 tree mask = build_int_cst (TREE_TYPE (masktype), -1);
2615 mask = build_vector_from_val (masktype, mask);
2616 return vect_init_vector (stmt_info, mask, masktype, NULL);
2618 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
2620 REAL_VALUE_TYPE r;
2621 long tmp[6];
2622 for (int j = 0; j < 6; ++j)
2623 tmp[j] = -1;
2624 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
2625 tree mask = build_real (TREE_TYPE (masktype), r);
2626 mask = build_vector_from_val (masktype, mask);
2627 return vect_init_vector (stmt_info, mask, masktype, NULL);
2629 gcc_unreachable ();
2632 /* Build an all-zero merge value of type VECTYPE while vectorizing
2633 STMT_INFO as a gather load. */
2635 static tree
2636 vect_build_zero_merge_argument (stmt_vec_info stmt_info, tree vectype)
2638 tree merge;
2639 if (TREE_CODE (TREE_TYPE (vectype)) == INTEGER_TYPE)
2640 merge = build_int_cst (TREE_TYPE (vectype), 0);
2641 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (vectype)))
2643 REAL_VALUE_TYPE r;
2644 long tmp[6];
2645 for (int j = 0; j < 6; ++j)
2646 tmp[j] = 0;
2647 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (vectype)));
2648 merge = build_real (TREE_TYPE (vectype), r);
2650 else
2651 gcc_unreachable ();
2652 merge = build_vector_from_val (vectype, merge);
2653 return vect_init_vector (stmt_info, merge, vectype, NULL);
2656 /* Build a gather load call while vectorizing STMT_INFO. Insert new
2657 instructions before GSI and add them to VEC_STMT. GS_INFO describes
2658 the gather load operation. If the load is conditional, MASK is the
2659 unvectorized condition and MASK_DT is its definition type, otherwise
2660 MASK is null. */
2662 static void
2663 vect_build_gather_load_calls (stmt_vec_info stmt_info,
2664 gimple_stmt_iterator *gsi,
2665 stmt_vec_info *vec_stmt,
2666 gather_scatter_info *gs_info,
2667 tree mask)
2669 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2670 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2671 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2672 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2673 int ncopies = vect_get_num_copies (loop_vinfo, vectype);
2674 edge pe = loop_preheader_edge (loop);
2675 enum { NARROW, NONE, WIDEN } modifier;
2676 poly_uint64 gather_off_nunits
2677 = TYPE_VECTOR_SUBPARTS (gs_info->offset_vectype);
2679 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info->decl));
2680 tree rettype = TREE_TYPE (TREE_TYPE (gs_info->decl));
2681 tree srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2682 tree ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2683 tree idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2684 tree masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2685 tree scaletype = TREE_VALUE (arglist);
2686 gcc_checking_assert (types_compatible_p (srctype, rettype)
2687 && (!mask || types_compatible_p (srctype, masktype)));
2689 tree perm_mask = NULL_TREE;
2690 tree mask_perm_mask = NULL_TREE;
2691 if (known_eq (nunits, gather_off_nunits))
2692 modifier = NONE;
2693 else if (known_eq (nunits * 2, gather_off_nunits))
2695 modifier = WIDEN;
2697 /* Currently widening gathers and scatters are only supported for
2698 fixed-length vectors. */
2699 int count = gather_off_nunits.to_constant ();
2700 vec_perm_builder sel (count, count, 1);
2701 for (int i = 0; i < count; ++i)
2702 sel.quick_push (i | (count / 2));
2704 vec_perm_indices indices (sel, 1, count);
2705 perm_mask = vect_gen_perm_mask_checked (gs_info->offset_vectype,
2706 indices);
2708 else if (known_eq (nunits, gather_off_nunits * 2))
2710 modifier = NARROW;
2712 /* Currently narrowing gathers and scatters are only supported for
2713 fixed-length vectors. */
2714 int count = nunits.to_constant ();
2715 vec_perm_builder sel (count, count, 1);
2716 sel.quick_grow (count);
2717 for (int i = 0; i < count; ++i)
2718 sel[i] = i < count / 2 ? i : i + count / 2;
2719 vec_perm_indices indices (sel, 2, count);
2720 perm_mask = vect_gen_perm_mask_checked (vectype, indices);
2722 ncopies *= 2;
2724 if (mask)
2726 for (int i = 0; i < count; ++i)
2727 sel[i] = i | (count / 2);
2728 indices.new_vector (sel, 2, count);
2729 mask_perm_mask = vect_gen_perm_mask_checked (masktype, indices);
2732 else
2733 gcc_unreachable ();
2735 tree scalar_dest = gimple_get_lhs (stmt_info->stmt);
2736 tree vec_dest = vect_create_destination_var (scalar_dest, vectype);
2738 tree ptr = fold_convert (ptrtype, gs_info->base);
2739 if (!is_gimple_min_invariant (ptr))
2741 gimple_seq seq;
2742 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
2743 basic_block new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
2744 gcc_assert (!new_bb);
2747 tree scale = build_int_cst (scaletype, gs_info->scale);
2749 tree vec_oprnd0 = NULL_TREE;
2750 tree vec_mask = NULL_TREE;
2751 tree src_op = NULL_TREE;
2752 tree mask_op = NULL_TREE;
2753 tree prev_res = NULL_TREE;
2754 stmt_vec_info prev_stmt_info = NULL;
2756 if (!mask)
2758 src_op = vect_build_zero_merge_argument (stmt_info, rettype);
2759 mask_op = vect_build_all_ones_mask (stmt_info, masktype);
2762 for (int j = 0; j < ncopies; ++j)
2764 tree op, var;
2765 if (modifier == WIDEN && (j & 1))
2766 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
2767 perm_mask, stmt_info, gsi);
2768 else if (j == 0)
2769 op = vec_oprnd0
2770 = vect_get_vec_def_for_operand (gs_info->offset, stmt_info);
2771 else
2772 op = vec_oprnd0 = vect_get_vec_def_for_stmt_copy (loop_vinfo,
2773 vec_oprnd0);
2775 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
2777 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
2778 TYPE_VECTOR_SUBPARTS (idxtype)));
2779 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
2780 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
2781 gassign *new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
2782 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
2783 op = var;
2786 if (mask)
2788 if (mask_perm_mask && (j & 1))
2789 mask_op = permute_vec_elements (mask_op, mask_op,
2790 mask_perm_mask, stmt_info, gsi);
2791 else
2793 if (j == 0)
2794 vec_mask = vect_get_vec_def_for_operand (mask, stmt_info);
2795 else
2796 vec_mask = vect_get_vec_def_for_stmt_copy (loop_vinfo,
2797 vec_mask);
2799 mask_op = vec_mask;
2800 if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
2802 gcc_assert
2803 (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op)),
2804 TYPE_VECTOR_SUBPARTS (masktype)));
2805 var = vect_get_new_ssa_name (masktype, vect_simple_var);
2806 mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
2807 gassign *new_stmt
2808 = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op);
2809 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
2810 mask_op = var;
2813 src_op = mask_op;
2816 gcall *new_call = gimple_build_call (gs_info->decl, 5, src_op, ptr, op,
2817 mask_op, scale);
2819 stmt_vec_info new_stmt_info;
2820 if (!useless_type_conversion_p (vectype, rettype))
2822 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype),
2823 TYPE_VECTOR_SUBPARTS (rettype)));
2824 op = vect_get_new_ssa_name (rettype, vect_simple_var);
2825 gimple_call_set_lhs (new_call, op);
2826 vect_finish_stmt_generation (stmt_info, new_call, gsi);
2827 var = make_ssa_name (vec_dest);
2828 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
2829 gassign *new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
2830 new_stmt_info
2831 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
2833 else
2835 var = make_ssa_name (vec_dest, new_call);
2836 gimple_call_set_lhs (new_call, var);
2837 new_stmt_info
2838 = vect_finish_stmt_generation (stmt_info, new_call, gsi);
2841 if (modifier == NARROW)
2843 if ((j & 1) == 0)
2845 prev_res = var;
2846 continue;
2848 var = permute_vec_elements (prev_res, var, perm_mask,
2849 stmt_info, gsi);
2850 new_stmt_info = loop_vinfo->lookup_def (var);
2853 if (prev_stmt_info == NULL)
2854 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
2855 else
2856 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
2857 prev_stmt_info = new_stmt_info;
2861 /* Prepare the base and offset in GS_INFO for vectorization.
2862 Set *DATAREF_PTR to the loop-invariant base address and *VEC_OFFSET
2863 to the vectorized offset argument for the first copy of STMT_INFO.
2864 STMT_INFO is the statement described by GS_INFO and LOOP is the
2865 containing loop. */
2867 static void
2868 vect_get_gather_scatter_ops (struct loop *loop, stmt_vec_info stmt_info,
2869 gather_scatter_info *gs_info,
2870 tree *dataref_ptr, tree *vec_offset)
2872 gimple_seq stmts = NULL;
2873 *dataref_ptr = force_gimple_operand (gs_info->base, &stmts, true, NULL_TREE);
2874 if (stmts != NULL)
2876 basic_block new_bb;
2877 edge pe = loop_preheader_edge (loop);
2878 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
2879 gcc_assert (!new_bb);
2881 tree offset_type = TREE_TYPE (gs_info->offset);
2882 tree offset_vectype = get_vectype_for_scalar_type (offset_type);
2883 *vec_offset = vect_get_vec_def_for_operand (gs_info->offset, stmt_info,
2884 offset_vectype);
2887 /* Prepare to implement a grouped or strided load or store using
2888 the gather load or scatter store operation described by GS_INFO.
2889 STMT_INFO is the load or store statement.
2891 Set *DATAREF_BUMP to the amount that should be added to the base
2892 address after each copy of the vectorized statement. Set *VEC_OFFSET
2893 to an invariant offset vector in which element I has the value
2894 I * DR_STEP / SCALE. */
2896 static void
2897 vect_get_strided_load_store_ops (stmt_vec_info stmt_info,
2898 loop_vec_info loop_vinfo,
2899 gather_scatter_info *gs_info,
2900 tree *dataref_bump, tree *vec_offset)
2902 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
2903 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2904 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2905 gimple_seq stmts;
2907 tree bump = size_binop (MULT_EXPR,
2908 fold_convert (sizetype, DR_STEP (dr)),
2909 size_int (TYPE_VECTOR_SUBPARTS (vectype)));
2910 *dataref_bump = force_gimple_operand (bump, &stmts, true, NULL_TREE);
2911 if (stmts)
2912 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
2914 /* The offset given in GS_INFO can have pointer type, so use the element
2915 type of the vector instead. */
2916 tree offset_type = TREE_TYPE (gs_info->offset);
2917 tree offset_vectype = get_vectype_for_scalar_type (offset_type);
2918 offset_type = TREE_TYPE (offset_vectype);
2920 /* Calculate X = DR_STEP / SCALE and convert it to the appropriate type. */
2921 tree step = size_binop (EXACT_DIV_EXPR, DR_STEP (dr),
2922 ssize_int (gs_info->scale));
2923 step = fold_convert (offset_type, step);
2924 step = force_gimple_operand (step, &stmts, true, NULL_TREE);
2926 /* Create {0, X, X*2, X*3, ...}. */
2927 *vec_offset = gimple_build (&stmts, VEC_SERIES_EXPR, offset_vectype,
2928 build_zero_cst (offset_type), step);
2929 if (stmts)
2930 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
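/* Editorial example with made-up numbers: for DR_STEP = 8, SCALE = 4
   and a 4-element offset vector, X = 8 / 4 = 2, so the VEC_SERIES_EXPR
   above yields

     *vec_offset   = { 0, 2, 4, 6 }
     *dataref_bump = 8 * 4 = 32

   i.e. each copy advances the base address by 32 bytes while the
   gather/scatter hardware multiplies the offsets by SCALE = 4.  */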
2933 /* Return the amount that should be added to a vector pointer to move
2934 to the next or previous copy of AGGR_TYPE. DR_INFO is the data reference
2935 being vectorized and MEMORY_ACCESS_TYPE describes the type of
2936 vectorization. */
2938 static tree
2939 vect_get_data_ptr_increment (dr_vec_info *dr_info, tree aggr_type,
2940 vect_memory_access_type memory_access_type)
2942 if (memory_access_type == VMAT_INVARIANT)
2943 return size_zero_node;
2945 tree iv_step = TYPE_SIZE_UNIT (aggr_type);
2946 tree step = vect_dr_behavior (dr_info)->step;
2947 if (tree_int_cst_sgn (step) == -1)
2948 iv_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv_step), iv_step);
2949 return iv_step;
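/* Editorial note: for a contiguous access the increment is simply the
   vector size (e.g. 16 bytes for a hypothetical 16-byte AGGR_TYPE);
   a negative DR step negates that amount so the pointer walks
   downwards, and invariant accesses do not move at all
   (size_zero_node).  */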
2952 /* Check and perform vectorization of BUILT_IN_BSWAP{16,32,64}. */
2954 static bool
2955 vectorizable_bswap (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
2956 stmt_vec_info *vec_stmt, slp_tree slp_node,
2957 tree vectype_in, stmt_vector_for_cost *cost_vec)
2959 tree op, vectype;
2960 gcall *stmt = as_a <gcall *> (stmt_info->stmt);
2961 vec_info *vinfo = stmt_info->vinfo;
2962 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2963 unsigned ncopies;
2965 op = gimple_call_arg (stmt, 0);
2966 vectype = STMT_VINFO_VECTYPE (stmt_info);
2967 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2969 /* Multiple types in SLP are handled by creating the appropriate number of
2970 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2971 case of SLP. */
2972 if (slp_node)
2973 ncopies = 1;
2974 else
2975 ncopies = vect_get_num_copies (loop_vinfo, vectype);
2977 gcc_assert (ncopies >= 1);
2979 tree char_vectype = get_same_sized_vectype (char_type_node, vectype_in);
2980 if (! char_vectype)
2981 return false;
2983 poly_uint64 num_bytes = TYPE_VECTOR_SUBPARTS (char_vectype);
2984 unsigned word_bytes;
2985 if (!constant_multiple_p (num_bytes, nunits, &word_bytes))
2986 return false;
2988 /* The encoding uses one stepped pattern for each byte in the word. */
2989 vec_perm_builder elts (num_bytes, word_bytes, 3);
2990 for (unsigned i = 0; i < 3; ++i)
2991 for (unsigned j = 0; j < word_bytes; ++j)
2992 elts.quick_push ((i + 1) * word_bytes - j - 1);
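/* Editorial illustration: for a hypothetical V4SI input (num_bytes = 16,
   word_bytes = 4) the per-byte stepped patterns built above expand to

     { 3, 2, 1, 0,  7, 6, 5, 4,  11, 10, 9, 8,  15, 14, 13, 12 }

   i.e. the bytes of each 32-bit word are reversed in place, matching
   the scalar semantics

     for (i = 0; i < 4; i++)
       out[i] = __builtin_bswap32 (in[i]);  */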
2994 vec_perm_indices indices (elts, 1, num_bytes);
2995 if (!can_vec_perm_const_p (TYPE_MODE (char_vectype), indices))
2996 return false;
2998 if (! vec_stmt)
3000 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
3001 DUMP_VECT_SCOPE ("vectorizable_bswap");
3002 if (! slp_node)
3004 record_stmt_cost (cost_vec,
3005 1, vector_stmt, stmt_info, 0, vect_prologue);
3006 record_stmt_cost (cost_vec,
3007 ncopies, vec_perm, stmt_info, 0, vect_body);
3009 return true;
3012 tree bswap_vconst = vec_perm_indices_to_tree (char_vectype, indices);
3014 /* Transform. */
3015 vec<tree> vec_oprnds = vNULL;
3016 stmt_vec_info new_stmt_info = NULL;
3017 stmt_vec_info prev_stmt_info = NULL;
3018 for (unsigned j = 0; j < ncopies; j++)
3020 /* Handle uses. */
3021 if (j == 0)
3022 vect_get_vec_defs (op, NULL, stmt_info, &vec_oprnds, NULL, slp_node);
3023 else
3024 vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds, NULL);
3026 /* Arguments are ready. Create the new vector stmt. */
3027 unsigned i;
3028 tree vop;
3029 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
3031 gimple *new_stmt;
3032 tree tem = make_ssa_name (char_vectype);
3033 new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
3034 char_vectype, vop));
3035 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
3036 tree tem2 = make_ssa_name (char_vectype);
3037 new_stmt = gimple_build_assign (tem2, VEC_PERM_EXPR,
3038 tem, tem, bswap_vconst);
3039 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
3040 tem = make_ssa_name (vectype);
3041 new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
3042 vectype, tem2));
3043 new_stmt_info
3044 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
3045 if (slp_node)
3046 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
3049 if (slp_node)
3050 continue;
3052 if (j == 0)
3053 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
3054 else
3055 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
3057 prev_stmt_info = new_stmt_info;
3060 vec_oprnds.release ();
3061 return true;
3064 /* Return true if vector types VECTYPE_IN and VECTYPE_OUT have
3065 integer elements and if we can narrow VECTYPE_IN to VECTYPE_OUT
3066 in a single step. On success, store the binary pack code in
3067 *CONVERT_CODE. */
3069 static bool
3070 simple_integer_narrowing (tree vectype_out, tree vectype_in,
3071 tree_code *convert_code)
3073 if (!INTEGRAL_TYPE_P (TREE_TYPE (vectype_out))
3074 || !INTEGRAL_TYPE_P (TREE_TYPE (vectype_in)))
3075 return false;
3077 tree_code code;
3078 int multi_step_cvt = 0;
3079 auto_vec <tree, 8> interm_types;
3080 if (!supportable_narrowing_operation (NOP_EXPR, vectype_out, vectype_in,
3081 &code, &multi_step_cvt,
3082 &interm_types)
3083 || multi_step_cvt)
3084 return false;
3086 *convert_code = code;
3087 return true;
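/* Editorial note: a typical accepted case packs two vectors of wider
   integers (say two V2DI half-results) into one narrower destination
   vector (V4SI) with a single VEC_PACK_TRUNC_EXPR; conversions that
   would need intermediate types (multi_step_cvt != 0) are rejected.  */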
3090 /* Function vectorizable_call.
3092 Check if STMT_INFO performs a function call that can be vectorized.
3093 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
3094 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3095 Return true if STMT_INFO is vectorizable in this way. */
3097 static bool
3098 vectorizable_call (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
3099 stmt_vec_info *vec_stmt, slp_tree slp_node,
3100 stmt_vector_for_cost *cost_vec)
3102 gcall *stmt;
3103 tree vec_dest;
3104 tree scalar_dest;
3105 tree op;
3106 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
3107 stmt_vec_info prev_stmt_info;
3108 tree vectype_out, vectype_in;
3109 poly_uint64 nunits_in;
3110 poly_uint64 nunits_out;
3111 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3112 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3113 vec_info *vinfo = stmt_info->vinfo;
3114 tree fndecl, new_temp, rhs_type;
3115 enum vect_def_type dt[4]
3116 = { vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type,
3117 vect_unknown_def_type };
3118 int ndts = ARRAY_SIZE (dt);
3119 int ncopies, j;
3120 auto_vec<tree, 8> vargs;
3121 auto_vec<tree, 8> orig_vargs;
3122 enum { NARROW, NONE, WIDEN } modifier;
3123 size_t i, nargs;
3124 tree lhs;
3126 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3127 return false;
3129 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
3130 && ! vec_stmt)
3131 return false;
3133 /* Is STMT_INFO a vectorizable call? */
3134 stmt = dyn_cast <gcall *> (stmt_info->stmt);
3135 if (!stmt)
3136 return false;
3138 if (gimple_call_internal_p (stmt)
3139 && (internal_load_fn_p (gimple_call_internal_fn (stmt))
3140 || internal_store_fn_p (gimple_call_internal_fn (stmt))))
3141 /* Handled by vectorizable_load and vectorizable_store. */
3142 return false;
3144 if (gimple_call_lhs (stmt) == NULL_TREE
3145 || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
3146 return false;
3148 gcc_checking_assert (!stmt_can_throw_internal (stmt));
3150 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3152 /* Process function arguments. */
3153 rhs_type = NULL_TREE;
3154 vectype_in = NULL_TREE;
3155 nargs = gimple_call_num_args (stmt);
3157 /* Bail out if the function has more than four arguments; we do not have
3158 interesting builtin functions to vectorize with more than two arguments
3159 except for fma. No arguments is also not good. */
3160 if (nargs == 0 || nargs > 4)
3161 return false;
3163 /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic. */
3164 combined_fn cfn = gimple_call_combined_fn (stmt);
3165 if (cfn == CFN_GOMP_SIMD_LANE)
3167 nargs = 0;
3168 rhs_type = unsigned_type_node;
3171 int mask_opno = -1;
3172 if (internal_fn_p (cfn))
3173 mask_opno = internal_fn_mask_index (as_internal_fn (cfn));
3175 for (i = 0; i < nargs; i++)
3177 tree opvectype;
3179 op = gimple_call_arg (stmt, i);
3180 if (!vect_is_simple_use (op, vinfo, &dt[i], &opvectype))
3182 if (dump_enabled_p ())
3183 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3184 "use not simple.\n");
3185 return false;
3188 /* Skip the mask argument to an internal function. This operand
3189 has been converted via a pattern if necessary. */
3190 if ((int) i == mask_opno)
3191 continue;
3193 /* We can only handle calls with arguments of the same type. */
3194 if (rhs_type
3195 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
3197 if (dump_enabled_p ())
3198 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3199 "argument types differ.\n");
3200 return false;
3202 if (!rhs_type)
3203 rhs_type = TREE_TYPE (op);
3205 if (!vectype_in)
3206 vectype_in = opvectype;
3207 else if (opvectype
3208 && opvectype != vectype_in)
3210 if (dump_enabled_p ())
3211 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3212 "argument vector types differ.\n");
3213 return false;
3216 /* If all arguments are external or constant defs use a vector type with
3217 the same size as the output vector type. */
3218 if (!vectype_in)
3219 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
3220 if (vec_stmt)
3221 gcc_assert (vectype_in);
3222 if (!vectype_in)
3224 if (dump_enabled_p ())
3226 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3227 "no vectype for scalar type ");
3228 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
3229 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3232 return false;
3235 /* FORNOW */
3236 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
3237 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
3238 if (known_eq (nunits_in * 2, nunits_out))
3239 modifier = NARROW;
3240 else if (known_eq (nunits_out, nunits_in))
3241 modifier = NONE;
3242 else if (known_eq (nunits_out * 2, nunits_in))
3243 modifier = WIDEN;
3244 else
3245 return false;
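/* Editorial example: with 32-bit arguments and a 64-bit result on
   128-bit vectors, nunits_in = 4 and nunits_out = 2, so the modifier
   is WIDEN; the mirror case (64-bit arguments, 32-bit result) gives
   NARROW, and two vector calls are then combined into one result
   vector further down.  */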
3247 /* We only handle functions that do not read or clobber memory. */
3248 if (gimple_vuse (stmt))
3250 if (dump_enabled_p ())
3251 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3252 "function reads from or writes to memory.\n");
3253 return false;
3256 /* For now, we only vectorize functions if a target specific builtin
3257 is available. TODO -- in some cases, it might be profitable to
3258 insert the calls for pieces of the vector, in order to be able
3259 to vectorize other operations in the loop. */
3260 fndecl = NULL_TREE;
3261 internal_fn ifn = IFN_LAST;
3262 tree callee = gimple_call_fndecl (stmt);
3264 /* First try using an internal function. */
3265 tree_code convert_code = ERROR_MARK;
3266 if (cfn != CFN_LAST
3267 && (modifier == NONE
3268 || (modifier == NARROW
3269 && simple_integer_narrowing (vectype_out, vectype_in,
3270 &convert_code))))
3271 ifn = vectorizable_internal_function (cfn, callee, vectype_out,
3272 vectype_in);
3274 /* If that fails, try asking for a target-specific built-in function. */
3275 if (ifn == IFN_LAST)
3277 if (cfn != CFN_LAST)
3278 fndecl = targetm.vectorize.builtin_vectorized_function
3279 (cfn, vectype_out, vectype_in);
3280 else if (callee)
3281 fndecl = targetm.vectorize.builtin_md_vectorized_function
3282 (callee, vectype_out, vectype_in);
3285 if (ifn == IFN_LAST && !fndecl)
3287 if (cfn == CFN_GOMP_SIMD_LANE
3288 && !slp_node
3289 && loop_vinfo
3290 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
3291 && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
3292 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
3293 == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
3295 /* We can handle IFN_GOMP_SIMD_LANE by returning a
3296 { 0, 1, 2, ... vf - 1 } vector. */
3297 gcc_assert (nargs == 0);
3299 else if (modifier == NONE
3300 && (gimple_call_builtin_p (stmt, BUILT_IN_BSWAP16)
3301 || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP32)
3302 || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP64)))
3303 return vectorizable_bswap (stmt_info, gsi, vec_stmt, slp_node,
3304 vectype_in, cost_vec);
3305 else
3307 if (dump_enabled_p ())
3308 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3309 "function is not vectorizable.\n");
3310 return false;
3314 if (slp_node)
3315 ncopies = 1;
3316 else if (modifier == NARROW && ifn == IFN_LAST)
3317 ncopies = vect_get_num_copies (loop_vinfo, vectype_out);
3318 else
3319 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
3321 /* Sanity check: make sure that at least one copy of the vectorized stmt
3322 needs to be generated. */
3323 gcc_assert (ncopies >= 1);
3325 vec_loop_masks *masks = (loop_vinfo ? &LOOP_VINFO_MASKS (loop_vinfo) : NULL);
3326 if (!vec_stmt) /* transformation not required. */
3328 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
3329 DUMP_VECT_SCOPE ("vectorizable_call");
3330 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec);
3331 if (ifn != IFN_LAST && modifier == NARROW && !slp_node)
3332 record_stmt_cost (cost_vec, ncopies / 2,
3333 vec_promote_demote, stmt_info, 0, vect_body);
3335 if (loop_vinfo && mask_opno >= 0)
3337 unsigned int nvectors = (slp_node
3338 ? SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node)
3339 : ncopies);
3340 vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype_out);
3342 return true;
3345 /* Transform. */
3347 if (dump_enabled_p ())
3348 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
3350 /* Handle def. */
3351 scalar_dest = gimple_call_lhs (stmt);
3352 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
3354 bool masked_loop_p = loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
3356 stmt_vec_info new_stmt_info = NULL;
3357 prev_stmt_info = NULL;
3358 if (modifier == NONE || ifn != IFN_LAST)
3360 tree prev_res = NULL_TREE;
3361 vargs.safe_grow (nargs);
3362 orig_vargs.safe_grow (nargs);
3363 for (j = 0; j < ncopies; ++j)
3365 /* Build argument list for the vectorized call. */
3366 if (slp_node)
3368 auto_vec<vec<tree> > vec_defs (nargs);
3369 vec<tree> vec_oprnds0;
3371 for (i = 0; i < nargs; i++)
3372 vargs[i] = gimple_call_arg (stmt, i);
3373 vect_get_slp_defs (vargs, slp_node, &vec_defs);
3374 vec_oprnds0 = vec_defs[0];
3376 /* Arguments are ready. Create the new vector stmt. */
3377 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
3379 size_t k;
3380 for (k = 0; k < nargs; k++)
3382 vec<tree> vec_oprndsk = vec_defs[k];
3383 vargs[k] = vec_oprndsk[i];
3385 if (modifier == NARROW)
3387 /* We don't define any narrowing conditional functions
3388 at present. */
3389 gcc_assert (mask_opno < 0);
3390 tree half_res = make_ssa_name (vectype_in);
3391 gcall *call
3392 = gimple_build_call_internal_vec (ifn, vargs);
3393 gimple_call_set_lhs (call, half_res);
3394 gimple_call_set_nothrow (call, true);
3395 new_stmt_info
3396 = vect_finish_stmt_generation (stmt_info, call, gsi);
3397 if ((i & 1) == 0)
3399 prev_res = half_res;
3400 continue;
3402 new_temp = make_ssa_name (vec_dest);
3403 gimple *new_stmt
3404 = gimple_build_assign (new_temp, convert_code,
3405 prev_res, half_res);
3406 new_stmt_info
3407 = vect_finish_stmt_generation (stmt_info, new_stmt,
3408 gsi);
3410 else
3412 if (mask_opno >= 0 && masked_loop_p)
3414 unsigned int vec_num = vec_oprnds0.length ();
3415 /* Always true for SLP. */
3416 gcc_assert (ncopies == 1);
3417 tree mask = vect_get_loop_mask (gsi, masks, vec_num,
3418 vectype_out, i);
3419 vargs[mask_opno] = prepare_load_store_mask
3420 (TREE_TYPE (mask), mask, vargs[mask_opno], gsi);
3423 gcall *call;
3424 if (ifn != IFN_LAST)
3425 call = gimple_build_call_internal_vec (ifn, vargs);
3426 else
3427 call = gimple_build_call_vec (fndecl, vargs);
3428 new_temp = make_ssa_name (vec_dest, call);
3429 gimple_call_set_lhs (call, new_temp);
3430 gimple_call_set_nothrow (call, true);
3431 new_stmt_info
3432 = vect_finish_stmt_generation (stmt_info, call, gsi);
3434 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
3437 for (i = 0; i < nargs; i++)
3439 vec<tree> vec_oprndsi = vec_defs[i];
3440 vec_oprndsi.release ();
3442 continue;
3445 for (i = 0; i < nargs; i++)
3447 op = gimple_call_arg (stmt, i);
3448 if (j == 0)
3449 vec_oprnd0
3450 = vect_get_vec_def_for_operand (op, stmt_info);
3451 else
3452 vec_oprnd0
3453 = vect_get_vec_def_for_stmt_copy (vinfo, orig_vargs[i]);
3455 orig_vargs[i] = vargs[i] = vec_oprnd0;
3458 if (mask_opno >= 0 && masked_loop_p)
3460 tree mask = vect_get_loop_mask (gsi, masks, ncopies,
3461 vectype_out, j);
3462 vargs[mask_opno]
3463 = prepare_load_store_mask (TREE_TYPE (mask), mask,
3464 vargs[mask_opno], gsi);
3467 if (cfn == CFN_GOMP_SIMD_LANE)
3469 tree cst = build_index_vector (vectype_out, j * nunits_out, 1);
3470 tree new_var
3471 = vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_");
3472 gimple *init_stmt = gimple_build_assign (new_var, cst);
3473 vect_init_vector_1 (stmt_info, init_stmt, NULL);
3474 new_temp = make_ssa_name (vec_dest);
3475 gimple *new_stmt = gimple_build_assign (new_temp, new_var);
3476 new_stmt_info
3477 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
3479 else if (modifier == NARROW)
3481 /* We don't define any narrowing conditional functions at
3482 present. */
3483 gcc_assert (mask_opno < 0);
3484 tree half_res = make_ssa_name (vectype_in);
3485 gcall *call = gimple_build_call_internal_vec (ifn, vargs);
3486 gimple_call_set_lhs (call, half_res);
3487 gimple_call_set_nothrow (call, true);
3488 new_stmt_info
3489 = vect_finish_stmt_generation (stmt_info, call, gsi);
3490 if ((j & 1) == 0)
3492 prev_res = half_res;
3493 continue;
3495 new_temp = make_ssa_name (vec_dest);
3496 gassign *new_stmt = gimple_build_assign (new_temp, convert_code,
3497 prev_res, half_res);
3498 new_stmt_info
3499 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
3501 else
3503 gcall *call;
3504 if (ifn != IFN_LAST)
3505 call = gimple_build_call_internal_vec (ifn, vargs);
3506 else
3507 call = gimple_build_call_vec (fndecl, vargs);
3508 new_temp = make_ssa_name (vec_dest, call);
3509 gimple_call_set_lhs (call, new_temp);
3510 gimple_call_set_nothrow (call, true);
3511 new_stmt_info
3512 = vect_finish_stmt_generation (stmt_info, call, gsi);
3515 if (j == (modifier == NARROW ? 1 : 0))
3516 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
3517 else
3518 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
3520 prev_stmt_info = new_stmt_info;
3523 else if (modifier == NARROW)
3525 /* We don't define any narrowing conditional functions at present. */
3526 gcc_assert (mask_opno < 0);
3527 for (j = 0; j < ncopies; ++j)
3529 /* Build argument list for the vectorized call. */
3530 if (j == 0)
3531 vargs.create (nargs * 2);
3532 else
3533 vargs.truncate (0);
3535 if (slp_node)
3537 auto_vec<vec<tree> > vec_defs (nargs);
3538 vec<tree> vec_oprnds0;
3540 for (i = 0; i < nargs; i++)
3541 vargs.quick_push (gimple_call_arg (stmt, i));
3542 vect_get_slp_defs (vargs, slp_node, &vec_defs);
3543 vec_oprnds0 = vec_defs[0];
3545 /* Arguments are ready. Create the new vector stmt. */
3546 for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
3548 size_t k;
3549 vargs.truncate (0);
3550 for (k = 0; k < nargs; k++)
3552 vec<tree> vec_oprndsk = vec_defs[k];
3553 vargs.quick_push (vec_oprndsk[i]);
3554 vargs.quick_push (vec_oprndsk[i + 1]);
3556 gcall *call;
3557 if (ifn != IFN_LAST)
3558 call = gimple_build_call_internal_vec (ifn, vargs);
3559 else
3560 call = gimple_build_call_vec (fndecl, vargs);
3561 new_temp = make_ssa_name (vec_dest, call);
3562 gimple_call_set_lhs (call, new_temp);
3563 gimple_call_set_nothrow (call, true);
3564 new_stmt_info
3565 = vect_finish_stmt_generation (stmt_info, call, gsi);
3566 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
3569 for (i = 0; i < nargs; i++)
3571 vec<tree> vec_oprndsi = vec_defs[i];
3572 vec_oprndsi.release ();
3574 continue;
3577 for (i = 0; i < nargs; i++)
3579 op = gimple_call_arg (stmt, i);
3580 if (j == 0)
3582 vec_oprnd0
3583 = vect_get_vec_def_for_operand (op, stmt_info);
3584 vec_oprnd1
3585 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd0);
3587 else
3589 vec_oprnd1 = gimple_call_arg (new_stmt_info->stmt,
3590 2 * i + 1);
3591 vec_oprnd0
3592 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd1);
3593 vec_oprnd1
3594 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd0);
3597 vargs.quick_push (vec_oprnd0);
3598 vargs.quick_push (vec_oprnd1);
3601 gcall *new_stmt = gimple_build_call_vec (fndecl, vargs);
3602 new_temp = make_ssa_name (vec_dest, new_stmt);
3603 gimple_call_set_lhs (new_stmt, new_temp);
3604 new_stmt_info
3605 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
3607 if (j == 0)
3608 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt_info;
3609 else
3610 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
3612 prev_stmt_info = new_stmt_info;
3615 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3617 else
3618 /* No current target implements this case. */
3619 return false;
3621 vargs.release ();
3623 /* The call in STMT might prevent it from being removed in dce.
3624 However, we cannot remove it here, due to the way the ssa name
3625 it defines is mapped to the new definition. So just replace the
3626 rhs of the statement with something harmless. */
3628 if (slp_node)
3629 return true;
3631 stmt_info = vect_orig_stmt (stmt_info);
3632 lhs = gimple_get_lhs (stmt_info->stmt);
3634 gassign *new_stmt
3635 = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
3636 vinfo->replace_stmt (gsi, stmt_info, new_stmt);
3638 return true;
3642 struct simd_call_arg_info
3644 tree vectype;
3645 tree op;
3646 HOST_WIDE_INT linear_step;
3647 enum vect_def_type dt;
3648 unsigned int align;
3649 bool simd_lane_linear;
3652 /* Helper function of vectorizable_simd_clone_call. If OP, an SSA_NAME,
3653 is linear within a simd lane (but not within the whole loop), note it in
3654 *ARGINFO. */
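/* An illustrative sketch of the pattern this recognizes (the SSA names
   below are hypothetical):

     _1 = GOMP_SIMD_LANE (simduid.0_7);
     _2 = _1 * 8;
     p_3 = &a + _2;

   Here p_3 is linear within a simd lane with base &a and step 8, even
   though it is not a simple induction variable of the loop, so ARGINFO
   gets op = &a, linear_step = 8 and simd_lane_linear = true.  */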
3656 static void
3657 vect_simd_lane_linear (tree op, struct loop *loop,
3658 struct simd_call_arg_info *arginfo)
3660 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
3662 if (!is_gimple_assign (def_stmt)
3663 || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
3664 || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
3665 return;
3667 tree base = gimple_assign_rhs1 (def_stmt);
3668 HOST_WIDE_INT linear_step = 0;
3669 tree v = gimple_assign_rhs2 (def_stmt);
3670 while (TREE_CODE (v) == SSA_NAME)
3672 tree t;
3673 def_stmt = SSA_NAME_DEF_STMT (v);
3674 if (is_gimple_assign (def_stmt))
3675 switch (gimple_assign_rhs_code (def_stmt))
3677 case PLUS_EXPR:
3678 t = gimple_assign_rhs2 (def_stmt);
3679 if (linear_step || TREE_CODE (t) != INTEGER_CST)
3680 return;
3681 base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
3682 v = gimple_assign_rhs1 (def_stmt);
3683 continue;
3684 case MULT_EXPR:
3685 t = gimple_assign_rhs2 (def_stmt);
3686 if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
3687 return;
3688 linear_step = tree_to_shwi (t);
3689 v = gimple_assign_rhs1 (def_stmt);
3690 continue;
3691 CASE_CONVERT:
3692 t = gimple_assign_rhs1 (def_stmt);
3693 if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
3694 || (TYPE_PRECISION (TREE_TYPE (v))
3695 < TYPE_PRECISION (TREE_TYPE (t))))
3696 return;
3697 if (!linear_step)
3698 linear_step = 1;
3699 v = t;
3700 continue;
3701 default:
3702 return;
3704 else if (gimple_call_internal_p (def_stmt, IFN_GOMP_SIMD_LANE)
3705 && loop->simduid
3706 && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
3707 && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
3708 == loop->simduid))
3710 if (!linear_step)
3711 linear_step = 1;
3712 arginfo->linear_step = linear_step;
3713 arginfo->op = base;
3714 arginfo->simd_lane_linear = true;
3715 return;
3720 /* Return the number of elements in vector type VECTYPE, which is associated
3721 with a SIMD clone. At present these vectors always have a constant
3722 length. */
3724 static unsigned HOST_WIDE_INT
3725 simd_clone_subparts (tree vectype)
3727 return TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
3730 /* Function vectorizable_simd_clone_call.
3732 Check if STMT_INFO performs a function call that can be vectorized
3733 by calling a simd clone of the function.
3734 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
3735 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3736 Return true if STMT_INFO is vectorizable in this way. */
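/* As an illustrative example (the clone name is hypothetical, though it
   follows the usual vector-ABI mangling):

     for (i = 0; i < n; i++)
       a[i] = foo (b[i]);

   can be vectorized by replacing four scalar calls to foo with a single
   call to a simd clone such as _ZGVbN4v_foo, which takes a vector of four
   arguments and produces a vector of four results per invocation.  */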
3738 static bool
3739 vectorizable_simd_clone_call (stmt_vec_info stmt_info,
3740 gimple_stmt_iterator *gsi,
3741 stmt_vec_info *vec_stmt, slp_tree slp_node,
3742 stmt_vector_for_cost *)
3744 tree vec_dest;
3745 tree scalar_dest;
3746 tree op, type;
3747 tree vec_oprnd0 = NULL_TREE;
3748 stmt_vec_info prev_stmt_info;
3749 tree vectype;
3750 unsigned int nunits;
3751 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3752 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3753 vec_info *vinfo = stmt_info->vinfo;
3754 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
3755 tree fndecl, new_temp;
3756 int ncopies, j;
3757 auto_vec<simd_call_arg_info> arginfo;
3758 vec<tree> vargs = vNULL;
3759 size_t i, nargs;
3760 tree lhs, rtype, ratype;
3761 vec<constructor_elt, va_gc> *ret_ctor_elts = NULL;
3763 /* Is STMT a vectorizable call? */
3764 gcall *stmt = dyn_cast <gcall *> (stmt_info->stmt);
3765 if (!stmt)
3766 return false;
3768 fndecl = gimple_call_fndecl (stmt);
3769 if (fndecl == NULL_TREE)
3770 return false;
3772 struct cgraph_node *node = cgraph_node::get (fndecl);
3773 if (node == NULL || node->simd_clones == NULL)
3774 return false;
3776 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3777 return false;
3779 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
3780 && ! vec_stmt)
3781 return false;
3783 if (gimple_call_lhs (stmt)
3784 && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
3785 return false;
3787 gcc_checking_assert (!stmt_can_throw_internal (stmt));
3789 vectype = STMT_VINFO_VECTYPE (stmt_info);
3791 if (loop_vinfo && nested_in_vect_loop_p (loop, stmt_info))
3792 return false;
3794 /* FORNOW */
3795 if (slp_node)
3796 return false;
3798 /* Process function arguments. */
3799 nargs = gimple_call_num_args (stmt);
3801 /* Bail out if the function has zero arguments. */
3802 if (nargs == 0)
3803 return false;
3805 arginfo.reserve (nargs, true);
3807 for (i = 0; i < nargs; i++)
3809 simd_call_arg_info thisarginfo;
3810 affine_iv iv;
3812 thisarginfo.linear_step = 0;
3813 thisarginfo.align = 0;
3814 thisarginfo.op = NULL_TREE;
3815 thisarginfo.simd_lane_linear = false;
3817 op = gimple_call_arg (stmt, i);
3818 if (!vect_is_simple_use (op, vinfo, &thisarginfo.dt,
3819 &thisarginfo.vectype)
3820 || thisarginfo.dt == vect_uninitialized_def)
3822 if (dump_enabled_p ())
3823 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3824 "use not simple.\n");
3825 return false;
3828 if (thisarginfo.dt == vect_constant_def
3829 || thisarginfo.dt == vect_external_def)
3830 gcc_assert (thisarginfo.vectype == NULL_TREE);
3831 else
3832 gcc_assert (thisarginfo.vectype != NULL_TREE);
3834 /* For linear arguments, the analyze phase should have saved
3835 the base and step in STMT_VINFO_SIMD_CLONE_INFO. */
3836 if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
3837 && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
3839 gcc_assert (vec_stmt);
3840 thisarginfo.linear_step
3841 = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
3842 thisarginfo.op
3843 = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
3844 thisarginfo.simd_lane_linear
3845 = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
3846 == boolean_true_node);
3847 /* If the loop has been peeled for alignment, we need to adjust it. */
3848 tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
3849 tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
3850 if (n1 != n2 && !thisarginfo.simd_lane_linear)
3852 tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
3853 tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
3854 tree opt = TREE_TYPE (thisarginfo.op);
3855 bias = fold_convert (TREE_TYPE (step), bias);
3856 bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
3857 thisarginfo.op
3858 = fold_build2 (POINTER_TYPE_P (opt)
3859 ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
3860 thisarginfo.op, bias);
3863 else if (!vec_stmt
3864 && thisarginfo.dt != vect_constant_def
3865 && thisarginfo.dt != vect_external_def
3866 && loop_vinfo
3867 && TREE_CODE (op) == SSA_NAME
3868 && simple_iv (loop, loop_containing_stmt (stmt), op,
3869 &iv, false)
3870 && tree_fits_shwi_p (iv.step))
3872 thisarginfo.linear_step = tree_to_shwi (iv.step);
3873 thisarginfo.op = iv.base;
3875 else if ((thisarginfo.dt == vect_constant_def
3876 || thisarginfo.dt == vect_external_def)
3877 && POINTER_TYPE_P (TREE_TYPE (op)))
3878 thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
3879 /* Addresses of array elements indexed by GOMP_SIMD_LANE are
3880 linear too. */
3881 if (POINTER_TYPE_P (TREE_TYPE (op))
3882 && !thisarginfo.linear_step
3883 && !vec_stmt
3884 && thisarginfo.dt != vect_constant_def
3885 && thisarginfo.dt != vect_external_def
3886 && loop_vinfo
3887 && !slp_node
3888 && TREE_CODE (op) == SSA_NAME)
3889 vect_simd_lane_linear (op, loop, &thisarginfo);
3891 arginfo.quick_push (thisarginfo);
3894 unsigned HOST_WIDE_INT vf;
3895 if (!LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
3897 if (dump_enabled_p ())
3898 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3899 "not considering SIMD clones; not yet supported"
3900 " for variable-width vectors.\n");
3901 return false;
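/* Select the best simd clone for this call.  The badness heuristic below
   prefers clones whose simdlen matches the vectorization factor: for
   example, with vf == 8 a clone with simdlen == 4 is penalized by
   (log2 (8) - log2 (4)) * 1024 == 1024, an inbranch clone by a further
   2048, and mismatched argument kinds or alignments add smaller
   penalties, so an exact-width non-inbranch clone wins when one
   exists.  */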
3904 unsigned int badness = 0;
3905 struct cgraph_node *bestn = NULL;
3906 if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
3907 bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
3908 else
3909 for (struct cgraph_node *n = node->simd_clones; n != NULL;
3910 n = n->simdclone->next_clone)
3912 unsigned int this_badness = 0;
3913 if (n->simdclone->simdlen > vf
3914 || n->simdclone->nargs != nargs)
3915 continue;
3916 if (n->simdclone->simdlen < vf)
3917 this_badness += (exact_log2 (vf)
3918 - exact_log2 (n->simdclone->simdlen)) * 1024;
3919 if (n->simdclone->inbranch)
3920 this_badness += 2048;
3921 int target_badness = targetm.simd_clone.usable (n);
3922 if (target_badness < 0)
3923 continue;
3924 this_badness += target_badness * 512;
3925 /* FORNOW: Have to add code to add the mask argument. */
3926 if (n->simdclone->inbranch)
3927 continue;
3928 for (i = 0; i < nargs; i++)
3930 switch (n->simdclone->args[i].arg_type)
3932 case SIMD_CLONE_ARG_TYPE_VECTOR:
3933 if (!useless_type_conversion_p
3934 (n->simdclone->args[i].orig_type,
3935 TREE_TYPE (gimple_call_arg (stmt, i))))
3936 i = -1;
3937 else if (arginfo[i].dt == vect_constant_def
3938 || arginfo[i].dt == vect_external_def
3939 || arginfo[i].linear_step)
3940 this_badness += 64;
3941 break;
3942 case SIMD_CLONE_ARG_TYPE_UNIFORM:
3943 if (arginfo[i].dt != vect_constant_def
3944 && arginfo[i].dt != vect_external_def)
3945 i = -1;
3946 break;
3947 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
3948 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
3949 if (arginfo[i].dt == vect_constant_def
3950 || arginfo[i].dt == vect_external_def
3951 || (arginfo[i].linear_step
3952 != n->simdclone->args[i].linear_step))
3953 i = -1;
3954 break;
3955 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
3956 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
3957 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
3958 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
3959 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
3960 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
3961 /* FORNOW */
3962 i = -1;
3963 break;
3964 case SIMD_CLONE_ARG_TYPE_MASK:
3965 gcc_unreachable ();
3967 if (i == (size_t) -1)
3968 break;
3969 if (n->simdclone->args[i].alignment > arginfo[i].align)
3971 i = -1;
3972 break;
3974 if (arginfo[i].align)
3975 this_badness += (exact_log2 (arginfo[i].align)
3976 - exact_log2 (n->simdclone->args[i].alignment));
3978 if (i == (size_t) -1)
3979 continue;
3980 if (bestn == NULL || this_badness < badness)
3982 bestn = n;
3983 badness = this_badness;
3987 if (bestn == NULL)
3988 return false;
3990 for (i = 0; i < nargs; i++)
3991 if ((arginfo[i].dt == vect_constant_def
3992 || arginfo[i].dt == vect_external_def)
3993 && bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
3995 arginfo[i].vectype
3996 = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
3997 i)));
3998 if (arginfo[i].vectype == NULL
3999 || (simd_clone_subparts (arginfo[i].vectype)
4000 > bestn->simdclone->simdlen))
4001 return false;
4004 fndecl = bestn->decl;
4005 nunits = bestn->simdclone->simdlen;
4006 ncopies = vf / nunits;
4008 /* If the function isn't const, only allow it in simd loops where the user
4009 has asserted that at least nunits consecutive iterations can be
4010 performed using SIMD instructions. */
4011 if ((loop == NULL || (unsigned) loop->safelen < nunits)
4012 && gimple_vuse (stmt))
4013 return false;
4015 /* Sanity check: make sure that at least one copy of the vectorized stmt
4016 needs to be generated. */
4017 gcc_assert (ncopies >= 1);
4019 if (!vec_stmt) /* transformation not required. */
4021 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
4022 for (i = 0; i < nargs; i++)
4023 if ((bestn->simdclone->args[i].arg_type
4024 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
4025 || (bestn->simdclone->args[i].arg_type
4026 == SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP))
4028 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
4029 + 1);
4030 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
4031 tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
4032 ? size_type_node : TREE_TYPE (arginfo[i].op);
4033 tree ls = build_int_cst (lst, arginfo[i].linear_step);
4034 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
4035 tree sll = arginfo[i].simd_lane_linear
4036 ? boolean_true_node : boolean_false_node;
4037 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
4039 STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
4040 DUMP_VECT_SCOPE ("vectorizable_simd_clone_call");
4041 /* vect_model_simple_cost (stmt_info, ncopies, dt, slp_node, cost_vec); */
4042 return true;
4045 /* Transform. */
4047 if (dump_enabled_p ())
4048 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
4050 /* Handle def. */
4051 scalar_dest = gimple_call_lhs (stmt);
4052 vec_dest = NULL_TREE;
4053 rtype = NULL_TREE;
4054 ratype = NULL_TREE;
4055 if (scalar_dest)
4057 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4058 rtype = TREE_TYPE (TREE_TYPE (fndecl));
4059 if (TREE_CODE (rtype) == ARRAY_TYPE)
4061 ratype = rtype;
4062 rtype = TREE_TYPE (ratype);
4066 prev_stmt_info = NULL;
4067 for (j = 0; j < ncopies; ++j)
4069 /* Build argument list for the vectorized call. */
4070 if (j == 0)
4071 vargs.create (nargs);
4072 else
4073 vargs.truncate (0);
4075 for (i = 0; i < nargs; i++)
4077 unsigned int k, l, m, o;
4078 tree atype;
4079 op = gimple_call_arg (stmt, i);
4080 switch (bestn->simdclone->args[i].arg_type)
4082 case SIMD_CLONE_ARG_TYPE_VECTOR:
4083 atype = bestn->simdclone->args[i].vector_type;
4084 o = nunits / simd_clone_subparts (atype);
4085 for (m = j * o; m < (j + 1) * o; m++)
4087 if (simd_clone_subparts (atype)
4088 < simd_clone_subparts (arginfo[i].vectype))
4090 poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
4091 k = (simd_clone_subparts (arginfo[i].vectype)
4092 / simd_clone_subparts (atype));
4093 gcc_assert ((k & (k - 1)) == 0);
4094 if (m == 0)
4095 vec_oprnd0
4096 = vect_get_vec_def_for_operand (op, stmt_info);
4097 else
4099 vec_oprnd0 = arginfo[i].op;
4100 if ((m & (k - 1)) == 0)
4101 vec_oprnd0
4102 = vect_get_vec_def_for_stmt_copy (vinfo,
4103 vec_oprnd0);
4105 arginfo[i].op = vec_oprnd0;
4106 vec_oprnd0
4107 = build3 (BIT_FIELD_REF, atype, vec_oprnd0,
4108 bitsize_int (prec),
4109 bitsize_int ((m & (k - 1)) * prec));
4110 gassign *new_stmt
4111 = gimple_build_assign (make_ssa_name (atype),
4112 vec_oprnd0);
4113 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
4114 vargs.safe_push (gimple_assign_lhs (new_stmt));
4116 else
4118 k = (simd_clone_subparts (atype)
4119 / simd_clone_subparts (arginfo[i].vectype));
4120 gcc_assert ((k & (k - 1)) == 0);
4121 vec<constructor_elt, va_gc> *ctor_elts;
4122 if (k != 1)
4123 vec_alloc (ctor_elts, k);
4124 else
4125 ctor_elts = NULL;
4126 for (l = 0; l < k; l++)
4128 if (m == 0 && l == 0)
4129 vec_oprnd0
4130 = vect_get_vec_def_for_operand (op, stmt_info);
4131 else
4132 vec_oprnd0
4133 = vect_get_vec_def_for_stmt_copy (vinfo,
4134 arginfo[i].op);
4135 arginfo[i].op = vec_oprnd0;
4136 if (k == 1)
4137 break;
4138 CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
4139 vec_oprnd0);
4141 if (k == 1)
4142 vargs.safe_push (vec_oprnd0);
4143 else
4145 vec_oprnd0 = build_constructor (atype, ctor_elts);
4146 gassign *new_stmt
4147 = gimple_build_assign (make_ssa_name (atype),
4148 vec_oprnd0);
4149 vect_finish_stmt_generation (stmt_info, new_stmt,
4150 gsi);
4151 vargs.safe_push (gimple_assign_lhs (new_stmt));
4155 break;
4156 case SIMD_CLONE_ARG_TYPE_UNIFORM:
4157 vargs.safe_push (op);
4158 break;
4159 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
4160 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
4161 if (j == 0)
4163 gimple_seq stmts;
4164 arginfo[i].op
4165 = force_gimple_operand (arginfo[i].op, &stmts, true,
4166 NULL_TREE);
4167 if (stmts != NULL)
4169 basic_block new_bb;
4170 edge pe = loop_preheader_edge (loop);
4171 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
4172 gcc_assert (!new_bb);
4174 if (arginfo[i].simd_lane_linear)
4176 vargs.safe_push (arginfo[i].op);
4177 break;
4179 tree phi_res = copy_ssa_name (op);
4180 gphi *new_phi = create_phi_node (phi_res, loop->header);
4181 loop_vinfo->add_stmt (new_phi);
4182 add_phi_arg (new_phi, arginfo[i].op,
4183 loop_preheader_edge (loop), UNKNOWN_LOCATION);
4184 enum tree_code code
4185 = POINTER_TYPE_P (TREE_TYPE (op))
4186 ? POINTER_PLUS_EXPR : PLUS_EXPR;
4187 tree type = POINTER_TYPE_P (TREE_TYPE (op))
4188 ? sizetype : TREE_TYPE (op);
4189 widest_int cst
4190 = wi::mul (bestn->simdclone->args[i].linear_step,
4191 ncopies * nunits);
4192 tree tcst = wide_int_to_tree (type, cst);
4193 tree phi_arg = copy_ssa_name (op);
4194 gassign *new_stmt
4195 = gimple_build_assign (phi_arg, code, phi_res, tcst);
4196 gimple_stmt_iterator si = gsi_after_labels (loop->header);
4197 gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
4198 loop_vinfo->add_stmt (new_stmt);
4199 add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
4200 UNKNOWN_LOCATION);
4201 arginfo[i].op = phi_res;
4202 vargs.safe_push (phi_res);
4204 else
4206 enum tree_code code
4207 = POINTER_TYPE_P (TREE_TYPE (op))
4208 ? POINTER_PLUS_EXPR : PLUS_EXPR;
4209 tree type = POINTER_TYPE_P (TREE_TYPE (op))
4210 ? sizetype : TREE_TYPE (op);
4211 widest_int cst
4212 = wi::mul (bestn->simdclone->args[i].linear_step,
4213 j * nunits);
4214 tree tcst = wide_int_to_tree (type, cst);
4215 new_temp = make_ssa_name (TREE_TYPE (op));
4216 gassign *new_stmt
4217 = gimple_build_assign (new_temp, code,
4218 arginfo[i].op, tcst);
4219 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
4220 vargs.safe_push (new_temp);
4222 break;
4223 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
4224 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
4225 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
4226 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
4227 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
4228 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
4229 default:
4230 gcc_unreachable ();
4234 gcall *new_call = gimple_build_call_vec (fndecl, vargs);
4235 if (vec_dest)
4237 gcc_assert (ratype || simd_clone_subparts (rtype) == nunits);
4238 if (ratype)
4239 new_temp = create_tmp_var (ratype);
4240 else if (simd_clone_subparts (vectype)
4241 == simd_clone_subparts (rtype))
4242 new_temp = make_ssa_name (vec_dest, new_call);
4243 else
4244 new_temp = make_ssa_name (rtype, new_call);
4245 gimple_call_set_lhs (new_call, new_temp);
4247 stmt_vec_info new_stmt_info
4248 = vect_finish_stmt_generation (stmt_info, new_call, gsi);
4250 if (vec_dest)
4252 if (simd_clone_subparts (vectype) < nunits)
4254 unsigned int k, l;
4255 poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
4256 poly_uint64 bytes = GET_MODE_SIZE (TYPE_MODE (vectype));
4257 k = nunits / simd_clone_subparts (vectype);
4258 gcc_assert ((k & (k - 1)) == 0);
4259 for (l = 0; l < k; l++)
4261 tree t;
4262 if (ratype)
4264 t = build_fold_addr_expr (new_temp);
4265 t = build2 (MEM_REF, vectype, t,
4266 build_int_cst (TREE_TYPE (t), l * bytes));
4268 else
4269 t = build3 (BIT_FIELD_REF, vectype, new_temp,
4270 bitsize_int (prec), bitsize_int (l * prec));
4271 gimple *new_stmt
4272 = gimple_build_assign (make_ssa_name (vectype), t);
4273 new_stmt_info
4274 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
4276 if (j == 0 && l == 0)
4277 STMT_VINFO_VEC_STMT (stmt_info)
4278 = *vec_stmt = new_stmt_info;
4279 else
4280 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
4282 prev_stmt_info = new_stmt_info;
4285 if (ratype)
4286 vect_clobber_variable (stmt_info, gsi, new_temp);
4287 continue;
4289 else if (simd_clone_subparts (vectype) > nunits)
4291 unsigned int k = (simd_clone_subparts (vectype)
4292 / simd_clone_subparts (rtype));
4293 gcc_assert ((k & (k - 1)) == 0);
4294 if ((j & (k - 1)) == 0)
4295 vec_alloc (ret_ctor_elts, k);
4296 if (ratype)
4298 unsigned int m, o = nunits / simd_clone_subparts (rtype);
4299 for (m = 0; m < o; m++)
4301 tree tem = build4 (ARRAY_REF, rtype, new_temp,
4302 size_int (m), NULL_TREE, NULL_TREE);
4303 gimple *new_stmt
4304 = gimple_build_assign (make_ssa_name (rtype), tem);
4305 new_stmt_info
4306 = vect_finish_stmt_generation (stmt_info, new_stmt,
4307 gsi);
4308 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
4309 gimple_assign_lhs (new_stmt));
4311 vect_clobber_variable (stmt_info, gsi, new_temp);
4313 else
4314 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
4315 if ((j & (k - 1)) != k - 1)
4316 continue;
4317 vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
4318 gimple *new_stmt
4319 = gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
4320 new_stmt_info
4321 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
4323 if ((unsigned) j == k - 1)
4324 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
4325 else
4326 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
4328 prev_stmt_info = new_stmt_info;
4329 continue;
4331 else if (ratype)
4333 tree t = build_fold_addr_expr (new_temp);
4334 t = build2 (MEM_REF, vectype, t,
4335 build_int_cst (TREE_TYPE (t), 0));
4336 gimple *new_stmt
4337 = gimple_build_assign (make_ssa_name (vec_dest), t);
4338 new_stmt_info
4339 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
4340 vect_clobber_variable (stmt_info, gsi, new_temp);
4344 if (j == 0)
4345 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
4346 else
4347 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
4349 prev_stmt_info = new_stmt_info;
4352 vargs.release ();
4354 /* The call in STMT might prevent it from being removed in dce.
4355 However, we cannot remove it here, due to the way the ssa name
4356 it defines is mapped to the new definition. So just replace the
4357 rhs of the statement with something harmless. */
4359 if (slp_node)
4360 return true;
4362 gimple *new_stmt;
4363 if (scalar_dest)
4365 type = TREE_TYPE (scalar_dest);
4366 lhs = gimple_call_lhs (vect_orig_stmt (stmt_info)->stmt);
4367 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
4369 else
4370 new_stmt = gimple_build_nop ();
4371 vinfo->replace_stmt (gsi, vect_orig_stmt (stmt_info), new_stmt);
4372 unlink_stmt_vdef (stmt);
4374 return true;
4378 /* Function vect_gen_widened_results_half
4380 Create a vector stmt whose code, number of arguments, and result
4381 variable are CODE, OP_TYPE, and VEC_DEST respectively, and whose arguments
4382 are VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at GSI.
4383 In the case that CODE is a CALL_EXPR, this means that a call to DECL
4384 needs to be created (DECL is a function-decl of a target-builtin).
4385 STMT_INFO is the original scalar stmt that we are vectorizing. */
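/* For example (an illustrative sketch): a widening multiply whose inputs
   are V8HI vectors produces V4SI results, so this helper is invoked twice
   per input pair, once with VEC_WIDEN_MULT_LO_EXPR and once with
   VEC_WIDEN_MULT_HI_EXPR, and the two V4SI halves together cover all
   eight input lanes.  */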
4387 static gimple *
4388 vect_gen_widened_results_half (enum tree_code code,
4389 tree decl,
4390 tree vec_oprnd0, tree vec_oprnd1, int op_type,
4391 tree vec_dest, gimple_stmt_iterator *gsi,
4392 stmt_vec_info stmt_info)
4394 gimple *new_stmt;
4395 tree new_temp;
4397 /* Generate half of the widened result: */
4398 if (code == CALL_EXPR)
4400 /* Target specific support */
4401 if (op_type == binary_op)
4402 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
4403 else
4404 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
4405 new_temp = make_ssa_name (vec_dest, new_stmt);
4406 gimple_call_set_lhs (new_stmt, new_temp);
4408 else
4410 /* Generic support */
4411 gcc_assert (op_type == TREE_CODE_LENGTH (code));
4412 if (op_type != binary_op)
4413 vec_oprnd1 = NULL;
4414 new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
4415 new_temp = make_ssa_name (vec_dest, new_stmt);
4416 gimple_assign_set_lhs (new_stmt, new_temp);
4418 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
4420 return new_stmt;
4424 /* Get vectorized definitions for loop-based vectorization of STMT_INFO.
4425 For the first operand we call vect_get_vec_def_for_operand (with OPRND
4426 containing the scalar operand), and for the rest we get a copy with
4427 vect_get_vec_def_for_stmt_copy() using the previous vector definition
4428 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
4429 The vectors are collected into VEC_OPRNDS. */
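/* For instance, a call with MULTI_STEP_CVT == 1 pushes four defs into
   VEC_OPRNDS: the def for the scalar operand followed by three successive
   stmt copies of it, which is exactly what a two-step narrowing sequence
   consumes per final result vector.  */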
4431 static void
4432 vect_get_loop_based_defs (tree *oprnd, stmt_vec_info stmt_info,
4433 vec<tree> *vec_oprnds, int multi_step_cvt)
4435 vec_info *vinfo = stmt_info->vinfo;
4436 tree vec_oprnd;
4438 /* Get first vector operand. */
4439 /* All the vector operands except the very first one (that is, the scalar oprnd)
4440 are stmt copies. */
4441 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
4442 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt_info);
4443 else
4444 vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, *oprnd);
4446 vec_oprnds->quick_push (vec_oprnd);
4448 /* Get second vector operand. */
4449 vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd);
4450 vec_oprnds->quick_push (vec_oprnd);
4452 *oprnd = vec_oprnd;
4454 /* For conversion in multiple steps, continue to get operands
4455 recursively. */
4456 if (multi_step_cvt)
4457 vect_get_loop_based_defs (oprnd, stmt_info, vec_oprnds,
4458 multi_step_cvt - 1);
4462 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
4463 For multi-step conversions store the resulting vectors and call the function
4464 recursively. */
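/* For example (illustrative): demoting four V4SI operands to V8HI packs
   them pairwise with VEC_PACK_TRUNC_EXPR into two V8HI vectors; with a
   further step down to V16QI, the recursive call packs those two results
   into a single V16QI vector.  */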
4466 static void
4467 vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
4468 int multi_step_cvt,
4469 stmt_vec_info stmt_info,
4470 vec<tree> vec_dsts,
4471 gimple_stmt_iterator *gsi,
4472 slp_tree slp_node, enum tree_code code,
4473 stmt_vec_info *prev_stmt_info)
4475 unsigned int i;
4476 tree vop0, vop1, new_tmp, vec_dest;
4478 vec_dest = vec_dsts.pop ();
4480 for (i = 0; i < vec_oprnds->length (); i += 2)
4482 /* Create demotion operation. */
4483 vop0 = (*vec_oprnds)[i];
4484 vop1 = (*vec_oprnds)[i + 1];
4485 gassign *new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
4486 new_tmp = make_ssa_name (vec_dest, new_stmt);
4487 gimple_assign_set_lhs (new_stmt, new_tmp);
4488 stmt_vec_info new_stmt_info
4489 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
4491 if (multi_step_cvt)
4492 /* Store the resulting vector for next recursive call. */
4493 (*vec_oprnds)[i/2] = new_tmp;
4494 else
4496 /* This is the last step of the conversion sequence. Store the
4497 vectors in SLP_NODE or in the vector info of the scalar statement
4498 (or in the STMT_VINFO_RELATED_STMT chain). */
4499 if (slp_node)
4500 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
4501 else
4503 if (!*prev_stmt_info)
4504 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt_info;
4505 else
4506 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt_info;
4508 *prev_stmt_info = new_stmt_info;
4513 /* For multi-step demotion operations we first generate demotion operations
4514 from the source type to the intermediate types, and then combine the
4515 results (stored in VEC_OPRNDS) with a demotion operation to the destination
4516 type. */
4517 if (multi_step_cvt)
4519 /* At each level of recursion we have half of the operands we had at the
4520 previous level. */
4521 vec_oprnds->truncate ((i+1)/2);
4522 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
4523 stmt_info, vec_dsts, gsi,
4524 slp_node, VEC_PACK_TRUNC_EXPR,
4525 prev_stmt_info);
4528 vec_dsts.quick_push (vec_dest);
4532 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
4533 and VEC_OPRNDS1, for a binary operation associated with scalar statement
4534 STMT_INFO. For multi-step conversions store the resulting vectors and
4535 call the function recursively. */
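/* For example (illustrative): promoting V8HI operands to V4SI results
   generates two new vectors per input, one with the _LO and one with the
   _HI variant of the widening operation, so on return VEC_OPRNDS0 holds
   twice as many vectors as it did on entry.  */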
4537 static void
4538 vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
4539 vec<tree> *vec_oprnds1,
4540 stmt_vec_info stmt_info, tree vec_dest,
4541 gimple_stmt_iterator *gsi,
4542 enum tree_code code1,
4543 enum tree_code code2, tree decl1,
4544 tree decl2, int op_type)
4546 int i;
4547 tree vop0, vop1, new_tmp1, new_tmp2;
4548 gimple *new_stmt1, *new_stmt2;
4549 vec<tree> vec_tmp = vNULL;
4551 vec_tmp.create (vec_oprnds0->length () * 2);
4552 FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
4554 if (op_type == binary_op)
4555 vop1 = (*vec_oprnds1)[i];
4556 else
4557 vop1 = NULL_TREE;
4559 /* Generate the two halves of promotion operation. */
4560 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
4561 op_type, vec_dest, gsi,
4562 stmt_info);
4563 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
4564 op_type, vec_dest, gsi,
4565 stmt_info);
4566 if (is_gimple_call (new_stmt1))
4568 new_tmp1 = gimple_call_lhs (new_stmt1);
4569 new_tmp2 = gimple_call_lhs (new_stmt2);
4571 else
4573 new_tmp1 = gimple_assign_lhs (new_stmt1);
4574 new_tmp2 = gimple_assign_lhs (new_stmt2);
4577 /* Store the results for the next step. */
4578 vec_tmp.quick_push (new_tmp1);
4579 vec_tmp.quick_push (new_tmp2);
4582 vec_oprnds0->release ();
4583 *vec_oprnds0 = vec_tmp;
4587 /* Check if STMT_INFO performs a conversion operation that can be vectorized.
4588 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
4589 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4590 Return true if STMT_INFO is vectorizable in this way. */
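/* As an illustrative example: with 128-bit vectors, converting int to
   double has nunits_in == 4 and nunits_out == 2, so the modifier computed
   below is WIDEN and each input vector yields two result vectors; double
   to float goes the other way and is classified as NARROW, while int to
   float keeps the number of lanes and is handled as NONE.  */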
4592 static bool
4593 vectorizable_conversion (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
4594 stmt_vec_info *vec_stmt, slp_tree slp_node,
4595 stmt_vector_for_cost *cost_vec)
4597 tree vec_dest;
4598 tree scalar_dest;
4599 tree op0, op1 = NULL_TREE;
4600 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
4601 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4602 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
4603 enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
4604 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
4605 tree new_temp;
4606 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4607 int ndts = 2;
4608 stmt_vec_info prev_stmt_info;
4609 poly_uint64 nunits_in;
4610 poly_uint64 nunits_out;
4611 tree vectype_out, vectype_in;
4612 int ncopies, i, j;
4613 tree lhs_type, rhs_type;
4614 enum { NARROW, NONE, WIDEN } modifier;
4615 vec<tree> vec_oprnds0 = vNULL;
4616 vec<tree> vec_oprnds1 = vNULL;
4617 tree vop0;
4618 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4619 vec_info *vinfo = stmt_info->vinfo;
4620 int multi_step_cvt = 0;
4621 vec<tree> interm_types = vNULL;
4622 tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
4623 int op_type;
4624 unsigned short fltsz;
4626 /* Is STMT a vectorizable conversion? */
4628 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4629 return false;
4631 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4632 && ! vec_stmt)
4633 return false;
4635 gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
4636 if (!stmt)
4637 return false;
4639 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4640 return false;
4642 code = gimple_assign_rhs_code (stmt);
4643 if (!CONVERT_EXPR_CODE_P (code)
4644 && code != FIX_TRUNC_EXPR
4645 && code != FLOAT_EXPR
4646 && code != WIDEN_MULT_EXPR
4647 && code != WIDEN_LSHIFT_EXPR)
4648 return false;
4650 op_type = TREE_CODE_LENGTH (code);
4652 /* Check types of lhs and rhs. */
4653 scalar_dest = gimple_assign_lhs (stmt);
4654 lhs_type = TREE_TYPE (scalar_dest);
4655 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4657 op0 = gimple_assign_rhs1 (stmt);
4658 rhs_type = TREE_TYPE (op0);
4660 if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
4661 && !((INTEGRAL_TYPE_P (lhs_type)
4662 && INTEGRAL_TYPE_P (rhs_type))
4663 || (SCALAR_FLOAT_TYPE_P (lhs_type)
4664 && SCALAR_FLOAT_TYPE_P (rhs_type))))
4665 return false;
4667 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
4668 && ((INTEGRAL_TYPE_P (lhs_type)
4669 && !type_has_mode_precision_p (lhs_type))
4670 || (INTEGRAL_TYPE_P (rhs_type)
4671 && !type_has_mode_precision_p (rhs_type))))
4673 if (dump_enabled_p ())
4674 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4675 "type conversion to/from bit-precision unsupported."
4676 "\n");
4677 return false;
4680 /* Check the operands of the operation. */
4681 if (!vect_is_simple_use (op0, vinfo, &dt[0], &vectype_in))
4683 if (dump_enabled_p ())
4684 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4685 "use not simple.\n");
4686 return false;
4688 if (op_type == binary_op)
4690 bool ok;
4692 op1 = gimple_assign_rhs2 (stmt);
4693 gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
4694 /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
4695 OP1. */
4696 if (CONSTANT_CLASS_P (op0))
4697 ok = vect_is_simple_use (op1, vinfo, &dt[1], &vectype_in);
4698 else
4699 ok = vect_is_simple_use (op1, vinfo, &dt[1]);
4701 if (!ok)
4703 if (dump_enabled_p ())
4704 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4705 "use not simple.\n");
4706 return false;
4710 /* If op0 is an external or constant def, use a vector type of
4711 the same size as the output vector type. */
4712 if (!vectype_in)
4713 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
4714 if (vec_stmt)
4715 gcc_assert (vectype_in);
4716 if (!vectype_in)
4718 if (dump_enabled_p ())
4720 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4721 "no vectype for scalar type ");
4722 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
4723 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4726 return false;
4729 if (VECTOR_BOOLEAN_TYPE_P (vectype_out)
4730 && !VECTOR_BOOLEAN_TYPE_P (vectype_in))
4732 if (dump_enabled_p ())
4734 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4735 "can't convert between boolean and non "
4736 "boolean vectors");
4737 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
4738 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4741 return false;
4744 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
4745 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4746 if (known_eq (nunits_out, nunits_in))
4747 modifier = NONE;
4748 else if (multiple_p (nunits_out, nunits_in))
4749 modifier = NARROW;
4750 else
4752 gcc_checking_assert (multiple_p (nunits_in, nunits_out));
4753 modifier = WIDEN;
4756 /* Multiple types in SLP are handled by creating the appropriate number of
4757 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4758 case of SLP. */
4759 if (slp_node)
4760 ncopies = 1;
4761 else if (modifier == NARROW)
4762 ncopies = vect_get_num_copies (loop_vinfo, vectype_out);
4763 else
4764 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
4766 /* Sanity check: make sure that at least one copy of the vectorized stmt
4767 needs to be generated. */
4768 gcc_assert (ncopies >= 1);
4770 bool found_mode = false;
4771 scalar_mode lhs_mode = SCALAR_TYPE_MODE (lhs_type);
4772 scalar_mode rhs_mode = SCALAR_TYPE_MODE (rhs_type);
4773 opt_scalar_mode rhs_mode_iter;
4775 /* Supportable by target? */
4776 switch (modifier)
4778 case NONE:
4779 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
4780 return false;
4781 if (supportable_convert_operation (code, vectype_out, vectype_in,
4782 &decl1, &code1))
4783 break;
4784 /* FALLTHRU */
4785 unsupported:
4786 if (dump_enabled_p ())
4787 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4788 "conversion not supported by target.\n");
4789 return false;
4791 case WIDEN:
4792 if (supportable_widening_operation (code, stmt_info, vectype_out,
4793 vectype_in, &code1, &code2,
4794 &multi_step_cvt, &interm_types))
4796 /* Binary widening operation can only be supported directly by the
4797 architecture. */
4798 gcc_assert (!(multi_step_cvt && op_type == binary_op));
4799 break;
4802 if (code != FLOAT_EXPR
4803 || GET_MODE_SIZE (lhs_mode) <= GET_MODE_SIZE (rhs_mode))
4804 goto unsupported;
4806 fltsz = GET_MODE_SIZE (lhs_mode);
4807 FOR_EACH_2XWIDER_MODE (rhs_mode_iter, rhs_mode)
4809 rhs_mode = rhs_mode_iter.require ();
4810 if (GET_MODE_SIZE (rhs_mode) > fltsz)
4811 break;
4813 cvt_type
4814 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
4815 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
4816 if (cvt_type == NULL_TREE)
4817 goto unsupported;
4819 if (GET_MODE_SIZE (rhs_mode) == fltsz)
4821 if (!supportable_convert_operation (code, vectype_out,
4822 cvt_type, &decl1, &codecvt1))
4823 goto unsupported;
4825 else if (!supportable_widening_operation (code, stmt_info,
4826 vectype_out, cvt_type,
4827 &codecvt1, &codecvt2,
4828 &multi_step_cvt,
4829 &interm_types))
4830 continue;
4831 else
4832 gcc_assert (multi_step_cvt == 0);
4834 if (supportable_widening_operation (NOP_EXPR, stmt_info, cvt_type,
4835 vectype_in, &code1, &code2,
4836 &multi_step_cvt, &interm_types))
4838 found_mode = true;
4839 break;
4843 if (!found_mode)
4844 goto unsupported;
4846 if (GET_MODE_SIZE (rhs_mode) == fltsz)
4847 codecvt2 = ERROR_MARK;
4848 else
4850 multi_step_cvt++;
4851 interm_types.safe_push (cvt_type);
4852 cvt_type = NULL_TREE;
4854 break;
4856 case NARROW:
4857 gcc_assert (op_type == unary_op);
4858 if (supportable_narrowing_operation (code, vectype_out, vectype_in,
4859 &code1, &multi_step_cvt,
4860 &interm_types))
4861 break;
4863 if (code != FIX_TRUNC_EXPR
4864 || GET_MODE_SIZE (lhs_mode) >= GET_MODE_SIZE (rhs_mode))
4865 goto unsupported;
4867 cvt_type
4868 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
4869 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
4870 if (cvt_type == NULL_TREE)
4871 goto unsupported;
4872 if (!supportable_convert_operation (code, cvt_type, vectype_in,
4873 &decl1, &codecvt1))
4874 goto unsupported;
4875 if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
4876 &code1, &multi_step_cvt,
4877 &interm_types))
4878 break;
4879 goto unsupported;
4881 default:
4882 gcc_unreachable ();
4885 if (!vec_stmt) /* transformation not required. */
4887 DUMP_VECT_SCOPE ("vectorizable_conversion");
4888 if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
4890 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
4891 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node,
4892 cost_vec);
4894 else if (modifier == NARROW)
4896 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
4897 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt,
4898 cost_vec);
4900 else
4902 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
4903 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt,
4904 cost_vec);
4906 interm_types.release ();
4907 return true;
4910 /* Transform. */
4911 if (dump_enabled_p ())
4912 dump_printf_loc (MSG_NOTE, vect_location,
4913 "transform conversion. ncopies = %d.\n", ncopies);
4915 if (op_type == binary_op)
4917 if (CONSTANT_CLASS_P (op0))
4918 op0 = fold_convert (TREE_TYPE (op1), op0);
4919 else if (CONSTANT_CLASS_P (op1))
4920 op1 = fold_convert (TREE_TYPE (op0), op1);
4923 /* In case of multi-step conversion, we first generate conversion operations
4924 to the intermediate types, and then from those types to the final one.
4925 We create vector destinations for the intermediate type (TYPES) received
4926 from supportable_*_operation, and store them in the correct order
4927 for future use in vect_create_vectorized_*_stmts (). */
4928 auto_vec<tree> vec_dsts (multi_step_cvt + 1);
4929 vec_dest = vect_create_destination_var (scalar_dest,
4930 (cvt_type && modifier == WIDEN)
4931 ? cvt_type : vectype_out);
4932 vec_dsts.quick_push (vec_dest);
4934 if (multi_step_cvt)
4936 for (i = interm_types.length () - 1;
4937 interm_types.iterate (i, &intermediate_type); i--)
4939 vec_dest = vect_create_destination_var (scalar_dest,
4940 intermediate_type);
4941 vec_dsts.quick_push (vec_dest);
4945 if (cvt_type)
4946 vec_dest = vect_create_destination_var (scalar_dest,
4947 modifier == WIDEN
4948 ? vectype_out : cvt_type);
4950 if (!slp_node)
4952 if (modifier == WIDEN)
4954 vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
4955 if (op_type == binary_op)
4956 vec_oprnds1.create (1);
4958 else if (modifier == NARROW)
4959 vec_oprnds0.create (
4960 2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
4962 else if (code == WIDEN_LSHIFT_EXPR)
4963 vec_oprnds1.create (slp_node->vec_stmts_size);
4965 last_oprnd = op0;
4966 prev_stmt_info = NULL;
4967 switch (modifier)
4969 case NONE:
4970 for (j = 0; j < ncopies; j++)
4972 if (j == 0)
4973 vect_get_vec_defs (op0, NULL, stmt_info, &vec_oprnds0,
4974 NULL, slp_node);
4975 else
4976 vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds0, NULL);
4978 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4980 stmt_vec_info new_stmt_info;
4981 /* Arguments are ready. Create the new vector stmt. */
4982 if (code1 == CALL_EXPR)
4984 gcall *new_stmt = gimple_build_call (decl1, 1, vop0);
4985 new_temp = make_ssa_name (vec_dest, new_stmt);
4986 gimple_call_set_lhs (new_stmt, new_temp);
4987 new_stmt_info
4988 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
4990 else
4992 gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
4993 gassign *new_stmt
4994 = gimple_build_assign (vec_dest, code1, vop0);
4995 new_temp = make_ssa_name (vec_dest, new_stmt);
4996 gimple_assign_set_lhs (new_stmt, new_temp);
4997 new_stmt_info
4998 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
5001 if (slp_node)
5002 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
5003 else
5005 if (!prev_stmt_info)
5006 STMT_VINFO_VEC_STMT (stmt_info)
5007 = *vec_stmt = new_stmt_info;
5008 else
5009 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
5010 prev_stmt_info = new_stmt_info;
5014 break;
5016 case WIDEN:
5017 /* In case the vectorization factor (VF) is bigger than the number
5018 of elements that we can fit in a vectype (nunits), we have to
5019 generate more than one vector stmt, i.e. we need to "unroll"
5020 the vector stmt by a factor of VF/nunits.
5021 for (j = 0; j < ncopies; j++)
5023 /* Handle uses. */
5024 if (j == 0)
5026 if (slp_node)
5028 if (code == WIDEN_LSHIFT_EXPR)
5030 unsigned int k;
5032 vec_oprnd1 = op1;
5033 /* Store vec_oprnd1 for every vector stmt to be created
5034 for SLP_NODE. We check during the analysis that all
5035 the shift arguments are the same. */
5036 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
5037 vec_oprnds1.quick_push (vec_oprnd1);
5039 vect_get_vec_defs (op0, NULL_TREE, stmt_info,
5040 &vec_oprnds0, NULL, slp_node);
5042 else
5043 vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0,
5044 &vec_oprnds1, slp_node);
5046 else
5048 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt_info);
5049 vec_oprnds0.quick_push (vec_oprnd0);
5050 if (op_type == binary_op)
5052 if (code == WIDEN_LSHIFT_EXPR)
5053 vec_oprnd1 = op1;
5054 else
5055 vec_oprnd1
5056 = vect_get_vec_def_for_operand (op1, stmt_info);
5057 vec_oprnds1.quick_push (vec_oprnd1);
5061 else
5063 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd0);
5064 vec_oprnds0.truncate (0);
5065 vec_oprnds0.quick_push (vec_oprnd0);
5066 if (op_type == binary_op)
5068 if (code == WIDEN_LSHIFT_EXPR)
5069 vec_oprnd1 = op1;
5070 else
5071 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (vinfo,
5072 vec_oprnd1);
5073 vec_oprnds1.truncate (0);
5074 vec_oprnds1.quick_push (vec_oprnd1);
5078 /* Arguments are ready. Create the new vector stmts. */
5079 for (i = multi_step_cvt; i >= 0; i--)
5081 tree this_dest = vec_dsts[i];
5082 enum tree_code c1 = code1, c2 = code2;
5083 if (i == 0 && codecvt2 != ERROR_MARK)
5085 c1 = codecvt1;
5086 c2 = codecvt2;
5088 vect_create_vectorized_promotion_stmts (&vec_oprnds0,
5089 &vec_oprnds1, stmt_info,
5090 this_dest, gsi,
5091 c1, c2, decl1, decl2,
5092 op_type);
5095 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
5097 stmt_vec_info new_stmt_info;
5098 if (cvt_type)
5100 if (codecvt1 == CALL_EXPR)
5102 gcall *new_stmt = gimple_build_call (decl1, 1, vop0);
5103 new_temp = make_ssa_name (vec_dest, new_stmt);
5104 gimple_call_set_lhs (new_stmt, new_temp);
5105 new_stmt_info
5106 = vect_finish_stmt_generation (stmt_info, new_stmt,
5107 gsi);
5109 else
5111 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
5112 new_temp = make_ssa_name (vec_dest);
5113 gassign *new_stmt
5114 = gimple_build_assign (new_temp, codecvt1, vop0);
5115 new_stmt_info
5116 = vect_finish_stmt_generation (stmt_info, new_stmt,
5117 gsi);
5120 else
5121 new_stmt_info = vinfo->lookup_def (vop0);
5123 if (slp_node)
5124 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
5125 else
5127 if (!prev_stmt_info)
5128 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt_info;
5129 else
5130 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
5131 prev_stmt_info = new_stmt_info;
5136 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
5137 break;
5139 case NARROW:
5140 /* In case the vectorization factor (VF) is bigger than the number
5141 of elements that we can fit in a vectype (nunits), we have to
5142 generate more than one vector stmt, i.e. we need to "unroll"
5143 the vector stmt by a factor of VF/nunits.
5144 for (j = 0; j < ncopies; j++)
5146 /* Handle uses. */
5147 if (slp_node)
5148 vect_get_vec_defs (op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL,
5149 slp_node);
5150 else
5152 vec_oprnds0.truncate (0);
5153 vect_get_loop_based_defs (&last_oprnd, stmt_info, &vec_oprnds0,
5154 vect_pow2 (multi_step_cvt) - 1);
5157 /* Arguments are ready. Create the new vector stmts. */
5158 if (cvt_type)
5159 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
5161 if (codecvt1 == CALL_EXPR)
5163 gcall *new_stmt = gimple_build_call (decl1, 1, vop0);
5164 new_temp = make_ssa_name (vec_dest, new_stmt);
5165 gimple_call_set_lhs (new_stmt, new_temp);
5166 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
5168 else
5170 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
5171 new_temp = make_ssa_name (vec_dest);
5172 gassign *new_stmt
5173 = gimple_build_assign (new_temp, codecvt1, vop0);
5174 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
5177 vec_oprnds0[i] = new_temp;
5180 vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
5181 stmt_info, vec_dsts, gsi,
5182 slp_node, code1,
5183 &prev_stmt_info);
5186 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
5187 break;
5190 vec_oprnds0.release ();
5191 vec_oprnds1.release ();
5192 interm_types.release ();
5194 return true;
5198 /* Function vectorizable_assignment.
5200 Check if STMT_INFO performs an assignment (copy) that can be vectorized.
5201 If VEC_STMT is also passed, vectorize the STMT_INFO: create a vectorized
5202 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
5203 Return true if STMT_INFO is vectorizable in this way. */
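/* For example (illustrative): a copy like

     _2 = (unsigned int) _1;

   where _1 is a signed int does not change the bit pattern, so it is
   vectorized as a single VIEW_CONVERT_EXPR of the vectorized definition
   of _1 rather than as a real conversion.  */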
5205 static bool
5206 vectorizable_assignment (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
5207 stmt_vec_info *vec_stmt, slp_tree slp_node,
5208 stmt_vector_for_cost *cost_vec)
5210 tree vec_dest;
5211 tree scalar_dest;
5212 tree op;
5213 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5214 tree new_temp;
5215 enum vect_def_type dt[1] = {vect_unknown_def_type};
5216 int ndts = 1;
5217 int ncopies;
5218 int i, j;
5219 vec<tree> vec_oprnds = vNULL;
5220 tree vop;
5221 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5222 vec_info *vinfo = stmt_info->vinfo;
5223 stmt_vec_info prev_stmt_info = NULL;
5224 enum tree_code code;
5225 tree vectype_in;
5227 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5228 return false;
5230 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5231 && ! vec_stmt)
5232 return false;
5234 /* Is vectorizable assignment? */
5235 gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
5236 if (!stmt)
5237 return false;
5239 scalar_dest = gimple_assign_lhs (stmt);
5240 if (TREE_CODE (scalar_dest) != SSA_NAME)
5241 return false;
5243 code = gimple_assign_rhs_code (stmt);
5244 if (gimple_assign_single_p (stmt)
5245 || code == PAREN_EXPR
5246 || CONVERT_EXPR_CODE_P (code))
5247 op = gimple_assign_rhs1 (stmt);
5248 else
5249 return false;
5251 if (code == VIEW_CONVERT_EXPR)
5252 op = TREE_OPERAND (op, 0);
5254 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5255 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
5257 /* Multiple types in SLP are handled by creating the appropriate number of
5258 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5259 case of SLP. */
5260 if (slp_node)
5261 ncopies = 1;
5262 else
5263 ncopies = vect_get_num_copies (loop_vinfo, vectype);
5265 gcc_assert (ncopies >= 1);
5267 if (!vect_is_simple_use (op, vinfo, &dt[0], &vectype_in))
5269 if (dump_enabled_p ())
5270 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5271 "use not simple.\n");
5272 return false;
5275 /* We can handle NOP_EXPR conversions that do not change the number
5276 of elements or the vector size. */
5277 if ((CONVERT_EXPR_CODE_P (code)
5278 || code == VIEW_CONVERT_EXPR)
5279 && (!vectype_in
5280 || maybe_ne (TYPE_VECTOR_SUBPARTS (vectype_in), nunits)
5281 || maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype)),
5282 GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
5283 return false;
5285 /* We do not handle bit-precision changes. */
5286 if ((CONVERT_EXPR_CODE_P (code)
5287 || code == VIEW_CONVERT_EXPR)
5288 && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
5289 && (!type_has_mode_precision_p (TREE_TYPE (scalar_dest))
5290 || !type_has_mode_precision_p (TREE_TYPE (op)))
5291 /* But a conversion that does not change the bit-pattern is ok. */
5292 && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
5293 > TYPE_PRECISION (TREE_TYPE (op)))
5294 && TYPE_UNSIGNED (TREE_TYPE (op)))
5295 /* Conversion between boolean types of different sizes is
5296 a simple assignment in case their vectypes are the same
5297 boolean vectors. */
5298 && (!VECTOR_BOOLEAN_TYPE_P (vectype)
5299 || !VECTOR_BOOLEAN_TYPE_P (vectype_in)))
5301 if (dump_enabled_p ())
5302 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5303 "type conversion to/from bit-precision "
5304 "unsupported.\n");
5305 return false;
5308 if (!vec_stmt) /* transformation not required. */
5310 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
5311 DUMP_VECT_SCOPE ("vectorizable_assignment");
5312 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec);
5313 return true;
5316 /* Transform. */
5317 if (dump_enabled_p ())
5318 dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");
5320 /* Handle def. */
5321 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5323 /* Handle use. */
5324 for (j = 0; j < ncopies; j++)
5326 /* Handle uses. */
5327 if (j == 0)
5328 vect_get_vec_defs (op, NULL, stmt_info, &vec_oprnds, NULL, slp_node);
5329 else
5330 vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds, NULL);
5332 /* Arguments are ready. Create the new vector stmt. */
5333 stmt_vec_info new_stmt_info = NULL;
5334 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
5336 if (CONVERT_EXPR_CODE_P (code)
5337 || code == VIEW_CONVERT_EXPR)
5338 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
5339 gassign *new_stmt = gimple_build_assign (vec_dest, vop);
5340 new_temp = make_ssa_name (vec_dest, new_stmt);
5341 gimple_assign_set_lhs (new_stmt, new_temp);
5342 new_stmt_info
5343 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
5344 if (slp_node)
5345 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
5348 if (slp_node)
5349 continue;
5351 if (j == 0)
5352 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
5353 else
5354 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
5356 prev_stmt_info = new_stmt_info;
5359 vec_oprnds.release ();
5360 return true;
5364 /* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
5365 either as shift by a scalar or by a vector. */
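/* A typical use (illustrative) is to ask whether left shifts of shorts
   can be vectorized at all on the current target:

     if (vect_supportable_shift (LSHIFT_EXPR, short_integer_type_node))
       ...

   which succeeds if either the vector/scalar or the vector/vector shift
   optab has a handler for the corresponding vector mode.  */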
5367 bool
5368 vect_supportable_shift (enum tree_code code, tree scalar_type)
5371 machine_mode vec_mode;
5372 optab optab;
5373 int icode;
5374 tree vectype;
5376 vectype = get_vectype_for_scalar_type (scalar_type);
5377 if (!vectype)
5378 return false;
5380 optab = optab_for_tree_code (code, vectype, optab_scalar);
5381 if (!optab
5382 || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
5384 optab = optab_for_tree_code (code, vectype, optab_vector);
5385 if (!optab
5386 || (optab_handler (optab, TYPE_MODE (vectype))
5387 == CODE_FOR_nothing))
5388 return false;
5391 vec_mode = TYPE_MODE (vectype);
5392 icode = (int) optab_handler (optab, vec_mode);
5393 if (icode == CODE_FOR_nothing)
5394 return false;
5396 return true;
5400 /* Function vectorizable_shift.
5402 Check if STMT_INFO performs a shift operation that can be vectorized.
5403 If VEC_STMT is also passed, vectorize the STMT_INFO: create a vectorized
5404 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
5405 Return true if STMT_INFO is vectorizable in this way. */
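/* For instance (illustrative): in

     for (i = 0; i < n; i++)
       a[i] = b[i] << s;

   the shift amount s is loop-invariant, so the vector/scalar optab with a
   single scalar second operand can be used; if every element were shifted
   by c[i] instead, the vector/vector optab and a vectorized second operand
   would be needed.  */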
5407 static bool
5408 vectorizable_shift (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
5409 stmt_vec_info *vec_stmt, slp_tree slp_node,
5410 stmt_vector_for_cost *cost_vec)
5412 tree vec_dest;
5413 tree scalar_dest;
5414 tree op0, op1 = NULL;
5415 tree vec_oprnd1 = NULL_TREE;
5416 tree vectype;
5417 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5418 enum tree_code code;
5419 machine_mode vec_mode;
5420 tree new_temp;
5421 optab optab;
5422 int icode;
5423 machine_mode optab_op2_mode;
5424 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
5425 int ndts = 2;
5426 stmt_vec_info prev_stmt_info;
5427 poly_uint64 nunits_in;
5428 poly_uint64 nunits_out;
5429 tree vectype_out;
5430 tree op1_vectype;
5431 int ncopies;
5432 int j, i;
5433 vec<tree> vec_oprnds0 = vNULL;
5434 vec<tree> vec_oprnds1 = vNULL;
5435 tree vop0, vop1;
5436 unsigned int k;
5437 bool scalar_shift_arg = true;
5438 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5439 vec_info *vinfo = stmt_info->vinfo;
5441 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5442 return false;
5444 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5445 && ! vec_stmt)
5446 return false;
5448 /* Is STMT a vectorizable shift operation? */
5449 gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
5450 if (!stmt)
5451 return false;
5453 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
5454 return false;
5456 code = gimple_assign_rhs_code (stmt);
5458 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
5459 || code == RROTATE_EXPR))
5460 return false;
5462 scalar_dest = gimple_assign_lhs (stmt);
5463 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
5464 if (!type_has_mode_precision_p (TREE_TYPE (scalar_dest)))
5466 if (dump_enabled_p ())
5467 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5468 "bit-precision shifts not supported.\n");
5469 return false;
5472 op0 = gimple_assign_rhs1 (stmt);
5473 if (!vect_is_simple_use (op0, vinfo, &dt[0], &vectype))
5475 if (dump_enabled_p ())
5476 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5477 "use not simple.\n");
5478 return false;
5480 /* If op0 is an external or constant def, use a vector type with
5481 the same size as the output vector type. */
5482 if (!vectype)
5483 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
5484 if (vec_stmt)
5485 gcc_assert (vectype);
5486 if (!vectype)
5488 if (dump_enabled_p ())
5489 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5490 "no vectype for scalar type\n");
5491 return false;
5494 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
5495 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
5496 if (maybe_ne (nunits_out, nunits_in))
5497 return false;
5499 op1 = gimple_assign_rhs2 (stmt);
5500 stmt_vec_info op1_def_stmt_info;
5501 if (!vect_is_simple_use (op1, vinfo, &dt[1], &op1_vectype,
5502 &op1_def_stmt_info))
5504 if (dump_enabled_p ())
5505 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5506 "use not simple.\n");
5507 return false;
5510 /* Multiple types in SLP are handled by creating the appropriate number of
5511 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5512 case of SLP. */
5513 if (slp_node)
5514 ncopies = 1;
5515 else
5516 ncopies = vect_get_num_copies (loop_vinfo, vectype);
5518 gcc_assert (ncopies >= 1);
5520 /* Determine whether the shift amount is a vector or a scalar. If the
5521 shift/rotate amount is a vector, use the vector/vector shift optabs. */
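/* For illustration (the source loops are hypothetical): in

     for (i = 0; i < n; i++) a[i] = b[i] << c[i];

   the shift amount is an internal def, i.e. a vector, so the
   vector/vector optab is needed, whereas in

     for (i = 0; i < n; i++) a[i] = b[i] << k;

   the amount is loop-invariant and a single scalar can feed the
   vector/scalar optab.  */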
5523 if ((dt[1] == vect_internal_def
5524 || dt[1] == vect_induction_def)
5525 && !slp_node)
5526 scalar_shift_arg = false;
5527 else if (dt[1] == vect_constant_def
5528 || dt[1] == vect_external_def
5529 || dt[1] == vect_internal_def)
5531 /* In SLP we need to check whether the shift count is the same
5532 for all statements; in loops, a constant or invariant count
5533 is always a scalar shift. */
5534 if (slp_node)
5536 vec<stmt_vec_info> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
5537 stmt_vec_info slpstmt_info;
5539 FOR_EACH_VEC_ELT (stmts, k, slpstmt_info)
5541 gassign *slpstmt = as_a <gassign *> (slpstmt_info->stmt);
5542 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
5543 scalar_shift_arg = false;
5547 /* If the shift amount is computed by a pattern stmt we cannot
5548 use the scalar amount directly, so give up and use a vector
5549 shift. */
5550 if (op1_def_stmt_info && is_pattern_stmt_p (op1_def_stmt_info))
5551 scalar_shift_arg = false;
5553 else
5555 if (dump_enabled_p ())
5556 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5557 "operand mode requires invariant argument.\n");
5558 return false;
5561 /* Vector shifted by vector. */
5562 if (!scalar_shift_arg)
5564 optab = optab_for_tree_code (code, vectype, optab_vector);
5565 if (dump_enabled_p ())
5566 dump_printf_loc (MSG_NOTE, vect_location,
5567 "vector/vector shift/rotate found.\n");
5569 if (!op1_vectype)
5570 op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
5571 if (op1_vectype == NULL_TREE
5572 || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
5574 if (dump_enabled_p ())
5575 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5576 "unusable type for last operand in"
5577 " vector/vector shift/rotate.\n");
5578 return false;
5581 /* See if the machine has a vector shifted by scalar insn and if not
5582 then see if it has a vector shifted by vector insn. */
5583 else
5585 optab = optab_for_tree_code (code, vectype, optab_scalar);
5586 if (optab
5587 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
5589 if (dump_enabled_p ())
5590 dump_printf_loc (MSG_NOTE, vect_location,
5591 "vector/scalar shift/rotate found.\n");
5593 else
5595 optab = optab_for_tree_code (code, vectype, optab_vector);
5596 if (optab
5597 && (optab_handler (optab, TYPE_MODE (vectype))
5598 != CODE_FOR_nothing))
5600 scalar_shift_arg = false;
5602 if (dump_enabled_p ())
5603 dump_printf_loc (MSG_NOTE, vect_location,
5604 "vector/vector shift/rotate found.\n");
5606 /* Unlike the other binary operators, shifts/rotates have
5607 the rhs being int, instead of the same type as the lhs,
5608 so make sure the scalar is the right type if we are
5609 dealing with vectors of long long/long/short/char. */
5610 if (dt[1] == vect_constant_def)
5611 op1 = fold_convert (TREE_TYPE (vectype), op1);
5612 else if (!useless_type_conversion_p (TREE_TYPE (vectype),
5613 TREE_TYPE (op1)))
5615 if (slp_node
5616 && TYPE_MODE (TREE_TYPE (vectype))
5617 != TYPE_MODE (TREE_TYPE (op1)))
5619 if (dump_enabled_p ())
5620 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5621 "unusable type for last operand in"
5622 " vector/vector shift/rotate.\n");
5623 return false;
5625 if (vec_stmt && !slp_node)
5627 op1 = fold_convert (TREE_TYPE (vectype), op1);
5628 op1 = vect_init_vector (stmt_info, op1,
5629 TREE_TYPE (vectype), NULL);
5636 /* Supportable by target? */
5637 if (!optab)
5639 if (dump_enabled_p ())
5640 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5641 "no optab.\n");
5642 return false;
5644 vec_mode = TYPE_MODE (vectype);
5645 icode = (int) optab_handler (optab, vec_mode);
5646 if (icode == CODE_FOR_nothing)
5648 if (dump_enabled_p ())
5649 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5650 "op not supported by target.\n");
5651 /* Check only during analysis. */
5652 if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
5653 || (!vec_stmt
5654 && !vect_worthwhile_without_simd_p (vinfo, code)))
5655 return false;
5656 if (dump_enabled_p ())
5657 dump_printf_loc (MSG_NOTE, vect_location,
5658 "proceeding using word mode.\n");
5661 /* Worthwhile without SIMD support? Check only during analysis. */
5662 if (!vec_stmt
5663 && !VECTOR_MODE_P (TYPE_MODE (vectype))
5664 && !vect_worthwhile_without_simd_p (vinfo, code))
5666 if (dump_enabled_p ())
5667 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5668 "not worthwhile without SIMD support.\n");
5669 return false;
5672 if (!vec_stmt) /* transformation not required. */
5674 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
5675 DUMP_VECT_SCOPE ("vectorizable_shift");
5676 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec);
5677 return true;
5680 /* Transform. */
5682 if (dump_enabled_p ())
5683 dump_printf_loc (MSG_NOTE, vect_location,
5684 "transform binary/unary operation.\n");
5686 /* Handle def. */
5687 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5689 prev_stmt_info = NULL;
5690 for (j = 0; j < ncopies; j++)
5692 /* Handle uses. */
5693 if (j == 0)
5695 if (scalar_shift_arg)
5697 /* Vector shl and shr insn patterns can be defined with scalar
5698 operand 2 (shift operand). In this case, use constant or loop
5699 invariant op1 directly, without extending it to vector mode
5700 first. */
5701 optab_op2_mode = insn_data[icode].operand[2].mode;
5702 if (!VECTOR_MODE_P (optab_op2_mode))
5704 if (dump_enabled_p ())
5705 dump_printf_loc (MSG_NOTE, vect_location,
5706 "operand 1 using scalar mode.\n");
5707 vec_oprnd1 = op1;
5708 vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
5709 vec_oprnds1.quick_push (vec_oprnd1);
5710 if (slp_node)
5712 /* Store vec_oprnd1 for every vector stmt to be created
5713 for SLP_NODE. We check during the analysis that all
5714 the shift arguments are the same.
5715 TODO: Allow different constants for different vector
5716 stmts generated for an SLP instance. */
5717 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
5718 vec_oprnds1.quick_push (vec_oprnd1);
5723 /* vec_oprnd1 is available if operand 1 should be of a scalar type
5724 (a special case for certain kinds of vector shifts); otherwise,
5725 operand 1 should be of a vector type (the usual case). */
5726 if (vec_oprnd1)
5727 vect_get_vec_defs (op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL,
5728 slp_node);
5729 else
5730 vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0, &vec_oprnds1,
5731 slp_node);
5733 else
5734 vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds0, &vec_oprnds1);
5736 /* Arguments are ready. Create the new vector stmt. */
5737 stmt_vec_info new_stmt_info = NULL;
5738 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
5740 vop1 = vec_oprnds1[i];
5741 gassign *new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
5742 new_temp = make_ssa_name (vec_dest, new_stmt);
5743 gimple_assign_set_lhs (new_stmt, new_temp);
5744 new_stmt_info
5745 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
5746 if (slp_node)
5747 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
5750 if (slp_node)
5751 continue;
5753 if (j == 0)
5754 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
5755 else
5756 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
5757 prev_stmt_info = new_stmt_info;
5760 vec_oprnds0.release ();
5761 vec_oprnds1.release ();
5763 return true;
5767 /* Function vectorizable_operation.
5769 Check if STMT_INFO performs a binary, unary or ternary operation that can
5770 be vectorized.
5771 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
5772 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
5773 Return true if STMT_INFO is vectorizable in this way. */
5775 static bool
5776 vectorizable_operation (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
5777 stmt_vec_info *vec_stmt, slp_tree slp_node,
5778 stmt_vector_for_cost *cost_vec)
5780 tree vec_dest;
5781 tree scalar_dest;
5782 tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
5783 tree vectype;
5784 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5785 enum tree_code code, orig_code;
5786 machine_mode vec_mode;
5787 tree new_temp;
5788 int op_type;
5789 optab optab;
5790 bool target_support_p;
5791 enum vect_def_type dt[3]
5792 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
5793 int ndts = 3;
5794 stmt_vec_info prev_stmt_info;
5795 poly_uint64 nunits_in;
5796 poly_uint64 nunits_out;
5797 tree vectype_out;
5798 int ncopies;
5799 int j, i;
5800 vec<tree> vec_oprnds0 = vNULL;
5801 vec<tree> vec_oprnds1 = vNULL;
5802 vec<tree> vec_oprnds2 = vNULL;
5803 tree vop0, vop1, vop2;
5804 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5805 vec_info *vinfo = stmt_info->vinfo;
5807 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5808 return false;
5810 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5811 && ! vec_stmt)
5812 return false;
5814 /* Is STMT a vectorizable unary/binary/ternary operation? */
5815 gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
5816 if (!stmt)
5817 return false;
5819 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
5820 return false;
5822 orig_code = code = gimple_assign_rhs_code (stmt);
5824 /* For pointer addition and subtraction, we should use the normal
5825 plus and minus for the vector operation. */
5826 if (code == POINTER_PLUS_EXPR)
5827 code = PLUS_EXPR;
5828 if (code == POINTER_DIFF_EXPR)
5829 code = MINUS_EXPR;
5831 /* Support only unary, binary or ternary operations. */
5832 op_type = TREE_CODE_LENGTH (code);
5833 if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
5835 if (dump_enabled_p ())
5836 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5837 "num. args = %d (not unary/binary/ternary op).\n",
5838 op_type);
5839 return false;
5842 scalar_dest = gimple_assign_lhs (stmt);
5843 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
5845 /* Most operations cannot handle bit-precision types without extra
5846 truncations. */
5847 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
5848 && !type_has_mode_precision_p (TREE_TYPE (scalar_dest))
5849 /* The exceptions are bitwise binary operations. */
5850 && code != BIT_IOR_EXPR
5851 && code != BIT_XOR_EXPR
5852 && code != BIT_AND_EXPR)
5854 if (dump_enabled_p ())
5855 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5856 "bit-precision arithmetic not supported.\n");
5857 return false;
5860 op0 = gimple_assign_rhs1 (stmt);
5861 if (!vect_is_simple_use (op0, vinfo, &dt[0], &vectype))
5863 if (dump_enabled_p ())
5864 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5865 "use not simple.\n");
5866 return false;
5868 /* If op0 is an external or constant def, use a vector type with
5869 the same size as the output vector type. */
5870 if (!vectype)
5872 /* For a boolean type we cannot determine the vectype from an
5873 invariant value (we don't know whether it is a vector
5874 of booleans or a vector of integers). We use the output
5875 vectype because operations on booleans don't change the
5876 type. */
5877 if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op0)))
5879 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (scalar_dest)))
5881 if (dump_enabled_p ())
5882 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5883 "not supported operation on bool value.\n");
5884 return false;
5886 vectype = vectype_out;
5888 else
5889 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
5891 if (vec_stmt)
5892 gcc_assert (vectype);
5893 if (!vectype)
5895 if (dump_enabled_p ())
5897 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5898 "no vectype for scalar type ");
5899 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
5900 TREE_TYPE (op0));
5901 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
5904 return false;
5907 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
5908 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
5909 if (maybe_ne (nunits_out, nunits_in))
5910 return false;
5912 if (op_type == binary_op || op_type == ternary_op)
5914 op1 = gimple_assign_rhs2 (stmt);
5915 if (!vect_is_simple_use (op1, vinfo, &dt[1]))
5917 if (dump_enabled_p ())
5918 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5919 "use not simple.\n");
5920 return false;
5923 if (op_type == ternary_op)
5925 op2 = gimple_assign_rhs3 (stmt);
5926 if (!vect_is_simple_use (op2, vinfo, &dt[2]))
5928 if (dump_enabled_p ())
5929 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5930 "use not simple.\n");
5931 return false;
5935 /* Multiple types in SLP are handled by creating the appropriate number of
5936 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5937 case of SLP. */
5938 if (slp_node)
5939 ncopies = 1;
5940 else
5941 ncopies = vect_get_num_copies (loop_vinfo, vectype);
5943 gcc_assert (ncopies >= 1);
5945 /* Shifts are handled in vectorizable_shift (). */
5946 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
5947 || code == RROTATE_EXPR)
5948 return false;
5950 /* Supportable by target? */
5952 vec_mode = TYPE_MODE (vectype);
5953 if (code == MULT_HIGHPART_EXPR)
5954 target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
5955 else
5957 optab = optab_for_tree_code (code, vectype, optab_default);
5958 if (!optab)
5960 if (dump_enabled_p ())
5961 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5962 "no optab.\n");
5963 return false;
5965 target_support_p = (optab_handler (optab, vec_mode)
5966 != CODE_FOR_nothing);
5969 if (!target_support_p)
5971 if (dump_enabled_p ())
5972 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5973 "op not supported by target.\n");
5974 /* Check only during analysis. */
5975 if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
5976 || (!vec_stmt && !vect_worthwhile_without_simd_p (vinfo, code)))
5977 return false;
5978 if (dump_enabled_p ())
5979 dump_printf_loc (MSG_NOTE, vect_location,
5980 "proceeding using word mode.\n");
5983 /* Worthwhile without SIMD support? Check only during analysis. */
5984 if (!VECTOR_MODE_P (vec_mode)
5985 && !vec_stmt
5986 && !vect_worthwhile_without_simd_p (vinfo, code))
5988 if (dump_enabled_p ())
5989 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5990 "not worthwhile without SIMD support.\n");
5991 return false;
5994 if (!vec_stmt) /* transformation not required. */
5996 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
5997 DUMP_VECT_SCOPE ("vectorizable_operation");
5998 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec);
5999 return true;
6002 /* Transform. */
6004 if (dump_enabled_p ())
6005 dump_printf_loc (MSG_NOTE, vect_location,
6006 "transform binary/unary operation.\n");
6008 /* POINTER_DIFF_EXPR has pointer arguments which are vectorized as
6009 vectors with unsigned elements, but the result is signed. So, we
6010 need to compute the MINUS_EXPR into a vectype temporary and
6011 VIEW_CONVERT_EXPR it into the final vectype_out result. */
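/* Roughly (illustrative GIMPLE, names invented): for  d = p - q  with
   pointer operands the two statements built below are

     vect_tmp = vect_p - vect_q;                      unsigned-element vectype
     vect_d   = VIEW_CONVERT_EXPR<vectype_out>(vect_tmp);   signed result  */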
6012 tree vec_cvt_dest = NULL_TREE;
6013 if (orig_code == POINTER_DIFF_EXPR)
6015 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6016 vec_cvt_dest = vect_create_destination_var (scalar_dest, vectype_out);
6018 /* Handle def. */
6019 else
6020 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
6022 /* In case the vectorization factor (VF) is bigger than the number
6023 of elements that we can fit in a vectype (nunits), we have to generate
6024 more than one vector stmt, i.e. we need to "unroll" the
6025 vector stmt by a factor VF/nunits. In doing so, we record a pointer
6026 from one copy of the vector stmt to the next, in the field
6027 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
6028 stages to find the correct vector defs to be used when vectorizing
6029 stmts that use the defs of the current stmt. The example below
6030 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
6031 we need to create 4 vectorized stmts):
6033 before vectorization:
6034 RELATED_STMT VEC_STMT
6035 S1: x = memref - -
6036 S2: z = x + 1 - -
6038 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
6039 there):
6040 RELATED_STMT VEC_STMT
6041 VS1_0: vx0 = memref0 VS1_1 -
6042 VS1_1: vx1 = memref1 VS1_2 -
6043 VS1_2: vx2 = memref2 VS1_3 -
6044 VS1_3: vx3 = memref3 - -
6045 S1: x = load - VS1_0
6046 S2: z = x + 1 - -
6048 step2: vectorize stmt S2 (done here):
6049 To vectorize stmt S2 we first need to find the relevant vector
6050 def for the first operand 'x'. This is, as usual, obtained from
6051 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
6052 that defines 'x' (S1). This way we find the stmt VS1_0, and the
6053 relevant vector def 'vx0'. Having found 'vx0' we can generate
6054 the vector stmt VS2_0, and as usual, record it in the
6055 STMT_VINFO_VEC_STMT of stmt S2.
6056 When creating the second copy (VS2_1), we obtain the relevant vector
6057 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
6058 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
6059 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
6060 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
6061 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
6062 chain of stmts and pointers:
6063 RELATED_STMT VEC_STMT
6064 VS1_0: vx0 = memref0 VS1_1 -
6065 VS1_1: vx1 = memref1 VS1_2 -
6066 VS1_2: vx2 = memref2 VS1_3 -
6067 VS1_3: vx3 = memref3 - -
6068 S1: x = load - VS1_0
6069 VS2_0: vz0 = vx0 + v1 VS2_1 -
6070 VS2_1: vz1 = vx1 + v1 VS2_2 -
6071 VS2_2: vz2 = vx2 + v1 VS2_3 -
6072 VS2_3: vz3 = vx3 + v1 - -
6073 S2: z = x + 1 - VS2_0 */
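/* A hypothetical helper (not present in this file) that captures how the
   chain documented above is consumed: the vector def for copy J of a use
   is found by starting at STMT_VINFO_VEC_STMT of the defining scalar
   stmt and following STMT_VINFO_RELATED_STMT J times.

     static stmt_vec_info
     nth_vector_copy (stmt_vec_info def_info, unsigned int j)
     {
       stmt_vec_info vs = STMT_VINFO_VEC_STMT (def_info);
       for (unsigned int k = 0; k < j && vs; ++k)
         vs = STMT_VINFO_RELATED_STMT (vs);
       return vs;
     }
*/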
6075 prev_stmt_info = NULL;
6076 for (j = 0; j < ncopies; j++)
6078 /* Handle uses. */
6079 if (j == 0)
6081 if (op_type == binary_op)
6082 vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0, &vec_oprnds1,
6083 slp_node);
6084 else if (op_type == ternary_op)
6086 if (slp_node)
6088 auto_vec<tree> ops(3);
6089 ops.quick_push (op0);
6090 ops.quick_push (op1);
6091 ops.quick_push (op2);
6092 auto_vec<vec<tree> > vec_defs(3);
6093 vect_get_slp_defs (ops, slp_node, &vec_defs);
6094 vec_oprnds0 = vec_defs[0];
6095 vec_oprnds1 = vec_defs[1];
6096 vec_oprnds2 = vec_defs[2];
6098 else
6100 vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0,
6101 &vec_oprnds1, NULL);
6102 vect_get_vec_defs (op2, NULL_TREE, stmt_info, &vec_oprnds2,
6103 NULL, NULL);
6106 else
6107 vect_get_vec_defs (op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL,
6108 slp_node);
6110 else
6112 vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds0, &vec_oprnds1);
6113 if (op_type == ternary_op)
6115 tree vec_oprnd = vec_oprnds2.pop ();
6116 vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (vinfo,
6117 vec_oprnd));
6121 /* Arguments are ready. Create the new vector stmt. */
6122 stmt_vec_info new_stmt_info = NULL;
6123 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
6125 vop1 = ((op_type == binary_op || op_type == ternary_op)
6126 ? vec_oprnds1[i] : NULL_TREE);
6127 vop2 = ((op_type == ternary_op)
6128 ? vec_oprnds2[i] : NULL_TREE);
6129 gassign *new_stmt = gimple_build_assign (vec_dest, code,
6130 vop0, vop1, vop2);
6131 new_temp = make_ssa_name (vec_dest, new_stmt);
6132 gimple_assign_set_lhs (new_stmt, new_temp);
6133 new_stmt_info
6134 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
6135 if (vec_cvt_dest)
6137 new_temp = build1 (VIEW_CONVERT_EXPR, vectype_out, new_temp);
6138 gassign *new_stmt
6139 = gimple_build_assign (vec_cvt_dest, VIEW_CONVERT_EXPR,
6140 new_temp);
6141 new_temp = make_ssa_name (vec_cvt_dest, new_stmt);
6142 gimple_assign_set_lhs (new_stmt, new_temp);
6143 new_stmt_info
6144 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
6146 if (slp_node)
6147 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
6150 if (slp_node)
6151 continue;
6153 if (j == 0)
6154 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
6155 else
6156 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
6157 prev_stmt_info = new_stmt_info;
6160 vec_oprnds0.release ();
6161 vec_oprnds1.release ();
6162 vec_oprnds2.release ();
6164 return true;
6167 /* A helper function to ensure data reference DR_INFO's base alignment. */
6169 static void
6170 ensure_base_align (dr_vec_info *dr_info)
6172 if (dr_info->misalignment == DR_MISALIGNMENT_UNINITIALIZED)
6173 return;
6175 if (dr_info->base_misaligned)
6177 tree base_decl = dr_info->base_decl;
6179 unsigned int align_base_to
6180 = DR_TARGET_ALIGNMENT (dr_info) * BITS_PER_UNIT;
6182 if (decl_in_symtab_p (base_decl))
6183 symtab_node::get (base_decl)->increase_alignment (align_base_to);
6184 else
6186 SET_DECL_ALIGN (base_decl, align_base_to);
6187 DECL_USER_ALIGN (base_decl) = 1;
6189 dr_info->base_misaligned = false;
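/* Illustrative effect (not from the source): if the base object is

     static float a[256];

   and DR_TARGET_ALIGNMENT is 32 bytes, the decl ends up as if it had
   been declared

     static float a[256] __attribute__ ((aligned (32)));

   either through its symtab node or directly via SET_DECL_ALIGN with
   DECL_USER_ALIGN set.  */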
6194 /* Function get_group_alias_ptr_type.
6196 Return the alias type for the group starting at FIRST_STMT_INFO. */
6198 static tree
6199 get_group_alias_ptr_type (stmt_vec_info first_stmt_info)
6201 struct data_reference *first_dr, *next_dr;
6203 first_dr = STMT_VINFO_DATA_REF (first_stmt_info);
6204 stmt_vec_info next_stmt_info = DR_GROUP_NEXT_ELEMENT (first_stmt_info);
6205 while (next_stmt_info)
6207 next_dr = STMT_VINFO_DATA_REF (next_stmt_info);
6208 if (get_alias_set (DR_REF (first_dr))
6209 != get_alias_set (DR_REF (next_dr)))
6211 if (dump_enabled_p ())
6212 dump_printf_loc (MSG_NOTE, vect_location,
6213 "conflicting alias set types.\n");
6214 return ptr_type_node;
6216 next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
6218 return reference_alias_ptr_type (DR_REF (first_dr));
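/* Example behaviour (illustrative): for an interleaved group such as

     p->i = x;      int member
     p->f = y;      float member

   the two references have different alias sets, so ptr_type_node is
   returned and the vectorized accesses may alias anything; a group of
   stores to a single int array keeps the int alias pointer type.  */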
6222 /* Function vectorizable_store.
6224 Check if STMT_INFO defines a non-scalar data-ref (array/pointer/structure)
6225 that can be vectorized.
6226 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
6227 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
6228 Return true if STMT_INFO is vectorizable in this way. */
6230 static bool
6231 vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
6232 stmt_vec_info *vec_stmt, slp_tree slp_node,
6233 stmt_vector_for_cost *cost_vec)
6235 tree data_ref;
6236 tree op;
6237 tree vec_oprnd = NULL_TREE;
6238 tree elem_type;
6239 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6240 struct loop *loop = NULL;
6241 machine_mode vec_mode;
6242 tree dummy;
6243 enum dr_alignment_support alignment_support_scheme;
6244 enum vect_def_type rhs_dt = vect_unknown_def_type;
6245 enum vect_def_type mask_dt = vect_unknown_def_type;
6246 stmt_vec_info prev_stmt_info = NULL;
6247 tree dataref_ptr = NULL_TREE;
6248 tree dataref_offset = NULL_TREE;
6249 gimple *ptr_incr = NULL;
6250 int ncopies;
6251 int j;
6252 stmt_vec_info first_stmt_info;
6253 bool grouped_store;
6254 unsigned int group_size, i;
6255 vec<tree> oprnds = vNULL;
6256 vec<tree> result_chain = vNULL;
6257 tree offset = NULL_TREE;
6258 vec<tree> vec_oprnds = vNULL;
6259 bool slp = (slp_node != NULL);
6260 unsigned int vec_num;
6261 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
6262 vec_info *vinfo = stmt_info->vinfo;
6263 tree aggr_type;
6264 gather_scatter_info gs_info;
6265 poly_uint64 vf;
6266 vec_load_store_type vls_type;
6267 tree ref_type;
6269 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
6270 return false;
6272 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
6273 && ! vec_stmt)
6274 return false;
6276 /* Is vectorizable store? */
6278 tree mask = NULL_TREE, mask_vectype = NULL_TREE;
6279 if (gassign *assign = dyn_cast <gassign *> (stmt_info->stmt))
6281 tree scalar_dest = gimple_assign_lhs (assign);
6282 if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
6283 && is_pattern_stmt_p (stmt_info))
6284 scalar_dest = TREE_OPERAND (scalar_dest, 0);
6285 if (TREE_CODE (scalar_dest) != ARRAY_REF
6286 && TREE_CODE (scalar_dest) != BIT_FIELD_REF
6287 && TREE_CODE (scalar_dest) != INDIRECT_REF
6288 && TREE_CODE (scalar_dest) != COMPONENT_REF
6289 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
6290 && TREE_CODE (scalar_dest) != REALPART_EXPR
6291 && TREE_CODE (scalar_dest) != MEM_REF)
6292 return false;
6294 else
6296 gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
6297 if (!call || !gimple_call_internal_p (call))
6298 return false;
6300 internal_fn ifn = gimple_call_internal_fn (call);
6301 if (!internal_store_fn_p (ifn))
6302 return false;
6304 if (slp_node != NULL)
6306 if (dump_enabled_p ())
6307 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6308 "SLP of masked stores not supported.\n");
6309 return false;
6312 int mask_index = internal_fn_mask_index (ifn);
6313 if (mask_index >= 0)
6315 mask = gimple_call_arg (call, mask_index);
6316 if (!vect_check_load_store_mask (stmt_info, mask, &mask_dt,
6317 &mask_vectype))
6318 return false;
6322 op = vect_get_store_rhs (stmt_info);
6324 /* Cannot have hybrid store SLP -- that would mean storing to the
6325 same location twice. */
6326 gcc_assert (slp == PURE_SLP_STMT (stmt_info));
6328 tree vectype = STMT_VINFO_VECTYPE (stmt_info), rhs_vectype = NULL_TREE;
6329 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
6331 if (loop_vinfo)
6333 loop = LOOP_VINFO_LOOP (loop_vinfo);
6334 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
6336 else
6337 vf = 1;
6339 /* Multiple types in SLP are handled by creating the appropriate number of
6340 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
6341 case of SLP. */
6342 if (slp)
6343 ncopies = 1;
6344 else
6345 ncopies = vect_get_num_copies (loop_vinfo, vectype);
6347 gcc_assert (ncopies >= 1);
6349 /* FORNOW. This restriction should be relaxed. */
6350 if (loop && nested_in_vect_loop_p (loop, stmt_info) && ncopies > 1)
6352 if (dump_enabled_p ())
6353 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6354 "multiple types in nested loop.\n");
6355 return false;
6358 if (!vect_check_store_rhs (stmt_info, op, &rhs_dt, &rhs_vectype, &vls_type))
6359 return false;
6361 elem_type = TREE_TYPE (vectype);
6362 vec_mode = TYPE_MODE (vectype);
6364 if (!STMT_VINFO_DATA_REF (stmt_info))
6365 return false;
6367 vect_memory_access_type memory_access_type;
6368 if (!get_load_store_type (stmt_info, vectype, slp, mask, vls_type, ncopies,
6369 &memory_access_type, &gs_info))
6370 return false;
6372 if (mask)
6374 if (memory_access_type == VMAT_CONTIGUOUS)
6376 if (!VECTOR_MODE_P (vec_mode)
6377 || !can_vec_mask_load_store_p (vec_mode,
6378 TYPE_MODE (mask_vectype), false))
6379 return false;
6381 else if (memory_access_type != VMAT_LOAD_STORE_LANES
6382 && (memory_access_type != VMAT_GATHER_SCATTER || gs_info.decl))
6384 if (dump_enabled_p ())
6385 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6386 "unsupported access type for masked store.\n");
6387 return false;
6390 else
6392 /* FORNOW. In some cases we can vectorize even if the data type is not
6393 supported (e.g. array initialization with 0). */
6394 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
6395 return false;
6398 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info), *first_dr_info = NULL;
6399 grouped_store = (STMT_VINFO_GROUPED_ACCESS (stmt_info)
6400 && memory_access_type != VMAT_GATHER_SCATTER
6401 && (slp || memory_access_type != VMAT_CONTIGUOUS));
6402 if (grouped_store)
6404 first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
6405 first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
6406 group_size = DR_GROUP_SIZE (first_stmt_info);
6408 else
6410 first_stmt_info = stmt_info;
6411 first_dr_info = dr_info;
6412 group_size = vec_num = 1;
6415 if (!vec_stmt) /* transformation not required. */
6417 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
6419 if (loop_vinfo
6420 && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
6421 check_load_store_masking (loop_vinfo, vectype, vls_type, group_size,
6422 memory_access_type, &gs_info);
6424 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
6425 vect_model_store_cost (stmt_info, ncopies, rhs_dt, memory_access_type,
6426 vls_type, slp_node, cost_vec);
6427 return true;
6429 gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
6431 /* Transform. */
6433 ensure_base_align (dr_info);
6435 if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl)
6437 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, src;
6438 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
6439 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
6440 tree ptr, mask, var, scale, perm_mask = NULL_TREE;
6441 edge pe = loop_preheader_edge (loop);
6442 gimple_seq seq;
6443 basic_block new_bb;
6444 enum { NARROW, NONE, WIDEN } modifier;
6445 poly_uint64 scatter_off_nunits
6446 = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);
6448 if (known_eq (nunits, scatter_off_nunits))
6449 modifier = NONE;
6450 else if (known_eq (nunits * 2, scatter_off_nunits))
6452 modifier = WIDEN;
6454 /* Currently gathers and scatters are only supported for
6455 fixed-length vectors. */
6456 unsigned int count = scatter_off_nunits.to_constant ();
6457 vec_perm_builder sel (count, count, 1);
6458 for (i = 0; i < (unsigned int) count; ++i)
6459 sel.quick_push (i | (count / 2));
6461 vec_perm_indices indices (sel, 1, count);
6462 perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype,
6463 indices);
6464 gcc_assert (perm_mask != NULL_TREE);
6466 else if (known_eq (nunits, scatter_off_nunits * 2))
6468 modifier = NARROW;
6470 /* Currently gathers and scatters are only supported for
6471 fixed-length vectors. */
6472 unsigned int count = nunits.to_constant ();
6473 vec_perm_builder sel (count, count, 1);
6474 for (i = 0; i < (unsigned int) count; ++i)
6475 sel.quick_push (i | (count / 2));
6477 vec_perm_indices indices (sel, 2, count);
6478 perm_mask = vect_gen_perm_mask_checked (vectype, indices);
6479 gcc_assert (perm_mask != NULL_TREE);
6480 ncopies *= 2;
6482 else
6483 gcc_unreachable ();
6485 rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
6486 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6487 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6488 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6489 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6490 scaletype = TREE_VALUE (arglist);
6492 gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
6493 && TREE_CODE (rettype) == VOID_TYPE);
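/* For illustration (hedged, not a specific target built-in): the scatter
   built-in is expected to have a prototype of the form

     void scatter_fn (ptrtype base, masktype mask, idxtype index,
                      srctype source, scaletype scale);

   which is exactly the order the TREE_CHAIN walk above decodes and the
   order in which the call is built further down.  */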
6495 ptr = fold_convert (ptrtype, gs_info.base);
6496 if (!is_gimple_min_invariant (ptr))
6498 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
6499 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
6500 gcc_assert (!new_bb);
6503 /* Currently we support only unconditional scatter stores,
6504 so mask should be all ones. */
6505 mask = build_int_cst (masktype, -1);
6506 mask = vect_init_vector (stmt_info, mask, masktype, NULL);
6508 scale = build_int_cst (scaletype, gs_info.scale);
6510 prev_stmt_info = NULL;
6511 for (j = 0; j < ncopies; ++j)
6513 if (j == 0)
6515 src = vec_oprnd1
6516 = vect_get_vec_def_for_operand (op, stmt_info);
6517 op = vec_oprnd0
6518 = vect_get_vec_def_for_operand (gs_info.offset, stmt_info);
6520 else if (modifier != NONE && (j & 1))
6522 if (modifier == WIDEN)
6524 src = vec_oprnd1
6525 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd1);
6526 op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
6527 stmt_info, gsi);
6529 else if (modifier == NARROW)
6531 src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask,
6532 stmt_info, gsi);
6533 op = vec_oprnd0
6534 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd0);
6536 else
6537 gcc_unreachable ();
6539 else
6541 src = vec_oprnd1
6542 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd1);
6543 op = vec_oprnd0
6544 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd0);
6547 if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
6549 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src)),
6550 TYPE_VECTOR_SUBPARTS (srctype)));
6551 var = vect_get_new_ssa_name (srctype, vect_simple_var);
6552 src = build1 (VIEW_CONVERT_EXPR, srctype, src);
6553 gassign *new_stmt
6554 = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
6555 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
6556 src = var;
6559 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
6561 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
6562 TYPE_VECTOR_SUBPARTS (idxtype)));
6563 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
6564 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
6565 gassign *new_stmt
6566 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
6567 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
6568 op = var;
6571 gcall *new_stmt
6572 = gimple_build_call (gs_info.decl, 5, ptr, mask, op, src, scale);
6573 stmt_vec_info new_stmt_info
6574 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
6576 if (prev_stmt_info == NULL)
6577 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
6578 else
6579 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
6580 prev_stmt_info = new_stmt_info;
6582 return true;
6585 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
6586 DR_GROUP_STORE_COUNT (DR_GROUP_FIRST_ELEMENT (stmt_info))++;
6588 if (grouped_store)
6590 /* FORNOW */
6591 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt_info));
6593 /* We vectorize all the stmts of the interleaving group when we
6594 reach the last stmt in the group. */
6595 if (DR_GROUP_STORE_COUNT (first_stmt_info)
6596 < DR_GROUP_SIZE (first_stmt_info)
6597 && !slp)
6599 *vec_stmt = NULL;
6600 return true;
6603 if (slp)
6605 grouped_store = false;
6606 /* VEC_NUM is the number of vect stmts to be created for this
6607 group. */
6608 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6609 first_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[0];
6610 gcc_assert (DR_GROUP_FIRST_ELEMENT (first_stmt_info)
6611 == first_stmt_info);
6612 first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
6613 op = vect_get_store_rhs (first_stmt_info);
6615 else
6616 /* VEC_NUM is the number of vect stmts to be created for this
6617 group. */
6618 vec_num = group_size;
6620 ref_type = get_group_alias_ptr_type (first_stmt_info);
6622 else
6623 ref_type = reference_alias_ptr_type (DR_REF (first_dr_info->dr));
6625 if (dump_enabled_p ())
6626 dump_printf_loc (MSG_NOTE, vect_location,
6627 "transform store. ncopies = %d\n", ncopies);
6629 if (memory_access_type == VMAT_ELEMENTWISE
6630 || memory_access_type == VMAT_STRIDED_SLP)
6632 gimple_stmt_iterator incr_gsi;
6633 bool insert_after;
6634 gimple *incr;
6635 tree offvar;
6636 tree ivstep;
6637 tree running_off;
6638 tree stride_base, stride_step, alias_off;
6639 tree vec_oprnd;
6640 unsigned int g;
6641 /* Checked by get_load_store_type. */
6642 unsigned int const_nunits = nunits.to_constant ();
6644 gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
6645 gcc_assert (!nested_in_vect_loop_p (loop, stmt_info));
6647 stride_base
6648 = fold_build_pointer_plus
6649 (DR_BASE_ADDRESS (first_dr_info->dr),
6650 size_binop (PLUS_EXPR,
6651 convert_to_ptrofftype (DR_OFFSET (first_dr_info->dr)),
6652 convert_to_ptrofftype (DR_INIT (first_dr_info->dr))));
6653 stride_step = fold_convert (sizetype, DR_STEP (first_dr_info->dr));
6655 /* For a store with loop-invariant (but other than power-of-2)
6656 stride (i.e. not a grouped access) like so:
6658 for (i = 0; i < n; i += stride)
6659 array[i] = ...;
6661 we generate a new induction variable and new stores from
6662 the components of the (vectorized) rhs:
6664 for (j = 0; ; j += VF*stride)
6665 vectemp = ...;
6666 tmp1 = vectemp[0];
6667 array[j] = tmp1;
6668 tmp2 = vectemp[1];
6669 array[j + stride] = tmp2;
... */
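/* A concrete instance of the lowering above (illustrative only), for a
   non-grouped store with stride == 3 and a four-element vectype:

     for (j = 0; ; j += 4 * 3)
       {
         vectemp = ...;
         array[j]     = vectemp[0];
         array[j + 3] = vectemp[1];
         array[j + 6] = vectemp[2];
         array[j + 9] = vectemp[3];
       }
*/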
6673 unsigned nstores = const_nunits;
6674 unsigned lnel = 1;
6675 tree ltype = elem_type;
6676 tree lvectype = vectype;
6677 if (slp)
6679 if (group_size < const_nunits
6680 && const_nunits % group_size == 0)
6682 nstores = const_nunits / group_size;
6683 lnel = group_size;
6684 ltype = build_vector_type (elem_type, group_size);
6685 lvectype = vectype;
6687 /* First check whether the vec_extract optab supports extracting the
6688 vector elts directly; if not, try the alternatives below. */
6689 scalar_mode elmode = SCALAR_TYPE_MODE (elem_type);
6690 machine_mode vmode;
6691 if (!mode_for_vector (elmode, group_size).exists (&vmode)
6692 || !VECTOR_MODE_P (vmode)
6693 || !targetm.vector_mode_supported_p (vmode)
6694 || (convert_optab_handler (vec_extract_optab,
6695 TYPE_MODE (vectype), vmode)
6696 == CODE_FOR_nothing))
6698 /* Try to avoid emitting an extract of vector elements
6699 by performing the extracts using an integer type of the
6700 same size, extracting from a vector of those and then
6701 re-interpreting it as the original vector type if
6702 supported. */
6703 unsigned lsize
6704 = group_size * GET_MODE_BITSIZE (elmode);
6705 elmode = int_mode_for_size (lsize, 0).require ();
6706 unsigned int lnunits = const_nunits / group_size;
6707 /* If we can't construct such a vector fall back to
6708 element extracts from the original vector type and
6709 element size stores. */
6710 if (mode_for_vector (elmode, lnunits).exists (&vmode)
6711 && VECTOR_MODE_P (vmode)
6712 && targetm.vector_mode_supported_p (vmode)
6713 && (convert_optab_handler (vec_extract_optab,
6714 vmode, elmode)
6715 != CODE_FOR_nothing))
6717 nstores = lnunits;
6718 lnel = group_size;
6719 ltype = build_nonstandard_integer_type (lsize, 1);
6720 lvectype = build_vector_type (ltype, nstores);
6722 /* Else fall back to vector extraction anyway.
6723 Fewer stores are more important than avoiding spilling
6724 of the vector we extract from. Compared to the
6725 construction case in vectorizable_load no store-forwarding
6726 issue exists here for reasonable archs. */
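/* Illustration of the integer punning above (hedged, numbers invented):
   for an SLP group of 2 float elements stored out of a 4-element float
   vector, lsize is 64, so each pair of lanes is written as one 64-bit
   integer extracted from the punned vector, roughly

     lvectype_tmp = VIEW_CONVERT_EXPR<vector(2) long long>(vec_oprnd);
     MEM_REF = BIT_FIELD_REF <lvectype_tmp, 64, 0>;

   i.e. two 64-bit stores rather than four 32-bit element stores.  */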
6729 else if (group_size >= const_nunits
6730 && group_size % const_nunits == 0)
6732 nstores = 1;
6733 lnel = const_nunits;
6734 ltype = vectype;
6735 lvectype = vectype;
6737 ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
6738 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6741 ivstep = stride_step;
6742 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
6743 build_int_cst (TREE_TYPE (ivstep), vf));
6745 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
6747 stride_base = cse_and_gimplify_to_preheader (loop_vinfo, stride_base);
6748 ivstep = cse_and_gimplify_to_preheader (loop_vinfo, ivstep);
6749 create_iv (stride_base, ivstep, NULL,
6750 loop, &incr_gsi, insert_after,
6751 &offvar, NULL);
6752 incr = gsi_stmt (incr_gsi);
6753 loop_vinfo->add_stmt (incr);
6755 stride_step = cse_and_gimplify_to_preheader (loop_vinfo, stride_step);
6757 prev_stmt_info = NULL;
6758 alias_off = build_int_cst (ref_type, 0);
6759 stmt_vec_info next_stmt_info = first_stmt_info;
6760 for (g = 0; g < group_size; g++)
6762 running_off = offvar;
6763 if (g)
6765 tree size = TYPE_SIZE_UNIT (ltype);
6766 tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
6767 size);
6768 tree newoff = copy_ssa_name (running_off, NULL);
6769 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6770 running_off, pos);
6771 vect_finish_stmt_generation (stmt_info, incr, gsi);
6772 running_off = newoff;
6774 unsigned int group_el = 0;
6775 unsigned HOST_WIDE_INT
6776 elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
6777 for (j = 0; j < ncopies; j++)
6779 /* We've set op and dt above, from vect_get_store_rhs,
6780 and first_stmt_info == stmt_info. */
6781 if (j == 0)
6783 if (slp)
6785 vect_get_vec_defs (op, NULL_TREE, stmt_info,
6786 &vec_oprnds, NULL, slp_node);
6787 vec_oprnd = vec_oprnds[0];
6789 else
6791 op = vect_get_store_rhs (next_stmt_info);
6792 vec_oprnd = vect_get_vec_def_for_operand
6793 (op, next_stmt_info);
6796 else
6798 if (slp)
6799 vec_oprnd = vec_oprnds[j];
6800 else
6801 vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo,
6802 vec_oprnd);
6804 /* Pun the vector to extract from if necessary. */
6805 if (lvectype != vectype)
6807 tree tem = make_ssa_name (lvectype);
6808 gimple *pun
6809 = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
6810 lvectype, vec_oprnd));
6811 vect_finish_stmt_generation (stmt_info, pun, gsi);
6812 vec_oprnd = tem;
6814 for (i = 0; i < nstores; i++)
6816 tree newref, newoff;
6817 gimple *incr, *assign;
6818 tree size = TYPE_SIZE (ltype);
6819 /* Extract the i'th component. */
6820 tree pos = fold_build2 (MULT_EXPR, bitsizetype,
6821 bitsize_int (i), size);
6822 tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
6823 size, pos);
6825 elem = force_gimple_operand_gsi (gsi, elem, true,
6826 NULL_TREE, true,
6827 GSI_SAME_STMT);
6829 tree this_off = build_int_cst (TREE_TYPE (alias_off),
6830 group_el * elsz);
6831 newref = build2 (MEM_REF, ltype,
6832 running_off, this_off);
6833 vect_copy_ref_info (newref, DR_REF (first_dr_info->dr));
6835 /* And store it to *running_off. */
6836 assign = gimple_build_assign (newref, elem);
6837 stmt_vec_info assign_info
6838 = vect_finish_stmt_generation (stmt_info, assign, gsi);
6840 group_el += lnel;
6841 if (! slp
6842 || group_el == group_size)
6844 newoff = copy_ssa_name (running_off, NULL);
6845 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6846 running_off, stride_step);
6847 vect_finish_stmt_generation (stmt_info, incr, gsi);
6849 running_off = newoff;
6850 group_el = 0;
6852 if (g == group_size - 1
6853 && !slp)
6855 if (j == 0 && i == 0)
6856 STMT_VINFO_VEC_STMT (stmt_info)
6857 = *vec_stmt = assign_info;
6858 else
6859 STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign_info;
6860 prev_stmt_info = assign_info;
6864 next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
6865 if (slp)
6866 break;
6869 vec_oprnds.release ();
6870 return true;
6873 auto_vec<tree> dr_chain (group_size);
6874 oprnds.create (group_size);
6876 alignment_support_scheme
6877 = vect_supportable_dr_alignment (first_dr_info, false);
6878 gcc_assert (alignment_support_scheme);
6879 vec_loop_masks *loop_masks
6880 = (loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
6881 ? &LOOP_VINFO_MASKS (loop_vinfo)
6882 : NULL);
6883 /* Targets with store-lane instructions must not require explicit
6884 realignment. vect_supportable_dr_alignment always returns either
6885 dr_aligned or dr_unaligned_supported for masked operations. */
6886 gcc_assert ((memory_access_type != VMAT_LOAD_STORE_LANES
6887 && !mask
6888 && !loop_masks)
6889 || alignment_support_scheme == dr_aligned
6890 || alignment_support_scheme == dr_unaligned_supported);
6892 if (memory_access_type == VMAT_CONTIGUOUS_DOWN
6893 || memory_access_type == VMAT_CONTIGUOUS_REVERSE)
6894 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
6896 tree bump;
6897 tree vec_offset = NULL_TREE;
6898 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
6900 aggr_type = NULL_TREE;
6901 bump = NULL_TREE;
6903 else if (memory_access_type == VMAT_GATHER_SCATTER)
6905 aggr_type = elem_type;
6906 vect_get_strided_load_store_ops (stmt_info, loop_vinfo, &gs_info,
6907 &bump, &vec_offset);
6909 else
6911 if (memory_access_type == VMAT_LOAD_STORE_LANES)
6912 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
6913 else
6914 aggr_type = vectype;
6915 bump = vect_get_data_ptr_increment (dr_info, aggr_type,
6916 memory_access_type);
6919 if (mask)
6920 LOOP_VINFO_HAS_MASK_STORE (loop_vinfo) = true;
6922 /* In case the vectorization factor (VF) is bigger than the number
6923 of elements that we can fit in a vectype (nunits), we have to generate
6924 more than one vector stmt, i.e. we need to "unroll" the
6925 vector stmt by a factor VF/nunits. For more details see documentation in
6926 vect_get_vec_def_for_copy_stmt. */
6928 /* In case of interleaving (non-unit grouped access):
6930 S1: &base + 2 = x2
6931 S2: &base = x0
6932 S3: &base + 1 = x1
6933 S4: &base + 3 = x3
6935 We create vectorized stores starting from base address (the access of the
6936 first stmt in the chain (S2 in the above example), when the last store stmt
6937 of the chain (S4) is reached:
6939 VS1: &base = vx2
6940 VS2: &base + vec_size*1 = vx0
6941 VS3: &base + vec_size*2 = vx1
6942 VS4: &base + vec_size*3 = vx3
6944 Then permutation statements are generated:
6946 VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
6947 VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
6950 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
6951 (the order of the data-refs in the output of vect_permute_store_chain
6952 corresponds to the order of scalar stmts in the interleaving chain - see
6953 the documentation of vect_permute_store_chain()).
6955 In case of both multiple types and interleaving, above vector stores and
6956 permutation stmts are created for every copy. The result vector stmts are
6957 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
6958 STMT_VINFO_RELATED_STMT for the next copies. */
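/* Sketch (illustrative, not from the source) of how interleaving masks
   such as the ones in VS5/VS6 above are formed for two NELT-element
   vectors:

     low[i]  = i / 2 + (i & 1 ? NELT : 0);               i = 0 .. NELT-1
     high[i] = i / 2 + NELT / 2 + (i & 1 ? NELT : 0);

   With NELT == 8 this gives {0, 8, 1, 9, 2, 10, 3, 11} and
   {4, 12, 5, 13, 6, 14, 7, 15}, matching the VEC_PERM_EXPR masks shown.  */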
6961 prev_stmt_info = NULL;
6962 tree vec_mask = NULL_TREE;
6963 for (j = 0; j < ncopies; j++)
6965 stmt_vec_info new_stmt_info;
6966 if (j == 0)
6968 if (slp)
6970 /* Get vectorized arguments for SLP_NODE. */
6971 vect_get_vec_defs (op, NULL_TREE, stmt_info, &vec_oprnds,
6972 NULL, slp_node);
6974 vec_oprnd = vec_oprnds[0];
6976 else
6978 /* For interleaved stores we collect vectorized defs for all the
6979 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
6980 used as an input to vect_permute_store_chain(), and OPRNDS as
6981 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
6983 If the store is not grouped, DR_GROUP_SIZE is 1, and DR_CHAIN and
6984 OPRNDS are of size 1. */
6985 stmt_vec_info next_stmt_info = first_stmt_info;
6986 for (i = 0; i < group_size; i++)
6988 /* Since gaps are not supported for interleaved stores,
6989 DR_GROUP_SIZE is the exact number of stmts in the chain.
6990 Therefore, NEXT_STMT_INFO can't be null. In case
6991 that there is no interleaving, DR_GROUP_SIZE is 1,
6992 and only one iteration of the loop will be executed. */
6993 op = vect_get_store_rhs (next_stmt_info);
6994 vec_oprnd = vect_get_vec_def_for_operand
6995 (op, next_stmt_info);
6996 dr_chain.quick_push (vec_oprnd);
6997 oprnds.quick_push (vec_oprnd);
6998 next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
7000 if (mask)
7001 vec_mask = vect_get_vec_def_for_operand (mask, stmt_info,
7002 mask_vectype);
7005 /* We should have caught mismatched types earlier. */
7006 gcc_assert (useless_type_conversion_p (vectype,
7007 TREE_TYPE (vec_oprnd)));
7008 bool simd_lane_access_p
7009 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
7010 if (simd_lane_access_p
7011 && TREE_CODE (DR_BASE_ADDRESS (first_dr_info->dr)) == ADDR_EXPR
7012 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr_info->dr), 0))
7013 && integer_zerop (DR_OFFSET (first_dr_info->dr))
7014 && integer_zerop (DR_INIT (first_dr_info->dr))
7015 && alias_sets_conflict_p (get_alias_set (aggr_type),
7016 get_alias_set (TREE_TYPE (ref_type))))
7018 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr_info->dr));
7019 dataref_offset = build_int_cst (ref_type, 0);
7021 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
7022 vect_get_gather_scatter_ops (loop, stmt_info, &gs_info,
7023 &dataref_ptr, &vec_offset);
7024 else
7025 dataref_ptr
7026 = vect_create_data_ref_ptr (first_stmt_info, aggr_type,
7027 simd_lane_access_p ? loop : NULL,
7028 offset, &dummy, gsi, &ptr_incr,
7029 simd_lane_access_p, NULL_TREE, bump);
7031 else
7033 /* For interleaved stores we created vectorized defs for all the
7034 defs stored in OPRNDS in the previous iteration (previous copy).
7035 DR_CHAIN is then used as an input to vect_permute_store_chain(),
7036 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
7037 next copy.
7038 If the store is not grouped, DR_GROUP_SIZE is 1, and DR_CHAIN and
7039 OPRNDS are of size 1. */
7040 for (i = 0; i < group_size; i++)
7042 op = oprnds[i];
7043 vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, op);
7044 dr_chain[i] = vec_oprnd;
7045 oprnds[i] = vec_oprnd;
7047 if (mask)
7048 vec_mask = vect_get_vec_def_for_stmt_copy (vinfo, vec_mask);
7049 if (dataref_offset)
7050 dataref_offset
7051 = int_const_binop (PLUS_EXPR, dataref_offset, bump);
7052 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
7053 vec_offset = vect_get_vec_def_for_stmt_copy (vinfo, vec_offset);
7054 else
7055 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7056 stmt_info, bump);
7059 if (memory_access_type == VMAT_LOAD_STORE_LANES)
7061 tree vec_array;
7063 /* Get an array into which we can store the individual vectors. */
7064 vec_array = create_vector_array (vectype, vec_num);
7066 /* Invalidate the current contents of VEC_ARRAY. This should
7067 become an RTL clobber too, which prevents the vector registers
7068 from being upward-exposed. */
7069 vect_clobber_variable (stmt_info, gsi, vec_array);
7071 /* Store the individual vectors into the array. */
7072 for (i = 0; i < vec_num; i++)
7074 vec_oprnd = dr_chain[i];
7075 write_vector_array (stmt_info, gsi, vec_oprnd, vec_array, i);
7078 tree final_mask = NULL;
7079 if (loop_masks)
7080 final_mask = vect_get_loop_mask (gsi, loop_masks, ncopies,
7081 vectype, j);
7082 if (vec_mask)
7083 final_mask = prepare_load_store_mask (mask_vectype, final_mask,
7084 vec_mask, gsi);
7086 gcall *call;
7087 if (final_mask)
7089 /* Emit:
7090 MASK_STORE_LANES (DATAREF_PTR, ALIAS_PTR, VEC_MASK,
7091 VEC_ARRAY). */
7092 unsigned int align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
7093 tree alias_ptr = build_int_cst (ref_type, align);
7094 call = gimple_build_call_internal (IFN_MASK_STORE_LANES, 4,
7095 dataref_ptr, alias_ptr,
7096 final_mask, vec_array);
7098 else
7100 /* Emit:
7101 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
7102 data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
7103 call = gimple_build_call_internal (IFN_STORE_LANES, 1,
7104 vec_array);
7105 gimple_call_set_lhs (call, data_ref);
7107 gimple_call_set_nothrow (call, true);
7108 new_stmt_info = vect_finish_stmt_generation (stmt_info, call, gsi);
7110 /* Record that VEC_ARRAY is now dead. */
7111 vect_clobber_variable (stmt_info, gsi, vec_array);
7113 else
7115 new_stmt_info = NULL;
7116 if (grouped_store)
7118 if (j == 0)
7119 result_chain.create (group_size);
7120 /* Permute. */
7121 vect_permute_store_chain (dr_chain, group_size, stmt_info, gsi,
7122 &result_chain);
7125 stmt_vec_info next_stmt_info = first_stmt_info;
7126 for (i = 0; i < vec_num; i++)
7128 unsigned align, misalign;
7130 tree final_mask = NULL_TREE;
7131 if (loop_masks)
7132 final_mask = vect_get_loop_mask (gsi, loop_masks,
7133 vec_num * ncopies,
7134 vectype, vec_num * j + i);
7135 if (vec_mask)
7136 final_mask = prepare_load_store_mask (mask_vectype, final_mask,
7137 vec_mask, gsi);
7139 if (memory_access_type == VMAT_GATHER_SCATTER)
7141 tree scale = size_int (gs_info.scale);
7142 gcall *call;
7143 if (loop_masks)
7144 call = gimple_build_call_internal
7145 (IFN_MASK_SCATTER_STORE, 5, dataref_ptr, vec_offset,
7146 scale, vec_oprnd, final_mask);
7147 else
7148 call = gimple_build_call_internal
7149 (IFN_SCATTER_STORE, 4, dataref_ptr, vec_offset,
7150 scale, vec_oprnd);
7151 gimple_call_set_nothrow (call, true);
7152 new_stmt_info
7153 = vect_finish_stmt_generation (stmt_info, call, gsi);
7154 break;
7157 if (i > 0)
7158 /* Bump the vector pointer. */
7159 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7160 stmt_info, bump);
7162 if (slp)
7163 vec_oprnd = vec_oprnds[i];
7164 else if (grouped_store)
7165 /* For grouped stores vectorized defs are interleaved in
7166 vect_permute_store_chain(). */
7167 vec_oprnd = result_chain[i];
7169 align = DR_TARGET_ALIGNMENT (first_dr_info);
7170 if (aligned_access_p (first_dr_info))
7171 misalign = 0;
7172 else if (DR_MISALIGNMENT (first_dr_info) == -1)
7174 align = dr_alignment (vect_dr_behavior (first_dr_info));
7175 misalign = 0;
7177 else
7178 misalign = DR_MISALIGNMENT (first_dr_info);
7179 if (dataref_offset == NULL_TREE
7180 && TREE_CODE (dataref_ptr) == SSA_NAME)
7181 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
7182 misalign);
7184 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
7186 tree perm_mask = perm_mask_for_reverse (vectype);
7187 tree perm_dest = vect_create_destination_var
7188 (vect_get_store_rhs (stmt_info), vectype);
7189 tree new_temp = make_ssa_name (perm_dest);
7191 /* Generate the permute statement. */
7192 gimple *perm_stmt
7193 = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
7194 vec_oprnd, perm_mask);
7195 vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
7197 perm_stmt = SSA_NAME_DEF_STMT (new_temp);
7198 vec_oprnd = new_temp;
7201 /* Arguments are ready. Create the new vector stmt. */
7202 if (final_mask)
7204 align = least_bit_hwi (misalign | align);
7205 tree ptr = build_int_cst (ref_type, align);
7206 gcall *call
7207 = gimple_build_call_internal (IFN_MASK_STORE, 4,
7208 dataref_ptr, ptr,
7209 final_mask, vec_oprnd);
7210 gimple_call_set_nothrow (call, true);
7211 new_stmt_info
7212 = vect_finish_stmt_generation (stmt_info, call, gsi);
7214 else
7216 data_ref = fold_build2 (MEM_REF, vectype,
7217 dataref_ptr,
7218 dataref_offset
7219 ? dataref_offset
7220 : build_int_cst (ref_type, 0));
7221 if (aligned_access_p (first_dr_info))
7223 else if (DR_MISALIGNMENT (first_dr_info) == -1)
7224 TREE_TYPE (data_ref)
7225 = build_aligned_type (TREE_TYPE (data_ref),
7226 align * BITS_PER_UNIT);
7227 else
7228 TREE_TYPE (data_ref)
7229 = build_aligned_type (TREE_TYPE (data_ref),
7230 TYPE_ALIGN (elem_type));
7231 vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr));
7232 gassign *new_stmt
7233 = gimple_build_assign (data_ref, vec_oprnd);
7234 new_stmt_info
7235 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
7238 if (slp)
7239 continue;
7241 next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
7242 if (!next_stmt_info)
7243 break;
7246 if (!slp)
7248 if (j == 0)
7249 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
7250 else
7251 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
7252 prev_stmt_info = new_stmt_info;
7256 oprnds.release ();
7257 result_chain.release ();
7258 vec_oprnds.release ();
7260 return true;
7263 /* Given a vector type VECTYPE, turns permutation SEL into the equivalent
7264 VECTOR_CST mask. No checks are made that the target platform supports the
7265 mask, so callers may wish to test can_vec_perm_const_p separately, or use
7266 vect_gen_perm_mask_checked. */
7268 tree
7269 vect_gen_perm_mask_any (tree vectype, const vec_perm_indices &sel)
7271 tree mask_type;
7273 poly_uint64 nunits = sel.length ();
7274 gcc_assert (known_eq (nunits, TYPE_VECTOR_SUBPARTS (vectype)));
7276 mask_type = build_vector_type (ssizetype, nunits);
7277 return vec_perm_indices_to_tree (mask_type, sel);
7280 /* Checked version of vect_gen_perm_mask_any. Asserts can_vec_perm_const_p,
7281 i.e. that the target supports the pattern _for arbitrary input vectors_. */
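/* A rough usage sketch (illustrative only, simplified to a fixed
   four-element vector; real callers use encodings that also work for
   variable-length vectors): to build a reversing mask one would do

     vec_perm_builder sel (4, 4, 1);
     for (int i = 0; i < 4; ++i)
       sel.quick_push (3 - i);
     vec_perm_indices indices (sel, 1, 4);

   check can_vec_perm_const_p (TYPE_MODE (vectype), indices) and, if it
   holds, call vect_gen_perm_mask_checked (vectype, indices).  */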
7283 tree
7284 vect_gen_perm_mask_checked (tree vectype, const vec_perm_indices &sel)
7286 gcc_assert (can_vec_perm_const_p (TYPE_MODE (vectype), sel));
7287 return vect_gen_perm_mask_any (vectype, sel);
7290 /* Given a vector variable X and Y, that was generated for the scalar
7291 STMT_INFO, generate instructions to permute the vector elements of X and Y
7292 using permutation mask MASK_VEC, insert them at *GSI and return the
7293 permuted vector variable. */
7295 static tree
7296 permute_vec_elements (tree x, tree y, tree mask_vec, stmt_vec_info stmt_info,
7297 gimple_stmt_iterator *gsi)
7299 tree vectype = TREE_TYPE (x);
7300 tree perm_dest, data_ref;
7301 gimple *perm_stmt;
7303 tree scalar_dest = gimple_get_lhs (stmt_info->stmt);
7304 if (TREE_CODE (scalar_dest) == SSA_NAME)
7305 perm_dest = vect_create_destination_var (scalar_dest, vectype);
7306 else
7307 perm_dest = vect_get_new_vect_var (vectype, vect_simple_var, NULL);
7308 data_ref = make_ssa_name (perm_dest);
7310 /* Generate the permute statement. */
7311 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
7312 vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
7314 return data_ref;
7317 /* Hoist the definitions of all SSA uses on STMT_INFO out of the loop LOOP,
7318 inserting them on the loop's preheader edge. Returns true if we
7319 were successful in doing so (and thus STMT_INFO can be moved then),
7320 otherwise returns false. */
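/* For example (illustrative): if the address of an invariant load is
   itself computed inside the loop,

     loop:
       addr_1 = base_2 + off_3;
       x_4 = *addr_1;

   this function moves the definition of addr_1 to the preheader edge,
   so that the caller (vectorizable_load) can then emit the invariant
   load itself on that edge.  */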
7322 static bool
7323 hoist_defs_of_uses (stmt_vec_info stmt_info, struct loop *loop)
7325 ssa_op_iter i;
7326 tree op;
7327 bool any = false;
7329 FOR_EACH_SSA_TREE_OPERAND (op, stmt_info->stmt, i, SSA_OP_USE)
7331 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
7332 if (!gimple_nop_p (def_stmt)
7333 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
7335 /* Make sure we don't need to recurse. While we could do
7336 so in simple cases, for more complex use webs we don't
7337 have an easy way to preserve the stmt order needed to
7338 fulfil dependencies within them. */
7339 tree op2;
7340 ssa_op_iter i2;
7341 if (gimple_code (def_stmt) == GIMPLE_PHI)
7342 return false;
7343 FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
7345 gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
7346 if (!gimple_nop_p (def_stmt2)
7347 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
7348 return false;
7350 any = true;
7354 if (!any)
7355 return true;
7357 FOR_EACH_SSA_TREE_OPERAND (op, stmt_info->stmt, i, SSA_OP_USE)
7359 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
7360 if (!gimple_nop_p (def_stmt)
7361 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
7363 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
7364 gsi_remove (&gsi, false);
7365 gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
7369 return true;
7372 /* vectorizable_load.
7374 Check if STMT_INFO reads a non-scalar data-ref (array/pointer/structure)
7375 that can be vectorized.
7376 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
7377 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
7378 Return true if STMT_INFO is vectorizable in this way. */
7380 static bool
7381 vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
7382 stmt_vec_info *vec_stmt, slp_tree slp_node,
7383 slp_instance slp_node_instance,
7384 stmt_vector_for_cost *cost_vec)
7386 tree scalar_dest;
7387 tree vec_dest = NULL;
7388 tree data_ref = NULL;
7389 stmt_vec_info prev_stmt_info;
7390 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7391 struct loop *loop = NULL;
7392 struct loop *containing_loop = gimple_bb (stmt_info->stmt)->loop_father;
7393 bool nested_in_vect_loop = false;
7394 tree elem_type;
7395 tree new_temp;
7396 machine_mode mode;
7397 tree dummy;
7398 enum dr_alignment_support alignment_support_scheme;
7399 tree dataref_ptr = NULL_TREE;
7400 tree dataref_offset = NULL_TREE;
7401 gimple *ptr_incr = NULL;
7402 int ncopies;
7403 int i, j;
7404 unsigned int group_size;
7405 poly_uint64 group_gap_adj;
7406 tree msq = NULL_TREE, lsq;
7407 tree offset = NULL_TREE;
7408 tree byte_offset = NULL_TREE;
7409 tree realignment_token = NULL_TREE;
7410 gphi *phi = NULL;
7411 vec<tree> dr_chain = vNULL;
7412 bool grouped_load = false;
7413 stmt_vec_info first_stmt_info;
7414 stmt_vec_info first_stmt_info_for_drptr = NULL;
7415 bool compute_in_loop = false;
7416 struct loop *at_loop;
7417 int vec_num;
7418 bool slp = (slp_node != NULL);
7419 bool slp_perm = false;
7420 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7421 poly_uint64 vf;
7422 tree aggr_type;
7423 gather_scatter_info gs_info;
7424 vec_info *vinfo = stmt_info->vinfo;
7425 tree ref_type;
7426 enum vect_def_type mask_dt = vect_unknown_def_type;
7428 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
7429 return false;
7431 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
7432 && ! vec_stmt)
7433 return false;
7435 tree mask = NULL_TREE, mask_vectype = NULL_TREE;
7436 if (gassign *assign = dyn_cast <gassign *> (stmt_info->stmt))
7438 scalar_dest = gimple_assign_lhs (assign);
7439 if (TREE_CODE (scalar_dest) != SSA_NAME)
7440 return false;
7442 tree_code code = gimple_assign_rhs_code (assign);
7443 if (code != ARRAY_REF
7444 && code != BIT_FIELD_REF
7445 && code != INDIRECT_REF
7446 && code != COMPONENT_REF
7447 && code != IMAGPART_EXPR
7448 && code != REALPART_EXPR
7449 && code != MEM_REF
7450 && TREE_CODE_CLASS (code) != tcc_declaration)
7451 return false;
7453 else
7455 gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
7456 if (!call || !gimple_call_internal_p (call))
7457 return false;
7459 internal_fn ifn = gimple_call_internal_fn (call);
7460 if (!internal_load_fn_p (ifn))
7461 return false;
7463 scalar_dest = gimple_call_lhs (call);
7464 if (!scalar_dest)
7465 return false;
7467 if (slp_node != NULL)
7469 if (dump_enabled_p ())
7470 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7471 "SLP of masked loads not supported.\n");
7472 return false;
7475 int mask_index = internal_fn_mask_index (ifn);
7476 if (mask_index >= 0)
7478 mask = gimple_call_arg (call, mask_index);
7479 if (!vect_check_load_store_mask (stmt_info, mask, &mask_dt,
7480 &mask_vectype))
7481 return false;
7485 if (!STMT_VINFO_DATA_REF (stmt_info))
7486 return false;
7488 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7489 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
7491 if (loop_vinfo)
7493 loop = LOOP_VINFO_LOOP (loop_vinfo);
7494 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt_info);
7495 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
7497 else
7498 vf = 1;
7500 /* Multiple types in SLP are handled by creating the appropriate number of
7501 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
7502 case of SLP. */
7503 if (slp)
7504 ncopies = 1;
7505 else
7506 ncopies = vect_get_num_copies (loop_vinfo, vectype);
7508 gcc_assert (ncopies >= 1);
7510 /* FORNOW. This restriction should be relaxed. */
7511 if (nested_in_vect_loop && ncopies > 1)
7513 if (dump_enabled_p ())
7514 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7515 "multiple types in nested loop.\n");
7516 return false;
7519 /* Invalidate assumptions made by dependence analysis when vectorization
7520 on the unrolled body effectively re-orders stmts. */
7521 if (ncopies > 1
7522 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
7523 && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
7524 STMT_VINFO_MIN_NEG_DIST (stmt_info)))
7526 if (dump_enabled_p ())
7527 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7528 "cannot perform implicit CSE when unrolling "
7529 "with negative dependence distance\n");
7530 return false;
7533 elem_type = TREE_TYPE (vectype);
7534 mode = TYPE_MODE (vectype);
7536 /* FORNOW. In some cases we can vectorize even if the data-type is not
7537 supported (e.g. data copies). */
7538 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
7540 if (dump_enabled_p ())
7541 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7542 "Aligned load, but unsupported type.\n");
7543 return false;
7546 /* Check if the load is a part of an interleaving chain. */
7547 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
7549 grouped_load = true;
7550 /* FORNOW */
7551 gcc_assert (!nested_in_vect_loop);
7552 gcc_assert (!STMT_VINFO_GATHER_SCATTER_P (stmt_info));
7554 first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
7555 group_size = DR_GROUP_SIZE (first_stmt_info);
7557 if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
7558 slp_perm = true;
7560 /* Invalidate assumptions made by dependence analysis when vectorization
7561 on the unrolled body effectively re-orders stmts. */
7562 if (!PURE_SLP_STMT (stmt_info)
7563 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
7564 && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
7565 STMT_VINFO_MIN_NEG_DIST (stmt_info)))
7567 if (dump_enabled_p ())
7568 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7569 "cannot perform implicit CSE when performing "
7570 "group loads with negative dependence distance\n");
7571 return false;
7574 /* Similarly when the stmt is a load that is both part of a SLP
7575 instance and a loop vectorized stmt via the same-dr mechanism
7576 we have to give up. */
7577 if (DR_GROUP_SAME_DR_STMT (stmt_info)
7578 && (STMT_SLP_TYPE (stmt_info)
7579 != STMT_SLP_TYPE (DR_GROUP_SAME_DR_STMT (stmt_info))))
7581 if (dump_enabled_p ())
7582 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7583 "conflicting SLP types for CSEd load\n");
7584 return false;
7587 else
7588 group_size = 1;
7590 vect_memory_access_type memory_access_type;
7591 if (!get_load_store_type (stmt_info, vectype, slp, mask, VLS_LOAD, ncopies,
7592 &memory_access_type, &gs_info))
7593 return false;
7595 if (mask)
7597 if (memory_access_type == VMAT_CONTIGUOUS)
7599 machine_mode vec_mode = TYPE_MODE (vectype);
7600 if (!VECTOR_MODE_P (vec_mode)
7601 || !can_vec_mask_load_store_p (vec_mode,
7602 TYPE_MODE (mask_vectype), true))
7603 return false;
7605 else if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl)
7607 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
7608 tree masktype
7609 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
7610 if (TREE_CODE (masktype) == INTEGER_TYPE)
7612 if (dump_enabled_p ())
7613 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7614 "masked gather with integer mask not"
7615 " supported.");
7616 return false;
7619 else if (memory_access_type != VMAT_LOAD_STORE_LANES
7620 && memory_access_type != VMAT_GATHER_SCATTER)
7622 if (dump_enabled_p ())
7623 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7624 "unsupported access type for masked load.\n");
7625 return false;
7629 if (!vec_stmt) /* transformation not required. */
7631 if (!slp)
7632 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
7634 if (loop_vinfo
7635 && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
7636 check_load_store_masking (loop_vinfo, vectype, VLS_LOAD, group_size,
7637 memory_access_type, &gs_info);
7639 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
7640 vect_model_load_cost (stmt_info, ncopies, memory_access_type,
7641 slp_node_instance, slp_node, cost_vec);
7642 return true;
7645 if (!slp)
7646 gcc_assert (memory_access_type
7647 == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
7649 if (dump_enabled_p ())
7650 dump_printf_loc (MSG_NOTE, vect_location,
7651 "transform load. ncopies = %d\n", ncopies);
7653 /* Transform. */
7655 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info), *first_dr_info = NULL;
7656 ensure_base_align (dr_info);
7658 if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl)
7660 vect_build_gather_load_calls (stmt_info, gsi, vec_stmt, &gs_info, mask);
7661 return true;
7664 if (memory_access_type == VMAT_INVARIANT)
7666 gcc_assert (!grouped_load && !mask && !bb_vinfo);
7667 /* If we have versioned for aliasing or the loop doesn't
7668 have any data dependencies that would preclude this,
7669 then we are sure this is a loop invariant load and
7670 thus we can insert it on the preheader edge. */
7671 bool hoist_p = (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
7672 && !nested_in_vect_loop
7673 && hoist_defs_of_uses (stmt_info, loop));
7674 if (hoist_p)
7676 gassign *stmt = as_a <gassign *> (stmt_info->stmt);
7677 if (dump_enabled_p ())
7679 dump_printf_loc (MSG_NOTE, vect_location,
7680 "hoisting out of the vectorized loop: ");
7681 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7683 scalar_dest = copy_ssa_name (scalar_dest);
7684 tree rhs = unshare_expr (gimple_assign_rhs1 (stmt));
7685 gsi_insert_on_edge_immediate
7686 (loop_preheader_edge (loop),
7687 gimple_build_assign (scalar_dest, rhs));
7689 /* These copies are all equivalent, but currently the representation
7690 requires a separate STMT_VINFO_VEC_STMT for each one. */
7691 prev_stmt_info = NULL;
7692 gimple_stmt_iterator gsi2 = *gsi;
7693 gsi_next (&gsi2);
7694 for (j = 0; j < ncopies; j++)
7696 stmt_vec_info new_stmt_info;
7697 if (hoist_p)
7699 new_temp = vect_init_vector (stmt_info, scalar_dest,
7700 vectype, NULL);
7701 gimple *new_stmt = SSA_NAME_DEF_STMT (new_temp);
7702 new_stmt_info = vinfo->add_stmt (new_stmt);
7704 else
7706 new_temp = vect_init_vector (stmt_info, scalar_dest,
7707 vectype, &gsi2);
7708 new_stmt_info = vinfo->lookup_def (new_temp);
7710 if (slp)
7711 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
7712 else if (j == 0)
7713 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
7714 else
7715 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
7716 prev_stmt_info = new_stmt_info;
7718 return true;
7721 if (memory_access_type == VMAT_ELEMENTWISE
7722 || memory_access_type == VMAT_STRIDED_SLP)
7724 gimple_stmt_iterator incr_gsi;
7725 bool insert_after;
7726 gimple *incr;
7727 tree offvar;
7728 tree ivstep;
7729 tree running_off;
7730 vec<constructor_elt, va_gc> *v = NULL;
7731 tree stride_base, stride_step, alias_off;
7732 /* Checked by get_load_store_type. */
7733 unsigned int const_nunits = nunits.to_constant ();
7734 unsigned HOST_WIDE_INT cst_offset = 0;
7736 gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
7737 gcc_assert (!nested_in_vect_loop);
7739 if (grouped_load)
7741 first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
7742 first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
7744 else
7746 first_stmt_info = stmt_info;
7747 first_dr_info = dr_info;
7749 if (slp && grouped_load)
7751 group_size = DR_GROUP_SIZE (first_stmt_info);
7752 ref_type = get_group_alias_ptr_type (first_stmt_info);
7754 else
7756 if (grouped_load)
7757 cst_offset
7758 = (tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)))
7759 * vect_get_place_in_interleaving_chain (stmt_info,
7760 first_stmt_info));
7761 group_size = 1;
7762 ref_type = reference_alias_ptr_type (DR_REF (dr_info->dr));
7765 stride_base
7766 = fold_build_pointer_plus
7767 (DR_BASE_ADDRESS (first_dr_info->dr),
7768 size_binop (PLUS_EXPR,
7769 convert_to_ptrofftype (DR_OFFSET (first_dr_info->dr)),
7770 convert_to_ptrofftype (DR_INIT (first_dr_info->dr))));
7771 stride_step = fold_convert (sizetype, DR_STEP (first_dr_info->dr));
7773 /* For a load with loop-invariant (but other than power-of-2)
7774 stride (i.e. not a grouped access) like so:
7776 for (i = 0; i < n; i += stride)
7777 ... = array[i];
7779 we generate a new induction variable and new accesses to
7780 form a new vector (or vectors, depending on ncopies):
7782 for (j = 0; ; j += VF*stride)
7783 tmp1 = array[j];
7784 tmp2 = array[j + stride];
7786 vectemp = {tmp1, tmp2, ...}
7789 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
7790 build_int_cst (TREE_TYPE (stride_step), vf));
7792 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
7794 stride_base = cse_and_gimplify_to_preheader (loop_vinfo, stride_base);
7795 ivstep = cse_and_gimplify_to_preheader (loop_vinfo, ivstep);
7796 create_iv (stride_base, ivstep, NULL,
7797 loop, &incr_gsi, insert_after,
7798 &offvar, NULL);
7799 incr = gsi_stmt (incr_gsi);
7800 loop_vinfo->add_stmt (incr);
7802 stride_step = cse_and_gimplify_to_preheader (loop_vinfo, stride_step);
7804 prev_stmt_info = NULL;
7805 running_off = offvar;
7806 alias_off = build_int_cst (ref_type, 0);
7807 int nloads = const_nunits;
7808 int lnel = 1;
7809 tree ltype = TREE_TYPE (vectype);
7810 tree lvectype = vectype;
7811 auto_vec<tree> dr_chain;
7812 if (memory_access_type == VMAT_STRIDED_SLP)
7814 if (group_size < const_nunits)
7816 /* First check if vec_init optab supports construction from
7817 vector elts directly. */
7818 scalar_mode elmode = SCALAR_TYPE_MODE (TREE_TYPE (vectype));
7819 machine_mode vmode;
7820 if (mode_for_vector (elmode, group_size).exists (&vmode)
7821 && VECTOR_MODE_P (vmode)
7822 && targetm.vector_mode_supported_p (vmode)
7823 && (convert_optab_handler (vec_init_optab,
7824 TYPE_MODE (vectype), vmode)
7825 != CODE_FOR_nothing))
7827 nloads = const_nunits / group_size;
7828 lnel = group_size;
7829 ltype = build_vector_type (TREE_TYPE (vectype), group_size);
7831 else
7833 /* Otherwise avoid emitting a constructor of vector elements
7834 by performing the loads using an integer type of the same
7835 size, constructing a vector of those and then
7836 re-interpreting it as the original vector type.
7837 This avoids a huge runtime penalty due to the general
7838 inability to perform store forwarding from smaller stores
7839 to a larger load. */
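              /* For example (illustrative): for a group of two 32-bit
                 floats within a four-element float vectype, lsize is 64,
                 so we emit two 64-bit integer loads, build a two-element
                 integer vector from them and finally VIEW_CONVERT that
                 vector back to the original float vectype.  */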
7840 unsigned lsize
7841 = group_size * TYPE_PRECISION (TREE_TYPE (vectype));
7842 elmode = int_mode_for_size (lsize, 0).require ();
7843 unsigned int lnunits = const_nunits / group_size;
7844 /* If we can't construct such a vector fall back to
7845 element loads of the original vector type. */
7846 if (mode_for_vector (elmode, lnunits).exists (&vmode)
7847 && VECTOR_MODE_P (vmode)
7848 && targetm.vector_mode_supported_p (vmode)
7849 && (convert_optab_handler (vec_init_optab, vmode, elmode)
7850 != CODE_FOR_nothing))
7852 nloads = lnunits;
7853 lnel = group_size;
7854 ltype = build_nonstandard_integer_type (lsize, 1);
7855 lvectype = build_vector_type (ltype, nloads);
7859 else
7861 nloads = 1;
7862 lnel = const_nunits;
7863 ltype = vectype;
7865 ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
7867 /* For an element-wise access with a single-element vectype, load the
7867 whole vector(1) of scalar_type at once. */
7868 else if (nloads == 1)
7869 ltype = vectype;
7871 if (slp)
7873 /* For SLP permutation support we need to load the whole group,
7874 not only the number of vector stmts the permutation result
7875 fits in. */
7876 if (slp_perm)
7878 /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for
7879 variable VF. */
7880 unsigned int const_vf = vf.to_constant ();
7881 ncopies = CEIL (group_size * const_vf, const_nunits);
7882 dr_chain.create (ncopies);
7884 else
7885 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7887 unsigned int group_el = 0;
7888 unsigned HOST_WIDE_INT
7889 elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
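      /* For each copy, emit NLOADS loads of LTYPE at offsets
         GROUP_EL * ELSZ (plus CST_OFFSET) from RUNNING_OFF, advancing
         RUNNING_OFF by STRIDE_STEP as the group is consumed; when
         NLOADS > 1 the loaded pieces are combined with a CONSTRUCTOR of
         LVECTYPE and view-converted back to VECTYPE if necessary.  */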
7890 for (j = 0; j < ncopies; j++)
7892 if (nloads > 1)
7893 vec_alloc (v, nloads);
7894 stmt_vec_info new_stmt_info = NULL;
7895 for (i = 0; i < nloads; i++)
7897 tree this_off = build_int_cst (TREE_TYPE (alias_off),
7898 group_el * elsz + cst_offset);
7899 tree data_ref = build2 (MEM_REF, ltype, running_off, this_off);
7900 vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr));
7901 gassign *new_stmt
7902 = gimple_build_assign (make_ssa_name (ltype), data_ref);
7903 new_stmt_info
7904 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
7905 if (nloads > 1)
7906 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE,
7907 gimple_assign_lhs (new_stmt));
7909 group_el += lnel;
7910 if (! slp
7911 || group_el == group_size)
7913 tree newoff = copy_ssa_name (running_off);
7914 gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
7915 running_off, stride_step);
7916 vect_finish_stmt_generation (stmt_info, incr, gsi);
7918 running_off = newoff;
7919 group_el = 0;
7922 if (nloads > 1)
7924 tree vec_inv = build_constructor (lvectype, v);
7925 new_temp = vect_init_vector (stmt_info, vec_inv, lvectype, gsi);
7926 new_stmt_info = vinfo->lookup_def (new_temp);
7927 if (lvectype != vectype)
7929 gassign *new_stmt
7930 = gimple_build_assign (make_ssa_name (vectype),
7931 VIEW_CONVERT_EXPR,
7932 build1 (VIEW_CONVERT_EXPR,
7933 vectype, new_temp));
7934 new_stmt_info
7935 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
7939 if (slp)
7941 if (slp_perm)
7942 dr_chain.quick_push (gimple_assign_lhs (new_stmt_info->stmt));
7943 else
7944 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
7946 else
7948 if (j == 0)
7949 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
7950 else
7951 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
7952 prev_stmt_info = new_stmt_info;
7955 if (slp_perm)
7957 unsigned n_perms;
7958 vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
7959 slp_node_instance, false, &n_perms);
7961 return true;
7964 if (memory_access_type == VMAT_GATHER_SCATTER
7965 || (!slp && memory_access_type == VMAT_CONTIGUOUS))
7966 grouped_load = false;
7968 if (grouped_load)
7970 first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
7971 group_size = DR_GROUP_SIZE (first_stmt_info);
7972 /* For SLP vectorization we directly vectorize a subchain
7973 without permutation. */
7974 if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
7975 first_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[0];
7976 /* For BB vectorization always use the first stmt to base
7977 the data ref pointer on. */
7978 if (bb_vinfo)
7979 first_stmt_info_for_drptr = SLP_TREE_SCALAR_STMTS (slp_node)[0];
7981 /* Check if the chain of loads is already vectorized. */
7982 if (STMT_VINFO_VEC_STMT (first_stmt_info)
7983 /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
7984 ??? But we can only do so if there is exactly one
7985 as we have no way to get at the rest. Leave the CSE
7986 opportunity alone.
7987 ??? With the group load eventually participating
7988 in multiple different permutations (having multiple
7989 slp nodes which refer to the same group) the CSE
7990 is even wrong code. See PR56270. */
7991 && !slp)
7993 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
7994 return true;
7996 first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
7997 group_gap_adj = 0;
7999 /* VEC_NUM is the number of vect stmts to be created for this group. */
8000 if (slp)
8002 grouped_load = false;
8003 /* If an SLP permutation is from N elements to N elements,
8004 and if one vector holds a whole number of N, we can load
8005 the inputs to the permutation in the same way as an
8006 unpermuted sequence. In other cases we need to load the
8007 whole group, not only the number of vector stmts the
8008 permutation result fits in. */
8009 if (slp_perm
8010 && (group_size != SLP_INSTANCE_GROUP_SIZE (slp_node_instance)
8011 || !multiple_p (nunits, group_size)))
8013 /* We don't yet generate such SLP_TREE_LOAD_PERMUTATIONs for
8014 variable VF; see vect_transform_slp_perm_load. */
8015 unsigned int const_vf = vf.to_constant ();
8016 unsigned int const_nunits = nunits.to_constant ();
8017 vec_num = CEIL (group_size * const_vf, const_nunits);
8018 group_gap_adj = vf * group_size - nunits * vec_num;
8020 else
8022 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
8023 group_gap_adj
8024 = group_size - SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
8027 else
8028 vec_num = group_size;
8030 ref_type = get_group_alias_ptr_type (first_stmt_info);
8032 else
8034 first_stmt_info = stmt_info;
8035 first_dr_info = dr_info;
8036 group_size = vec_num = 1;
8037 group_gap_adj = 0;
8038 ref_type = reference_alias_ptr_type (DR_REF (first_dr_info->dr));
8041 alignment_support_scheme
8042 = vect_supportable_dr_alignment (first_dr_info, false);
8043 gcc_assert (alignment_support_scheme);
8044 vec_loop_masks *loop_masks
8045 = (loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
8046 ? &LOOP_VINFO_MASKS (loop_vinfo)
8047 : NULL);
8048 /* Targets with store-lane instructions must not require explicit
8049 realignment. vect_supportable_dr_alignment always returns either
8050 dr_aligned or dr_unaligned_supported for masked operations. */
8051 gcc_assert ((memory_access_type != VMAT_LOAD_STORE_LANES
8052 && !mask
8053 && !loop_masks)
8054 || alignment_support_scheme == dr_aligned
8055 || alignment_support_scheme == dr_unaligned_supported);
8057 /* In case the vectorization factor (VF) is bigger than the number
8058 of elements that we can fit in a vectype (nunits), we have to generate
8059 more than one vector stmt - i.e - we need to "unroll" the
8060 vector stmt by a factor VF/nunits. In doing so, we record a pointer
8061 from one copy of the vector stmt to the next, in the field
8062 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
8063 stages to find the correct vector defs to be used when vectorizing
8064 stmts that use the defs of the current stmt. The example below
8065 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
8066 need to create 4 vectorized stmts):
8068 before vectorization:
8069 RELATED_STMT VEC_STMT
8070 S1: x = memref - -
8071 S2: z = x + 1 - -
8073 step 1: vectorize stmt S1:
8074 We first create the vector stmt VS1_0, and, as usual, record a
8075 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
8076 Next, we create the vector stmt VS1_1, and record a pointer to
8077 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
8078 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
8079 stmts and pointers:
8080 RELATED_STMT VEC_STMT
8081 VS1_0: vx0 = memref0 VS1_1 -
8082 VS1_1: vx1 = memref1 VS1_2 -
8083 VS1_2: vx2 = memref2 VS1_3 -
8084 VS1_3: vx3 = memref3 - -
8085 S1: x = load - VS1_0
8086 S2: z = x + 1 - -
8088 See in documentation in vect_get_vec_def_for_stmt_copy for how the
8089 information we recorded in RELATED_STMT field is used to vectorize
8090 stmt S2. */
8092 /* In case of interleaving (non-unit grouped access):
8094 S1: x2 = &base + 2
8095 S2: x0 = &base
8096 S3: x1 = &base + 1
8097 S4: x3 = &base + 3
8099 Vectorized loads are created in the order of memory accesses
8100 starting from the access of the first stmt of the chain:
8102 VS1: vx0 = &base
8103 VS2: vx1 = &base + vec_size*1
8104 VS3: vx3 = &base + vec_size*2
8105 VS4: vx4 = &base + vec_size*3
8107 Then permutation statements are generated:
8109 VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
8110 VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
8113 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
8114 (the order of the data-refs in the output of vect_permute_load_chain
8115 corresponds to the order of scalar stmts in the interleaving chain - see
8116 the documentation of vect_permute_load_chain()).
8117 The generation of permutation stmts and recording them in
8118 STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().
8120 In case of both multiple types and interleaving, the vector loads and
8121 permutation stmts above are created for every copy. The result vector
8122 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
8123 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
8125 /* If the data reference is aligned (dr_aligned) or potentially unaligned
8126 on a target that supports unaligned accesses (dr_unaligned_supported)
8127 we generate the following code:
8128 p = initial_addr;
8129 indx = 0;
8130 loop {
8131 p = p + indx * vectype_size;
8132 vec_dest = *(p);
8133 indx = indx + 1;
8136 Otherwise, the data reference is potentially unaligned on a target that
8137 does not support unaligned accesses (dr_explicit_realign_optimized) -
8138 then generate the following code, in which the data in each iteration is
8139 obtained by two vector loads, one from the previous iteration, and one
8140 from the current iteration:
8141 p1 = initial_addr;
8142 msq_init = *(floor(p1))
8143 p2 = initial_addr + VS - 1;
8144 realignment_token = call target_builtin;
8145 indx = 0;
8146 loop {
8147 p2 = p2 + indx * vectype_size
8148 lsq = *(floor(p2))
8149 vec_dest = realign_load (msq, lsq, realignment_token)
8150 indx = indx + 1;
8151 msq = lsq;
8152 } */
8154 /* If the misalignment remains the same throughout the execution of the
8155 loop, we can create the init_addr and permutation mask at the loop
8156 preheader. Otherwise, it needs to be created inside the loop.
8157 This can only occur when vectorizing memory accesses in the inner-loop
8158 nested within an outer-loop that is being vectorized. */
8160 if (nested_in_vect_loop
8161 && !multiple_p (DR_STEP_ALIGNMENT (dr_info->dr),
8162 GET_MODE_SIZE (TYPE_MODE (vectype))))
8164 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
8165 compute_in_loop = true;
8168 if ((alignment_support_scheme == dr_explicit_realign_optimized
8169 || alignment_support_scheme == dr_explicit_realign)
8170 && !compute_in_loop)
8172 msq = vect_setup_realignment (first_stmt_info, gsi, &realignment_token,
8173 alignment_support_scheme, NULL_TREE,
8174 &at_loop);
8175 if (alignment_support_scheme == dr_explicit_realign_optimized)
8177 phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
8178 byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
8179 size_one_node);
8182 else
8183 at_loop = loop;
8185 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
8186 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
8188 tree bump;
8189 tree vec_offset = NULL_TREE;
8190 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
8192 aggr_type = NULL_TREE;
8193 bump = NULL_TREE;
8195 else if (memory_access_type == VMAT_GATHER_SCATTER)
8197 aggr_type = elem_type;
8198 vect_get_strided_load_store_ops (stmt_info, loop_vinfo, &gs_info,
8199 &bump, &vec_offset);
8201 else
8203 if (memory_access_type == VMAT_LOAD_STORE_LANES)
8204 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
8205 else
8206 aggr_type = vectype;
8207 bump = vect_get_data_ptr_increment (dr_info, aggr_type,
8208 memory_access_type);
8211 tree vec_mask = NULL_TREE;
8212 prev_stmt_info = NULL;
8213 poly_uint64 group_elt = 0;
8214 for (j = 0; j < ncopies; j++)
8216 stmt_vec_info new_stmt_info = NULL;
8217 /* 1. Create the vector or array pointer update chain. */
8218 if (j == 0)
8220 bool simd_lane_access_p
8221 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
8222 if (simd_lane_access_p
8223 && TREE_CODE (DR_BASE_ADDRESS (first_dr_info->dr)) == ADDR_EXPR
8224 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr_info->dr), 0))
8225 && integer_zerop (DR_OFFSET (first_dr_info->dr))
8226 && integer_zerop (DR_INIT (first_dr_info->dr))
8227 && alias_sets_conflict_p (get_alias_set (aggr_type),
8228 get_alias_set (TREE_TYPE (ref_type)))
8229 && (alignment_support_scheme == dr_aligned
8230 || alignment_support_scheme == dr_unaligned_supported))
8232 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr_info->dr));
8233 dataref_offset = build_int_cst (ref_type, 0);
8235 else if (first_stmt_info_for_drptr
8236 && first_stmt_info != first_stmt_info_for_drptr)
8238 dataref_ptr
8239 = vect_create_data_ref_ptr (first_stmt_info_for_drptr,
8240 aggr_type, at_loop, offset, &dummy,
8241 gsi, &ptr_incr, simd_lane_access_p,
8242 byte_offset, bump);
8243 /* Adjust the pointer by the difference to first_stmt_info's DR. */
8244 data_reference_p ptrdr
8245 = STMT_VINFO_DATA_REF (first_stmt_info_for_drptr);
8246 tree diff
8247 = fold_convert (sizetype,
8248 size_binop (MINUS_EXPR,
8249 DR_INIT (first_dr_info->dr),
8250 DR_INIT (ptrdr)));
8251 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
8252 stmt_info, diff);
8254 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
8255 vect_get_gather_scatter_ops (loop, stmt_info, &gs_info,
8256 &dataref_ptr, &vec_offset);
8257 else
8258 dataref_ptr
8259 = vect_create_data_ref_ptr (first_stmt_info, aggr_type, at_loop,
8260 offset, &dummy, gsi, &ptr_incr,
8261 simd_lane_access_p,
8262 byte_offset, bump);
8263 if (mask)
8264 vec_mask = vect_get_vec_def_for_operand (mask, stmt_info,
8265 mask_vectype);
8267 else
8269 if (dataref_offset)
8270 dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
8271 bump);
8272 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
8273 vec_offset = vect_get_vec_def_for_stmt_copy (vinfo, vec_offset);
8274 else
8275 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
8276 stmt_info, bump);
8277 if (mask)
8278 vec_mask = vect_get_vec_def_for_stmt_copy (vinfo, vec_mask);
8281 if (grouped_load || slp_perm)
8282 dr_chain.create (vec_num);
8284 if (memory_access_type == VMAT_LOAD_STORE_LANES)
8286 tree vec_array;
8288 vec_array = create_vector_array (vectype, vec_num);
8290 tree final_mask = NULL_TREE;
8291 if (loop_masks)
8292 final_mask = vect_get_loop_mask (gsi, loop_masks, ncopies,
8293 vectype, j);
8294 if (vec_mask)
8295 final_mask = prepare_load_store_mask (mask_vectype, final_mask,
8296 vec_mask, gsi);
8298 gcall *call;
8299 if (final_mask)
8301 /* Emit:
8302 VEC_ARRAY = MASK_LOAD_LANES (DATAREF_PTR, ALIAS_PTR,
8303 VEC_MASK). */
8304 unsigned int align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
8305 tree alias_ptr = build_int_cst (ref_type, align);
8306 call = gimple_build_call_internal (IFN_MASK_LOAD_LANES, 3,
8307 dataref_ptr, alias_ptr,
8308 final_mask);
8310 else
8312 /* Emit:
8313 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
8314 data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
8315 call = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
8317 gimple_call_set_lhs (call, vec_array);
8318 gimple_call_set_nothrow (call, true);
8319 new_stmt_info = vect_finish_stmt_generation (stmt_info, call, gsi);
8321 /* Extract each vector into an SSA_NAME. */
8322 for (i = 0; i < vec_num; i++)
8324 new_temp = read_vector_array (stmt_info, gsi, scalar_dest,
8325 vec_array, i);
8326 dr_chain.quick_push (new_temp);
8329 /* Record the mapping between SSA_NAMEs and statements. */
8330 vect_record_grouped_load_vectors (stmt_info, dr_chain);
8332 /* Record that VEC_ARRAY is now dead. */
8333 vect_clobber_variable (stmt_info, gsi, vec_array);
8335 else
8337 for (i = 0; i < vec_num; i++)
8339 tree final_mask = NULL_TREE;
8340 if (loop_masks
8341 && memory_access_type != VMAT_INVARIANT)
8342 final_mask = vect_get_loop_mask (gsi, loop_masks,
8343 vec_num * ncopies,
8344 vectype, vec_num * j + i);
8345 if (vec_mask)
8346 final_mask = prepare_load_store_mask (mask_vectype, final_mask,
8347 vec_mask, gsi);
8349 if (i > 0)
8350 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
8351 stmt_info, bump);
8353 /* 2. Create the vector-load in the loop. */
8354 gimple *new_stmt = NULL;
8355 switch (alignment_support_scheme)
8357 case dr_aligned:
8358 case dr_unaligned_supported:
8360 unsigned int align, misalign;
8362 if (memory_access_type == VMAT_GATHER_SCATTER)
8364 tree scale = size_int (gs_info.scale);
8365 gcall *call;
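                    /* Emit:
                         VEC_DEST = MASK_GATHER_LOAD (DATAREF_PTR, VEC_OFFSET,
                                                      SCALE, FINAL_MASK)
                       or, without loop masking:
                         VEC_DEST = GATHER_LOAD (DATAREF_PTR, VEC_OFFSET,
                                                 SCALE).  */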
8366 if (loop_masks)
8367 call = gimple_build_call_internal
8368 (IFN_MASK_GATHER_LOAD, 4, dataref_ptr,
8369 vec_offset, scale, final_mask);
8370 else
8371 call = gimple_build_call_internal
8372 (IFN_GATHER_LOAD, 3, dataref_ptr,
8373 vec_offset, scale);
8374 gimple_call_set_nothrow (call, true);
8375 new_stmt = call;
8376 data_ref = NULL_TREE;
8377 break;
8380 align = DR_TARGET_ALIGNMENT (dr_info);
8381 if (alignment_support_scheme == dr_aligned)
8383 gcc_assert (aligned_access_p (first_dr_info));
8384 misalign = 0;
8386 else if (DR_MISALIGNMENT (first_dr_info) == -1)
8388 align = dr_alignment
8389 (vect_dr_behavior (first_dr_info));
8390 misalign = 0;
8392 else
8393 misalign = DR_MISALIGNMENT (first_dr_info);
8394 if (dataref_offset == NULL_TREE
8395 && TREE_CODE (dataref_ptr) == SSA_NAME)
8396 set_ptr_info_alignment (get_ptr_info (dataref_ptr),
8397 align, misalign);
8399 if (final_mask)
8401 align = least_bit_hwi (misalign | align);
8402 tree ptr = build_int_cst (ref_type, align);
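                      /* Emit:
                           VEC_DEST = MASK_LOAD (DATAREF_PTR, PTR, FINAL_MASK)
                         with PTR carrying the alias type and the known
                         alignment of the access.  */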
8403 gcall *call
8404 = gimple_build_call_internal (IFN_MASK_LOAD, 3,
8405 dataref_ptr, ptr,
8406 final_mask);
8407 gimple_call_set_nothrow (call, true);
8408 new_stmt = call;
8409 data_ref = NULL_TREE;
8411 else
8413 data_ref
8414 = fold_build2 (MEM_REF, vectype, dataref_ptr,
8415 dataref_offset
8416 ? dataref_offset
8417 : build_int_cst (ref_type, 0));
8418 if (alignment_support_scheme == dr_aligned)
8420 else if (DR_MISALIGNMENT (first_dr_info) == -1)
8421 TREE_TYPE (data_ref)
8422 = build_aligned_type (TREE_TYPE (data_ref),
8423 align * BITS_PER_UNIT);
8424 else
8425 TREE_TYPE (data_ref)
8426 = build_aligned_type (TREE_TYPE (data_ref),
8427 TYPE_ALIGN (elem_type));
8429 break;
8431 case dr_explicit_realign:
8433 tree ptr, bump;
8435 tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));
8437 if (compute_in_loop)
8438 msq = vect_setup_realignment (first_stmt_info, gsi,
8439 &realignment_token,
8440 dr_explicit_realign,
8441 dataref_ptr, NULL);
8443 if (TREE_CODE (dataref_ptr) == SSA_NAME)
8444 ptr = copy_ssa_name (dataref_ptr);
8445 else
8446 ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
8447 unsigned int align = DR_TARGET_ALIGNMENT (first_dr_info);
8448 new_stmt = gimple_build_assign
8449 (ptr, BIT_AND_EXPR, dataref_ptr,
8450 build_int_cst
8451 (TREE_TYPE (dataref_ptr),
8452 -(HOST_WIDE_INT) align));
8453 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
8454 data_ref
8455 = build2 (MEM_REF, vectype, ptr,
8456 build_int_cst (ref_type, 0));
8457 vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr));
8458 vec_dest = vect_create_destination_var (scalar_dest,
8459 vectype);
8460 new_stmt = gimple_build_assign (vec_dest, data_ref);
8461 new_temp = make_ssa_name (vec_dest, new_stmt);
8462 gimple_assign_set_lhs (new_stmt, new_temp);
8463 gimple_set_vdef (new_stmt, gimple_vdef (stmt_info->stmt));
8464 gimple_set_vuse (new_stmt, gimple_vuse (stmt_info->stmt));
8465 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
8466 msq = new_temp;
8468 bump = size_binop (MULT_EXPR, vs,
8469 TYPE_SIZE_UNIT (elem_type));
8470 bump = size_binop (MINUS_EXPR, bump, size_one_node);
8471 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi,
8472 stmt_info, bump);
8473 new_stmt = gimple_build_assign
8474 (NULL_TREE, BIT_AND_EXPR, ptr,
8475 build_int_cst
8476 (TREE_TYPE (ptr), -(HOST_WIDE_INT) align));
8477 ptr = copy_ssa_name (ptr, new_stmt);
8478 gimple_assign_set_lhs (new_stmt, ptr);
8479 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
8480 data_ref
8481 = build2 (MEM_REF, vectype, ptr,
8482 build_int_cst (ref_type, 0));
8483 break;
8485 case dr_explicit_realign_optimized:
8487 if (TREE_CODE (dataref_ptr) == SSA_NAME)
8488 new_temp = copy_ssa_name (dataref_ptr);
8489 else
8490 new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
8491 unsigned int align = DR_TARGET_ALIGNMENT (first_dr_info);
8492 new_stmt = gimple_build_assign
8493 (new_temp, BIT_AND_EXPR, dataref_ptr,
8494 build_int_cst (TREE_TYPE (dataref_ptr),
8495 -(HOST_WIDE_INT) align));
8496 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
8497 data_ref
8498 = build2 (MEM_REF, vectype, new_temp,
8499 build_int_cst (ref_type, 0));
8500 break;
8502 default:
8503 gcc_unreachable ();
8505 vec_dest = vect_create_destination_var (scalar_dest, vectype);
8506 /* DATA_REF is null if we've already built the statement. */
8507 if (data_ref)
8509 vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr));
8510 new_stmt = gimple_build_assign (vec_dest, data_ref);
8512 new_temp = make_ssa_name (vec_dest, new_stmt);
8513 gimple_set_lhs (new_stmt, new_temp);
8514 new_stmt_info
8515 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
8517 /* 3. Handle explicit realignment if necessary/supported.
8518 Create in loop:
8519 vec_dest = realign_load (msq, lsq, realignment_token) */
8520 if (alignment_support_scheme == dr_explicit_realign_optimized
8521 || alignment_support_scheme == dr_explicit_realign)
8523 lsq = gimple_assign_lhs (new_stmt);
8524 if (!realignment_token)
8525 realignment_token = dataref_ptr;
8526 vec_dest = vect_create_destination_var (scalar_dest, vectype);
8527 new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
8528 msq, lsq, realignment_token);
8529 new_temp = make_ssa_name (vec_dest, new_stmt);
8530 gimple_assign_set_lhs (new_stmt, new_temp);
8531 new_stmt_info
8532 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
8534 if (alignment_support_scheme == dr_explicit_realign_optimized)
8536 gcc_assert (phi);
8537 if (i == vec_num - 1 && j == ncopies - 1)
8538 add_phi_arg (phi, lsq,
8539 loop_latch_edge (containing_loop),
8540 UNKNOWN_LOCATION);
8541 msq = lsq;
8545 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
8547 tree perm_mask = perm_mask_for_reverse (vectype);
8548 new_temp = permute_vec_elements (new_temp, new_temp,
8549 perm_mask, stmt_info, gsi);
8550 new_stmt_info = vinfo->lookup_def (new_temp);
8553 /* Collect vector loads and later create their permutation in
8554 vect_transform_grouped_load (). */
8555 if (grouped_load || slp_perm)
8556 dr_chain.quick_push (new_temp);
8558 /* Store vector loads in the corresponding SLP_NODE. */
8559 if (slp && !slp_perm)
8560 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
8562 /* With SLP permutation we load the gaps as well; without it
8563 we need to skip the gaps after we manage to fully load
8564 all elements. group_gap_adj is DR_GROUP_SIZE here. */
8565 group_elt += nunits;
8566 if (maybe_ne (group_gap_adj, 0U)
8567 && !slp_perm
8568 && known_eq (group_elt, group_size - group_gap_adj))
8570 poly_wide_int bump_val
8571 = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
8572 * group_gap_adj);
8573 tree bump = wide_int_to_tree (sizetype, bump_val);
8574 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
8575 stmt_info, bump);
8576 group_elt = 0;
8579 /* Bump the vector pointer to account for a gap or for excess
8580 elements loaded for a permuted SLP load. */
8581 if (maybe_ne (group_gap_adj, 0U) && slp_perm)
8583 poly_wide_int bump_val
8584 = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
8585 * group_gap_adj);
8586 tree bump = wide_int_to_tree (sizetype, bump_val);
8587 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
8588 stmt_info, bump);
8592 if (slp && !slp_perm)
8593 continue;
8595 if (slp_perm)
8597 unsigned n_perms;
8598 if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
8599 slp_node_instance, false,
8600 &n_perms))
8602 dr_chain.release ();
8603 return false;
8606 else
8608 if (grouped_load)
8610 if (memory_access_type != VMAT_LOAD_STORE_LANES)
8611 vect_transform_grouped_load (stmt_info, dr_chain,
8612 group_size, gsi);
8613 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
8615 else
8617 if (j == 0)
8618 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
8619 else
8620 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
8621 prev_stmt_info = new_stmt_info;
8624 dr_chain.release ();
8627 return true;
8630 /* Function vect_is_simple_cond.
8632 Input:
8633 LOOP - the loop that is being vectorized.
8634 COND - Condition that is checked for simple use.
8636 Output:
8637 *COMP_VECTYPE - the vector type for the comparison.
8638 *DTS - The def types for the arguments of the comparison
8640 Returns whether a COND can be vectorized. Checks whether the
8641 condition operands are supportable using vect_is_simple_use. */
8643 static bool
8644 vect_is_simple_cond (tree cond, vec_info *vinfo,
8645 tree *comp_vectype, enum vect_def_type *dts,
8646 tree vectype)
8648 tree lhs, rhs;
8649 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
8651 /* Mask case. */
8652 if (TREE_CODE (cond) == SSA_NAME
8653 && VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (cond)))
8655 if (!vect_is_simple_use (cond, vinfo, &dts[0], comp_vectype)
8656 || !*comp_vectype
8657 || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype))
8658 return false;
8659 return true;
8662 if (!COMPARISON_CLASS_P (cond))
8663 return false;
8665 lhs = TREE_OPERAND (cond, 0);
8666 rhs = TREE_OPERAND (cond, 1);
8668 if (TREE_CODE (lhs) == SSA_NAME)
8670 if (!vect_is_simple_use (lhs, vinfo, &dts[0], &vectype1))
8671 return false;
8673 else if (TREE_CODE (lhs) == INTEGER_CST || TREE_CODE (lhs) == REAL_CST
8674 || TREE_CODE (lhs) == FIXED_CST)
8675 dts[0] = vect_constant_def;
8676 else
8677 return false;
8679 if (TREE_CODE (rhs) == SSA_NAME)
8681 if (!vect_is_simple_use (rhs, vinfo, &dts[1], &vectype2))
8682 return false;
8684 else if (TREE_CODE (rhs) == INTEGER_CST || TREE_CODE (rhs) == REAL_CST
8685 || TREE_CODE (rhs) == FIXED_CST)
8686 dts[1] = vect_constant_def;
8687 else
8688 return false;
8690 if (vectype1 && vectype2
8691 && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1),
8692 TYPE_VECTOR_SUBPARTS (vectype2)))
8693 return false;
8695 *comp_vectype = vectype1 ? vectype1 : vectype2;
8696 /* Invariant comparison. */
8697 if (! *comp_vectype && vectype)
8699 tree scalar_type = TREE_TYPE (lhs);
8700 /* If we can widen the comparison to match vectype do so. */
8701 if (INTEGRAL_TYPE_P (scalar_type)
8702 && tree_int_cst_lt (TYPE_SIZE (scalar_type),
8703 TYPE_SIZE (TREE_TYPE (vectype))))
8704 scalar_type = build_nonstandard_integer_type
8705 (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (vectype))),
8706 TYPE_UNSIGNED (scalar_type));
8707 *comp_vectype = get_vectype_for_scalar_type (scalar_type);
8710 return true;
8713 /* vectorizable_condition.
8715 Check if STMT_INFO is a conditional modify expression that can be vectorized.
8716 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
8717 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
8718 at GSI.
8720 When STMT_INFO is vectorized as a nested cycle, REDUC_DEF is the vector
8721 variable to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1,
8722 and in the else clause if it is 2).
8724 Return true if STMT_INFO is vectorizable in this way. */
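/* For example (illustrative), a scalar statement

     x = a < b ? c : d;

   is vectorized as

     vx = VEC_COND_EXPR <va < vb, vc, vd>;

   with the comparison replaced by the bit operations chosen below when
   the operands are boolean vectors.  */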
8726 bool
8727 vectorizable_condition (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
8728 stmt_vec_info *vec_stmt, tree reduc_def,
8729 int reduc_index, slp_tree slp_node,
8730 stmt_vector_for_cost *cost_vec)
8732 vec_info *vinfo = stmt_info->vinfo;
8733 tree scalar_dest = NULL_TREE;
8734 tree vec_dest = NULL_TREE;
8735 tree cond_expr, cond_expr0 = NULL_TREE, cond_expr1 = NULL_TREE;
8736 tree then_clause, else_clause;
8737 tree comp_vectype = NULL_TREE;
8738 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
8739 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
8740 tree vec_compare;
8741 tree new_temp;
8742 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
8743 enum vect_def_type dts[4]
8744 = {vect_unknown_def_type, vect_unknown_def_type,
8745 vect_unknown_def_type, vect_unknown_def_type};
8746 int ndts = 4;
8747 int ncopies;
8748 enum tree_code code, cond_code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
8749 stmt_vec_info prev_stmt_info = NULL;
8750 int i, j;
8751 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
8752 vec<tree> vec_oprnds0 = vNULL;
8753 vec<tree> vec_oprnds1 = vNULL;
8754 vec<tree> vec_oprnds2 = vNULL;
8755 vec<tree> vec_oprnds3 = vNULL;
8756 tree vec_cmp_type;
8757 bool masked = false;
8759 if (reduc_index && STMT_SLP_TYPE (stmt_info))
8760 return false;
8762 vect_reduction_type reduction_type
8763 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info);
8764 if (reduction_type == TREE_CODE_REDUCTION)
8766 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
8767 return false;
8769 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
8770 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
8771 && reduc_def))
8772 return false;
8774 /* FORNOW: not yet supported. */
8775 if (STMT_VINFO_LIVE_P (stmt_info))
8777 if (dump_enabled_p ())
8778 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8779 "value used after loop.\n");
8780 return false;
8784 /* Is vectorizable conditional operation? */
8785 gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
8786 if (!stmt)
8787 return false;
8789 code = gimple_assign_rhs_code (stmt);
8791 if (code != COND_EXPR)
8792 return false;
8794 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
8795 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
8797 if (slp_node)
8798 ncopies = 1;
8799 else
8800 ncopies = vect_get_num_copies (loop_vinfo, vectype);
8802 gcc_assert (ncopies >= 1);
8803 if (reduc_index && ncopies > 1)
8804 return false; /* FORNOW */
8806 cond_expr = gimple_assign_rhs1 (stmt);
8807 then_clause = gimple_assign_rhs2 (stmt);
8808 else_clause = gimple_assign_rhs3 (stmt);
8810 if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo,
8811 &comp_vectype, &dts[0], slp_node ? NULL : vectype)
8812 || !comp_vectype)
8813 return false;
8815 if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &dts[2], &vectype1))
8816 return false;
8817 if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &dts[3], &vectype2))
8818 return false;
8820 if (vectype1 && !useless_type_conversion_p (vectype, vectype1))
8821 return false;
8823 if (vectype2 && !useless_type_conversion_p (vectype, vectype2))
8824 return false;
8826 masked = !COMPARISON_CLASS_P (cond_expr);
8827 vec_cmp_type = build_same_sized_truth_vector_type (comp_vectype);
8829 if (vec_cmp_type == NULL_TREE)
8830 return false;
8832 cond_code = TREE_CODE (cond_expr);
8833 if (!masked)
8835 cond_expr0 = TREE_OPERAND (cond_expr, 0);
8836 cond_expr1 = TREE_OPERAND (cond_expr, 1);
8839 if (!masked && VECTOR_BOOLEAN_TYPE_P (comp_vectype))
8841 /* Boolean values may have another representation in vectors
8842 and therefore we prefer bit operations over comparison for
8843 them (which also works for scalar masks). We store opcodes
8844 to use in bitop1 and bitop2. Statement is vectorized as
8845 BITOP2 (rhs1 BITOP1 rhs2) or rhs1 BITOP2 (BITOP1 rhs2)
8846 depending on bitop1 and bitop2 arity. */
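          /* For example, for vector booleans a GT_EXPR a > b becomes
             a & ~b (bitop1 = BIT_NOT_EXPR applied to the second operand,
             bitop2 = BIT_AND_EXPR), while an EQ_EXPR a == b is emitted as
             a ^ b with the then and else clauses swapped instead of
             emitting the trailing BIT_NOT_EXPR.  */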
8847 switch (cond_code)
8849 case GT_EXPR:
8850 bitop1 = BIT_NOT_EXPR;
8851 bitop2 = BIT_AND_EXPR;
8852 break;
8853 case GE_EXPR:
8854 bitop1 = BIT_NOT_EXPR;
8855 bitop2 = BIT_IOR_EXPR;
8856 break;
8857 case LT_EXPR:
8858 bitop1 = BIT_NOT_EXPR;
8859 bitop2 = BIT_AND_EXPR;
8860 std::swap (cond_expr0, cond_expr1);
8861 break;
8862 case LE_EXPR:
8863 bitop1 = BIT_NOT_EXPR;
8864 bitop2 = BIT_IOR_EXPR;
8865 std::swap (cond_expr0, cond_expr1);
8866 break;
8867 case NE_EXPR:
8868 bitop1 = BIT_XOR_EXPR;
8869 break;
8870 case EQ_EXPR:
8871 bitop1 = BIT_XOR_EXPR;
8872 bitop2 = BIT_NOT_EXPR;
8873 break;
8874 default:
8875 return false;
8877 cond_code = SSA_NAME;
8880 if (!vec_stmt)
8882 if (bitop1 != NOP_EXPR)
8884 machine_mode mode = TYPE_MODE (comp_vectype);
8885 optab optab;
8887 optab = optab_for_tree_code (bitop1, comp_vectype, optab_default);
8888 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8889 return false;
8891 if (bitop2 != NOP_EXPR)
8893 optab = optab_for_tree_code (bitop2, comp_vectype,
8894 optab_default);
8895 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8896 return false;
8899 if (expand_vec_cond_expr_p (vectype, comp_vectype,
8900 cond_code))
8902 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
8903 vect_model_simple_cost (stmt_info, ncopies, dts, ndts, slp_node,
8904 cost_vec);
8905 return true;
8907 return false;
8910 /* Transform. */
8912 if (!slp_node)
8914 vec_oprnds0.create (1);
8915 vec_oprnds1.create (1);
8916 vec_oprnds2.create (1);
8917 vec_oprnds3.create (1);
8920 /* Handle def. */
8921 scalar_dest = gimple_assign_lhs (stmt);
8922 if (reduction_type != EXTRACT_LAST_REDUCTION)
8923 vec_dest = vect_create_destination_var (scalar_dest, vectype);
8925 /* Handle cond expr. */
8926 for (j = 0; j < ncopies; j++)
8928 stmt_vec_info new_stmt_info = NULL;
8929 if (j == 0)
8931 if (slp_node)
8933 auto_vec<tree, 4> ops;
8934 auto_vec<vec<tree>, 4> vec_defs;
8936 if (masked)
8937 ops.safe_push (cond_expr);
8938 else
8940 ops.safe_push (cond_expr0);
8941 ops.safe_push (cond_expr1);
8943 ops.safe_push (then_clause);
8944 ops.safe_push (else_clause);
8945 vect_get_slp_defs (ops, slp_node, &vec_defs);
8946 vec_oprnds3 = vec_defs.pop ();
8947 vec_oprnds2 = vec_defs.pop ();
8948 if (!masked)
8949 vec_oprnds1 = vec_defs.pop ();
8950 vec_oprnds0 = vec_defs.pop ();
8952 else
8954 if (masked)
8956 vec_cond_lhs
8957 = vect_get_vec_def_for_operand (cond_expr, stmt_info,
8958 comp_vectype);
8959 vect_is_simple_use (cond_expr, stmt_info->vinfo, &dts[0]);
8961 else
8963 vec_cond_lhs
8964 = vect_get_vec_def_for_operand (cond_expr0,
8965 stmt_info, comp_vectype);
8966 vect_is_simple_use (cond_expr0, loop_vinfo, &dts[0]);
8968 vec_cond_rhs
8969 = vect_get_vec_def_for_operand (cond_expr1,
8970 stmt_info, comp_vectype);
8971 vect_is_simple_use (cond_expr1, loop_vinfo, &dts[1]);
8973 if (reduc_index == 1)
8974 vec_then_clause = reduc_def;
8975 else
8977 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
8978 stmt_info);
8979 vect_is_simple_use (then_clause, loop_vinfo, &dts[2]);
8981 if (reduc_index == 2)
8982 vec_else_clause = reduc_def;
8983 else
8985 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
8986 stmt_info);
8987 vect_is_simple_use (else_clause, loop_vinfo, &dts[3]);
8991 else
8993 vec_cond_lhs
8994 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnds0.pop ());
8995 if (!masked)
8996 vec_cond_rhs
8997 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnds1.pop ());
8999 vec_then_clause = vect_get_vec_def_for_stmt_copy (vinfo,
9000 vec_oprnds2.pop ());
9001 vec_else_clause = vect_get_vec_def_for_stmt_copy (vinfo,
9002 vec_oprnds3.pop ());
9005 if (!slp_node)
9007 vec_oprnds0.quick_push (vec_cond_lhs);
9008 if (!masked)
9009 vec_oprnds1.quick_push (vec_cond_rhs);
9010 vec_oprnds2.quick_push (vec_then_clause);
9011 vec_oprnds3.quick_push (vec_else_clause);
9014 /* Arguments are ready. Create the new vector stmt. */
9015 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
9017 vec_then_clause = vec_oprnds2[i];
9018 vec_else_clause = vec_oprnds3[i];
9020 if (masked)
9021 vec_compare = vec_cond_lhs;
9022 else
9024 vec_cond_rhs = vec_oprnds1[i];
9025 if (bitop1 == NOP_EXPR)
9026 vec_compare = build2 (cond_code, vec_cmp_type,
9027 vec_cond_lhs, vec_cond_rhs);
9028 else
9030 new_temp = make_ssa_name (vec_cmp_type);
9031 gassign *new_stmt;
9032 if (bitop1 == BIT_NOT_EXPR)
9033 new_stmt = gimple_build_assign (new_temp, bitop1,
9034 vec_cond_rhs);
9035 else
9036 new_stmt
9037 = gimple_build_assign (new_temp, bitop1, vec_cond_lhs,
9038 vec_cond_rhs);
9039 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
9040 if (bitop2 == NOP_EXPR)
9041 vec_compare = new_temp;
9042 else if (bitop2 == BIT_NOT_EXPR)
9044 /* Instead of doing ~x ? y : z do x ? z : y. */
9045 vec_compare = new_temp;
9046 std::swap (vec_then_clause, vec_else_clause);
9048 else
9050 vec_compare = make_ssa_name (vec_cmp_type);
9051 new_stmt
9052 = gimple_build_assign (vec_compare, bitop2,
9053 vec_cond_lhs, new_temp);
9054 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
9058 if (reduction_type == EXTRACT_LAST_REDUCTION)
9060 if (!is_gimple_val (vec_compare))
9062 tree vec_compare_name = make_ssa_name (vec_cmp_type);
9063 gassign *new_stmt = gimple_build_assign (vec_compare_name,
9064 vec_compare);
9065 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
9066 vec_compare = vec_compare_name;
9068 gcc_assert (reduc_index == 2);
9069 gcall *new_stmt = gimple_build_call_internal
9070 (IFN_FOLD_EXTRACT_LAST, 3, else_clause, vec_compare,
9071 vec_then_clause);
9072 gimple_call_set_lhs (new_stmt, scalar_dest);
9073 SSA_NAME_DEF_STMT (scalar_dest) = new_stmt;
9074 if (stmt_info->stmt == gsi_stmt (*gsi))
9075 new_stmt_info = vect_finish_replace_stmt (stmt_info, new_stmt);
9076 else
9078 /* In this case we're moving the definition to later in the
9079 block. That doesn't matter because the only uses of the
9080 lhs are in phi statements. */
9081 gimple_stmt_iterator old_gsi
9082 = gsi_for_stmt (stmt_info->stmt);
9083 gsi_remove (&old_gsi, true);
9084 new_stmt_info
9085 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
9088 else
9090 new_temp = make_ssa_name (vec_dest);
9091 gassign *new_stmt
9092 = gimple_build_assign (new_temp, VEC_COND_EXPR, vec_compare,
9093 vec_then_clause, vec_else_clause);
9094 new_stmt_info
9095 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
9097 if (slp_node)
9098 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
9101 if (slp_node)
9102 continue;
9104 if (j == 0)
9105 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
9106 else
9107 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
9109 prev_stmt_info = new_stmt_info;
9112 vec_oprnds0.release ();
9113 vec_oprnds1.release ();
9114 vec_oprnds2.release ();
9115 vec_oprnds3.release ();
9117 return true;
9120 /* vectorizable_comparison.
9122 Check if STMT_INFO is comparison expression that can be vectorized.
9123 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
9124 comparison, put it in VEC_STMT, and insert it at GSI.
9126 Return true if STMT_INFO is vectorizable in this way. */
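/* Illustrative sketch (not part of the original source): for a scalar
   statement such as

     _Bool c_3 = a_1 < b_2;

   the transform below emits a vector comparison

     vect_c_3 = vect_a_1 < vect_b_2;

   where vect_c_3 has a vector boolean (mask) type, or a short sequence
   of bit operations when the operands are themselves vector booleans.  */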
9128 static bool
9129 vectorizable_comparison (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
9130 stmt_vec_info *vec_stmt, tree reduc_def,
9131 slp_tree slp_node, stmt_vector_for_cost *cost_vec)
9133 vec_info *vinfo = stmt_info->vinfo;
9134 tree lhs, rhs1, rhs2;
9135 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
9136 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
9137 tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE;
9138 tree new_temp;
9139 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
9140 enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type};
9141 int ndts = 2;
9142 poly_uint64 nunits;
9143 int ncopies;
9144 enum tree_code code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
9145 stmt_vec_info prev_stmt_info = NULL;
9146 int i, j;
9147 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
9148 vec<tree> vec_oprnds0 = vNULL;
9149 vec<tree> vec_oprnds1 = vNULL;
9150 tree mask_type;
9151 tree mask;
9153 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
9154 return false;
9156 if (!vectype || !VECTOR_BOOLEAN_TYPE_P (vectype))
9157 return false;
9159 mask_type = vectype;
9160 nunits = TYPE_VECTOR_SUBPARTS (vectype);
9162 if (slp_node)
9163 ncopies = 1;
9164 else
9165 ncopies = vect_get_num_copies (loop_vinfo, vectype);
9167 gcc_assert (ncopies >= 1);
9168 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
9169 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
9170 && reduc_def))
9171 return false;
9173 if (STMT_VINFO_LIVE_P (stmt_info))
9175 if (dump_enabled_p ())
9176 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9177 "value used after loop.\n");
9178 return false;
9181 gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
9182 if (!stmt)
9183 return false;
9185 code = gimple_assign_rhs_code (stmt);
9187 if (TREE_CODE_CLASS (code) != tcc_comparison)
9188 return false;
9190 rhs1 = gimple_assign_rhs1 (stmt);
9191 rhs2 = gimple_assign_rhs2 (stmt);
9193 if (!vect_is_simple_use (rhs1, stmt_info->vinfo, &dts[0], &vectype1))
9194 return false;
9196 if (!vect_is_simple_use (rhs2, stmt_info->vinfo, &dts[1], &vectype2))
9197 return false;
9199 if (vectype1 && vectype2
9200 && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1),
9201 TYPE_VECTOR_SUBPARTS (vectype2)))
9202 return false;
9204 vectype = vectype1 ? vectype1 : vectype2;
9206 /* Invariant comparison. */
9207 if (!vectype)
9209 vectype = get_vectype_for_scalar_type (TREE_TYPE (rhs1));
9210 if (maybe_ne (TYPE_VECTOR_SUBPARTS (vectype), nunits))
9211 return false;
9213 else if (maybe_ne (nunits, TYPE_VECTOR_SUBPARTS (vectype)))
9214 return false;
9216 /* Can't compare mask and non-mask types. */
9217 if (vectype1 && vectype2
9218 && (VECTOR_BOOLEAN_TYPE_P (vectype1) ^ VECTOR_BOOLEAN_TYPE_P (vectype2)))
9219 return false;
9221 /* Boolean values may have another representation in vectors
9222 and therefore we prefer bit operations over comparison for
9223 them (which also works for scalar masks).  We store the opcodes
9224 to use in bitop1 and bitop2.  The statement is vectorized as
9225 BITOP2 (rhs1 BITOP1 rhs2) or
9226 rhs1 BITOP2 (BITOP1 rhs2)
9227 depending on the arity of bitop1 and bitop2.  */
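/* For illustration, derived from the mapping just below: on boolean
   operands rhs1 > rhs2 becomes rhs1 & ~rhs2 (bitop1 = BIT_NOT_EXPR
   applied to rhs2, bitop2 = BIT_AND_EXPR), rhs1 >= rhs2 becomes
   rhs1 | ~rhs2, rhs1 == rhs2 becomes ~(rhs1 ^ rhs2), and rhs1 != rhs2
   is simply rhs1 ^ rhs2; LT and LE reuse the GT and GE forms with the
   operands swapped.  */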
9228 if (VECTOR_BOOLEAN_TYPE_P (vectype))
9230 if (code == GT_EXPR)
9232 bitop1 = BIT_NOT_EXPR;
9233 bitop2 = BIT_AND_EXPR;
9235 else if (code == GE_EXPR)
9237 bitop1 = BIT_NOT_EXPR;
9238 bitop2 = BIT_IOR_EXPR;
9240 else if (code == LT_EXPR)
9242 bitop1 = BIT_NOT_EXPR;
9243 bitop2 = BIT_AND_EXPR;
9244 std::swap (rhs1, rhs2);
9245 std::swap (dts[0], dts[1]);
9247 else if (code == LE_EXPR)
9249 bitop1 = BIT_NOT_EXPR;
9250 bitop2 = BIT_IOR_EXPR;
9251 std::swap (rhs1, rhs2);
9252 std::swap (dts[0], dts[1]);
9254 else
9256 bitop1 = BIT_XOR_EXPR;
9257 if (code == EQ_EXPR)
9258 bitop2 = BIT_NOT_EXPR;
9262 if (!vec_stmt)
9264 if (bitop1 == NOP_EXPR)
9266 if (!expand_vec_cmp_expr_p (vectype, mask_type, code))
9267 return false;
9269 else
9271 machine_mode mode = TYPE_MODE (vectype);
9272 optab optab;
9274 optab = optab_for_tree_code (bitop1, vectype, optab_default);
9275 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
9276 return false;
9278 if (bitop2 != NOP_EXPR)
9280 optab = optab_for_tree_code (bitop2, vectype, optab_default);
9281 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
9282 return false;
9286 STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type;
9287 vect_model_simple_cost (stmt_info, ncopies * (1 + (bitop2 != NOP_EXPR)),
9288 dts, ndts, slp_node, cost_vec);
9289 return true;
9292 /* Transform. */
9293 if (!slp_node)
9295 vec_oprnds0.create (1);
9296 vec_oprnds1.create (1);
9299 /* Handle def. */
9300 lhs = gimple_assign_lhs (stmt);
9301 mask = vect_create_destination_var (lhs, mask_type);
9303 /* Handle cmp expr. */
9304 for (j = 0; j < ncopies; j++)
9306 stmt_vec_info new_stmt_info = NULL;
9307 if (j == 0)
9309 if (slp_node)
9311 auto_vec<tree, 2> ops;
9312 auto_vec<vec<tree>, 2> vec_defs;
9314 ops.safe_push (rhs1);
9315 ops.safe_push (rhs2);
9316 vect_get_slp_defs (ops, slp_node, &vec_defs);
9317 vec_oprnds1 = vec_defs.pop ();
9318 vec_oprnds0 = vec_defs.pop ();
9320 else
9322 vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt_info,
9323 vectype);
9324 vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt_info,
9325 vectype);
9328 else
9330 vec_rhs1 = vect_get_vec_def_for_stmt_copy (vinfo,
9331 vec_oprnds0.pop ());
9332 vec_rhs2 = vect_get_vec_def_for_stmt_copy (vinfo,
9333 vec_oprnds1.pop ());
9336 if (!slp_node)
9338 vec_oprnds0.quick_push (vec_rhs1);
9339 vec_oprnds1.quick_push (vec_rhs2);
9342 /* Arguments are ready. Create the new vector stmt. */
9343 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_rhs1)
9345 vec_rhs2 = vec_oprnds1[i];
9347 new_temp = make_ssa_name (mask);
9348 if (bitop1 == NOP_EXPR)
9350 gassign *new_stmt = gimple_build_assign (new_temp, code,
9351 vec_rhs1, vec_rhs2);
9352 new_stmt_info
9353 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
9355 else
9357 gassign *new_stmt;
9358 if (bitop1 == BIT_NOT_EXPR)
9359 new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs2);
9360 else
9361 new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs1,
9362 vec_rhs2);
9363 new_stmt_info
9364 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
9365 if (bitop2 != NOP_EXPR)
9367 tree res = make_ssa_name (mask);
9368 if (bitop2 == BIT_NOT_EXPR)
9369 new_stmt = gimple_build_assign (res, bitop2, new_temp);
9370 else
9371 new_stmt = gimple_build_assign (res, bitop2, vec_rhs1,
9372 new_temp);
9373 new_stmt_info
9374 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
9377 if (slp_node)
9378 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
9381 if (slp_node)
9382 continue;
9384 if (j == 0)
9385 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
9386 else
9387 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
9389 prev_stmt_info = new_stmt_info;
9392 vec_oprnds0.release ();
9393 vec_oprnds1.release ();
9395 return true;
9398 /* If SLP_NODE is nonnull, return true if vectorizable_live_operation
9399 can handle all live statements in the node. Otherwise return true
9400 if STMT_INFO is not live or if vectorizable_live_operation can handle it.
9401 GSI and VEC_STMT are as for vectorizable_live_operation. */
9403 static bool
9404 can_vectorize_live_stmts (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
9405 slp_tree slp_node, stmt_vec_info *vec_stmt,
9406 stmt_vector_for_cost *cost_vec)
9408 if (slp_node)
9410 stmt_vec_info slp_stmt_info;
9411 unsigned int i;
9412 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node), i, slp_stmt_info)
9414 if (STMT_VINFO_LIVE_P (slp_stmt_info)
9415 && !vectorizable_live_operation (slp_stmt_info, gsi, slp_node, i,
9416 vec_stmt, cost_vec))
9417 return false;
9420 else if (STMT_VINFO_LIVE_P (stmt_info)
9421 && !vectorizable_live_operation (stmt_info, gsi, slp_node, -1,
9422 vec_stmt, cost_vec))
9423 return false;
9425 return true;
9428 /* Make sure the statement is vectorizable. */
9430 bool
9431 vect_analyze_stmt (stmt_vec_info stmt_info, bool *need_to_vectorize,
9432 slp_tree node, slp_instance node_instance,
9433 stmt_vector_for_cost *cost_vec)
9435 vec_info *vinfo = stmt_info->vinfo;
9436 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
9437 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
9438 bool ok;
9439 gimple_seq pattern_def_seq;
9441 if (dump_enabled_p ())
9443 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
9444 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
9447 if (gimple_has_volatile_ops (stmt_info->stmt))
9449 if (dump_enabled_p ())
9450 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9451 "not vectorized: stmt has volatile operands\n");
9453 return false;
9456 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
9457 && node == NULL
9458 && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
9460 gimple_stmt_iterator si;
9462 for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
9464 stmt_vec_info pattern_def_stmt_info
9465 = vinfo->lookup_stmt (gsi_stmt (si));
9466 if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
9467 || STMT_VINFO_LIVE_P (pattern_def_stmt_info))
9469 /* Analyze def stmt of STMT if it's a pattern stmt. */
9470 if (dump_enabled_p ())
9472 dump_printf_loc (MSG_NOTE, vect_location,
9473 "==> examining pattern def statement: ");
9474 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
9475 pattern_def_stmt_info->stmt, 0);
9478 if (!vect_analyze_stmt (pattern_def_stmt_info,
9479 need_to_vectorize, node, node_instance,
9480 cost_vec))
9481 return false;
9486 /* Skip stmts that do not need to be vectorized. In loops this is expected
9487 to include:
9488 - the COND_EXPR which is the loop exit condition
9489 - any LABEL_EXPRs in the loop
9490 - computations that are used only for array indexing or loop control.
9491 In basic blocks we only analyze statements that are part of some SLP
9492 instance; therefore, all the statements are relevant.
9494 A pattern statement needs to be analyzed instead of the original statement
9495 if the original statement is not relevant.  Otherwise, we analyze both
9496 statements.  In basic blocks we are called from some SLP instance
9497 traversal; don't analyze pattern stmts there, since the pattern stmts
9498 are already part of the SLP instance.  */
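/* Hypothetical example for illustration: if pattern recognition has
   replaced

     prod_4 = (int) a_1 * (int) b_2;

   by the widening-multiplication pattern statement prod_4 = a_1 w* b_2,
   the pattern statement is what gets analyzed here whenever the original
   statement is not itself relevant.  */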
9500 stmt_vec_info pattern_stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
9501 if (!STMT_VINFO_RELEVANT_P (stmt_info)
9502 && !STMT_VINFO_LIVE_P (stmt_info))
9504 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
9505 && pattern_stmt_info
9506 && (STMT_VINFO_RELEVANT_P (pattern_stmt_info)
9507 || STMT_VINFO_LIVE_P (pattern_stmt_info)))
9509 /* Analyze PATTERN_STMT instead of the original stmt. */
9510 stmt_info = pattern_stmt_info;
9511 if (dump_enabled_p ())
9513 dump_printf_loc (MSG_NOTE, vect_location,
9514 "==> examining pattern statement: ");
9515 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
9518 else
9520 if (dump_enabled_p ())
9521 dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");
9523 return true;
9526 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
9527 && node == NULL
9528 && pattern_stmt_info
9529 && (STMT_VINFO_RELEVANT_P (pattern_stmt_info)
9530 || STMT_VINFO_LIVE_P (pattern_stmt_info)))
9532 /* Analyze PATTERN_STMT too. */
9533 if (dump_enabled_p ())
9535 dump_printf_loc (MSG_NOTE, vect_location,
9536 "==> examining pattern statement: ");
9537 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt_info->stmt, 0);
9540 if (!vect_analyze_stmt (pattern_stmt_info, need_to_vectorize, node,
9541 node_instance, cost_vec))
9542 return false;
9545 switch (STMT_VINFO_DEF_TYPE (stmt_info))
9547 case vect_internal_def:
9548 break;
9550 case vect_reduction_def:
9551 case vect_nested_cycle:
9552 gcc_assert (!bb_vinfo
9553 && (relevance == vect_used_in_outer
9554 || relevance == vect_used_in_outer_by_reduction
9555 || relevance == vect_used_by_reduction
9556 || relevance == vect_unused_in_scope
9557 || relevance == vect_used_only_live));
9558 break;
9560 case vect_induction_def:
9561 gcc_assert (!bb_vinfo);
9562 break;
9564 case vect_constant_def:
9565 case vect_external_def:
9566 case vect_unknown_def_type:
9567 default:
9568 gcc_unreachable ();
9571 if (STMT_VINFO_RELEVANT_P (stmt_info))
9573 tree type = gimple_expr_type (stmt_info->stmt);
9574 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (type)));
9575 gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
9576 gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
9577 || (call && gimple_call_lhs (call) == NULL_TREE));
9578 *need_to_vectorize = true;
9581 if (PURE_SLP_STMT (stmt_info) && !node)
9583 dump_printf_loc (MSG_NOTE, vect_location,
9584 "handled only by SLP analysis\n");
9585 return true;
9588 ok = true;
9589 if (!bb_vinfo
9590 && (STMT_VINFO_RELEVANT_P (stmt_info)
9591 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
9592 ok = (vectorizable_simd_clone_call (stmt_info, NULL, NULL, node, cost_vec)
9593 || vectorizable_conversion (stmt_info, NULL, NULL, node, cost_vec)
9594 || vectorizable_shift (stmt_info, NULL, NULL, node, cost_vec)
9595 || vectorizable_operation (stmt_info, NULL, NULL, node, cost_vec)
9596 || vectorizable_assignment (stmt_info, NULL, NULL, node, cost_vec)
9597 || vectorizable_load (stmt_info, NULL, NULL, node, node_instance,
9598 cost_vec)
9599 || vectorizable_call (stmt_info, NULL, NULL, node, cost_vec)
9600 || vectorizable_store (stmt_info, NULL, NULL, node, cost_vec)
9601 || vectorizable_reduction (stmt_info, NULL, NULL, node,
9602 node_instance, cost_vec)
9603 || vectorizable_induction (stmt_info, NULL, NULL, node, cost_vec)
9604 || vectorizable_condition (stmt_info, NULL, NULL, NULL, 0, node,
9605 cost_vec)
9606 || vectorizable_comparison (stmt_info, NULL, NULL, NULL, node,
9607 cost_vec));
9608 else
9610 if (bb_vinfo)
9611 ok = (vectorizable_simd_clone_call (stmt_info, NULL, NULL, node,
9612 cost_vec)
9613 || vectorizable_conversion (stmt_info, NULL, NULL, node,
9614 cost_vec)
9615 || vectorizable_shift (stmt_info, NULL, NULL, node, cost_vec)
9616 || vectorizable_operation (stmt_info, NULL, NULL, node, cost_vec)
9617 || vectorizable_assignment (stmt_info, NULL, NULL, node,
9618 cost_vec)
9619 || vectorizable_load (stmt_info, NULL, NULL, node, node_instance,
9620 cost_vec)
9621 || vectorizable_call (stmt_info, NULL, NULL, node, cost_vec)
9622 || vectorizable_store (stmt_info, NULL, NULL, node, cost_vec)
9623 || vectorizable_condition (stmt_info, NULL, NULL, NULL, 0, node,
9624 cost_vec)
9625 || vectorizable_comparison (stmt_info, NULL, NULL, NULL, node,
9626 cost_vec));
9629 if (!ok)
9631 if (dump_enabled_p ())
9633 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9634 "not vectorized: relevant stmt not ");
9635 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
9636 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
9637 stmt_info->stmt, 0);
9640 return false;
9643 /* Stmts that are (also) "live" (i.e. used outside of the loop)
9644 need extra handling, except for vectorizable reductions. */
9645 if (!bb_vinfo
9646 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
9647 && !can_vectorize_live_stmts (stmt_info, NULL, node, NULL, cost_vec))
9649 if (dump_enabled_p ())
9651 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9652 "not vectorized: live stmt not supported: ");
9653 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
9654 stmt_info->stmt, 0);
9657 return false;
9660 return true;
9664 /* Function vect_transform_stmt.
9666 Create a vectorized stmt to replace STMT_INFO, and insert it at BSI. */
9668 bool
9669 vect_transform_stmt (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
9670 slp_tree slp_node, slp_instance slp_node_instance)
9672 vec_info *vinfo = stmt_info->vinfo;
9673 bool is_store = false;
9674 stmt_vec_info vec_stmt = NULL;
9675 bool done;
9677 gcc_assert (slp_node || !PURE_SLP_STMT (stmt_info));
9678 stmt_vec_info old_vec_stmt_info = STMT_VINFO_VEC_STMT (stmt_info);
9680 bool nested_p = (STMT_VINFO_LOOP_VINFO (stmt_info)
9681 && nested_in_vect_loop_p
9682 (LOOP_VINFO_LOOP (STMT_VINFO_LOOP_VINFO (stmt_info)),
9683 stmt_info));
9685 gimple *stmt = stmt_info->stmt;
9686 switch (STMT_VINFO_TYPE (stmt_info))
9688 case type_demotion_vec_info_type:
9689 case type_promotion_vec_info_type:
9690 case type_conversion_vec_info_type:
9691 done = vectorizable_conversion (stmt_info, gsi, &vec_stmt, slp_node,
9692 NULL);
9693 gcc_assert (done);
9694 break;
9696 case induc_vec_info_type:
9697 done = vectorizable_induction (stmt_info, gsi, &vec_stmt, slp_node,
9698 NULL);
9699 gcc_assert (done);
9700 break;
9702 case shift_vec_info_type:
9703 done = vectorizable_shift (stmt_info, gsi, &vec_stmt, slp_node, NULL);
9704 gcc_assert (done);
9705 break;
9707 case op_vec_info_type:
9708 done = vectorizable_operation (stmt_info, gsi, &vec_stmt, slp_node,
9709 NULL);
9710 gcc_assert (done);
9711 break;
9713 case assignment_vec_info_type:
9714 done = vectorizable_assignment (stmt_info, gsi, &vec_stmt, slp_node,
9715 NULL);
9716 gcc_assert (done);
9717 break;
9719 case load_vec_info_type:
9720 done = vectorizable_load (stmt_info, gsi, &vec_stmt, slp_node,
9721 slp_node_instance, NULL);
9722 gcc_assert (done);
9723 break;
9725 case store_vec_info_type:
9726 done = vectorizable_store (stmt_info, gsi, &vec_stmt, slp_node, NULL);
9727 gcc_assert (done);
9728 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
9730 /* In case of interleaving, the whole chain is vectorized when the
9731 last store in the chain is reached. Store stmts before the last
9732 one are skipped, and their vec_stmt_info shouldn't be freed
9733 in the meantime.  */
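/* Sketch only: for an interleaved group of two stores a[2*i] and
   a[2*i+1], reaching the second (last) store emits the vector code for
   the whole group, while transforming the first store alone produces
   nothing and must not free the group's stmt_vec_infos.  */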
9734 stmt_vec_info group_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
9735 if (DR_GROUP_STORE_COUNT (group_info) == DR_GROUP_SIZE (group_info))
9736 is_store = true;
9738 else
9739 is_store = true;
9740 break;
9742 case condition_vec_info_type:
9743 done = vectorizable_condition (stmt_info, gsi, &vec_stmt, NULL, 0,
9744 slp_node, NULL);
9745 gcc_assert (done);
9746 break;
9748 case comparison_vec_info_type:
9749 done = vectorizable_comparison (stmt_info, gsi, &vec_stmt, NULL,
9750 slp_node, NULL);
9751 gcc_assert (done);
9752 break;
9754 case call_vec_info_type:
9755 done = vectorizable_call (stmt_info, gsi, &vec_stmt, slp_node, NULL);
9756 stmt = gsi_stmt (*gsi);
9757 break;
9759 case call_simd_clone_vec_info_type:
9760 done = vectorizable_simd_clone_call (stmt_info, gsi, &vec_stmt,
9761 slp_node, NULL);
9762 stmt = gsi_stmt (*gsi);
9763 break;
9765 case reduc_vec_info_type:
9766 done = vectorizable_reduction (stmt_info, gsi, &vec_stmt, slp_node,
9767 slp_node_instance, NULL);
9768 gcc_assert (done);
9769 break;
9771 default:
9772 if (!STMT_VINFO_LIVE_P (stmt_info))
9774 if (dump_enabled_p ())
9775 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9776 "stmt not supported.\n");
9777 gcc_unreachable ();
9781 /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
9782 This would break hybrid SLP vectorization. */
9783 if (slp_node)
9784 gcc_assert (!vec_stmt
9785 && STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt_info);
9787 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
9788 is being vectorized, but outside the immediately enclosing loop. */
9789 if (vec_stmt
9790 && nested_p
9791 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
9792 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
9793 || STMT_VINFO_RELEVANT (stmt_info) ==
9794 vect_used_in_outer_by_reduction))
9796 struct loop *innerloop = LOOP_VINFO_LOOP (
9797 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
9798 imm_use_iterator imm_iter;
9799 use_operand_p use_p;
9800 tree scalar_dest;
9802 if (dump_enabled_p ())
9803 dump_printf_loc (MSG_NOTE, vect_location,
9804 "Record the vdef for outer-loop vectorization.\n");
9806 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
9807 (to be used when vectorizing outer-loop stmts that use the DEF of
9808 STMT). */
9809 if (gimple_code (stmt) == GIMPLE_PHI)
9810 scalar_dest = PHI_RESULT (stmt);
9811 else
9812 scalar_dest = gimple_get_lhs (stmt);
9814 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
9815 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
9817 stmt_vec_info exit_phi_info
9818 = vinfo->lookup_stmt (USE_STMT (use_p));
9819 STMT_VINFO_VEC_STMT (exit_phi_info) = vec_stmt;
9823 /* Handle stmts whose DEF is used outside the loop-nest that is
9824 being vectorized. */
9825 if (STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
9827 done = can_vectorize_live_stmts (stmt_info, gsi, slp_node, &vec_stmt,
9828 NULL);
9829 gcc_assert (done);
9832 if (vec_stmt)
9833 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
9835 return is_store;
9839 /* Remove a group of stores (for SLP or interleaving), free their
9840 stmt_vec_info. */
9842 void
9843 vect_remove_stores (stmt_vec_info first_stmt_info)
9845 vec_info *vinfo = first_stmt_info->vinfo;
9846 stmt_vec_info next_stmt_info = first_stmt_info;
9848 while (next_stmt_info)
9850 stmt_vec_info tmp = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
9851 next_stmt_info = vect_orig_stmt (next_stmt_info);
9852 /* Free the attached stmt_vec_info and remove the stmt. */
9853 vinfo->remove_stmt (next_stmt_info);
9854 next_stmt_info = tmp;
9858 /* Function get_vectype_for_scalar_type_and_size.
9860 Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
9861 by the target. */
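/* Illustrative, target-dependent example: on a target whose preferred
   SIMD mode for SImode is a 128-bit vector,

     get_vectype_for_scalar_type_and_size (integer_type_node, 0);

   would return a 4-element integer vector type, whereas passing
   SIZE == 32 would instead look for a 32-byte (8-element) vector mode
   and fail if the target has none.  */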
9863 tree
9864 get_vectype_for_scalar_type_and_size (tree scalar_type, poly_uint64 size)
9866 tree orig_scalar_type = scalar_type;
9867 scalar_mode inner_mode;
9868 machine_mode simd_mode;
9869 poly_uint64 nunits;
9870 tree vectype;
9872 if (!is_int_mode (TYPE_MODE (scalar_type), &inner_mode)
9873 && !is_float_mode (TYPE_MODE (scalar_type), &inner_mode))
9874 return NULL_TREE;
9876 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
9878 /* For vector types of elements whose mode precision doesn't
9879 match their type's precision we use an element type of mode
9880 precision.  The vectorization routines will have to make sure
9881 they support the proper result truncation/extension.
9882 We also make sure to build vector types with INTEGER_TYPE
9883 component type only. */
9884 if (INTEGRAL_TYPE_P (scalar_type)
9885 && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
9886 || TREE_CODE (scalar_type) != INTEGER_TYPE))
9887 scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
9888 TYPE_UNSIGNED (scalar_type));
9890 /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
9891 When the component mode passes the above test simply use a type
9892 corresponding to that mode. The theory is that any use that
9893 would cause problems with this will disable vectorization anyway. */
9894 else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
9895 && !INTEGRAL_TYPE_P (scalar_type))
9896 scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);
9898 /* We can't build a vector type of elements with alignment bigger than
9899 their size. */
9900 else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
9901 scalar_type = lang_hooks.types.type_for_mode (inner_mode,
9902 TYPE_UNSIGNED (scalar_type));
9904 /* If we fell back to using the mode, fail if there was
9905 no scalar type for it.  */
9906 if (scalar_type == NULL_TREE)
9907 return NULL_TREE;
9909 /* If no size was supplied, use the mode the target prefers.  Otherwise
9910 look up a vector mode of the specified size.  */
9911 if (known_eq (size, 0U))
9912 simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
9913 else if (!multiple_p (size, nbytes, &nunits)
9914 || !mode_for_vector (inner_mode, nunits).exists (&simd_mode))
9915 return NULL_TREE;
9916 /* NOTE: nunits == 1 is allowed to support single element vector types. */
9917 if (!multiple_p (GET_MODE_SIZE (simd_mode), nbytes, &nunits))
9918 return NULL_TREE;
9920 vectype = build_vector_type (scalar_type, nunits);
9922 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
9923 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
9924 return NULL_TREE;
9926 /* Re-attach the address-space qualifier if we canonicalized the scalar
9927 type. */
9928 if (TYPE_ADDR_SPACE (orig_scalar_type) != TYPE_ADDR_SPACE (vectype))
9929 return build_qualified_type
9930 (vectype, KEEP_QUAL_ADDR_SPACE (TYPE_QUALS (orig_scalar_type)));
9932 return vectype;
9935 poly_uint64 current_vector_size;
9937 /* Function get_vectype_for_scalar_type.
9939 Returns the vector type corresponding to SCALAR_TYPE as supported
9940 by the target. */
9942 tree
9943 get_vectype_for_scalar_type (tree scalar_type)
9945 tree vectype;
9946 vectype = get_vectype_for_scalar_type_and_size (scalar_type,
9947 current_vector_size);
9948 if (vectype
9949 && known_eq (current_vector_size, 0U))
9950 current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
9951 return vectype;
9954 /* Function get_mask_type_for_scalar_type.
9956 Returns the mask type corresponding to a result of comparison
9957 of vectors of specified SCALAR_TYPE as supported by target. */
9959 tree
9960 get_mask_type_for_scalar_type (tree scalar_type)
9962 tree vectype = get_vectype_for_scalar_type (scalar_type);
9964 if (!vectype)
9965 return NULL;
9967 return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype),
9968 current_vector_size);
9971 /* Function get_same_sized_vectype
9973 Returns a vector type corresponding to SCALAR_TYPE of size
9974 VECTOR_TYPE if supported by the target. */
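/* For illustration: if VECTOR_TYPE is a 16-byte vector of 4 ints, then
   get_same_sized_vectype (short_integer_type_node, vector_type) yields
   a 16-byte vector of 8 shorts, while a scalar boolean SCALAR_TYPE
   yields the same-sized truth (mask) vector type instead.  */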
9976 tree
9977 get_same_sized_vectype (tree scalar_type, tree vector_type)
9979 if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type))
9980 return build_same_sized_truth_vector_type (vector_type);
9982 return get_vectype_for_scalar_type_and_size
9983 (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
9986 /* Function vect_is_simple_use.
9988 Input:
9989 VINFO - the vect info of the loop or basic block that is being vectorized.
9990 OPERAND - operand in the loop or bb.
9991 Output:
9992 DEF_STMT_INFO_OUT (optional) - information about the defining stmt in
9993 case OPERAND is an SSA_NAME that is defined in the vectorizable region
9994 DEF_STMT_OUT (optional) - the defining stmt in case OPERAND is an SSA_NAME;
9995 the definition could be anywhere in the function
9996 DT - the type of definition
9998 Returns whether a stmt with OPERAND can be vectorized.
9999 For loops, supportable operands are constants, loop invariants, and operands
10000 that are defined by the current iteration of the loop. Unsupportable
10001 operands are those that are defined by a previous iteration of the loop (as
10002 is the case in reduction/induction computations).
10003 For basic blocks, supportable operands are constants and bb invariants.
10004 For now, operands defined outside the basic block are not supported. */
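/* Worked example (SSA names are illustrative): for a loop statement
   x_5 = y_3 + 7, operand 7 is classified as vect_constant_def, y_3
   defined by a statement inside the vectorized region as
   vect_internal_def (or a more specific internal kind), and an SSA name
   defined before the loop or by a default definition as
   vect_external_def.  */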
10006 bool
10007 vect_is_simple_use (tree operand, vec_info *vinfo, enum vect_def_type *dt,
10008 stmt_vec_info *def_stmt_info_out, gimple **def_stmt_out)
10010 if (def_stmt_info_out)
10011 *def_stmt_info_out = NULL;
10012 if (def_stmt_out)
10013 *def_stmt_out = NULL;
10014 *dt = vect_unknown_def_type;
10016 if (dump_enabled_p ())
10018 dump_printf_loc (MSG_NOTE, vect_location,
10019 "vect_is_simple_use: operand ");
10020 if (TREE_CODE (operand) == SSA_NAME
10021 && !SSA_NAME_IS_DEFAULT_DEF (operand))
10022 dump_gimple_expr (MSG_NOTE, TDF_SLIM, SSA_NAME_DEF_STMT (operand), 0);
10023 else
10024 dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
10027 if (CONSTANT_CLASS_P (operand))
10028 *dt = vect_constant_def;
10029 else if (is_gimple_min_invariant (operand))
10030 *dt = vect_external_def;
10031 else if (TREE_CODE (operand) != SSA_NAME)
10032 *dt = vect_unknown_def_type;
10033 else if (SSA_NAME_IS_DEFAULT_DEF (operand))
10034 *dt = vect_external_def;
10035 else
10037 gimple *def_stmt = SSA_NAME_DEF_STMT (operand);
10038 stmt_vec_info stmt_vinfo = vinfo->lookup_def (operand);
10039 if (!stmt_vinfo)
10040 *dt = vect_external_def;
10041 else
10043 stmt_vinfo = vect_stmt_to_vectorize (stmt_vinfo);
10044 def_stmt = stmt_vinfo->stmt;
10045 switch (gimple_code (def_stmt))
10047 case GIMPLE_PHI:
10048 case GIMPLE_ASSIGN:
10049 case GIMPLE_CALL:
10050 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
10051 break;
10052 default:
10053 *dt = vect_unknown_def_type;
10054 break;
10056 if (def_stmt_info_out)
10057 *def_stmt_info_out = stmt_vinfo;
10059 if (def_stmt_out)
10060 *def_stmt_out = def_stmt;
10063 if (dump_enabled_p ())
10065 dump_printf (MSG_NOTE, ", type of def: ");
10066 switch (*dt)
10068 case vect_uninitialized_def:
10069 dump_printf (MSG_NOTE, "uninitialized\n");
10070 break;
10071 case vect_constant_def:
10072 dump_printf (MSG_NOTE, "constant\n");
10073 break;
10074 case vect_external_def:
10075 dump_printf (MSG_NOTE, "external\n");
10076 break;
10077 case vect_internal_def:
10078 dump_printf (MSG_NOTE, "internal\n");
10079 break;
10080 case vect_induction_def:
10081 dump_printf (MSG_NOTE, "induction\n");
10082 break;
10083 case vect_reduction_def:
10084 dump_printf (MSG_NOTE, "reduction\n");
10085 break;
10086 case vect_double_reduction_def:
10087 dump_printf (MSG_NOTE, "double reduction\n");
10088 break;
10089 case vect_nested_cycle:
10090 dump_printf (MSG_NOTE, "nested cycle\n");
10091 break;
10092 case vect_unknown_def_type:
10093 dump_printf (MSG_NOTE, "unknown\n");
10094 break;
10098 if (*dt == vect_unknown_def_type)
10100 if (dump_enabled_p ())
10101 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10102 "Unsupported pattern.\n");
10103 return false;
10106 return true;
10109 /* Function vect_is_simple_use.
10111 Same as vect_is_simple_use but also determines the vector operand
10112 type of OPERAND and stores it to *VECTYPE. If the definition of
10113 OPERAND is vect_uninitialized_def, vect_constant_def or
10114 vect_external_def, *VECTYPE will be set to NULL_TREE and the caller
10115 is responsible for computing the best-suited vector type for the
10116 scalar operand.  */
10118 bool
10119 vect_is_simple_use (tree operand, vec_info *vinfo, enum vect_def_type *dt,
10120 tree *vectype, stmt_vec_info *def_stmt_info_out,
10121 gimple **def_stmt_out)
10123 stmt_vec_info def_stmt_info;
10124 gimple *def_stmt;
10125 if (!vect_is_simple_use (operand, vinfo, dt, &def_stmt_info, &def_stmt))
10126 return false;
10128 if (def_stmt_out)
10129 *def_stmt_out = def_stmt;
10130 if (def_stmt_info_out)
10131 *def_stmt_info_out = def_stmt_info;
10133 /* Now get a vector type if the def is internal, otherwise supply
10134 NULL_TREE and leave it up to the caller to figure out a proper
10135 type for the use stmt. */
10136 if (*dt == vect_internal_def
10137 || *dt == vect_induction_def
10138 || *dt == vect_reduction_def
10139 || *dt == vect_double_reduction_def
10140 || *dt == vect_nested_cycle)
10142 *vectype = STMT_VINFO_VECTYPE (def_stmt_info);
10143 gcc_assert (*vectype != NULL_TREE);
10144 if (dump_enabled_p ())
10146 dump_printf_loc (MSG_NOTE, vect_location,
10147 "vect_is_simple_use: vectype ");
10148 dump_generic_expr (MSG_NOTE, TDF_SLIM, *vectype);
10149 dump_printf (MSG_NOTE, "\n");
10152 else if (*dt == vect_uninitialized_def
10153 || *dt == vect_constant_def
10154 || *dt == vect_external_def)
10155 *vectype = NULL_TREE;
10156 else
10157 gcc_unreachable ();
10159 return true;
10163 /* Function supportable_widening_operation
10165 Check whether an operation represented by the code CODE is a
10166 widening operation that is supported by the target platform in
10167 vector form (i.e., when operating on arguments of type VECTYPE_IN
10168 producing a result of type VECTYPE_OUT).
10170 Widening operations we currently support are NOP (CONVERT), FLOAT,
10171 FIX_TRUNC and WIDEN_MULT. This function checks if these operations
10172 are supported by the target platform either directly (via vector
10173 tree-codes), or via target builtins.
10175 Output:
10176 - CODE1 and CODE2 are codes of vector operations to be used when
10177 vectorizing the operation, if available.
10178 - MULTI_STEP_CVT determines the number of required intermediate steps in
10179 case of multi-step conversion (like char->short->int - in that case
10180 MULTI_STEP_CVT will be 1).
10181 - INTERM_TYPES contains the intermediate type required to perform the
10182 widening operation (short in the above example). */
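/* Sketch of a typical result, assuming a 128-bit little-endian target:
   widening a V16QI input to a V4SI output via a NOP conversion sets
   *CODE1/*CODE2 to VEC_UNPACK_LO_EXPR/VEC_UNPACK_HI_EXPR, pushes the
   intermediate V8HI type onto INTERM_TYPES and sets *MULTI_STEP_CVT
   to 1, provided the target implements each unpack step.  */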
10184 bool
10185 supportable_widening_operation (enum tree_code code, stmt_vec_info stmt_info,
10186 tree vectype_out, tree vectype_in,
10187 enum tree_code *code1, enum tree_code *code2,
10188 int *multi_step_cvt,
10189 vec<tree> *interm_types)
10191 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
10192 struct loop *vect_loop = NULL;
10193 machine_mode vec_mode;
10194 enum insn_code icode1, icode2;
10195 optab optab1, optab2;
10196 tree vectype = vectype_in;
10197 tree wide_vectype = vectype_out;
10198 enum tree_code c1, c2;
10199 int i;
10200 tree prev_type, intermediate_type;
10201 machine_mode intermediate_mode, prev_mode;
10202 optab optab3, optab4;
10204 *multi_step_cvt = 0;
10205 if (loop_info)
10206 vect_loop = LOOP_VINFO_LOOP (loop_info);
10208 switch (code)
10210 case WIDEN_MULT_EXPR:
10211 /* The result of a vectorized widening operation usually requires
10212 two vectors (because the widened results do not fit into one vector).
10213 The generated vector results would normally be expected to be
10214 generated in the same order as in the original scalar computation,
10215 i.e. if 8 results are generated in each vector iteration, they are
10216 to be organized as follows:
10217 vect1: [res1,res2,res3,res4],
10218 vect2: [res5,res6,res7,res8].
10220 However, in the special case that the result of the widening
10221 operation is used in a reduction computation only, the order doesn't
10222 matter (because when vectorizing a reduction we change the order of
10223 the computation). Some targets can take advantage of this and
10224 generate more efficient code. For example, targets like Altivec,
10225 that support widen_mult using a sequence of {mult_even,mult_odd}
10226 generate the following vectors:
10227 vect1: [res1,res3,res5,res7],
10228 vect2: [res2,res4,res6,res8].
10230 When vectorizing outer-loops, we execute the inner-loop sequentially
10231 (each vectorized inner-loop iteration contributes to VF outer-loop
10232 iterations in parallel).  We therefore don't allow changing the
10233 order of the computation in the inner-loop during outer-loop
10234 vectorization. */
10235 /* TODO: Another case in which order doesn't *really* matter is when we
10236 widen and then contract again, e.g. (short)((int)x * y >> 8).
10237 Normally, pack_trunc performs an even/odd permute, whereas the
10238 repack from an even/odd expansion would be an interleave, which
10239 would be significantly simpler for e.g. AVX2. */
10240 /* In any case, in order to avoid duplicating the code below, recurse
10241 on VEC_WIDEN_MULT_EVEN_EXPR. If it succeeds, all the return values
10242 are properly set up for the caller. If we fail, we'll continue with
10243 a VEC_WIDEN_MULT_LO/HI_EXPR check. */
10244 if (vect_loop
10245 && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
10246 && !nested_in_vect_loop_p (vect_loop, stmt_info)
10247 && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
10248 stmt_info, vectype_out,
10249 vectype_in, code1, code2,
10250 multi_step_cvt, interm_types))
10252 /* Elements in a vector with the vect_used_by_reduction property cannot
10253 be reordered if the use chain with this property does not have the
10254 same operation.  One such example is s += a * b, where elements
10255 in a and b cannot be reordered. Here we check if the vector defined
10256 by STMT is only directly used in the reduction statement. */
10257 tree lhs = gimple_assign_lhs (stmt_info->stmt);
10258 stmt_vec_info use_stmt_info = loop_info->lookup_single_use (lhs);
10259 if (use_stmt_info
10260 && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
10261 return true;
10263 c1 = VEC_WIDEN_MULT_LO_EXPR;
10264 c2 = VEC_WIDEN_MULT_HI_EXPR;
10265 break;
10267 case DOT_PROD_EXPR:
10268 c1 = DOT_PROD_EXPR;
10269 c2 = DOT_PROD_EXPR;
10270 break;
10272 case SAD_EXPR:
10273 c1 = SAD_EXPR;
10274 c2 = SAD_EXPR;
10275 break;
10277 case VEC_WIDEN_MULT_EVEN_EXPR:
10278 /* Support the recursion induced just above. */
10279 c1 = VEC_WIDEN_MULT_EVEN_EXPR;
10280 c2 = VEC_WIDEN_MULT_ODD_EXPR;
10281 break;
10283 case WIDEN_LSHIFT_EXPR:
10284 c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
10285 c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
10286 break;
10288 CASE_CONVERT:
10289 c1 = VEC_UNPACK_LO_EXPR;
10290 c2 = VEC_UNPACK_HI_EXPR;
10291 break;
10293 case FLOAT_EXPR:
10294 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
10295 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
10296 break;
10298 case FIX_TRUNC_EXPR:
10299 c1 = VEC_UNPACK_FIX_TRUNC_LO_EXPR;
10300 c2 = VEC_UNPACK_FIX_TRUNC_HI_EXPR;
10301 break;
10303 default:
10304 gcc_unreachable ();
10307 if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
10308 std::swap (c1, c2);
10310 if (code == FIX_TRUNC_EXPR)
10312 /* The signedness is determined from the output operand.  */
10313 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
10314 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
10316 else
10318 optab1 = optab_for_tree_code (c1, vectype, optab_default);
10319 optab2 = optab_for_tree_code (c2, vectype, optab_default);
10322 if (!optab1 || !optab2)
10323 return false;
10325 vec_mode = TYPE_MODE (vectype);
10326 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
10327 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
10328 return false;
10330 *code1 = c1;
10331 *code2 = c2;
10333 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
10334 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
10335 /* For scalar masks we may have different boolean
10336 vector types having the same QImode. Thus we
10337 add an additional check on the number of elements.  */
10338 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
10339 || known_eq (TYPE_VECTOR_SUBPARTS (vectype),
10340 TYPE_VECTOR_SUBPARTS (wide_vectype) * 2));
10342 /* Check if it's a multi-step conversion that can be done using intermediate
10343 types. */
10345 prev_type = vectype;
10346 prev_mode = vec_mode;
10348 if (!CONVERT_EXPR_CODE_P (code))
10349 return false;
10351 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
10352 intermediate steps in the promotion sequence.  We try
10353 MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
10354 not.  */
10355 interm_types->create (MAX_INTERM_CVT_STEPS);
10356 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
10358 intermediate_mode = insn_data[icode1].operand[0].mode;
10359 if (VECTOR_BOOLEAN_TYPE_P (prev_type))
10361 intermediate_type = vect_halve_mask_nunits (prev_type);
10362 if (intermediate_mode != TYPE_MODE (intermediate_type))
10363 return false;
10365 else
10366 intermediate_type
10367 = lang_hooks.types.type_for_mode (intermediate_mode,
10368 TYPE_UNSIGNED (prev_type));
10370 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
10371 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
10373 if (!optab3 || !optab4
10374 || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
10375 || insn_data[icode1].operand[0].mode != intermediate_mode
10376 || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
10377 || insn_data[icode2].operand[0].mode != intermediate_mode
10378 || ((icode1 = optab_handler (optab3, intermediate_mode))
10379 == CODE_FOR_nothing)
10380 || ((icode2 = optab_handler (optab4, intermediate_mode))
10381 == CODE_FOR_nothing))
10382 break;
10384 interm_types->quick_push (intermediate_type);
10385 (*multi_step_cvt)++;
10387 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
10388 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
10389 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
10390 || known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type),
10391 TYPE_VECTOR_SUBPARTS (wide_vectype) * 2));
10393 prev_type = intermediate_type;
10394 prev_mode = intermediate_mode;
10397 interm_types->release ();
10398 return false;
10402 /* Function supportable_narrowing_operation
10404 Check whether an operation represented by the code CODE is a
10405 narrowing operation that is supported by the target platform in
10406 vector form (i.e., when operating on arguments of type VECTYPE_IN
10407 and producing a result of type VECTYPE_OUT).
10409 Narrowing operations we currently support are NOP (CONVERT), FIX_TRUNC
10410 and FLOAT. This function checks if these operations are supported by
10411 the target platform directly via vector tree-codes.
10413 Output:
10414 - CODE1 is the code of a vector operation to be used when
10415 vectorizing the operation, if available.
10416 - MULTI_STEP_CVT determines the number of required intermediate steps in
10417 case of multi-step conversion (like int->short->char - in that case
10418 MULTI_STEP_CVT will be 1).
10419 - INTERM_TYPES contains the intermediate type required to perform the
10420 narrowing operation (short in the above example). */
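/* Sketch of a typical result, assuming a 128-bit target: narrowing a
   V4SI input to a V16QI output via a NOP conversion sets *CODE1 to
   VEC_PACK_TRUNC_EXPR, pushes the intermediate V8HI type onto
   INTERM_TYPES and sets *MULTI_STEP_CVT to 1, provided the target
   implements each pack step.  */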
10422 bool
10423 supportable_narrowing_operation (enum tree_code code,
10424 tree vectype_out, tree vectype_in,
10425 enum tree_code *code1, int *multi_step_cvt,
10426 vec<tree> *interm_types)
10428 machine_mode vec_mode;
10429 enum insn_code icode1;
10430 optab optab1, interm_optab;
10431 tree vectype = vectype_in;
10432 tree narrow_vectype = vectype_out;
10433 enum tree_code c1;
10434 tree intermediate_type, prev_type;
10435 machine_mode intermediate_mode, prev_mode;
10436 int i;
10437 bool uns;
10439 *multi_step_cvt = 0;
10440 switch (code)
10442 CASE_CONVERT:
10443 c1 = VEC_PACK_TRUNC_EXPR;
10444 break;
10446 case FIX_TRUNC_EXPR:
10447 c1 = VEC_PACK_FIX_TRUNC_EXPR;
10448 break;
10450 case FLOAT_EXPR:
10451 c1 = VEC_PACK_FLOAT_EXPR;
10452 break;
10454 default:
10455 gcc_unreachable ();
10458 if (code == FIX_TRUNC_EXPR)
10459 /* The signedness is determined from the output operand.  */
10460 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
10461 else
10462 optab1 = optab_for_tree_code (c1, vectype, optab_default);
10464 if (!optab1)
10465 return false;
10467 vec_mode = TYPE_MODE (vectype);
10468 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
10469 return false;
10471 *code1 = c1;
10473 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
10474 /* For scalar masks we may have different boolean
10475 vector types having the same QImode. Thus we
10476 add an additional check on the number of elements.  */
10477 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
10478 || known_eq (TYPE_VECTOR_SUBPARTS (vectype) * 2,
10479 TYPE_VECTOR_SUBPARTS (narrow_vectype)));
10481 if (code == FLOAT_EXPR)
10482 return false;
10484 /* Check if it's a multi-step conversion that can be done using intermediate
10485 types. */
10486 prev_mode = vec_mode;
10487 prev_type = vectype;
10488 if (code == FIX_TRUNC_EXPR)
10489 uns = TYPE_UNSIGNED (vectype_out);
10490 else
10491 uns = TYPE_UNSIGNED (vectype);
10493 /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
10494 conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
10495 costly than signed. */
10496 if (code == FIX_TRUNC_EXPR && uns)
10498 enum insn_code icode2;
10500 intermediate_type
10501 = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
10502 interm_optab
10503 = optab_for_tree_code (c1, intermediate_type, optab_default);
10504 if (interm_optab != unknown_optab
10505 && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
10506 && insn_data[icode1].operand[0].mode
10507 == insn_data[icode2].operand[0].mode)
10509 uns = false;
10510 optab1 = interm_optab;
10511 icode1 = icode2;
10515 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
10516 intermediate steps in the narrowing sequence.  We try
10517 MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not.  */
10518 interm_types->create (MAX_INTERM_CVT_STEPS);
10519 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
10521 intermediate_mode = insn_data[icode1].operand[0].mode;
10522 if (VECTOR_BOOLEAN_TYPE_P (prev_type))
10524 intermediate_type = vect_double_mask_nunits (prev_type);
10525 if (intermediate_mode != TYPE_MODE (intermediate_type))
10526 return false;
10528 else
10529 intermediate_type
10530 = lang_hooks.types.type_for_mode (intermediate_mode, uns);
10531 interm_optab
10532 = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
10533 optab_default);
10534 if (!interm_optab
10535 || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
10536 || insn_data[icode1].operand[0].mode != intermediate_mode
10537 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
10538 == CODE_FOR_nothing))
10539 break;
10541 interm_types->quick_push (intermediate_type);
10542 (*multi_step_cvt)++;
10544 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
10545 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
10546 || known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type) * 2,
10547 TYPE_VECTOR_SUBPARTS (narrow_vectype)));
10549 prev_mode = intermediate_mode;
10550 prev_type = intermediate_type;
10551 optab1 = interm_optab;
10554 interm_types->release ();
10555 return false;
10558 /* Generate and return a statement that sets vector mask MASK such that
10559 MASK[I] is true iff J + START_INDEX < END_INDEX for all J <= I. */
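/* Worked example: with START_INDEX = 3 and END_INDEX = 6, lane I of
   MASK is true exactly when I + 3 < 6, so an 8-lane mask (lane count
   chosen for illustration) would be { 1, 1, 1, 0, 0, 0, 0, 0 }.  */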
10561 gcall *
10562 vect_gen_while (tree mask, tree start_index, tree end_index)
10564 tree cmp_type = TREE_TYPE (start_index);
10565 tree mask_type = TREE_TYPE (mask);
10566 gcc_checking_assert (direct_internal_fn_supported_p (IFN_WHILE_ULT,
10567 cmp_type, mask_type,
10568 OPTIMIZE_FOR_SPEED));
10569 gcall *call = gimple_build_call_internal (IFN_WHILE_ULT, 3,
10570 start_index, end_index,
10571 build_zero_cst (mask_type));
10572 gimple_call_set_lhs (call, mask);
10573 return call;
10576 /* Generate a vector mask of type MASK_TYPE for which index I is false iff
10577 J + START_INDEX < END_INDEX for all J <= I. Add the statements to SEQ. */
10579 tree
10580 vect_gen_while_not (gimple_seq *seq, tree mask_type, tree start_index,
10581 tree end_index)
10583 tree tmp = make_ssa_name (mask_type);
10584 gcall *call = vect_gen_while (tmp, start_index, end_index);
10585 gimple_seq_add_stmt (seq, call);
10586 return gimple_build (seq, BIT_NOT_EXPR, mask_type, tmp);
10589 /* Try to compute the vector types required to vectorize STMT_INFO,
10590 returning true on success and false if vectorization isn't possible.
10592 On success:
10594 - Set *STMT_VECTYPE_OUT to:
10595 - NULL_TREE if the statement doesn't need to be vectorized;
10596 - boolean_type_node if the statement is a boolean operation whose
10597 vector type can only be determined once all the other vector types
10598 are known; and
10599 - the equivalent of STMT_VINFO_VECTYPE otherwise.
10601 - Set *NUNITS_VECTYPE_OUT to the vector type that contains the maximum
10602 number of units needed to vectorize STMT_INFO, or NULL_TREE if the
10603 statement does not help to determine the overall number of units. */
10605 bool
10606 vect_get_vector_types_for_stmt (stmt_vec_info stmt_info,
10607 tree *stmt_vectype_out,
10608 tree *nunits_vectype_out)
10610 gimple *stmt = stmt_info->stmt;
10612 *stmt_vectype_out = NULL_TREE;
10613 *nunits_vectype_out = NULL_TREE;
10615 if (gimple_get_lhs (stmt) == NULL_TREE
10616 /* MASK_STORE has no lhs, but is ok. */
10617 && !gimple_call_internal_p (stmt, IFN_MASK_STORE))
10619 if (is_a <gcall *> (stmt))
10621 /* Ignore calls with no lhs. These must be calls to
10622 #pragma omp simd functions, and the vectorization factor
10623 they really need can't be determined until
10624 vectorizable_simd_clone_call.  */
10625 if (dump_enabled_p ())
10626 dump_printf_loc (MSG_NOTE, vect_location,
10627 "defer to SIMD clone analysis.\n");
10628 return true;
10631 if (dump_enabled_p ())
10633 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10634 "not vectorized: irregular stmt.");
10635 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
10637 return false;
10640 if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
10642 if (dump_enabled_p ())
10644 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10645 "not vectorized: vector stmt in loop:");
10646 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
10648 return false;
10651 tree vectype;
10652 tree scalar_type = NULL_TREE;
10653 if (STMT_VINFO_VECTYPE (stmt_info))
10654 *stmt_vectype_out = vectype = STMT_VINFO_VECTYPE (stmt_info);
10655 else
10657 gcc_assert (!STMT_VINFO_DATA_REF (stmt_info));
10658 if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
10659 scalar_type = TREE_TYPE (gimple_call_arg (stmt, 3));
10660 else
10661 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
10663 /* Pure bool ops don't participate in number-of-units computation.
10664 For comparisons use the types being compared. */
10665 if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type)
10666 && is_gimple_assign (stmt)
10667 && gimple_assign_rhs_code (stmt) != COND_EXPR)
10669 *stmt_vectype_out = boolean_type_node;
10671 tree rhs1 = gimple_assign_rhs1 (stmt);
10672 if (TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison
10673 && !VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (rhs1)))
10674 scalar_type = TREE_TYPE (rhs1);
10675 else
10677 if (dump_enabled_p ())
10678 dump_printf_loc (MSG_NOTE, vect_location,
10679 "pure bool operation.\n");
10680 return true;
10684 if (dump_enabled_p ())
10686 dump_printf_loc (MSG_NOTE, vect_location,
10687 "get vectype for scalar type: ");
10688 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
10689 dump_printf (MSG_NOTE, "\n");
10691 vectype = get_vectype_for_scalar_type (scalar_type);
10692 if (!vectype)
10694 if (dump_enabled_p ())
10696 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10697 "not vectorized: unsupported data-type ");
10698 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
10699 scalar_type);
10700 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
10702 return false;
10705 if (!*stmt_vectype_out)
10706 *stmt_vectype_out = vectype;
10708 if (dump_enabled_p ())
10710 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
10711 dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
10712 dump_printf (MSG_NOTE, "\n");
10716 /* Don't try to compute scalar types if the stmt produces a boolean
10717 vector; use the existing vector type instead. */
10718 tree nunits_vectype;
10719 if (VECTOR_BOOLEAN_TYPE_P (vectype))
10720 nunits_vectype = vectype;
10721 else
10723 /* The number of units is set according to the smallest scalar
10724 type (or the largest vector size, but we only support one
10725 vector size per vectorization). */
10726 if (*stmt_vectype_out != boolean_type_node)
10728 HOST_WIDE_INT dummy;
10729 scalar_type = vect_get_smallest_scalar_type (stmt_info,
10730 &dummy, &dummy);
10732 if (dump_enabled_p ())
10734 dump_printf_loc (MSG_NOTE, vect_location,
10735 "get vectype for scalar type: ");
10736 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
10737 dump_printf (MSG_NOTE, "\n");
10739 nunits_vectype = get_vectype_for_scalar_type (scalar_type);
10741 if (!nunits_vectype)
10743 if (dump_enabled_p ())
10745 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10746 "not vectorized: unsupported data-type ");
10747 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type);
10748 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
10750 return false;
10753 if (maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype)),
10754 GET_MODE_SIZE (TYPE_MODE (nunits_vectype))))
10756 if (dump_enabled_p ())
10758 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10759 "not vectorized: different sized vector "
10760 "types in statement, ");
10761 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, vectype);
10762 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
10763 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, nunits_vectype);
10764 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
10766 return false;
10769 if (dump_enabled_p ())
10771 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
10772 dump_generic_expr (MSG_NOTE, TDF_SLIM, nunits_vectype);
10773 dump_printf (MSG_NOTE, "\n");
10775 dump_printf_loc (MSG_NOTE, vect_location, "nunits = ");
10776 dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (nunits_vectype));
10777 dump_printf (MSG_NOTE, "\n");
10780 *nunits_vectype_out = nunits_vectype;
10781 return true;
10784 /* Try to determine the correct vector type for STMT_INFO, which is a
10785 statement that produces a scalar boolean result. Return the vector
10786 type on success, otherwise return NULL_TREE. */
10788 tree
10789 vect_get_mask_type_for_stmt (stmt_vec_info stmt_info)
10791 gimple *stmt = stmt_info->stmt;
10792 tree mask_type = NULL;
10793 tree vectype, scalar_type;
10795 if (is_gimple_assign (stmt)
10796 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison
10797 && !VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt))))
10799 scalar_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
10800 mask_type = get_mask_type_for_scalar_type (scalar_type);
10802 if (!mask_type)
10804 if (dump_enabled_p ())
10805 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10806 "not vectorized: unsupported mask\n");
10807 return NULL_TREE;
10810 else
10812 tree rhs;
10813 ssa_op_iter iter;
10814 enum vect_def_type dt;
10816 FOR_EACH_SSA_TREE_OPERAND (rhs, stmt, iter, SSA_OP_USE)
10818 if (!vect_is_simple_use (rhs, stmt_info->vinfo, &dt, &vectype))
10820 if (dump_enabled_p ())
10822 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10823 "not vectorized: can't compute mask type "
10824 "for statement, ");
10825 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt,
10828 return NULL_TREE;
10831 /* No vectype probably means external definition.
10832 Allow it in case there is another operand which
10833 allows us to determine the mask type.  */
10834 if (!vectype)
10835 continue;
10837 if (!mask_type)
10838 mask_type = vectype;
10839 else if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_type),
10840 TYPE_VECTOR_SUBPARTS (vectype)))
10842 if (dump_enabled_p ())
10844 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10845 "not vectorized: different sized masks "
10846 "types in statement, ");
10847 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
10848 mask_type);
10849 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
10850 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
10851 vectype);
10852 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
10854 return NULL_TREE;
10856 else if (VECTOR_BOOLEAN_TYPE_P (mask_type)
10857 != VECTOR_BOOLEAN_TYPE_P (vectype))
10859 if (dump_enabled_p ())
10861 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10862 "not vectorized: mixed mask and "
10863 "nonmask vector types in statement, ");
10864 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
10865 mask_type);
10866 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
10867 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
10868 vectype);
10869 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
10871 return NULL_TREE;
10875 /* We may compare a boolean value loaded as a vector of integers.
10876 Fix mask_type in such a case.  */
10877 if (mask_type
10878 && !VECTOR_BOOLEAN_TYPE_P (mask_type)
10879 && gimple_code (stmt) == GIMPLE_ASSIGN
10880 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
10881 mask_type = build_same_sized_truth_vector_type (mask_type);
10884 /* A missing mask_type should mean a loop-invariant predicate.
10885 This is probably a subject for optimization in if-conversion.  */
10886 if (!mask_type && dump_enabled_p ())
10888 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10889 "not vectorized: can't compute mask type "
10890 "for statement, ");
10891 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
10893 return mask_type;