gcc/tree-vect-stmts.c
1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003-2018 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
4 and Ira Rosen <irar@il.ibm.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "ssa.h"
31 #include "optabs-tree.h"
32 #include "insn-config.h"
33 #include "recog.h" /* FIXME: for insn_data */
34 #include "cgraph.h"
35 #include "dumpfile.h"
36 #include "alias.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
39 #include "tree-eh.h"
40 #include "gimplify.h"
41 #include "gimple-iterator.h"
42 #include "gimplify-me.h"
43 #include "tree-cfg.h"
44 #include "tree-ssa-loop-manip.h"
45 #include "cfgloop.h"
46 #include "tree-ssa-loop.h"
47 #include "tree-scalar-evolution.h"
48 #include "tree-vectorizer.h"
49 #include "builtins.h"
50 #include "internal-fn.h"
51 #include "tree-vector-builder.h"
52 #include "vec-perm-indices.h"
53 #include "tree-ssa-loop-niter.h"
54 #include "gimple-fold.h"
56 /* For lang_hooks.types.type_for_mode. */
57 #include "langhooks.h"
59 /* Return the vectorized type for the given statement. */
61 tree
62 stmt_vectype (struct _stmt_vec_info *stmt_info)
64 return STMT_VINFO_VECTYPE (stmt_info);
67 /* Return TRUE iff the given statement is in an inner loop relative to
68 the loop being vectorized. */
69 bool
70 stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
72 gimple *stmt = STMT_VINFO_STMT (stmt_info);
73 basic_block bb = gimple_bb (stmt);
74 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
75 struct loop* loop;
77 if (!loop_vinfo)
78 return false;
80 loop = LOOP_VINFO_LOOP (loop_vinfo);
82 return (bb->loop_father == loop->inner);
85 /* Record the cost of a statement, either by directly informing the
86 target model or by saving it in a vector for later processing.
87 Return a preliminary estimate of the statement's cost. */
89 unsigned
90 record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
91 enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
92 int misalign, enum vect_cost_model_location where)
94 if ((kind == vector_load || kind == unaligned_load)
95 && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
96 kind = vector_gather_load;
97 if ((kind == vector_store || kind == unaligned_store)
98 && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
99 kind = vector_scatter_store;
101 stmt_info_for_cost si = { count, kind, where, stmt_info, misalign };
102 body_cost_vec->safe_push (si);
104 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
105 return (unsigned)
106 (builtin_vectorization_cost (kind, vectype, misalign) * count);
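/* As a rough illustration of how this is used (hypothetical names): a
   caller costing one unaligned vector store in the loop body might do

     unsigned c = record_stmt_cost (cost_vec, 1, unaligned_store,
                                    stmt_info, DR_MISALIGNMENT (dr_info),
                                    vect_body);

   The entry is pushed onto the cost vector for the target cost model to
   process later; the return value is only a preliminary estimate.  */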
109 /* Return a variable of type ELEM_TYPE[NELEMS]. */
111 static tree
112 create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
114 return create_tmp_var (build_array_type_nelts (elem_type, nelems),
115 "vect_array");
118 /* ARRAY is an array of vectors created by create_vector_array.
119 Return an SSA_NAME for the vector in index N. The reference
120 is part of the vectorization of STMT_INFO and the vector is associated
121 with scalar destination SCALAR_DEST. */
123 static tree
124 read_vector_array (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
125 tree scalar_dest, tree array, unsigned HOST_WIDE_INT n)
127 tree vect_type, vect, vect_name, array_ref;
128 gimple *new_stmt;
130 gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
131 vect_type = TREE_TYPE (TREE_TYPE (array));
132 vect = vect_create_destination_var (scalar_dest, vect_type);
133 array_ref = build4 (ARRAY_REF, vect_type, array,
134 build_int_cst (size_type_node, n),
135 NULL_TREE, NULL_TREE);
137 new_stmt = gimple_build_assign (vect, array_ref);
138 vect_name = make_ssa_name (vect, new_stmt);
139 gimple_assign_set_lhs (new_stmt, vect_name);
140 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
142 return vect_name;
145 /* ARRAY is an array of vectors created by create_vector_array.
146 Emit code to store SSA_NAME VECT in index N of the array.
147 The store is part of the vectorization of STMT_INFO. */
149 static void
150 write_vector_array (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
151 tree vect, tree array, unsigned HOST_WIDE_INT n)
153 tree array_ref;
154 gimple *new_stmt;
156 array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
157 build_int_cst (size_type_node, n),
158 NULL_TREE, NULL_TREE);
160 new_stmt = gimple_build_assign (array_ref, vect);
161 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
164 /* PTR is a pointer to an array of type TYPE. Return a representation
165 of *PTR. The memory reference replaces those in FIRST_DR
166 (and its group). */
168 static tree
169 create_array_ref (tree type, tree ptr, tree alias_ptr_type)
171 tree mem_ref;
173 mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
174 /* Arrays have the same alignment as their type. */
175 set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
176 return mem_ref;
179 /* Add a clobber of variable VAR to the vectorization of STMT_INFO.
180 Emit the clobber before *GSI. */
182 static void
183 vect_clobber_variable (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
184 tree var)
186 tree clobber = build_clobber (TREE_TYPE (var));
187 gimple *new_stmt = gimple_build_assign (var, clobber);
188 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
191 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
193 /* Function vect_mark_relevant.
195 Mark STMT_INFO as "relevant for vectorization" and add it to WORKLIST. */
197 static void
198 vect_mark_relevant (vec<stmt_vec_info> *worklist, stmt_vec_info stmt_info,
199 enum vect_relevant relevant, bool live_p)
201 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
202 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
204 if (dump_enabled_p ())
206 dump_printf_loc (MSG_NOTE, vect_location,
207 "mark relevant %d, live %d: ", relevant, live_p);
208 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
211 /* If this stmt is an original stmt in a pattern, we might need to mark its
212 related pattern stmt instead of the original stmt. However, such stmts
213 may have their own uses that are not in any pattern, in such cases the
214 stmt itself should be marked. */
215 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
217 /* This is the last stmt in a sequence that was detected as a
218 pattern that can potentially be vectorized. Don't mark the stmt
219 as relevant/live because it's not going to be vectorized.
220 Instead mark the pattern-stmt that replaces it. */
222 if (dump_enabled_p ())
223 dump_printf_loc (MSG_NOTE, vect_location,
224 "last stmt in pattern. don't mark"
225 " relevant/live.\n");
226 stmt_vec_info old_stmt_info = stmt_info;
227 stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
228 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == old_stmt_info);
229 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
230 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
233 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
234 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
235 STMT_VINFO_RELEVANT (stmt_info) = relevant;
237 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
238 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
240 if (dump_enabled_p ())
241 dump_printf_loc (MSG_NOTE, vect_location,
242 "already marked relevant/live.\n");
243 return;
246 worklist->safe_push (stmt_info);
250 /* Function is_simple_and_all_uses_invariant
252 Return true if STMT_INFO is simple and all uses of it are invariant. */
254 bool
255 is_simple_and_all_uses_invariant (stmt_vec_info stmt_info,
256 loop_vec_info loop_vinfo)
258 tree op;
259 ssa_op_iter iter;
261 gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
262 if (!stmt)
263 return false;
265 FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
267 enum vect_def_type dt = vect_uninitialized_def;
269 if (!vect_is_simple_use (op, loop_vinfo, &dt))
271 if (dump_enabled_p ())
272 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
273 "use not simple.\n");
274 return false;
277 if (dt != vect_external_def && dt != vect_constant_def)
278 return false;
280 return true;
283 /* Function vect_stmt_relevant_p.
285 Return true if STMT_INFO, in the loop that is represented by LOOP_VINFO,
286 is "relevant for vectorization".
288 A stmt is considered "relevant for vectorization" if:
289 - it has uses outside the loop.
290 - it has vdefs (it alters memory).
291 - control stmts in the loop (except for the exit condition).
293 CHECKME: what other side effects would the vectorizer allow? */
295 static bool
296 vect_stmt_relevant_p (stmt_vec_info stmt_info, loop_vec_info loop_vinfo,
297 enum vect_relevant *relevant, bool *live_p)
299 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
300 ssa_op_iter op_iter;
301 imm_use_iterator imm_iter;
302 use_operand_p use_p;
303 def_operand_p def_p;
305 *relevant = vect_unused_in_scope;
306 *live_p = false;
308 /* cond stmt other than loop exit cond. */
309 if (is_ctrl_stmt (stmt_info->stmt)
310 && STMT_VINFO_TYPE (stmt_info) != loop_exit_ctrl_vec_info_type)
311 *relevant = vect_used_in_scope;
313 /* changing memory. */
314 if (gimple_code (stmt_info->stmt) != GIMPLE_PHI)
315 if (gimple_vdef (stmt_info->stmt)
316 && !gimple_clobber_p (stmt_info->stmt))
318 if (dump_enabled_p ())
319 dump_printf_loc (MSG_NOTE, vect_location,
320 "vec_stmt_relevant_p: stmt has vdefs.\n");
321 *relevant = vect_used_in_scope;
324 /* uses outside the loop. */
325 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt_info->stmt, op_iter, SSA_OP_DEF)
327 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
329 basic_block bb = gimple_bb (USE_STMT (use_p));
330 if (!flow_bb_inside_loop_p (loop, bb))
332 if (dump_enabled_p ())
333 dump_printf_loc (MSG_NOTE, vect_location,
334 "vec_stmt_relevant_p: used out of loop.\n");
336 if (is_gimple_debug (USE_STMT (use_p)))
337 continue;
339 /* We expect all such uses to be in the loop exit phis
 340                  (because of loop-closed SSA form).  */
341 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
342 gcc_assert (bb == single_exit (loop)->dest);
344 *live_p = true;
349 if (*live_p && *relevant == vect_unused_in_scope
350 && !is_simple_and_all_uses_invariant (stmt_info, loop_vinfo))
352 if (dump_enabled_p ())
353 dump_printf_loc (MSG_NOTE, vect_location,
354 "vec_stmt_relevant_p: stmt live but not relevant.\n");
355 *relevant = vect_used_only_live;
358 return (*live_p || *relevant);
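/* As an illustration: in

     for (i = 0; i < n; i++)
       a[i] = b[i] + 1;

   the store to a[i] alters memory (has a vdef) and so is marked
   vect_used_in_scope, while the increment i = i + 1 has no vdef and,
   assuming i is not used after the loop, no use outside the loop, so it
   is left vect_unused_in_scope here; loop control and address
   computation are handled separately.  */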
362 /* Function exist_non_indexing_operands_for_use_p
364 USE is one of the uses attached to STMT_INFO. Check if USE is
365 used in STMT_INFO for anything other than indexing an array. */
367 static bool
368 exist_non_indexing_operands_for_use_p (tree use, stmt_vec_info stmt_info)
370 tree operand;
372 /* USE corresponds to some operand in STMT. If there is no data
373 reference in STMT, then any operand that corresponds to USE
374 is not indexing an array. */
375 if (!STMT_VINFO_DATA_REF (stmt_info))
376 return true;
 378   /* STMT has a data_ref. FORNOW this means that it is of one of
379 the following forms:
380 -1- ARRAY_REF = var
381 -2- var = ARRAY_REF
382 (This should have been verified in analyze_data_refs).
384 'var' in the second case corresponds to a def, not a use,
385 so USE cannot correspond to any operands that are not used
386 for array indexing.
388 Therefore, all we need to check is if STMT falls into the
389 first case, and whether var corresponds to USE. */
391 gassign *assign = dyn_cast <gassign *> (stmt_info->stmt);
392 if (!assign || !gimple_assign_copy_p (assign))
394 gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
395 if (call && gimple_call_internal_p (call))
397 internal_fn ifn = gimple_call_internal_fn (call);
398 int mask_index = internal_fn_mask_index (ifn);
399 if (mask_index >= 0
400 && use == gimple_call_arg (call, mask_index))
401 return true;
402 int stored_value_index = internal_fn_stored_value_index (ifn);
403 if (stored_value_index >= 0
404 && use == gimple_call_arg (call, stored_value_index))
405 return true;
406 if (internal_gather_scatter_fn_p (ifn)
407 && use == gimple_call_arg (call, 1))
408 return true;
410 return false;
413 if (TREE_CODE (gimple_assign_lhs (assign)) == SSA_NAME)
414 return false;
415 operand = gimple_assign_rhs1 (assign);
416 if (TREE_CODE (operand) != SSA_NAME)
417 return false;
419 if (operand == use)
420 return true;
422 return false;
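/* Example: for the store  a[i_1] = x_2  (form -1- above), the use of
   x_2 is a non-indexing operand use, so this returns true for it,
   whereas i_1 only appears inside the ARRAY_REF and therefore has no
   non-indexing use in this stmt.  */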
 427 /* Function process_use.
429 Inputs:
430 - a USE in STMT_VINFO in a loop represented by LOOP_VINFO
431 - RELEVANT - enum value to be set in the STMT_VINFO of the stmt
432 that defined USE. This is done by calling mark_relevant and passing it
433 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
434 - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
435 be performed.
437 Outputs:
438 Generally, LIVE_P and RELEVANT are used to define the liveness and
439 relevance info of the DEF_STMT of this USE:
440 STMT_VINFO_LIVE_P (DEF_stmt_vinfo) <-- live_p
441 STMT_VINFO_RELEVANT (DEF_stmt_vinfo) <-- relevant
442 Exceptions:
443 - case 1: If USE is used only for address computations (e.g. array indexing),
444 which does not need to be directly vectorized, then the liveness/relevance
445 of the respective DEF_STMT is left unchanged.
446 - case 2: If STMT_VINFO is a reduction phi and DEF_STMT is a reduction stmt,
 447    we skip DEF_STMT because it has already been processed.
448 - case 3: If DEF_STMT and STMT_VINFO are in different nests, then
449 "relevant" will be modified accordingly.
451 Return true if everything is as expected. Return false otherwise. */
453 static bool
454 process_use (stmt_vec_info stmt_vinfo, tree use, loop_vec_info loop_vinfo,
455 enum vect_relevant relevant, vec<stmt_vec_info> *worklist,
456 bool force)
458 stmt_vec_info dstmt_vinfo;
459 basic_block bb, def_bb;
460 enum vect_def_type dt;
462 /* case 1: we are only interested in uses that need to be vectorized. Uses
463 that are used for address computation are not considered relevant. */
464 if (!force && !exist_non_indexing_operands_for_use_p (use, stmt_vinfo))
465 return true;
467 if (!vect_is_simple_use (use, loop_vinfo, &dt, &dstmt_vinfo))
469 if (dump_enabled_p ())
470 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
471 "not vectorized: unsupported use in stmt.\n");
472 return false;
475 if (!dstmt_vinfo)
476 return true;
478 def_bb = gimple_bb (dstmt_vinfo->stmt);
480 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DSTMT_VINFO).
481 DSTMT_VINFO must have already been processed, because this should be the
482 only way that STMT, which is a reduction-phi, was put in the worklist,
483 as there should be no other uses for DSTMT_VINFO in the loop. So we just
484 check that everything is as expected, and we are done. */
485 bb = gimple_bb (stmt_vinfo->stmt);
486 if (gimple_code (stmt_vinfo->stmt) == GIMPLE_PHI
487 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
488 && gimple_code (dstmt_vinfo->stmt) != GIMPLE_PHI
489 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
490 && bb->loop_father == def_bb->loop_father)
492 if (dump_enabled_p ())
493 dump_printf_loc (MSG_NOTE, vect_location,
494 "reduc-stmt defining reduc-phi in the same nest.\n");
495 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
496 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
497 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
498 return true;
501 /* case 3a: outer-loop stmt defining an inner-loop stmt:
502 outer-loop-header-bb:
503 d = dstmt_vinfo
504 inner-loop:
505 stmt # use (d)
506 outer-loop-tail-bb:
507 ... */
508 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
510 if (dump_enabled_p ())
511 dump_printf_loc (MSG_NOTE, vect_location,
512 "outer-loop def-stmt defining inner-loop stmt.\n");
514 switch (relevant)
516 case vect_unused_in_scope:
517 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
518 vect_used_in_scope : vect_unused_in_scope;
519 break;
521 case vect_used_in_outer_by_reduction:
522 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
523 relevant = vect_used_by_reduction;
524 break;
526 case vect_used_in_outer:
527 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
528 relevant = vect_used_in_scope;
529 break;
531 case vect_used_in_scope:
532 break;
534 default:
535 gcc_unreachable ();
539 /* case 3b: inner-loop stmt defining an outer-loop stmt:
540 outer-loop-header-bb:
542 inner-loop:
543 d = dstmt_vinfo
544 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
545 stmt # use (d) */
546 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
548 if (dump_enabled_p ())
549 dump_printf_loc (MSG_NOTE, vect_location,
550 "inner-loop def-stmt defining outer-loop stmt.\n");
552 switch (relevant)
554 case vect_unused_in_scope:
555 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
556 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
557 vect_used_in_outer_by_reduction : vect_unused_in_scope;
558 break;
560 case vect_used_by_reduction:
561 case vect_used_only_live:
562 relevant = vect_used_in_outer_by_reduction;
563 break;
565 case vect_used_in_scope:
566 relevant = vect_used_in_outer;
567 break;
569 default:
570 gcc_unreachable ();
573 /* We are also not interested in uses on loop PHI backedges that are
574 inductions. Otherwise we'll needlessly vectorize the IV increment
575 and cause hybrid SLP for SLP inductions. Unless the PHI is live
576 of course. */
577 else if (gimple_code (stmt_vinfo->stmt) == GIMPLE_PHI
578 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_induction_def
579 && ! STMT_VINFO_LIVE_P (stmt_vinfo)
580 && (PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt,
581 loop_latch_edge (bb->loop_father))
582 == use))
584 if (dump_enabled_p ())
585 dump_printf_loc (MSG_NOTE, vect_location,
586 "induction value on backedge.\n");
587 return true;
591 vect_mark_relevant (worklist, dstmt_vinfo, relevant, false);
592 return true;
596 /* Function vect_mark_stmts_to_be_vectorized.
598 Not all stmts in the loop need to be vectorized. For example:
600 for i...
601 for j...
602 1. T0 = i + j
603 2. T1 = a[T0]
605 3. j = j + 1
 607    Stmts 1 and 3 do not need to be vectorized, because loop control and
608 addressing of vectorized data-refs are handled differently.
610 This pass detects such stmts. */
612 bool
613 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
615 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
616 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
617 unsigned int nbbs = loop->num_nodes;
618 gimple_stmt_iterator si;
619 unsigned int i;
620 basic_block bb;
621 bool live_p;
622 enum vect_relevant relevant;
624 DUMP_VECT_SCOPE ("vect_mark_stmts_to_be_vectorized");
626 auto_vec<stmt_vec_info, 64> worklist;
628 /* 1. Init worklist. */
629 for (i = 0; i < nbbs; i++)
631 bb = bbs[i];
632 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
634 stmt_vec_info phi_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
635 if (dump_enabled_p ())
637 dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
638 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi_info->stmt, 0);
641 if (vect_stmt_relevant_p (phi_info, loop_vinfo, &relevant, &live_p))
642 vect_mark_relevant (&worklist, phi_info, relevant, live_p);
644 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
646 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
647 if (dump_enabled_p ())
649 dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
650 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
653 if (vect_stmt_relevant_p (stmt_info, loop_vinfo, &relevant, &live_p))
654 vect_mark_relevant (&worklist, stmt_info, relevant, live_p);
658 /* 2. Process_worklist */
659 while (worklist.length () > 0)
661 use_operand_p use_p;
662 ssa_op_iter iter;
664 stmt_vec_info stmt_vinfo = worklist.pop ();
665 if (dump_enabled_p ())
667 dump_printf_loc (MSG_NOTE, vect_location,
668 "worklist: examine stmt: ");
669 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_vinfo->stmt, 0);
672 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
673 (DEF_STMT) as relevant/irrelevant according to the relevance property
674 of STMT. */
675 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
677 /* Generally, the relevance property of STMT (in STMT_VINFO_RELEVANT) is
678 propagated as is to the DEF_STMTs of its USEs.
680 One exception is when STMT has been identified as defining a reduction
681 variable; in this case we set the relevance to vect_used_by_reduction.
682 This is because we distinguish between two kinds of relevant stmts -
683 those that are used by a reduction computation, and those that are
684 (also) used by a regular computation. This allows us later on to
685 identify stmts that are used solely by a reduction, and therefore the
686 order of the results that they produce does not have to be kept. */
688 switch (STMT_VINFO_DEF_TYPE (stmt_vinfo))
690 case vect_reduction_def:
691 gcc_assert (relevant != vect_unused_in_scope);
692 if (relevant != vect_unused_in_scope
693 && relevant != vect_used_in_scope
694 && relevant != vect_used_by_reduction
695 && relevant != vect_used_only_live)
697 if (dump_enabled_p ())
698 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
699 "unsupported use of reduction.\n");
700 return false;
702 break;
704 case vect_nested_cycle:
705 if (relevant != vect_unused_in_scope
706 && relevant != vect_used_in_outer_by_reduction
707 && relevant != vect_used_in_outer)
709 if (dump_enabled_p ())
710 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
711 "unsupported use of nested cycle.\n");
713 return false;
715 break;
717 case vect_double_reduction_def:
718 if (relevant != vect_unused_in_scope
719 && relevant != vect_used_by_reduction
720 && relevant != vect_used_only_live)
722 if (dump_enabled_p ())
723 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
724 "unsupported use of double reduction.\n");
726 return false;
728 break;
730 default:
731 break;
734 if (is_pattern_stmt_p (stmt_vinfo))
736 /* Pattern statements are not inserted into the code, so
737 FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
738 have to scan the RHS or function arguments instead. */
739 if (gassign *assign = dyn_cast <gassign *> (stmt_vinfo->stmt))
741 enum tree_code rhs_code = gimple_assign_rhs_code (assign);
742 tree op = gimple_assign_rhs1 (assign);
744 i = 1;
745 if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
747 if (!process_use (stmt_vinfo, TREE_OPERAND (op, 0),
748 loop_vinfo, relevant, &worklist, false)
749 || !process_use (stmt_vinfo, TREE_OPERAND (op, 1),
750 loop_vinfo, relevant, &worklist, false))
751 return false;
752 i = 2;
754 for (; i < gimple_num_ops (assign); i++)
756 op = gimple_op (assign, i);
757 if (TREE_CODE (op) == SSA_NAME
758 && !process_use (stmt_vinfo, op, loop_vinfo, relevant,
759 &worklist, false))
760 return false;
763 else if (gcall *call = dyn_cast <gcall *> (stmt_vinfo->stmt))
765 for (i = 0; i < gimple_call_num_args (call); i++)
767 tree arg = gimple_call_arg (call, i);
768 if (!process_use (stmt_vinfo, arg, loop_vinfo, relevant,
769 &worklist, false))
770 return false;
774 else
775 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt_vinfo->stmt, iter, SSA_OP_USE)
777 tree op = USE_FROM_PTR (use_p);
778 if (!process_use (stmt_vinfo, op, loop_vinfo, relevant,
779 &worklist, false))
780 return false;
783 if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
785 gather_scatter_info gs_info;
786 if (!vect_check_gather_scatter (stmt_vinfo, loop_vinfo, &gs_info))
787 gcc_unreachable ();
788 if (!process_use (stmt_vinfo, gs_info.offset, loop_vinfo, relevant,
789 &worklist, true))
790 return false;
792 } /* while worklist */
794 return true;
797 /* Compute the prologue cost for invariant or constant operands. */
799 static unsigned
800 vect_prologue_cost_for_slp_op (slp_tree node, stmt_vec_info stmt_info,
801 unsigned opno, enum vect_def_type dt,
802 stmt_vector_for_cost *cost_vec)
804 gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[0]->stmt;
805 tree op = gimple_op (stmt, opno);
806 unsigned prologue_cost = 0;
808 /* Without looking at the actual initializer a vector of
809 constants can be implemented as load from the constant pool.
810 When all elements are the same we can use a splat. */
811 tree vectype = get_vectype_for_scalar_type (TREE_TYPE (op));
812 unsigned group_size = SLP_TREE_SCALAR_STMTS (node).length ();
813 unsigned num_vects_to_check;
814 unsigned HOST_WIDE_INT const_nunits;
815 unsigned nelt_limit;
816 if (TYPE_VECTOR_SUBPARTS (vectype).is_constant (&const_nunits)
817 && ! multiple_p (const_nunits, group_size))
819 num_vects_to_check = SLP_TREE_NUMBER_OF_VEC_STMTS (node);
820 nelt_limit = const_nunits;
822 else
824 /* If either the vector has variable length or the vectors
825 are composed of repeated whole groups we only need to
826 cost construction once. All vectors will be the same. */
827 num_vects_to_check = 1;
828 nelt_limit = group_size;
830 tree elt = NULL_TREE;
831 unsigned nelt = 0;
832 for (unsigned j = 0; j < num_vects_to_check * nelt_limit; ++j)
834 unsigned si = j % group_size;
835 if (nelt == 0)
836 elt = gimple_op (SLP_TREE_SCALAR_STMTS (node)[si]->stmt, opno);
837 /* ??? We're just tracking whether all operands of a single
838 vector initializer are the same, ideally we'd check if
839 we emitted the same one already. */
840 else if (elt != gimple_op (SLP_TREE_SCALAR_STMTS (node)[si]->stmt,
841 opno))
842 elt = NULL_TREE;
843 nelt++;
844 if (nelt == nelt_limit)
846 /* ??? We need to pass down stmt_info for a vector type
847 even if it points to the wrong stmt. */
848 prologue_cost += record_stmt_cost
849 (cost_vec, 1,
850 dt == vect_external_def
851 ? (elt ? scalar_to_vec : vec_construct)
852 : vector_load,
853 stmt_info, 0, vect_prologue);
854 nelt = 0;
858 return prologue_cost;
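/* Rough illustration: an operand column of external defs such as
   { x_1, x_1, x_1, x_1 } is costed as one scalar_to_vec (splat) per
   vector, a column like { x_1, x_2, x_3, x_4 } as a vec_construct,
   and constant operands are always costed as a vector_load from the
   constant pool.  */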
861 /* Function vect_model_simple_cost.
863 Models cost for simple operations, i.e. those that only emit ncopies of a
864 single op. Right now, this does not account for multiple insns that could
865 be generated for the single vector op. We will handle that shortly. */
867 static void
868 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
869 enum vect_def_type *dt,
870 int ndts,
871 slp_tree node,
872 stmt_vector_for_cost *cost_vec)
874 int inside_cost = 0, prologue_cost = 0;
876 gcc_assert (cost_vec != NULL);
878 /* ??? Somehow we need to fix this at the callers. */
879 if (node)
880 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (node);
882 if (node)
884 /* Scan operands and account for prologue cost of constants/externals.
885 ??? This over-estimates cost for multiple uses and should be
886 re-engineered. */
887 gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[0]->stmt;
888 tree lhs = gimple_get_lhs (stmt);
889 for (unsigned i = 0; i < gimple_num_ops (stmt); ++i)
891 tree op = gimple_op (stmt, i);
892 enum vect_def_type dt;
893 if (!op || op == lhs)
894 continue;
895 if (vect_is_simple_use (op, stmt_info->vinfo, &dt)
896 && (dt == vect_constant_def || dt == vect_external_def))
897 prologue_cost += vect_prologue_cost_for_slp_op (node, stmt_info,
898 i, dt, cost_vec);
901 else
 902     /* Cost the "broadcast" of a scalar operand into a vector operand.
903 Use scalar_to_vec to cost the broadcast, as elsewhere in the vector
904 cost model. */
905 for (int i = 0; i < ndts; i++)
906 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
907 prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec,
908 stmt_info, 0, vect_prologue);
910 /* Adjust for two-operator SLP nodes. */
911 if (node && SLP_TREE_TWO_OPERATORS (node))
913 ncopies *= 2;
914 inside_cost += record_stmt_cost (cost_vec, ncopies, vec_perm,
915 stmt_info, 0, vect_body);
918 /* Pass the inside-of-loop statements to the target-specific cost model. */
919 inside_cost += record_stmt_cost (cost_vec, ncopies, vector_stmt,
920 stmt_info, 0, vect_body);
922 if (dump_enabled_p ())
923 dump_printf_loc (MSG_NOTE, vect_location,
924 "vect_model_simple_cost: inside_cost = %d, "
925 "prologue_cost = %d .\n", inside_cost, prologue_cost);
929 /* Model cost for type demotion and promotion operations. PWR is normally
930 zero for single-step promotions and demotions. It will be one if
931 two-step promotion/demotion is required, and so on. Each additional
932 step doubles the number of instructions required. */
934 static void
935 vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
936 enum vect_def_type *dt, int pwr,
937 stmt_vector_for_cost *cost_vec)
939 int i, tmp;
940 int inside_cost = 0, prologue_cost = 0;
942 for (i = 0; i < pwr + 1; i++)
944 tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
945 (i + 1) : i;
946 inside_cost += record_stmt_cost (cost_vec, vect_pow2 (tmp),
947 vec_promote_demote, stmt_info, 0,
948 vect_body);
 951   /* FORNOW: Assuming maximum 2 args per stmt.  */
952 for (i = 0; i < 2; i++)
953 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
954 prologue_cost += record_stmt_cost (cost_vec, 1, vector_stmt,
955 stmt_info, 0, vect_prologue);
957 if (dump_enabled_p ())
958 dump_printf_loc (MSG_NOTE, vect_location,
959 "vect_model_promotion_demotion_cost: inside_cost = %d, "
960 "prologue_cost = %d .\n", inside_cost, prologue_cost);
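/* Worked example of the formula above: with PWR = 1 (a two-step
   conversion) the loop runs for i = 0 and i = 1.  For a demotion
   TMP = i, giving vect_pow2 (0) + vect_pow2 (1) = 1 + 2 = 3
   vec_promote_demote operations; for a promotion TMP = i + 1,
   giving 2 + 4 = 6, since each extra step doubles the number of
   instructions.  */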
963 /* Function vect_model_store_cost
965 Models cost for stores. In the case of grouped accesses, one access
966 has the overhead of the grouped access attributed to it. */
968 static void
969 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
970 enum vect_def_type dt,
971 vect_memory_access_type memory_access_type,
972 vec_load_store_type vls_type, slp_tree slp_node,
973 stmt_vector_for_cost *cost_vec)
975 unsigned int inside_cost = 0, prologue_cost = 0;
976 stmt_vec_info first_stmt_info = stmt_info;
977 bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);
979 /* ??? Somehow we need to fix this at the callers. */
980 if (slp_node)
981 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
983 if (vls_type == VLS_STORE_INVARIANT)
985 if (slp_node)
986 prologue_cost += vect_prologue_cost_for_slp_op (slp_node, stmt_info,
987 1, dt, cost_vec);
988 else
989 prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec,
990 stmt_info, 0, vect_prologue);
993 /* Grouped stores update all elements in the group at once,
994 so we want the DR for the first statement. */
995 if (!slp_node && grouped_access_p)
996 first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
998 /* True if we should include any once-per-group costs as well as
999 the cost of the statement itself. For SLP we only get called
1000 once per group anyhow. */
1001 bool first_stmt_p = (first_stmt_info == stmt_info);
1003 /* We assume that the cost of a single store-lanes instruction is
1004 equivalent to the cost of DR_GROUP_SIZE separate stores. If a grouped
1005 access is instead being provided by a permute-and-store operation,
1006 include the cost of the permutes. */
1007 if (first_stmt_p
1008 && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
1010       /* Uses high and low interleave or shuffle operations for each
1011 needed permute. */
1012 int group_size = DR_GROUP_SIZE (first_stmt_info);
1013 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
1014 inside_cost = record_stmt_cost (cost_vec, nstmts, vec_perm,
1015 stmt_info, 0, vect_body);
1017 if (dump_enabled_p ())
1018 dump_printf_loc (MSG_NOTE, vect_location,
1019 "vect_model_store_cost: strided group_size = %d .\n",
1020 group_size);
1023 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1024 /* Costs of the stores. */
1025 if (memory_access_type == VMAT_ELEMENTWISE
1026 || memory_access_type == VMAT_GATHER_SCATTER)
1028 /* N scalar stores plus extracting the elements. */
1029 unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
1030 inside_cost += record_stmt_cost (cost_vec,
1031 ncopies * assumed_nunits,
1032 scalar_store, stmt_info, 0, vect_body);
1034 else
1035 vect_get_store_cost (stmt_info, ncopies, &inside_cost, cost_vec);
1037 if (memory_access_type == VMAT_ELEMENTWISE
1038 || memory_access_type == VMAT_STRIDED_SLP)
1040 /* N scalar stores plus extracting the elements. */
1041 unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
1042 inside_cost += record_stmt_cost (cost_vec,
1043 ncopies * assumed_nunits,
1044 vec_to_scalar, stmt_info, 0, vect_body);
1047 if (dump_enabled_p ())
1048 dump_printf_loc (MSG_NOTE, vect_location,
1049 "vect_model_store_cost: inside_cost = %d, "
1050 "prologue_cost = %d .\n", inside_cost, prologue_cost);
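/* Rough illustration: for a contiguous-permute store group with
   DR_GROUP_SIZE = 4 and NCOPIES = 1, the permute cost above is
   1 * ceil_log2 (4) * 4 = 8 vec_perm statements, in addition to the
   per-copy store cost from vect_get_store_cost.  */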
1054 /* Calculate cost of DR's memory access. */
1055 void
1056 vect_get_store_cost (stmt_vec_info stmt_info, int ncopies,
1057 unsigned int *inside_cost,
1058 stmt_vector_for_cost *body_cost_vec)
1060 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
1061 int alignment_support_scheme
1062 = vect_supportable_dr_alignment (dr_info, false);
1064 switch (alignment_support_scheme)
1066 case dr_aligned:
1068 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1069 vector_store, stmt_info, 0,
1070 vect_body);
1072 if (dump_enabled_p ())
1073 dump_printf_loc (MSG_NOTE, vect_location,
1074 "vect_model_store_cost: aligned.\n");
1075 break;
1078 case dr_unaligned_supported:
1080 /* Here, we assign an additional cost for the unaligned store. */
1081 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1082 unaligned_store, stmt_info,
1083 DR_MISALIGNMENT (dr_info),
1084 vect_body);
1085 if (dump_enabled_p ())
1086 dump_printf_loc (MSG_NOTE, vect_location,
1087 "vect_model_store_cost: unaligned supported by "
1088 "hardware.\n");
1089 break;
1092 case dr_unaligned_unsupported:
1094 *inside_cost = VECT_MAX_COST;
1096 if (dump_enabled_p ())
1097 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1098 "vect_model_store_cost: unsupported access.\n");
1099 break;
1102 default:
1103 gcc_unreachable ();
1108 /* Function vect_model_load_cost
1110 Models cost for loads. In the case of grouped accesses, one access has
1111 the overhead of the grouped access attributed to it. Since unaligned
1112 accesses are supported for loads, we also account for the costs of the
1113 access scheme chosen. */
1115 static void
1116 vect_model_load_cost (stmt_vec_info stmt_info, unsigned ncopies,
1117 vect_memory_access_type memory_access_type,
1118 slp_instance instance,
1119 slp_tree slp_node,
1120 stmt_vector_for_cost *cost_vec)
1122 unsigned int inside_cost = 0, prologue_cost = 0;
1123 bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);
1125 gcc_assert (cost_vec);
1127 /* ??? Somehow we need to fix this at the callers. */
1128 if (slp_node)
1129 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
1131 if (slp_node && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
1133 /* If the load is permuted then the alignment is determined by
1134 the first group element not by the first scalar stmt DR. */
1135 stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
1136 /* Record the cost for the permutation. */
1137 unsigned n_perms;
1138 unsigned assumed_nunits
1139 = vect_nunits_for_cost (STMT_VINFO_VECTYPE (first_stmt_info));
1140 unsigned slp_vf = (ncopies * assumed_nunits) / instance->group_size;
1141 vect_transform_slp_perm_load (slp_node, vNULL, NULL,
1142 slp_vf, instance, true,
1143 &n_perms);
1144 inside_cost += record_stmt_cost (cost_vec, n_perms, vec_perm,
1145 first_stmt_info, 0, vect_body);
1146 /* And adjust the number of loads performed. This handles
1147 redundancies as well as loads that are later dead. */
1148 auto_sbitmap perm (DR_GROUP_SIZE (first_stmt_info));
1149 bitmap_clear (perm);
1150 for (unsigned i = 0;
1151 i < SLP_TREE_LOAD_PERMUTATION (slp_node).length (); ++i)
1152 bitmap_set_bit (perm, SLP_TREE_LOAD_PERMUTATION (slp_node)[i]);
1153 ncopies = 0;
1154 bool load_seen = false;
1155 for (unsigned i = 0; i < DR_GROUP_SIZE (first_stmt_info); ++i)
1157 if (i % assumed_nunits == 0)
1159 if (load_seen)
1160 ncopies++;
1161 load_seen = false;
1163 if (bitmap_bit_p (perm, i))
1164 load_seen = true;
1166 if (load_seen)
1167 ncopies++;
1168 gcc_assert (ncopies
1169 <= (DR_GROUP_SIZE (first_stmt_info)
1170 - DR_GROUP_GAP (first_stmt_info)
1171 + assumed_nunits - 1) / assumed_nunits);
1174 /* Grouped loads read all elements in the group at once,
1175 so we want the DR for the first statement. */
1176 stmt_vec_info first_stmt_info = stmt_info;
1177 if (!slp_node && grouped_access_p)
1178 first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
1180 /* True if we should include any once-per-group costs as well as
1181 the cost of the statement itself. For SLP we only get called
1182 once per group anyhow. */
1183 bool first_stmt_p = (first_stmt_info == stmt_info);
1185 /* We assume that the cost of a single load-lanes instruction is
1186 equivalent to the cost of DR_GROUP_SIZE separate loads. If a grouped
1187 access is instead being provided by a load-and-permute operation,
1188 include the cost of the permutes. */
1189 if (first_stmt_p
1190 && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
1192       /* Uses even and odd extract operations or shuffle operations
1193 for each needed permute. */
1194 int group_size = DR_GROUP_SIZE (first_stmt_info);
1195 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
1196 inside_cost += record_stmt_cost (cost_vec, nstmts, vec_perm,
1197 stmt_info, 0, vect_body);
1199 if (dump_enabled_p ())
1200 dump_printf_loc (MSG_NOTE, vect_location,
1201 "vect_model_load_cost: strided group_size = %d .\n",
1202 group_size);
1205 /* The loads themselves. */
1206 if (memory_access_type == VMAT_ELEMENTWISE
1207 || memory_access_type == VMAT_GATHER_SCATTER)
1209 /* N scalar loads plus gathering them into a vector. */
1210 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1211 unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
1212 inside_cost += record_stmt_cost (cost_vec,
1213 ncopies * assumed_nunits,
1214 scalar_load, stmt_info, 0, vect_body);
1216 else
1217 vect_get_load_cost (stmt_info, ncopies, first_stmt_p,
1218 &inside_cost, &prologue_cost,
1219 cost_vec, cost_vec, true);
1220 if (memory_access_type == VMAT_ELEMENTWISE
1221 || memory_access_type == VMAT_STRIDED_SLP)
1222 inside_cost += record_stmt_cost (cost_vec, ncopies, vec_construct,
1223 stmt_info, 0, vect_body);
1225 if (dump_enabled_p ())
1226 dump_printf_loc (MSG_NOTE, vect_location,
1227 "vect_model_load_cost: inside_cost = %d, "
1228 "prologue_cost = %d .\n", inside_cost, prologue_cost);
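/* Rough illustration of the permuted-SLP adjustment above: with
   DR_GROUP_SIZE = 4, ASSUMED_NUNITS = 2 and load permutation
   { 0, 0, 1, 1 }, only group elements 0 and 1 are used, both of which
   live in the first vector of the group, so NCOPIES is recomputed as 1
   and the second (dead) vector load is not costed.  */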
1232 /* Calculate cost of DR's memory access. */
1233 void
1234 vect_get_load_cost (stmt_vec_info stmt_info, int ncopies,
1235 bool add_realign_cost, unsigned int *inside_cost,
1236 unsigned int *prologue_cost,
1237 stmt_vector_for_cost *prologue_cost_vec,
1238 stmt_vector_for_cost *body_cost_vec,
1239 bool record_prologue_costs)
1241 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
1242 int alignment_support_scheme
1243 = vect_supportable_dr_alignment (dr_info, false);
1245 switch (alignment_support_scheme)
1247 case dr_aligned:
1249 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1250 stmt_info, 0, vect_body);
1252 if (dump_enabled_p ())
1253 dump_printf_loc (MSG_NOTE, vect_location,
1254 "vect_model_load_cost: aligned.\n");
1256 break;
1258 case dr_unaligned_supported:
1260 /* Here, we assign an additional cost for the unaligned load. */
1261 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1262 unaligned_load, stmt_info,
1263 DR_MISALIGNMENT (dr_info),
1264 vect_body);
1266 if (dump_enabled_p ())
1267 dump_printf_loc (MSG_NOTE, vect_location,
1268 "vect_model_load_cost: unaligned supported by "
1269 "hardware.\n");
1271 break;
1273 case dr_explicit_realign:
1275 *inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
1276 vector_load, stmt_info, 0, vect_body);
1277 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1278 vec_perm, stmt_info, 0, vect_body);
1280 /* FIXME: If the misalignment remains fixed across the iterations of
1281 the containing loop, the following cost should be added to the
1282 prologue costs. */
1283 if (targetm.vectorize.builtin_mask_for_load)
1284 *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
1285 stmt_info, 0, vect_body);
1287 if (dump_enabled_p ())
1288 dump_printf_loc (MSG_NOTE, vect_location,
1289 "vect_model_load_cost: explicit realign\n");
1291 break;
1293 case dr_explicit_realign_optimized:
1295 if (dump_enabled_p ())
1296 dump_printf_loc (MSG_NOTE, vect_location,
1297 "vect_model_load_cost: unaligned software "
1298 "pipelined.\n");
1300 /* Unaligned software pipeline has a load of an address, an initial
1301 load, and possibly a mask operation to "prime" the loop. However,
1302 if this is an access in a group of loads, which provide grouped
1303 access, then the above cost should only be considered for one
1304 access in the group. Inside the loop, there is a load op
1305 and a realignment op. */
1307 if (add_realign_cost && record_prologue_costs)
1309 *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
1310 vector_stmt, stmt_info,
1311 0, vect_prologue);
1312 if (targetm.vectorize.builtin_mask_for_load)
1313 *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
1314 vector_stmt, stmt_info,
1315 0, vect_prologue);
1318 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1319 stmt_info, 0, vect_body);
1320 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
1321 stmt_info, 0, vect_body);
1323 if (dump_enabled_p ())
1324 dump_printf_loc (MSG_NOTE, vect_location,
1325 "vect_model_load_cost: explicit realign optimized"
1326 "\n");
1328 break;
1331 case dr_unaligned_unsupported:
1333 *inside_cost = VECT_MAX_COST;
1335 if (dump_enabled_p ())
1336 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1337 "vect_model_load_cost: unsupported access.\n");
1338 break;
1341 default:
1342 gcc_unreachable ();
1346 /* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
1347 the loop preheader for the vectorized stmt STMT_VINFO. */
1349 static void
1350 vect_init_vector_1 (stmt_vec_info stmt_vinfo, gimple *new_stmt,
1351 gimple_stmt_iterator *gsi)
1353 if (gsi)
1354 vect_finish_stmt_generation (stmt_vinfo, new_stmt, gsi);
1355 else
1357 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1359 if (loop_vinfo)
1361 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1362 basic_block new_bb;
1363 edge pe;
1365 if (nested_in_vect_loop_p (loop, stmt_vinfo))
1366 loop = loop->inner;
1368 pe = loop_preheader_edge (loop);
1369 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
1370 gcc_assert (!new_bb);
1372 else
1374 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
1375 basic_block bb;
1376 gimple_stmt_iterator gsi_bb_start;
1378 gcc_assert (bb_vinfo);
1379 bb = BB_VINFO_BB (bb_vinfo);
1380 gsi_bb_start = gsi_after_labels (bb);
1381 gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
1385 if (dump_enabled_p ())
1387 dump_printf_loc (MSG_NOTE, vect_location,
1388 "created new init_stmt: ");
1389 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
1393 /* Function vect_init_vector.
1395 Insert a new stmt (INIT_STMT) that initializes a new variable of type
1396 TYPE with the value VAL. If TYPE is a vector type and VAL does not have
1397    vector type, a vector with all elements equal to VAL is created first.
1398    Place the initialization at GSI if it is not NULL. Otherwise, place the
1399 initialization at the loop preheader.
1400 Return the DEF of INIT_STMT.
1401 It will be used in the vectorization of STMT_INFO. */
1403 tree
1404 vect_init_vector (stmt_vec_info stmt_info, tree val, tree type,
1405 gimple_stmt_iterator *gsi)
1407 gimple *init_stmt;
1408 tree new_temp;
1410   /* We abuse this function to push something to an SSA name with initial value 'val'.  */
1411 if (! useless_type_conversion_p (type, TREE_TYPE (val)))
1413 gcc_assert (TREE_CODE (type) == VECTOR_TYPE);
1414 if (! types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
1416       /* A scalar boolean value should be transformed into an
1417          all-zeros or all-ones value before building a vector.  */
1418 if (VECTOR_BOOLEAN_TYPE_P (type))
1420 tree true_val = build_all_ones_cst (TREE_TYPE (type));
1421 tree false_val = build_zero_cst (TREE_TYPE (type));
1423 if (CONSTANT_CLASS_P (val))
1424 val = integer_zerop (val) ? false_val : true_val;
1425 else
1427 new_temp = make_ssa_name (TREE_TYPE (type));
1428 init_stmt = gimple_build_assign (new_temp, COND_EXPR,
1429 val, true_val, false_val);
1430 vect_init_vector_1 (stmt_info, init_stmt, gsi);
1431 val = new_temp;
1434 else if (CONSTANT_CLASS_P (val))
1435 val = fold_convert (TREE_TYPE (type), val);
1436 else
1438 new_temp = make_ssa_name (TREE_TYPE (type));
1439 if (! INTEGRAL_TYPE_P (TREE_TYPE (val)))
1440 init_stmt = gimple_build_assign (new_temp,
1441 fold_build1 (VIEW_CONVERT_EXPR,
1442 TREE_TYPE (type),
1443 val));
1444 else
1445 init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
1446 vect_init_vector_1 (stmt_info, init_stmt, gsi);
1447 val = new_temp;
1450 val = build_vector_from_val (type, val);
1453 new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
1454 init_stmt = gimple_build_assign (new_temp, val);
1455 vect_init_vector_1 (stmt_info, init_stmt, gsi);
1456 return new_temp;
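/* Example (illustrative types and names): calling

     vect_init_vector (stmt_info, build_int_cst (integer_type_node, 5),
                       v4si_type, NULL)

   builds the vector constant { 5, 5, 5, 5 }, emits an assignment of it
   to a new "cst_" SSA name on the loop preheader edge (because GSI is
   NULL), and returns that SSA name.  Here v4si_type stands for any
   four-element int vector type.  */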
1459 /* Function vect_get_vec_def_for_operand_1.
1461 For a defining stmt DEF_STMT_INFO of a scalar stmt, return a vector def
1462 with type DT that will be used in the vectorized stmt. */
1464 tree
1465 vect_get_vec_def_for_operand_1 (stmt_vec_info def_stmt_info,
1466 enum vect_def_type dt)
1468 tree vec_oprnd;
1469 stmt_vec_info vec_stmt_info;
1471 switch (dt)
1473 /* operand is a constant or a loop invariant. */
1474 case vect_constant_def:
1475 case vect_external_def:
1476 /* Code should use vect_get_vec_def_for_operand. */
1477 gcc_unreachable ();
1479 /* operand is defined inside the loop. */
1480 case vect_internal_def:
1482 /* Get the def from the vectorized stmt. */
1483 vec_stmt_info = STMT_VINFO_VEC_STMT (def_stmt_info);
1484 /* Get vectorized pattern statement. */
1485 if (!vec_stmt_info
1486 && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
1487 && !STMT_VINFO_RELEVANT (def_stmt_info))
1488 vec_stmt_info = (STMT_VINFO_VEC_STMT
1489 (STMT_VINFO_RELATED_STMT (def_stmt_info)));
1490 gcc_assert (vec_stmt_info);
1491 if (gphi *phi = dyn_cast <gphi *> (vec_stmt_info->stmt))
1492 vec_oprnd = PHI_RESULT (phi);
1493 else
1494 vec_oprnd = gimple_get_lhs (vec_stmt_info->stmt);
1495 return vec_oprnd;
1498 /* operand is defined by a loop header phi. */
1499 case vect_reduction_def:
1500 case vect_double_reduction_def:
1501 case vect_nested_cycle:
1502 case vect_induction_def:
1504 gcc_assert (gimple_code (def_stmt_info->stmt) == GIMPLE_PHI);
1506 /* Get the def from the vectorized stmt. */
1507 vec_stmt_info = STMT_VINFO_VEC_STMT (def_stmt_info);
1508 if (gphi *phi = dyn_cast <gphi *> (vec_stmt_info->stmt))
1509 vec_oprnd = PHI_RESULT (phi);
1510 else
1511 vec_oprnd = gimple_get_lhs (vec_stmt_info->stmt);
1512 return vec_oprnd;
1515 default:
1516 gcc_unreachable ();
1521 /* Function vect_get_vec_def_for_operand.
1523 OP is an operand in STMT_VINFO. This function returns a (vector) def
1524 that will be used in the vectorized stmt for STMT_VINFO.
1526 In the case that OP is an SSA_NAME which is defined in the loop, then
1527 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
1529 In case OP is an invariant or constant, a new stmt that creates a vector def
1530 needs to be introduced. VECTYPE may be used to specify a required type for
1531 vector invariant. */
1533 tree
1534 vect_get_vec_def_for_operand (tree op, stmt_vec_info stmt_vinfo, tree vectype)
1536 gimple *def_stmt;
1537 enum vect_def_type dt;
1538 bool is_simple_use;
1539 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1541 if (dump_enabled_p ())
1543 dump_printf_loc (MSG_NOTE, vect_location,
1544 "vect_get_vec_def_for_operand: ");
1545 dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
1546 dump_printf (MSG_NOTE, "\n");
1549 stmt_vec_info def_stmt_info;
1550 is_simple_use = vect_is_simple_use (op, loop_vinfo, &dt,
1551 &def_stmt_info, &def_stmt);
1552 gcc_assert (is_simple_use);
1553 if (def_stmt && dump_enabled_p ())
1555 dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = ");
1556 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
1559 if (dt == vect_constant_def || dt == vect_external_def)
1561 tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
1562 tree vector_type;
1564 if (vectype)
1565 vector_type = vectype;
1566 else if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op))
1567 && VECTOR_BOOLEAN_TYPE_P (stmt_vectype))
1568 vector_type = build_same_sized_truth_vector_type (stmt_vectype);
1569 else
1570 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
1572 gcc_assert (vector_type);
1573 return vect_init_vector (stmt_vinfo, op, vector_type, NULL);
1575 else
1576 return vect_get_vec_def_for_operand_1 (def_stmt_info, dt);
1580 /* Function vect_get_vec_def_for_stmt_copy
1582 Return a vector-def for an operand. This function is used when the
1583 vectorized stmt to be created (by the caller to this function) is a "copy"
1584 created in case the vectorized result cannot fit in one vector, and several
1585 copies of the vector-stmt are required. In this case the vector-def is
1586 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1587 of the stmt that defines VEC_OPRND. VINFO describes the vectorization.
1589 Context:
1590 In case the vectorization factor (VF) is bigger than the number
1591 of elements that can fit in a vectype (nunits), we have to generate
1592 more than one vector stmt to vectorize the scalar stmt. This situation
1593 arises when there are multiple data-types operated upon in the loop; the
1594 smallest data-type determines the VF, and as a result, when vectorizing
1595 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1596 vector stmt (each computing a vector of 'nunits' results, and together
1597 computing 'VF' results in each iteration). This function is called when
1598 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1599 which VF=16 and nunits=4, so the number of copies required is 4):
1601 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1603 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1604 VS1.1: vx.1 = memref1 VS1.2
1605 VS1.2: vx.2 = memref2 VS1.3
1606 VS1.3: vx.3 = memref3
1608 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1609 VSnew.1: vz1 = vx.1 + ... VSnew.2
1610 VSnew.2: vz2 = vx.2 + ... VSnew.3
1611 VSnew.3: vz3 = vx.3 + ...
1613 The vectorization of S1 is explained in vectorizable_load.
1614 The vectorization of S2:
1615 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1616 the function 'vect_get_vec_def_for_operand' is called to
1617 get the relevant vector-def for each operand of S2. For operand x it
1618 returns the vector-def 'vx.0'.
1620 To create the remaining copies of the vector-stmt (VSnew.j), this
1621 function is called to get the relevant vector-def for each operand. It is
1622 obtained from the respective VS1.j stmt, which is recorded in the
1623 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1625 For example, to obtain the vector-def 'vx.1' in order to create the
1626 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1627    Given 'vx.0' we obtain the stmt that defines it ('VS1.0'); from the
1628 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1629 and return its def ('vx.1').
1630 Overall, to create the above sequence this function will be called 3 times:
1631 vx.1 = vect_get_vec_def_for_stmt_copy (vinfo, vx.0);
1632 vx.2 = vect_get_vec_def_for_stmt_copy (vinfo, vx.1);
1633 vx.3 = vect_get_vec_def_for_stmt_copy (vinfo, vx.2); */
1635 tree
1636 vect_get_vec_def_for_stmt_copy (vec_info *vinfo, tree vec_oprnd)
1638 stmt_vec_info def_stmt_info = vinfo->lookup_def (vec_oprnd);
1639 if (!def_stmt_info)
1640 /* Do nothing; can reuse same def. */
1641 return vec_oprnd;
1643 def_stmt_info = STMT_VINFO_RELATED_STMT (def_stmt_info);
1644 gcc_assert (def_stmt_info);
1645 if (gphi *phi = dyn_cast <gphi *> (def_stmt_info->stmt))
1646 vec_oprnd = PHI_RESULT (phi);
1647 else
1648 vec_oprnd = gimple_get_lhs (def_stmt_info->stmt);
1649 return vec_oprnd;
1653 /* Get vectorized definitions for the operands to create a copy of an original
1654 stmt. See vect_get_vec_def_for_stmt_copy () for details. */
1656 void
1657 vect_get_vec_defs_for_stmt_copy (vec_info *vinfo,
1658 vec<tree> *vec_oprnds0,
1659 vec<tree> *vec_oprnds1)
1661 tree vec_oprnd = vec_oprnds0->pop ();
1663 vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd);
1664 vec_oprnds0->quick_push (vec_oprnd);
1666 if (vec_oprnds1 && vec_oprnds1->length ())
1668 vec_oprnd = vec_oprnds1->pop ();
1669 vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd);
1670 vec_oprnds1->quick_push (vec_oprnd);
1675 /* Get vectorized definitions for OP0 and OP1. */
1677 void
1678 vect_get_vec_defs (tree op0, tree op1, stmt_vec_info stmt_info,
1679 vec<tree> *vec_oprnds0,
1680 vec<tree> *vec_oprnds1,
1681 slp_tree slp_node)
1683 if (slp_node)
1685 int nops = (op1 == NULL_TREE) ? 1 : 2;
1686 auto_vec<tree> ops (nops);
1687 auto_vec<vec<tree> > vec_defs (nops);
1689 ops.quick_push (op0);
1690 if (op1)
1691 ops.quick_push (op1);
1693 vect_get_slp_defs (ops, slp_node, &vec_defs);
1695 *vec_oprnds0 = vec_defs[0];
1696 if (op1)
1697 *vec_oprnds1 = vec_defs[1];
1699 else
1701 tree vec_oprnd;
1703 vec_oprnds0->create (1);
1704 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt_info);
1705 vec_oprnds0->quick_push (vec_oprnd);
1707 if (op1)
1709 vec_oprnds1->create (1);
1710 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt_info);
1711 vec_oprnds1->quick_push (vec_oprnd);
1716 /* Helper function called by vect_finish_replace_stmt and
1717 vect_finish_stmt_generation. Set the location of the new
1718 statement and create and return a stmt_vec_info for it. */
1720 static stmt_vec_info
1721 vect_finish_stmt_generation_1 (stmt_vec_info stmt_info, gimple *vec_stmt)
1723 vec_info *vinfo = stmt_info->vinfo;
1725 stmt_vec_info vec_stmt_info = vinfo->add_stmt (vec_stmt);
1727 if (dump_enabled_p ())
1729 dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
1730 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
1733 gimple_set_location (vec_stmt, gimple_location (stmt_info->stmt));
1735 /* While EH edges will generally prevent vectorization, stmt might
1736 e.g. be in a must-not-throw region. Ensure newly created stmts
1737 that could throw are part of the same region. */
1738 int lp_nr = lookup_stmt_eh_lp (stmt_info->stmt);
1739 if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
1740 add_stmt_to_eh_lp (vec_stmt, lp_nr);
1742 return vec_stmt_info;
1745 /* Replace the scalar statement STMT_INFO with a new vector statement VEC_STMT,
1746 which sets the same scalar result as STMT_INFO did. Create and return a
1747 stmt_vec_info for VEC_STMT. */
1749 stmt_vec_info
1750 vect_finish_replace_stmt (stmt_vec_info stmt_info, gimple *vec_stmt)
1752 gcc_assert (gimple_get_lhs (stmt_info->stmt) == gimple_get_lhs (vec_stmt));
1754 gimple_stmt_iterator gsi = gsi_for_stmt (stmt_info->stmt);
1755 gsi_replace (&gsi, vec_stmt, false);
1757 return vect_finish_stmt_generation_1 (stmt_info, vec_stmt);
1760 /* Add VEC_STMT to the vectorized implementation of STMT_INFO and insert it
1761 before *GSI. Create and return a stmt_vec_info for VEC_STMT. */
1763 stmt_vec_info
1764 vect_finish_stmt_generation (stmt_vec_info stmt_info, gimple *vec_stmt,
1765 gimple_stmt_iterator *gsi)
1767 gcc_assert (gimple_code (stmt_info->stmt) != GIMPLE_LABEL);
1769 if (!gsi_end_p (*gsi)
1770 && gimple_has_mem_ops (vec_stmt))
1772 gimple *at_stmt = gsi_stmt (*gsi);
1773 tree vuse = gimple_vuse (at_stmt);
1774 if (vuse && TREE_CODE (vuse) == SSA_NAME)
1776 tree vdef = gimple_vdef (at_stmt);
1777 gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
1778 /* If we have an SSA vuse and insert a store, update virtual
1779 SSA form to avoid triggering the renamer. Do so only
1780 if we can easily see all uses - which is what almost always
1781 happens with the way vectorized stmts are inserted. */
1782 if ((vdef && TREE_CODE (vdef) == SSA_NAME)
1783 && ((is_gimple_assign (vec_stmt)
1784 && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
1785 || (is_gimple_call (vec_stmt)
1786 && !(gimple_call_flags (vec_stmt)
1787 & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
1789 tree new_vdef = copy_ssa_name (vuse, vec_stmt);
1790 gimple_set_vdef (vec_stmt, new_vdef);
1791 SET_USE (gimple_vuse_op (at_stmt), new_vdef);
1795 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1796 return vect_finish_stmt_generation_1 (stmt_info, vec_stmt);
1799 /* We want to vectorize a call to combined function CFN with function
1800 decl FNDECL, using VECTYPE_OUT as the type of the output and VECTYPE_IN
1801    as the type of all inputs.  Check whether this is possible using
1802 an internal function, returning its code if so or IFN_LAST if not. */
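/* For example, assuming a target that provides a V2DF square-root optab:
   a scalar call to sqrt maps to CFN_SQRT and hence to IFN_SQRT, a
   vectorizable direct internal function, so with VECTYPE_OUT == VECTYPE_IN
   == V2DF the function below would typically return IFN_SQRT; for an
   unsupported combination it returns IFN_LAST.  */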
1804 static internal_fn
1805 vectorizable_internal_function (combined_fn cfn, tree fndecl,
1806 tree vectype_out, tree vectype_in)
1808 internal_fn ifn;
1809 if (internal_fn_p (cfn))
1810 ifn = as_internal_fn (cfn);
1811 else
1812 ifn = associated_internal_fn (fndecl);
1813 if (ifn != IFN_LAST && direct_internal_fn_p (ifn))
1815 const direct_internal_fn_info &info = direct_internal_fn (ifn);
1816 if (info.vectorizable)
1818 tree type0 = (info.type0 < 0 ? vectype_out : vectype_in);
1819 tree type1 = (info.type1 < 0 ? vectype_out : vectype_in);
1820 if (direct_internal_fn_supported_p (ifn, tree_pair (type0, type1),
1821 OPTIMIZE_FOR_SPEED))
1822 return ifn;
1825 return IFN_LAST;
1829 static tree permute_vec_elements (tree, tree, tree, stmt_vec_info,
1830 gimple_stmt_iterator *);
1832 /* Check whether a load or store statement in the loop described by
1833 LOOP_VINFO is possible in a fully-masked loop. This is testing
1834 whether the vectorizer pass has the appropriate support, as well as
1835 whether the target does.
1837 VLS_TYPE says whether the statement is a load or store and VECTYPE
1838 is the type of the vector being loaded or stored. MEMORY_ACCESS_TYPE
1839 says how the load or store is going to be implemented and GROUP_SIZE
1840 is the number of load or store statements in the containing group.
1841 If the access is a gather load or scatter store, GS_INFO describes
1842 its arguments.
1844 Clear LOOP_VINFO_CAN_FULLY_MASK_P if a fully-masked loop is not
1845 supported, otherwise record the required mask types. */
1847 static void
1848 check_load_store_masking (loop_vec_info loop_vinfo, tree vectype,
1849 vec_load_store_type vls_type, int group_size,
1850 vect_memory_access_type memory_access_type,
1851 gather_scatter_info *gs_info)
1853 /* Invariant loads need no special support. */
1854 if (memory_access_type == VMAT_INVARIANT)
1855 return;
1857 vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
1858 machine_mode vecmode = TYPE_MODE (vectype);
1859 bool is_load = (vls_type == VLS_LOAD);
1860 if (memory_access_type == VMAT_LOAD_STORE_LANES)
1862 if (is_load
1863 ? !vect_load_lanes_supported (vectype, group_size, true)
1864 : !vect_store_lanes_supported (vectype, group_size, true))
1866 if (dump_enabled_p ())
1867 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1868 "can't use a fully-masked loop because the"
1869 " target doesn't have an appropriate masked"
1870 " load/store-lanes instruction.\n");
1871 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
1872 return;
1874 unsigned int ncopies = vect_get_num_copies (loop_vinfo, vectype);
1875 vect_record_loop_mask (loop_vinfo, masks, ncopies, vectype);
1876 return;
1879 if (memory_access_type == VMAT_GATHER_SCATTER)
1881 internal_fn ifn = (is_load
1882 ? IFN_MASK_GATHER_LOAD
1883 : IFN_MASK_SCATTER_STORE);
1884 tree offset_type = TREE_TYPE (gs_info->offset);
1885 if (!internal_gather_scatter_fn_supported_p (ifn, vectype,
1886 gs_info->memory_type,
1887 TYPE_SIGN (offset_type),
1888 gs_info->scale))
1890 if (dump_enabled_p ())
1891 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1892 "can't use a fully-masked loop because the"
1893 " target doesn't have an appropriate masked"
1894 " gather load or scatter store instruction.\n");
1895 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
1896 return;
1898 unsigned int ncopies = vect_get_num_copies (loop_vinfo, vectype);
1899 vect_record_loop_mask (loop_vinfo, masks, ncopies, vectype);
1900 return;
1903 if (memory_access_type != VMAT_CONTIGUOUS
1904 && memory_access_type != VMAT_CONTIGUOUS_PERMUTE)
1906 /* Element X of the data must come from iteration i * VF + X of the
1907 scalar loop. We need more work to support other mappings. */
1908 if (dump_enabled_p ())
1909 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1910 "can't use a fully-masked loop because an access"
1911 " isn't contiguous.\n");
1912 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
1913 return;
1916 machine_mode mask_mode;
1917 if (!(targetm.vectorize.get_mask_mode
1918 (GET_MODE_NUNITS (vecmode),
1919 GET_MODE_SIZE (vecmode)).exists (&mask_mode))
1920 || !can_vec_mask_load_store_p (vecmode, mask_mode, is_load))
1922 if (dump_enabled_p ())
1923 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1924 "can't use a fully-masked loop because the target"
1925 " doesn't have the appropriate masked load or"
1926 " store.\n");
1927 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
1928 return;
1930 /* We might load more scalars than we need for permuting SLP loads.
1931 We checked in get_group_load_store_type that the extra elements
1932 don't leak into a new vector. */
1933 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
1934 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1935 unsigned int nvectors;
1936 if (can_div_away_from_zero_p (group_size * vf, nunits, &nvectors))
1937 vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype);
1938 else
1939 gcc_unreachable ();
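/* Worked example for the mask count above (illustrative values): with
   GROUP_SIZE == 2, a vectorization factor of 8 and NUNITS == 4,
   group_size * vf == 16 scalar elements are accessed per vector iteration,
   so can_div_away_from_zero_p computes NVECTORS == 4, the number of
   vectors of type VECTYPE that need loop masks.  */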
1942 /* Return the mask input to a masked load or store. VEC_MASK is the vectorized
1943 form of the scalar mask condition and LOOP_MASK, if nonnull, is the mask
1944 that needs to be applied to all loads and stores in a vectorized loop.
1945 Return VEC_MASK if LOOP_MASK is null, otherwise return VEC_MASK & LOOP_MASK.
1947 MASK_TYPE is the type of both masks. If new statements are needed,
1948 insert them before GSI. */
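/* For example (illustrative values): if LOOP_MASK is { 1, 1, 1, 0 } because
   only the first three lanes are active in the final iteration, and VEC_MASK
   is { 1, 0, 1, 1 } from the scalar condition, the emitted BIT_AND_EXPR
   produces { 1, 0, 1, 0 }: lane 3 is suppressed by the loop mask and
   lane 1 by the user condition.  */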
1950 static tree
1951 prepare_load_store_mask (tree mask_type, tree loop_mask, tree vec_mask,
1952 gimple_stmt_iterator *gsi)
1954 gcc_assert (useless_type_conversion_p (mask_type, TREE_TYPE (vec_mask)));
1955 if (!loop_mask)
1956 return vec_mask;
1958 gcc_assert (TREE_TYPE (loop_mask) == mask_type);
1959 tree and_res = make_temp_ssa_name (mask_type, NULL, "vec_mask_and");
1960 gimple *and_stmt = gimple_build_assign (and_res, BIT_AND_EXPR,
1961 vec_mask, loop_mask);
1962 gsi_insert_before (gsi, and_stmt, GSI_SAME_STMT);
1963 return and_res;
1966 /* Determine whether we can use a gather load or scatter store to vectorize
1967 strided load or store STMT_INFO by truncating the current offset to a
1968 smaller width. We need to be able to construct an offset vector:
1970 { 0, X, X*2, X*3, ... }
1972 without loss of precision, where X is STMT_INFO's DR_STEP.
1974 Return true if this is possible, describing the gather load or scatter
1975 store in GS_INFO. MASKED_P is true if the load or store is conditional. */
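/* A worked example with made-up numbers: for 32-bit elements, DR_STEP == 36
   and a maximum element index of 255, the largest offset with SCALE == 1 is
   255 * 36 == 9180, which fits comfortably in 32 bits, so the offset can be
   truncated to a 32-bit type and the access implemented as a gather or
   scatter with offset vector { 0, 36, 72, ... }.  */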
1977 static bool
1978 vect_truncate_gather_scatter_offset (stmt_vec_info stmt_info,
1979 loop_vec_info loop_vinfo, bool masked_p,
1980 gather_scatter_info *gs_info)
1982 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
1983 data_reference *dr = dr_info->dr;
1984 tree step = DR_STEP (dr);
1985 if (TREE_CODE (step) != INTEGER_CST)
1987 /* ??? Perhaps we could use range information here? */
1988 if (dump_enabled_p ())
1989 dump_printf_loc (MSG_NOTE, vect_location,
1990 "cannot truncate variable step.\n");
1991 return false;
1994 /* Get the number of bits in an element. */
1995 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1996 scalar_mode element_mode = SCALAR_TYPE_MODE (TREE_TYPE (vectype));
1997 unsigned int element_bits = GET_MODE_BITSIZE (element_mode);
1999 /* Set COUNT to the upper limit on the number of elements - 1.
2000 Start with the maximum vectorization factor. */
2001 unsigned HOST_WIDE_INT count = vect_max_vf (loop_vinfo) - 1;
2003 /* Try lowering COUNT to the number of scalar latch iterations. */
2004 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2005 widest_int max_iters;
2006 if (max_loop_iterations (loop, &max_iters)
2007 && max_iters < count)
2008 count = max_iters.to_shwi ();
2010 /* Try scales of 1 and the element size. */
2011 int scales[] = { 1, vect_get_scalar_dr_size (dr_info) };
2012 wi::overflow_type overflow = wi::OVF_NONE;
2013 for (int i = 0; i < 2; ++i)
2015 int scale = scales[i];
2016 widest_int factor;
2017 if (!wi::multiple_of_p (wi::to_widest (step), scale, SIGNED, &factor))
2018 continue;
2020 /* See whether we can calculate (COUNT - 1) * STEP / SCALE
2021 in OFFSET_BITS bits. */
2022 widest_int range = wi::mul (count, factor, SIGNED, &overflow);
2023 if (overflow)
2024 continue;
2025 signop sign = range >= 0 ? UNSIGNED : SIGNED;
2026 if (wi::min_precision (range, sign) > element_bits)
2028 overflow = wi::OVF_UNKNOWN;
2029 continue;
2032 /* See whether the target supports the operation. */
2033 tree memory_type = TREE_TYPE (DR_REF (dr));
2034 if (!vect_gather_scatter_fn_p (DR_IS_READ (dr), masked_p, vectype,
2035 memory_type, element_bits, sign, scale,
2036 &gs_info->ifn, &gs_info->element_type))
2037 continue;
2039 tree offset_type = build_nonstandard_integer_type (element_bits,
2040 sign == UNSIGNED);
2042 gs_info->decl = NULL_TREE;
2043 /* Logically the sum of DR_BASE_ADDRESS, DR_INIT and DR_OFFSET,
2044 but we don't need to store that here. */
2045 gs_info->base = NULL_TREE;
2046 gs_info->offset = fold_convert (offset_type, step);
2047 gs_info->offset_dt = vect_constant_def;
2048 gs_info->offset_vectype = NULL_TREE;
2049 gs_info->scale = scale;
2050 gs_info->memory_type = memory_type;
2051 return true;
2054 if (overflow && dump_enabled_p ())
2055 dump_printf_loc (MSG_NOTE, vect_location,
2056 "truncating gather/scatter offset to %d bits"
2057 " might change its value.\n", element_bits);
2059 return false;
2062 /* Return true if we can use gather/scatter internal functions to
2063 vectorize STMT_INFO, which is a grouped or strided load or store.
2064    MASKED_P is true if the load or store is conditional.  When returning
2065 true, fill in GS_INFO with the information required to perform the
2066 operation. */
2068 static bool
2069 vect_use_strided_gather_scatters_p (stmt_vec_info stmt_info,
2070 loop_vec_info loop_vinfo, bool masked_p,
2071 gather_scatter_info *gs_info)
2073 if (!vect_check_gather_scatter (stmt_info, loop_vinfo, gs_info)
2074 || gs_info->decl)
2075 return vect_truncate_gather_scatter_offset (stmt_info, loop_vinfo,
2076 masked_p, gs_info);
2078 scalar_mode element_mode = SCALAR_TYPE_MODE (gs_info->element_type);
2079 unsigned int element_bits = GET_MODE_BITSIZE (element_mode);
2080 tree offset_type = TREE_TYPE (gs_info->offset);
2081 unsigned int offset_bits = TYPE_PRECISION (offset_type);
2083 /* Enforced by vect_check_gather_scatter. */
2084 gcc_assert (element_bits >= offset_bits);
2086 /* If the elements are wider than the offset, convert the offset to the
2087 same width, without changing its sign. */
2088 if (element_bits > offset_bits)
2090 bool unsigned_p = TYPE_UNSIGNED (offset_type);
2091 offset_type = build_nonstandard_integer_type (element_bits, unsigned_p);
2092 gs_info->offset = fold_convert (offset_type, gs_info->offset);
2095 if (dump_enabled_p ())
2096 dump_printf_loc (MSG_NOTE, vect_location,
2097 "using gather/scatter for strided/grouped access,"
2098 " scale = %d\n", gs_info->scale);
2100 return true;
2103 /* STMT_INFO is a non-strided load or store, meaning that it accesses
2104 elements with a known constant step. Return -1 if that step
2105 is negative, 0 if it is zero, and 1 if it is greater than zero. */
2107 static int
2108 compare_step_with_zero (stmt_vec_info stmt_info)
2110 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
2111 return tree_int_cst_compare (vect_dr_behavior (dr_info)->step,
2112 size_zero_node);
2115 /* If the target supports a permute mask that reverses the elements in
2116 a vector of type VECTYPE, return that mask, otherwise return null. */
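/* For example, with NUNITS == 8 the builder below records the single stepped
   pattern { 7, 6, 5 }, which vec_perm_indices extrapolates to the full
   reversal mask { 7, 6, 5, 4, 3, 2, 1, 0 }.  */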
2118 static tree
2119 perm_mask_for_reverse (tree vectype)
2121 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2123 /* The encoding has a single stepped pattern. */
2124 vec_perm_builder sel (nunits, 1, 3);
2125 for (int i = 0; i < 3; ++i)
2126 sel.quick_push (nunits - 1 - i);
2128 vec_perm_indices indices (sel, 1, nunits);
2129 if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
2130 return NULL_TREE;
2131 return vect_gen_perm_mask_checked (vectype, indices);
2134 /* STMT_INFO is either a masked or unconditional store. Return the value
2135 being stored. */
2137 tree
2138 vect_get_store_rhs (stmt_vec_info stmt_info)
2140 if (gassign *assign = dyn_cast <gassign *> (stmt_info->stmt))
2142 gcc_assert (gimple_assign_single_p (assign));
2143 return gimple_assign_rhs1 (assign);
2145 if (gcall *call = dyn_cast <gcall *> (stmt_info->stmt))
2147 internal_fn ifn = gimple_call_internal_fn (call);
2148 int index = internal_fn_stored_value_index (ifn);
2149 gcc_assert (index >= 0);
2150 return gimple_call_arg (call, index);
2152 gcc_unreachable ();
2155 /* A subroutine of get_load_store_type, with a subset of the same
2156 arguments. Handle the case where STMT_INFO is part of a grouped load
2157 or store.
2159 For stores, the statements in the group are all consecutive
2160 and there is no gap at the end. For loads, the statements in the
2161 group might not be consecutive; there can be gaps between statements
2162 as well as at the end. */
2164 static bool
2165 get_group_load_store_type (stmt_vec_info stmt_info, tree vectype, bool slp,
2166 bool masked_p, vec_load_store_type vls_type,
2167 vect_memory_access_type *memory_access_type,
2168 gather_scatter_info *gs_info)
2170 vec_info *vinfo = stmt_info->vinfo;
2171 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2172 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
2173 stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
2174 dr_vec_info *first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
2175 unsigned int group_size = DR_GROUP_SIZE (first_stmt_info);
2176 bool single_element_p = (stmt_info == first_stmt_info
2177 && !DR_GROUP_NEXT_ELEMENT (stmt_info));
2178 unsigned HOST_WIDE_INT gap = DR_GROUP_GAP (first_stmt_info);
2179 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2181 /* True if the vectorized statements would access beyond the last
2182 statement in the group. */
2183 bool overrun_p = false;
2185 /* True if we can cope with such overrun by peeling for gaps, so that
2186 there is at least one final scalar iteration after the vector loop. */
2187 bool can_overrun_p = (!masked_p
2188 && vls_type == VLS_LOAD
2189 && loop_vinfo
2190 && !loop->inner);
2192 /* There can only be a gap at the end of the group if the stride is
2193 known at compile time. */
2194 gcc_assert (!STMT_VINFO_STRIDED_P (stmt_info) || gap == 0);
2196 /* Stores can't yet have gaps. */
2197 gcc_assert (slp || vls_type == VLS_LOAD || gap == 0);
2199 if (slp)
2201 if (STMT_VINFO_STRIDED_P (stmt_info))
2203 /* Try to use consecutive accesses of DR_GROUP_SIZE elements,
2204 separated by the stride, until we have a complete vector.
2205 Fall back to scalar accesses if that isn't possible. */
2206 if (multiple_p (nunits, group_size))
2207 *memory_access_type = VMAT_STRIDED_SLP;
2208 else
2209 *memory_access_type = VMAT_ELEMENTWISE;
2211 else
2213 overrun_p = loop_vinfo && gap != 0;
2214 if (overrun_p && vls_type != VLS_LOAD)
2216 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2217 "Grouped store with gaps requires"
2218 " non-consecutive accesses\n");
2219 return false;
2221 /* An overrun is fine if the trailing elements are smaller
2222 than the alignment boundary B. Every vector access will
2223 be a multiple of B and so we are guaranteed to access a
2224 non-gap element in the same B-sized block. */
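	  /* For instance (illustrative values): with a known alignment of
	     16 bytes and 4-byte scalar elements, B holds four elements, so
	     a trailing gap of up to three elements can be read safely.  */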
2225 if (overrun_p
2226 && gap < (vect_known_alignment_in_bytes (first_dr_info)
2227 / vect_get_scalar_dr_size (first_dr_info)))
2228 overrun_p = false;
2229 if (overrun_p && !can_overrun_p)
2231 if (dump_enabled_p ())
2232 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2233 "Peeling for outer loop is not supported\n");
2234 return false;
2236 *memory_access_type = VMAT_CONTIGUOUS;
2239 else
2241 /* We can always handle this case using elementwise accesses,
2242 but see if something more efficient is available. */
2243 *memory_access_type = VMAT_ELEMENTWISE;
2245 /* If there is a gap at the end of the group then these optimizations
2246 would access excess elements in the last iteration. */
2247 bool would_overrun_p = (gap != 0);
2248 /* An overrun is fine if the trailing elements are smaller than the
2249 alignment boundary B. Every vector access will be a multiple of B
2250 and so we are guaranteed to access a non-gap element in the
2251 same B-sized block. */
2252 if (would_overrun_p
2253 && !masked_p
2254 && gap < (vect_known_alignment_in_bytes (first_dr_info)
2255 / vect_get_scalar_dr_size (first_dr_info)))
2256 would_overrun_p = false;
2258 if (!STMT_VINFO_STRIDED_P (stmt_info)
2259 && (can_overrun_p || !would_overrun_p)
2260 && compare_step_with_zero (stmt_info) > 0)
2262 /* First cope with the degenerate case of a single-element
2263 vector. */
2264 if (known_eq (TYPE_VECTOR_SUBPARTS (vectype), 1U))
2265 *memory_access_type = VMAT_CONTIGUOUS;
2267 /* Otherwise try using LOAD/STORE_LANES. */
2268 if (*memory_access_type == VMAT_ELEMENTWISE
2269 && (vls_type == VLS_LOAD
2270 ? vect_load_lanes_supported (vectype, group_size, masked_p)
2271 : vect_store_lanes_supported (vectype, group_size,
2272 masked_p)))
2274 *memory_access_type = VMAT_LOAD_STORE_LANES;
2275 overrun_p = would_overrun_p;
2278 /* If that fails, try using permuting loads. */
2279 if (*memory_access_type == VMAT_ELEMENTWISE
2280 && (vls_type == VLS_LOAD
2281 ? vect_grouped_load_supported (vectype, single_element_p,
2282 group_size)
2283 : vect_grouped_store_supported (vectype, group_size)))
2285 *memory_access_type = VMAT_CONTIGUOUS_PERMUTE;
2286 overrun_p = would_overrun_p;
2290 	  /* As a last resort, try using a gather load or scatter store.
2292 ??? Although the code can handle all group sizes correctly,
2293 it probably isn't a win to use separate strided accesses based
2294 on nearby locations. Or, even if it's a win over scalar code,
2295 it might not be a win over vectorizing at a lower VF, if that
2296 allows us to use contiguous accesses. */
2297 if (*memory_access_type == VMAT_ELEMENTWISE
2298 && single_element_p
2299 && loop_vinfo
2300 && vect_use_strided_gather_scatters_p (stmt_info, loop_vinfo,
2301 masked_p, gs_info))
2302 *memory_access_type = VMAT_GATHER_SCATTER;
2305 if (vls_type != VLS_LOAD && first_stmt_info == stmt_info)
2307       /* STMT_INFO is the leader of the group.  Check the operands of all the
2308 stmts of the group. */
2309 stmt_vec_info next_stmt_info = DR_GROUP_NEXT_ELEMENT (stmt_info);
2310 while (next_stmt_info)
2312 tree op = vect_get_store_rhs (next_stmt_info);
2313 enum vect_def_type dt;
2314 if (!vect_is_simple_use (op, vinfo, &dt))
2316 if (dump_enabled_p ())
2317 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2318 "use not simple.\n");
2319 return false;
2321 next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
2325 if (overrun_p)
2327 gcc_assert (can_overrun_p);
2328 if (dump_enabled_p ())
2329 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2330 "Data access with gaps requires scalar "
2331 "epilogue loop\n");
2332 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
2335 return true;
2338 /* A subroutine of get_load_store_type, with a subset of the same
2339 arguments. Handle the case where STMT_INFO is a load or store that
2340 accesses consecutive elements with a negative step. */
2342 static vect_memory_access_type
2343 get_negative_load_store_type (stmt_vec_info stmt_info, tree vectype,
2344 vec_load_store_type vls_type,
2345 unsigned int ncopies)
2347 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
2348 dr_alignment_support alignment_support_scheme;
2350 if (ncopies > 1)
2352 if (dump_enabled_p ())
2353 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2354 "multiple types with negative step.\n");
2355 return VMAT_ELEMENTWISE;
2358 alignment_support_scheme = vect_supportable_dr_alignment (dr_info, false);
2359 if (alignment_support_scheme != dr_aligned
2360 && alignment_support_scheme != dr_unaligned_supported)
2362 if (dump_enabled_p ())
2363 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2364 "negative step but alignment required.\n");
2365 return VMAT_ELEMENTWISE;
2368 if (vls_type == VLS_STORE_INVARIANT)
2370 if (dump_enabled_p ())
2371 dump_printf_loc (MSG_NOTE, vect_location,
2372 "negative step with invariant source;"
2373 " no permute needed.\n");
2374 return VMAT_CONTIGUOUS_DOWN;
2377 if (!perm_mask_for_reverse (vectype))
2379 if (dump_enabled_p ())
2380 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2381 "negative step and reversing not supported.\n");
2382 return VMAT_ELEMENTWISE;
2385 return VMAT_CONTIGUOUS_REVERSE;
2388 /* Analyze load or store statement STMT_INFO of type VLS_TYPE. Return true
2389 if there is a memory access type that the vectorized form can use,
2390 storing it in *MEMORY_ACCESS_TYPE if so. If we decide to use gathers
2391 or scatters, fill in GS_INFO accordingly.
2393 SLP says whether we're performing SLP rather than loop vectorization.
2394 MASKED_P is true if the statement is conditional on a vectorized mask.
2395 VECTYPE is the vector type that the vectorized statements will use.
2396 NCOPIES is the number of vector statements that will be needed. */
2398 static bool
2399 get_load_store_type (stmt_vec_info stmt_info, tree vectype, bool slp,
2400 bool masked_p, vec_load_store_type vls_type,
2401 unsigned int ncopies,
2402 vect_memory_access_type *memory_access_type,
2403 gather_scatter_info *gs_info)
2405 vec_info *vinfo = stmt_info->vinfo;
2406 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2407 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2408 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
2410 *memory_access_type = VMAT_GATHER_SCATTER;
2411 if (!vect_check_gather_scatter (stmt_info, loop_vinfo, gs_info))
2412 gcc_unreachable ();
2413 else if (!vect_is_simple_use (gs_info->offset, vinfo,
2414 &gs_info->offset_dt,
2415 &gs_info->offset_vectype))
2417 if (dump_enabled_p ())
2418 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2419 "%s index use not simple.\n",
2420 vls_type == VLS_LOAD ? "gather" : "scatter");
2421 return false;
2424 else if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
2426 if (!get_group_load_store_type (stmt_info, vectype, slp, masked_p,
2427 vls_type, memory_access_type, gs_info))
2428 return false;
2430 else if (STMT_VINFO_STRIDED_P (stmt_info))
2432 gcc_assert (!slp);
2433 if (loop_vinfo
2434 && vect_use_strided_gather_scatters_p (stmt_info, loop_vinfo,
2435 masked_p, gs_info))
2436 *memory_access_type = VMAT_GATHER_SCATTER;
2437 else
2438 *memory_access_type = VMAT_ELEMENTWISE;
2440 else
2442 int cmp = compare_step_with_zero (stmt_info);
2443 if (cmp < 0)
2444 *memory_access_type = get_negative_load_store_type
2445 (stmt_info, vectype, vls_type, ncopies);
2446 else if (cmp == 0)
2448 gcc_assert (vls_type == VLS_LOAD);
2449 *memory_access_type = VMAT_INVARIANT;
2451 else
2452 *memory_access_type = VMAT_CONTIGUOUS;
2455 if ((*memory_access_type == VMAT_ELEMENTWISE
2456 || *memory_access_type == VMAT_STRIDED_SLP)
2457 && !nunits.is_constant ())
2459 if (dump_enabled_p ())
2460 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2461 "Not using elementwise accesses due to variable "
2462 "vectorization factor.\n");
2463 return false;
2466 /* FIXME: At the moment the cost model seems to underestimate the
2467 cost of using elementwise accesses. This check preserves the
2468 traditional behavior until that can be fixed. */
2469 if (*memory_access_type == VMAT_ELEMENTWISE
2470 && !STMT_VINFO_STRIDED_P (stmt_info)
2471 && !(stmt_info == DR_GROUP_FIRST_ELEMENT (stmt_info)
2472 && !DR_GROUP_NEXT_ELEMENT (stmt_info)
2473 && !pow2p_hwi (DR_GROUP_SIZE (stmt_info))))
2475 if (dump_enabled_p ())
2476 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2477 "not falling back to elementwise accesses\n");
2478 return false;
2480 return true;
2483 /* Return true if boolean argument MASK is suitable for vectorizing
2484 conditional load or store STMT_INFO. When returning true, store the type
2485 of the definition in *MASK_DT_OUT and the type of the vectorized mask
2486 in *MASK_VECTYPE_OUT. */
2488 static bool
2489 vect_check_load_store_mask (stmt_vec_info stmt_info, tree mask,
2490 vect_def_type *mask_dt_out,
2491 tree *mask_vectype_out)
2493 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (mask)))
2495 if (dump_enabled_p ())
2496 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2497 "mask argument is not a boolean.\n");
2498 return false;
2501 if (TREE_CODE (mask) != SSA_NAME)
2503 if (dump_enabled_p ())
2504 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2505 "mask argument is not an SSA name.\n");
2506 return false;
2509 enum vect_def_type mask_dt;
2510 tree mask_vectype;
2511 if (!vect_is_simple_use (mask, stmt_info->vinfo, &mask_dt, &mask_vectype))
2513 if (dump_enabled_p ())
2514 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2515 "mask use not simple.\n");
2516 return false;
2519 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2520 if (!mask_vectype)
2521 mask_vectype = get_mask_type_for_scalar_type (TREE_TYPE (vectype));
2523 if (!mask_vectype || !VECTOR_BOOLEAN_TYPE_P (mask_vectype))
2525 if (dump_enabled_p ())
2526 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2527 "could not find an appropriate vector mask type.\n");
2528 return false;
2531 if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_vectype),
2532 TYPE_VECTOR_SUBPARTS (vectype)))
2534 if (dump_enabled_p ())
2536 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2537 "vector mask type ");
2538 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, mask_vectype);
2539 dump_printf (MSG_MISSED_OPTIMIZATION,
2540 " does not match vector data type ");
2541 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, vectype);
2542 dump_printf (MSG_MISSED_OPTIMIZATION, ".\n");
2544 return false;
2547 *mask_dt_out = mask_dt;
2548 *mask_vectype_out = mask_vectype;
2549 return true;
2552 /* Return true if stored value RHS is suitable for vectorizing store
2553 statement STMT_INFO. When returning true, store the type of the
2554 definition in *RHS_DT_OUT, the type of the vectorized store value in
2555 *RHS_VECTYPE_OUT and the type of the store in *VLS_TYPE_OUT. */
2557 static bool
2558 vect_check_store_rhs (stmt_vec_info stmt_info, tree rhs,
2559 vect_def_type *rhs_dt_out, tree *rhs_vectype_out,
2560 vec_load_store_type *vls_type_out)
2562   /* If this is a store from a constant, make sure native_encode_expr
2563      can handle it.  */
2564 if (CONSTANT_CLASS_P (rhs) && native_encode_expr (rhs, NULL, 64) == 0)
2566 if (dump_enabled_p ())
2567 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2568 "cannot encode constant as a byte sequence.\n");
2569 return false;
2572 enum vect_def_type rhs_dt;
2573 tree rhs_vectype;
2574 if (!vect_is_simple_use (rhs, stmt_info->vinfo, &rhs_dt, &rhs_vectype))
2576 if (dump_enabled_p ())
2577 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2578 "use not simple.\n");
2579 return false;
2582 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2583 if (rhs_vectype && !useless_type_conversion_p (vectype, rhs_vectype))
2585 if (dump_enabled_p ())
2586 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2587 "incompatible vector types.\n");
2588 return false;
2591 *rhs_dt_out = rhs_dt;
2592 *rhs_vectype_out = rhs_vectype;
2593 if (rhs_dt == vect_constant_def || rhs_dt == vect_external_def)
2594 *vls_type_out = VLS_STORE_INVARIANT;
2595 else
2596 *vls_type_out = VLS_STORE;
2597 return true;
2600 /* Build an all-ones vector mask of type MASKTYPE while vectorizing STMT_INFO.
2601 Note that we support masks with floating-point type, in which case the
2602 floats are interpreted as a bitmask. */
2604 static tree
2605 vect_build_all_ones_mask (stmt_vec_info stmt_info, tree masktype)
2607 if (TREE_CODE (masktype) == INTEGER_TYPE)
2608 return build_int_cst (masktype, -1);
2609 else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
2611 tree mask = build_int_cst (TREE_TYPE (masktype), -1);
2612 mask = build_vector_from_val (masktype, mask);
2613 return vect_init_vector (stmt_info, mask, masktype, NULL);
2615 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
2617 REAL_VALUE_TYPE r;
2618 long tmp[6];
2619 for (int j = 0; j < 6; ++j)
2620 tmp[j] = -1;
2621 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
2622 tree mask = build_real (TREE_TYPE (masktype), r);
2623 mask = build_vector_from_val (masktype, mask);
2624 return vect_init_vector (stmt_info, mask, masktype, NULL);
2626 gcc_unreachable ();
2629 /* Build an all-zero merge value of type VECTYPE while vectorizing
2630 STMT_INFO as a gather load. */
2632 static tree
2633 vect_build_zero_merge_argument (stmt_vec_info stmt_info, tree vectype)
2635 tree merge;
2636 if (TREE_CODE (TREE_TYPE (vectype)) == INTEGER_TYPE)
2637 merge = build_int_cst (TREE_TYPE (vectype), 0);
2638 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (vectype)))
2640 REAL_VALUE_TYPE r;
2641 long tmp[6];
2642 for (int j = 0; j < 6; ++j)
2643 tmp[j] = 0;
2644 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (vectype)));
2645 merge = build_real (TREE_TYPE (vectype), r);
2647 else
2648 gcc_unreachable ();
2649 merge = build_vector_from_val (vectype, merge);
2650 return vect_init_vector (stmt_info, merge, vectype, NULL);
2653 /* Build a gather load call while vectorizing STMT_INFO. Insert new
2654 instructions before GSI and add them to VEC_STMT. GS_INFO describes
2655 the gather load operation. If the load is conditional, MASK is the
2656 unvectorized condition and MASK_DT is its definition type, otherwise
2657 MASK is null. */
2659 static void
2660 vect_build_gather_load_calls (stmt_vec_info stmt_info,
2661 gimple_stmt_iterator *gsi,
2662 stmt_vec_info *vec_stmt,
2663 gather_scatter_info *gs_info,
2664 tree mask)
2666 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2667 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2668 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2669 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2670 int ncopies = vect_get_num_copies (loop_vinfo, vectype);
2671 edge pe = loop_preheader_edge (loop);
2672 enum { NARROW, NONE, WIDEN } modifier;
2673 poly_uint64 gather_off_nunits
2674 = TYPE_VECTOR_SUBPARTS (gs_info->offset_vectype);
2676 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info->decl));
2677 tree rettype = TREE_TYPE (TREE_TYPE (gs_info->decl));
2678 tree srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2679 tree ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2680 tree idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2681 tree masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2682 tree scaletype = TREE_VALUE (arglist);
2683 gcc_checking_assert (types_compatible_p (srctype, rettype)
2684 && (!mask || types_compatible_p (srctype, masktype)));
2686 tree perm_mask = NULL_TREE;
2687 tree mask_perm_mask = NULL_TREE;
2688 if (known_eq (nunits, gather_off_nunits))
2689 modifier = NONE;
2690 else if (known_eq (nunits * 2, gather_off_nunits))
2692 modifier = WIDEN;
2694 /* Currently widening gathers and scatters are only supported for
2695 fixed-length vectors. */
2696 int count = gather_off_nunits.to_constant ();
2697 vec_perm_builder sel (count, count, 1);
2698 for (int i = 0; i < count; ++i)
2699 sel.quick_push (i | (count / 2));
2701 vec_perm_indices indices (sel, 1, count);
2702 perm_mask = vect_gen_perm_mask_checked (gs_info->offset_vectype,
2703 indices);
2705 else if (known_eq (nunits, gather_off_nunits * 2))
2707 modifier = NARROW;
2709 /* Currently narrowing gathers and scatters are only supported for
2710 fixed-length vectors. */
2711 int count = nunits.to_constant ();
2712 vec_perm_builder sel (count, count, 1);
2713 sel.quick_grow (count);
2714 for (int i = 0; i < count; ++i)
2715 sel[i] = i < count / 2 ? i : i + count / 2;
2716 vec_perm_indices indices (sel, 2, count);
2717 perm_mask = vect_gen_perm_mask_checked (vectype, indices);
2719 ncopies *= 2;
2721 if (mask)
2723 for (int i = 0; i < count; ++i)
2724 sel[i] = i | (count / 2);
2725 indices.new_vector (sel, 2, count);
2726 mask_perm_mask = vect_gen_perm_mask_checked (masktype, indices);
2729 else
2730 gcc_unreachable ();
2732 tree scalar_dest = gimple_get_lhs (stmt_info->stmt);
2733 tree vec_dest = vect_create_destination_var (scalar_dest, vectype);
2735 tree ptr = fold_convert (ptrtype, gs_info->base);
2736 if (!is_gimple_min_invariant (ptr))
2738 gimple_seq seq;
2739 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
2740 basic_block new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
2741 gcc_assert (!new_bb);
2744 tree scale = build_int_cst (scaletype, gs_info->scale);
2746 tree vec_oprnd0 = NULL_TREE;
2747 tree vec_mask = NULL_TREE;
2748 tree src_op = NULL_TREE;
2749 tree mask_op = NULL_TREE;
2750 tree prev_res = NULL_TREE;
2751 stmt_vec_info prev_stmt_info = NULL;
2753 if (!mask)
2755 src_op = vect_build_zero_merge_argument (stmt_info, rettype);
2756 mask_op = vect_build_all_ones_mask (stmt_info, masktype);
2759 for (int j = 0; j < ncopies; ++j)
2761 tree op, var;
2762 if (modifier == WIDEN && (j & 1))
2763 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
2764 perm_mask, stmt_info, gsi);
2765 else if (j == 0)
2766 op = vec_oprnd0
2767 = vect_get_vec_def_for_operand (gs_info->offset, stmt_info);
2768 else
2769 op = vec_oprnd0 = vect_get_vec_def_for_stmt_copy (loop_vinfo,
2770 vec_oprnd0);
2772 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
2774 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
2775 TYPE_VECTOR_SUBPARTS (idxtype)));
2776 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
2777 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
2778 gassign *new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
2779 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
2780 op = var;
2783 if (mask)
2785 if (mask_perm_mask && (j & 1))
2786 mask_op = permute_vec_elements (mask_op, mask_op,
2787 mask_perm_mask, stmt_info, gsi);
2788 else
2790 if (j == 0)
2791 vec_mask = vect_get_vec_def_for_operand (mask, stmt_info);
2792 else
2793 vec_mask = vect_get_vec_def_for_stmt_copy (loop_vinfo,
2794 vec_mask);
2796 mask_op = vec_mask;
2797 if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
2799 gcc_assert
2800 (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op)),
2801 TYPE_VECTOR_SUBPARTS (masktype)));
2802 var = vect_get_new_ssa_name (masktype, vect_simple_var);
2803 mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
2804 gassign *new_stmt
2805 = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op);
2806 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
2807 mask_op = var;
2810 src_op = mask_op;
2813 gcall *new_call = gimple_build_call (gs_info->decl, 5, src_op, ptr, op,
2814 mask_op, scale);
2816 stmt_vec_info new_stmt_info;
2817 if (!useless_type_conversion_p (vectype, rettype))
2819 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype),
2820 TYPE_VECTOR_SUBPARTS (rettype)));
2821 op = vect_get_new_ssa_name (rettype, vect_simple_var);
2822 gimple_call_set_lhs (new_call, op);
2823 vect_finish_stmt_generation (stmt_info, new_call, gsi);
2824 var = make_ssa_name (vec_dest);
2825 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
2826 gassign *new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
2827 new_stmt_info
2828 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
2830 else
2832 var = make_ssa_name (vec_dest, new_call);
2833 gimple_call_set_lhs (new_call, var);
2834 new_stmt_info
2835 = vect_finish_stmt_generation (stmt_info, new_call, gsi);
2838 if (modifier == NARROW)
2840 if ((j & 1) == 0)
2842 prev_res = var;
2843 continue;
2845 var = permute_vec_elements (prev_res, var, perm_mask,
2846 stmt_info, gsi);
2847 new_stmt_info = loop_vinfo->lookup_def (var);
2850 if (prev_stmt_info == NULL)
2851 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
2852 else
2853 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
2854 prev_stmt_info = new_stmt_info;
2858 /* Prepare the base and offset in GS_INFO for vectorization.
2859 Set *DATAREF_PTR to the loop-invariant base address and *VEC_OFFSET
2860 to the vectorized offset argument for the first copy of STMT_INFO.
2861 STMT_INFO is the statement described by GS_INFO and LOOP is the
2862 containing loop. */
2864 static void
2865 vect_get_gather_scatter_ops (struct loop *loop, stmt_vec_info stmt_info,
2866 gather_scatter_info *gs_info,
2867 tree *dataref_ptr, tree *vec_offset)
2869 gimple_seq stmts = NULL;
2870 *dataref_ptr = force_gimple_operand (gs_info->base, &stmts, true, NULL_TREE);
2871 if (stmts != NULL)
2873 basic_block new_bb;
2874 edge pe = loop_preheader_edge (loop);
2875 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
2876 gcc_assert (!new_bb);
2878 tree offset_type = TREE_TYPE (gs_info->offset);
2879 tree offset_vectype = get_vectype_for_scalar_type (offset_type);
2880 *vec_offset = vect_get_vec_def_for_operand (gs_info->offset, stmt_info,
2881 offset_vectype);
2884 /* Prepare to implement a grouped or strided load or store using
2885 the gather load or scatter store operation described by GS_INFO.
2886 STMT_INFO is the load or store statement.
2888 Set *DATAREF_BUMP to the amount that should be added to the base
2889 address after each copy of the vectorized statement. Set *VEC_OFFSET
2890 to an invariant offset vector in which element I has the value
2891 I * DR_STEP / SCALE. */
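/* For example (illustrative values): with DR_STEP == 8, SCALE == 4 and four
   vector lanes, X == 2, so *VEC_OFFSET becomes { 0, 2, 4, 6 } and
   *DATAREF_BUMP is 8 * 4 == 32 bytes per copy.  */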
2893 static void
2894 vect_get_strided_load_store_ops (stmt_vec_info stmt_info,
2895 loop_vec_info loop_vinfo,
2896 gather_scatter_info *gs_info,
2897 tree *dataref_bump, tree *vec_offset)
2899 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
2900 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2901 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2902 gimple_seq stmts;
2904 tree bump = size_binop (MULT_EXPR,
2905 fold_convert (sizetype, DR_STEP (dr)),
2906 size_int (TYPE_VECTOR_SUBPARTS (vectype)));
2907 *dataref_bump = force_gimple_operand (bump, &stmts, true, NULL_TREE);
2908 if (stmts)
2909 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
2911 /* The offset given in GS_INFO can have pointer type, so use the element
2912 type of the vector instead. */
2913 tree offset_type = TREE_TYPE (gs_info->offset);
2914 tree offset_vectype = get_vectype_for_scalar_type (offset_type);
2915 offset_type = TREE_TYPE (offset_vectype);
2917 /* Calculate X = DR_STEP / SCALE and convert it to the appropriate type. */
2918 tree step = size_binop (EXACT_DIV_EXPR, DR_STEP (dr),
2919 ssize_int (gs_info->scale));
2920 step = fold_convert (offset_type, step);
2921 step = force_gimple_operand (step, &stmts, true, NULL_TREE);
2923 /* Create {0, X, X*2, X*3, ...}. */
2924 *vec_offset = gimple_build (&stmts, VEC_SERIES_EXPR, offset_vectype,
2925 build_zero_cst (offset_type), step);
2926 if (stmts)
2927 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
2930 /* Return the amount that should be added to a vector pointer to move
2931 to the next or previous copy of AGGR_TYPE. DR_INFO is the data reference
2932 being vectorized and MEMORY_ACCESS_TYPE describes the type of
2933 vectorization. */
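/* For example, for a contiguous V4SF access AGGR_TYPE is 16 bytes, so the
   increment is 16 for a forward step and -16 when the data reference's step
   is negative; invariant accesses get a zero increment.  */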
2935 static tree
2936 vect_get_data_ptr_increment (dr_vec_info *dr_info, tree aggr_type,
2937 vect_memory_access_type memory_access_type)
2939 if (memory_access_type == VMAT_INVARIANT)
2940 return size_zero_node;
2942 tree iv_step = TYPE_SIZE_UNIT (aggr_type);
2943 tree step = vect_dr_behavior (dr_info)->step;
2944 if (tree_int_cst_sgn (step) == -1)
2945 iv_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv_step), iv_step);
2946 return iv_step;
2949 /* Check and perform vectorization of BUILT_IN_BSWAP{16,32,64}. */
2951 static bool
2952 vectorizable_bswap (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
2953 stmt_vec_info *vec_stmt, slp_tree slp_node,
2954 tree vectype_in, stmt_vector_for_cost *cost_vec)
2956 tree op, vectype;
2957 gcall *stmt = as_a <gcall *> (stmt_info->stmt);
2958 vec_info *vinfo = stmt_info->vinfo;
2959 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2960 unsigned ncopies;
2961 unsigned HOST_WIDE_INT nunits, num_bytes;
2963 op = gimple_call_arg (stmt, 0);
2964 vectype = STMT_VINFO_VECTYPE (stmt_info);
2966 if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nunits))
2967 return false;
2969 /* Multiple types in SLP are handled by creating the appropriate number of
2970 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2971 case of SLP. */
2972 if (slp_node)
2973 ncopies = 1;
2974 else
2975 ncopies = vect_get_num_copies (loop_vinfo, vectype);
2977 gcc_assert (ncopies >= 1);
2979 tree char_vectype = get_same_sized_vectype (char_type_node, vectype_in);
2980 if (! char_vectype)
2981 return false;
2983 if (!TYPE_VECTOR_SUBPARTS (char_vectype).is_constant (&num_bytes))
2984 return false;
2986 unsigned word_bytes = num_bytes / nunits;
2988 /* The encoding uses one stepped pattern for each byte in the word. */
2989 vec_perm_builder elts (num_bytes, word_bytes, 3);
2990 for (unsigned i = 0; i < 3; ++i)
2991 for (unsigned j = 0; j < word_bytes; ++j)
2992 elts.quick_push ((i + 1) * word_bytes - j - 1);
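  /* For example, for a 32-bit bswap on a 16-byte vector, WORD_BYTES == 4 and
     the pushed pattern is { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8 }, which
     vec_perm_indices extends to cover all sixteen bytes.  */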
2994 vec_perm_indices indices (elts, 1, num_bytes);
2995 if (!can_vec_perm_const_p (TYPE_MODE (char_vectype), indices))
2996 return false;
2998 if (! vec_stmt)
3000 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
3001 DUMP_VECT_SCOPE ("vectorizable_bswap");
3002 if (! slp_node)
3004 record_stmt_cost (cost_vec,
3005 1, vector_stmt, stmt_info, 0, vect_prologue);
3006 record_stmt_cost (cost_vec,
3007 ncopies, vec_perm, stmt_info, 0, vect_body);
3009 return true;
3012 tree bswap_vconst = vec_perm_indices_to_tree (char_vectype, indices);
3014 /* Transform. */
3015 vec<tree> vec_oprnds = vNULL;
3016 stmt_vec_info new_stmt_info = NULL;
3017 stmt_vec_info prev_stmt_info = NULL;
3018 for (unsigned j = 0; j < ncopies; j++)
3020 /* Handle uses. */
3021 if (j == 0)
3022 vect_get_vec_defs (op, NULL, stmt_info, &vec_oprnds, NULL, slp_node);
3023 else
3024 vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds, NULL);
3026       /* Arguments are ready.  Create the new vector stmt.  */
3027 unsigned i;
3028 tree vop;
3029 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
3031 gimple *new_stmt;
3032 tree tem = make_ssa_name (char_vectype);
3033 new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
3034 char_vectype, vop));
3035 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
3036 tree tem2 = make_ssa_name (char_vectype);
3037 new_stmt = gimple_build_assign (tem2, VEC_PERM_EXPR,
3038 tem, tem, bswap_vconst);
3039 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
3040 tem = make_ssa_name (vectype);
3041 new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
3042 vectype, tem2));
3043 new_stmt_info
3044 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
3045 if (slp_node)
3046 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
3049 if (slp_node)
3050 continue;
3052 if (j == 0)
3053 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
3054 else
3055 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
3057 prev_stmt_info = new_stmt_info;
3060 vec_oprnds.release ();
3061 return true;
3064 /* Return true if vector types VECTYPE_IN and VECTYPE_OUT have
3065 integer elements and if we can narrow VECTYPE_IN to VECTYPE_OUT
3066 in a single step. On success, store the binary pack code in
3067 *CONVERT_CODE. */
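/* For example, with VECTYPE_IN == V4SI and VECTYPE_OUT == V8HI the narrowing
   can typically be done in one step with VEC_PACK_TRUNC_EXPR, provided the
   target supports that operation; a conversion that would need an
   intermediate type makes the function return false.  */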
3069 static bool
3070 simple_integer_narrowing (tree vectype_out, tree vectype_in,
3071 tree_code *convert_code)
3073 if (!INTEGRAL_TYPE_P (TREE_TYPE (vectype_out))
3074 || !INTEGRAL_TYPE_P (TREE_TYPE (vectype_in)))
3075 return false;
3077 tree_code code;
3078 int multi_step_cvt = 0;
3079 auto_vec <tree, 8> interm_types;
3080 if (!supportable_narrowing_operation (NOP_EXPR, vectype_out, vectype_in,
3081 &code, &multi_step_cvt,
3082 &interm_types)
3083 || multi_step_cvt)
3084 return false;
3086 *convert_code = code;
3087 return true;
3090 /* Function vectorizable_call.
3092 Check if STMT_INFO performs a function call that can be vectorized.
3093 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
3094 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3095 Return true if STMT_INFO is vectorizable in this way. */
3097 static bool
3098 vectorizable_call (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
3099 stmt_vec_info *vec_stmt, slp_tree slp_node,
3100 stmt_vector_for_cost *cost_vec)
3102 gcall *stmt;
3103 tree vec_dest;
3104 tree scalar_dest;
3105 tree op;
3106 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
3107 stmt_vec_info prev_stmt_info;
3108 tree vectype_out, vectype_in;
3109 poly_uint64 nunits_in;
3110 poly_uint64 nunits_out;
3111 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3112 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3113 vec_info *vinfo = stmt_info->vinfo;
3114 tree fndecl, new_temp, rhs_type;
3115 enum vect_def_type dt[4]
3116 = { vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type,
3117 vect_unknown_def_type };
3118 int ndts = ARRAY_SIZE (dt);
3119 int ncopies, j;
3120 auto_vec<tree, 8> vargs;
3121 auto_vec<tree, 8> orig_vargs;
3122 enum { NARROW, NONE, WIDEN } modifier;
3123 size_t i, nargs;
3124 tree lhs;
3126 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3127 return false;
3129 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
3130 && ! vec_stmt)
3131 return false;
3133 /* Is STMT_INFO a vectorizable call? */
3134 stmt = dyn_cast <gcall *> (stmt_info->stmt);
3135 if (!stmt)
3136 return false;
3138 if (gimple_call_internal_p (stmt)
3139 && (internal_load_fn_p (gimple_call_internal_fn (stmt))
3140 || internal_store_fn_p (gimple_call_internal_fn (stmt))))
3141 /* Handled by vectorizable_load and vectorizable_store. */
3142 return false;
3144 if (gimple_call_lhs (stmt) == NULL_TREE
3145 || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
3146 return false;
3148 gcc_checking_assert (!stmt_can_throw_internal (stmt));
3150 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3152 /* Process function arguments. */
3153 rhs_type = NULL_TREE;
3154 vectype_in = NULL_TREE;
3155 nargs = gimple_call_num_args (stmt);
3157   /* Bail out if the function has more than four arguments; we do not have
3158      interesting builtin functions to vectorize with more than two arguments
3159      except for fma.  No arguments is also not good.  */
3160 if (nargs == 0 || nargs > 4)
3161 return false;
3163   /* Ignore the argument of IFN_GOMP_SIMD_LANE; it is magic.  */
3164 combined_fn cfn = gimple_call_combined_fn (stmt);
3165 if (cfn == CFN_GOMP_SIMD_LANE)
3167 nargs = 0;
3168 rhs_type = unsigned_type_node;
3171 int mask_opno = -1;
3172 if (internal_fn_p (cfn))
3173 mask_opno = internal_fn_mask_index (as_internal_fn (cfn));
3175 for (i = 0; i < nargs; i++)
3177 tree opvectype;
3179 op = gimple_call_arg (stmt, i);
3180 if (!vect_is_simple_use (op, vinfo, &dt[i], &opvectype))
3182 if (dump_enabled_p ())
3183 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3184 "use not simple.\n");
3185 return false;
3188 /* Skip the mask argument to an internal function. This operand
3189 has been converted via a pattern if necessary. */
3190 if ((int) i == mask_opno)
3191 continue;
3193 /* We can only handle calls with arguments of the same type. */
3194 if (rhs_type
3195 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
3197 if (dump_enabled_p ())
3198 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3199 "argument types differ.\n");
3200 return false;
3202 if (!rhs_type)
3203 rhs_type = TREE_TYPE (op);
3205 if (!vectype_in)
3206 vectype_in = opvectype;
3207 else if (opvectype
3208 && opvectype != vectype_in)
3210 if (dump_enabled_p ())
3211 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3212 "argument vector types differ.\n");
3213 return false;
3216   /* If all arguments are external or constant defs, use a vector type with
3217 the same size as the output vector type. */
3218 if (!vectype_in)
3219 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
3220 if (vec_stmt)
3221 gcc_assert (vectype_in);
3222 if (!vectype_in)
3224 if (dump_enabled_p ())
3226 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3227 "no vectype for scalar type ");
3228 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
3229 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3232 return false;
3235 /* FORNOW */
3236 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
3237 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
3238 if (known_eq (nunits_in * 2, nunits_out))
3239 modifier = NARROW;
3240 else if (known_eq (nunits_out, nunits_in))
3241 modifier = NONE;
3242 else if (known_eq (nunits_out * 2, nunits_in))
3243 modifier = WIDEN;
3244 else
3245 return false;
3247 /* We only handle functions that do not read or clobber memory. */
3248 if (gimple_vuse (stmt))
3250 if (dump_enabled_p ())
3251 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3252 "function reads from or writes to memory.\n");
3253 return false;
3256 /* For now, we only vectorize functions if a target specific builtin
3257 is available. TODO -- in some cases, it might be profitable to
3258 insert the calls for pieces of the vector, in order to be able
3259 to vectorize other operations in the loop. */
3260 fndecl = NULL_TREE;
3261 internal_fn ifn = IFN_LAST;
3262 tree callee = gimple_call_fndecl (stmt);
3264 /* First try using an internal function. */
3265 tree_code convert_code = ERROR_MARK;
3266 if (cfn != CFN_LAST
3267 && (modifier == NONE
3268 || (modifier == NARROW
3269 && simple_integer_narrowing (vectype_out, vectype_in,
3270 &convert_code))))
3271 ifn = vectorizable_internal_function (cfn, callee, vectype_out,
3272 vectype_in);
3274 /* If that fails, try asking for a target-specific built-in function. */
3275 if (ifn == IFN_LAST)
3277 if (cfn != CFN_LAST)
3278 fndecl = targetm.vectorize.builtin_vectorized_function
3279 (cfn, vectype_out, vectype_in);
3280 else if (callee)
3281 fndecl = targetm.vectorize.builtin_md_vectorized_function
3282 (callee, vectype_out, vectype_in);
3285 if (ifn == IFN_LAST && !fndecl)
3287 if (cfn == CFN_GOMP_SIMD_LANE
3288 && !slp_node
3289 && loop_vinfo
3290 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
3291 && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
3292 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
3293 == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
3295 /* We can handle IFN_GOMP_SIMD_LANE by returning a
3296 { 0, 1, 2, ... vf - 1 } vector. */
3297 gcc_assert (nargs == 0);
3299 else if (modifier == NONE
3300 && (gimple_call_builtin_p (stmt, BUILT_IN_BSWAP16)
3301 || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP32)
3302 || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP64)))
3303 return vectorizable_bswap (stmt_info, gsi, vec_stmt, slp_node,
3304 vectype_in, cost_vec);
3305 else
3307 if (dump_enabled_p ())
3308 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3309 "function is not vectorizable.\n");
3310 return false;
3314 if (slp_node)
3315 ncopies = 1;
3316 else if (modifier == NARROW && ifn == IFN_LAST)
3317 ncopies = vect_get_num_copies (loop_vinfo, vectype_out);
3318 else
3319 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
3321 /* Sanity check: make sure that at least one copy of the vectorized stmt
3322 needs to be generated. */
3323 gcc_assert (ncopies >= 1);
3325 vec_loop_masks *masks = (loop_vinfo ? &LOOP_VINFO_MASKS (loop_vinfo) : NULL);
3326 if (!vec_stmt) /* transformation not required. */
3328 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
3329 DUMP_VECT_SCOPE ("vectorizable_call");
3330 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec);
3331 if (ifn != IFN_LAST && modifier == NARROW && !slp_node)
3332 record_stmt_cost (cost_vec, ncopies / 2,
3333 vec_promote_demote, stmt_info, 0, vect_body);
3335 if (loop_vinfo && mask_opno >= 0)
3337 unsigned int nvectors = (slp_node
3338 ? SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node)
3339 : ncopies);
3340 vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype_out);
3342 return true;
3345 /* Transform. */
3347 if (dump_enabled_p ())
3348 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
3350 /* Handle def. */
3351 scalar_dest = gimple_call_lhs (stmt);
3352 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
3354 bool masked_loop_p = loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
3356 stmt_vec_info new_stmt_info = NULL;
3357 prev_stmt_info = NULL;
3358 if (modifier == NONE || ifn != IFN_LAST)
3360 tree prev_res = NULL_TREE;
3361 vargs.safe_grow (nargs);
3362 orig_vargs.safe_grow (nargs);
3363 for (j = 0; j < ncopies; ++j)
3365 /* Build argument list for the vectorized call. */
3366 if (slp_node)
3368 auto_vec<vec<tree> > vec_defs (nargs);
3369 vec<tree> vec_oprnds0;
3371 for (i = 0; i < nargs; i++)
3372 vargs[i] = gimple_call_arg (stmt, i);
3373 vect_get_slp_defs (vargs, slp_node, &vec_defs);
3374 vec_oprnds0 = vec_defs[0];
3376 /* Arguments are ready. Create the new vector stmt. */
3377 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
3379 size_t k;
3380 for (k = 0; k < nargs; k++)
3382 vec<tree> vec_oprndsk = vec_defs[k];
3383 vargs[k] = vec_oprndsk[i];
3385 if (modifier == NARROW)
3387 /* We don't define any narrowing conditional functions
3388 at present. */
3389 gcc_assert (mask_opno < 0);
3390 tree half_res = make_ssa_name (vectype_in);
3391 gcall *call
3392 = gimple_build_call_internal_vec (ifn, vargs);
3393 gimple_call_set_lhs (call, half_res);
3394 gimple_call_set_nothrow (call, true);
3395 new_stmt_info
3396 = vect_finish_stmt_generation (stmt_info, call, gsi);
3397 if ((i & 1) == 0)
3399 prev_res = half_res;
3400 continue;
3402 new_temp = make_ssa_name (vec_dest);
3403 gimple *new_stmt
3404 = gimple_build_assign (new_temp, convert_code,
3405 prev_res, half_res);
3406 new_stmt_info
3407 = vect_finish_stmt_generation (stmt_info, new_stmt,
3408 gsi);
3410 else
3412 if (mask_opno >= 0 && masked_loop_p)
3414 unsigned int vec_num = vec_oprnds0.length ();
3415 /* Always true for SLP. */
3416 gcc_assert (ncopies == 1);
3417 tree mask = vect_get_loop_mask (gsi, masks, vec_num,
3418 vectype_out, i);
3419 vargs[mask_opno] = prepare_load_store_mask
3420 (TREE_TYPE (mask), mask, vargs[mask_opno], gsi);
3423 gcall *call;
3424 if (ifn != IFN_LAST)
3425 call = gimple_build_call_internal_vec (ifn, vargs);
3426 else
3427 call = gimple_build_call_vec (fndecl, vargs);
3428 new_temp = make_ssa_name (vec_dest, call);
3429 gimple_call_set_lhs (call, new_temp);
3430 gimple_call_set_nothrow (call, true);
3431 new_stmt_info
3432 = vect_finish_stmt_generation (stmt_info, call, gsi);
3434 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
3437 for (i = 0; i < nargs; i++)
3439 vec<tree> vec_oprndsi = vec_defs[i];
3440 vec_oprndsi.release ();
3442 continue;
3445 for (i = 0; i < nargs; i++)
3447 op = gimple_call_arg (stmt, i);
3448 if (j == 0)
3449 vec_oprnd0
3450 = vect_get_vec_def_for_operand (op, stmt_info);
3451 else
3452 vec_oprnd0
3453 = vect_get_vec_def_for_stmt_copy (vinfo, orig_vargs[i]);
3455 orig_vargs[i] = vargs[i] = vec_oprnd0;
3458 if (mask_opno >= 0 && masked_loop_p)
3460 tree mask = vect_get_loop_mask (gsi, masks, ncopies,
3461 vectype_out, j);
3462 vargs[mask_opno]
3463 = prepare_load_store_mask (TREE_TYPE (mask), mask,
3464 vargs[mask_opno], gsi);
3467 if (cfn == CFN_GOMP_SIMD_LANE)
3469 tree cst = build_index_vector (vectype_out, j * nunits_out, 1);
3470 tree new_var
3471 = vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_");
3472 gimple *init_stmt = gimple_build_assign (new_var, cst);
3473 vect_init_vector_1 (stmt_info, init_stmt, NULL);
3474 new_temp = make_ssa_name (vec_dest);
3475 gimple *new_stmt = gimple_build_assign (new_temp, new_var);
3476 new_stmt_info
3477 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
3479 else if (modifier == NARROW)
3481 /* We don't define any narrowing conditional functions at
3482 present. */
3483 gcc_assert (mask_opno < 0);
3484 tree half_res = make_ssa_name (vectype_in);
3485 gcall *call = gimple_build_call_internal_vec (ifn, vargs);
3486 gimple_call_set_lhs (call, half_res);
3487 gimple_call_set_nothrow (call, true);
3488 new_stmt_info
3489 = vect_finish_stmt_generation (stmt_info, call, gsi);
3490 if ((j & 1) == 0)
3492 prev_res = half_res;
3493 continue;
3495 new_temp = make_ssa_name (vec_dest);
3496 gassign *new_stmt = gimple_build_assign (new_temp, convert_code,
3497 prev_res, half_res);
3498 new_stmt_info
3499 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
3501 else
3503 gcall *call;
3504 if (ifn != IFN_LAST)
3505 call = gimple_build_call_internal_vec (ifn, vargs);
3506 else
3507 call = gimple_build_call_vec (fndecl, vargs);
3508 new_temp = make_ssa_name (vec_dest, call);
3509 gimple_call_set_lhs (call, new_temp);
3510 gimple_call_set_nothrow (call, true);
3511 new_stmt_info
3512 = vect_finish_stmt_generation (stmt_info, call, gsi);
3515 if (j == (modifier == NARROW ? 1 : 0))
3516 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
3517 else
3518 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
3520 prev_stmt_info = new_stmt_info;
3523 else if (modifier == NARROW)
3525 /* We don't define any narrowing conditional functions at present. */
3526 gcc_assert (mask_opno < 0);
3527 for (j = 0; j < ncopies; ++j)
3529 /* Build argument list for the vectorized call. */
3530 if (j == 0)
3531 vargs.create (nargs * 2);
3532 else
3533 vargs.truncate (0);
3535 if (slp_node)
3537 auto_vec<vec<tree> > vec_defs (nargs);
3538 vec<tree> vec_oprnds0;
3540 for (i = 0; i < nargs; i++)
3541 vargs.quick_push (gimple_call_arg (stmt, i));
3542 vect_get_slp_defs (vargs, slp_node, &vec_defs);
3543 vec_oprnds0 = vec_defs[0];
3545 /* Arguments are ready. Create the new vector stmt. */
3546 for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
3548 size_t k;
3549 vargs.truncate (0);
3550 for (k = 0; k < nargs; k++)
3552 vec<tree> vec_oprndsk = vec_defs[k];
3553 vargs.quick_push (vec_oprndsk[i]);
3554 vargs.quick_push (vec_oprndsk[i + 1]);
3556 gcall *call;
3557 if (ifn != IFN_LAST)
3558 call = gimple_build_call_internal_vec (ifn, vargs);
3559 else
3560 call = gimple_build_call_vec (fndecl, vargs);
3561 new_temp = make_ssa_name (vec_dest, call);
3562 gimple_call_set_lhs (call, new_temp);
3563 gimple_call_set_nothrow (call, true);
3564 new_stmt_info
3565 = vect_finish_stmt_generation (stmt_info, call, gsi);
3566 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
3569 for (i = 0; i < nargs; i++)
3571 vec<tree> vec_oprndsi = vec_defs[i];
3572 vec_oprndsi.release ();
3574 continue;
3577 for (i = 0; i < nargs; i++)
3579 op = gimple_call_arg (stmt, i);
3580 if (j == 0)
3582 vec_oprnd0
3583 = vect_get_vec_def_for_operand (op, stmt_info);
3584 vec_oprnd1
3585 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd0);
3587 else
3589 vec_oprnd1 = gimple_call_arg (new_stmt_info->stmt,
3590 2 * i + 1);
3591 vec_oprnd0
3592 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd1);
3593 vec_oprnd1
3594 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd0);
3597 vargs.quick_push (vec_oprnd0);
3598 vargs.quick_push (vec_oprnd1);
3601 gcall *new_stmt = gimple_build_call_vec (fndecl, vargs);
3602 new_temp = make_ssa_name (vec_dest, new_stmt);
3603 gimple_call_set_lhs (new_stmt, new_temp);
3604 new_stmt_info
3605 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
3607 if (j == 0)
3608 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt_info;
3609 else
3610 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
3612 prev_stmt_info = new_stmt_info;
3615 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3617 else
3618 /* No current target implements this case. */
3619 return false;
3621 vargs.release ();
3623 /* The call in STMT might prevent it from being removed in DCE.
3624 We cannot remove it here, however, due to the way the SSA name
3625 it defines is mapped to the new definition. So just replace the
3626 rhs of the statement with something harmless. */
3628 if (slp_node)
3629 return true;
3631 if (is_pattern_stmt_p (stmt_info))
3632 stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
3633 lhs = gimple_get_lhs (stmt_info->stmt);
3635 gassign *new_stmt
3636 = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
3637 vinfo->replace_stmt (gsi, stmt_info, new_stmt);
3639 return true;
3643 struct simd_call_arg_info
3645 tree vectype;
3646 tree op;
3647 HOST_WIDE_INT linear_step;
3648 enum vect_def_type dt;
3649 unsigned int align;
3650 bool simd_lane_linear;
3653 /* Helper function of vectorizable_simd_clone_call. If OP, an SSA_NAME,
3654 is linear within simd lane (but not within whole loop), note it in
3655 *ARGINFO. */
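/* Illustrative sketch only (the array and SSA names below are made up):
   when a simd-loop-private variable is rewritten into a per-lane array,
   an address passed to a call may look like

     _2 = .GOMP_SIMD_LANE (simduid.0);
     _3 = _2 * 4;
     p_1 = &D.priv_array + (sizetype) _3;

   Such a P_1 is linear with step 4 within one simd lane even though it
   is not a simple induction over the whole loop; the walk below strips
   the PLUS/MULT/conversion wrappers to recover that base and step.  */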
3657 static void
3658 vect_simd_lane_linear (tree op, struct loop *loop,
3659 struct simd_call_arg_info *arginfo)
3661 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
3663 if (!is_gimple_assign (def_stmt)
3664 || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
3665 || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
3666 return;
3668 tree base = gimple_assign_rhs1 (def_stmt);
3669 HOST_WIDE_INT linear_step = 0;
3670 tree v = gimple_assign_rhs2 (def_stmt);
3671 while (TREE_CODE (v) == SSA_NAME)
3673 tree t;
3674 def_stmt = SSA_NAME_DEF_STMT (v);
3675 if (is_gimple_assign (def_stmt))
3676 switch (gimple_assign_rhs_code (def_stmt))
3678 case PLUS_EXPR:
3679 t = gimple_assign_rhs2 (def_stmt);
3680 if (linear_step || TREE_CODE (t) != INTEGER_CST)
3681 return;
3682 base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
3683 v = gimple_assign_rhs1 (def_stmt);
3684 continue;
3685 case MULT_EXPR:
3686 t = gimple_assign_rhs2 (def_stmt);
3687 if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
3688 return;
3689 linear_step = tree_to_shwi (t);
3690 v = gimple_assign_rhs1 (def_stmt);
3691 continue;
3692 CASE_CONVERT:
3693 t = gimple_assign_rhs1 (def_stmt);
3694 if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
3695 || (TYPE_PRECISION (TREE_TYPE (v))
3696 < TYPE_PRECISION (TREE_TYPE (t))))
3697 return;
3698 if (!linear_step)
3699 linear_step = 1;
3700 v = t;
3701 continue;
3702 default:
3703 return;
3705 else if (gimple_call_internal_p (def_stmt, IFN_GOMP_SIMD_LANE)
3706 && loop->simduid
3707 && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
3708 && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
3709 == loop->simduid))
3711 if (!linear_step)
3712 linear_step = 1;
3713 arginfo->linear_step = linear_step;
3714 arginfo->op = base;
3715 arginfo->simd_lane_linear = true;
3716 return;
3721 /* Return the number of elements in vector type VECTYPE, which is associated
3722 with a SIMD clone. At present these vectors always have a constant
3723 length. */
3725 static unsigned HOST_WIDE_INT
3726 simd_clone_subparts (tree vectype)
3728 return TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
3731 /* Function vectorizable_simd_clone_call.
3733 Check if STMT_INFO performs a function call that can be vectorized
3734 by calling a simd clone of the function.
3735 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
3736 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3737 Return true if STMT_INFO is vectorizable in this way. */
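/* Illustrative sketch only (names and the simdlen are assumptions): for

     #pragma omp declare simd simdlen(4) notinbranch
     int foo (int x);

     for (i = 0; i < n; i++)
       a[i] = foo (b[i]);

   the scalar call is conceptually replaced by one call per vector copy
   to the selected clone (mangled roughly like _ZGVbN4v_foo on x86),
   taking a whole vector of B elements and producing a vector of
   results.  The code below picks the "least bad" clone (simdlen vs. VF,
   argument kinds, alignment) and builds the vector arguments for it.  */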
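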
3739 static bool
3740 vectorizable_simd_clone_call (stmt_vec_info stmt_info,
3741 gimple_stmt_iterator *gsi,
3742 stmt_vec_info *vec_stmt, slp_tree slp_node,
3743 stmt_vector_for_cost *)
3745 tree vec_dest;
3746 tree scalar_dest;
3747 tree op, type;
3748 tree vec_oprnd0 = NULL_TREE;
3749 stmt_vec_info prev_stmt_info;
3750 tree vectype;
3751 unsigned int nunits;
3752 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3753 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3754 vec_info *vinfo = stmt_info->vinfo;
3755 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
3756 tree fndecl, new_temp;
3757 int ncopies, j;
3758 auto_vec<simd_call_arg_info> arginfo;
3759 vec<tree> vargs = vNULL;
3760 size_t i, nargs;
3761 tree lhs, rtype, ratype;
3762 vec<constructor_elt, va_gc> *ret_ctor_elts = NULL;
3764 /* Is STMT a vectorizable call? */
3765 gcall *stmt = dyn_cast <gcall *> (stmt_info->stmt);
3766 if (!stmt)
3767 return false;
3769 fndecl = gimple_call_fndecl (stmt);
3770 if (fndecl == NULL_TREE)
3771 return false;
3773 struct cgraph_node *node = cgraph_node::get (fndecl);
3774 if (node == NULL || node->simd_clones == NULL)
3775 return false;
3777 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3778 return false;
3780 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
3781 && ! vec_stmt)
3782 return false;
3784 if (gimple_call_lhs (stmt)
3785 && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
3786 return false;
3788 gcc_checking_assert (!stmt_can_throw_internal (stmt));
3790 vectype = STMT_VINFO_VECTYPE (stmt_info);
3792 if (loop_vinfo && nested_in_vect_loop_p (loop, stmt_info))
3793 return false;
3795 /* FORNOW */
3796 if (slp_node)
3797 return false;
3799 /* Process function arguments. */
3800 nargs = gimple_call_num_args (stmt);
3802 /* Bail out if the function has zero arguments. */
3803 if (nargs == 0)
3804 return false;
3806 arginfo.reserve (nargs, true);
3808 for (i = 0; i < nargs; i++)
3810 simd_call_arg_info thisarginfo;
3811 affine_iv iv;
3813 thisarginfo.linear_step = 0;
3814 thisarginfo.align = 0;
3815 thisarginfo.op = NULL_TREE;
3816 thisarginfo.simd_lane_linear = false;
3818 op = gimple_call_arg (stmt, i);
3819 if (!vect_is_simple_use (op, vinfo, &thisarginfo.dt,
3820 &thisarginfo.vectype)
3821 || thisarginfo.dt == vect_uninitialized_def)
3823 if (dump_enabled_p ())
3824 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3825 "use not simple.\n");
3826 return false;
3829 if (thisarginfo.dt == vect_constant_def
3830 || thisarginfo.dt == vect_external_def)
3831 gcc_assert (thisarginfo.vectype == NULL_TREE);
3832 else
3833 gcc_assert (thisarginfo.vectype != NULL_TREE);
3835 /* For linear arguments, the analyze phase should have saved
3836 the base and step in STMT_VINFO_SIMD_CLONE_INFO. */
3837 if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
3838 && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
3840 gcc_assert (vec_stmt);
3841 thisarginfo.linear_step
3842 = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
3843 thisarginfo.op
3844 = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
3845 thisarginfo.simd_lane_linear
3846 = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
3847 == boolean_true_node);
3848 /* If the loop has been peeled for alignment, we need to adjust it. */
3849 tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
3850 tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
3851 if (n1 != n2 && !thisarginfo.simd_lane_linear)
3853 tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
3854 tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
3855 tree opt = TREE_TYPE (thisarginfo.op);
3856 bias = fold_convert (TREE_TYPE (step), bias);
3857 bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
3858 thisarginfo.op
3859 = fold_build2 (POINTER_TYPE_P (opt)
3860 ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
3861 thisarginfo.op, bias);
3864 else if (!vec_stmt
3865 && thisarginfo.dt != vect_constant_def
3866 && thisarginfo.dt != vect_external_def
3867 && loop_vinfo
3868 && TREE_CODE (op) == SSA_NAME
3869 && simple_iv (loop, loop_containing_stmt (stmt), op,
3870 &iv, false)
3871 && tree_fits_shwi_p (iv.step))
3873 thisarginfo.linear_step = tree_to_shwi (iv.step);
3874 thisarginfo.op = iv.base;
3876 else if ((thisarginfo.dt == vect_constant_def
3877 || thisarginfo.dt == vect_external_def)
3878 && POINTER_TYPE_P (TREE_TYPE (op)))
3879 thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
3880 /* Addresses of array elements indexed by GOMP_SIMD_LANE are
3881 linear too. */
3882 if (POINTER_TYPE_P (TREE_TYPE (op))
3883 && !thisarginfo.linear_step
3884 && !vec_stmt
3885 && thisarginfo.dt != vect_constant_def
3886 && thisarginfo.dt != vect_external_def
3887 && loop_vinfo
3888 && !slp_node
3889 && TREE_CODE (op) == SSA_NAME)
3890 vect_simd_lane_linear (op, loop, &thisarginfo);
3892 arginfo.quick_push (thisarginfo);
3895 unsigned HOST_WIDE_INT vf;
3896 if (!LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
3898 if (dump_enabled_p ())
3899 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3900 "not considering SIMD clones; not yet supported"
3901 " for variable-width vectors.\n");
3902 return false;
3905 unsigned int badness = 0;
3906 struct cgraph_node *bestn = NULL;
3907 if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
3908 bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
3909 else
3910 for (struct cgraph_node *n = node->simd_clones; n != NULL;
3911 n = n->simdclone->next_clone)
3913 unsigned int this_badness = 0;
3914 if (n->simdclone->simdlen > vf
3915 || n->simdclone->nargs != nargs)
3916 continue;
3917 if (n->simdclone->simdlen < vf)
3918 this_badness += (exact_log2 (vf)
3919 - exact_log2 (n->simdclone->simdlen)) * 1024;
3920 if (n->simdclone->inbranch)
3921 this_badness += 2048;
3922 int target_badness = targetm.simd_clone.usable (n);
3923 if (target_badness < 0)
3924 continue;
3925 this_badness += target_badness * 512;
3926 /* FORNOW: Have to add code to add the mask argument. */
3927 if (n->simdclone->inbranch)
3928 continue;
3929 for (i = 0; i < nargs; i++)
3931 switch (n->simdclone->args[i].arg_type)
3933 case SIMD_CLONE_ARG_TYPE_VECTOR:
3934 if (!useless_type_conversion_p
3935 (n->simdclone->args[i].orig_type,
3936 TREE_TYPE (gimple_call_arg (stmt, i))))
3937 i = -1;
3938 else if (arginfo[i].dt == vect_constant_def
3939 || arginfo[i].dt == vect_external_def
3940 || arginfo[i].linear_step)
3941 this_badness += 64;
3942 break;
3943 case SIMD_CLONE_ARG_TYPE_UNIFORM:
3944 if (arginfo[i].dt != vect_constant_def
3945 && arginfo[i].dt != vect_external_def)
3946 i = -1;
3947 break;
3948 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
3949 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
3950 if (arginfo[i].dt == vect_constant_def
3951 || arginfo[i].dt == vect_external_def
3952 || (arginfo[i].linear_step
3953 != n->simdclone->args[i].linear_step))
3954 i = -1;
3955 break;
3956 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
3957 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
3958 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
3959 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
3960 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
3961 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
3962 /* FORNOW */
3963 i = -1;
3964 break;
3965 case SIMD_CLONE_ARG_TYPE_MASK:
3966 gcc_unreachable ();
3968 if (i == (size_t) -1)
3969 break;
3970 if (n->simdclone->args[i].alignment > arginfo[i].align)
3972 i = -1;
3973 break;
3975 if (arginfo[i].align)
3976 this_badness += (exact_log2 (arginfo[i].align)
3977 - exact_log2 (n->simdclone->args[i].alignment));
3979 if (i == (size_t) -1)
3980 continue;
3981 if (bestn == NULL || this_badness < badness)
3983 bestn = n;
3984 badness = this_badness;
3988 if (bestn == NULL)
3989 return false;
3991 for (i = 0; i < nargs; i++)
3992 if ((arginfo[i].dt == vect_constant_def
3993 || arginfo[i].dt == vect_external_def)
3994 && bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
3996 arginfo[i].vectype
3997 = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
3998 i)));
3999 if (arginfo[i].vectype == NULL
4000 || (simd_clone_subparts (arginfo[i].vectype)
4001 > bestn->simdclone->simdlen))
4002 return false;
4005 fndecl = bestn->decl;
4006 nunits = bestn->simdclone->simdlen;
4007 ncopies = vf / nunits;
4009 /* If the function isn't const, only allow it in simd loops where the user
4010 has asserted that at least nunits consecutive iterations can be
4011 performed using SIMD instructions. */
4012 if ((loop == NULL || (unsigned) loop->safelen < nunits)
4013 && gimple_vuse (stmt))
4014 return false;
4016 /* Sanity check: make sure that at least one copy of the vectorized stmt
4017 needs to be generated. */
4018 gcc_assert (ncopies >= 1);
4020 if (!vec_stmt) /* transformation not required. */
4022 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
4023 for (i = 0; i < nargs; i++)
4024 if ((bestn->simdclone->args[i].arg_type
4025 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
4026 || (bestn->simdclone->args[i].arg_type
4027 == SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP))
4029 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
4030 + 1);
4031 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
4032 tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
4033 ? size_type_node : TREE_TYPE (arginfo[i].op);
4034 tree ls = build_int_cst (lst, arginfo[i].linear_step);
4035 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
4036 tree sll = arginfo[i].simd_lane_linear
4037 ? boolean_true_node : boolean_false_node;
4038 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
4040 STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
4041 DUMP_VECT_SCOPE ("vectorizable_simd_clone_call");
4042 /* vect_model_simple_cost (stmt_info, ncopies, dt, slp_node, cost_vec); */
4043 return true;
4046 /* Transform. */
4048 if (dump_enabled_p ())
4049 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
4051 /* Handle def. */
4052 scalar_dest = gimple_call_lhs (stmt);
4053 vec_dest = NULL_TREE;
4054 rtype = NULL_TREE;
4055 ratype = NULL_TREE;
4056 if (scalar_dest)
4058 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4059 rtype = TREE_TYPE (TREE_TYPE (fndecl));
4060 if (TREE_CODE (rtype) == ARRAY_TYPE)
4062 ratype = rtype;
4063 rtype = TREE_TYPE (ratype);
4067 prev_stmt_info = NULL;
4068 for (j = 0; j < ncopies; ++j)
4070 /* Build argument list for the vectorized call. */
4071 if (j == 0)
4072 vargs.create (nargs);
4073 else
4074 vargs.truncate (0);
4076 for (i = 0; i < nargs; i++)
4078 unsigned int k, l, m, o;
4079 tree atype;
4080 op = gimple_call_arg (stmt, i);
4081 switch (bestn->simdclone->args[i].arg_type)
4083 case SIMD_CLONE_ARG_TYPE_VECTOR:
4084 atype = bestn->simdclone->args[i].vector_type;
4085 o = nunits / simd_clone_subparts (atype);
4086 for (m = j * o; m < (j + 1) * o; m++)
4088 if (simd_clone_subparts (atype)
4089 < simd_clone_subparts (arginfo[i].vectype))
4091 poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
4092 k = (simd_clone_subparts (arginfo[i].vectype)
4093 / simd_clone_subparts (atype));
4094 gcc_assert ((k & (k - 1)) == 0);
4095 if (m == 0)
4096 vec_oprnd0
4097 = vect_get_vec_def_for_operand (op, stmt_info);
4098 else
4100 vec_oprnd0 = arginfo[i].op;
4101 if ((m & (k - 1)) == 0)
4102 vec_oprnd0
4103 = vect_get_vec_def_for_stmt_copy (vinfo,
4104 vec_oprnd0);
4106 arginfo[i].op = vec_oprnd0;
4107 vec_oprnd0
4108 = build3 (BIT_FIELD_REF, atype, vec_oprnd0,
4109 bitsize_int (prec),
4110 bitsize_int ((m & (k - 1)) * prec));
4111 gassign *new_stmt
4112 = gimple_build_assign (make_ssa_name (atype),
4113 vec_oprnd0);
4114 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
4115 vargs.safe_push (gimple_assign_lhs (new_stmt));
4117 else
4119 k = (simd_clone_subparts (atype)
4120 / simd_clone_subparts (arginfo[i].vectype));
4121 gcc_assert ((k & (k - 1)) == 0);
4122 vec<constructor_elt, va_gc> *ctor_elts;
4123 if (k != 1)
4124 vec_alloc (ctor_elts, k);
4125 else
4126 ctor_elts = NULL;
4127 for (l = 0; l < k; l++)
4129 if (m == 0 && l == 0)
4130 vec_oprnd0
4131 = vect_get_vec_def_for_operand (op, stmt_info);
4132 else
4133 vec_oprnd0
4134 = vect_get_vec_def_for_stmt_copy (vinfo,
4135 arginfo[i].op);
4136 arginfo[i].op = vec_oprnd0;
4137 if (k == 1)
4138 break;
4139 CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
4140 vec_oprnd0);
4142 if (k == 1)
4143 vargs.safe_push (vec_oprnd0);
4144 else
4146 vec_oprnd0 = build_constructor (atype, ctor_elts);
4147 gassign *new_stmt
4148 = gimple_build_assign (make_ssa_name (atype),
4149 vec_oprnd0);
4150 vect_finish_stmt_generation (stmt_info, new_stmt,
4151 gsi);
4152 vargs.safe_push (gimple_assign_lhs (new_stmt));
4156 break;
4157 case SIMD_CLONE_ARG_TYPE_UNIFORM:
4158 vargs.safe_push (op);
4159 break;
4160 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
4161 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
4162 if (j == 0)
4164 gimple_seq stmts;
4165 arginfo[i].op
4166 = force_gimple_operand (arginfo[i].op, &stmts, true,
4167 NULL_TREE);
4168 if (stmts != NULL)
4170 basic_block new_bb;
4171 edge pe = loop_preheader_edge (loop);
4172 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
4173 gcc_assert (!new_bb);
4175 if (arginfo[i].simd_lane_linear)
4177 vargs.safe_push (arginfo[i].op);
4178 break;
4180 tree phi_res = copy_ssa_name (op);
4181 gphi *new_phi = create_phi_node (phi_res, loop->header);
4182 loop_vinfo->add_stmt (new_phi);
4183 add_phi_arg (new_phi, arginfo[i].op,
4184 loop_preheader_edge (loop), UNKNOWN_LOCATION);
4185 enum tree_code code
4186 = POINTER_TYPE_P (TREE_TYPE (op))
4187 ? POINTER_PLUS_EXPR : PLUS_EXPR;
4188 tree type = POINTER_TYPE_P (TREE_TYPE (op))
4189 ? sizetype : TREE_TYPE (op);
4190 widest_int cst
4191 = wi::mul (bestn->simdclone->args[i].linear_step,
4192 ncopies * nunits);
4193 tree tcst = wide_int_to_tree (type, cst);
4194 tree phi_arg = copy_ssa_name (op);
4195 gassign *new_stmt
4196 = gimple_build_assign (phi_arg, code, phi_res, tcst);
4197 gimple_stmt_iterator si = gsi_after_labels (loop->header);
4198 gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
4199 loop_vinfo->add_stmt (new_stmt);
4200 add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
4201 UNKNOWN_LOCATION);
4202 arginfo[i].op = phi_res;
4203 vargs.safe_push (phi_res);
4205 else
4207 enum tree_code code
4208 = POINTER_TYPE_P (TREE_TYPE (op))
4209 ? POINTER_PLUS_EXPR : PLUS_EXPR;
4210 tree type = POINTER_TYPE_P (TREE_TYPE (op))
4211 ? sizetype : TREE_TYPE (op);
4212 widest_int cst
4213 = wi::mul (bestn->simdclone->args[i].linear_step,
4214 j * nunits);
4215 tree tcst = wide_int_to_tree (type, cst);
4216 new_temp = make_ssa_name (TREE_TYPE (op));
4217 gassign *new_stmt
4218 = gimple_build_assign (new_temp, code,
4219 arginfo[i].op, tcst);
4220 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
4221 vargs.safe_push (new_temp);
4223 break;
4224 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
4225 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
4226 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
4227 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
4228 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
4229 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
4230 default:
4231 gcc_unreachable ();
4235 gcall *new_call = gimple_build_call_vec (fndecl, vargs);
4236 if (vec_dest)
4238 gcc_assert (ratype || simd_clone_subparts (rtype) == nunits);
4239 if (ratype)
4240 new_temp = create_tmp_var (ratype);
4241 else if (simd_clone_subparts (vectype)
4242 == simd_clone_subparts (rtype))
4243 new_temp = make_ssa_name (vec_dest, new_call);
4244 else
4245 new_temp = make_ssa_name (rtype, new_call);
4246 gimple_call_set_lhs (new_call, new_temp);
4248 stmt_vec_info new_stmt_info
4249 = vect_finish_stmt_generation (stmt_info, new_call, gsi);
4251 if (vec_dest)
4253 if (simd_clone_subparts (vectype) < nunits)
4255 unsigned int k, l;
4256 poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
4257 poly_uint64 bytes = GET_MODE_SIZE (TYPE_MODE (vectype));
4258 k = nunits / simd_clone_subparts (vectype);
4259 gcc_assert ((k & (k - 1)) == 0);
4260 for (l = 0; l < k; l++)
4262 tree t;
4263 if (ratype)
4265 t = build_fold_addr_expr (new_temp);
4266 t = build2 (MEM_REF, vectype, t,
4267 build_int_cst (TREE_TYPE (t), l * bytes));
4269 else
4270 t = build3 (BIT_FIELD_REF, vectype, new_temp,
4271 bitsize_int (prec), bitsize_int (l * prec));
4272 gimple *new_stmt
4273 = gimple_build_assign (make_ssa_name (vectype), t);
4274 new_stmt_info
4275 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
4277 if (j == 0 && l == 0)
4278 STMT_VINFO_VEC_STMT (stmt_info)
4279 = *vec_stmt = new_stmt_info;
4280 else
4281 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
4283 prev_stmt_info = new_stmt_info;
4286 if (ratype)
4287 vect_clobber_variable (stmt_info, gsi, new_temp);
4288 continue;
4290 else if (simd_clone_subparts (vectype) > nunits)
4292 unsigned int k = (simd_clone_subparts (vectype)
4293 / simd_clone_subparts (rtype));
4294 gcc_assert ((k & (k - 1)) == 0);
4295 if ((j & (k - 1)) == 0)
4296 vec_alloc (ret_ctor_elts, k);
4297 if (ratype)
4299 unsigned int m, o = nunits / simd_clone_subparts (rtype);
4300 for (m = 0; m < o; m++)
4302 tree tem = build4 (ARRAY_REF, rtype, new_temp,
4303 size_int (m), NULL_TREE, NULL_TREE);
4304 gimple *new_stmt
4305 = gimple_build_assign (make_ssa_name (rtype), tem);
4306 new_stmt_info
4307 = vect_finish_stmt_generation (stmt_info, new_stmt,
4308 gsi);
4309 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
4310 gimple_assign_lhs (new_stmt));
4312 vect_clobber_variable (stmt_info, gsi, new_temp);
4314 else
4315 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
4316 if ((j & (k - 1)) != k - 1)
4317 continue;
4318 vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
4319 gimple *new_stmt
4320 = gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
4321 new_stmt_info
4322 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
4324 if ((unsigned) j == k - 1)
4325 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
4326 else
4327 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
4329 prev_stmt_info = new_stmt_info;
4330 continue;
4332 else if (ratype)
4334 tree t = build_fold_addr_expr (new_temp);
4335 t = build2 (MEM_REF, vectype, t,
4336 build_int_cst (TREE_TYPE (t), 0));
4337 gimple *new_stmt
4338 = gimple_build_assign (make_ssa_name (vec_dest), t);
4339 new_stmt_info
4340 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
4341 vect_clobber_variable (stmt_info, gsi, new_temp);
4345 if (j == 0)
4346 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
4347 else
4348 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
4350 prev_stmt_info = new_stmt_info;
4353 vargs.release ();
4355 /* The call in STMT might prevent it from being removed in DCE.
4356 We cannot remove it here, however, due to the way the SSA name
4357 it defines is mapped to the new definition. So just replace the
4358 rhs of the statement with something harmless. */
4360 if (slp_node)
4361 return true;
4363 gimple *new_stmt;
4364 if (scalar_dest)
4366 type = TREE_TYPE (scalar_dest);
4367 if (is_pattern_stmt_p (stmt_info))
4368 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info)->stmt);
4369 else
4370 lhs = gimple_call_lhs (stmt);
4371 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
4373 else
4374 new_stmt = gimple_build_nop ();
4375 vinfo->replace_stmt (gsi, stmt_info, new_stmt);
4376 unlink_stmt_vdef (stmt);
4378 return true;
4382 /* Function vect_gen_widened_results_half
4384 Create a vector stmt whose code, type, number of arguments, and result
4385 variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
4386 VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at BSI.
4387 In the case that CODE is a CALL_EXPR, this means that a call to DECL
4388 needs to be created (DECL is a function-decl of a target-builtin).
4389 STMT_INFO is the original scalar stmt that we are vectorizing. */
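/* For example (illustrative only, element order depends on the target's
   endianness): to widen-multiply two V8HI operands into V4SI results,
   this helper is invoked twice, once per half:

     lo_5 = VEC_WIDEN_MULT_LO_EXPR <vop0, vop1>;
     hi_6 = VEC_WIDEN_MULT_HI_EXPR <vop0, vop1>;

   or, when CODE is CALL_EXPR, with two calls to the target builtin DECL
   instead.  */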
4391 static gimple *
4392 vect_gen_widened_results_half (enum tree_code code,
4393 tree decl,
4394 tree vec_oprnd0, tree vec_oprnd1, int op_type,
4395 tree vec_dest, gimple_stmt_iterator *gsi,
4396 stmt_vec_info stmt_info)
4398 gimple *new_stmt;
4399 tree new_temp;
4401 /* Generate half of the widened result: */
4402 if (code == CALL_EXPR)
4404 /* Target specific support */
4405 if (op_type == binary_op)
4406 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
4407 else
4408 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
4409 new_temp = make_ssa_name (vec_dest, new_stmt);
4410 gimple_call_set_lhs (new_stmt, new_temp);
4412 else
4414 /* Generic support */
4415 gcc_assert (op_type == TREE_CODE_LENGTH (code));
4416 if (op_type != binary_op)
4417 vec_oprnd1 = NULL;
4418 new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
4419 new_temp = make_ssa_name (vec_dest, new_stmt);
4420 gimple_assign_set_lhs (new_stmt, new_temp);
4422 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
4424 return new_stmt;
4428 /* Get vectorized definitions for loop-based vectorization of STMT_INFO.
4429 For the first operand we call vect_get_vec_def_for_operand (with OPRND
4430 containing the scalar operand), and for the rest we get a copy with
4431 vect_get_vec_def_for_stmt_copy() using the previous vector definition
4432 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
4433 The vectors are collected into VEC_OPRNDS. */
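/* Rough sketch: the caller in vectorizable_conversion passes
   vect_pow2 (multi_step_cvt) - 1 as MULTI_STEP_CVT, so for a two-step
   narrowing such as int -> char on a target with 128-bit vectors, one
   copy of the conversion gathers four consecutive V4SI defs of the
   source operand,

     vec_oprnds = { x_0, x_1, x_2, x_3 },

   which vect_create_vectorized_demotion_stmts then packs pairwise.  */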
4435 static void
4436 vect_get_loop_based_defs (tree *oprnd, stmt_vec_info stmt_info,
4437 vec<tree> *vec_oprnds, int multi_step_cvt)
4439 vec_info *vinfo = stmt_info->vinfo;
4440 tree vec_oprnd;
4442 /* Get first vector operand. */
4443 /* All the vector operands except the very first one (that is, the scalar operand)
4444 are stmt copies. */
4445 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
4446 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt_info);
4447 else
4448 vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, *oprnd);
4450 vec_oprnds->quick_push (vec_oprnd);
4452 /* Get second vector operand. */
4453 vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd);
4454 vec_oprnds->quick_push (vec_oprnd);
4456 *oprnd = vec_oprnd;
4458 /* For conversion in multiple steps, continue to get operands
4459 recursively. */
4460 if (multi_step_cvt)
4461 vect_get_loop_based_defs (oprnd, stmt_info, vec_oprnds,
4462 multi_step_cvt - 1);
4466 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
4467 For multi-step conversions store the resulting vectors and call the function
4468 recursively. */
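/* Illustrative sketch (128-bit vectors assumed): demoting four V4SI
   vectors to one V16QI result takes two rounds of pairwise packing,

     t0 = VEC_PACK_TRUNC_EXPR <x_0, x_1>;   // V8HI
     t1 = VEC_PACK_TRUNC_EXPR <x_2, x_3>;   // V8HI
     r  = VEC_PACK_TRUNC_EXPR <t0, t1>;     // V16QI

   with each recursive invocation below handling one round.  */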
4470 static void
4471 vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
4472 int multi_step_cvt,
4473 stmt_vec_info stmt_info,
4474 vec<tree> vec_dsts,
4475 gimple_stmt_iterator *gsi,
4476 slp_tree slp_node, enum tree_code code,
4477 stmt_vec_info *prev_stmt_info)
4479 unsigned int i;
4480 tree vop0, vop1, new_tmp, vec_dest;
4482 vec_dest = vec_dsts.pop ();
4484 for (i = 0; i < vec_oprnds->length (); i += 2)
4486 /* Create demotion operation. */
4487 vop0 = (*vec_oprnds)[i];
4488 vop1 = (*vec_oprnds)[i + 1];
4489 gassign *new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
4490 new_tmp = make_ssa_name (vec_dest, new_stmt);
4491 gimple_assign_set_lhs (new_stmt, new_tmp);
4492 stmt_vec_info new_stmt_info
4493 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
4495 if (multi_step_cvt)
4496 /* Store the resulting vector for next recursive call. */
4497 (*vec_oprnds)[i/2] = new_tmp;
4498 else
4500 /* This is the last step of the conversion sequence. Store the
4501 vectors in SLP_NODE or in vector info of the scalar statement
4502 (or in STMT_VINFO_RELATED_STMT chain). */
4503 if (slp_node)
4504 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
4505 else
4507 if (!*prev_stmt_info)
4508 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt_info;
4509 else
4510 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt_info;
4512 *prev_stmt_info = new_stmt_info;
4517 /* For multi-step demotion operations we first generate demotion operations
4518 from the source type to the intermediate types, and then combine the
4519 results (stored in VEC_OPRNDS) with a demotion operation to the destination
4520 type. */
4521 if (multi_step_cvt)
4523 /* At each level of recursion we have half of the operands we had at the
4524 previous level. */
4525 vec_oprnds->truncate ((i+1)/2);
4526 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
4527 stmt_info, vec_dsts, gsi,
4528 slp_node, VEC_PACK_TRUNC_EXPR,
4529 prev_stmt_info);
4532 vec_dsts.quick_push (vec_dest);
4536 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
4537 and VEC_OPRNDS1, for a binary operation associated with scalar statement
4538 STMT_INFO. For multi-step conversions store the resulting vectors and
4539 call the function recursively. */
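/* Rough sketch: each input vector (pair) yields two output vectors, so
   an input list { v0, v1 } is replaced by
   { lo(v0), hi(v0), lo(v1), hi(v1) }, doubling the length of
   *VEC_OPRNDS0.  For multi-step promotions the caller feeds the doubled
   list back in for the next level.  */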
4541 static void
4542 vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
4543 vec<tree> *vec_oprnds1,
4544 stmt_vec_info stmt_info, tree vec_dest,
4545 gimple_stmt_iterator *gsi,
4546 enum tree_code code1,
4547 enum tree_code code2, tree decl1,
4548 tree decl2, int op_type)
4550 int i;
4551 tree vop0, vop1, new_tmp1, new_tmp2;
4552 gimple *new_stmt1, *new_stmt2;
4553 vec<tree> vec_tmp = vNULL;
4555 vec_tmp.create (vec_oprnds0->length () * 2);
4556 FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
4558 if (op_type == binary_op)
4559 vop1 = (*vec_oprnds1)[i];
4560 else
4561 vop1 = NULL_TREE;
4563 /* Generate the two halves of promotion operation. */
4564 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
4565 op_type, vec_dest, gsi,
4566 stmt_info);
4567 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
4568 op_type, vec_dest, gsi,
4569 stmt_info);
4570 if (is_gimple_call (new_stmt1))
4572 new_tmp1 = gimple_call_lhs (new_stmt1);
4573 new_tmp2 = gimple_call_lhs (new_stmt2);
4575 else
4577 new_tmp1 = gimple_assign_lhs (new_stmt1);
4578 new_tmp2 = gimple_assign_lhs (new_stmt2);
4581 /* Store the results for the next step. */
4582 vec_tmp.quick_push (new_tmp1);
4583 vec_tmp.quick_push (new_tmp2);
4586 vec_oprnds0->release ();
4587 *vec_oprnds0 = vec_tmp;
4591 /* Check if STMT_INFO performs a conversion operation that can be vectorized.
4592 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
4593 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4594 Return true if STMT_INFO is vectorizable in this way. */
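/* Some illustrative cases (vector types are target dependent and only
   meant as a sketch):

     int   -> float : same element count, modifier NONE, one FLOAT_EXPR
                      (or target builtin) per vector;
     short -> int   : widening, modifier WIDEN, each input vector yields
                      two output vectors (unpack lo/hi);
     int   -> short : narrowing, modifier NARROW, two input vectors are
                      packed into one output vector.

   Multi-step cases such as char -> float go through intermediate types
   recorded in INTERM_TYPES.  */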
4596 static bool
4597 vectorizable_conversion (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
4598 stmt_vec_info *vec_stmt, slp_tree slp_node,
4599 stmt_vector_for_cost *cost_vec)
4601 tree vec_dest;
4602 tree scalar_dest;
4603 tree op0, op1 = NULL_TREE;
4604 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
4605 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4606 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
4607 enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
4608 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
4609 tree new_temp;
4610 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4611 int ndts = 2;
4612 stmt_vec_info prev_stmt_info;
4613 poly_uint64 nunits_in;
4614 poly_uint64 nunits_out;
4615 tree vectype_out, vectype_in;
4616 int ncopies, i, j;
4617 tree lhs_type, rhs_type;
4618 enum { NARROW, NONE, WIDEN } modifier;
4619 vec<tree> vec_oprnds0 = vNULL;
4620 vec<tree> vec_oprnds1 = vNULL;
4621 tree vop0;
4622 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4623 vec_info *vinfo = stmt_info->vinfo;
4624 int multi_step_cvt = 0;
4625 vec<tree> interm_types = vNULL;
4626 tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
4627 int op_type;
4628 unsigned short fltsz;
4630 /* Is STMT a vectorizable conversion? */
4632 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4633 return false;
4635 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4636 && ! vec_stmt)
4637 return false;
4639 gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
4640 if (!stmt)
4641 return false;
4643 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4644 return false;
4646 code = gimple_assign_rhs_code (stmt);
4647 if (!CONVERT_EXPR_CODE_P (code)
4648 && code != FIX_TRUNC_EXPR
4649 && code != FLOAT_EXPR
4650 && code != WIDEN_MULT_EXPR
4651 && code != WIDEN_LSHIFT_EXPR)
4652 return false;
4654 op_type = TREE_CODE_LENGTH (code);
4656 /* Check types of lhs and rhs. */
4657 scalar_dest = gimple_assign_lhs (stmt);
4658 lhs_type = TREE_TYPE (scalar_dest);
4659 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4661 op0 = gimple_assign_rhs1 (stmt);
4662 rhs_type = TREE_TYPE (op0);
4664 if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
4665 && !((INTEGRAL_TYPE_P (lhs_type)
4666 && INTEGRAL_TYPE_P (rhs_type))
4667 || (SCALAR_FLOAT_TYPE_P (lhs_type)
4668 && SCALAR_FLOAT_TYPE_P (rhs_type))))
4669 return false;
4671 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
4672 && ((INTEGRAL_TYPE_P (lhs_type)
4673 && !type_has_mode_precision_p (lhs_type))
4674 || (INTEGRAL_TYPE_P (rhs_type)
4675 && !type_has_mode_precision_p (rhs_type))))
4677 if (dump_enabled_p ())
4678 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4679 "type conversion to/from bit-precision unsupported."
4680 "\n");
4681 return false;
4684 /* Check the operands of the operation. */
4685 if (!vect_is_simple_use (op0, vinfo, &dt[0], &vectype_in))
4687 if (dump_enabled_p ())
4688 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4689 "use not simple.\n");
4690 return false;
4692 if (op_type == binary_op)
4694 bool ok;
4696 op1 = gimple_assign_rhs2 (stmt);
4697 gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
4698 /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
4699 OP1. */
4700 if (CONSTANT_CLASS_P (op0))
4701 ok = vect_is_simple_use (op1, vinfo, &dt[1], &vectype_in);
4702 else
4703 ok = vect_is_simple_use (op1, vinfo, &dt[1]);
4705 if (!ok)
4707 if (dump_enabled_p ())
4708 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4709 "use not simple.\n");
4710 return false;
4714 /* If op0 is an external or constant def, use a vector type of
4715 the same size as the output vector type. */
4716 if (!vectype_in)
4717 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
4718 if (vec_stmt)
4719 gcc_assert (vectype_in);
4720 if (!vectype_in)
4722 if (dump_enabled_p ())
4724 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4725 "no vectype for scalar type ");
4726 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
4727 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4730 return false;
4733 if (VECTOR_BOOLEAN_TYPE_P (vectype_out)
4734 && !VECTOR_BOOLEAN_TYPE_P (vectype_in))
4736 if (dump_enabled_p ())
4738 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4739 "can't convert between boolean and non "
4740 "boolean vectors");
4741 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
4742 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4745 return false;
4748 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
4749 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4750 if (known_eq (nunits_out, nunits_in))
4751 modifier = NONE;
4752 else if (multiple_p (nunits_out, nunits_in))
4753 modifier = NARROW;
4754 else
4756 gcc_checking_assert (multiple_p (nunits_in, nunits_out));
4757 modifier = WIDEN;
4760 /* Multiple types in SLP are handled by creating the appropriate number of
4761 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4762 case of SLP. */
4763 if (slp_node)
4764 ncopies = 1;
4765 else if (modifier == NARROW)
4766 ncopies = vect_get_num_copies (loop_vinfo, vectype_out);
4767 else
4768 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
4770 /* Sanity check: make sure that at least one copy of the vectorized stmt
4771 needs to be generated. */
4772 gcc_assert (ncopies >= 1);
4774 bool found_mode = false;
4775 scalar_mode lhs_mode = SCALAR_TYPE_MODE (lhs_type);
4776 scalar_mode rhs_mode = SCALAR_TYPE_MODE (rhs_type);
4777 opt_scalar_mode rhs_mode_iter;
4779 /* Supportable by target? */
4780 switch (modifier)
4782 case NONE:
4783 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
4784 return false;
4785 if (supportable_convert_operation (code, vectype_out, vectype_in,
4786 &decl1, &code1))
4787 break;
4788 /* FALLTHRU */
4789 unsupported:
4790 if (dump_enabled_p ())
4791 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4792 "conversion not supported by target.\n");
4793 return false;
4795 case WIDEN:
4796 if (supportable_widening_operation (code, stmt_info, vectype_out,
4797 vectype_in, &code1, &code2,
4798 &multi_step_cvt, &interm_types))
4800 /* Binary widening operation can only be supported directly by the
4801 architecture. */
4802 gcc_assert (!(multi_step_cvt && op_type == binary_op));
4803 break;
4806 if (code != FLOAT_EXPR
4807 || GET_MODE_SIZE (lhs_mode) <= GET_MODE_SIZE (rhs_mode))
4808 goto unsupported;
4810 fltsz = GET_MODE_SIZE (lhs_mode);
4811 FOR_EACH_2XWIDER_MODE (rhs_mode_iter, rhs_mode)
4813 rhs_mode = rhs_mode_iter.require ();
4814 if (GET_MODE_SIZE (rhs_mode) > fltsz)
4815 break;
4817 cvt_type
4818 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
4819 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
4820 if (cvt_type == NULL_TREE)
4821 goto unsupported;
4823 if (GET_MODE_SIZE (rhs_mode) == fltsz)
4825 if (!supportable_convert_operation (code, vectype_out,
4826 cvt_type, &decl1, &codecvt1))
4827 goto unsupported;
4829 else if (!supportable_widening_operation (code, stmt_info,
4830 vectype_out, cvt_type,
4831 &codecvt1, &codecvt2,
4832 &multi_step_cvt,
4833 &interm_types))
4834 continue;
4835 else
4836 gcc_assert (multi_step_cvt == 0);
4838 if (supportable_widening_operation (NOP_EXPR, stmt_info, cvt_type,
4839 vectype_in, &code1, &code2,
4840 &multi_step_cvt, &interm_types))
4842 found_mode = true;
4843 break;
4847 if (!found_mode)
4848 goto unsupported;
4850 if (GET_MODE_SIZE (rhs_mode) == fltsz)
4851 codecvt2 = ERROR_MARK;
4852 else
4854 multi_step_cvt++;
4855 interm_types.safe_push (cvt_type);
4856 cvt_type = NULL_TREE;
4858 break;
4860 case NARROW:
4861 gcc_assert (op_type == unary_op);
4862 if (supportable_narrowing_operation (code, vectype_out, vectype_in,
4863 &code1, &multi_step_cvt,
4864 &interm_types))
4865 break;
4867 if (code != FIX_TRUNC_EXPR
4868 || GET_MODE_SIZE (lhs_mode) >= GET_MODE_SIZE (rhs_mode))
4869 goto unsupported;
4871 cvt_type
4872 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
4873 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
4874 if (cvt_type == NULL_TREE)
4875 goto unsupported;
4876 if (!supportable_convert_operation (code, cvt_type, vectype_in,
4877 &decl1, &codecvt1))
4878 goto unsupported;
4879 if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
4880 &code1, &multi_step_cvt,
4881 &interm_types))
4882 break;
4883 goto unsupported;
4885 default:
4886 gcc_unreachable ();
4889 if (!vec_stmt) /* transformation not required. */
4891 DUMP_VECT_SCOPE ("vectorizable_conversion");
4892 if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
4894 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
4895 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node,
4896 cost_vec);
4898 else if (modifier == NARROW)
4900 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
4901 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt,
4902 cost_vec);
4904 else
4906 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
4907 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt,
4908 cost_vec);
4910 interm_types.release ();
4911 return true;
4914 /* Transform. */
4915 if (dump_enabled_p ())
4916 dump_printf_loc (MSG_NOTE, vect_location,
4917 "transform conversion. ncopies = %d.\n", ncopies);
4919 if (op_type == binary_op)
4921 if (CONSTANT_CLASS_P (op0))
4922 op0 = fold_convert (TREE_TYPE (op1), op0);
4923 else if (CONSTANT_CLASS_P (op1))
4924 op1 = fold_convert (TREE_TYPE (op0), op1);
4927 /* In case of multi-step conversion, we first generate conversion operations
4928 to the intermediate types, and then from those types to the final one.
4929 We create vector destinations for the intermediate type (TYPES) received
4930 from supportable_*_operation, and store them in the correct order
4931 for future use in vect_create_vectorized_*_stmts (). */
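/* Sketch: for a two-step narrowing int -> char (128-bit vectors
   assumed), VEC_DSTS ends up as { V16QI dest, V8HI dest }, so the
   intermediate V8HI destination is popped first and the final V16QI
   destination last.  */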
4932 auto_vec<tree> vec_dsts (multi_step_cvt + 1);
4933 vec_dest = vect_create_destination_var (scalar_dest,
4934 (cvt_type && modifier == WIDEN)
4935 ? cvt_type : vectype_out);
4936 vec_dsts.quick_push (vec_dest);
4938 if (multi_step_cvt)
4940 for (i = interm_types.length () - 1;
4941 interm_types.iterate (i, &intermediate_type); i--)
4943 vec_dest = vect_create_destination_var (scalar_dest,
4944 intermediate_type);
4945 vec_dsts.quick_push (vec_dest);
4949 if (cvt_type)
4950 vec_dest = vect_create_destination_var (scalar_dest,
4951 modifier == WIDEN
4952 ? vectype_out : cvt_type);
4954 if (!slp_node)
4956 if (modifier == WIDEN)
4958 vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
4959 if (op_type == binary_op)
4960 vec_oprnds1.create (1);
4962 else if (modifier == NARROW)
4963 vec_oprnds0.create (
4964 2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
4966 else if (code == WIDEN_LSHIFT_EXPR)
4967 vec_oprnds1.create (slp_node->vec_stmts_size);
4969 last_oprnd = op0;
4970 prev_stmt_info = NULL;
4971 switch (modifier)
4973 case NONE:
4974 for (j = 0; j < ncopies; j++)
4976 if (j == 0)
4977 vect_get_vec_defs (op0, NULL, stmt_info, &vec_oprnds0,
4978 NULL, slp_node);
4979 else
4980 vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds0, NULL);
4982 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4984 stmt_vec_info new_stmt_info;
4985 /* Arguments are ready, create the new vector stmt. */
4986 if (code1 == CALL_EXPR)
4988 gcall *new_stmt = gimple_build_call (decl1, 1, vop0);
4989 new_temp = make_ssa_name (vec_dest, new_stmt);
4990 gimple_call_set_lhs (new_stmt, new_temp);
4991 new_stmt_info
4992 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
4994 else
4996 gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
4997 gassign *new_stmt
4998 = gimple_build_assign (vec_dest, code1, vop0);
4999 new_temp = make_ssa_name (vec_dest, new_stmt);
5000 gimple_assign_set_lhs (new_stmt, new_temp);
5001 new_stmt_info
5002 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
5005 if (slp_node)
5006 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
5007 else
5009 if (!prev_stmt_info)
5010 STMT_VINFO_VEC_STMT (stmt_info)
5011 = *vec_stmt = new_stmt_info;
5012 else
5013 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
5014 prev_stmt_info = new_stmt_info;
5018 break;
5020 case WIDEN:
5021 /* In case the vectorization factor (VF) is bigger than the number
5022 of elements that we can fit in a vectype (nunits), we have to
5023 generate more than one vector stmt, i.e., we need to "unroll"
5024 the vector stmt by a factor VF/nunits. */
5025 for (j = 0; j < ncopies; j++)
5027 /* Handle uses. */
5028 if (j == 0)
5030 if (slp_node)
5032 if (code == WIDEN_LSHIFT_EXPR)
5034 unsigned int k;
5036 vec_oprnd1 = op1;
5037 /* Store vec_oprnd1 for every vector stmt to be created
5038 for SLP_NODE. We check during the analysis that all
5039 the shift arguments are the same. */
5040 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
5041 vec_oprnds1.quick_push (vec_oprnd1);
5043 vect_get_vec_defs (op0, NULL_TREE, stmt_info,
5044 &vec_oprnds0, NULL, slp_node);
5046 else
5047 vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0,
5048 &vec_oprnds1, slp_node);
5050 else
5052 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt_info);
5053 vec_oprnds0.quick_push (vec_oprnd0);
5054 if (op_type == binary_op)
5056 if (code == WIDEN_LSHIFT_EXPR)
5057 vec_oprnd1 = op1;
5058 else
5059 vec_oprnd1
5060 = vect_get_vec_def_for_operand (op1, stmt_info);
5061 vec_oprnds1.quick_push (vec_oprnd1);
5065 else
5067 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd0);
5068 vec_oprnds0.truncate (0);
5069 vec_oprnds0.quick_push (vec_oprnd0);
5070 if (op_type == binary_op)
5072 if (code == WIDEN_LSHIFT_EXPR)
5073 vec_oprnd1 = op1;
5074 else
5075 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (vinfo,
5076 vec_oprnd1);
5077 vec_oprnds1.truncate (0);
5078 vec_oprnds1.quick_push (vec_oprnd1);
5082 /* Arguments are ready. Create the new vector stmts. */
5083 for (i = multi_step_cvt; i >= 0; i--)
5085 tree this_dest = vec_dsts[i];
5086 enum tree_code c1 = code1, c2 = code2;
5087 if (i == 0 && codecvt2 != ERROR_MARK)
5089 c1 = codecvt1;
5090 c2 = codecvt2;
5092 vect_create_vectorized_promotion_stmts (&vec_oprnds0,
5093 &vec_oprnds1, stmt_info,
5094 this_dest, gsi,
5095 c1, c2, decl1, decl2,
5096 op_type);
5099 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
5101 stmt_vec_info new_stmt_info;
5102 if (cvt_type)
5104 if (codecvt1 == CALL_EXPR)
5106 gcall *new_stmt = gimple_build_call (decl1, 1, vop0);
5107 new_temp = make_ssa_name (vec_dest, new_stmt);
5108 gimple_call_set_lhs (new_stmt, new_temp);
5109 new_stmt_info
5110 = vect_finish_stmt_generation (stmt_info, new_stmt,
5111 gsi);
5113 else
5115 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
5116 new_temp = make_ssa_name (vec_dest);
5117 gassign *new_stmt
5118 = gimple_build_assign (new_temp, codecvt1, vop0);
5119 new_stmt_info
5120 = vect_finish_stmt_generation (stmt_info, new_stmt,
5121 gsi);
5124 else
5125 new_stmt_info = vinfo->lookup_def (vop0);
5127 if (slp_node)
5128 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
5129 else
5131 if (!prev_stmt_info)
5132 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt_info;
5133 else
5134 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
5135 prev_stmt_info = new_stmt_info;
5140 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
5141 break;
5143 case NARROW:
5144 /* In case the vectorization factor (VF) is bigger than the number
5145 of elements that we can fit in a vectype (nunits), we have to
5146 generate more than one vector stmt, i.e., we need to "unroll"
5147 the vector stmt by a factor VF/nunits. */
5148 for (j = 0; j < ncopies; j++)
5150 /* Handle uses. */
5151 if (slp_node)
5152 vect_get_vec_defs (op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL,
5153 slp_node);
5154 else
5156 vec_oprnds0.truncate (0);
5157 vect_get_loop_based_defs (&last_oprnd, stmt_info, &vec_oprnds0,
5158 vect_pow2 (multi_step_cvt) - 1);
5161 /* Arguments are ready. Create the new vector stmts. */
5162 if (cvt_type)
5163 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
5165 if (codecvt1 == CALL_EXPR)
5167 gcall *new_stmt = gimple_build_call (decl1, 1, vop0);
5168 new_temp = make_ssa_name (vec_dest, new_stmt);
5169 gimple_call_set_lhs (new_stmt, new_temp);
5170 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
5172 else
5174 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
5175 new_temp = make_ssa_name (vec_dest);
5176 gassign *new_stmt
5177 = gimple_build_assign (new_temp, codecvt1, vop0);
5178 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
5181 vec_oprnds0[i] = new_temp;
5184 vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
5185 stmt_info, vec_dsts, gsi,
5186 slp_node, code1,
5187 &prev_stmt_info);
5190 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
5191 break;
5194 vec_oprnds0.release ();
5195 vec_oprnds1.release ();
5196 interm_types.release ();
5198 return true;
5202 /* Function vectorizable_assignment.
5204 Check if STMT_INFO performs an assignment (copy) that can be vectorized.
5205 If VEC_STMT is also passed, vectorize the STMT_INFO: create a vectorized
5206 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
5207 Return true if STMT_INFO is vectorizable in this way. */
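/* Illustrative sketch (128-bit vectors assumed): a copy, or a
   conversion that changes neither the number of elements nor the
   vector size, such as

     unsigned int u = (unsigned int) s;   // s is a 32-bit int

   is vectorized as a single statement per vector copy:

     vect_u = VIEW_CONVERT_EXPR<vector(4) unsigned int>(vect_s);  */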
5209 static bool
5210 vectorizable_assignment (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
5211 stmt_vec_info *vec_stmt, slp_tree slp_node,
5212 stmt_vector_for_cost *cost_vec)
5214 tree vec_dest;
5215 tree scalar_dest;
5216 tree op;
5217 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5218 tree new_temp;
5219 enum vect_def_type dt[1] = {vect_unknown_def_type};
5220 int ndts = 1;
5221 int ncopies;
5222 int i, j;
5223 vec<tree> vec_oprnds = vNULL;
5224 tree vop;
5225 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5226 vec_info *vinfo = stmt_info->vinfo;
5227 stmt_vec_info prev_stmt_info = NULL;
5228 enum tree_code code;
5229 tree vectype_in;
5231 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5232 return false;
5234 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5235 && ! vec_stmt)
5236 return false;
5238 /* Is vectorizable assignment? */
5239 gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
5240 if (!stmt)
5241 return false;
5243 scalar_dest = gimple_assign_lhs (stmt);
5244 if (TREE_CODE (scalar_dest) != SSA_NAME)
5245 return false;
5247 code = gimple_assign_rhs_code (stmt);
5248 if (gimple_assign_single_p (stmt)
5249 || code == PAREN_EXPR
5250 || CONVERT_EXPR_CODE_P (code))
5251 op = gimple_assign_rhs1 (stmt);
5252 else
5253 return false;
5255 if (code == VIEW_CONVERT_EXPR)
5256 op = TREE_OPERAND (op, 0);
5258 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5259 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
5261 /* Multiple types in SLP are handled by creating the appropriate number of
5262 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5263 case of SLP. */
5264 if (slp_node)
5265 ncopies = 1;
5266 else
5267 ncopies = vect_get_num_copies (loop_vinfo, vectype);
5269 gcc_assert (ncopies >= 1);
5271 if (!vect_is_simple_use (op, vinfo, &dt[0], &vectype_in))
5273 if (dump_enabled_p ())
5274 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5275 "use not simple.\n");
5276 return false;
5279 /* We can handle NOP_EXPR conversions that do not change the number
5280 of elements or the vector size. */
5281 if ((CONVERT_EXPR_CODE_P (code)
5282 || code == VIEW_CONVERT_EXPR)
5283 && (!vectype_in
5284 || maybe_ne (TYPE_VECTOR_SUBPARTS (vectype_in), nunits)
5285 || maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype)),
5286 GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
5287 return false;
5289 /* We do not handle bit-precision changes. */
5290 if ((CONVERT_EXPR_CODE_P (code)
5291 || code == VIEW_CONVERT_EXPR)
5292 && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
5293 && (!type_has_mode_precision_p (TREE_TYPE (scalar_dest))
5294 || !type_has_mode_precision_p (TREE_TYPE (op)))
5295 /* But a conversion that does not change the bit-pattern is ok. */
5296 && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
5297 > TYPE_PRECISION (TREE_TYPE (op)))
5298 && TYPE_UNSIGNED (TREE_TYPE (op)))
5299 /* Conversion between boolean types of different sizes is
5300 a simple assignment in case their vectypes are the same
5301 boolean vectors. */
5302 && (!VECTOR_BOOLEAN_TYPE_P (vectype)
5303 || !VECTOR_BOOLEAN_TYPE_P (vectype_in)))
5305 if (dump_enabled_p ())
5306 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5307 "type conversion to/from bit-precision "
5308 "unsupported.\n");
5309 return false;
5312 if (!vec_stmt) /* transformation not required. */
5314 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
5315 DUMP_VECT_SCOPE ("vectorizable_assignment");
5316 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec);
5317 return true;
5320 /* Transform. */
5321 if (dump_enabled_p ())
5322 dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");
5324 /* Handle def. */
5325 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5327 /* Handle use. */
5328 for (j = 0; j < ncopies; j++)
5330 /* Handle uses. */
5331 if (j == 0)
5332 vect_get_vec_defs (op, NULL, stmt_info, &vec_oprnds, NULL, slp_node);
5333 else
5334 vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds, NULL);
5336 /* Arguments are ready. Create the new vector stmt. */
5337 stmt_vec_info new_stmt_info = NULL;
5338 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
5340 if (CONVERT_EXPR_CODE_P (code)
5341 || code == VIEW_CONVERT_EXPR)
5342 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
5343 gassign *new_stmt = gimple_build_assign (vec_dest, vop);
5344 new_temp = make_ssa_name (vec_dest, new_stmt);
5345 gimple_assign_set_lhs (new_stmt, new_temp);
5346 new_stmt_info
5347 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
5348 if (slp_node)
5349 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
5352 if (slp_node)
5353 continue;
5355 if (j == 0)
5356 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
5357 else
5358 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
5360 prev_stmt_info = new_stmt_info;
5363 vec_oprnds.release ();
5364 return true;
5368 /* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
5369 either as shift by a scalar or by a vector. */
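/* Usage sketch (not a quote of any actual caller): a pattern recognizer
   that wants to know whether right shifts of 32-bit ints can be
   vectorized at all, regardless of how the shift amount is defined,
   can simply test

     if (!vect_supportable_shift (RSHIFT_EXPR, integer_type_node))
       return NULL;

   The implementation tries the vector-shift-by-scalar optab first and
   falls back to the vector-shift-by-vector optab.  */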
5371 bool
5372 vect_supportable_shift (enum tree_code code, tree scalar_type)
5375 machine_mode vec_mode;
5376 optab optab;
5377 int icode;
5378 tree vectype;
5380 vectype = get_vectype_for_scalar_type (scalar_type);
5381 if (!vectype)
5382 return false;
5384 optab = optab_for_tree_code (code, vectype, optab_scalar);
5385 if (!optab
5386 || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
5388 optab = optab_for_tree_code (code, vectype, optab_vector);
5389 if (!optab
5390 || (optab_handler (optab, TYPE_MODE (vectype))
5391 == CODE_FOR_nothing))
5392 return false;
5395 vec_mode = TYPE_MODE (vectype);
5396 icode = (int) optab_handler (optab, vec_mode);
5397 if (icode == CODE_FOR_nothing)
5398 return false;
5400 return true;
5404 /* Function vectorizable_shift.
5406 Check if STMT_INFO performs a shift operation that can be vectorized.
5407 If VEC_STMT is also passed, vectorize the STMT_INFO: create a vectorized
5408 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
5409 Return true if STMT_INFO is vectorizable in this way. */
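/* Illustrative sketch: in

     for (i = 0; i < n; i++)
       a[i] = b[i] << k;      // k loop-invariant

   the shift amount stays scalar and the vector/scalar shift optab can
   be used, whereas in

     for (i = 0; i < n; i++)
       a[i] = b[i] << c[i];

   every lane has its own count, so the vector/vector optab is needed.
   The analysis below makes that choice (see SCALAR_SHIFT_ARG).  */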
5411 static bool
5412 vectorizable_shift (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
5413 stmt_vec_info *vec_stmt, slp_tree slp_node,
5414 stmt_vector_for_cost *cost_vec)
5416 tree vec_dest;
5417 tree scalar_dest;
5418 tree op0, op1 = NULL;
5419 tree vec_oprnd1 = NULL_TREE;
5420 tree vectype;
5421 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5422 enum tree_code code;
5423 machine_mode vec_mode;
5424 tree new_temp;
5425 optab optab;
5426 int icode;
5427 machine_mode optab_op2_mode;
5428 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
5429 int ndts = 2;
5430 stmt_vec_info prev_stmt_info;
5431 poly_uint64 nunits_in;
5432 poly_uint64 nunits_out;
5433 tree vectype_out;
5434 tree op1_vectype;
5435 int ncopies;
5436 int j, i;
5437 vec<tree> vec_oprnds0 = vNULL;
5438 vec<tree> vec_oprnds1 = vNULL;
5439 tree vop0, vop1;
5440 unsigned int k;
5441 bool scalar_shift_arg = true;
5442 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5443 vec_info *vinfo = stmt_info->vinfo;
5445 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5446 return false;
5448 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5449 && ! vec_stmt)
5450 return false;
5452 /* Is STMT a vectorizable binary/unary operation? */
5453 gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
5454 if (!stmt)
5455 return false;
5457 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
5458 return false;
5460 code = gimple_assign_rhs_code (stmt);
5462 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
5463 || code == RROTATE_EXPR))
5464 return false;
5466 scalar_dest = gimple_assign_lhs (stmt);
5467 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
5468 if (!type_has_mode_precision_p (TREE_TYPE (scalar_dest)))
5470 if (dump_enabled_p ())
5471 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5472 "bit-precision shifts not supported.\n");
5473 return false;
5476 op0 = gimple_assign_rhs1 (stmt);
5477 if (!vect_is_simple_use (op0, vinfo, &dt[0], &vectype))
5479 if (dump_enabled_p ())
5480 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5481 "use not simple.\n");
5482 return false;
5484 /* If op0 is an external or constant def use a vector type with
5485 the same size as the output vector type. */
5486 if (!vectype)
5487 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
5488 if (vec_stmt)
5489 gcc_assert (vectype);
5490 if (!vectype)
5492 if (dump_enabled_p ())
5493 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5494 "no vectype for scalar type\n");
5495 return false;
5498 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
5499 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
5500 if (maybe_ne (nunits_out, nunits_in))
5501 return false;
5503 op1 = gimple_assign_rhs2 (stmt);
5504 stmt_vec_info op1_def_stmt_info;
5505 if (!vect_is_simple_use (op1, vinfo, &dt[1], &op1_vectype,
5506 &op1_def_stmt_info))
5508 if (dump_enabled_p ())
5509 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5510 "use not simple.\n");
5511 return false;
5514 /* Multiple types in SLP are handled by creating the appropriate number of
5515 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5516 case of SLP. */
5517 if (slp_node)
5518 ncopies = 1;
5519 else
5520 ncopies = vect_get_num_copies (loop_vinfo, vectype);
5522 gcc_assert (ncopies >= 1);
5524 /* Determine whether the shift amount is a vector or a scalar. If the
5525 shift/rotate amount is a vector, use the vector/vector shift optabs. */
5527 if ((dt[1] == vect_internal_def
5528 || dt[1] == vect_induction_def)
5529 && !slp_node)
5530 scalar_shift_arg = false;
5531 else if (dt[1] == vect_constant_def
5532 || dt[1] == vect_external_def
5533 || dt[1] == vect_internal_def)
5535 /* In SLP, we need to check whether the shift count is the same for
5536 all stmts; in loops, if it is a constant or invariant, it is always
5537 a scalar shift. */
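/* Added example (hypothetical SLP group): for

     a[0] = b[0] << 2;
     a[1] = b[1] << 2;

   every rhs2 compares equal to OP1, so the scalar-shift path stays
   available, whereas mixing amounts (say << 2 and << 3) clears
   SCALAR_SHIFT_ARG and a vector/vector shift is used instead.  */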
5538 if (slp_node)
5540 vec<stmt_vec_info> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
5541 stmt_vec_info slpstmt_info;
5543 FOR_EACH_VEC_ELT (stmts, k, slpstmt_info)
5545 gassign *slpstmt = as_a <gassign *> (slpstmt_info->stmt);
5546 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
5547 scalar_shift_arg = false;
5551 /* If the shift amount is computed by a pattern stmt we cannot
5552 use the scalar amount directly, so give up and use a vector
5553 shift. */
5554 if (op1_def_stmt_info && is_pattern_stmt_p (op1_def_stmt_info))
5555 scalar_shift_arg = false;
5557 else
5559 if (dump_enabled_p ())
5560 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5561 "operand mode requires invariant argument.\n");
5562 return false;
5565 /* Vector shifted by vector. */
5566 if (!scalar_shift_arg)
5568 optab = optab_for_tree_code (code, vectype, optab_vector);
5569 if (dump_enabled_p ())
5570 dump_printf_loc (MSG_NOTE, vect_location,
5571 "vector/vector shift/rotate found.\n");
5573 if (!op1_vectype)
5574 op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
5575 if (op1_vectype == NULL_TREE
5576 || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
5578 if (dump_enabled_p ())
5579 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5580 "unusable type for last operand in"
5581 " vector/vector shift/rotate.\n");
5582 return false;
5585 /* See if the machine has a vector shifted by scalar insn and if not
5586 then see if it has a vector shifted by vector insn. */
5587 else
5589 optab = optab_for_tree_code (code, vectype, optab_scalar);
5590 if (optab
5591 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
5593 if (dump_enabled_p ())
5594 dump_printf_loc (MSG_NOTE, vect_location,
5595 "vector/scalar shift/rotate found.\n");
5597 else
5599 optab = optab_for_tree_code (code, vectype, optab_vector);
5600 if (optab
5601 && (optab_handler (optab, TYPE_MODE (vectype))
5602 != CODE_FOR_nothing))
5604 scalar_shift_arg = false;
5606 if (dump_enabled_p ())
5607 dump_printf_loc (MSG_NOTE, vect_location,
5608 "vector/vector shift/rotate found.\n");
5610 /* Unlike the other binary operators, shifts/rotates have
5611 an rhs of type int rather than the same type as the lhs,
5612 so make sure the scalar shift amount has the right type when
5613 we are dealing with vectors of long long/long/short/char. */
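/* Added example (hypothetical code): for

     long long a[N];
     int s;
     a[i] = a[i] << s;

   the shift amount has type int while TREE_TYPE (vectype) is
   long long, so OP1 is converted (and, when transforming a non-SLP
   stmt, broadcast via vect_init_vector below) to the element type.  */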
5614 if (dt[1] == vect_constant_def)
5615 op1 = fold_convert (TREE_TYPE (vectype), op1);
5616 else if (!useless_type_conversion_p (TREE_TYPE (vectype),
5617 TREE_TYPE (op1)))
5619 if (slp_node
5620 && TYPE_MODE (TREE_TYPE (vectype))
5621 != TYPE_MODE (TREE_TYPE (op1)))
5623 if (dump_enabled_p ())
5624 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5625 "unusable type for last operand in"
5626 " vector/vector shift/rotate.\n");
5627 return false;
5629 if (vec_stmt && !slp_node)
5631 op1 = fold_convert (TREE_TYPE (vectype), op1);
5632 op1 = vect_init_vector (stmt_info, op1,
5633 TREE_TYPE (vectype), NULL);
5640 /* Supportable by target? */
5641 if (!optab)
5643 if (dump_enabled_p ())
5644 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5645 "no optab.\n");
5646 return false;
5648 vec_mode = TYPE_MODE (vectype);
5649 icode = (int) optab_handler (optab, vec_mode);
5650 if (icode == CODE_FOR_nothing)
5652 if (dump_enabled_p ())
5653 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5654 "op not supported by target.\n");
5655 /* Check only during analysis. */
5656 if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
5657 || (!vec_stmt
5658 && !vect_worthwhile_without_simd_p (vinfo, code)))
5659 return false;
5660 if (dump_enabled_p ())
5661 dump_printf_loc (MSG_NOTE, vect_location,
5662 "proceeding using word mode.\n");
5665 /* Worthwhile without SIMD support? Check only during analysis. */
5666 if (!vec_stmt
5667 && !VECTOR_MODE_P (TYPE_MODE (vectype))
5668 && !vect_worthwhile_without_simd_p (vinfo, code))
5670 if (dump_enabled_p ())
5671 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5672 "not worthwhile without SIMD support.\n");
5673 return false;
5676 if (!vec_stmt) /* transformation not required. */
5678 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
5679 DUMP_VECT_SCOPE ("vectorizable_shift");
5680 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec);
5681 return true;
5684 /* Transform. */
5686 if (dump_enabled_p ())
5687 dump_printf_loc (MSG_NOTE, vect_location,
5688 "transform binary/unary operation.\n");
5690 /* Handle def. */
5691 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5693 prev_stmt_info = NULL;
5694 for (j = 0; j < ncopies; j++)
5696 /* Handle uses. */
5697 if (j == 0)
5699 if (scalar_shift_arg)
5701 /* Vector shl and shr insn patterns can be defined with scalar
5702 operand 2 (shift operand). In this case, use constant or loop
5703 invariant op1 directly, without extending it to vector mode
5704 first. */
5705 optab_op2_mode = insn_data[icode].operand[2].mode;
5706 if (!VECTOR_MODE_P (optab_op2_mode))
5708 if (dump_enabled_p ())
5709 dump_printf_loc (MSG_NOTE, vect_location,
5710 "operand 1 using scalar mode.\n");
5711 vec_oprnd1 = op1;
5712 vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
5713 vec_oprnds1.quick_push (vec_oprnd1);
5714 if (slp_node)
5716 /* Store vec_oprnd1 for every vector stmt to be created
5717 for SLP_NODE. We check during the analysis that all
5718 the shift arguments are the same.
5719 TODO: Allow different constants for different vector
5720 stmts generated for an SLP instance. */
5721 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
5722 vec_oprnds1.quick_push (vec_oprnd1);
5727 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
5728 (a special case for certain kinds of vector shifts); otherwise,
5729 operand 1 should be of a vector type (the usual case). */
5730 if (vec_oprnd1)
5731 vect_get_vec_defs (op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL,
5732 slp_node);
5733 else
5734 vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0, &vec_oprnds1,
5735 slp_node);
5737 else
5738 vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds0, &vec_oprnds1);
5740 /* Arguments are ready. Create the new vector stmt. */
5741 stmt_vec_info new_stmt_info = NULL;
5742 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
5744 vop1 = vec_oprnds1[i];
5745 gassign *new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
5746 new_temp = make_ssa_name (vec_dest, new_stmt);
5747 gimple_assign_set_lhs (new_stmt, new_temp);
5748 new_stmt_info
5749 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
5750 if (slp_node)
5751 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
5754 if (slp_node)
5755 continue;
5757 if (j == 0)
5758 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
5759 else
5760 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
5761 prev_stmt_info = new_stmt_info;
5764 vec_oprnds0.release ();
5765 vec_oprnds1.release ();
5767 return true;
5771 /* Function vectorizable_operation.
5773 Check if STMT_INFO performs a binary, unary or ternary operation that can
5774 be vectorized.
5775 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
5776 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
5777 Return true if STMT_INFO is vectorizable in this way. */
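/* Added illustrative examples (hypothetical arrays) of assignments this
   function handles:

     a[i] = -b[i];           unary operation
     a[i] = b[i] & c[i];     binary operation

   Shifts and rotates are rejected further down and left to
   vectorizable_shift instead.  */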
5779 static bool
5780 vectorizable_operation (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
5781 stmt_vec_info *vec_stmt, slp_tree slp_node,
5782 stmt_vector_for_cost *cost_vec)
5784 tree vec_dest;
5785 tree scalar_dest;
5786 tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
5787 tree vectype;
5788 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5789 enum tree_code code, orig_code;
5790 machine_mode vec_mode;
5791 tree new_temp;
5792 int op_type;
5793 optab optab;
5794 bool target_support_p;
5795 enum vect_def_type dt[3]
5796 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
5797 int ndts = 3;
5798 stmt_vec_info prev_stmt_info;
5799 poly_uint64 nunits_in;
5800 poly_uint64 nunits_out;
5801 tree vectype_out;
5802 int ncopies;
5803 int j, i;
5804 vec<tree> vec_oprnds0 = vNULL;
5805 vec<tree> vec_oprnds1 = vNULL;
5806 vec<tree> vec_oprnds2 = vNULL;
5807 tree vop0, vop1, vop2;
5808 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5809 vec_info *vinfo = stmt_info->vinfo;
5811 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5812 return false;
5814 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5815 && ! vec_stmt)
5816 return false;
5818 /* Is STMT a vectorizable binary/unary operation? */
5819 gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
5820 if (!stmt)
5821 return false;
5823 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
5824 return false;
5826 orig_code = code = gimple_assign_rhs_code (stmt);
5828 /* For pointer addition and subtraction, we should use the normal
5829 plus and minus for the vector operation. */
5830 if (code == POINTER_PLUS_EXPR)
5831 code = PLUS_EXPR;
5832 if (code == POINTER_DIFF_EXPR)
5833 code = MINUS_EXPR;
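/* Added sketch (hypothetical pointer arrays p[], q[]):

     r[i] = q[i] + i;        POINTER_PLUS_EXPR, vectorized as PLUS_EXPR
     d[i] = p[i] - q[i];     POINTER_DIFF_EXPR, vectorized as MINUS_EXPR

   i.e. the pointer-specific codes are lowered to the ordinary
   arithmetic codes for the element-wise vector operation.  */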
5835 /* Support only unary, binary and ternary operations. */
5836 op_type = TREE_CODE_LENGTH (code);
5837 if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
5839 if (dump_enabled_p ())
5840 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5841 "num. args = %d (not unary/binary/ternary op).\n",
5842 op_type);
5843 return false;
5846 scalar_dest = gimple_assign_lhs (stmt);
5847 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
5849 /* Most operations cannot handle bit-precision types without extra
5850 truncations. */
5851 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
5852 && !type_has_mode_precision_p (TREE_TYPE (scalar_dest))
5853 /* Exceptions are the bitwise binary operations. */
5854 && code != BIT_IOR_EXPR
5855 && code != BIT_XOR_EXPR
5856 && code != BIT_AND_EXPR)
5858 if (dump_enabled_p ())
5859 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5860 "bit-precision arithmetic not supported.\n");
5861 return false;
5864 op0 = gimple_assign_rhs1 (stmt);
5865 if (!vect_is_simple_use (op0, vinfo, &dt[0], &vectype))
5867 if (dump_enabled_p ())
5868 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5869 "use not simple.\n");
5870 return false;
5872 /* If op0 is an external or constant def use a vector type with
5873 the same size as the output vector type. */
5874 if (!vectype)
5876 /* For a boolean type we cannot determine the vectype from an
5877 invariant value (we don't know whether it is a vector
5878 of booleans or a vector of integers). We use the output
5879 vectype because operations on booleans don't change
5880 the type. */
5881 if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op0)))
5883 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (scalar_dest)))
5885 if (dump_enabled_p ())
5886 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5887 "not supported operation on bool value.\n");
5888 return false;
5890 vectype = vectype_out;
5892 else
5893 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
5895 if (vec_stmt)
5896 gcc_assert (vectype);
5897 if (!vectype)
5899 if (dump_enabled_p ())
5901 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5902 "no vectype for scalar type ");
5903 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
5904 TREE_TYPE (op0));
5905 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
5908 return false;
5911 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
5912 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
5913 if (maybe_ne (nunits_out, nunits_in))
5914 return false;
5916 if (op_type == binary_op || op_type == ternary_op)
5918 op1 = gimple_assign_rhs2 (stmt);
5919 if (!vect_is_simple_use (op1, vinfo, &dt[1]))
5921 if (dump_enabled_p ())
5922 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5923 "use not simple.\n");
5924 return false;
5927 if (op_type == ternary_op)
5929 op2 = gimple_assign_rhs3 (stmt);
5930 if (!vect_is_simple_use (op2, vinfo, &dt[2]))
5932 if (dump_enabled_p ())
5933 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5934 "use not simple.\n");
5935 return false;
5939 /* Multiple types in SLP are handled by creating the appropriate number of
5940 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5941 case of SLP. */
5942 if (slp_node)
5943 ncopies = 1;
5944 else
5945 ncopies = vect_get_num_copies (loop_vinfo, vectype);
5947 gcc_assert (ncopies >= 1);
5949 /* Shifts are handled in vectorizable_shift (). */
5950 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
5951 || code == RROTATE_EXPR)
5952 return false;
5954 /* Supportable by target? */
5956 vec_mode = TYPE_MODE (vectype);
5957 if (code == MULT_HIGHPART_EXPR)
5958 target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
5959 else
5961 optab = optab_for_tree_code (code, vectype, optab_default);
5962 if (!optab)
5964 if (dump_enabled_p ())
5965 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5966 "no optab.\n");
5967 return false;
5969 target_support_p = (optab_handler (optab, vec_mode)
5970 != CODE_FOR_nothing);
5973 if (!target_support_p)
5975 if (dump_enabled_p ())
5976 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5977 "op not supported by target.\n");
5978 /* Check only during analysis. */
5979 if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
5980 || (!vec_stmt && !vect_worthwhile_without_simd_p (vinfo, code)))
5981 return false;
5982 if (dump_enabled_p ())
5983 dump_printf_loc (MSG_NOTE, vect_location,
5984 "proceeding using word mode.\n");
5987 /* Worthwhile without SIMD support? Check only during analysis. */
5988 if (!VECTOR_MODE_P (vec_mode)
5989 && !vec_stmt
5990 && !vect_worthwhile_without_simd_p (vinfo, code))
5992 if (dump_enabled_p ())
5993 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5994 "not worthwhile without SIMD support.\n");
5995 return false;
5998 if (!vec_stmt) /* transformation not required. */
6000 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
6001 DUMP_VECT_SCOPE ("vectorizable_operation");
6002 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec);
6003 return true;
6006 /* Transform. */
6008 if (dump_enabled_p ())
6009 dump_printf_loc (MSG_NOTE, vect_location,
6010 "transform binary/unary operation.\n");
6012 /* POINTER_DIFF_EXPR has pointer arguments which are vectorized as
6013 vectors with unsigned elements, but the result is signed. So, we
6014 need to compute the MINUS_EXPR into a vectype temporary and
6015 VIEW_CONVERT_EXPR it into the final vectype_out result. */
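/* Added sketch of the statements generated for POINTER_DIFF_EXPR
   (SSA names are illustrative only):

     vect_tmp_1 = vect_p0 - vect_p1;                            unsigned VECTYPE
     vect_res_2 = VIEW_CONVERT_EXPR<VECTYPE_OUT> (vect_tmp_1);  signed result

   which corresponds to the vec_cvt_dest handling further down.  */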
6016 tree vec_cvt_dest = NULL_TREE;
6017 if (orig_code == POINTER_DIFF_EXPR)
6019 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6020 vec_cvt_dest = vect_create_destination_var (scalar_dest, vectype_out);
6022 /* Handle def. */
6023 else
6024 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
6026 /* In case the vectorization factor (VF) is bigger than the number
6027 of elements that we can fit in a vectype (nunits), we have to generate
6028 more than one vector stmt - i.e. - we need to "unroll" the
6029 vector stmt by a factor VF/nunits. In doing so, we record a pointer
6030 from one copy of the vector stmt to the next, in the field
6031 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
6032 stages to find the correct vector defs to be used when vectorizing
6033 stmts that use the defs of the current stmt. The example below
6034 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
6035 we need to create 4 vectorized stmts):
6037 before vectorization:
6038 RELATED_STMT VEC_STMT
6039 S1: x = memref - -
6040 S2: z = x + 1 - -
6042 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
6043 there):
6044 RELATED_STMT VEC_STMT
6045 VS1_0: vx0 = memref0 VS1_1 -
6046 VS1_1: vx1 = memref1 VS1_2 -
6047 VS1_2: vx2 = memref2 VS1_3 -
6048 VS1_3: vx3 = memref3 - -
6049 S1: x = load - VS1_0
6050 S2: z = x + 1 - -
6052 step 2: vectorize stmt S2 (done here):
6053 To vectorize stmt S2 we first need to find the relevant vector
6054 def for the first operand 'x'. This is, as usual, obtained from
6055 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
6056 that defines 'x' (S1). This way we find the stmt VS1_0, and the
6057 relevant vector def 'vx0'. Having found 'vx0' we can generate
6058 the vector stmt VS2_0, and as usual, record it in the
6059 STMT_VINFO_VEC_STMT of stmt S2.
6060 When creating the second copy (VS2_1), we obtain the relevant vector
6061 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
6062 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
6063 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
6064 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
6065 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
6066 chain of stmts and pointers:
6067 RELATED_STMT VEC_STMT
6068 VS1_0: vx0 = memref0 VS1_1 -
6069 VS1_1: vx1 = memref1 VS1_2 -
6070 VS1_2: vx2 = memref2 VS1_3 -
6071 VS1_3: vx3 = memref3 - -
6072 S1: x = load - VS1_0
6073 VS2_0: vz0 = vx0 + v1 VS2_1 -
6074 VS2_1: vz1 = vx1 + v1 VS2_2 -
6075 VS2_2: vz2 = vx2 + v1 VS2_3 -
6076 VS2_3: vz3 = vx3 + v1 - -
6077 S2: z = x + 1 - VS2_0 */
6079 prev_stmt_info = NULL;
6080 for (j = 0; j < ncopies; j++)
6082 /* Handle uses. */
6083 if (j == 0)
6085 if (op_type == binary_op)
6086 vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0, &vec_oprnds1,
6087 slp_node);
6088 else if (op_type == ternary_op)
6090 if (slp_node)
6092 auto_vec<tree> ops(3);
6093 ops.quick_push (op0);
6094 ops.quick_push (op1);
6095 ops.quick_push (op2);
6096 auto_vec<vec<tree> > vec_defs(3);
6097 vect_get_slp_defs (ops, slp_node, &vec_defs);
6098 vec_oprnds0 = vec_defs[0];
6099 vec_oprnds1 = vec_defs[1];
6100 vec_oprnds2 = vec_defs[2];
6102 else
6104 vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0,
6105 &vec_oprnds1, NULL);
6106 vect_get_vec_defs (op2, NULL_TREE, stmt_info, &vec_oprnds2,
6107 NULL, NULL);
6110 else
6111 vect_get_vec_defs (op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL,
6112 slp_node);
6114 else
6116 vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds0, &vec_oprnds1);
6117 if (op_type == ternary_op)
6119 tree vec_oprnd = vec_oprnds2.pop ();
6120 vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (vinfo,
6121 vec_oprnd));
6125 /* Arguments are ready. Create the new vector stmt. */
6126 stmt_vec_info new_stmt_info = NULL;
6127 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
6129 vop1 = ((op_type == binary_op || op_type == ternary_op)
6130 ? vec_oprnds1[i] : NULL_TREE);
6131 vop2 = ((op_type == ternary_op)
6132 ? vec_oprnds2[i] : NULL_TREE);
6133 gassign *new_stmt = gimple_build_assign (vec_dest, code,
6134 vop0, vop1, vop2);
6135 new_temp = make_ssa_name (vec_dest, new_stmt);
6136 gimple_assign_set_lhs (new_stmt, new_temp);
6137 new_stmt_info
6138 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
6139 if (vec_cvt_dest)
6141 new_temp = build1 (VIEW_CONVERT_EXPR, vectype_out, new_temp);
6142 gassign *new_stmt
6143 = gimple_build_assign (vec_cvt_dest, VIEW_CONVERT_EXPR,
6144 new_temp);
6145 new_temp = make_ssa_name (vec_cvt_dest, new_stmt);
6146 gimple_assign_set_lhs (new_stmt, new_temp);
6147 new_stmt_info
6148 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
6150 if (slp_node)
6151 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
6154 if (slp_node)
6155 continue;
6157 if (j == 0)
6158 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
6159 else
6160 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
6161 prev_stmt_info = new_stmt_info;
6164 vec_oprnds0.release ();
6165 vec_oprnds1.release ();
6166 vec_oprnds2.release ();
6168 return true;
6171 /* A helper function to ensure data reference DR_INFO's base alignment. */
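/* Added example (numbers are hypothetical and target-dependent): if the
   base object is something like

     static int a[256];

   with only its default alignment, and DR_TARGET_ALIGNMENT of the access
   is larger (say 16 or 32 bytes), the declaration's alignment is raised
   below via SET_DECL_ALIGN or symtab_node::increase_alignment so that the
   vectorized references can be emitted as aligned accesses.  */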
6173 static void
6174 ensure_base_align (dr_vec_info *dr_info)
6176 if (dr_info->misalignment == DR_MISALIGNMENT_UNINITIALIZED)
6177 return;
6179 if (dr_info->base_misaligned)
6181 tree base_decl = dr_info->base_decl;
6183 unsigned int align_base_to
6184 = DR_TARGET_ALIGNMENT (dr_info) * BITS_PER_UNIT;
6186 if (decl_in_symtab_p (base_decl))
6187 symtab_node::get (base_decl)->increase_alignment (align_base_to);
6188 else
6190 SET_DECL_ALIGN (base_decl, align_base_to);
6191 DECL_USER_ALIGN (base_decl) = 1;
6193 dr_info->base_misaligned = false;
6198 /* Function get_group_alias_ptr_type.
6200 Return the alias type for the group starting at FIRST_STMT_INFO. */
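/* Added example (hypothetical types): for a group that stores to both
   members of

     struct { int i; float f; } *p;

   the alias sets of p->i and p->f differ, so ptr_type_node is returned;
   a group of plain int stores keeps the more precise alias pointer type
   of the first reference.  */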
6202 static tree
6203 get_group_alias_ptr_type (stmt_vec_info first_stmt_info)
6205 struct data_reference *first_dr, *next_dr;
6207 first_dr = STMT_VINFO_DATA_REF (first_stmt_info);
6208 stmt_vec_info next_stmt_info = DR_GROUP_NEXT_ELEMENT (first_stmt_info);
6209 while (next_stmt_info)
6211 next_dr = STMT_VINFO_DATA_REF (next_stmt_info);
6212 if (get_alias_set (DR_REF (first_dr))
6213 != get_alias_set (DR_REF (next_dr)))
6215 if (dump_enabled_p ())
6216 dump_printf_loc (MSG_NOTE, vect_location,
6217 "conflicting alias set types.\n");
6218 return ptr_type_node;
6220 next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
6222 return reference_alias_ptr_type (DR_REF (first_dr));
6226 /* Function vectorizable_store.
6228 Check if STMT_INFO defines a non-scalar data-ref (array/pointer/structure)
6229 that can be vectorized.
6230 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
6231 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
6232 Return true if STMT_INFO is vectorizable in this way. */
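/* Added illustrative examples (hypothetical code) of stores handled here:

     for (i = 0; i < n; i++)
       a[i] = b[i];              contiguous store

     for (i = 0; i < n; i++)
       {
         a[2*i] = x;             grouped (interleaved) store,
         a[2*i+1] = y;           DR_GROUP_SIZE == 2
       }

   as well as masked and scatter stores expressed as internal calls
   (e.g. IFN_MASK_STORE), which are recognised just below.  */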
6234 static bool
6235 vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
6236 stmt_vec_info *vec_stmt, slp_tree slp_node,
6237 stmt_vector_for_cost *cost_vec)
6239 tree data_ref;
6240 tree op;
6241 tree vec_oprnd = NULL_TREE;
6242 tree elem_type;
6243 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6244 struct loop *loop = NULL;
6245 machine_mode vec_mode;
6246 tree dummy;
6247 enum dr_alignment_support alignment_support_scheme;
6248 enum vect_def_type rhs_dt = vect_unknown_def_type;
6249 enum vect_def_type mask_dt = vect_unknown_def_type;
6250 stmt_vec_info prev_stmt_info = NULL;
6251 tree dataref_ptr = NULL_TREE;
6252 tree dataref_offset = NULL_TREE;
6253 gimple *ptr_incr = NULL;
6254 int ncopies;
6255 int j;
6256 stmt_vec_info first_stmt_info;
6257 bool grouped_store;
6258 unsigned int group_size, i;
6259 vec<tree> oprnds = vNULL;
6260 vec<tree> result_chain = vNULL;
6261 bool inv_p;
6262 tree offset = NULL_TREE;
6263 vec<tree> vec_oprnds = vNULL;
6264 bool slp = (slp_node != NULL);
6265 unsigned int vec_num;
6266 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
6267 vec_info *vinfo = stmt_info->vinfo;
6268 tree aggr_type;
6269 gather_scatter_info gs_info;
6270 poly_uint64 vf;
6271 vec_load_store_type vls_type;
6272 tree ref_type;
6274 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
6275 return false;
6277 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
6278 && ! vec_stmt)
6279 return false;
6281 /* Is vectorizable store? */
6283 tree mask = NULL_TREE, mask_vectype = NULL_TREE;
6284 if (gassign *assign = dyn_cast <gassign *> (stmt_info->stmt))
6286 tree scalar_dest = gimple_assign_lhs (assign);
6287 if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
6288 && is_pattern_stmt_p (stmt_info))
6289 scalar_dest = TREE_OPERAND (scalar_dest, 0);
6290 if (TREE_CODE (scalar_dest) != ARRAY_REF
6291 && TREE_CODE (scalar_dest) != BIT_FIELD_REF
6292 && TREE_CODE (scalar_dest) != INDIRECT_REF
6293 && TREE_CODE (scalar_dest) != COMPONENT_REF
6294 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
6295 && TREE_CODE (scalar_dest) != REALPART_EXPR
6296 && TREE_CODE (scalar_dest) != MEM_REF)
6297 return false;
6299 else
6301 gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
6302 if (!call || !gimple_call_internal_p (call))
6303 return false;
6305 internal_fn ifn = gimple_call_internal_fn (call);
6306 if (!internal_store_fn_p (ifn))
6307 return false;
6309 if (slp_node != NULL)
6311 if (dump_enabled_p ())
6312 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6313 "SLP of masked stores not supported.\n");
6314 return false;
6317 int mask_index = internal_fn_mask_index (ifn);
6318 if (mask_index >= 0)
6320 mask = gimple_call_arg (call, mask_index);
6321 if (!vect_check_load_store_mask (stmt_info, mask, &mask_dt,
6322 &mask_vectype))
6323 return false;
6327 op = vect_get_store_rhs (stmt_info);
6329 /* Cannot have hybrid store SLP -- that would mean storing to the
6330 same location twice. */
6331 gcc_assert (slp == PURE_SLP_STMT (stmt_info));
6333 tree vectype = STMT_VINFO_VECTYPE (stmt_info), rhs_vectype = NULL_TREE;
6334 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
6336 if (loop_vinfo)
6338 loop = LOOP_VINFO_LOOP (loop_vinfo);
6339 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
6341 else
6342 vf = 1;
6344 /* Multiple types in SLP are handled by creating the appropriate number of
6345 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
6346 case of SLP. */
6347 if (slp)
6348 ncopies = 1;
6349 else
6350 ncopies = vect_get_num_copies (loop_vinfo, vectype);
6352 gcc_assert (ncopies >= 1);
6354 /* FORNOW. This restriction should be relaxed. */
6355 if (loop && nested_in_vect_loop_p (loop, stmt_info) && ncopies > 1)
6357 if (dump_enabled_p ())
6358 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6359 "multiple types in nested loop.\n");
6360 return false;
6363 if (!vect_check_store_rhs (stmt_info, op, &rhs_dt, &rhs_vectype, &vls_type))
6364 return false;
6366 elem_type = TREE_TYPE (vectype);
6367 vec_mode = TYPE_MODE (vectype);
6369 if (!STMT_VINFO_DATA_REF (stmt_info))
6370 return false;
6372 vect_memory_access_type memory_access_type;
6373 if (!get_load_store_type (stmt_info, vectype, slp, mask, vls_type, ncopies,
6374 &memory_access_type, &gs_info))
6375 return false;
6377 if (mask)
6379 if (memory_access_type == VMAT_CONTIGUOUS)
6381 if (!VECTOR_MODE_P (vec_mode)
6382 || !can_vec_mask_load_store_p (vec_mode,
6383 TYPE_MODE (mask_vectype), false))
6384 return false;
6386 else if (memory_access_type != VMAT_LOAD_STORE_LANES
6387 && (memory_access_type != VMAT_GATHER_SCATTER || gs_info.decl))
6389 if (dump_enabled_p ())
6390 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6391 "unsupported access type for masked store.\n");
6392 return false;
6395 else
6397 /* FORNOW. In some cases we can vectorize even if the data-type is not
6398 supported (e.g. array initialization with 0). */
6399 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
6400 return false;
6403 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info), *first_dr_info = NULL;
6404 grouped_store = (STMT_VINFO_GROUPED_ACCESS (stmt_info)
6405 && memory_access_type != VMAT_GATHER_SCATTER
6406 && (slp || memory_access_type != VMAT_CONTIGUOUS));
6407 if (grouped_store)
6409 first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
6410 first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
6411 group_size = DR_GROUP_SIZE (first_stmt_info);
6413 else
6415 first_stmt_info = stmt_info;
6416 first_dr_info = dr_info;
6417 group_size = vec_num = 1;
6420 if (!vec_stmt) /* transformation not required. */
6422 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
6424 if (loop_vinfo
6425 && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
6426 check_load_store_masking (loop_vinfo, vectype, vls_type, group_size,
6427 memory_access_type, &gs_info);
6429 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
6430 vect_model_store_cost (stmt_info, ncopies, rhs_dt, memory_access_type,
6431 vls_type, slp_node, cost_vec);
6432 return true;
6434 gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
6436 /* Transform. */
6438 ensure_base_align (dr_info);
6440 if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl)
6442 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, src;
6443 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
6444 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
6445 tree ptr, mask, var, scale, perm_mask = NULL_TREE;
6446 edge pe = loop_preheader_edge (loop);
6447 gimple_seq seq;
6448 basic_block new_bb;
6449 enum { NARROW, NONE, WIDEN } modifier;
6450 poly_uint64 scatter_off_nunits
6451 = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);
6453 if (known_eq (nunits, scatter_off_nunits))
6454 modifier = NONE;
6455 else if (known_eq (nunits * 2, scatter_off_nunits))
6457 modifier = WIDEN;
6459 /* Currently gathers and scatters are only supported for
6460 fixed-length vectors. */
6461 unsigned int count = scatter_off_nunits.to_constant ();
6462 vec_perm_builder sel (count, count, 1);
6463 for (i = 0; i < (unsigned int) count; ++i)
6464 sel.quick_push (i | (count / 2));
6466 vec_perm_indices indices (sel, 1, count);
6467 perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype,
6468 indices);
6469 gcc_assert (perm_mask != NULL_TREE);
6471 else if (known_eq (nunits, scatter_off_nunits * 2))
6473 modifier = NARROW;
6475 /* Currently gathers and scatters are only supported for
6476 fixed-length vectors. */
6477 unsigned int count = nunits.to_constant ();
6478 vec_perm_builder sel (count, count, 1);
6479 for (i = 0; i < (unsigned int) count; ++i)
6480 sel.quick_push (i | (count / 2));
6482 vec_perm_indices indices (sel, 2, count);
6483 perm_mask = vect_gen_perm_mask_checked (vectype, indices);
6484 gcc_assert (perm_mask != NULL_TREE);
6485 ncopies *= 2;
6487 else
6488 gcc_unreachable ();
6490 rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
6491 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6492 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6493 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6494 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6495 scaletype = TREE_VALUE (arglist);
6497 gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
6498 && TREE_CODE (rettype) == VOID_TYPE);
6500 ptr = fold_convert (ptrtype, gs_info.base);
6501 if (!is_gimple_min_invariant (ptr))
6503 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
6504 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
6505 gcc_assert (!new_bb);
6508 /* Currently we support only unconditional scatter stores,
6509 so mask should be all ones. */
6510 mask = build_int_cst (masktype, -1);
6511 mask = vect_init_vector (stmt_info, mask, masktype, NULL);
6513 scale = build_int_cst (scaletype, gs_info.scale);
6515 prev_stmt_info = NULL;
6516 for (j = 0; j < ncopies; ++j)
6518 if (j == 0)
6520 src = vec_oprnd1
6521 = vect_get_vec_def_for_operand (op, stmt_info);
6522 op = vec_oprnd0
6523 = vect_get_vec_def_for_operand (gs_info.offset, stmt_info);
6525 else if (modifier != NONE && (j & 1))
6527 if (modifier == WIDEN)
6529 src = vec_oprnd1
6530 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd1);
6531 op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
6532 stmt_info, gsi);
6534 else if (modifier == NARROW)
6536 src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask,
6537 stmt_info, gsi);
6538 op = vec_oprnd0
6539 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd0);
6541 else
6542 gcc_unreachable ();
6544 else
6546 src = vec_oprnd1
6547 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd1);
6548 op = vec_oprnd0
6549 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd0);
6552 if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
6554 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src)),
6555 TYPE_VECTOR_SUBPARTS (srctype)));
6556 var = vect_get_new_ssa_name (srctype, vect_simple_var);
6557 src = build1 (VIEW_CONVERT_EXPR, srctype, src);
6558 gassign *new_stmt
6559 = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
6560 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
6561 src = var;
6564 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
6566 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
6567 TYPE_VECTOR_SUBPARTS (idxtype)));
6568 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
6569 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
6570 gassign *new_stmt
6571 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
6572 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
6573 op = var;
6576 gcall *new_stmt
6577 = gimple_build_call (gs_info.decl, 5, ptr, mask, op, src, scale);
6578 stmt_vec_info new_stmt_info
6579 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
6581 if (prev_stmt_info == NULL)
6582 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
6583 else
6584 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
6585 prev_stmt_info = new_stmt_info;
6587 return true;
6590 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
6591 DR_GROUP_STORE_COUNT (DR_GROUP_FIRST_ELEMENT (stmt_info))++;
6593 if (grouped_store)
6595 /* FORNOW */
6596 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt_info));
6598 /* We vectorize all the stmts of the interleaving group when we
6599 reach the last stmt in the group. */
6600 if (DR_GROUP_STORE_COUNT (first_stmt_info)
6601 < DR_GROUP_SIZE (first_stmt_info)
6602 && !slp)
6604 *vec_stmt = NULL;
6605 return true;
6608 if (slp)
6610 grouped_store = false;
6611 /* VEC_NUM is the number of vect stmts to be created for this
6612 group. */
6613 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6614 first_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[0];
6615 gcc_assert (DR_GROUP_FIRST_ELEMENT (first_stmt_info)
6616 == first_stmt_info);
6617 first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
6618 op = vect_get_store_rhs (first_stmt_info);
6620 else
6621 /* VEC_NUM is the number of vect stmts to be created for this
6622 group. */
6623 vec_num = group_size;
6625 ref_type = get_group_alias_ptr_type (first_stmt_info);
6627 else
6628 ref_type = reference_alias_ptr_type (DR_REF (first_dr_info->dr));
6630 if (dump_enabled_p ())
6631 dump_printf_loc (MSG_NOTE, vect_location,
6632 "transform store. ncopies = %d\n", ncopies);
6634 if (memory_access_type == VMAT_ELEMENTWISE
6635 || memory_access_type == VMAT_STRIDED_SLP)
6637 gimple_stmt_iterator incr_gsi;
6638 bool insert_after;
6639 gimple *incr;
6640 tree offvar;
6641 tree ivstep;
6642 tree running_off;
6643 tree stride_base, stride_step, alias_off;
6644 tree vec_oprnd;
6645 unsigned int g;
6646 /* Checked by get_load_store_type. */
6647 unsigned int const_nunits = nunits.to_constant ();
6649 gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
6650 gcc_assert (!nested_in_vect_loop_p (loop, stmt_info));
6652 stride_base
6653 = fold_build_pointer_plus
6654 (DR_BASE_ADDRESS (first_dr_info->dr),
6655 size_binop (PLUS_EXPR,
6656 convert_to_ptrofftype (DR_OFFSET (first_dr_info->dr)),
6657 convert_to_ptrofftype (DR_INIT (first_dr_info->dr))));
6658 stride_step = fold_convert (sizetype, DR_STEP (first_dr_info->dr));
6660 /* For a store with loop-invariant (but other than power-of-2)
6661 stride (i.e. not a grouped access) like so:
6663 for (i = 0; i < n; i += stride)
6664 array[i] = ...;
6666 we generate a new induction variable and new stores from
6667 the components of the (vectorized) rhs:
6669 for (j = 0; ; j += VF*stride)
6670 vectemp = ...;
6671 tmp1 = vectemp[0];
6672 array[j] = tmp1;
6673 tmp2 = vectemp[1];
6674 array[j + stride] = tmp2;
6678 unsigned nstores = const_nunits;
6679 unsigned lnel = 1;
6680 tree ltype = elem_type;
6681 tree lvectype = vectype;
6682 if (slp)
6684 if (group_size < const_nunits
6685 && const_nunits % group_size == 0)
6687 nstores = const_nunits / group_size;
6688 lnel = group_size;
6689 ltype = build_vector_type (elem_type, group_size);
6690 lvectype = vectype;
6692 /* First check whether the vec_extract optab supports extraction of
6693 these vector elts directly; if not, use a fallback below. */
6694 scalar_mode elmode = SCALAR_TYPE_MODE (elem_type);
6695 machine_mode vmode;
6696 if (!mode_for_vector (elmode, group_size).exists (&vmode)
6697 || !VECTOR_MODE_P (vmode)
6698 || !targetm.vector_mode_supported_p (vmode)
6699 || (convert_optab_handler (vec_extract_optab,
6700 TYPE_MODE (vectype), vmode)
6701 == CODE_FOR_nothing))
6703 /* Try to avoid emitting an extract of vector elements
6704 by performing the extracts using an integer type of the
6705 same size, extracting from a vector of those and then
6706 re-interpreting it as the original vector type if
6707 supported. */
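/* Added concrete instance (assuming the target supports the modes
   involved): with a V16QI vectype and group_size == 2, lsize is 16
   bits, so the extracts are done from a vector of eight 16-bit
   integers and each 16-bit chunk is stored as one unit instead of
   two single-byte stores.  */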
6708 unsigned lsize
6709 = group_size * GET_MODE_BITSIZE (elmode);
6710 elmode = int_mode_for_size (lsize, 0).require ();
6711 unsigned int lnunits = const_nunits / group_size;
6712 /* If we can't construct such a vector fall back to
6713 element extracts from the original vector type and
6714 element size stores. */
6715 if (mode_for_vector (elmode, lnunits).exists (&vmode)
6716 && VECTOR_MODE_P (vmode)
6717 && targetm.vector_mode_supported_p (vmode)
6718 && (convert_optab_handler (vec_extract_optab,
6719 vmode, elmode)
6720 != CODE_FOR_nothing))
6722 nstores = lnunits;
6723 lnel = group_size;
6724 ltype = build_nonstandard_integer_type (lsize, 1);
6725 lvectype = build_vector_type (ltype, nstores);
6727 /* Else fall back to vector extraction anyway.
6728 Fewer stores are more important than avoiding spilling
6729 of the vector we extract from. Compared to the
6730 construction case in vectorizable_load no store-forwarding
6731 issue exists here for reasonable archs. */
6734 else if (group_size >= const_nunits
6735 && group_size % const_nunits == 0)
6737 nstores = 1;
6738 lnel = const_nunits;
6739 ltype = vectype;
6740 lvectype = vectype;
6742 ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
6743 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6746 ivstep = stride_step;
6747 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
6748 build_int_cst (TREE_TYPE (ivstep), vf));
6750 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
6752 stride_base = cse_and_gimplify_to_preheader (loop_vinfo, stride_base);
6753 ivstep = cse_and_gimplify_to_preheader (loop_vinfo, ivstep);
6754 create_iv (stride_base, ivstep, NULL,
6755 loop, &incr_gsi, insert_after,
6756 &offvar, NULL);
6757 incr = gsi_stmt (incr_gsi);
6758 loop_vinfo->add_stmt (incr);
6760 stride_step = cse_and_gimplify_to_preheader (loop_vinfo, stride_step);
6762 prev_stmt_info = NULL;
6763 alias_off = build_int_cst (ref_type, 0);
6764 stmt_vec_info next_stmt_info = first_stmt_info;
6765 for (g = 0; g < group_size; g++)
6767 running_off = offvar;
6768 if (g)
6770 tree size = TYPE_SIZE_UNIT (ltype);
6771 tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
6772 size);
6773 tree newoff = copy_ssa_name (running_off, NULL);
6774 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6775 running_off, pos);
6776 vect_finish_stmt_generation (stmt_info, incr, gsi);
6777 running_off = newoff;
6779 unsigned int group_el = 0;
6780 unsigned HOST_WIDE_INT
6781 elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
6782 for (j = 0; j < ncopies; j++)
6784 /* We've set op and dt above, from vect_get_store_rhs,
6785 and first_stmt_info == stmt_info. */
6786 if (j == 0)
6788 if (slp)
6790 vect_get_vec_defs (op, NULL_TREE, stmt_info,
6791 &vec_oprnds, NULL, slp_node);
6792 vec_oprnd = vec_oprnds[0];
6794 else
6796 op = vect_get_store_rhs (next_stmt_info);
6797 vec_oprnd = vect_get_vec_def_for_operand
6798 (op, next_stmt_info);
6801 else
6803 if (slp)
6804 vec_oprnd = vec_oprnds[j];
6805 else
6806 vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo,
6807 vec_oprnd);
6809 /* Pun the vector to extract from if necessary. */
6810 if (lvectype != vectype)
6812 tree tem = make_ssa_name (lvectype);
6813 gimple *pun
6814 = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
6815 lvectype, vec_oprnd));
6816 vect_finish_stmt_generation (stmt_info, pun, gsi);
6817 vec_oprnd = tem;
6819 for (i = 0; i < nstores; i++)
6821 tree newref, newoff;
6822 gimple *incr, *assign;
6823 tree size = TYPE_SIZE (ltype);
6824 /* Extract the i'th component. */
6825 tree pos = fold_build2 (MULT_EXPR, bitsizetype,
6826 bitsize_int (i), size);
6827 tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
6828 size, pos);
6830 elem = force_gimple_operand_gsi (gsi, elem, true,
6831 NULL_TREE, true,
6832 GSI_SAME_STMT);
6834 tree this_off = build_int_cst (TREE_TYPE (alias_off),
6835 group_el * elsz);
6836 newref = build2 (MEM_REF, ltype,
6837 running_off, this_off);
6838 vect_copy_ref_info (newref, DR_REF (first_dr_info->dr));
6840 /* And store it to *running_off. */
6841 assign = gimple_build_assign (newref, elem);
6842 stmt_vec_info assign_info
6843 = vect_finish_stmt_generation (stmt_info, assign, gsi);
6845 group_el += lnel;
6846 if (! slp
6847 || group_el == group_size)
6849 newoff = copy_ssa_name (running_off, NULL);
6850 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6851 running_off, stride_step);
6852 vect_finish_stmt_generation (stmt_info, incr, gsi);
6854 running_off = newoff;
6855 group_el = 0;
6857 if (g == group_size - 1
6858 && !slp)
6860 if (j == 0 && i == 0)
6861 STMT_VINFO_VEC_STMT (stmt_info)
6862 = *vec_stmt = assign_info;
6863 else
6864 STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign_info;
6865 prev_stmt_info = assign_info;
6869 next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
6870 if (slp)
6871 break;
6874 vec_oprnds.release ();
6875 return true;
6878 auto_vec<tree> dr_chain (group_size);
6879 oprnds.create (group_size);
6881 alignment_support_scheme
6882 = vect_supportable_dr_alignment (first_dr_info, false);
6883 gcc_assert (alignment_support_scheme);
6884 vec_loop_masks *loop_masks
6885 = (loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
6886 ? &LOOP_VINFO_MASKS (loop_vinfo)
6887 : NULL);
6888 /* Targets with store-lane instructions must not require explicit
6889 realignment. vect_supportable_dr_alignment always returns either
6890 dr_aligned or dr_unaligned_supported for masked operations. */
6891 gcc_assert ((memory_access_type != VMAT_LOAD_STORE_LANES
6892 && !mask
6893 && !loop_masks)
6894 || alignment_support_scheme == dr_aligned
6895 || alignment_support_scheme == dr_unaligned_supported);
6897 if (memory_access_type == VMAT_CONTIGUOUS_DOWN
6898 || memory_access_type == VMAT_CONTIGUOUS_REVERSE)
6899 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
6901 tree bump;
6902 tree vec_offset = NULL_TREE;
6903 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
6905 aggr_type = NULL_TREE;
6906 bump = NULL_TREE;
6908 else if (memory_access_type == VMAT_GATHER_SCATTER)
6910 aggr_type = elem_type;
6911 vect_get_strided_load_store_ops (stmt_info, loop_vinfo, &gs_info,
6912 &bump, &vec_offset);
6914 else
6916 if (memory_access_type == VMAT_LOAD_STORE_LANES)
6917 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
6918 else
6919 aggr_type = vectype;
6920 bump = vect_get_data_ptr_increment (dr_info, aggr_type,
6921 memory_access_type);
6924 if (mask)
6925 LOOP_VINFO_HAS_MASK_STORE (loop_vinfo) = true;
6927 /* In case the vectorization factor (VF) is bigger than the number
6928 of elements that we can fit in a vectype (nunits), we have to generate
6929 more than one vector stmt - i.e. - we need to "unroll" the
6930 vector stmt by a factor VF/nunits. For more details see documentation in
6931 vect_get_vec_def_for_copy_stmt. */
6933 /* In case of interleaving (non-unit grouped access):
6935 S1: &base + 2 = x2
6936 S2: &base = x0
6937 S3: &base + 1 = x1
6938 S4: &base + 3 = x3
6940 We create vectorized stores starting from base address (the access of the
6941 first stmt in the chain (S2 in the above example), when the last store stmt
6942 of the chain (S4) is reached:
6944 VS1: &base = vx2
6945 VS2: &base + vec_size*1 = vx0
6946 VS3: &base + vec_size*2 = vx1
6947 VS4: &base + vec_size*3 = vx3
6949 Then permutation statements are generated:
6951 VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
6952 VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
6955 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
6956 (the order of the data-refs in the output of vect_permute_store_chain
6957 corresponds to the order of scalar stmts in the interleaving chain - see
6958 the documentation of vect_permute_store_chain()).
6960 In case of both multiple types and interleaving, above vector stores and
6961 permutation stmts are created for every copy. The result vector stmts are
6962 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
6963 STMT_VINFO_RELATED_STMT for the next copies.
6966 prev_stmt_info = NULL;
6967 tree vec_mask = NULL_TREE;
6968 for (j = 0; j < ncopies; j++)
6970 stmt_vec_info new_stmt_info;
6971 if (j == 0)
6973 if (slp)
6975 /* Get vectorized arguments for SLP_NODE. */
6976 vect_get_vec_defs (op, NULL_TREE, stmt_info, &vec_oprnds,
6977 NULL, slp_node);
6979 vec_oprnd = vec_oprnds[0];
6981 else
6983 /* For interleaved stores we collect vectorized defs for all the
6984 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
6985 used as an input to vect_permute_store_chain(), and OPRNDS as
6986 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
6988 If the store is not grouped, DR_GROUP_SIZE is 1, and DR_CHAIN and
6989 OPRNDS are of size 1. */
6990 stmt_vec_info next_stmt_info = first_stmt_info;
6991 for (i = 0; i < group_size; i++)
6993 /* Since gaps are not supported for interleaved stores,
6994 DR_GROUP_SIZE is the exact number of stmts in the chain.
6995 Therefore, NEXT_STMT_INFO can't be NULL. In case
6996 there is no interleaving, DR_GROUP_SIZE is 1,
6997 and only one iteration of the loop will be executed. */
6998 op = vect_get_store_rhs (next_stmt_info);
6999 vec_oprnd = vect_get_vec_def_for_operand
7000 (op, next_stmt_info);
7001 dr_chain.quick_push (vec_oprnd);
7002 oprnds.quick_push (vec_oprnd);
7003 next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
7005 if (mask)
7006 vec_mask = vect_get_vec_def_for_operand (mask, stmt_info,
7007 mask_vectype);
7010 /* We should have caught mismatched types earlier. */
7011 gcc_assert (useless_type_conversion_p (vectype,
7012 TREE_TYPE (vec_oprnd)));
7013 bool simd_lane_access_p
7014 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
7015 if (simd_lane_access_p
7016 && TREE_CODE (DR_BASE_ADDRESS (first_dr_info->dr)) == ADDR_EXPR
7017 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr_info->dr), 0))
7018 && integer_zerop (DR_OFFSET (first_dr_info->dr))
7019 && integer_zerop (DR_INIT (first_dr_info->dr))
7020 && alias_sets_conflict_p (get_alias_set (aggr_type),
7021 get_alias_set (TREE_TYPE (ref_type))))
7023 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr_info->dr));
7024 dataref_offset = build_int_cst (ref_type, 0);
7025 inv_p = false;
7027 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
7029 vect_get_gather_scatter_ops (loop, stmt_info, &gs_info,
7030 &dataref_ptr, &vec_offset);
7031 inv_p = false;
7033 else
7034 dataref_ptr
7035 = vect_create_data_ref_ptr (first_stmt_info, aggr_type,
7036 simd_lane_access_p ? loop : NULL,
7037 offset, &dummy, gsi, &ptr_incr,
7038 simd_lane_access_p, &inv_p,
7039 NULL_TREE, bump);
7040 gcc_assert (bb_vinfo || !inv_p);
7042 else
7044 /* For interleaved stores we created vectorized defs for all the
7045 defs stored in OPRNDS in the previous iteration (previous copy).
7046 DR_CHAIN is then used as an input to vect_permute_store_chain(),
7047 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
7048 next copy.
7049 If the store is not grouped, DR_GROUP_SIZE is 1, and DR_CHAIN and
7050 OPRNDS are of size 1. */
7051 for (i = 0; i < group_size; i++)
7053 op = oprnds[i];
7054 vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, op);
7055 dr_chain[i] = vec_oprnd;
7056 oprnds[i] = vec_oprnd;
7058 if (mask)
7059 vec_mask = vect_get_vec_def_for_stmt_copy (vinfo, vec_mask);
7060 if (dataref_offset)
7061 dataref_offset
7062 = int_const_binop (PLUS_EXPR, dataref_offset, bump);
7063 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
7064 vec_offset = vect_get_vec_def_for_stmt_copy (vinfo, vec_offset);
7065 else
7066 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7067 stmt_info, bump);
7070 if (memory_access_type == VMAT_LOAD_STORE_LANES)
7072 tree vec_array;
7074 /* Get an array into which we can store the individual vectors. */
7075 vec_array = create_vector_array (vectype, vec_num);
7077 /* Invalidate the current contents of VEC_ARRAY. This should
7078 become an RTL clobber too, which prevents the vector registers
7079 from being upward-exposed. */
7080 vect_clobber_variable (stmt_info, gsi, vec_array);
7082 /* Store the individual vectors into the array. */
7083 for (i = 0; i < vec_num; i++)
7085 vec_oprnd = dr_chain[i];
7086 write_vector_array (stmt_info, gsi, vec_oprnd, vec_array, i);
7089 tree final_mask = NULL;
7090 if (loop_masks)
7091 final_mask = vect_get_loop_mask (gsi, loop_masks, ncopies,
7092 vectype, j);
7093 if (vec_mask)
7094 final_mask = prepare_load_store_mask (mask_vectype, final_mask,
7095 vec_mask, gsi);
7097 gcall *call;
7098 if (final_mask)
7100 /* Emit:
7101 MASK_STORE_LANES (DATAREF_PTR, ALIAS_PTR, VEC_MASK,
7102 VEC_ARRAY). */
7103 unsigned int align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
7104 tree alias_ptr = build_int_cst (ref_type, align);
7105 call = gimple_build_call_internal (IFN_MASK_STORE_LANES, 4,
7106 dataref_ptr, alias_ptr,
7107 final_mask, vec_array);
7109 else
7111 /* Emit:
7112 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
7113 data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
7114 call = gimple_build_call_internal (IFN_STORE_LANES, 1,
7115 vec_array);
7116 gimple_call_set_lhs (call, data_ref);
7118 gimple_call_set_nothrow (call, true);
7119 new_stmt_info = vect_finish_stmt_generation (stmt_info, call, gsi);
7121 /* Record that VEC_ARRAY is now dead. */
7122 vect_clobber_variable (stmt_info, gsi, vec_array);
7124 else
7126 new_stmt_info = NULL;
7127 if (grouped_store)
7129 if (j == 0)
7130 result_chain.create (group_size);
7131 /* Permute. */
7132 vect_permute_store_chain (dr_chain, group_size, stmt_info, gsi,
7133 &result_chain);
7136 stmt_vec_info next_stmt_info = first_stmt_info;
7137 for (i = 0; i < vec_num; i++)
7139 unsigned align, misalign;
7141 tree final_mask = NULL_TREE;
7142 if (loop_masks)
7143 final_mask = vect_get_loop_mask (gsi, loop_masks,
7144 vec_num * ncopies,
7145 vectype, vec_num * j + i);
7146 if (vec_mask)
7147 final_mask = prepare_load_store_mask (mask_vectype, final_mask,
7148 vec_mask, gsi);
7150 if (memory_access_type == VMAT_GATHER_SCATTER)
7152 tree scale = size_int (gs_info.scale);
7153 gcall *call;
7154 if (loop_masks)
7155 call = gimple_build_call_internal
7156 (IFN_MASK_SCATTER_STORE, 5, dataref_ptr, vec_offset,
7157 scale, vec_oprnd, final_mask);
7158 else
7159 call = gimple_build_call_internal
7160 (IFN_SCATTER_STORE, 4, dataref_ptr, vec_offset,
7161 scale, vec_oprnd);
7162 gimple_call_set_nothrow (call, true);
7163 new_stmt_info
7164 = vect_finish_stmt_generation (stmt_info, call, gsi);
7165 break;
7168 if (i > 0)
7169 /* Bump the vector pointer. */
7170 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7171 stmt_info, bump);
7173 if (slp)
7174 vec_oprnd = vec_oprnds[i];
7175 else if (grouped_store)
7176 /* For grouped stores vectorized defs are interleaved in
7177 vect_permute_store_chain(). */
7178 vec_oprnd = result_chain[i];
7180 align = DR_TARGET_ALIGNMENT (first_dr_info);
7181 if (aligned_access_p (first_dr_info))
7182 misalign = 0;
7183 else if (DR_MISALIGNMENT (first_dr_info) == -1)
7185 align = dr_alignment (vect_dr_behavior (first_dr_info));
7186 misalign = 0;
7188 else
7189 misalign = DR_MISALIGNMENT (first_dr_info);
7190 if (dataref_offset == NULL_TREE
7191 && TREE_CODE (dataref_ptr) == SSA_NAME)
7192 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
7193 misalign);
7195 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
7197 tree perm_mask = perm_mask_for_reverse (vectype);
7198 tree perm_dest = vect_create_destination_var
7199 (vect_get_store_rhs (stmt_info), vectype);
7200 tree new_temp = make_ssa_name (perm_dest);
7202 /* Generate the permute statement. */
7203 gimple *perm_stmt
7204 = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
7205 vec_oprnd, perm_mask);
7206 vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
7208 perm_stmt = SSA_NAME_DEF_STMT (new_temp);
7209 vec_oprnd = new_temp;
7212 /* Arguments are ready. Create the new vector stmt. */
7213 if (final_mask)
7215 align = least_bit_hwi (misalign | align);
7216 tree ptr = build_int_cst (ref_type, align);
7217 gcall *call
7218 = gimple_build_call_internal (IFN_MASK_STORE, 4,
7219 dataref_ptr, ptr,
7220 final_mask, vec_oprnd);
7221 gimple_call_set_nothrow (call, true);
7222 new_stmt_info
7223 = vect_finish_stmt_generation (stmt_info, call, gsi);
7225 else
7227 data_ref = fold_build2 (MEM_REF, vectype,
7228 dataref_ptr,
7229 dataref_offset
7230 ? dataref_offset
7231 : build_int_cst (ref_type, 0));
7232 if (aligned_access_p (first_dr_info))
7234 else if (DR_MISALIGNMENT (first_dr_info) == -1)
7235 TREE_TYPE (data_ref)
7236 = build_aligned_type (TREE_TYPE (data_ref),
7237 align * BITS_PER_UNIT);
7238 else
7239 TREE_TYPE (data_ref)
7240 = build_aligned_type (TREE_TYPE (data_ref),
7241 TYPE_ALIGN (elem_type));
7242 vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr));
7243 gassign *new_stmt
7244 = gimple_build_assign (data_ref, vec_oprnd);
7245 new_stmt_info
7246 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
7249 if (slp)
7250 continue;
7252 next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
7253 if (!next_stmt_info)
7254 break;
7257 if (!slp)
7259 if (j == 0)
7260 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
7261 else
7262 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
7263 prev_stmt_info = new_stmt_info;
7267 oprnds.release ();
7268 result_chain.release ();
7269 vec_oprnds.release ();
7271 return true;
7274 /* Given a vector type VECTYPE, turns permutation SEL into the equivalent
7275 VECTOR_CST mask. No checks are made that the target platform supports the
7276 mask, so callers may wish to test can_vec_perm_const_p separately, or use
7277 vect_gen_perm_mask_checked. */
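/* For example (purely illustrative): reversing a 4-element vector uses
   SEL = { 3, 2, 1, 0 }, which vect_gen_perm_mask_any encodes as the
   VECTOR_CST { 3, 2, 1, 0 } of type vector(4) ssizetype, suitable as
   the selector operand of a VEC_PERM_EXPR.  */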
7279 tree
7280 vect_gen_perm_mask_any (tree vectype, const vec_perm_indices &sel)
7282 tree mask_type;
7284 poly_uint64 nunits = sel.length ();
7285 gcc_assert (known_eq (nunits, TYPE_VECTOR_SUBPARTS (vectype)));
7287 mask_type = build_vector_type (ssizetype, nunits);
7288 return vec_perm_indices_to_tree (mask_type, sel);
7291 /* Checked version of vect_gen_perm_mask_any. Asserts can_vec_perm_const_p,
7292 i.e. that the target supports the pattern _for arbitrary input vectors_. */
7294 tree
7295 vect_gen_perm_mask_checked (tree vectype, const vec_perm_indices &sel)
7297 gcc_assert (can_vec_perm_const_p (TYPE_MODE (vectype), sel));
7298 return vect_gen_perm_mask_any (vectype, sel);
7301 /* Given vector variables X and Y that were generated for the scalar
7302 STMT_INFO, generate instructions to permute the vector elements of X and Y
7303 using permutation mask MASK_VEC, insert them at *GSI and return the
7304 permuted vector variable. */
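/* A sketch of the emitted GIMPLE (the SSA name is illustrative):
     perm_dest_1 = VEC_PERM_EXPR <X, Y, MASK_VEC>;
   where perm_dest_1 is a fresh SSA name with the vector type of X.  */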
7306 static tree
7307 permute_vec_elements (tree x, tree y, tree mask_vec, stmt_vec_info stmt_info,
7308 gimple_stmt_iterator *gsi)
7310 tree vectype = TREE_TYPE (x);
7311 tree perm_dest, data_ref;
7312 gimple *perm_stmt;
7314 tree scalar_dest = gimple_get_lhs (stmt_info->stmt);
7315 if (TREE_CODE (scalar_dest) == SSA_NAME)
7316 perm_dest = vect_create_destination_var (scalar_dest, vectype);
7317 else
7318 perm_dest = vect_get_new_vect_var (vectype, vect_simple_var, NULL);
7319 data_ref = make_ssa_name (perm_dest);
7321 /* Generate the permute statement. */
7322 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
7323 vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
7325 return data_ref;
7328 /* Hoist the definitions of all SSA uses on STMT_INFO out of the loop LOOP,
7329 inserting them on the loop's preheader edge. Returns true if we
7330 were successful in doing so (and thus STMT_INFO can then be moved),
7331 otherwise returns false. */
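/* Illustrative (hypothetical GIMPLE): if STMT_INFO is the load
   _3 = a[n_1] and n_1 is defined inside LOOP by n_1 = m_2 * 4 with m_2
   defined outside the loop, the definition of n_1 is moved to the
   preheader.  If such a def is a PHI or itself uses values defined
   inside the loop, we give up and return false.  */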
7333 static bool
7334 hoist_defs_of_uses (stmt_vec_info stmt_info, struct loop *loop)
7336 ssa_op_iter i;
7337 tree op;
7338 bool any = false;
7340 FOR_EACH_SSA_TREE_OPERAND (op, stmt_info->stmt, i, SSA_OP_USE)
7342 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
7343 if (!gimple_nop_p (def_stmt)
7344 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
7346 /* Make sure we don't need to recurse. While we could do
7347 so in simple cases, when there are more complex use webs
7348 we don't have an easy way to preserve stmt order to fulfil
7349 dependencies within them. */
7350 tree op2;
7351 ssa_op_iter i2;
7352 if (gimple_code (def_stmt) == GIMPLE_PHI)
7353 return false;
7354 FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
7356 gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
7357 if (!gimple_nop_p (def_stmt2)
7358 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
7359 return false;
7361 any = true;
7365 if (!any)
7366 return true;
7368 FOR_EACH_SSA_TREE_OPERAND (op, stmt_info->stmt, i, SSA_OP_USE)
7370 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
7371 if (!gimple_nop_p (def_stmt)
7372 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
7374 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
7375 gsi_remove (&gsi, false);
7376 gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
7380 return true;
7383 /* vectorizable_load.
7385 Check if STMT_INFO reads a non-scalar data-ref (array/pointer/structure)
7386 that can be vectorized.
7387 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
7388 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
7389 Return true if STMT_INFO is vectorizable in this way. */
7391 static bool
7392 vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
7393 stmt_vec_info *vec_stmt, slp_tree slp_node,
7394 slp_instance slp_node_instance,
7395 stmt_vector_for_cost *cost_vec)
7397 tree scalar_dest;
7398 tree vec_dest = NULL;
7399 tree data_ref = NULL;
7400 stmt_vec_info prev_stmt_info;
7401 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7402 struct loop *loop = NULL;
7403 struct loop *containing_loop = gimple_bb (stmt_info->stmt)->loop_father;
7404 bool nested_in_vect_loop = false;
7405 tree elem_type;
7406 tree new_temp;
7407 machine_mode mode;
7408 tree dummy;
7409 enum dr_alignment_support alignment_support_scheme;
7410 tree dataref_ptr = NULL_TREE;
7411 tree dataref_offset = NULL_TREE;
7412 gimple *ptr_incr = NULL;
7413 int ncopies;
7414 int i, j;
7415 unsigned int group_size;
7416 poly_uint64 group_gap_adj;
7417 tree msq = NULL_TREE, lsq;
7418 tree offset = NULL_TREE;
7419 tree byte_offset = NULL_TREE;
7420 tree realignment_token = NULL_TREE;
7421 gphi *phi = NULL;
7422 vec<tree> dr_chain = vNULL;
7423 bool grouped_load = false;
7424 stmt_vec_info first_stmt_info;
7425 stmt_vec_info first_stmt_info_for_drptr = NULL;
7426 bool inv_p;
7427 bool compute_in_loop = false;
7428 struct loop *at_loop;
7429 int vec_num;
7430 bool slp = (slp_node != NULL);
7431 bool slp_perm = false;
7432 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7433 poly_uint64 vf;
7434 tree aggr_type;
7435 gather_scatter_info gs_info;
7436 vec_info *vinfo = stmt_info->vinfo;
7437 tree ref_type;
7438 enum vect_def_type mask_dt = vect_unknown_def_type;
7440 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
7441 return false;
7443 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
7444 && ! vec_stmt)
7445 return false;
7447 tree mask = NULL_TREE, mask_vectype = NULL_TREE;
7448 if (gassign *assign = dyn_cast <gassign *> (stmt_info->stmt))
7450 scalar_dest = gimple_assign_lhs (assign);
7451 if (TREE_CODE (scalar_dest) != SSA_NAME)
7452 return false;
7454 tree_code code = gimple_assign_rhs_code (assign);
7455 if (code != ARRAY_REF
7456 && code != BIT_FIELD_REF
7457 && code != INDIRECT_REF
7458 && code != COMPONENT_REF
7459 && code != IMAGPART_EXPR
7460 && code != REALPART_EXPR
7461 && code != MEM_REF
7462 && TREE_CODE_CLASS (code) != tcc_declaration)
7463 return false;
7465 else
7467 gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
7468 if (!call || !gimple_call_internal_p (call))
7469 return false;
7471 internal_fn ifn = gimple_call_internal_fn (call);
7472 if (!internal_load_fn_p (ifn))
7473 return false;
7475 scalar_dest = gimple_call_lhs (call);
7476 if (!scalar_dest)
7477 return false;
7479 if (slp_node != NULL)
7481 if (dump_enabled_p ())
7482 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7483 "SLP of masked loads not supported.\n");
7484 return false;
7487 int mask_index = internal_fn_mask_index (ifn);
7488 if (mask_index >= 0)
7490 mask = gimple_call_arg (call, mask_index);
7491 if (!vect_check_load_store_mask (stmt_info, mask, &mask_dt,
7492 &mask_vectype))
7493 return false;
7497 if (!STMT_VINFO_DATA_REF (stmt_info))
7498 return false;
7500 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7501 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
7503 if (loop_vinfo)
7505 loop = LOOP_VINFO_LOOP (loop_vinfo);
7506 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt_info);
7507 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
7509 else
7510 vf = 1;
7512 /* Multiple types in SLP are handled by creating the appropriate number of
7513 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
7514 case of SLP. */
7515 if (slp)
7516 ncopies = 1;
7517 else
7518 ncopies = vect_get_num_copies (loop_vinfo, vectype);
7520 gcc_assert (ncopies >= 1);
7522 /* FORNOW. This restriction should be relaxed. */
7523 if (nested_in_vect_loop && ncopies > 1)
7525 if (dump_enabled_p ())
7526 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7527 "multiple types in nested loop.\n");
7528 return false;
7531 /* Invalidate assumptions made by dependence analysis when vectorization
7532 on the unrolled body effectively re-orders stmts. */
7533 if (ncopies > 1
7534 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
7535 && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
7536 STMT_VINFO_MIN_NEG_DIST (stmt_info)))
7538 if (dump_enabled_p ())
7539 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7540 "cannot perform implicit CSE when unrolling "
7541 "with negative dependence distance\n");
7542 return false;
7545 elem_type = TREE_TYPE (vectype);
7546 mode = TYPE_MODE (vectype);
7548 /* FORNOW. In some cases we can vectorize even if the data-type is
7549 not supported (e.g. data copies). */
7550 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
7552 if (dump_enabled_p ())
7553 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7554 "Aligned load, but unsupported type.\n");
7555 return false;
7558 /* Check if the load is a part of an interleaving chain. */
7559 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
7561 grouped_load = true;
7562 /* FORNOW */
7563 gcc_assert (!nested_in_vect_loop);
7564 gcc_assert (!STMT_VINFO_GATHER_SCATTER_P (stmt_info));
7566 first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
7567 group_size = DR_GROUP_SIZE (first_stmt_info);
7569 if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
7570 slp_perm = true;
7572 /* Invalidate assumptions made by dependence analysis when vectorization
7573 on the unrolled body effectively re-orders stmts. */
7574 if (!PURE_SLP_STMT (stmt_info)
7575 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
7576 && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
7577 STMT_VINFO_MIN_NEG_DIST (stmt_info)))
7579 if (dump_enabled_p ())
7580 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7581 "cannot perform implicit CSE when performing "
7582 "group loads with negative dependence distance\n");
7583 return false;
7586 /* Similarly when the stmt is a load that is both part of an SLP
7587 instance and a loop vectorized stmt via the same-dr mechanism
7588 we have to give up. */
7589 if (DR_GROUP_SAME_DR_STMT (stmt_info)
7590 && (STMT_SLP_TYPE (stmt_info)
7591 != STMT_SLP_TYPE (DR_GROUP_SAME_DR_STMT (stmt_info))))
7593 if (dump_enabled_p ())
7594 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7595 "conflicting SLP types for CSEd load\n");
7596 return false;
7599 else
7600 group_size = 1;
7602 vect_memory_access_type memory_access_type;
7603 if (!get_load_store_type (stmt_info, vectype, slp, mask, VLS_LOAD, ncopies,
7604 &memory_access_type, &gs_info))
7605 return false;
7607 if (mask)
7609 if (memory_access_type == VMAT_CONTIGUOUS)
7611 machine_mode vec_mode = TYPE_MODE (vectype);
7612 if (!VECTOR_MODE_P (vec_mode)
7613 || !can_vec_mask_load_store_p (vec_mode,
7614 TYPE_MODE (mask_vectype), true))
7615 return false;
7617 else if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl)
7619 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
7620 tree masktype
7621 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
7622 if (TREE_CODE (masktype) == INTEGER_TYPE)
7624 if (dump_enabled_p ())
7625 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7626 "masked gather with integer mask not"
7627 " supported.");
7628 return false;
7631 else if (memory_access_type != VMAT_LOAD_STORE_LANES
7632 && memory_access_type != VMAT_GATHER_SCATTER)
7634 if (dump_enabled_p ())
7635 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7636 "unsupported access type for masked load.\n");
7637 return false;
7641 if (!vec_stmt) /* transformation not required. */
7643 if (!slp)
7644 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
7646 if (loop_vinfo
7647 && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
7648 check_load_store_masking (loop_vinfo, vectype, VLS_LOAD, group_size,
7649 memory_access_type, &gs_info);
7651 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
7652 vect_model_load_cost (stmt_info, ncopies, memory_access_type,
7653 slp_node_instance, slp_node, cost_vec);
7654 return true;
7657 if (!slp)
7658 gcc_assert (memory_access_type
7659 == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
7661 if (dump_enabled_p ())
7662 dump_printf_loc (MSG_NOTE, vect_location,
7663 "transform load. ncopies = %d\n", ncopies);
7665 /* Transform. */
7667 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info), *first_dr_info = NULL;
7668 ensure_base_align (dr_info);
7670 if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl)
7672 vect_build_gather_load_calls (stmt_info, gsi, vec_stmt, &gs_info, mask);
7673 return true;
7676 if (memory_access_type == VMAT_ELEMENTWISE
7677 || memory_access_type == VMAT_STRIDED_SLP)
7679 gimple_stmt_iterator incr_gsi;
7680 bool insert_after;
7681 gimple *incr;
7682 tree offvar;
7683 tree ivstep;
7684 tree running_off;
7685 vec<constructor_elt, va_gc> *v = NULL;
7686 tree stride_base, stride_step, alias_off;
7687 /* Checked by get_load_store_type. */
7688 unsigned int const_nunits = nunits.to_constant ();
7689 unsigned HOST_WIDE_INT cst_offset = 0;
7691 gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
7692 gcc_assert (!nested_in_vect_loop);
7694 if (grouped_load)
7696 first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
7697 first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
7699 else
7701 first_stmt_info = stmt_info;
7702 first_dr_info = dr_info;
7704 if (slp && grouped_load)
7706 group_size = DR_GROUP_SIZE (first_stmt_info);
7707 ref_type = get_group_alias_ptr_type (first_stmt_info);
7709 else
7711 if (grouped_load)
7712 cst_offset
7713 = (tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)))
7714 * vect_get_place_in_interleaving_chain (stmt_info,
7715 first_stmt_info));
7716 group_size = 1;
7717 ref_type = reference_alias_ptr_type (DR_REF (dr_info->dr));
7720 stride_base
7721 = fold_build_pointer_plus
7722 (DR_BASE_ADDRESS (first_dr_info->dr),
7723 size_binop (PLUS_EXPR,
7724 convert_to_ptrofftype (DR_OFFSET (first_dr_info->dr)),
7725 convert_to_ptrofftype (DR_INIT (first_dr_info->dr))));
7726 stride_step = fold_convert (sizetype, DR_STEP (first_dr_info->dr));
7728 /* For a load with loop-invariant (but other than power-of-2)
7729 stride (i.e. not a grouped access) like so:
7731 for (i = 0; i < n; i += stride)
7732 ... = array[i];
7734 we generate a new induction variable and new accesses to
7735 form a new vector (or vectors, depending on ncopies):
7737 for (j = 0; ; j += VF*stride)
7738 tmp1 = array[j];
7739 tmp2 = array[j + stride];
7741 vectemp = {tmp1, tmp2, ...}
7744 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
7745 build_int_cst (TREE_TYPE (stride_step), vf));
7747 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
7749 stride_base = cse_and_gimplify_to_preheader (loop_vinfo, stride_base);
7750 ivstep = cse_and_gimplify_to_preheader (loop_vinfo, ivstep);
7751 create_iv (stride_base, ivstep, NULL,
7752 loop, &incr_gsi, insert_after,
7753 &offvar, NULL);
7754 incr = gsi_stmt (incr_gsi);
7755 loop_vinfo->add_stmt (incr);
7757 stride_step = cse_and_gimplify_to_preheader (loop_vinfo, stride_step);
7759 prev_stmt_info = NULL;
7760 running_off = offvar;
7761 alias_off = build_int_cst (ref_type, 0);
7762 int nloads = const_nunits;
7763 int lnel = 1;
7764 tree ltype = TREE_TYPE (vectype);
7765 tree lvectype = vectype;
7766 auto_vec<tree> dr_chain;
7767 if (memory_access_type == VMAT_STRIDED_SLP)
7769 if (group_size < const_nunits)
7771 /* First check if vec_init optab supports construction from
7772 vector elts directly. */
7773 scalar_mode elmode = SCALAR_TYPE_MODE (TREE_TYPE (vectype));
7774 machine_mode vmode;
7775 if (mode_for_vector (elmode, group_size).exists (&vmode)
7776 && VECTOR_MODE_P (vmode)
7777 && targetm.vector_mode_supported_p (vmode)
7778 && (convert_optab_handler (vec_init_optab,
7779 TYPE_MODE (vectype), vmode)
7780 != CODE_FOR_nothing))
7782 nloads = const_nunits / group_size;
7783 lnel = group_size;
7784 ltype = build_vector_type (TREE_TYPE (vectype), group_size);
7786 else
7788 /* Otherwise avoid emitting a constructor of vector elements
7789 by performing the loads using an integer type of the same
7790 size, constructing a vector of those and then
7791 re-interpreting it as the original vector type.
7792 This avoids a huge runtime penalty due to the general
7793 inability to perform store forwarding from smaller stores
7794 to a larger load. */
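/* For instance (illustrative only, and only taken when the direct
   vector-from-vector construction above is not supported): with a
   group of two 'short' elements and a V8HI vectype, each pair is
   loaded as one 32-bit integer, four such integers are collected into
   a 4 x 32-bit vector, and the result is VIEW_CONVERT_EXPRed back to
   V8HI.  */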
7795 unsigned lsize
7796 = group_size * TYPE_PRECISION (TREE_TYPE (vectype));
7797 elmode = int_mode_for_size (lsize, 0).require ();
7798 unsigned int lnunits = const_nunits / group_size;
7799 /* If we can't construct such a vector, fall back to
7800 element loads of the original vector type. */
7801 if (mode_for_vector (elmode, lnunits).exists (&vmode)
7802 && VECTOR_MODE_P (vmode)
7803 && targetm.vector_mode_supported_p (vmode)
7804 && (convert_optab_handler (vec_init_optab, vmode, elmode)
7805 != CODE_FOR_nothing))
7807 nloads = lnunits;
7808 lnel = group_size;
7809 ltype = build_nonstandard_integer_type (lsize, 1);
7810 lvectype = build_vector_type (ltype, nloads);
7814 else
7816 nloads = 1;
7817 lnel = const_nunits;
7818 ltype = vectype;
7820 ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
7822 /* For an element-wise access where the vectype has just one element, load the whole vector(1) scalar_type at once. */
7823 else if (nloads == 1)
7824 ltype = vectype;
7826 if (slp)
7828 /* For SLP permutation support we need to load the whole group,
7829 not only the number of vector stmts the permutation result
7830 fits in. */
7831 if (slp_perm)
7833 /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for
7834 variable VF. */
7835 unsigned int const_vf = vf.to_constant ();
7836 ncopies = CEIL (group_size * const_vf, const_nunits);
7837 dr_chain.create (ncopies);
7839 else
7840 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7842 unsigned int group_el = 0;
7843 unsigned HOST_WIDE_INT
7844 elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
7845 for (j = 0; j < ncopies; j++)
7847 if (nloads > 1)
7848 vec_alloc (v, nloads);
7849 stmt_vec_info new_stmt_info = NULL;
7850 for (i = 0; i < nloads; i++)
7852 tree this_off = build_int_cst (TREE_TYPE (alias_off),
7853 group_el * elsz + cst_offset);
7854 tree data_ref = build2 (MEM_REF, ltype, running_off, this_off);
7855 vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr));
7856 gassign *new_stmt
7857 = gimple_build_assign (make_ssa_name (ltype), data_ref);
7858 new_stmt_info
7859 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
7860 if (nloads > 1)
7861 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE,
7862 gimple_assign_lhs (new_stmt));
7864 group_el += lnel;
7865 if (! slp
7866 || group_el == group_size)
7868 tree newoff = copy_ssa_name (running_off);
7869 gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
7870 running_off, stride_step);
7871 vect_finish_stmt_generation (stmt_info, incr, gsi);
7873 running_off = newoff;
7874 group_el = 0;
7877 if (nloads > 1)
7879 tree vec_inv = build_constructor (lvectype, v);
7880 new_temp = vect_init_vector (stmt_info, vec_inv, lvectype, gsi);
7881 new_stmt_info = vinfo->lookup_def (new_temp);
7882 if (lvectype != vectype)
7884 gassign *new_stmt
7885 = gimple_build_assign (make_ssa_name (vectype),
7886 VIEW_CONVERT_EXPR,
7887 build1 (VIEW_CONVERT_EXPR,
7888 vectype, new_temp));
7889 new_stmt_info
7890 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
7894 if (slp)
7896 if (slp_perm)
7897 dr_chain.quick_push (gimple_assign_lhs (new_stmt_info->stmt));
7898 else
7899 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
7901 else
7903 if (j == 0)
7904 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
7905 else
7906 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
7907 prev_stmt_info = new_stmt_info;
7910 if (slp_perm)
7912 unsigned n_perms;
7913 vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
7914 slp_node_instance, false, &n_perms);
7916 return true;
7919 if (memory_access_type == VMAT_GATHER_SCATTER
7920 || (!slp && memory_access_type == VMAT_CONTIGUOUS))
7921 grouped_load = false;
7923 if (grouped_load)
7925 first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
7926 group_size = DR_GROUP_SIZE (first_stmt_info);
7927 /* For SLP vectorization we directly vectorize a subchain
7928 without permutation. */
7929 if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
7930 first_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[0];
7931 /* For BB vectorization always use the first stmt to base
7932 the data ref pointer on. */
7933 if (bb_vinfo)
7934 first_stmt_info_for_drptr = SLP_TREE_SCALAR_STMTS (slp_node)[0];
7936 /* Check if the chain of loads is already vectorized. */
7937 if (STMT_VINFO_VEC_STMT (first_stmt_info)
7938 /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
7939 ??? But we can only do so if there is exactly one,
7940 as we have no way to get at the rest. Leave the CSE
7941 opportunity alone.
7942 ??? With the group load eventually participating
7943 in multiple different permutations (having multiple
7944 slp nodes which refer to the same group) the CSE
7946 would even be wrong code. See PR56270. */
7946 && !slp)
7948 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
7949 return true;
7951 first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
7952 group_gap_adj = 0;
7954 /* VEC_NUM is the number of vect stmts to be created for this group. */
7955 if (slp)
7957 grouped_load = false;
7958 /* For SLP permutation support we need to load the whole group,
7959 not only the number of vector stmts the permutation result
7960 fits in. */
7961 if (slp_perm)
7963 /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for
7964 variable VF. */
7965 unsigned int const_vf = vf.to_constant ();
7966 unsigned int const_nunits = nunits.to_constant ();
7967 vec_num = CEIL (group_size * const_vf, const_nunits);
7968 group_gap_adj = vf * group_size - nunits * vec_num;
7970 else
7972 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7973 group_gap_adj
7974 = group_size - SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
7977 else
7978 vec_num = group_size;
7980 ref_type = get_group_alias_ptr_type (first_stmt_info);
7982 else
7984 first_stmt_info = stmt_info;
7985 first_dr_info = dr_info;
7986 group_size = vec_num = 1;
7987 group_gap_adj = 0;
7988 ref_type = reference_alias_ptr_type (DR_REF (first_dr_info->dr));
7991 alignment_support_scheme
7992 = vect_supportable_dr_alignment (first_dr_info, false);
7993 gcc_assert (alignment_support_scheme);
7994 vec_loop_masks *loop_masks
7995 = (loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
7996 ? &LOOP_VINFO_MASKS (loop_vinfo)
7997 : NULL);
7998 /* Targets with load-lane instructions must not require explicit
7999 realignment. vect_supportable_dr_alignment always returns either
8000 dr_aligned or dr_unaligned_supported for masked operations. */
8001 gcc_assert ((memory_access_type != VMAT_LOAD_STORE_LANES
8002 && !mask
8003 && !loop_masks)
8004 || alignment_support_scheme == dr_aligned
8005 || alignment_support_scheme == dr_unaligned_supported);
8007 /* In case the vectorization factor (VF) is bigger than the number
8008 of elements that we can fit in a vectype (nunits), we have to generate
8009 more than one vector stmt - i.e., we need to "unroll" the
8010 vector stmt by a factor VF/nunits. In doing so, we record a pointer
8011 from one copy of the vector stmt to the next, in the field
8012 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
8013 stages to find the correct vector defs to be used when vectorizing
8014 stmts that use the defs of the current stmt. The example below
8015 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
8016 need to create 4 vectorized stmts):
8018 before vectorization:
8019 RELATED_STMT VEC_STMT
8020 S1: x = memref - -
8021 S2: z = x + 1 - -
8023 step 1: vectorize stmt S1:
8024 We first create the vector stmt VS1_0, and, as usual, record a
8025 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
8026 Next, we create the vector stmt VS1_1, and record a pointer to
8027 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
8028 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
8029 stmts and pointers:
8030 RELATED_STMT VEC_STMT
8031 VS1_0: vx0 = memref0 VS1_1 -
8032 VS1_1: vx1 = memref1 VS1_2 -
8033 VS1_2: vx2 = memref2 VS1_3 -
8034 VS1_3: vx3 = memref3 - -
8035 S1: x = load - VS1_0
8036 S2: z = x + 1 - -
8038 See the documentation of vect_get_vec_def_for_stmt_copy for how the
8039 information we recorded in the RELATED_STMT field is used to vectorize
8040 stmt S2. */
8042 /* In case of interleaving (non-unit grouped access):
8044 S1: x2 = &base + 2
8045 S2: x0 = &base
8046 S3: x1 = &base + 1
8047 S4: x3 = &base + 3
8049 Vectorized loads are created in the order of memory accesses
8050 starting from the access of the first stmt of the chain:
8052 VS1: vx0 = &base
8053 VS2: vx1 = &base + vec_size*1
8054 VS3: vx3 = &base + vec_size*2
8055 VS4: vx4 = &base + vec_size*3
8057 Then permutation statements are generated:
8059 VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
8060 VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
8063 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
8064 (the order of the data-refs in the output of vect_permute_load_chain
8065 corresponds to the order of scalar stmts in the interleaving chain - see
8066 the documentation of vect_permute_load_chain()).
8067 The generation of permutation stmts and recording them in
8068 STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().
8070 In case of both multiple types and interleaving, the vector loads and
8071 permutation stmts above are created for every copy. The result vector
8072 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
8073 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
8075 /* If the data reference is aligned (dr_aligned) or potentially unaligned
8076 on a target that supports unaligned accesses (dr_unaligned_supported)
8077 we generate the following code:
8078 p = initial_addr;
8079 indx = 0;
8080 loop {
8081 p = p + indx * vectype_size;
8082 vec_dest = *(p);
8083 indx = indx + 1;
8086 Otherwise, the data reference is potentially unaligned on a target that
8087 does not support unaligned accesses (dr_explicit_realign_optimized) -
8088 then generate the following code, in which the data in each iteration is
8089 obtained by two vector loads, one from the previous iteration, and one
8090 from the current iteration:
8091 p1 = initial_addr;
8092 msq_init = *(floor(p1))
8093 p2 = initial_addr + VS - 1;
8094 realignment_token = call target_builtin;
8095 indx = 0;
8096 loop {
8097 p2 = p2 + indx * vectype_size
8098 lsq = *(floor(p2))
8099 vec_dest = realign_load (msq, lsq, realignment_token)
8100 indx = indx + 1;
8101 msq = lsq;
8102 } */
8104 /* If the misalignment remains the same throughout the execution of the
8105 loop, we can create the init_addr and permutation mask at the loop
8106 preheader. Otherwise, they need to be created inside the loop.
8107 This can only occur when vectorizing memory accesses in the inner-loop
8108 nested within an outer-loop that is being vectorized. */
8110 if (nested_in_vect_loop
8111 && !multiple_p (DR_STEP_ALIGNMENT (dr_info->dr),
8112 GET_MODE_SIZE (TYPE_MODE (vectype))))
8114 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
8115 compute_in_loop = true;
8118 if ((alignment_support_scheme == dr_explicit_realign_optimized
8119 || alignment_support_scheme == dr_explicit_realign)
8120 && !compute_in_loop)
8122 msq = vect_setup_realignment (first_stmt_info, gsi, &realignment_token,
8123 alignment_support_scheme, NULL_TREE,
8124 &at_loop);
8125 if (alignment_support_scheme == dr_explicit_realign_optimized)
8127 phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
8128 byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
8129 size_one_node);
8132 else
8133 at_loop = loop;
8135 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
8136 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
8138 tree bump;
8139 tree vec_offset = NULL_TREE;
8140 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
8142 aggr_type = NULL_TREE;
8143 bump = NULL_TREE;
8145 else if (memory_access_type == VMAT_GATHER_SCATTER)
8147 aggr_type = elem_type;
8148 vect_get_strided_load_store_ops (stmt_info, loop_vinfo, &gs_info,
8149 &bump, &vec_offset);
8151 else
8153 if (memory_access_type == VMAT_LOAD_STORE_LANES)
8154 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
8155 else
8156 aggr_type = vectype;
8157 bump = vect_get_data_ptr_increment (dr_info, aggr_type,
8158 memory_access_type);
8161 tree vec_mask = NULL_TREE;
8162 prev_stmt_info = NULL;
8163 poly_uint64 group_elt = 0;
8164 for (j = 0; j < ncopies; j++)
8166 stmt_vec_info new_stmt_info = NULL;
8167 /* 1. Create the vector or array pointer update chain. */
8168 if (j == 0)
8170 bool simd_lane_access_p
8171 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
8172 if (simd_lane_access_p
8173 && TREE_CODE (DR_BASE_ADDRESS (first_dr_info->dr)) == ADDR_EXPR
8174 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr_info->dr), 0))
8175 && integer_zerop (DR_OFFSET (first_dr_info->dr))
8176 && integer_zerop (DR_INIT (first_dr_info->dr))
8177 && alias_sets_conflict_p (get_alias_set (aggr_type),
8178 get_alias_set (TREE_TYPE (ref_type)))
8179 && (alignment_support_scheme == dr_aligned
8180 || alignment_support_scheme == dr_unaligned_supported))
8182 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr_info->dr));
8183 dataref_offset = build_int_cst (ref_type, 0);
8184 inv_p = false;
8186 else if (first_stmt_info_for_drptr
8187 && first_stmt_info != first_stmt_info_for_drptr)
8189 dataref_ptr
8190 = vect_create_data_ref_ptr (first_stmt_info_for_drptr,
8191 aggr_type, at_loop, offset, &dummy,
8192 gsi, &ptr_incr, simd_lane_access_p,
8193 &inv_p, byte_offset, bump);
8194 /* Adjust the pointer by the difference to first_stmt. */
8195 data_reference_p ptrdr
8196 = STMT_VINFO_DATA_REF (first_stmt_info_for_drptr);
8197 tree diff
8198 = fold_convert (sizetype,
8199 size_binop (MINUS_EXPR,
8200 DR_INIT (first_dr_info->dr),
8201 DR_INIT (ptrdr)));
8202 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
8203 stmt_info, diff);
8205 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
8207 vect_get_gather_scatter_ops (loop, stmt_info, &gs_info,
8208 &dataref_ptr, &vec_offset);
8209 inv_p = false;
8211 else
8212 dataref_ptr
8213 = vect_create_data_ref_ptr (first_stmt_info, aggr_type, at_loop,
8214 offset, &dummy, gsi, &ptr_incr,
8215 simd_lane_access_p, &inv_p,
8216 byte_offset, bump);
8217 if (mask)
8218 vec_mask = vect_get_vec_def_for_operand (mask, stmt_info,
8219 mask_vectype);
8221 else
8223 if (dataref_offset)
8224 dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
8225 bump);
8226 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
8227 vec_offset = vect_get_vec_def_for_stmt_copy (vinfo, vec_offset);
8228 else
8229 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
8230 stmt_info, bump);
8231 if (mask)
8232 vec_mask = vect_get_vec_def_for_stmt_copy (vinfo, vec_mask);
8235 if (grouped_load || slp_perm)
8236 dr_chain.create (vec_num);
8238 if (memory_access_type == VMAT_LOAD_STORE_LANES)
8240 tree vec_array;
8242 vec_array = create_vector_array (vectype, vec_num);
8244 tree final_mask = NULL_TREE;
8245 if (loop_masks)
8246 final_mask = vect_get_loop_mask (gsi, loop_masks, ncopies,
8247 vectype, j);
8248 if (vec_mask)
8249 final_mask = prepare_load_store_mask (mask_vectype, final_mask,
8250 vec_mask, gsi);
8252 gcall *call;
8253 if (final_mask)
8255 /* Emit:
8256 VEC_ARRAY = MASK_LOAD_LANES (DATAREF_PTR, ALIAS_PTR,
8257 VEC_MASK). */
8258 unsigned int align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
8259 tree alias_ptr = build_int_cst (ref_type, align);
8260 call = gimple_build_call_internal (IFN_MASK_LOAD_LANES, 3,
8261 dataref_ptr, alias_ptr,
8262 final_mask);
8264 else
8266 /* Emit:
8267 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
8268 data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
8269 call = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
8271 gimple_call_set_lhs (call, vec_array);
8272 gimple_call_set_nothrow (call, true);
8273 new_stmt_info = vect_finish_stmt_generation (stmt_info, call, gsi);
8275 /* Extract each vector into an SSA_NAME. */
8276 for (i = 0; i < vec_num; i++)
8278 new_temp = read_vector_array (stmt_info, gsi, scalar_dest,
8279 vec_array, i);
8280 dr_chain.quick_push (new_temp);
8283 /* Record the mapping between SSA_NAMEs and statements. */
8284 vect_record_grouped_load_vectors (stmt_info, dr_chain);
8286 /* Record that VEC_ARRAY is now dead. */
8287 vect_clobber_variable (stmt_info, gsi, vec_array);
8289 else
8291 for (i = 0; i < vec_num; i++)
8293 tree final_mask = NULL_TREE;
8294 if (loop_masks
8295 && memory_access_type != VMAT_INVARIANT)
8296 final_mask = vect_get_loop_mask (gsi, loop_masks,
8297 vec_num * ncopies,
8298 vectype, vec_num * j + i);
8299 if (vec_mask)
8300 final_mask = prepare_load_store_mask (mask_vectype, final_mask,
8301 vec_mask, gsi);
8303 if (i > 0)
8304 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
8305 stmt_info, bump);
8307 /* 2. Create the vector-load in the loop. */
8308 gimple *new_stmt = NULL;
8309 switch (alignment_support_scheme)
8311 case dr_aligned:
8312 case dr_unaligned_supported:
8314 unsigned int align, misalign;
8316 if (memory_access_type == VMAT_GATHER_SCATTER)
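/* Emit the load as a gather internal call (argument names are
   illustrative):
     MASK_GATHER_LOAD (DATAREF_PTR, VEC_OFFSET, SCALE, FINAL_MASK)
   or, when the loop is not fully masked:
     GATHER_LOAD (DATAREF_PTR, VEC_OFFSET, SCALE)
   with the vector destination attached to the call further below.  */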
8318 tree scale = size_int (gs_info.scale);
8319 gcall *call;
8320 if (loop_masks)
8321 call = gimple_build_call_internal
8322 (IFN_MASK_GATHER_LOAD, 4, dataref_ptr,
8323 vec_offset, scale, final_mask);
8324 else
8325 call = gimple_build_call_internal
8326 (IFN_GATHER_LOAD, 3, dataref_ptr,
8327 vec_offset, scale);
8328 gimple_call_set_nothrow (call, true);
8329 new_stmt = call;
8330 data_ref = NULL_TREE;
8331 break;
8334 align = DR_TARGET_ALIGNMENT (dr_info);
8335 if (alignment_support_scheme == dr_aligned)
8337 gcc_assert (aligned_access_p (first_dr_info));
8338 misalign = 0;
8340 else if (DR_MISALIGNMENT (first_dr_info) == -1)
8342 align = dr_alignment
8343 (vect_dr_behavior (first_dr_info));
8344 misalign = 0;
8346 else
8347 misalign = DR_MISALIGNMENT (first_dr_info);
8348 if (dataref_offset == NULL_TREE
8349 && TREE_CODE (dataref_ptr) == SSA_NAME)
8350 set_ptr_info_alignment (get_ptr_info (dataref_ptr),
8351 align, misalign);
8353 if (final_mask)
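/* Emit a masked load, in the same spirit as the MASK_LOAD_LANES case
   above:
     MASK_LOAD (DATAREF_PTR, ALIAS_PTR, FINAL_MASK)
   where ALIAS_PTR is an INTEGER_CST of REF_TYPE whose value is the
   access alignment.  */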
8355 align = least_bit_hwi (misalign | align);
8356 tree ptr = build_int_cst (ref_type, align);
8357 gcall *call
8358 = gimple_build_call_internal (IFN_MASK_LOAD, 3,
8359 dataref_ptr, ptr,
8360 final_mask);
8361 gimple_call_set_nothrow (call, true);
8362 new_stmt = call;
8363 data_ref = NULL_TREE;
8365 else
8367 data_ref
8368 = fold_build2 (MEM_REF, vectype, dataref_ptr,
8369 dataref_offset
8370 ? dataref_offset
8371 : build_int_cst (ref_type, 0));
8372 if (alignment_support_scheme == dr_aligned)
8374 else if (DR_MISALIGNMENT (first_dr_info) == -1)
8375 TREE_TYPE (data_ref)
8376 = build_aligned_type (TREE_TYPE (data_ref),
8377 align * BITS_PER_UNIT);
8378 else
8379 TREE_TYPE (data_ref)
8380 = build_aligned_type (TREE_TYPE (data_ref),
8381 TYPE_ALIGN (elem_type));
8383 break;
8385 case dr_explicit_realign:
8387 tree ptr, bump;
8389 tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));
8391 if (compute_in_loop)
8392 msq = vect_setup_realignment (first_stmt_info, gsi,
8393 &realignment_token,
8394 dr_explicit_realign,
8395 dataref_ptr, NULL);
8397 if (TREE_CODE (dataref_ptr) == SSA_NAME)
8398 ptr = copy_ssa_name (dataref_ptr);
8399 else
8400 ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
8401 unsigned int align = DR_TARGET_ALIGNMENT (first_dr_info);
8402 new_stmt = gimple_build_assign
8403 (ptr, BIT_AND_EXPR, dataref_ptr,
8404 build_int_cst
8405 (TREE_TYPE (dataref_ptr),
8406 -(HOST_WIDE_INT) align));
8407 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
8408 data_ref
8409 = build2 (MEM_REF, vectype, ptr,
8410 build_int_cst (ref_type, 0));
8411 vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr));
8412 vec_dest = vect_create_destination_var (scalar_dest,
8413 vectype);
8414 new_stmt = gimple_build_assign (vec_dest, data_ref);
8415 new_temp = make_ssa_name (vec_dest, new_stmt);
8416 gimple_assign_set_lhs (new_stmt, new_temp);
8417 gimple_set_vdef (new_stmt, gimple_vdef (stmt_info->stmt));
8418 gimple_set_vuse (new_stmt, gimple_vuse (stmt_info->stmt));
8419 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
8420 msq = new_temp;
8422 bump = size_binop (MULT_EXPR, vs,
8423 TYPE_SIZE_UNIT (elem_type));
8424 bump = size_binop (MINUS_EXPR, bump, size_one_node);
8425 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi,
8426 stmt_info, bump);
8427 new_stmt = gimple_build_assign
8428 (NULL_TREE, BIT_AND_EXPR, ptr,
8429 build_int_cst
8430 (TREE_TYPE (ptr), -(HOST_WIDE_INT) align));
8431 ptr = copy_ssa_name (ptr, new_stmt);
8432 gimple_assign_set_lhs (new_stmt, ptr);
8433 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
8434 data_ref
8435 = build2 (MEM_REF, vectype, ptr,
8436 build_int_cst (ref_type, 0));
8437 break;
8439 case dr_explicit_realign_optimized:
8441 if (TREE_CODE (dataref_ptr) == SSA_NAME)
8442 new_temp = copy_ssa_name (dataref_ptr);
8443 else
8444 new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
8445 unsigned int align = DR_TARGET_ALIGNMENT (first_dr_info);
8446 new_stmt = gimple_build_assign
8447 (new_temp, BIT_AND_EXPR, dataref_ptr,
8448 build_int_cst (TREE_TYPE (dataref_ptr),
8449 -(HOST_WIDE_INT) align));
8450 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
8451 data_ref
8452 = build2 (MEM_REF, vectype, new_temp,
8453 build_int_cst (ref_type, 0));
8454 break;
8456 default:
8457 gcc_unreachable ();
8459 vec_dest = vect_create_destination_var (scalar_dest, vectype);
8460 /* DATA_REF is null if we've already built the statement. */
8461 if (data_ref)
8463 vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr));
8464 new_stmt = gimple_build_assign (vec_dest, data_ref);
8466 new_temp = make_ssa_name (vec_dest, new_stmt);
8467 gimple_set_lhs (new_stmt, new_temp);
8468 new_stmt_info
8469 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
8471 /* 3. Handle explicit realignment if necessary/supported.
8472 Create in loop:
8473 vec_dest = realign_load (msq, lsq, realignment_token) */
8474 if (alignment_support_scheme == dr_explicit_realign_optimized
8475 || alignment_support_scheme == dr_explicit_realign)
8477 lsq = gimple_assign_lhs (new_stmt);
8478 if (!realignment_token)
8479 realignment_token = dataref_ptr;
8480 vec_dest = vect_create_destination_var (scalar_dest, vectype);
8481 new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
8482 msq, lsq, realignment_token);
8483 new_temp = make_ssa_name (vec_dest, new_stmt);
8484 gimple_assign_set_lhs (new_stmt, new_temp);
8485 new_stmt_info
8486 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
8488 if (alignment_support_scheme == dr_explicit_realign_optimized)
8490 gcc_assert (phi);
8491 if (i == vec_num - 1 && j == ncopies - 1)
8492 add_phi_arg (phi, lsq,
8493 loop_latch_edge (containing_loop),
8494 UNKNOWN_LOCATION);
8495 msq = lsq;
8499 /* 4. Handle invariant-load. */
8500 if (inv_p && !bb_vinfo)
8502 gcc_assert (!grouped_load);
8503 /* If we have versioned for aliasing or the loop doesn't
8504 have any data dependencies that would preclude this,
8505 then we are sure this is a loop invariant load and
8506 thus we can insert it on the preheader edge. */
8507 if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
8508 && !nested_in_vect_loop
8509 && hoist_defs_of_uses (stmt_info, loop))
8511 gassign *stmt = as_a <gassign *> (stmt_info->stmt);
8512 if (dump_enabled_p ())
8514 dump_printf_loc (MSG_NOTE, vect_location,
8515 "hoisting out of the vectorized "
8516 "loop: ");
8517 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
8519 tree tem = copy_ssa_name (scalar_dest);
8520 gsi_insert_on_edge_immediate
8521 (loop_preheader_edge (loop),
8522 gimple_build_assign (tem,
8523 unshare_expr
8524 (gimple_assign_rhs1 (stmt))));
8525 new_temp = vect_init_vector (stmt_info, tem,
8526 vectype, NULL);
8527 new_stmt = SSA_NAME_DEF_STMT (new_temp);
8528 new_stmt_info = vinfo->add_stmt (new_stmt);
8530 else
8532 gimple_stmt_iterator gsi2 = *gsi;
8533 gsi_next (&gsi2);
8534 new_temp = vect_init_vector (stmt_info, scalar_dest,
8535 vectype, &gsi2);
8536 new_stmt_info = vinfo->lookup_def (new_temp);
8540 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
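/* Negative-step access: the vector was loaded from the lowest address
   of the group, so reverse it in the register with a VEC_PERM_EXPR,
   e.g. (illustratively) turning { a[0], a[1], a[2], a[3] } into
   { a[3], a[2], a[1], a[0] }.  */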
8542 tree perm_mask = perm_mask_for_reverse (vectype);
8543 new_temp = permute_vec_elements (new_temp, new_temp,
8544 perm_mask, stmt_info, gsi);
8545 new_stmt_info = vinfo->lookup_def (new_temp);
8548 /* Collect vector loads and later create their permutation in
8549 vect_transform_grouped_load (). */
8550 if (grouped_load || slp_perm)
8551 dr_chain.quick_push (new_temp);
8553 /* Store vector loads in the corresponding SLP_NODE. */
8554 if (slp && !slp_perm)
8555 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
8557 /* With an SLP permutation we load the gaps as well; without one
8558 we need to skip the gaps after we manage to fully load
8559 all elements. group_gap_adj is DR_GROUP_SIZE here. */
8560 group_elt += nunits;
8561 if (maybe_ne (group_gap_adj, 0U)
8562 && !slp_perm
8563 && known_eq (group_elt, group_size - group_gap_adj))
8565 poly_wide_int bump_val
8566 = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
8567 * group_gap_adj);
8568 tree bump = wide_int_to_tree (sizetype, bump_val);
8569 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
8570 stmt_info, bump);
8571 group_elt = 0;
8574 /* Bump the vector pointer to account for a gap or for excess
8575 elements loaded for a permuted SLP load. */
8576 if (maybe_ne (group_gap_adj, 0U) && slp_perm)
8578 poly_wide_int bump_val
8579 = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
8580 * group_gap_adj);
8581 tree bump = wide_int_to_tree (sizetype, bump_val);
8582 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
8583 stmt_info, bump);
8587 if (slp && !slp_perm)
8588 continue;
8590 if (slp_perm)
8592 unsigned n_perms;
8593 if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
8594 slp_node_instance, false,
8595 &n_perms))
8597 dr_chain.release ();
8598 return false;
8601 else
8603 if (grouped_load)
8605 if (memory_access_type != VMAT_LOAD_STORE_LANES)
8606 vect_transform_grouped_load (stmt_info, dr_chain,
8607 group_size, gsi);
8608 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
8610 else
8612 if (j == 0)
8613 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
8614 else
8615 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
8616 prev_stmt_info = new_stmt_info;
8619 dr_chain.release ();
8622 return true;
8625 /* Function vect_is_simple_cond.
8627 Input:
8628 VINFO - the vectorization info for the loop or basic block being vectorized.
8629 COND - Condition that is checked for simple use.
8631 Output:
8632 *COMP_VECTYPE - the vector type for the comparison.
8633 *DTS - The def types for the arguments of the comparison
8635 Returns whether a COND can be vectorized. Checks whether
8636 condition operands are supportable using vect_is_simple_use. */
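/* Example (illustrative): for COND 'a_5 < b_6' both operands are
   checked with vect_is_simple_use and *COMP_VECTYPE is taken from
   their vector type; a constant operand such as '3' is recorded as
   vect_constant_def, while a bare boolean SSA_NAME condition is
   treated as a mask.  */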
8638 static bool
8639 vect_is_simple_cond (tree cond, vec_info *vinfo,
8640 tree *comp_vectype, enum vect_def_type *dts,
8641 tree vectype)
8643 tree lhs, rhs;
8644 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
8646 /* Mask case. */
8647 if (TREE_CODE (cond) == SSA_NAME
8648 && VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (cond)))
8650 if (!vect_is_simple_use (cond, vinfo, &dts[0], comp_vectype)
8651 || !*comp_vectype
8652 || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype))
8653 return false;
8654 return true;
8657 if (!COMPARISON_CLASS_P (cond))
8658 return false;
8660 lhs = TREE_OPERAND (cond, 0);
8661 rhs = TREE_OPERAND (cond, 1);
8663 if (TREE_CODE (lhs) == SSA_NAME)
8665 if (!vect_is_simple_use (lhs, vinfo, &dts[0], &vectype1))
8666 return false;
8668 else if (TREE_CODE (lhs) == INTEGER_CST || TREE_CODE (lhs) == REAL_CST
8669 || TREE_CODE (lhs) == FIXED_CST)
8670 dts[0] = vect_constant_def;
8671 else
8672 return false;
8674 if (TREE_CODE (rhs) == SSA_NAME)
8676 if (!vect_is_simple_use (rhs, vinfo, &dts[1], &vectype2))
8677 return false;
8679 else if (TREE_CODE (rhs) == INTEGER_CST || TREE_CODE (rhs) == REAL_CST
8680 || TREE_CODE (rhs) == FIXED_CST)
8681 dts[1] = vect_constant_def;
8682 else
8683 return false;
8685 if (vectype1 && vectype2
8686 && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1),
8687 TYPE_VECTOR_SUBPARTS (vectype2)))
8688 return false;
8690 *comp_vectype = vectype1 ? vectype1 : vectype2;
8691 /* Invariant comparison. */
8692 if (! *comp_vectype && vectype)
8694 tree scalar_type = TREE_TYPE (lhs);
8695 /* If we can widen the comparison to match vectype do so. */
8696 if (INTEGRAL_TYPE_P (scalar_type)
8697 && tree_int_cst_lt (TYPE_SIZE (scalar_type),
8698 TYPE_SIZE (TREE_TYPE (vectype))))
8699 scalar_type = build_nonstandard_integer_type
8700 (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (vectype))),
8701 TYPE_UNSIGNED (scalar_type));
8702 *comp_vectype = get_vectype_for_scalar_type (scalar_type);
8705 return true;
8708 /* vectorizable_condition.
8710 Check if STMT_INFO is a conditional modify expression that can be vectorized.
8711 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
8712 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
8713 at GSI.
8715 When STMT_INFO is vectorized as a nested cycle, REDUC_DEF is the vector
8716 variable to be used at REDUC_INDEX (in the THEN clause if REDUC_INDEX is 1,
8717 and in the ELSE clause if it is 2).
8719 Return true if STMT_INFO is vectorizable in this way. */
8721 bool
8722 vectorizable_condition (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
8723 stmt_vec_info *vec_stmt, tree reduc_def,
8724 int reduc_index, slp_tree slp_node,
8725 stmt_vector_for_cost *cost_vec)
8727 vec_info *vinfo = stmt_info->vinfo;
8728 tree scalar_dest = NULL_TREE;
8729 tree vec_dest = NULL_TREE;
8730 tree cond_expr, cond_expr0 = NULL_TREE, cond_expr1 = NULL_TREE;
8731 tree then_clause, else_clause;
8732 tree comp_vectype = NULL_TREE;
8733 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
8734 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
8735 tree vec_compare;
8736 tree new_temp;
8737 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
8738 enum vect_def_type dts[4]
8739 = {vect_unknown_def_type, vect_unknown_def_type,
8740 vect_unknown_def_type, vect_unknown_def_type};
8741 int ndts = 4;
8742 int ncopies;
8743 enum tree_code code, cond_code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
8744 stmt_vec_info prev_stmt_info = NULL;
8745 int i, j;
8746 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
8747 vec<tree> vec_oprnds0 = vNULL;
8748 vec<tree> vec_oprnds1 = vNULL;
8749 vec<tree> vec_oprnds2 = vNULL;
8750 vec<tree> vec_oprnds3 = vNULL;
8751 tree vec_cmp_type;
8752 bool masked = false;
8754 if (reduc_index && STMT_SLP_TYPE (stmt_info))
8755 return false;
8757 vect_reduction_type reduction_type
8758 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info);
8759 if (reduction_type == TREE_CODE_REDUCTION)
8761 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
8762 return false;
8764 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
8765 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
8766 && reduc_def))
8767 return false;
8769 /* FORNOW: not yet supported. */
8770 if (STMT_VINFO_LIVE_P (stmt_info))
8772 if (dump_enabled_p ())
8773 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8774 "value used after loop.\n");
8775 return false;
8779 /* Is this a vectorizable conditional operation? */
8780 gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
8781 if (!stmt)
8782 return false;
8784 code = gimple_assign_rhs_code (stmt);
8786 if (code != COND_EXPR)
8787 return false;
8789 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
8790 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
8792 if (slp_node)
8793 ncopies = 1;
8794 else
8795 ncopies = vect_get_num_copies (loop_vinfo, vectype);
8797 gcc_assert (ncopies >= 1);
8798 if (reduc_index && ncopies > 1)
8799 return false; /* FORNOW */
8801 cond_expr = gimple_assign_rhs1 (stmt);
8802 then_clause = gimple_assign_rhs2 (stmt);
8803 else_clause = gimple_assign_rhs3 (stmt);
8805 if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo,
8806 &comp_vectype, &dts[0], slp_node ? NULL : vectype)
8807 || !comp_vectype)
8808 return false;
8810 if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &dts[2], &vectype1))
8811 return false;
8812 if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &dts[3], &vectype2))
8813 return false;
8815 if (vectype1 && !useless_type_conversion_p (vectype, vectype1))
8816 return false;
8818 if (vectype2 && !useless_type_conversion_p (vectype, vectype2))
8819 return false;
8821 masked = !COMPARISON_CLASS_P (cond_expr);
8822 vec_cmp_type = build_same_sized_truth_vector_type (comp_vectype);
8824 if (vec_cmp_type == NULL_TREE)
8825 return false;
8827 cond_code = TREE_CODE (cond_expr);
8828 if (!masked)
8830 cond_expr0 = TREE_OPERAND (cond_expr, 0);
8831 cond_expr1 = TREE_OPERAND (cond_expr, 1);
8834 if (!masked && VECTOR_BOOLEAN_TYPE_P (comp_vectype))
8836 /* Boolean values may have another representation in vectors
8837 and therefore we prefer bit operations over comparison for
8838 them (which also works for scalar masks). We store opcodes
8839 to use in bitop1 and bitop2. Statement is vectorized as
8840 BITOP2 (rhs1 BITOP1 rhs2) or rhs1 BITOP2 (BITOP1 rhs2)
8841 depending on bitop1 and bitop2 arity. */
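/* Concretely (as encoded in the switch below): a > b is emitted as
   a & ~b, a >= b as a | ~b, a != b as a ^ b, and a == b as ~(a ^ b),
   the latter realized by computing a ^ b and swapping the THEN/ELSE
   clauses; LT and LE reuse the GT/GE forms with the comparison
   operands swapped.  */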
8842 switch (cond_code)
8844 case GT_EXPR:
8845 bitop1 = BIT_NOT_EXPR;
8846 bitop2 = BIT_AND_EXPR;
8847 break;
8848 case GE_EXPR:
8849 bitop1 = BIT_NOT_EXPR;
8850 bitop2 = BIT_IOR_EXPR;
8851 break;
8852 case LT_EXPR:
8853 bitop1 = BIT_NOT_EXPR;
8854 bitop2 = BIT_AND_EXPR;
8855 std::swap (cond_expr0, cond_expr1);
8856 break;
8857 case LE_EXPR:
8858 bitop1 = BIT_NOT_EXPR;
8859 bitop2 = BIT_IOR_EXPR;
8860 std::swap (cond_expr0, cond_expr1);
8861 break;
8862 case NE_EXPR:
8863 bitop1 = BIT_XOR_EXPR;
8864 break;
8865 case EQ_EXPR:
8866 bitop1 = BIT_XOR_EXPR;
8867 bitop2 = BIT_NOT_EXPR;
8868 break;
8869 default:
8870 return false;
8872 cond_code = SSA_NAME;
8875 if (!vec_stmt)
8877 if (bitop1 != NOP_EXPR)
8879 machine_mode mode = TYPE_MODE (comp_vectype);
8880 optab optab;
8882 optab = optab_for_tree_code (bitop1, comp_vectype, optab_default);
8883 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8884 return false;
8886 if (bitop2 != NOP_EXPR)
8888 optab = optab_for_tree_code (bitop2, comp_vectype,
8889 optab_default);
8890 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8891 return false;
8894 if (expand_vec_cond_expr_p (vectype, comp_vectype,
8895 cond_code))
8897 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
8898 vect_model_simple_cost (stmt_info, ncopies, dts, ndts, slp_node,
8899 cost_vec);
8900 return true;
8902 return false;
8905 /* Transform. */
8907 if (!slp_node)
8909 vec_oprnds0.create (1);
8910 vec_oprnds1.create (1);
8911 vec_oprnds2.create (1);
8912 vec_oprnds3.create (1);
8915 /* Handle def. */
8916 scalar_dest = gimple_assign_lhs (stmt);
8917 if (reduction_type != EXTRACT_LAST_REDUCTION)
8918 vec_dest = vect_create_destination_var (scalar_dest, vectype);
8920 /* Handle cond expr. */
8921 for (j = 0; j < ncopies; j++)
8923 stmt_vec_info new_stmt_info = NULL;
8924 if (j == 0)
8926 if (slp_node)
8928 auto_vec<tree, 4> ops;
8929 auto_vec<vec<tree>, 4> vec_defs;
8931 if (masked)
8932 ops.safe_push (cond_expr);
8933 else
8935 ops.safe_push (cond_expr0);
8936 ops.safe_push (cond_expr1);
8938 ops.safe_push (then_clause);
8939 ops.safe_push (else_clause);
8940 vect_get_slp_defs (ops, slp_node, &vec_defs);
8941 vec_oprnds3 = vec_defs.pop ();
8942 vec_oprnds2 = vec_defs.pop ();
8943 if (!masked)
8944 vec_oprnds1 = vec_defs.pop ();
8945 vec_oprnds0 = vec_defs.pop ();
8947 else
8949 if (masked)
8951 vec_cond_lhs
8952 = vect_get_vec_def_for_operand (cond_expr, stmt_info,
8953 comp_vectype);
8954 vect_is_simple_use (cond_expr, stmt_info->vinfo, &dts[0]);
8956 else
8958 vec_cond_lhs
8959 = vect_get_vec_def_for_operand (cond_expr0,
8960 stmt_info, comp_vectype);
8961 vect_is_simple_use (cond_expr0, loop_vinfo, &dts[0]);
8963 vec_cond_rhs
8964 = vect_get_vec_def_for_operand (cond_expr1,
8965 stmt_info, comp_vectype);
8966 vect_is_simple_use (cond_expr1, loop_vinfo, &dts[1]);
8968 if (reduc_index == 1)
8969 vec_then_clause = reduc_def;
8970 else
8972 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
8973 stmt_info);
8974 vect_is_simple_use (then_clause, loop_vinfo, &dts[2]);
8976 if (reduc_index == 2)
8977 vec_else_clause = reduc_def;
8978 else
8980 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
8981 stmt_info);
8982 vect_is_simple_use (else_clause, loop_vinfo, &dts[3]);
8986 else
8988 vec_cond_lhs
8989 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnds0.pop ());
8990 if (!masked)
8991 vec_cond_rhs
8992 = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnds1.pop ());
8994 vec_then_clause = vect_get_vec_def_for_stmt_copy (vinfo,
8995 vec_oprnds2.pop ());
8996 vec_else_clause = vect_get_vec_def_for_stmt_copy (vinfo,
8997 vec_oprnds3.pop ());
9000 if (!slp_node)
9002 vec_oprnds0.quick_push (vec_cond_lhs);
9003 if (!masked)
9004 vec_oprnds1.quick_push (vec_cond_rhs);
9005 vec_oprnds2.quick_push (vec_then_clause);
9006 vec_oprnds3.quick_push (vec_else_clause);
9009 /* Arguments are ready. Create the new vector stmt. */
9010 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
9012 vec_then_clause = vec_oprnds2[i];
9013 vec_else_clause = vec_oprnds3[i];
9015 if (masked)
9016 vec_compare = vec_cond_lhs;
9017 else
9019 vec_cond_rhs = vec_oprnds1[i];
9020 if (bitop1 == NOP_EXPR)
9021 vec_compare = build2 (cond_code, vec_cmp_type,
9022 vec_cond_lhs, vec_cond_rhs);
9023 else
9025 new_temp = make_ssa_name (vec_cmp_type);
9026 gassign *new_stmt;
9027 if (bitop1 == BIT_NOT_EXPR)
9028 new_stmt = gimple_build_assign (new_temp, bitop1,
9029 vec_cond_rhs);
9030 else
9031 new_stmt
9032 = gimple_build_assign (new_temp, bitop1, vec_cond_lhs,
9033 vec_cond_rhs);
9034 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
9035 if (bitop2 == NOP_EXPR)
9036 vec_compare = new_temp;
9037 else if (bitop2 == BIT_NOT_EXPR)
9039 /* Instead of doing ~x ? y : z do x ? z : y. */
9040 vec_compare = new_temp;
9041 std::swap (vec_then_clause, vec_else_clause);
9043 else
9045 vec_compare = make_ssa_name (vec_cmp_type);
9046 new_stmt
9047 = gimple_build_assign (vec_compare, bitop2,
9048 vec_cond_lhs, new_temp);
9049 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
9053 if (reduction_type == EXTRACT_LAST_REDUCTION)
9055 if (!is_gimple_val (vec_compare))
9057 tree vec_compare_name = make_ssa_name (vec_cmp_type);
9058 gassign *new_stmt = gimple_build_assign (vec_compare_name,
9059 vec_compare);
9060 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
9061 vec_compare = vec_compare_name;
9063 gcc_assert (reduc_index == 2);
9064 gcall *new_stmt = gimple_build_call_internal
9065 (IFN_FOLD_EXTRACT_LAST, 3, else_clause, vec_compare,
9066 vec_then_clause);
9067 gimple_call_set_lhs (new_stmt, scalar_dest);
9068 SSA_NAME_DEF_STMT (scalar_dest) = new_stmt;
9069 if (stmt_info->stmt == gsi_stmt (*gsi))
9070 new_stmt_info = vect_finish_replace_stmt (stmt_info, new_stmt);
9071 else
9073 /* In this case we're moving the definition to later in the
9074 block. That doesn't matter because the only uses of the
9075 lhs are in phi statements. */
9076 gimple_stmt_iterator old_gsi
9077 = gsi_for_stmt (stmt_info->stmt);
9078 gsi_remove (&old_gsi, true);
9079 new_stmt_info
9080 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
9083 else
9085 new_temp = make_ssa_name (vec_dest);
9086 gassign *new_stmt
9087 = gimple_build_assign (new_temp, VEC_COND_EXPR, vec_compare,
9088 vec_then_clause, vec_else_clause);
9089 new_stmt_info
9090 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
9092 if (slp_node)
9093 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
9096 if (slp_node)
9097 continue;
9099 if (j == 0)
9100 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
9101 else
9102 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
9104 prev_stmt_info = new_stmt_info;
9107 vec_oprnds0.release ();
9108 vec_oprnds1.release ();
9109 vec_oprnds2.release ();
9110 vec_oprnds3.release ();
9112 return true;
9115 /* vectorizable_comparison.
9117 Check if STMT_INFO is a comparison expression that can be vectorized.
9118 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
9119 comparison, put it in VEC_STMT, and insert it at GSI.
9121 Return true if STMT_INFO is vectorizable in this way. */
9123 static bool
9124 vectorizable_comparison (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
9125 stmt_vec_info *vec_stmt, tree reduc_def,
9126 slp_tree slp_node, stmt_vector_for_cost *cost_vec)
9128 vec_info *vinfo = stmt_info->vinfo;
9129 tree lhs, rhs1, rhs2;
9130 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
9131 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
9132 tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE;
9133 tree new_temp;
9134 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
9135 enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type};
9136 int ndts = 2;
9137 poly_uint64 nunits;
9138 int ncopies;
9139 enum tree_code code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
9140 stmt_vec_info prev_stmt_info = NULL;
9141 int i, j;
9142 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
9143 vec<tree> vec_oprnds0 = vNULL;
9144 vec<tree> vec_oprnds1 = vNULL;
9145 tree mask_type;
9146 tree mask;
9148 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
9149 return false;
9151 if (!vectype || !VECTOR_BOOLEAN_TYPE_P (vectype))
9152 return false;
9154 mask_type = vectype;
9155 nunits = TYPE_VECTOR_SUBPARTS (vectype);
9157 if (slp_node)
9158 ncopies = 1;
9159 else
9160 ncopies = vect_get_num_copies (loop_vinfo, vectype);
9162 gcc_assert (ncopies >= 1);
9163 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
9164 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
9165 && reduc_def))
9166 return false;
9168 if (STMT_VINFO_LIVE_P (stmt_info))
9170 if (dump_enabled_p ())
9171 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9172 "value used after loop.\n");
9173 return false;
9176 gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
9177 if (!stmt)
9178 return false;
9180 code = gimple_assign_rhs_code (stmt);
9182 if (TREE_CODE_CLASS (code) != tcc_comparison)
9183 return false;
9185 rhs1 = gimple_assign_rhs1 (stmt);
9186 rhs2 = gimple_assign_rhs2 (stmt);
9188 if (!vect_is_simple_use (rhs1, stmt_info->vinfo, &dts[0], &vectype1))
9189 return false;
9191 if (!vect_is_simple_use (rhs2, stmt_info->vinfo, &dts[1], &vectype2))
9192 return false;
9194 if (vectype1 && vectype2
9195 && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1),
9196 TYPE_VECTOR_SUBPARTS (vectype2)))
9197 return false;
9199 vectype = vectype1 ? vectype1 : vectype2;
9201 /* Invariant comparison. */
9202 if (!vectype)
9204 vectype = get_vectype_for_scalar_type (TREE_TYPE (rhs1));
9205 if (maybe_ne (TYPE_VECTOR_SUBPARTS (vectype), nunits))
9206 return false;
9208 else if (maybe_ne (nunits, TYPE_VECTOR_SUBPARTS (vectype)))
9209 return false;
9211 /* Can't compare mask and non-mask types. */
9212 if (vectype1 && vectype2
9213 && (VECTOR_BOOLEAN_TYPE_P (vectype1) ^ VECTOR_BOOLEAN_TYPE_P (vectype2)))
9214 return false;
9216 /* Boolean values may have another representation in vectors
9217 and therefore we prefer bit operations over comparison for
9218 them (which also works for scalar masks). We store opcodes
9219 to use in bitop1 and bitop2. Statement is vectorized as
9220 BITOP2 (rhs1 BITOP1 rhs2) or
9221 rhs1 BITOP2 (BITOP1 rhs2)
9222 depending on bitop1 and bitop2 arity. */
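/* Illustrative example of the mapping chosen below (a sketch, not extra
   code): for boolean operands, r = (a > b) is emitted as r = a & ~b,
   i.e. bitop1 = BIT_NOT_EXPR applied to rhs2 and bitop2 = BIT_AND_EXPR;
   likewise r = (a == b) becomes r = ~(a ^ b), i.e. bitop1 = BIT_XOR_EXPR
   and bitop2 = BIT_NOT_EXPR.  */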
9223 if (VECTOR_BOOLEAN_TYPE_P (vectype))
9225 if (code == GT_EXPR)
9227 bitop1 = BIT_NOT_EXPR;
9228 bitop2 = BIT_AND_EXPR;
9230 else if (code == GE_EXPR)
9232 bitop1 = BIT_NOT_EXPR;
9233 bitop2 = BIT_IOR_EXPR;
9235 else if (code == LT_EXPR)
9237 bitop1 = BIT_NOT_EXPR;
9238 bitop2 = BIT_AND_EXPR;
9239 std::swap (rhs1, rhs2);
9240 std::swap (dts[0], dts[1]);
9242 else if (code == LE_EXPR)
9244 bitop1 = BIT_NOT_EXPR;
9245 bitop2 = BIT_IOR_EXPR;
9246 std::swap (rhs1, rhs2);
9247 std::swap (dts[0], dts[1]);
9249 else
9251 bitop1 = BIT_XOR_EXPR;
9252 if (code == EQ_EXPR)
9253 bitop2 = BIT_NOT_EXPR;
9257 if (!vec_stmt)
9259 if (bitop1 == NOP_EXPR)
9261 if (!expand_vec_cmp_expr_p (vectype, mask_type, code))
9262 return false;
9264 else
9266 machine_mode mode = TYPE_MODE (vectype);
9267 optab optab;
9269 optab = optab_for_tree_code (bitop1, vectype, optab_default);
9270 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
9271 return false;
9273 if (bitop2 != NOP_EXPR)
9275 optab = optab_for_tree_code (bitop2, vectype, optab_default);
9276 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
9277 return false;
9281 STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type;
9282 vect_model_simple_cost (stmt_info, ncopies * (1 + (bitop2 != NOP_EXPR)),
9283 dts, ndts, slp_node, cost_vec);
9284 return true;
9287 /* Transform. */
9288 if (!slp_node)
9290 vec_oprnds0.create (1);
9291 vec_oprnds1.create (1);
9294 /* Handle def. */
9295 lhs = gimple_assign_lhs (stmt);
9296 mask = vect_create_destination_var (lhs, mask_type);
9298 /* Handle cmp expr. */
9299 for (j = 0; j < ncopies; j++)
9301 stmt_vec_info new_stmt_info = NULL;
9302 if (j == 0)
9304 if (slp_node)
9306 auto_vec<tree, 2> ops;
9307 auto_vec<vec<tree>, 2> vec_defs;
9309 ops.safe_push (rhs1);
9310 ops.safe_push (rhs2);
9311 vect_get_slp_defs (ops, slp_node, &vec_defs);
9312 vec_oprnds1 = vec_defs.pop ();
9313 vec_oprnds0 = vec_defs.pop ();
9315 else
9317 vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt_info,
9318 vectype);
9319 vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt_info,
9320 vectype);
9323 else
9325 vec_rhs1 = vect_get_vec_def_for_stmt_copy (vinfo,
9326 vec_oprnds0.pop ());
9327 vec_rhs2 = vect_get_vec_def_for_stmt_copy (vinfo,
9328 vec_oprnds1.pop ());
9331 if (!slp_node)
9333 vec_oprnds0.quick_push (vec_rhs1);
9334 vec_oprnds1.quick_push (vec_rhs2);
9337 /* Arguments are ready. Create the new vector stmt. */
9338 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_rhs1)
9340 vec_rhs2 = vec_oprnds1[i];
9342 new_temp = make_ssa_name (mask);
9343 if (bitop1 == NOP_EXPR)
9345 gassign *new_stmt = gimple_build_assign (new_temp, code,
9346 vec_rhs1, vec_rhs2);
9347 new_stmt_info
9348 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
9350 else
9352 gassign *new_stmt;
9353 if (bitop1 == BIT_NOT_EXPR)
9354 new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs2);
9355 else
9356 new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs1,
9357 vec_rhs2);
9358 new_stmt_info
9359 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
9360 if (bitop2 != NOP_EXPR)
9362 tree res = make_ssa_name (mask);
9363 if (bitop2 == BIT_NOT_EXPR)
9364 new_stmt = gimple_build_assign (res, bitop2, new_temp);
9365 else
9366 new_stmt = gimple_build_assign (res, bitop2, vec_rhs1,
9367 new_temp);
9368 new_stmt_info
9369 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
9372 if (slp_node)
9373 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
9376 if (slp_node)
9377 continue;
9379 if (j == 0)
9380 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
9381 else
9382 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
9384 prev_stmt_info = new_stmt_info;
9387 vec_oprnds0.release ();
9388 vec_oprnds1.release ();
9390 return true;
9393 /* If SLP_NODE is nonnull, return true if vectorizable_live_operation
9394 can handle all live statements in the node. Otherwise return true
9395 if STMT_INFO is not live or if vectorizable_live_operation can handle it.
9396 GSI and VEC_STMT are as for vectorizable_live_operation. */
9398 static bool
9399 can_vectorize_live_stmts (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
9400 slp_tree slp_node, stmt_vec_info *vec_stmt,
9401 stmt_vector_for_cost *cost_vec)
9403 if (slp_node)
9405 stmt_vec_info slp_stmt_info;
9406 unsigned int i;
9407 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node), i, slp_stmt_info)
9409 if (STMT_VINFO_LIVE_P (slp_stmt_info)
9410 && !vectorizable_live_operation (slp_stmt_info, gsi, slp_node, i,
9411 vec_stmt, cost_vec))
9412 return false;
9415 else if (STMT_VINFO_LIVE_P (stmt_info)
9416 && !vectorizable_live_operation (stmt_info, gsi, slp_node, -1,
9417 vec_stmt, cost_vec))
9418 return false;
9420 return true;
9423 /* Make sure the statement is vectorizable. */
9425 bool
9426 vect_analyze_stmt (stmt_vec_info stmt_info, bool *need_to_vectorize,
9427 slp_tree node, slp_instance node_instance,
9428 stmt_vector_for_cost *cost_vec)
9430 vec_info *vinfo = stmt_info->vinfo;
9431 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
9432 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
9433 bool ok;
9434 gimple_seq pattern_def_seq;
9436 if (dump_enabled_p ())
9438 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
9439 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
9442 if (gimple_has_volatile_ops (stmt_info->stmt))
9444 if (dump_enabled_p ())
9445 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9446 "not vectorized: stmt has volatile operands\n");
9448 return false;
9451 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
9452 && node == NULL
9453 && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
9455 gimple_stmt_iterator si;
9457 for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
9459 stmt_vec_info pattern_def_stmt_info
9460 = vinfo->lookup_stmt (gsi_stmt (si));
9461 if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
9462 || STMT_VINFO_LIVE_P (pattern_def_stmt_info))
9464 /* Analyze def stmt of STMT if it's a pattern stmt. */
9465 if (dump_enabled_p ())
9467 dump_printf_loc (MSG_NOTE, vect_location,
9468 "==> examining pattern def statement: ");
9469 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
9470 pattern_def_stmt_info->stmt, 0);
9473 if (!vect_analyze_stmt (pattern_def_stmt_info,
9474 need_to_vectorize, node, node_instance,
9475 cost_vec))
9476 return false;
9481 /* Skip stmts that do not need to be vectorized. In loops this is expected
9482 to include:
9483 - the COND_EXPR which is the loop exit condition
9484 - any LABEL_EXPRs in the loop
9485 - computations that are used only for array indexing or loop control.
9486 In basic blocks we only analyze statements that are a part of some SLP
9487 instance, therefore, all the statements are relevant.
9489 A pattern statement needs to be analyzed instead of the original statement
9490 if the original statement is not relevant. Otherwise, we analyze both
9491 statements. In basic blocks we are called from some SLP instance
9492 traversal; there we don't analyze the pattern stmts separately, since
9493 they are already part of the SLP instance. */
9495 stmt_vec_info pattern_stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
9496 if (!STMT_VINFO_RELEVANT_P (stmt_info)
9497 && !STMT_VINFO_LIVE_P (stmt_info))
9499 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
9500 && pattern_stmt_info
9501 && (STMT_VINFO_RELEVANT_P (pattern_stmt_info)
9502 || STMT_VINFO_LIVE_P (pattern_stmt_info)))
9504 /* Analyze PATTERN_STMT instead of the original stmt. */
9505 stmt_info = pattern_stmt_info;
9506 if (dump_enabled_p ())
9508 dump_printf_loc (MSG_NOTE, vect_location,
9509 "==> examining pattern statement: ");
9510 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
9513 else
9515 if (dump_enabled_p ())
9516 dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");
9518 return true;
9521 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
9522 && node == NULL
9523 && pattern_stmt_info
9524 && (STMT_VINFO_RELEVANT_P (pattern_stmt_info)
9525 || STMT_VINFO_LIVE_P (pattern_stmt_info)))
9527 /* Analyze PATTERN_STMT too. */
9528 if (dump_enabled_p ())
9530 dump_printf_loc (MSG_NOTE, vect_location,
9531 "==> examining pattern statement: ");
9532 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt_info->stmt, 0);
9535 if (!vect_analyze_stmt (pattern_stmt_info, need_to_vectorize, node,
9536 node_instance, cost_vec))
9537 return false;
9540 switch (STMT_VINFO_DEF_TYPE (stmt_info))
9542 case vect_internal_def:
9543 break;
9545 case vect_reduction_def:
9546 case vect_nested_cycle:
9547 gcc_assert (!bb_vinfo
9548 && (relevance == vect_used_in_outer
9549 || relevance == vect_used_in_outer_by_reduction
9550 || relevance == vect_used_by_reduction
9551 || relevance == vect_unused_in_scope
9552 || relevance == vect_used_only_live));
9553 break;
9555 case vect_induction_def:
9556 gcc_assert (!bb_vinfo);
9557 break;
9559 case vect_constant_def:
9560 case vect_external_def:
9561 case vect_unknown_def_type:
9562 default:
9563 gcc_unreachable ();
9566 if (STMT_VINFO_RELEVANT_P (stmt_info))
9568 tree type = gimple_expr_type (stmt_info->stmt);
9569 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (type)));
9570 gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
9571 gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
9572 || (call && gimple_call_lhs (call) == NULL_TREE));
9573 *need_to_vectorize = true;
9576 if (PURE_SLP_STMT (stmt_info) && !node)
9578 dump_printf_loc (MSG_NOTE, vect_location,
9579 "handled only by SLP analysis\n");
9580 return true;
9583 ok = true;
9584 if (!bb_vinfo
9585 && (STMT_VINFO_RELEVANT_P (stmt_info)
9586 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
9587 ok = (vectorizable_simd_clone_call (stmt_info, NULL, NULL, node, cost_vec)
9588 || vectorizable_conversion (stmt_info, NULL, NULL, node, cost_vec)
9589 || vectorizable_shift (stmt_info, NULL, NULL, node, cost_vec)
9590 || vectorizable_operation (stmt_info, NULL, NULL, node, cost_vec)
9591 || vectorizable_assignment (stmt_info, NULL, NULL, node, cost_vec)
9592 || vectorizable_load (stmt_info, NULL, NULL, node, node_instance,
9593 cost_vec)
9594 || vectorizable_call (stmt_info, NULL, NULL, node, cost_vec)
9595 || vectorizable_store (stmt_info, NULL, NULL, node, cost_vec)
9596 || vectorizable_reduction (stmt_info, NULL, NULL, node,
9597 node_instance, cost_vec)
9598 || vectorizable_induction (stmt_info, NULL, NULL, node, cost_vec)
9599 || vectorizable_condition (stmt_info, NULL, NULL, NULL, 0, node,
9600 cost_vec)
9601 || vectorizable_comparison (stmt_info, NULL, NULL, NULL, node,
9602 cost_vec));
9603 else
9605 if (bb_vinfo)
9606 ok = (vectorizable_simd_clone_call (stmt_info, NULL, NULL, node,
9607 cost_vec)
9608 || vectorizable_conversion (stmt_info, NULL, NULL, node,
9609 cost_vec)
9610 || vectorizable_shift (stmt_info, NULL, NULL, node, cost_vec)
9611 || vectorizable_operation (stmt_info, NULL, NULL, node, cost_vec)
9612 || vectorizable_assignment (stmt_info, NULL, NULL, node,
9613 cost_vec)
9614 || vectorizable_load (stmt_info, NULL, NULL, node, node_instance,
9615 cost_vec)
9616 || vectorizable_call (stmt_info, NULL, NULL, node, cost_vec)
9617 || vectorizable_store (stmt_info, NULL, NULL, node, cost_vec)
9618 || vectorizable_condition (stmt_info, NULL, NULL, NULL, 0, node,
9619 cost_vec)
9620 || vectorizable_comparison (stmt_info, NULL, NULL, NULL, node,
9621 cost_vec));
9624 if (!ok)
9626 if (dump_enabled_p ())
9628 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9629 "not vectorized: relevant stmt not ");
9630 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
9631 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
9632 stmt_info->stmt, 0);
9635 return false;
9638 /* Stmts that are (also) "live" (i.e., used outside the loop)
9639 need extra handling, except for vectorizable reductions. */
9640 if (!bb_vinfo
9641 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
9642 && !can_vectorize_live_stmts (stmt_info, NULL, node, NULL, cost_vec))
9644 if (dump_enabled_p ())
9646 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9647 "not vectorized: live stmt not supported: ");
9648 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
9649 stmt_info->stmt, 0);
9652 return false;
9655 return true;
9659 /* Function vect_transform_stmt.
9661 Create a vectorized stmt to replace STMT_INFO, and insert it at BSI. */
9663 bool
9664 vect_transform_stmt (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
9665 bool *grouped_store, slp_tree slp_node,
9666 slp_instance slp_node_instance)
9668 vec_info *vinfo = stmt_info->vinfo;
9669 bool is_store = false;
9670 stmt_vec_info vec_stmt = NULL;
9671 bool done;
9673 gcc_assert (slp_node || !PURE_SLP_STMT (stmt_info));
9674 stmt_vec_info old_vec_stmt_info = STMT_VINFO_VEC_STMT (stmt_info);
9676 bool nested_p = (STMT_VINFO_LOOP_VINFO (stmt_info)
9677 && nested_in_vect_loop_p
9678 (LOOP_VINFO_LOOP (STMT_VINFO_LOOP_VINFO (stmt_info)),
9679 stmt_info));
9681 gimple *stmt = stmt_info->stmt;
9682 switch (STMT_VINFO_TYPE (stmt_info))
9684 case type_demotion_vec_info_type:
9685 case type_promotion_vec_info_type:
9686 case type_conversion_vec_info_type:
9687 done = vectorizable_conversion (stmt_info, gsi, &vec_stmt, slp_node,
9688 NULL);
9689 gcc_assert (done);
9690 break;
9692 case induc_vec_info_type:
9693 done = vectorizable_induction (stmt_info, gsi, &vec_stmt, slp_node,
9694 NULL);
9695 gcc_assert (done);
9696 break;
9698 case shift_vec_info_type:
9699 done = vectorizable_shift (stmt_info, gsi, &vec_stmt, slp_node, NULL);
9700 gcc_assert (done);
9701 break;
9703 case op_vec_info_type:
9704 done = vectorizable_operation (stmt_info, gsi, &vec_stmt, slp_node,
9705 NULL);
9706 gcc_assert (done);
9707 break;
9709 case assignment_vec_info_type:
9710 done = vectorizable_assignment (stmt_info, gsi, &vec_stmt, slp_node,
9711 NULL);
9712 gcc_assert (done);
9713 break;
9715 case load_vec_info_type:
9716 done = vectorizable_load (stmt_info, gsi, &vec_stmt, slp_node,
9717 slp_node_instance, NULL);
9718 gcc_assert (done);
9719 break;
9721 case store_vec_info_type:
9722 done = vectorizable_store (stmt_info, gsi, &vec_stmt, slp_node, NULL);
9723 gcc_assert (done);
9724 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
9726 /* In case of interleaving, the whole chain is vectorized when the
9727 last store in the chain is reached. Store stmts before the last
9728 one are skipped, and their vec_stmt_info shouldn't be freed
9729 meanwhile. */
9730 *grouped_store = true;
9731 stmt_vec_info group_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
9732 if (DR_GROUP_STORE_COUNT (group_info) == DR_GROUP_SIZE (group_info))
9733 is_store = true;
9735 else
9736 is_store = true;
9737 break;
9739 case condition_vec_info_type:
9740 done = vectorizable_condition (stmt_info, gsi, &vec_stmt, NULL, 0,
9741 slp_node, NULL);
9742 gcc_assert (done);
9743 break;
9745 case comparison_vec_info_type:
9746 done = vectorizable_comparison (stmt_info, gsi, &vec_stmt, NULL,
9747 slp_node, NULL);
9748 gcc_assert (done);
9749 break;
9751 case call_vec_info_type:
9752 done = vectorizable_call (stmt_info, gsi, &vec_stmt, slp_node, NULL);
9753 stmt = gsi_stmt (*gsi);
9754 break;
9756 case call_simd_clone_vec_info_type:
9757 done = vectorizable_simd_clone_call (stmt_info, gsi, &vec_stmt,
9758 slp_node, NULL);
9759 stmt = gsi_stmt (*gsi);
9760 break;
9762 case reduc_vec_info_type:
9763 done = vectorizable_reduction (stmt_info, gsi, &vec_stmt, slp_node,
9764 slp_node_instance, NULL);
9765 gcc_assert (done);
9766 break;
9768 default:
9769 if (!STMT_VINFO_LIVE_P (stmt_info))
9771 if (dump_enabled_p ())
9772 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9773 "stmt not supported.\n");
9774 gcc_unreachable ();
9778 /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
9779 This would break hybrid SLP vectorization. */
9780 if (slp_node)
9781 gcc_assert (!vec_stmt
9782 && STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt_info);
9784 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
9785 is being vectorized, but outside the immediately enclosing loop. */
9786 if (vec_stmt
9787 && nested_p
9788 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
9789 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
9790 || STMT_VINFO_RELEVANT (stmt_info) ==
9791 vect_used_in_outer_by_reduction))
9793 struct loop *innerloop = LOOP_VINFO_LOOP (
9794 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
9795 imm_use_iterator imm_iter;
9796 use_operand_p use_p;
9797 tree scalar_dest;
9799 if (dump_enabled_p ())
9800 dump_printf_loc (MSG_NOTE, vect_location,
9801 "Record the vdef for outer-loop vectorization.\n");
9803 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
9804 (to be used when vectorizing outer-loop stmts that use the DEF of
9805 STMT). */
9806 if (gimple_code (stmt) == GIMPLE_PHI)
9807 scalar_dest = PHI_RESULT (stmt);
9808 else
9809 scalar_dest = gimple_assign_lhs (stmt);
9811 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
9812 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
9814 stmt_vec_info exit_phi_info
9815 = vinfo->lookup_stmt (USE_STMT (use_p));
9816 STMT_VINFO_VEC_STMT (exit_phi_info) = vec_stmt;
9820 /* Handle stmts whose DEF is used outside the loop-nest that is
9821 being vectorized. */
9822 if (STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
9824 done = can_vectorize_live_stmts (stmt_info, gsi, slp_node, &vec_stmt,
9825 NULL);
9826 gcc_assert (done);
9829 if (vec_stmt)
9830 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
9832 return is_store;
9836 /* Remove a group of stores (for SLP or interleaving), free their
9837 stmt_vec_info. */
9839 void
9840 vect_remove_stores (stmt_vec_info first_stmt_info)
9842 vec_info *vinfo = first_stmt_info->vinfo;
9843 stmt_vec_info next_stmt_info = first_stmt_info;
9845 while (next_stmt_info)
9847 stmt_vec_info tmp = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
9848 if (is_pattern_stmt_p (next_stmt_info))
9849 next_stmt_info = STMT_VINFO_RELATED_STMT (next_stmt_info);
9850 /* Free the attached stmt_vec_info and remove the stmt. */
9851 vinfo->remove_stmt (next_stmt_info);
9852 next_stmt_info = tmp;
9856 /* Function get_vectype_for_scalar_type_and_size.
9858 Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
9859 by the target. */
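/* For example (illustrative and target dependent): with SCALAR_TYPE
   'int' (4-byte SImode) and SIZE of 16 bytes, NUNITS is 16 / 4 = 4 and
   the result is a 4-element integer vector type, provided the target
   has such a vector mode; with SIZE of 0 the target's preferred SIMD
   mode for SImode is used instead.  */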
9861 tree
9862 get_vectype_for_scalar_type_and_size (tree scalar_type, poly_uint64 size)
9864 tree orig_scalar_type = scalar_type;
9865 scalar_mode inner_mode;
9866 machine_mode simd_mode;
9867 poly_uint64 nunits;
9868 tree vectype;
9870 if (!is_int_mode (TYPE_MODE (scalar_type), &inner_mode)
9871 && !is_float_mode (TYPE_MODE (scalar_type), &inner_mode))
9872 return NULL_TREE;
9874 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
9876 /* For vector types of elements whose mode precision doesn't
9877 match their type's precision we use an element type of mode
9878 precision. The vectorization routines will have to make sure
9879 they support the proper result truncation/extension.
9880 We also make sure to build vector types with INTEGER_TYPE
9881 component type only. */
9882 if (INTEGRAL_TYPE_P (scalar_type)
9883 && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
9884 || TREE_CODE (scalar_type) != INTEGER_TYPE))
9885 scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
9886 TYPE_UNSIGNED (scalar_type));
9888 /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
9889 When the component mode passes the above test simply use a type
9890 corresponding to that mode. The theory is that any use that
9891 would cause problems with this will disable vectorization anyway. */
9892 else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
9893 && !INTEGRAL_TYPE_P (scalar_type))
9894 scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);
9896 /* We can't build a vector type of elements with alignment bigger than
9897 their size. */
9898 else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
9899 scalar_type = lang_hooks.types.type_for_mode (inner_mode,
9900 TYPE_UNSIGNED (scalar_type));
9902 /* If we fell back to using the mode, fail if there was
9903 no scalar type for it. */
9904 if (scalar_type == NULL_TREE)
9905 return NULL_TREE;
9907 /* If no size was supplied, use the mode the target prefers. Otherwise
9908 look up a vector mode of the specified size. */
9909 if (known_eq (size, 0U))
9910 simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
9911 else if (!multiple_p (size, nbytes, &nunits)
9912 || !mode_for_vector (inner_mode, nunits).exists (&simd_mode))
9913 return NULL_TREE;
9914 /* NOTE: nunits == 1 is allowed to support single element vector types. */
9915 if (!multiple_p (GET_MODE_SIZE (simd_mode), nbytes, &nunits))
9916 return NULL_TREE;
9918 vectype = build_vector_type (scalar_type, nunits);
9920 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
9921 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
9922 return NULL_TREE;
9924 /* Re-attach the address-space qualifier if we canonicalized the scalar
9925 type. */
9926 if (TYPE_ADDR_SPACE (orig_scalar_type) != TYPE_ADDR_SPACE (vectype))
9927 return build_qualified_type
9928 (vectype, KEEP_QUAL_ADDR_SPACE (TYPE_QUALS (orig_scalar_type)));
9930 return vectype;
9933 poly_uint64 current_vector_size;
9935 /* Function get_vectype_for_scalar_type.
9937 Returns the vector type corresponding to SCALAR_TYPE as supported
9938 by the target. */
9940 tree
9941 get_vectype_for_scalar_type (tree scalar_type)
9943 tree vectype;
9944 vectype = get_vectype_for_scalar_type_and_size (scalar_type,
9945 current_vector_size);
9946 if (vectype
9947 && known_eq (current_vector_size, 0U))
9948 current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
9949 return vectype;
9952 /* Function get_mask_type_for_scalar_type.
9954 Returns the mask type corresponding to a result of comparison
9955 of vectors of specified SCALAR_TYPE as supported by target. */
9957 tree
9958 get_mask_type_for_scalar_type (tree scalar_type)
9960 tree vectype = get_vectype_for_scalar_type (scalar_type);
9962 if (!vectype)
9963 return NULL;
9965 return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype),
9966 current_vector_size);
9969 /* Function get_same_sized_vectype
9971 Returns a vector type corresponding to SCALAR_TYPE of size
9972 VECTOR_TYPE if supported by the target. */
9974 tree
9975 get_same_sized_vectype (tree scalar_type, tree vector_type)
9977 if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type))
9978 return build_same_sized_truth_vector_type (vector_type);
9980 return get_vectype_for_scalar_type_and_size
9981 (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
9984 /* Function vect_is_simple_use.
9986 Input:
9987 VINFO - the vect info of the loop or basic block that is being vectorized.
9988 OPERAND - operand in the loop or bb.
9989 Output:
9990 DEF_STMT_INFO_OUT (optional) - information about the defining stmt in
9991 case OPERAND is an SSA_NAME that is defined in the vectorizable region
9992 DEF_STMT_OUT (optional) - the defining stmt in case OPERAND is an SSA_NAME;
9993 the definition could be anywhere in the function
9994 DT - the type of definition
9996 Returns whether a stmt with OPERAND can be vectorized.
9997 For loops, supportable operands are constants, loop invariants, and operands
9998 that are defined by the current iteration of the loop. Unsupportable
9999 operands are those that are defined by a previous iteration of the loop (as
10000 is the case in reduction/induction computations).
10001 For basic blocks, supportable operands are constants and bb invariants.
10002 For now, operands defined outside the basic block are not supported. */
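/* A minimal usage sketch (based on callers elsewhere in this file;
   OP and VINFO stand for the caller's operand and vec_info):

     enum vect_def_type dt;
     if (!vect_is_simple_use (op, vinfo, &dt, NULL, NULL))
       return false;

   On success *DT describes how OP is defined.  */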
10004 bool
10005 vect_is_simple_use (tree operand, vec_info *vinfo, enum vect_def_type *dt,
10006 stmt_vec_info *def_stmt_info_out, gimple **def_stmt_out)
10008 if (def_stmt_info_out)
10009 *def_stmt_info_out = NULL;
10010 if (def_stmt_out)
10011 *def_stmt_out = NULL;
10012 *dt = vect_unknown_def_type;
10014 if (dump_enabled_p ())
10016 dump_printf_loc (MSG_NOTE, vect_location,
10017 "vect_is_simple_use: operand ");
10018 if (TREE_CODE (operand) == SSA_NAME
10019 && !SSA_NAME_IS_DEFAULT_DEF (operand))
10020 dump_gimple_expr (MSG_NOTE, TDF_SLIM, SSA_NAME_DEF_STMT (operand), 0);
10021 else
10022 dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
10025 if (CONSTANT_CLASS_P (operand))
10026 *dt = vect_constant_def;
10027 else if (is_gimple_min_invariant (operand))
10028 *dt = vect_external_def;
10029 else if (TREE_CODE (operand) != SSA_NAME)
10030 *dt = vect_unknown_def_type;
10031 else if (SSA_NAME_IS_DEFAULT_DEF (operand))
10032 *dt = vect_external_def;
10033 else
10035 gimple *def_stmt = SSA_NAME_DEF_STMT (operand);
10036 stmt_vec_info stmt_vinfo = vinfo->lookup_def (operand);
10037 if (!stmt_vinfo)
10038 *dt = vect_external_def;
10039 else
10041 if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
10043 stmt_vinfo = STMT_VINFO_RELATED_STMT (stmt_vinfo);
10044 def_stmt = stmt_vinfo->stmt;
10046 switch (gimple_code (def_stmt))
10048 case GIMPLE_PHI:
10049 case GIMPLE_ASSIGN:
10050 case GIMPLE_CALL:
10051 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
10052 break;
10053 default:
10054 *dt = vect_unknown_def_type;
10055 break;
10057 if (def_stmt_info_out)
10058 *def_stmt_info_out = stmt_vinfo;
10060 if (def_stmt_out)
10061 *def_stmt_out = def_stmt;
10064 if (dump_enabled_p ())
10066 dump_printf (MSG_NOTE, ", type of def: ");
10067 switch (*dt)
10069 case vect_uninitialized_def:
10070 dump_printf (MSG_NOTE, "uninitialized\n");
10071 break;
10072 case vect_constant_def:
10073 dump_printf (MSG_NOTE, "constant\n");
10074 break;
10075 case vect_external_def:
10076 dump_printf (MSG_NOTE, "external\n");
10077 break;
10078 case vect_internal_def:
10079 dump_printf (MSG_NOTE, "internal\n");
10080 break;
10081 case vect_induction_def:
10082 dump_printf (MSG_NOTE, "induction\n");
10083 break;
10084 case vect_reduction_def:
10085 dump_printf (MSG_NOTE, "reduction\n");
10086 break;
10087 case vect_double_reduction_def:
10088 dump_printf (MSG_NOTE, "double reduction\n");
10089 break;
10090 case vect_nested_cycle:
10091 dump_printf (MSG_NOTE, "nested cycle\n");
10092 break;
10093 case vect_unknown_def_type:
10094 dump_printf (MSG_NOTE, "unknown\n");
10095 break;
10099 if (*dt == vect_unknown_def_type)
10101 if (dump_enabled_p ())
10102 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10103 "Unsupported pattern.\n");
10104 return false;
10107 return true;
10110 /* Function vect_is_simple_use.
10112 Same as vect_is_simple_use but also determines the vector operand
10113 type of OPERAND and stores it to *VECTYPE. If the definition of
10114 OPERAND is vect_uninitialized_def, vect_constant_def or
10115 vect_external_def, *VECTYPE will be set to NULL_TREE and the caller
10116 is responsible for computing the best suited vector type for the
10117 scalar operand. */
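/* A minimal usage sketch (based on callers earlier in this file, e.g.
   vectorizable_comparison; RHS1 and VINFO are the caller's operand and
   vec_info):

     enum vect_def_type dt;
     tree vectype = NULL_TREE;
     if (!vect_is_simple_use (rhs1, vinfo, &dt, &vectype))
       return false;

   VECTYPE is left NULL_TREE for constant and external definitions.  */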
10119 bool
10120 vect_is_simple_use (tree operand, vec_info *vinfo, enum vect_def_type *dt,
10121 tree *vectype, stmt_vec_info *def_stmt_info_out,
10122 gimple **def_stmt_out)
10124 stmt_vec_info def_stmt_info;
10125 gimple *def_stmt;
10126 if (!vect_is_simple_use (operand, vinfo, dt, &def_stmt_info, &def_stmt))
10127 return false;
10129 if (def_stmt_out)
10130 *def_stmt_out = def_stmt;
10131 if (def_stmt_info_out)
10132 *def_stmt_info_out = def_stmt_info;
10134 /* Now get a vector type if the def is internal, otherwise supply
10135 NULL_TREE and leave it up to the caller to figure out a proper
10136 type for the use stmt. */
10137 if (*dt == vect_internal_def
10138 || *dt == vect_induction_def
10139 || *dt == vect_reduction_def
10140 || *dt == vect_double_reduction_def
10141 || *dt == vect_nested_cycle)
10143 *vectype = STMT_VINFO_VECTYPE (def_stmt_info);
10144 gcc_assert (*vectype != NULL_TREE);
10145 if (dump_enabled_p ())
10147 dump_printf_loc (MSG_NOTE, vect_location,
10148 "vect_is_simple_use: vectype ");
10149 dump_generic_expr (MSG_NOTE, TDF_SLIM, *vectype);
10150 dump_printf (MSG_NOTE, "\n");
10153 else if (*dt == vect_uninitialized_def
10154 || *dt == vect_constant_def
10155 || *dt == vect_external_def)
10156 *vectype = NULL_TREE;
10157 else
10158 gcc_unreachable ();
10160 return true;
10164 /* Function supportable_widening_operation
10166 Check whether an operation represented by the code CODE is a
10167 widening operation that is supported by the target platform in
10168 vector form (i.e., when operating on arguments of type VECTYPE_IN
10169 producing a result of type VECTYPE_OUT).
10171 Widening operations we currently support are NOP (CONVERT), FLOAT,
10172 FIX_TRUNC and WIDEN_MULT. This function checks if these operations
10173 are supported by the target platform either directly (via vector
10174 tree-codes), or via target builtins.
10176 Output:
10177 - CODE1 and CODE2 are codes of vector operations to be used when
10178 vectorizing the operation, if available.
10179 - MULTI_STEP_CVT determines the number of required intermediate steps in
10180 case of multi-step conversion (like char->short->int - in that case
10181 MULTI_STEP_CVT will be 1).
10182 - INTERM_TYPES contains the intermediate type required to perform the
10183 widening operation (short in the above example). */
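/* For example (illustrative and target dependent): widening a
   char -> int conversion typically yields CODE1/CODE2 of
   VEC_UNPACK_LO_EXPR/VEC_UNPACK_HI_EXPR, MULTI_STEP_CVT of 1 and
   INTERM_TYPES containing the short intermediate type, provided the
   target supports the unpack optabs at each step.  */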
10185 bool
10186 supportable_widening_operation (enum tree_code code, stmt_vec_info stmt_info,
10187 tree vectype_out, tree vectype_in,
10188 enum tree_code *code1, enum tree_code *code2,
10189 int *multi_step_cvt,
10190 vec<tree> *interm_types)
10192 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
10193 struct loop *vect_loop = NULL;
10194 machine_mode vec_mode;
10195 enum insn_code icode1, icode2;
10196 optab optab1, optab2;
10197 tree vectype = vectype_in;
10198 tree wide_vectype = vectype_out;
10199 enum tree_code c1, c2;
10200 int i;
10201 tree prev_type, intermediate_type;
10202 machine_mode intermediate_mode, prev_mode;
10203 optab optab3, optab4;
10205 *multi_step_cvt = 0;
10206 if (loop_info)
10207 vect_loop = LOOP_VINFO_LOOP (loop_info);
10209 switch (code)
10211 case WIDEN_MULT_EXPR:
10212 /* The result of a vectorized widening operation usually requires
10213 two vectors (because the widened results do not fit into one vector).
10214 The generated vector results would normally be expected to be
10215 generated in the same order as in the original scalar computation,
10216 i.e. if 8 results are generated in each vector iteration, they are
10217 to be organized as follows:
10218 vect1: [res1,res2,res3,res4],
10219 vect2: [res5,res6,res7,res8].
10221 However, in the special case that the result of the widening
10222 operation is used in a reduction computation only, the order doesn't
10223 matter (because when vectorizing a reduction we change the order of
10224 the computation). Some targets can take advantage of this and
10225 generate more efficient code. For example, targets like Altivec,
10226 that support widen_mult using a sequence of {mult_even,mult_odd}
10227 generate the following vectors:
10228 vect1: [res1,res3,res5,res7],
10229 vect2: [res2,res4,res6,res8].
10231 When vectorizing outer-loops, we execute the inner-loop sequentially
10232 (each vectorized inner-loop iteration contributes to VF outer-loop
10233 iterations in parallel). We therefore don't allow changing the
10234 order of the computation in the inner-loop during outer-loop
10235 vectorization. */
10236 /* TODO: Another case in which order doesn't *really* matter is when we
10237 widen and then contract again, e.g. (short)((int)x * y >> 8).
10238 Normally, pack_trunc performs an even/odd permute, whereas the
10239 repack from an even/odd expansion would be an interleave, which
10240 would be significantly simpler for e.g. AVX2. */
10241 /* In any case, in order to avoid duplicating the code below, recurse
10242 on VEC_WIDEN_MULT_EVEN_EXPR. If it succeeds, all the return values
10243 are properly set up for the caller. If we fail, we'll continue with
10244 a VEC_WIDEN_MULT_LO/HI_EXPR check. */
10245 if (vect_loop
10246 && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
10247 && !nested_in_vect_loop_p (vect_loop, stmt_info)
10248 && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
10249 stmt_info, vectype_out,
10250 vectype_in, code1, code2,
10251 multi_step_cvt, interm_types))
10253 /* Elements in a vector with vect_used_by_reduction property cannot
10254 be reordered if the use chain with this property does not have the
10255 same operation. One such example is s += a * b, where elements
10256 in a and b cannot be reordered. Here we check if the vector defined
10257 by STMT is only directly used in the reduction statement. */
10258 tree lhs = gimple_assign_lhs (stmt_info->stmt);
10259 stmt_vec_info use_stmt_info = loop_info->lookup_single_use (lhs);
10260 if (use_stmt_info
10261 && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
10262 return true;
10264 c1 = VEC_WIDEN_MULT_LO_EXPR;
10265 c2 = VEC_WIDEN_MULT_HI_EXPR;
10266 break;
10268 case DOT_PROD_EXPR:
10269 c1 = DOT_PROD_EXPR;
10270 c2 = DOT_PROD_EXPR;
10271 break;
10273 case SAD_EXPR:
10274 c1 = SAD_EXPR;
10275 c2 = SAD_EXPR;
10276 break;
10278 case VEC_WIDEN_MULT_EVEN_EXPR:
10279 /* Support the recursion induced just above. */
10280 c1 = VEC_WIDEN_MULT_EVEN_EXPR;
10281 c2 = VEC_WIDEN_MULT_ODD_EXPR;
10282 break;
10284 case WIDEN_LSHIFT_EXPR:
10285 c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
10286 c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
10287 break;
10289 CASE_CONVERT:
10290 c1 = VEC_UNPACK_LO_EXPR;
10291 c2 = VEC_UNPACK_HI_EXPR;
10292 break;
10294 case FLOAT_EXPR:
10295 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
10296 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
10297 break;
10299 case FIX_TRUNC_EXPR:
10300 c1 = VEC_UNPACK_FIX_TRUNC_LO_EXPR;
10301 c2 = VEC_UNPACK_FIX_TRUNC_HI_EXPR;
10302 break;
10304 default:
10305 gcc_unreachable ();
10308 if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
10309 std::swap (c1, c2);
10311 if (code == FIX_TRUNC_EXPR)
10313 /* The signedness is determined from the output operand. */
10314 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
10315 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
10317 else
10319 optab1 = optab_for_tree_code (c1, vectype, optab_default);
10320 optab2 = optab_for_tree_code (c2, vectype, optab_default);
10323 if (!optab1 || !optab2)
10324 return false;
10326 vec_mode = TYPE_MODE (vectype);
10327 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
10328 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
10329 return false;
10331 *code1 = c1;
10332 *code2 = c2;
10334 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
10335 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
10336 /* For scalar masks we may have different boolean
10337 vector types having the same QImode. Thus we
10338 add an additional check on the number of elements. */
10339 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
10340 || known_eq (TYPE_VECTOR_SUBPARTS (vectype),
10341 TYPE_VECTOR_SUBPARTS (wide_vectype) * 2));
10343 /* Check if it's a multi-step conversion that can be done using intermediate
10344 types. */
10346 prev_type = vectype;
10347 prev_mode = vec_mode;
10349 if (!CONVERT_EXPR_CODE_P (code))
10350 return false;
10352 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
10353 intermediate steps in the promotion sequence. We try
10354 MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
10355 not. */
10356 interm_types->create (MAX_INTERM_CVT_STEPS);
10357 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
10359 intermediate_mode = insn_data[icode1].operand[0].mode;
10360 if (VECTOR_BOOLEAN_TYPE_P (prev_type))
10362 intermediate_type = vect_halve_mask_nunits (prev_type);
10363 if (intermediate_mode != TYPE_MODE (intermediate_type))
10364 return false;
10366 else
10367 intermediate_type
10368 = lang_hooks.types.type_for_mode (intermediate_mode,
10369 TYPE_UNSIGNED (prev_type));
10371 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
10372 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
10374 if (!optab3 || !optab4
10375 || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
10376 || insn_data[icode1].operand[0].mode != intermediate_mode
10377 || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
10378 || insn_data[icode2].operand[0].mode != intermediate_mode
10379 || ((icode1 = optab_handler (optab3, intermediate_mode))
10380 == CODE_FOR_nothing)
10381 || ((icode2 = optab_handler (optab4, intermediate_mode))
10382 == CODE_FOR_nothing))
10383 break;
10385 interm_types->quick_push (intermediate_type);
10386 (*multi_step_cvt)++;
10388 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
10389 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
10390 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
10391 || known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type),
10392 TYPE_VECTOR_SUBPARTS (wide_vectype) * 2));
10394 prev_type = intermediate_type;
10395 prev_mode = intermediate_mode;
10398 interm_types->release ();
10399 return false;
10403 /* Function supportable_narrowing_operation
10405 Check whether an operation represented by the code CODE is a
10406 narrowing operation that is supported by the target platform in
10407 vector form (i.e., when operating on arguments of type VECTYPE_IN
10408 and producing a result of type VECTYPE_OUT).
10410 Narrowing operations we currently support are NOP (CONVERT), FIX_TRUNC
10411 and FLOAT. This function checks if these operations are supported by
10412 the target platform directly via vector tree-codes.
10414 Output:
10415 - CODE1 is the code of a vector operation to be used when
10416 vectorizing the operation, if available.
10417 - MULTI_STEP_CVT determines the number of required intermediate steps in
10418 case of multi-step conversion (like int->short->char - in that case
10419 MULTI_STEP_CVT will be 1).
10420 - INTERM_TYPES contains the intermediate type required to perform the
10421 narrowing operation (short in the above example). */
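/* For example (illustrative and target dependent): narrowing an
   int -> char conversion typically yields CODE1 of VEC_PACK_TRUNC_EXPR
   with MULTI_STEP_CVT of 1 and INTERM_TYPES containing the short
   intermediate type when the target cannot pack directly from the int
   vector to the char vector.  */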
10423 bool
10424 supportable_narrowing_operation (enum tree_code code,
10425 tree vectype_out, tree vectype_in,
10426 enum tree_code *code1, int *multi_step_cvt,
10427 vec<tree> *interm_types)
10429 machine_mode vec_mode;
10430 enum insn_code icode1;
10431 optab optab1, interm_optab;
10432 tree vectype = vectype_in;
10433 tree narrow_vectype = vectype_out;
10434 enum tree_code c1;
10435 tree intermediate_type, prev_type;
10436 machine_mode intermediate_mode, prev_mode;
10437 int i;
10438 bool uns;
10440 *multi_step_cvt = 0;
10441 switch (code)
10443 CASE_CONVERT:
10444 c1 = VEC_PACK_TRUNC_EXPR;
10445 break;
10447 case FIX_TRUNC_EXPR:
10448 c1 = VEC_PACK_FIX_TRUNC_EXPR;
10449 break;
10451 case FLOAT_EXPR:
10452 c1 = VEC_PACK_FLOAT_EXPR;
10453 break;
10455 default:
10456 gcc_unreachable ();
10459 if (code == FIX_TRUNC_EXPR)
10460 /* The signedness is determined from the output operand. */
10461 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
10462 else
10463 optab1 = optab_for_tree_code (c1, vectype, optab_default);
10465 if (!optab1)
10466 return false;
10468 vec_mode = TYPE_MODE (vectype);
10469 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
10470 return false;
10472 *code1 = c1;
10474 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
10475 /* For scalar masks we may have different boolean
10476 vector types having the same QImode. Thus we
10477 add an additional check on the number of elements. */
10478 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
10479 || known_eq (TYPE_VECTOR_SUBPARTS (vectype) * 2,
10480 TYPE_VECTOR_SUBPARTS (narrow_vectype)));
10482 if (code == FLOAT_EXPR)
10483 return false;
10485 /* Check if it's a multi-step conversion that can be done using intermediate
10486 types. */
10487 prev_mode = vec_mode;
10488 prev_type = vectype;
10489 if (code == FIX_TRUNC_EXPR)
10490 uns = TYPE_UNSIGNED (vectype_out);
10491 else
10492 uns = TYPE_UNSIGNED (vectype);
10494 /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
10495 conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
10496 costly than signed. */
10497 if (code == FIX_TRUNC_EXPR && uns)
10499 enum insn_code icode2;
10501 intermediate_type
10502 = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
10503 interm_optab
10504 = optab_for_tree_code (c1, intermediate_type, optab_default);
10505 if (interm_optab != unknown_optab
10506 && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
10507 && insn_data[icode1].operand[0].mode
10508 == insn_data[icode2].operand[0].mode)
10510 uns = false;
10511 optab1 = interm_optab;
10512 icode1 = icode2;
10516 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
10517 intermediate steps in the narrowing sequence. We try
10518 MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not. */
10519 interm_types->create (MAX_INTERM_CVT_STEPS);
10520 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
10522 intermediate_mode = insn_data[icode1].operand[0].mode;
10523 if (VECTOR_BOOLEAN_TYPE_P (prev_type))
10525 intermediate_type = vect_double_mask_nunits (prev_type);
10526 if (intermediate_mode != TYPE_MODE (intermediate_type))
10527 return false;
10529 else
10530 intermediate_type
10531 = lang_hooks.types.type_for_mode (intermediate_mode, uns);
10532 interm_optab
10533 = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
10534 optab_default);
10535 if (!interm_optab
10536 || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
10537 || insn_data[icode1].operand[0].mode != intermediate_mode
10538 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
10539 == CODE_FOR_nothing))
10540 break;
10542 interm_types->quick_push (intermediate_type);
10543 (*multi_step_cvt)++;
10545 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
10546 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
10547 || known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type) * 2,
10548 TYPE_VECTOR_SUBPARTS (narrow_vectype)));
10550 prev_mode = intermediate_mode;
10551 prev_type = intermediate_type;
10552 optab1 = interm_optab;
10555 interm_types->release ();
10556 return false;
10559 /* Generate and return a statement that sets vector mask MASK such that
10560 MASK[I] is true iff J + START_INDEX < END_INDEX for all J <= I. */
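/* A worked example of the semantics (values are illustrative only):
   with a four-element mask, START_INDEX = 5 and END_INDEX = 8,
   element I is set iff 5 + J < 8 for every J <= I, i.e. iff I < 3,
   so the mask produced is { 1, 1, 1, 0 }.  */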
10562 gcall *
10563 vect_gen_while (tree mask, tree start_index, tree end_index)
10565 tree cmp_type = TREE_TYPE (start_index);
10566 tree mask_type = TREE_TYPE (mask);
10567 gcc_checking_assert (direct_internal_fn_supported_p (IFN_WHILE_ULT,
10568 cmp_type, mask_type,
10569 OPTIMIZE_FOR_SPEED));
10570 gcall *call = gimple_build_call_internal (IFN_WHILE_ULT, 3,
10571 start_index, end_index,
10572 build_zero_cst (mask_type));
10573 gimple_call_set_lhs (call, mask);
10574 return call;
10577 /* Generate a vector mask of type MASK_TYPE for which index I is false iff
10578 J + START_INDEX < END_INDEX for all J <= I. Add the statements to SEQ. */
10580 tree
10581 vect_gen_while_not (gimple_seq *seq, tree mask_type, tree start_index,
10582 tree end_index)
10584 tree tmp = make_ssa_name (mask_type);
10585 gcall *call = vect_gen_while (tmp, start_index, end_index);
10586 gimple_seq_add_stmt (seq, call);
10587 return gimple_build (seq, BIT_NOT_EXPR, mask_type, tmp);
10590 /* Try to compute the vector types required to vectorize STMT_INFO,
10591 returning true on success and false if vectorization isn't possible.
10593 On success:
10595 - Set *STMT_VECTYPE_OUT to:
10596 - NULL_TREE if the statement doesn't need to be vectorized;
10597 - boolean_type_node if the statement is a boolean operation whose
10598 vector type can only be determined once all the other vector types
10599 are known; and
10600 - the equivalent of STMT_VINFO_VECTYPE otherwise.
10602 - Set *NUNITS_VECTYPE_OUT to the vector type that contains the maximum
10603 number of units needed to vectorize STMT_INFO, or NULL_TREE if the
10604 statement does not help to determine the overall number of units. */
10606 bool
10607 vect_get_vector_types_for_stmt (stmt_vec_info stmt_info,
10608 tree *stmt_vectype_out,
10609 tree *nunits_vectype_out)
10611 gimple *stmt = stmt_info->stmt;
10613 *stmt_vectype_out = NULL_TREE;
10614 *nunits_vectype_out = NULL_TREE;
10616 if (gimple_get_lhs (stmt) == NULL_TREE
10617 /* MASK_STORE has no lhs, but is ok. */
10618 && !gimple_call_internal_p (stmt, IFN_MASK_STORE))
10620 if (is_a <gcall *> (stmt))
10622 /* Ignore calls with no lhs. These must be calls to
10623 #pragma omp simd functions, and what vectorization factor
10624 it really needs can't be determined until
10625 vectorizable_simd_clone_call. */
10626 if (dump_enabled_p ())
10627 dump_printf_loc (MSG_NOTE, vect_location,
10628 "defer to SIMD clone analysis.\n");
10629 return true;
10632 if (dump_enabled_p ())
10634 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10635 "not vectorized: irregular stmt.");
10636 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
10638 return false;
10641 if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
10643 if (dump_enabled_p ())
10645 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10646 "not vectorized: vector stmt in loop:");
10647 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
10649 return false;
10652 tree vectype;
10653 tree scalar_type = NULL_TREE;
10654 if (STMT_VINFO_VECTYPE (stmt_info))
10655 *stmt_vectype_out = vectype = STMT_VINFO_VECTYPE (stmt_info);
10656 else
10658 gcc_assert (!STMT_VINFO_DATA_REF (stmt_info));
10659 if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
10660 scalar_type = TREE_TYPE (gimple_call_arg (stmt, 3));
10661 else
10662 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
10664 /* Pure bool ops don't participate in number-of-units computation.
10665 For comparisons use the types being compared. */
10666 if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type)
10667 && is_gimple_assign (stmt)
10668 && gimple_assign_rhs_code (stmt) != COND_EXPR)
10670 *stmt_vectype_out = boolean_type_node;
10672 tree rhs1 = gimple_assign_rhs1 (stmt);
10673 if (TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison
10674 && !VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (rhs1)))
10675 scalar_type = TREE_TYPE (rhs1);
10676 else
10678 if (dump_enabled_p ())
10679 dump_printf_loc (MSG_NOTE, vect_location,
10680 "pure bool operation.\n");
10681 return true;
10685 if (dump_enabled_p ())
10687 dump_printf_loc (MSG_NOTE, vect_location,
10688 "get vectype for scalar type: ");
10689 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
10690 dump_printf (MSG_NOTE, "\n");
10692 vectype = get_vectype_for_scalar_type (scalar_type);
10693 if (!vectype)
10695 if (dump_enabled_p ())
10697 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10698 "not vectorized: unsupported data-type ");
10699 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
10700 scalar_type);
10701 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
10703 return false;
10706 if (!*stmt_vectype_out)
10707 *stmt_vectype_out = vectype;
10709 if (dump_enabled_p ())
10711 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
10712 dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
10713 dump_printf (MSG_NOTE, "\n");
10717 /* Don't try to compute scalar types if the stmt produces a boolean
10718 vector; use the existing vector type instead. */
10719 tree nunits_vectype;
10720 if (VECTOR_BOOLEAN_TYPE_P (vectype))
10721 nunits_vectype = vectype;
10722 else
10724 /* The number of units is set according to the smallest scalar
10725 type (or the largest vector size, but we only support one
10726 vector size per vectorization). */
10727 if (*stmt_vectype_out != boolean_type_node)
10729 HOST_WIDE_INT dummy;
10730 scalar_type = vect_get_smallest_scalar_type (stmt_info,
10731 &dummy, &dummy);
10733 if (dump_enabled_p ())
10735 dump_printf_loc (MSG_NOTE, vect_location,
10736 "get vectype for scalar type: ");
10737 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
10738 dump_printf (MSG_NOTE, "\n");
10740 nunits_vectype = get_vectype_for_scalar_type (scalar_type);
10742 if (!nunits_vectype)
10744 if (dump_enabled_p ())
10746 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10747 "not vectorized: unsupported data-type ");
10748 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type);
10749 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
10751 return false;
10754 if (maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype)),
10755 GET_MODE_SIZE (TYPE_MODE (nunits_vectype))))
10757 if (dump_enabled_p ())
10759 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10760 "not vectorized: different sized vector "
10761 "types in statement, ");
10762 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, vectype);
10763 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
10764 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, nunits_vectype);
10765 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
10767 return false;
10770 if (dump_enabled_p ())
10772 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
10773 dump_generic_expr (MSG_NOTE, TDF_SLIM, nunits_vectype);
10774 dump_printf (MSG_NOTE, "\n");
10776 dump_printf_loc (MSG_NOTE, vect_location, "nunits = ");
10777 dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (nunits_vectype));
10778 dump_printf (MSG_NOTE, "\n");
10781 *nunits_vectype_out = nunits_vectype;
10782 return true;
10785 /* Try to determine the correct vector type for STMT_INFO, which is a
10786 statement that produces a scalar boolean result. Return the vector
10787 type on success, otherwise return NULL_TREE. */
10789 tree
10790 vect_get_mask_type_for_stmt (stmt_vec_info stmt_info)
10792 gimple *stmt = stmt_info->stmt;
10793 tree mask_type = NULL;
10794 tree vectype, scalar_type;
10796 if (is_gimple_assign (stmt)
10797 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison
10798 && !VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt))))
10800 scalar_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
10801 mask_type = get_mask_type_for_scalar_type (scalar_type);
10803 if (!mask_type)
10805 if (dump_enabled_p ())
10806 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10807 "not vectorized: unsupported mask\n");
10808 return NULL_TREE;
10811 else
10813 tree rhs;
10814 ssa_op_iter iter;
10815 enum vect_def_type dt;
10817 FOR_EACH_SSA_TREE_OPERAND (rhs, stmt, iter, SSA_OP_USE)
10819 if (!vect_is_simple_use (rhs, stmt_info->vinfo, &dt, &vectype))
10821 if (dump_enabled_p ())
10823 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10824 "not vectorized: can't compute mask type "
10825 "for statement, ");
10826 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt,
10829 return NULL_TREE;
10832 /* No vectype probably means external definition.
10833 Allow it in case there is another operand which
10834 allows the mask type to be determined. */
10835 if (!vectype)
10836 continue;
10838 if (!mask_type)
10839 mask_type = vectype;
10840 else if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_type),
10841 TYPE_VECTOR_SUBPARTS (vectype)))
10843 if (dump_enabled_p ())
10845 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10846 "not vectorized: different sized masks "
10847 "types in statement, ");
10848 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
10849 mask_type);
10850 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
10851 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
10852 vectype);
10853 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
10855 return NULL_TREE;
10857 else if (VECTOR_BOOLEAN_TYPE_P (mask_type)
10858 != VECTOR_BOOLEAN_TYPE_P (vectype))
10860 if (dump_enabled_p ())
10862 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10863 "not vectorized: mixed mask and "
10864 "nonmask vector types in statement, ");
10865 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
10866 mask_type);
10867 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
10868 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
10869 vectype);
10870 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
10872 return NULL_TREE;
10876 /* We may compare a boolean value loaded as a vector of integers.
10877 Fix mask_type in such a case. */
10878 if (mask_type
10879 && !VECTOR_BOOLEAN_TYPE_P (mask_type)
10880 && gimple_code (stmt) == GIMPLE_ASSIGN
10881 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
10882 mask_type = build_same_sized_truth_vector_type (mask_type);
10885 /* No mask_type should mean a loop-invariant predicate.
10886 This is probably a subject for optimization in if-conversion. */
10887 if (!mask_type && dump_enabled_p ())
10889 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10890 "not vectorized: can't compute mask type "
10891 "for statement, ");
10892 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
10894 return mask_type;