PR middle-end/84095 - false-positive -Wrestrict warnings for memcpy within array
[official-gcc.git] / gcc / tree-vect-stmts.c
blob a98e0e5e2593bb4a46a1d338f8750c8bbbb004bb
1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003-2018 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
4 and Ira Rosen <irar@il.ibm.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "ssa.h"
31 #include "optabs-tree.h"
32 #include "insn-config.h"
33 #include "recog.h" /* FIXME: for insn_data */
34 #include "cgraph.h"
35 #include "dumpfile.h"
36 #include "alias.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
39 #include "tree-eh.h"
40 #include "gimplify.h"
41 #include "gimple-iterator.h"
42 #include "gimplify-me.h"
43 #include "tree-cfg.h"
44 #include "tree-ssa-loop-manip.h"
45 #include "cfgloop.h"
46 #include "tree-ssa-loop.h"
47 #include "tree-scalar-evolution.h"
48 #include "tree-vectorizer.h"
49 #include "builtins.h"
50 #include "internal-fn.h"
51 #include "tree-vector-builder.h"
52 #include "vec-perm-indices.h"
53 #include "tree-ssa-loop-niter.h"
54 #include "gimple-fold.h"
56 /* For lang_hooks.types.type_for_mode. */
57 #include "langhooks.h"
59 /* Return the vectorized type for the given statement. */
61 tree
62 stmt_vectype (struct _stmt_vec_info *stmt_info)
64 return STMT_VINFO_VECTYPE (stmt_info);
67 /* Return TRUE iff the given statement is in an inner loop relative to
68 the loop being vectorized. */
69 bool
70 stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
72 gimple *stmt = STMT_VINFO_STMT (stmt_info);
73 basic_block bb = gimple_bb (stmt);
74 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
75 struct loop* loop;
77 if (!loop_vinfo)
78 return false;
80 loop = LOOP_VINFO_LOOP (loop_vinfo);
82 return (bb->loop_father == loop->inner);
85 /* Record the cost of a statement, either by directly informing the
86 target model or by saving it in a vector for later processing.
87 Return a preliminary estimate of the statement's cost. */
89 unsigned
90 record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
91 enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
92 int misalign, enum vect_cost_model_location where)
94 if ((kind == vector_load || kind == unaligned_load)
95 && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
96 kind = vector_gather_load;
97 if ((kind == vector_store || kind == unaligned_store)
98 && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
99 kind = vector_scatter_store;
100 if (body_cost_vec)
102 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
103 stmt_info_for_cost si = { count, kind,
104 stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
105 misalign };
106 body_cost_vec->safe_push (si);
107 return (unsigned)
108 (builtin_vectorization_cost (kind, vectype, misalign) * count);
110 else
111 return add_stmt_cost (stmt_info->vinfo->target_cost_data,
112 count, kind, stmt_info, misalign, where);
115 /* Return a variable of type ELEM_TYPE[NELEMS]. */
117 static tree
118 create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
120 return create_tmp_var (build_array_type_nelts (elem_type, nelems),
121 "vect_array");
124 /* ARRAY is an array of vectors created by create_vector_array.
125 Return an SSA_NAME for the vector in index N. The reference
126 is part of the vectorization of STMT and the vector is associated
127 with scalar destination SCALAR_DEST. */
129 static tree
130 read_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
131 tree array, unsigned HOST_WIDE_INT n)
133 tree vect_type, vect, vect_name, array_ref;
134 gimple *new_stmt;
136 gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
137 vect_type = TREE_TYPE (TREE_TYPE (array));
138 vect = vect_create_destination_var (scalar_dest, vect_type);
139 array_ref = build4 (ARRAY_REF, vect_type, array,
140 build_int_cst (size_type_node, n),
141 NULL_TREE, NULL_TREE);
143 new_stmt = gimple_build_assign (vect, array_ref);
144 vect_name = make_ssa_name (vect, new_stmt);
145 gimple_assign_set_lhs (new_stmt, vect_name);
146 vect_finish_stmt_generation (stmt, new_stmt, gsi);
148 return vect_name;
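/* For example, with N == 2 and scalar destination x, the code above
   emits a GIMPLE assignment along the lines of

     vect_x.7 = vect_array[2];

   and returns the SSA name on its left-hand side (the names here are
   illustrative only).  */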
151 /* ARRAY is an array of vectors created by create_vector_array.
152 Emit code to store SSA_NAME VECT in index N of the array.
153 The store is part of the vectorization of STMT. */
155 static void
156 write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect,
157 tree array, unsigned HOST_WIDE_INT n)
159 tree array_ref;
160 gimple *new_stmt;
162 array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
163 build_int_cst (size_type_node, n),
164 NULL_TREE, NULL_TREE);
166 new_stmt = gimple_build_assign (array_ref, vect);
167 vect_finish_stmt_generation (stmt, new_stmt, gsi);
170 /* PTR is a pointer to an array of type TYPE. Return a representation
171 of *PTR. The memory reference replaces those in FIRST_DR
172 (and its group). */
174 static tree
175 create_array_ref (tree type, tree ptr, tree alias_ptr_type)
177 tree mem_ref;
179 mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
180 /* Arrays have the same alignment as their type. */
181 set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
182 return mem_ref;
185 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
187 /* Function vect_mark_relevant.
189 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
191 static void
192 vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
193 enum vect_relevant relevant, bool live_p)
195 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
196 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
197 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
198 gimple *pattern_stmt;
200 if (dump_enabled_p ())
202 dump_printf_loc (MSG_NOTE, vect_location,
203 "mark relevant %d, live %d: ", relevant, live_p);
204 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
207 /* If this stmt is an original stmt in a pattern, we might need to mark its
208 related pattern stmt instead of the original stmt. However, such stmts
 209    related pattern stmt instead of the original stmt.  However, such stmts
210 stmt itself should be marked. */
211 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
213 /* This is the last stmt in a sequence that was detected as a
214 pattern that can potentially be vectorized. Don't mark the stmt
215 as relevant/live because it's not going to be vectorized.
216 Instead mark the pattern-stmt that replaces it. */
218 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
220 if (dump_enabled_p ())
221 dump_printf_loc (MSG_NOTE, vect_location,
222 "last stmt in pattern. don't mark"
223 " relevant/live.\n");
224 stmt_info = vinfo_for_stmt (pattern_stmt);
225 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
226 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
227 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
228 stmt = pattern_stmt;
231 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
232 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
233 STMT_VINFO_RELEVANT (stmt_info) = relevant;
235 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
236 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
238 if (dump_enabled_p ())
239 dump_printf_loc (MSG_NOTE, vect_location,
240 "already marked relevant/live.\n");
241 return;
244 worklist->safe_push (stmt);
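/* Note that the vect_relevant values are ordered from least to most
   relevant, so the code above keeps the maximum of the previous and the
   new marking, and only re-pushes the stmt on the worklist when either
   the relevance or the liveness actually changed.  */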
248 /* Function is_simple_and_all_uses_invariant
250 Return true if STMT is simple and all uses of it are invariant. */
252 bool
253 is_simple_and_all_uses_invariant (gimple *stmt, loop_vec_info loop_vinfo)
255 tree op;
256 gimple *def_stmt;
257 ssa_op_iter iter;
259 if (!is_gimple_assign (stmt))
260 return false;
262 FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
264 enum vect_def_type dt = vect_uninitialized_def;
266 if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt))
268 if (dump_enabled_p ())
269 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
270 "use not simple.\n");
271 return false;
274 if (dt != vect_external_def && dt != vect_constant_def)
275 return false;
277 return true;
280 /* Function vect_stmt_relevant_p.
282 Return true if STMT in loop that is represented by LOOP_VINFO is
283 "relevant for vectorization".
285 A stmt is considered "relevant for vectorization" if:
286 - it has uses outside the loop.
287 - it has vdefs (it alters memory).
 288    - it is a control stmt in the loop (other than the loop exit condition).
290 CHECKME: what other side effects would the vectorizer allow? */
292 static bool
293 vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
294 enum vect_relevant *relevant, bool *live_p)
296 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
297 ssa_op_iter op_iter;
298 imm_use_iterator imm_iter;
299 use_operand_p use_p;
300 def_operand_p def_p;
302 *relevant = vect_unused_in_scope;
303 *live_p = false;
305 /* cond stmt other than loop exit cond. */
306 if (is_ctrl_stmt (stmt)
307 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
308 != loop_exit_ctrl_vec_info_type)
309 *relevant = vect_used_in_scope;
311 /* changing memory. */
312 if (gimple_code (stmt) != GIMPLE_PHI)
313 if (gimple_vdef (stmt)
314 && !gimple_clobber_p (stmt))
316 if (dump_enabled_p ())
317 dump_printf_loc (MSG_NOTE, vect_location,
318 "vec_stmt_relevant_p: stmt has vdefs.\n");
319 *relevant = vect_used_in_scope;
322 /* uses outside the loop. */
323 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
325 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
327 basic_block bb = gimple_bb (USE_STMT (use_p));
328 if (!flow_bb_inside_loop_p (loop, bb))
330 if (dump_enabled_p ())
331 dump_printf_loc (MSG_NOTE, vect_location,
332 "vec_stmt_relevant_p: used out of loop.\n");
334 if (is_gimple_debug (USE_STMT (use_p)))
335 continue;
337 /* We expect all such uses to be in the loop exit phis
 338	 (because of loop-closed SSA form).  */
339 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
340 gcc_assert (bb == single_exit (loop)->dest);
342 *live_p = true;
347 if (*live_p && *relevant == vect_unused_in_scope
348 && !is_simple_and_all_uses_invariant (stmt, loop_vinfo))
350 if (dump_enabled_p ())
351 dump_printf_loc (MSG_NOTE, vect_location,
352 "vec_stmt_relevant_p: stmt live but not relevant.\n");
353 *relevant = vect_used_only_live;
356 return (*live_p || *relevant);
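/* For example, a store "a[i] = x" is relevant because it has a vdef,
   whereas a computation whose only uses are in the loop exit PHIs is
   marked live and, unless it is invariant, vect_used_only_live.  */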
360 /* Function exist_non_indexing_operands_for_use_p
362 USE is one of the uses attached to STMT. Check if USE is
363 used in STMT for anything other than indexing an array. */
365 static bool
366 exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)
368 tree operand;
369 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
371 /* USE corresponds to some operand in STMT. If there is no data
372 reference in STMT, then any operand that corresponds to USE
373 is not indexing an array. */
374 if (!STMT_VINFO_DATA_REF (stmt_info))
375 return true;
 377   /* STMT has a data_ref. FORNOW this means that it is one of
378 the following forms:
379 -1- ARRAY_REF = var
380 -2- var = ARRAY_REF
381 (This should have been verified in analyze_data_refs).
383 'var' in the second case corresponds to a def, not a use,
384 so USE cannot correspond to any operands that are not used
385 for array indexing.
387 Therefore, all we need to check is if STMT falls into the
388 first case, and whether var corresponds to USE. */
390 if (!gimple_assign_copy_p (stmt))
392 if (is_gimple_call (stmt)
393 && gimple_call_internal_p (stmt))
395 internal_fn ifn = gimple_call_internal_fn (stmt);
396 int mask_index = internal_fn_mask_index (ifn);
397 if (mask_index >= 0
398 && use == gimple_call_arg (stmt, mask_index))
399 return true;
400 int stored_value_index = internal_fn_stored_value_index (ifn);
401 if (stored_value_index >= 0
402 && use == gimple_call_arg (stmt, stored_value_index))
403 return true;
404 if (internal_gather_scatter_fn_p (ifn)
405 && use == gimple_call_arg (stmt, 1))
406 return true;
408 return false;
411 if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
412 return false;
413 operand = gimple_assign_rhs1 (stmt);
414 if (TREE_CODE (operand) != SSA_NAME)
415 return false;
417 if (operand == use)
418 return true;
420 return false;
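/* For example, for "a[i_1] = x_2" the use x_2 is a non-indexing operand
   (the function returns true for it), while i_1 only participates in the
   address computation and gets false.  */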
 425 /* Function process_use.
427 Inputs:
428 - a USE in STMT in a loop represented by LOOP_VINFO
429 - RELEVANT - enum value to be set in the STMT_VINFO of the stmt
430 that defined USE. This is done by calling mark_relevant and passing it
431 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
432 - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
433 be performed.
435 Outputs:
436 Generally, LIVE_P and RELEVANT are used to define the liveness and
437 relevance info of the DEF_STMT of this USE:
438 STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
439 STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
440 Exceptions:
441 - case 1: If USE is used only for address computations (e.g. array indexing),
442 which does not need to be directly vectorized, then the liveness/relevance
443 of the respective DEF_STMT is left unchanged.
444 - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
 445    skip DEF_STMT because it has already been processed.
446 - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
447 be modified accordingly.
449 Return true if everything is as expected. Return false otherwise. */
451 static bool
452 process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo,
453 enum vect_relevant relevant, vec<gimple *> *worklist,
454 bool force)
456 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
457 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
458 stmt_vec_info dstmt_vinfo;
459 basic_block bb, def_bb;
460 gimple *def_stmt;
461 enum vect_def_type dt;
463 /* case 1: we are only interested in uses that need to be vectorized. Uses
464 that are used for address computation are not considered relevant. */
465 if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
466 return true;
468 if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt))
470 if (dump_enabled_p ())
471 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
472 "not vectorized: unsupported use in stmt.\n");
473 return false;
476 if (!def_stmt || gimple_nop_p (def_stmt))
477 return true;
479 def_bb = gimple_bb (def_stmt);
480 if (!flow_bb_inside_loop_p (loop, def_bb))
482 if (dump_enabled_p ())
483 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
484 return true;
487 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
488 DEF_STMT must have already been processed, because this should be the
489 only way that STMT, which is a reduction-phi, was put in the worklist,
490 as there should be no other uses for DEF_STMT in the loop. So we just
491 check that everything is as expected, and we are done. */
492 dstmt_vinfo = vinfo_for_stmt (def_stmt);
493 bb = gimple_bb (stmt);
494 if (gimple_code (stmt) == GIMPLE_PHI
495 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
496 && gimple_code (def_stmt) != GIMPLE_PHI
497 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
498 && bb->loop_father == def_bb->loop_father)
500 if (dump_enabled_p ())
501 dump_printf_loc (MSG_NOTE, vect_location,
502 "reduc-stmt defining reduc-phi in the same nest.\n");
503 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
504 dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
505 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
506 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
507 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
508 return true;
511 /* case 3a: outer-loop stmt defining an inner-loop stmt:
512 outer-loop-header-bb:
513 d = def_stmt
514 inner-loop:
515 stmt # use (d)
516 outer-loop-tail-bb:
517 ... */
518 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
520 if (dump_enabled_p ())
521 dump_printf_loc (MSG_NOTE, vect_location,
522 "outer-loop def-stmt defining inner-loop stmt.\n");
524 switch (relevant)
526 case vect_unused_in_scope:
527 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
528 vect_used_in_scope : vect_unused_in_scope;
529 break;
531 case vect_used_in_outer_by_reduction:
532 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
533 relevant = vect_used_by_reduction;
534 break;
536 case vect_used_in_outer:
537 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
538 relevant = vect_used_in_scope;
539 break;
541 case vect_used_in_scope:
542 break;
544 default:
545 gcc_unreachable ();
549 /* case 3b: inner-loop stmt defining an outer-loop stmt:
550 outer-loop-header-bb:
552 inner-loop:
553 d = def_stmt
554 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
555 stmt # use (d) */
556 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
558 if (dump_enabled_p ())
559 dump_printf_loc (MSG_NOTE, vect_location,
560 "inner-loop def-stmt defining outer-loop stmt.\n");
562 switch (relevant)
564 case vect_unused_in_scope:
565 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
566 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
567 vect_used_in_outer_by_reduction : vect_unused_in_scope;
568 break;
570 case vect_used_by_reduction:
571 case vect_used_only_live:
572 relevant = vect_used_in_outer_by_reduction;
573 break;
575 case vect_used_in_scope:
576 relevant = vect_used_in_outer;
577 break;
579 default:
580 gcc_unreachable ();
583 /* We are also not interested in uses on loop PHI backedges that are
584 inductions. Otherwise we'll needlessly vectorize the IV increment
585 and cause hybrid SLP for SLP inductions. Unless the PHI is live
586 of course. */
587 else if (gimple_code (stmt) == GIMPLE_PHI
588 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_induction_def
589 && ! STMT_VINFO_LIVE_P (stmt_vinfo)
590 && (PHI_ARG_DEF_FROM_EDGE (stmt, loop_latch_edge (bb->loop_father))
591 == use))
593 if (dump_enabled_p ())
594 dump_printf_loc (MSG_NOTE, vect_location,
595 "induction value on backedge.\n");
596 return true;
600 vect_mark_relevant (worklist, def_stmt, relevant, false);
601 return true;
605 /* Function vect_mark_stmts_to_be_vectorized.
607 Not all stmts in the loop need to be vectorized. For example:
609 for i...
610 for j...
611 1. T0 = i + j
612 2. T1 = a[T0]
614 3. j = j + 1
 616    Stmts 1 and 3 do not need to be vectorized, because loop control and
617 addressing of vectorized data-refs are handled differently.
619 This pass detects such stmts. */
621 bool
622 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
624 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
625 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
626 unsigned int nbbs = loop->num_nodes;
627 gimple_stmt_iterator si;
628 gimple *stmt;
629 unsigned int i;
630 stmt_vec_info stmt_vinfo;
631 basic_block bb;
632 gimple *phi;
633 bool live_p;
634 enum vect_relevant relevant;
636 if (dump_enabled_p ())
637 dump_printf_loc (MSG_NOTE, vect_location,
638 "=== vect_mark_stmts_to_be_vectorized ===\n");
640 auto_vec<gimple *, 64> worklist;
642 /* 1. Init worklist. */
643 for (i = 0; i < nbbs; i++)
645 bb = bbs[i];
646 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
648 phi = gsi_stmt (si);
649 if (dump_enabled_p ())
651 dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
652 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
655 if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
656 vect_mark_relevant (&worklist, phi, relevant, live_p);
658 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
660 stmt = gsi_stmt (si);
661 if (dump_enabled_p ())
663 dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
664 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
667 if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
668 vect_mark_relevant (&worklist, stmt, relevant, live_p);
672 /* 2. Process_worklist */
673 while (worklist.length () > 0)
675 use_operand_p use_p;
676 ssa_op_iter iter;
678 stmt = worklist.pop ();
679 if (dump_enabled_p ())
681 dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
682 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
685 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
686 (DEF_STMT) as relevant/irrelevant according to the relevance property
687 of STMT. */
688 stmt_vinfo = vinfo_for_stmt (stmt);
689 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
691 /* Generally, the relevance property of STMT (in STMT_VINFO_RELEVANT) is
692 propagated as is to the DEF_STMTs of its USEs.
694 One exception is when STMT has been identified as defining a reduction
695 variable; in this case we set the relevance to vect_used_by_reduction.
696 This is because we distinguish between two kinds of relevant stmts -
697 those that are used by a reduction computation, and those that are
698 (also) used by a regular computation. This allows us later on to
699 identify stmts that are used solely by a reduction, and therefore the
700 order of the results that they produce does not have to be kept. */
702 switch (STMT_VINFO_DEF_TYPE (stmt_vinfo))
704 case vect_reduction_def:
705 gcc_assert (relevant != vect_unused_in_scope);
706 if (relevant != vect_unused_in_scope
707 && relevant != vect_used_in_scope
708 && relevant != vect_used_by_reduction
709 && relevant != vect_used_only_live)
711 if (dump_enabled_p ())
712 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
713 "unsupported use of reduction.\n");
714 return false;
716 break;
718 case vect_nested_cycle:
719 if (relevant != vect_unused_in_scope
720 && relevant != vect_used_in_outer_by_reduction
721 && relevant != vect_used_in_outer)
723 if (dump_enabled_p ())
724 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
725 "unsupported use of nested cycle.\n");
727 return false;
729 break;
731 case vect_double_reduction_def:
732 if (relevant != vect_unused_in_scope
733 && relevant != vect_used_by_reduction
734 && relevant != vect_used_only_live)
736 if (dump_enabled_p ())
737 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
738 "unsupported use of double reduction.\n");
740 return false;
742 break;
744 default:
745 break;
748 if (is_pattern_stmt_p (stmt_vinfo))
750 /* Pattern statements are not inserted into the code, so
751 FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
752 have to scan the RHS or function arguments instead. */
753 if (is_gimple_assign (stmt))
755 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
756 tree op = gimple_assign_rhs1 (stmt);
758 i = 1;
759 if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
761 if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
762 relevant, &worklist, false)
763 || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
764 relevant, &worklist, false))
765 return false;
766 i = 2;
768 for (; i < gimple_num_ops (stmt); i++)
770 op = gimple_op (stmt, i);
771 if (TREE_CODE (op) == SSA_NAME
772 && !process_use (stmt, op, loop_vinfo, relevant,
773 &worklist, false))
774 return false;
777 else if (is_gimple_call (stmt))
779 for (i = 0; i < gimple_call_num_args (stmt); i++)
781 tree arg = gimple_call_arg (stmt, i);
782 if (!process_use (stmt, arg, loop_vinfo, relevant,
783 &worklist, false))
784 return false;
788 else
789 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
791 tree op = USE_FROM_PTR (use_p);
792 if (!process_use (stmt, op, loop_vinfo, relevant,
793 &worklist, false))
794 return false;
797 if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
799 gather_scatter_info gs_info;
800 if (!vect_check_gather_scatter (stmt, loop_vinfo, &gs_info))
801 gcc_unreachable ();
802 if (!process_use (stmt, gs_info.offset, loop_vinfo, relevant,
803 &worklist, true))
804 return false;
806 } /* while worklist */
808 return true;
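/* The worklist loop above is a backward propagation over use-def chains:
   starting from the stmts that are relevant by themselves (stores, uses
   outside the loop, ...), process_use marks the defining stmts of their
   operands, adjusting the relevance when crossing loop-nest boundaries,
   until no new stmts are added.  */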
812 /* Function vect_model_simple_cost.
814 Models cost for simple operations, i.e. those that only emit ncopies of a
815 single op. Right now, this does not account for multiple insns that could
816 be generated for the single vector op. We will handle that shortly. */
818 void
819 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
820 enum vect_def_type *dt,
821 int ndts,
822 stmt_vector_for_cost *prologue_cost_vec,
823 stmt_vector_for_cost *body_cost_vec)
825 int i;
826 int inside_cost = 0, prologue_cost = 0;
828 /* The SLP costs were already calculated during SLP tree build. */
829 gcc_assert (!PURE_SLP_STMT (stmt_info));
 831   /* Cost the "broadcast" of a scalar operand into a vector operand.
832 Use scalar_to_vec to cost the broadcast, as elsewhere in the vector
833 cost model. */
834 for (i = 0; i < ndts; i++)
835 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
836 prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
837 stmt_info, 0, vect_prologue);
839 /* Pass the inside-of-loop statements to the target-specific cost model. */
840 inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
841 stmt_info, 0, vect_body);
843 if (dump_enabled_p ())
844 dump_printf_loc (MSG_NOTE, vect_location,
845 "vect_model_simple_cost: inside_cost = %d, "
846 "prologue_cost = %d .\n", inside_cost, prologue_cost);
850 /* Model cost for type demotion and promotion operations. PWR is normally
851 zero for single-step promotions and demotions. It will be one if
852 two-step promotion/demotion is required, and so on. Each additional
853 step doubles the number of instructions required. */
855 static void
856 vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
857 enum vect_def_type *dt, int pwr)
859 int i, tmp;
860 int inside_cost = 0, prologue_cost = 0;
861 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
862 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
863 void *target_cost_data;
865 /* The SLP costs were already calculated during SLP tree build. */
866 gcc_assert (!PURE_SLP_STMT (stmt_info));
868 if (loop_vinfo)
869 target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
870 else
871 target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);
873 for (i = 0; i < pwr + 1; i++)
875 tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
876 (i + 1) : i;
877 inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
878 vec_promote_demote, stmt_info, 0,
879 vect_body);
 882   /* FORNOW: Assuming a maximum of 2 args per stmt.  */
883 for (i = 0; i < 2; i++)
884 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
885 prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
886 stmt_info, 0, vect_prologue);
888 if (dump_enabled_p ())
889 dump_printf_loc (MSG_NOTE, vect_location,
890 "vect_model_promotion_demotion_cost: inside_cost = %d, "
891 "prologue_cost = %d .\n", inside_cost, prologue_cost);
894 /* Function vect_model_store_cost
896 Models cost for stores. In the case of grouped accesses, one access
897 has the overhead of the grouped access attributed to it. */
899 void
900 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
901 vect_memory_access_type memory_access_type,
902 vec_load_store_type vls_type, slp_tree slp_node,
903 stmt_vector_for_cost *prologue_cost_vec,
904 stmt_vector_for_cost *body_cost_vec)
906 unsigned int inside_cost = 0, prologue_cost = 0;
907 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
908 gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
909 bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);
911 if (vls_type == VLS_STORE_INVARIANT)
912 prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
913 stmt_info, 0, vect_prologue);
915 /* Grouped stores update all elements in the group at once,
916 so we want the DR for the first statement. */
917 if (!slp_node && grouped_access_p)
919 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
920 dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
923 /* True if we should include any once-per-group costs as well as
924 the cost of the statement itself. For SLP we only get called
925 once per group anyhow. */
926 bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));
928 /* We assume that the cost of a single store-lanes instruction is
929 equivalent to the cost of GROUP_SIZE separate stores. If a grouped
930 access is instead being provided by a permute-and-store operation,
931 include the cost of the permutes. */
932 if (first_stmt_p
933 && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
 935       /* Uses high and low interleave or shuffle operations for each
936 needed permute. */
937 int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
938 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
939 inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
940 stmt_info, 0, vect_body);
942 if (dump_enabled_p ())
943 dump_printf_loc (MSG_NOTE, vect_location,
944 "vect_model_store_cost: strided group_size = %d .\n",
945 group_size);
948 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
949 /* Costs of the stores. */
950 if (memory_access_type == VMAT_ELEMENTWISE
951 || memory_access_type == VMAT_GATHER_SCATTER)
953 /* N scalar stores plus extracting the elements. */
954 unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
955 inside_cost += record_stmt_cost (body_cost_vec,
956 ncopies * assumed_nunits,
957 scalar_store, stmt_info, 0, vect_body);
959 else
960 vect_get_store_cost (dr, ncopies, &inside_cost, body_cost_vec);
962 if (memory_access_type == VMAT_ELEMENTWISE
963 || memory_access_type == VMAT_STRIDED_SLP)
965 /* N scalar stores plus extracting the elements. */
966 unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
967 inside_cost += record_stmt_cost (body_cost_vec,
968 ncopies * assumed_nunits,
969 vec_to_scalar, stmt_info, 0, vect_body);
972 if (dump_enabled_p ())
973 dump_printf_loc (MSG_NOTE, vect_location,
974 "vect_model_store_cost: inside_cost = %d, "
975 "prologue_cost = %d .\n", inside_cost, prologue_cost);
979 /* Calculate cost of DR's memory access. */
980 void
981 vect_get_store_cost (struct data_reference *dr, int ncopies,
982 unsigned int *inside_cost,
983 stmt_vector_for_cost *body_cost_vec)
985 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
986 gimple *stmt = DR_STMT (dr);
987 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
989 switch (alignment_support_scheme)
991 case dr_aligned:
993 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
994 vector_store, stmt_info, 0,
995 vect_body);
997 if (dump_enabled_p ())
998 dump_printf_loc (MSG_NOTE, vect_location,
999 "vect_model_store_cost: aligned.\n");
1000 break;
1003 case dr_unaligned_supported:
1005 /* Here, we assign an additional cost for the unaligned store. */
1006 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1007 unaligned_store, stmt_info,
1008 DR_MISALIGNMENT (dr), vect_body);
1009 if (dump_enabled_p ())
1010 dump_printf_loc (MSG_NOTE, vect_location,
1011 "vect_model_store_cost: unaligned supported by "
1012 "hardware.\n");
1013 break;
1016 case dr_unaligned_unsupported:
1018 *inside_cost = VECT_MAX_COST;
1020 if (dump_enabled_p ())
1021 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1022 "vect_model_store_cost: unsupported access.\n");
1023 break;
1026 default:
1027 gcc_unreachable ();
1032 /* Function vect_model_load_cost
1034 Models cost for loads. In the case of grouped accesses, one access has
1035 the overhead of the grouped access attributed to it. Since unaligned
1036 accesses are supported for loads, we also account for the costs of the
1037 access scheme chosen. */
1039 void
1040 vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
1041 vect_memory_access_type memory_access_type,
1042 slp_tree slp_node,
1043 stmt_vector_for_cost *prologue_cost_vec,
1044 stmt_vector_for_cost *body_cost_vec)
1046 gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
1047 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
1048 unsigned int inside_cost = 0, prologue_cost = 0;
1049 bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);
1051 /* Grouped loads read all elements in the group at once,
1052 so we want the DR for the first statement. */
1053 if (!slp_node && grouped_access_p)
1055 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
1056 dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
1059 /* True if we should include any once-per-group costs as well as
1060 the cost of the statement itself. For SLP we only get called
1061 once per group anyhow. */
1062 bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));
1064 /* We assume that the cost of a single load-lanes instruction is
1065 equivalent to the cost of GROUP_SIZE separate loads. If a grouped
1066 access is instead being provided by a load-and-permute operation,
1067 include the cost of the permutes. */
1068 if (first_stmt_p
1069 && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
 1071       /* Uses even and odd extract operations or shuffle operations
1072 for each needed permute. */
1073 int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
1074 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
1075 inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
1076 stmt_info, 0, vect_body);
1078 if (dump_enabled_p ())
1079 dump_printf_loc (MSG_NOTE, vect_location,
1080 "vect_model_load_cost: strided group_size = %d .\n",
1081 group_size);
1084 /* The loads themselves. */
1085 if (memory_access_type == VMAT_ELEMENTWISE
1086 || memory_access_type == VMAT_GATHER_SCATTER)
1088 /* N scalar loads plus gathering them into a vector. */
1089 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1090 unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
1091 inside_cost += record_stmt_cost (body_cost_vec,
1092 ncopies * assumed_nunits,
1093 scalar_load, stmt_info, 0, vect_body);
1095 else
1096 vect_get_load_cost (dr, ncopies, first_stmt_p,
1097 &inside_cost, &prologue_cost,
1098 prologue_cost_vec, body_cost_vec, true);
1099 if (memory_access_type == VMAT_ELEMENTWISE
1100 || memory_access_type == VMAT_STRIDED_SLP)
1101 inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
1102 stmt_info, 0, vect_body);
1104 if (dump_enabled_p ())
1105 dump_printf_loc (MSG_NOTE, vect_location,
1106 "vect_model_load_cost: inside_cost = %d, "
1107 "prologue_cost = %d .\n", inside_cost, prologue_cost);
1111 /* Calculate cost of DR's memory access. */
1112 void
1113 vect_get_load_cost (struct data_reference *dr, int ncopies,
1114 bool add_realign_cost, unsigned int *inside_cost,
1115 unsigned int *prologue_cost,
1116 stmt_vector_for_cost *prologue_cost_vec,
1117 stmt_vector_for_cost *body_cost_vec,
1118 bool record_prologue_costs)
1120 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
1121 gimple *stmt = DR_STMT (dr);
1122 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1124 switch (alignment_support_scheme)
1126 case dr_aligned:
1128 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1129 stmt_info, 0, vect_body);
1131 if (dump_enabled_p ())
1132 dump_printf_loc (MSG_NOTE, vect_location,
1133 "vect_model_load_cost: aligned.\n");
1135 break;
1137 case dr_unaligned_supported:
1139 /* Here, we assign an additional cost for the unaligned load. */
1140 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1141 unaligned_load, stmt_info,
1142 DR_MISALIGNMENT (dr), vect_body);
1144 if (dump_enabled_p ())
1145 dump_printf_loc (MSG_NOTE, vect_location,
1146 "vect_model_load_cost: unaligned supported by "
1147 "hardware.\n");
1149 break;
1151 case dr_explicit_realign:
1153 *inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
1154 vector_load, stmt_info, 0, vect_body);
1155 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1156 vec_perm, stmt_info, 0, vect_body);
1158 /* FIXME: If the misalignment remains fixed across the iterations of
1159 the containing loop, the following cost should be added to the
1160 prologue costs. */
1161 if (targetm.vectorize.builtin_mask_for_load)
1162 *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
1163 stmt_info, 0, vect_body);
1165 if (dump_enabled_p ())
1166 dump_printf_loc (MSG_NOTE, vect_location,
1167 "vect_model_load_cost: explicit realign\n");
1169 break;
1171 case dr_explicit_realign_optimized:
1173 if (dump_enabled_p ())
1174 dump_printf_loc (MSG_NOTE, vect_location,
1175 "vect_model_load_cost: unaligned software "
1176 "pipelined.\n");
1178 /* Unaligned software pipeline has a load of an address, an initial
1179 load, and possibly a mask operation to "prime" the loop. However,
1180 if this is an access in a group of loads, which provide grouped
1181 access, then the above cost should only be considered for one
1182 access in the group. Inside the loop, there is a load op
1183 and a realignment op. */
1185 if (add_realign_cost && record_prologue_costs)
1187 *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
1188 vector_stmt, stmt_info,
1189 0, vect_prologue);
1190 if (targetm.vectorize.builtin_mask_for_load)
1191 *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
1192 vector_stmt, stmt_info,
1193 0, vect_prologue);
1196 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1197 stmt_info, 0, vect_body);
1198 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
1199 stmt_info, 0, vect_body);
1201 if (dump_enabled_p ())
1202 dump_printf_loc (MSG_NOTE, vect_location,
1203 "vect_model_load_cost: explicit realign optimized"
1204 "\n");
1206 break;
1209 case dr_unaligned_unsupported:
1211 *inside_cost = VECT_MAX_COST;
1213 if (dump_enabled_p ())
1214 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1215 "vect_model_load_cost: unsupported access.\n");
1216 break;
1219 default:
1220 gcc_unreachable ();
1224 /* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
1225 the loop preheader for the vectorized stmt STMT. */
1227 static void
1228 vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
1230 if (gsi)
1231 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1232 else
1234 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1235 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1237 if (loop_vinfo)
1239 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1240 basic_block new_bb;
1241 edge pe;
1243 if (nested_in_vect_loop_p (loop, stmt))
1244 loop = loop->inner;
1246 pe = loop_preheader_edge (loop);
1247 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
1248 gcc_assert (!new_bb);
1250 else
1252 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
1253 basic_block bb;
1254 gimple_stmt_iterator gsi_bb_start;
1256 gcc_assert (bb_vinfo);
1257 bb = BB_VINFO_BB (bb_vinfo);
1258 gsi_bb_start = gsi_after_labels (bb);
1259 gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
1263 if (dump_enabled_p ())
1265 dump_printf_loc (MSG_NOTE, vect_location,
1266 "created new init_stmt: ");
1267 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
1271 /* Function vect_init_vector.
1273 Insert a new stmt (INIT_STMT) that initializes a new variable of type
1274 TYPE with the value VAL. If TYPE is a vector type and VAL does not have
1275 vector type a vector with all elements equal to VAL is created first.
 1276    Place the initialization at GSI if it is not NULL.  Otherwise, place the
1277 initialization at the loop preheader.
1278 Return the DEF of INIT_STMT.
1279 It will be used in the vectorization of STMT. */
1281 tree
1282 vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi)
1284 gimple *init_stmt;
1285 tree new_temp;
 1287   /* We abuse this function to push something to an SSA name with initial
	  value 'val'.  */
1288 if (! useless_type_conversion_p (type, TREE_TYPE (val)))
1290 gcc_assert (TREE_CODE (type) == VECTOR_TYPE);
1291 if (! types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
 1293       /* A scalar boolean value should be transformed into an
 1294 	 all-zeros or all-ones value before building a vector.  */
1295 if (VECTOR_BOOLEAN_TYPE_P (type))
1297 tree true_val = build_all_ones_cst (TREE_TYPE (type));
1298 tree false_val = build_zero_cst (TREE_TYPE (type));
1300 if (CONSTANT_CLASS_P (val))
1301 val = integer_zerop (val) ? false_val : true_val;
1302 else
1304 new_temp = make_ssa_name (TREE_TYPE (type));
1305 init_stmt = gimple_build_assign (new_temp, COND_EXPR,
1306 val, true_val, false_val);
1307 vect_init_vector_1 (stmt, init_stmt, gsi);
1308 val = new_temp;
1311 else if (CONSTANT_CLASS_P (val))
1312 val = fold_convert (TREE_TYPE (type), val);
1313 else
1315 new_temp = make_ssa_name (TREE_TYPE (type));
1316 if (! INTEGRAL_TYPE_P (TREE_TYPE (val)))
1317 init_stmt = gimple_build_assign (new_temp,
1318 fold_build1 (VIEW_CONVERT_EXPR,
1319 TREE_TYPE (type),
1320 val));
1321 else
1322 init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
1323 vect_init_vector_1 (stmt, init_stmt, gsi);
1324 val = new_temp;
1327 val = build_vector_from_val (type, val);
1330 new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
1331 init_stmt = gimple_build_assign (new_temp, val);
1332 vect_init_vector_1 (stmt, init_stmt, gsi);
1333 return new_temp;
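/* For example, initializing a V4SI vector from the invariant 5 with a
   null GSI emits

     cst_3 = { 5, 5, 5, 5 };

   in the loop preheader and returns cst_3 (the SSA name is
   illustrative).  */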
1336 /* Function vect_get_vec_def_for_operand_1.
 1338    For a defining stmt DEF_STMT of a scalar stmt with definition type DT,
 1339    return the vector def that will be used in the vectorized stmt.  */
1341 tree
1342 vect_get_vec_def_for_operand_1 (gimple *def_stmt, enum vect_def_type dt)
1344 tree vec_oprnd;
1345 gimple *vec_stmt;
1346 stmt_vec_info def_stmt_info = NULL;
1348 switch (dt)
1350 /* operand is a constant or a loop invariant. */
1351 case vect_constant_def:
1352 case vect_external_def:
1353 /* Code should use vect_get_vec_def_for_operand. */
1354 gcc_unreachable ();
1356 /* operand is defined inside the loop. */
1357 case vect_internal_def:
1359 /* Get the def from the vectorized stmt. */
1360 def_stmt_info = vinfo_for_stmt (def_stmt);
1362 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1363 /* Get vectorized pattern statement. */
1364 if (!vec_stmt
1365 && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
1366 && !STMT_VINFO_RELEVANT (def_stmt_info))
1367 vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
1368 STMT_VINFO_RELATED_STMT (def_stmt_info)));
1369 gcc_assert (vec_stmt);
1370 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1371 vec_oprnd = PHI_RESULT (vec_stmt);
1372 else if (is_gimple_call (vec_stmt))
1373 vec_oprnd = gimple_call_lhs (vec_stmt);
1374 else
1375 vec_oprnd = gimple_assign_lhs (vec_stmt);
1376 return vec_oprnd;
1379 /* operand is defined by a loop header phi. */
1380 case vect_reduction_def:
1381 case vect_double_reduction_def:
1382 case vect_nested_cycle:
1383 case vect_induction_def:
1385 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1387 /* Get the def from the vectorized stmt. */
1388 def_stmt_info = vinfo_for_stmt (def_stmt);
1389 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1390 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1391 vec_oprnd = PHI_RESULT (vec_stmt);
1392 else
1393 vec_oprnd = gimple_get_lhs (vec_stmt);
1394 return vec_oprnd;
1397 default:
1398 gcc_unreachable ();
1403 /* Function vect_get_vec_def_for_operand.
1405 OP is an operand in STMT. This function returns a (vector) def that will be
1406 used in the vectorized stmt for STMT.
1408 In the case that OP is an SSA_NAME which is defined in the loop, then
1409 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
1411 In case OP is an invariant or constant, a new stmt that creates a vector def
1412 needs to be introduced. VECTYPE may be used to specify a required type for
1413 vector invariant. */
1415 tree
1416 vect_get_vec_def_for_operand (tree op, gimple *stmt, tree vectype)
1418 gimple *def_stmt;
1419 enum vect_def_type dt;
1420 bool is_simple_use;
1421 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1422 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1424 if (dump_enabled_p ())
1426 dump_printf_loc (MSG_NOTE, vect_location,
1427 "vect_get_vec_def_for_operand: ");
1428 dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
1429 dump_printf (MSG_NOTE, "\n");
1432 is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt);
1433 gcc_assert (is_simple_use);
1434 if (def_stmt && dump_enabled_p ())
1436 dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = ");
1437 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
1440 if (dt == vect_constant_def || dt == vect_external_def)
1442 tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
1443 tree vector_type;
1445 if (vectype)
1446 vector_type = vectype;
1447 else if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op))
1448 && VECTOR_BOOLEAN_TYPE_P (stmt_vectype))
1449 vector_type = build_same_sized_truth_vector_type (stmt_vectype);
1450 else
1451 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
1453 gcc_assert (vector_type);
1454 return vect_init_vector (stmt, op, vector_type, NULL);
1456 else
1457 return vect_get_vec_def_for_operand_1 (def_stmt, dt);
1461 /* Function vect_get_vec_def_for_stmt_copy
1463 Return a vector-def for an operand. This function is used when the
1464 vectorized stmt to be created (by the caller to this function) is a "copy"
1465 created in case the vectorized result cannot fit in one vector, and several
1466 copies of the vector-stmt are required. In this case the vector-def is
1467 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1468 of the stmt that defines VEC_OPRND.
1469 DT is the type of the vector def VEC_OPRND.
1471 Context:
1472 In case the vectorization factor (VF) is bigger than the number
1473 of elements that can fit in a vectype (nunits), we have to generate
1474 more than one vector stmt to vectorize the scalar stmt. This situation
1475 arises when there are multiple data-types operated upon in the loop; the
1476 smallest data-type determines the VF, and as a result, when vectorizing
1477 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1478 vector stmt (each computing a vector of 'nunits' results, and together
1479 computing 'VF' results in each iteration). This function is called when
1480 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1481 which VF=16 and nunits=4, so the number of copies required is 4):
1483 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1485 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1486 VS1.1: vx.1 = memref1 VS1.2
1487 VS1.2: vx.2 = memref2 VS1.3
1488 VS1.3: vx.3 = memref3
1490 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1491 VSnew.1: vz1 = vx.1 + ... VSnew.2
1492 VSnew.2: vz2 = vx.2 + ... VSnew.3
1493 VSnew.3: vz3 = vx.3 + ...
1495 The vectorization of S1 is explained in vectorizable_load.
1496 The vectorization of S2:
1497 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1498 the function 'vect_get_vec_def_for_operand' is called to
1499 get the relevant vector-def for each operand of S2. For operand x it
1500 returns the vector-def 'vx.0'.
1502 To create the remaining copies of the vector-stmt (VSnew.j), this
1503 function is called to get the relevant vector-def for each operand. It is
1504 obtained from the respective VS1.j stmt, which is recorded in the
1505 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1507 For example, to obtain the vector-def 'vx.1' in order to create the
1508 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1509 Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
1510 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1511 and return its def ('vx.1').
1512 Overall, to create the above sequence this function will be called 3 times:
1513 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1514 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1515 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1517 tree
1518 vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1520 gimple *vec_stmt_for_operand;
1521 stmt_vec_info def_stmt_info;
1523 /* Do nothing; can reuse same def. */
1524 if (dt == vect_external_def || dt == vect_constant_def )
1525 return vec_oprnd;
1527 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1528 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1529 gcc_assert (def_stmt_info);
1530 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1531 gcc_assert (vec_stmt_for_operand);
1532 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1533 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1534 else
1535 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1536 return vec_oprnd;
1540 /* Get vectorized definitions for the operands to create a copy of an original
1541 stmt. See vect_get_vec_def_for_stmt_copy () for details. */
1543 void
1544 vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
1545 vec<tree> *vec_oprnds0,
1546 vec<tree> *vec_oprnds1)
1548 tree vec_oprnd = vec_oprnds0->pop ();
1550 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
1551 vec_oprnds0->quick_push (vec_oprnd);
1553 if (vec_oprnds1 && vec_oprnds1->length ())
1555 vec_oprnd = vec_oprnds1->pop ();
1556 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
1557 vec_oprnds1->quick_push (vec_oprnd);
1562 /* Get vectorized definitions for OP0 and OP1. */
1564 void
1565 vect_get_vec_defs (tree op0, tree op1, gimple *stmt,
1566 vec<tree> *vec_oprnds0,
1567 vec<tree> *vec_oprnds1,
1568 slp_tree slp_node)
1570 if (slp_node)
1572 int nops = (op1 == NULL_TREE) ? 1 : 2;
1573 auto_vec<tree> ops (nops);
1574 auto_vec<vec<tree> > vec_defs (nops);
1576 ops.quick_push (op0);
1577 if (op1)
1578 ops.quick_push (op1);
1580 vect_get_slp_defs (ops, slp_node, &vec_defs);
1582 *vec_oprnds0 = vec_defs[0];
1583 if (op1)
1584 *vec_oprnds1 = vec_defs[1];
1586 else
1588 tree vec_oprnd;
1590 vec_oprnds0->create (1);
1591 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt);
1592 vec_oprnds0->quick_push (vec_oprnd);
1594 if (op1)
1596 vec_oprnds1->create (1);
1597 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt);
1598 vec_oprnds1->quick_push (vec_oprnd);
1603 /* Helper function called by vect_finish_replace_stmt and
1604 vect_finish_stmt_generation. Set the location of the new
1605 statement and create a stmt_vec_info for it. */
1607 static void
1608 vect_finish_stmt_generation_1 (gimple *stmt, gimple *vec_stmt)
1610 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1611 vec_info *vinfo = stmt_info->vinfo;
1613 set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo));
1615 if (dump_enabled_p ())
1617 dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
1618 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
1621 gimple_set_location (vec_stmt, gimple_location (stmt));
1623 /* While EH edges will generally prevent vectorization, stmt might
1624 e.g. be in a must-not-throw region. Ensure newly created stmts
1625 that could throw are part of the same region. */
1626 int lp_nr = lookup_stmt_eh_lp (stmt);
1627 if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
1628 add_stmt_to_eh_lp (vec_stmt, lp_nr);
1631 /* Replace the scalar statement STMT with a new vector statement VEC_STMT,
1632 which sets the same scalar result as STMT did. */
1634 void
1635 vect_finish_replace_stmt (gimple *stmt, gimple *vec_stmt)
1637 gcc_assert (gimple_get_lhs (stmt) == gimple_get_lhs (vec_stmt));
1639 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
1640 gsi_replace (&gsi, vec_stmt, false);
1642 vect_finish_stmt_generation_1 (stmt, vec_stmt);
1645 /* Function vect_finish_stmt_generation.
1647 Insert a new stmt. */
1649 void
1650 vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt,
1651 gimple_stmt_iterator *gsi)
1653 gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
1655 if (!gsi_end_p (*gsi)
1656 && gimple_has_mem_ops (vec_stmt))
1658 gimple *at_stmt = gsi_stmt (*gsi);
1659 tree vuse = gimple_vuse (at_stmt);
1660 if (vuse && TREE_CODE (vuse) == SSA_NAME)
1662 tree vdef = gimple_vdef (at_stmt);
1663 gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
1664 /* If we have an SSA vuse and insert a store, update virtual
1665 SSA form to avoid triggering the renamer. Do so only
1666 if we can easily see all uses - which is what almost always
1667 happens with the way vectorized stmts are inserted. */
1668 if ((vdef && TREE_CODE (vdef) == SSA_NAME)
1669 && ((is_gimple_assign (vec_stmt)
1670 && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
1671 || (is_gimple_call (vec_stmt)
1672 && !(gimple_call_flags (vec_stmt)
1673 & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
1675 tree new_vdef = copy_ssa_name (vuse, vec_stmt);
1676 gimple_set_vdef (vec_stmt, new_vdef);
1677 SET_USE (gimple_vuse_op (at_stmt), new_vdef);
1681 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1682 vect_finish_stmt_generation_1 (stmt, vec_stmt);
1685 /* We want to vectorize a call to combined function CFN with function
1686 decl FNDECL, using VECTYPE_OUT as the type of the output and VECTYPE_IN
1687 as the types of all inputs. Check whether this is possible using
1688 an internal function, returning its code if so or IFN_LAST if not. */
1690 static internal_fn
1691 vectorizable_internal_function (combined_fn cfn, tree fndecl,
1692 tree vectype_out, tree vectype_in)
1694 internal_fn ifn;
1695 if (internal_fn_p (cfn))
1696 ifn = as_internal_fn (cfn);
1697 else
1698 ifn = associated_internal_fn (fndecl);
1699 if (ifn != IFN_LAST && direct_internal_fn_p (ifn))
1701 const direct_internal_fn_info &info = direct_internal_fn (ifn);
1702 if (info.vectorizable)
1704 tree type0 = (info.type0 < 0 ? vectype_out : vectype_in);
1705 tree type1 = (info.type1 < 0 ? vectype_out : vectype_in);
1706 if (direct_internal_fn_supported_p (ifn, tree_pair (type0, type1),
1707 OPTIMIZE_FOR_SPEED))
1708 return ifn;
1711 return IFN_LAST;
1715 static tree permute_vec_elements (tree, tree, tree, gimple *,
1716 gimple_stmt_iterator *);
1718 /* Check whether a load or store statement in the loop described by
1719 LOOP_VINFO is possible in a fully-masked loop. This is testing
1720 whether the vectorizer pass has the appropriate support, as well as
1721 whether the target does.
1723 VLS_TYPE says whether the statement is a load or store and VECTYPE
1724 is the type of the vector being loaded or stored. MEMORY_ACCESS_TYPE
1725 says how the load or store is going to be implemented and GROUP_SIZE
1726 is the number of load or store statements in the containing group.
1727 If the access is a gather load or scatter store, GS_INFO describes
1728 its arguments.
1730 Clear LOOP_VINFO_CAN_FULLY_MASK_P if a fully-masked loop is not
1731 supported, otherwise record the required mask types. */
1733 static void
1734 check_load_store_masking (loop_vec_info loop_vinfo, tree vectype,
1735 vec_load_store_type vls_type, int group_size,
1736 vect_memory_access_type memory_access_type,
1737 gather_scatter_info *gs_info)
1739 /* Invariant loads need no special support. */
1740 if (memory_access_type == VMAT_INVARIANT)
1741 return;
1743 vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
1744 machine_mode vecmode = TYPE_MODE (vectype);
1745 bool is_load = (vls_type == VLS_LOAD);
1746 if (memory_access_type == VMAT_LOAD_STORE_LANES)
1748 if (is_load
1749 ? !vect_load_lanes_supported (vectype, group_size, true)
1750 : !vect_store_lanes_supported (vectype, group_size, true))
1752 if (dump_enabled_p ())
1753 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1754 "can't use a fully-masked loop because the"
1755 " target doesn't have an appropriate masked"
1756 " load/store-lanes instruction.\n");
1757 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
1758 return;
1760 unsigned int ncopies = vect_get_num_copies (loop_vinfo, vectype);
1761 vect_record_loop_mask (loop_vinfo, masks, ncopies, vectype);
1762 return;
1765 if (memory_access_type == VMAT_GATHER_SCATTER)
1767 internal_fn ifn = (is_load
1768 ? IFN_MASK_GATHER_LOAD
1769 : IFN_MASK_SCATTER_STORE);
1770 tree offset_type = TREE_TYPE (gs_info->offset);
1771 if (!internal_gather_scatter_fn_supported_p (ifn, vectype,
1772 gs_info->memory_type,
1773 TYPE_SIGN (offset_type),
1774 gs_info->scale))
1776 if (dump_enabled_p ())
1777 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1778 "can't use a fully-masked loop because the"
1779 " target doesn't have an appropriate masked"
1780 " gather load or scatter store instruction.\n");
1781 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
1782 return;
1784 unsigned int ncopies = vect_get_num_copies (loop_vinfo, vectype);
1785 vect_record_loop_mask (loop_vinfo, masks, ncopies, vectype);
1786 return;
1789 if (memory_access_type != VMAT_CONTIGUOUS
1790 && memory_access_type != VMAT_CONTIGUOUS_PERMUTE)
1792 /* Element X of the data must come from iteration i * VF + X of the
1793 scalar loop. We need more work to support other mappings. */
1794 if (dump_enabled_p ())
1795 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1796 "can't use a fully-masked loop because an access"
1797 " isn't contiguous.\n");
1798 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
1799 return;
1802 machine_mode mask_mode;
1803 if (!(targetm.vectorize.get_mask_mode
1804 (GET_MODE_NUNITS (vecmode),
1805 GET_MODE_SIZE (vecmode)).exists (&mask_mode))
1806 || !can_vec_mask_load_store_p (vecmode, mask_mode, is_load))
1808 if (dump_enabled_p ())
1809 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1810 "can't use a fully-masked loop because the target"
1811 " doesn't have the appropriate masked load or"
1812 " store.\n");
1813 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
1814 return;
1816 /* We might load more scalars than we need for permuting SLP loads.
1817 We checked in get_group_load_store_type that the extra elements
1818 don't leak into a new vector. */
1819 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
1820 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1821 unsigned int nvectors;
1822 if (can_div_away_from_zero_p (group_size * vf, nunits, &nvectors))
1823 vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype);
1824 else
1825 gcc_unreachable ();
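/* Illustrative sketch in plain C (standalone, not vectorizer code):
   for the positive quantities involved here, dividing "away from
   zero" is simply a ceiling division, so the number of masks recorded
   is ceil (GROUP_SIZE * VF / NUNITS).  The concrete sizes below are
   invented for the example.  */
#include <assert.h>

static unsigned int
ceil_div (unsigned int a, unsigned int b)
{
  return (a + b - 1) / b;	/* Round the quotient up.  */
}

int
main (void)
{
  unsigned int group_size = 3, vf = 2, nunits = 4;
  unsigned int nvectors = ceil_div (group_size * vf, nunits);
  assert (nvectors == 2);	/* 6 scalar elements need 2 4-lane vectors.  */
  return 0;
}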
1828 /* Return the mask input to a masked load or store. VEC_MASK is the vectorized
1829 form of the scalar mask condition and LOOP_MASK, if nonnull, is the mask
1830 that needs to be applied to all loads and stores in a vectorized loop.
1831 Return VEC_MASK if LOOP_MASK is null, otherwise return VEC_MASK & LOOP_MASK.
1833 MASK_TYPE is the type of both masks. If new statements are needed,
1834 insert them before GSI. */
1836 static tree
1837 prepare_load_store_mask (tree mask_type, tree loop_mask, tree vec_mask,
1838 gimple_stmt_iterator *gsi)
1840 gcc_assert (useless_type_conversion_p (mask_type, TREE_TYPE (vec_mask)));
1841 if (!loop_mask)
1842 return vec_mask;
1844 gcc_assert (TREE_TYPE (loop_mask) == mask_type);
1845 tree and_res = make_temp_ssa_name (mask_type, NULL, "vec_mask_and");
1846 gimple *and_stmt = gimple_build_assign (and_res, BIT_AND_EXPR,
1847 vec_mask, loop_mask);
1848 gsi_insert_before (gsi, and_stmt, GSI_SAME_STMT);
1849 return and_res;
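/* Illustrative sketch in plain C (standalone): the statement built
   above is an element-wise AND of two boolean vectors, so a lane is
   active only if both the scalar condition's mask and the loop mask
   select it.  The 4-lane masks below are invented for the example.  */
#include <assert.h>

int
main (void)
{
  unsigned char vec_mask[4]  = { 1, 0, 1, 1 };	/* scalar condition  */
  unsigned char loop_mask[4] = { 1, 1, 1, 0 };	/* active iterations */
  unsigned char vec_mask_and[4];
  for (int i = 0; i < 4; i++)
    vec_mask_and[i] = vec_mask[i] & loop_mask[i];
  assert (vec_mask_and[0] == 1 && vec_mask_and[1] == 0
	  && vec_mask_and[2] == 1 && vec_mask_and[3] == 0);
  return 0;
}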
1852 /* Determine whether we can use a gather load or scatter store to vectorize
1853 strided load or store STMT by truncating the current offset to a smaller
1854 width. We need to be able to construct an offset vector:
1856 { 0, X, X*2, X*3, ... }
1858 without loss of precision, where X is STMT's DR_STEP.
1860 Return true if this is possible, describing the gather load or scatter
1861 store in GS_INFO. MASKED_P is true if the load or store is conditional. */
1863 static bool
1864 vect_truncate_gather_scatter_offset (gimple *stmt, loop_vec_info loop_vinfo,
1865 bool masked_p,
1866 gather_scatter_info *gs_info)
1868 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1869 data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
1870 tree step = DR_STEP (dr);
1871 if (TREE_CODE (step) != INTEGER_CST)
1873 /* ??? Perhaps we could use range information here? */
1874 if (dump_enabled_p ())
1875 dump_printf_loc (MSG_NOTE, vect_location,
1876 "cannot truncate variable step.\n");
1877 return false;
1880 /* Get the number of bits in an element. */
1881 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1882 scalar_mode element_mode = SCALAR_TYPE_MODE (TREE_TYPE (vectype));
1883 unsigned int element_bits = GET_MODE_BITSIZE (element_mode);
1885 /* Set COUNT to the upper limit on the number of elements - 1.
1886 Start with the maximum vectorization factor. */
1887 unsigned HOST_WIDE_INT count = vect_max_vf (loop_vinfo) - 1;
1889 /* Try lowering COUNT to the number of scalar latch iterations. */
1890 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1891 widest_int max_iters;
1892 if (max_loop_iterations (loop, &max_iters)
1893 && max_iters < count)
1894 count = max_iters.to_shwi ();
1896 /* Try scales of 1 and the element size. */
1897 int scales[] = { 1, vect_get_scalar_dr_size (dr) };
1898 bool overflow_p = false;
1899 for (int i = 0; i < 2; ++i)
1901 int scale = scales[i];
1902 widest_int factor;
1903 if (!wi::multiple_of_p (wi::to_widest (step), scale, SIGNED, &factor))
1904 continue;
1906 /* See whether we can calculate (COUNT - 1) * STEP / SCALE
1907 in OFFSET_BITS bits. */
1908 widest_int range = wi::mul (count, factor, SIGNED, &overflow_p);
1909 if (overflow_p)
1910 continue;
1911 signop sign = range >= 0 ? UNSIGNED : SIGNED;
1912 if (wi::min_precision (range, sign) > element_bits)
1914 overflow_p = true;
1915 continue;
1918 /* See whether the target supports the operation. */
1919 tree memory_type = TREE_TYPE (DR_REF (dr));
1920 if (!vect_gather_scatter_fn_p (DR_IS_READ (dr), masked_p, vectype,
1921 memory_type, element_bits, sign, scale,
1922 &gs_info->ifn, &gs_info->element_type))
1923 continue;
1925 tree offset_type = build_nonstandard_integer_type (element_bits,
1926 sign == UNSIGNED);
1928 gs_info->decl = NULL_TREE;
1929 /* Logically the sum of DR_BASE_ADDRESS, DR_INIT and DR_OFFSET,
1930 but we don't need to store that here. */
1931 gs_info->base = NULL_TREE;
1932 gs_info->offset = fold_convert (offset_type, step);
1933 gs_info->offset_dt = vect_constant_def;
1934 gs_info->offset_vectype = NULL_TREE;
1935 gs_info->scale = scale;
1936 gs_info->memory_type = memory_type;
1937 return true;
1940 if (overflow_p && dump_enabled_p ())
1941 dump_printf_loc (MSG_NOTE, vect_location,
1942 "truncating gather/scatter offset to %d bits"
1943 " might change its value.\n", element_bits);
1945 return false;
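/* Illustrative sketch in plain C (standalone) of the range check
   above: with a compile-time STEP, the largest offset the loop can
   produce is COUNT * STEP / SCALE, and truncating the offset type is
   only safe if that value is representable in ELEMENT_BITS bits of
   the chosen signedness.  The numbers are invented and the bit count
   is kept well below 63 so the shifts stay in range.  */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool
fits_in_bits (int64_t value, unsigned int bits, bool is_signed)
{
  if (is_signed)
    return value >= -(INT64_C (1) << (bits - 1))
	   && value < (INT64_C (1) << (bits - 1));
  return value >= 0 && value < (INT64_C (1) << bits);
}

int
main (void)
{
  int64_t step = 12, scale = 4, count = 1000;	/* upper element count - 1 */
  unsigned int element_bits = 16;
  int64_t range = count * (step / scale);	/* largest offset: 3000 */
  assert (fits_in_bits (range, element_bits, range < 0));
  return 0;
}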
1948 /* Return true if we can use gather/scatter internal functions to
1949 vectorize STMT, which is a grouped or strided load or store.
1950 MASKED_P is true if the load or store is conditional. When returning
1951 true, fill in GS_INFO with the information required to perform the
1952 operation. */
1954 static bool
1955 vect_use_strided_gather_scatters_p (gimple *stmt, loop_vec_info loop_vinfo,
1956 bool masked_p,
1957 gather_scatter_info *gs_info)
1959 if (!vect_check_gather_scatter (stmt, loop_vinfo, gs_info)
1960 || gs_info->decl)
1961 return vect_truncate_gather_scatter_offset (stmt, loop_vinfo,
1962 masked_p, gs_info);
1964 scalar_mode element_mode = SCALAR_TYPE_MODE (gs_info->element_type);
1965 unsigned int element_bits = GET_MODE_BITSIZE (element_mode);
1966 tree offset_type = TREE_TYPE (gs_info->offset);
1967 unsigned int offset_bits = TYPE_PRECISION (offset_type);
1969 /* Enforced by vect_check_gather_scatter. */
1970 gcc_assert (element_bits >= offset_bits);
1972 /* If the elements are wider than the offset, convert the offset to the
1973 same width, without changing its sign. */
1974 if (element_bits > offset_bits)
1976 bool unsigned_p = TYPE_UNSIGNED (offset_type);
1977 offset_type = build_nonstandard_integer_type (element_bits, unsigned_p);
1978 gs_info->offset = fold_convert (offset_type, gs_info->offset);
1981 if (dump_enabled_p ())
1982 dump_printf_loc (MSG_NOTE, vect_location,
1983 "using gather/scatter for strided/grouped access,"
1984 " scale = %d\n", gs_info->scale);
1986 return true;
1989 /* STMT is a non-strided load or store, meaning that it accesses
1990 elements with a known constant step. Return -1 if that step
1991 is negative, 0 if it is zero, and 1 if it is greater than zero. */
1993 static int
1994 compare_step_with_zero (gimple *stmt)
1996 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1997 data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
1998 return tree_int_cst_compare (vect_dr_behavior (dr)->step,
1999 size_zero_node);
2002 /* If the target supports a permute mask that reverses the elements in
2003 a vector of type VECTYPE, return that mask, otherwise return null. */
2005 static tree
2006 perm_mask_for_reverse (tree vectype)
2008 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2010 /* The encoding has a single stepped pattern. */
2011 vec_perm_builder sel (nunits, 1, 3);
2012 for (int i = 0; i < 3; ++i)
2013 sel.quick_push (nunits - 1 - i);
2015 vec_perm_indices indices (sel, 1, nunits);
2016 if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
2017 return NULL_TREE;
2018 return vect_gen_perm_mask_checked (vectype, indices);
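/* Illustrative sketch in plain C (standalone): the full reversal
   permutation for an N-lane vector is { N-1, N-2, ..., 0 }.  The
   builder above stores only the first three selectors (a single
   "stepped" pattern); the remaining ones follow by continuing the
   step, as the loop below does for a hypothetical 8-lane vector.  */
#include <assert.h>

int
main (void)
{
  unsigned int nunits = 8;
  unsigned int sel[8];
  /* Seed the pattern the way perm_mask_for_reverse does...  */
  for (unsigned int i = 0; i < 3; i++)
    sel[i] = nunits - 1 - i;
  /* ...then extend it by repeating the step between the last two seeds.  */
  int step = (int) sel[2] - (int) sel[1];
  for (unsigned int i = 3; i < nunits; i++)
    sel[i] = sel[i - 1] + step;
  for (unsigned int i = 0; i < nunits; i++)
    assert (sel[i] == nunits - 1 - i);
  return 0;
}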
2021 /* STMT is either a masked or unconditional store. Return the value
2022 being stored. */
2024 tree
2025 vect_get_store_rhs (gimple *stmt)
2027 if (gassign *assign = dyn_cast <gassign *> (stmt))
2029 gcc_assert (gimple_assign_single_p (assign));
2030 return gimple_assign_rhs1 (assign);
2032 if (gcall *call = dyn_cast <gcall *> (stmt))
2034 internal_fn ifn = gimple_call_internal_fn (call);
2035 int index = internal_fn_stored_value_index (ifn);
2036 gcc_assert (index >= 0);
2037 return gimple_call_arg (stmt, index);
2039 gcc_unreachable ();
2042 /* A subroutine of get_load_store_type, with a subset of the same
2043 arguments. Handle the case where STMT is part of a grouped load
2044 or store.
2046 For stores, the statements in the group are all consecutive
2047 and there is no gap at the end. For loads, the statements in the
2048 group might not be consecutive; there can be gaps between statements
2049 as well as at the end. */
2051 static bool
2052 get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
2053 bool masked_p, vec_load_store_type vls_type,
2054 vect_memory_access_type *memory_access_type,
2055 gather_scatter_info *gs_info)
2057 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2058 vec_info *vinfo = stmt_info->vinfo;
2059 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2060 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
2061 gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
2062 data_reference *first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
2063 unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
2064 bool single_element_p = (stmt == first_stmt
2065 && !GROUP_NEXT_ELEMENT (stmt_info));
2066 unsigned HOST_WIDE_INT gap = GROUP_GAP (vinfo_for_stmt (first_stmt));
2067 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2069 /* True if the vectorized statements would access beyond the last
2070 statement in the group. */
2071 bool overrun_p = false;
2073 /* True if we can cope with such overrun by peeling for gaps, so that
2074 there is at least one final scalar iteration after the vector loop. */
2075 bool can_overrun_p = (!masked_p
2076 && vls_type == VLS_LOAD
2077 && loop_vinfo
2078 && !loop->inner);
2080 /* There can only be a gap at the end of the group if the stride is
2081 known at compile time. */
2082 gcc_assert (!STMT_VINFO_STRIDED_P (stmt_info) || gap == 0);
2084 /* Stores can't yet have gaps. */
2085 gcc_assert (slp || vls_type == VLS_LOAD || gap == 0);
2087 if (slp)
2089 if (STMT_VINFO_STRIDED_P (stmt_info))
2091 /* Try to use consecutive accesses of GROUP_SIZE elements,
2092 separated by the stride, until we have a complete vector.
2093 Fall back to scalar accesses if that isn't possible. */
2094 if (multiple_p (nunits, group_size))
2095 *memory_access_type = VMAT_STRIDED_SLP;
2096 else
2097 *memory_access_type = VMAT_ELEMENTWISE;
2099 else
2101 overrun_p = loop_vinfo && gap != 0;
2102 if (overrun_p && vls_type != VLS_LOAD)
2104 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2105 "Grouped store with gaps requires"
2106 " non-consecutive accesses\n");
2107 return false;
2109 /* An overrun is fine if the trailing elements are smaller
2110 than the alignment boundary B. Every vector access will
2111 be a multiple of B and so we are guaranteed to access a
2112 non-gap element in the same B-sized block. */
2113 if (overrun_p
2114 && gap < (vect_known_alignment_in_bytes (first_dr)
2115 / vect_get_scalar_dr_size (first_dr)))
2116 overrun_p = false;
2117 if (overrun_p && !can_overrun_p)
2119 if (dump_enabled_p ())
2120 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2121 "Peeling for outer loop is not supported\n");
2122 return false;
2124 *memory_access_type = VMAT_CONTIGUOUS;
2127 else
2129 /* We can always handle this case using elementwise accesses,
2130 but see if something more efficient is available. */
2131 *memory_access_type = VMAT_ELEMENTWISE;
2133 /* If there is a gap at the end of the group then these optimizations
2134 would access excess elements in the last iteration. */
2135 bool would_overrun_p = (gap != 0);
2136 /* An overrun is fine if the trailing elements are smaller than the
2137 alignment boundary B. Every vector access will be a multiple of B
2138 and so we are guaranteed to access a non-gap element in the
2139 same B-sized block. */
2140 if (would_overrun_p
2141 && !masked_p
2142 && gap < (vect_known_alignment_in_bytes (first_dr)
2143 / vect_get_scalar_dr_size (first_dr)))
2144 would_overrun_p = false;
2146 if (!STMT_VINFO_STRIDED_P (stmt_info)
2147 && (can_overrun_p || !would_overrun_p)
2148 && compare_step_with_zero (stmt) > 0)
2150 /* First cope with the degenerate case of a single-element
2151 vector. */
2152 if (known_eq (TYPE_VECTOR_SUBPARTS (vectype), 1U))
2153 *memory_access_type = VMAT_CONTIGUOUS;
2155 /* Otherwise try using LOAD/STORE_LANES. */
2156 if (*memory_access_type == VMAT_ELEMENTWISE
2157 && (vls_type == VLS_LOAD
2158 ? vect_load_lanes_supported (vectype, group_size, masked_p)
2159 : vect_store_lanes_supported (vectype, group_size,
2160 masked_p)))
2162 *memory_access_type = VMAT_LOAD_STORE_LANES;
2163 overrun_p = would_overrun_p;
2166 /* If that fails, try using permuting loads. */
2167 if (*memory_access_type == VMAT_ELEMENTWISE
2168 && (vls_type == VLS_LOAD
2169 ? vect_grouped_load_supported (vectype, single_element_p,
2170 group_size)
2171 : vect_grouped_store_supported (vectype, group_size)))
2173 *memory_access_type = VMAT_CONTIGUOUS_PERMUTE;
2174 overrun_p = would_overrun_p;
2178 /* As a last resort, try using a gather load or scatter store.
2180 ??? Although the code can handle all group sizes correctly,
2181 it probably isn't a win to use separate strided accesses based
2182 on nearby locations. Or, even if it's a win over scalar code,
2183 it might not be a win over vectorizing at a lower VF, if that
2184 allows us to use contiguous accesses. */
2185 if (*memory_access_type == VMAT_ELEMENTWISE
2186 && single_element_p
2187 && loop_vinfo
2188 && vect_use_strided_gather_scatters_p (stmt, loop_vinfo,
2189 masked_p, gs_info))
2190 *memory_access_type = VMAT_GATHER_SCATTER;
2193 if (vls_type != VLS_LOAD && first_stmt == stmt)
2195 /* STMT is the leader of the group. Check the operands of all the
2196 stmts of the group. */
2197 gimple *next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
2198 while (next_stmt)
2200 tree op = vect_get_store_rhs (next_stmt);
2201 gimple *def_stmt;
2202 enum vect_def_type dt;
2203 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
2205 if (dump_enabled_p ())
2206 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2207 "use not simple.\n");
2208 return false;
2210 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
2214 if (overrun_p)
2216 gcc_assert (can_overrun_p);
2217 if (dump_enabled_p ())
2218 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2219 "Data access with gaps requires scalar "
2220 "epilogue loop\n");
2221 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
2224 return true;
2227 /* A subroutine of get_load_store_type, with a subset of the same
2228 arguments. Handle the case where STMT is a load or store that
2229 accesses consecutive elements with a negative step. */
2231 static vect_memory_access_type
2232 get_negative_load_store_type (gimple *stmt, tree vectype,
2233 vec_load_store_type vls_type,
2234 unsigned int ncopies)
2236 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2237 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
2238 dr_alignment_support alignment_support_scheme;
2240 if (ncopies > 1)
2242 if (dump_enabled_p ())
2243 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2244 "multiple types with negative step.\n");
2245 return VMAT_ELEMENTWISE;
2248 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
2249 if (alignment_support_scheme != dr_aligned
2250 && alignment_support_scheme != dr_unaligned_supported)
2252 if (dump_enabled_p ())
2253 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2254 "negative step but alignment required.\n");
2255 return VMAT_ELEMENTWISE;
2258 if (vls_type == VLS_STORE_INVARIANT)
2260 if (dump_enabled_p ())
2261 dump_printf_loc (MSG_NOTE, vect_location,
2262 "negative step with invariant source;"
2263 " no permute needed.\n");
2264 return VMAT_CONTIGUOUS_DOWN;
2267 if (!perm_mask_for_reverse (vectype))
2269 if (dump_enabled_p ())
2270 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2271 "negative step and reversing not supported.\n");
2272 return VMAT_ELEMENTWISE;
2275 return VMAT_CONTIGUOUS_REVERSE;
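/* Illustrative sketch in plain C (standalone): with a negative step
   of one element, scalar iterations i = 0..3 read a[3], a[2], a[1],
   a[0].  A VMAT_CONTIGUOUS_REVERSE access loads the contiguous block
   a[0..3] once and then reverses the lanes, giving the same values in
   iteration order.  The array contents are invented.  */
#include <assert.h>

int
main (void)
{
  int a[4] = { 10, 20, 30, 40 };
  int scalar[4], vec[4], reversed[4];

  for (int i = 0; i < 4; i++)		/* scalar loop, step == -1 */
    scalar[i] = a[3 - i];

  for (int i = 0; i < 4; i++)		/* one contiguous vector load */
    vec[i] = a[i];
  for (int i = 0; i < 4; i++)		/* reverse permute */
    reversed[i] = vec[3 - i];

  for (int i = 0; i < 4; i++)
    assert (reversed[i] == scalar[i]);
  return 0;
}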
2278 /* Analyze load or store statement STMT of type VLS_TYPE. Return true
2279 if there is a memory access type that the vectorized form can use,
2280 storing it in *MEMORY_ACCESS_TYPE if so. If we decide to use gathers
2281 or scatters, fill in GS_INFO accordingly.
2283 SLP says whether we're performing SLP rather than loop vectorization.
2284 MASKED_P is true if the statement is conditional on a vectorized mask.
2285 VECTYPE is the vector type that the vectorized statements will use.
2286 NCOPIES is the number of vector statements that will be needed. */
2288 static bool
2289 get_load_store_type (gimple *stmt, tree vectype, bool slp, bool masked_p,
2290 vec_load_store_type vls_type, unsigned int ncopies,
2291 vect_memory_access_type *memory_access_type,
2292 gather_scatter_info *gs_info)
2294 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2295 vec_info *vinfo = stmt_info->vinfo;
2296 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2297 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2298 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
2300 *memory_access_type = VMAT_GATHER_SCATTER;
2301 gimple *def_stmt;
2302 if (!vect_check_gather_scatter (stmt, loop_vinfo, gs_info))
2303 gcc_unreachable ();
2304 else if (!vect_is_simple_use (gs_info->offset, vinfo, &def_stmt,
2305 &gs_info->offset_dt,
2306 &gs_info->offset_vectype))
2308 if (dump_enabled_p ())
2309 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2310 "%s index use not simple.\n",
2311 vls_type == VLS_LOAD ? "gather" : "scatter");
2312 return false;
2315 else if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
2317 if (!get_group_load_store_type (stmt, vectype, slp, masked_p, vls_type,
2318 memory_access_type, gs_info))
2319 return false;
2321 else if (STMT_VINFO_STRIDED_P (stmt_info))
2323 gcc_assert (!slp);
2324 if (loop_vinfo
2325 && vect_use_strided_gather_scatters_p (stmt, loop_vinfo,
2326 masked_p, gs_info))
2327 *memory_access_type = VMAT_GATHER_SCATTER;
2328 else
2329 *memory_access_type = VMAT_ELEMENTWISE;
2331 else
2333 int cmp = compare_step_with_zero (stmt);
2334 if (cmp < 0)
2335 *memory_access_type = get_negative_load_store_type
2336 (stmt, vectype, vls_type, ncopies);
2337 else if (cmp == 0)
2339 gcc_assert (vls_type == VLS_LOAD);
2340 *memory_access_type = VMAT_INVARIANT;
2342 else
2343 *memory_access_type = VMAT_CONTIGUOUS;
2346 if ((*memory_access_type == VMAT_ELEMENTWISE
2347 || *memory_access_type == VMAT_STRIDED_SLP)
2348 && !nunits.is_constant ())
2350 if (dump_enabled_p ())
2351 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2352 "Not using elementwise accesses due to variable "
2353 "vectorization factor.\n");
2354 return false;
2357 /* FIXME: At the moment the cost model seems to underestimate the
2358 cost of using elementwise accesses. This check preserves the
2359 traditional behavior until that can be fixed. */
2360 if (*memory_access_type == VMAT_ELEMENTWISE
2361 && !STMT_VINFO_STRIDED_P (stmt_info)
2362 && !(stmt == GROUP_FIRST_ELEMENT (stmt_info)
2363 && !GROUP_NEXT_ELEMENT (stmt_info)
2364 && !pow2p_hwi (GROUP_SIZE (stmt_info))))
2366 if (dump_enabled_p ())
2367 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2368 "not falling back to elementwise accesses\n");
2369 return false;
2371 return true;
2374 /* Return true if boolean argument MASK is suitable for vectorizing
2375 conditional load or store STMT. When returning true, store the type
2376 of the definition in *MASK_DT_OUT and the type of the vectorized mask
2377 in *MASK_VECTYPE_OUT. */
2379 static bool
2380 vect_check_load_store_mask (gimple *stmt, tree mask,
2381 vect_def_type *mask_dt_out,
2382 tree *mask_vectype_out)
2384 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (mask)))
2386 if (dump_enabled_p ())
2387 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2388 "mask argument is not a boolean.\n");
2389 return false;
2392 if (TREE_CODE (mask) != SSA_NAME)
2394 if (dump_enabled_p ())
2395 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2396 "mask argument is not an SSA name.\n");
2397 return false;
2400 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2401 gimple *def_stmt;
2402 enum vect_def_type mask_dt;
2403 tree mask_vectype;
2404 if (!vect_is_simple_use (mask, stmt_info->vinfo, &def_stmt, &mask_dt,
2405 &mask_vectype))
2407 if (dump_enabled_p ())
2408 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2409 "mask use not simple.\n");
2410 return false;
2413 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2414 if (!mask_vectype)
2415 mask_vectype = get_mask_type_for_scalar_type (TREE_TYPE (vectype));
2417 if (!mask_vectype || !VECTOR_BOOLEAN_TYPE_P (mask_vectype))
2419 if (dump_enabled_p ())
2420 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2421 "could not find an appropriate vector mask type.\n");
2422 return false;
2425 if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_vectype),
2426 TYPE_VECTOR_SUBPARTS (vectype)))
2428 if (dump_enabled_p ())
2430 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2431 "vector mask type ");
2432 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, mask_vectype);
2433 dump_printf (MSG_MISSED_OPTIMIZATION,
2434 " does not match vector data type ");
2435 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, vectype);
2436 dump_printf (MSG_MISSED_OPTIMIZATION, ".\n");
2438 return false;
2441 *mask_dt_out = mask_dt;
2442 *mask_vectype_out = mask_vectype;
2443 return true;
2446 /* Return true if stored value RHS is suitable for vectorizing store
2447 statement STMT. When returning true, store the type of the
2448 definition in *RHS_DT_OUT, the type of the vectorized store value in
2449 *RHS_VECTYPE_OUT and the type of the store in *VLS_TYPE_OUT. */
2451 static bool
2452 vect_check_store_rhs (gimple *stmt, tree rhs, vect_def_type *rhs_dt_out,
2453 tree *rhs_vectype_out, vec_load_store_type *vls_type_out)
2455 /* In the case this is a store from a constant, make sure
2456 native_encode_expr can handle it. */
2457 if (CONSTANT_CLASS_P (rhs) && native_encode_expr (rhs, NULL, 64) == 0)
2459 if (dump_enabled_p ())
2460 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2461 "cannot encode constant as a byte sequence.\n");
2462 return false;
2465 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2466 gimple *def_stmt;
2467 enum vect_def_type rhs_dt;
2468 tree rhs_vectype;
2469 if (!vect_is_simple_use (rhs, stmt_info->vinfo, &def_stmt, &rhs_dt,
2470 &rhs_vectype))
2472 if (dump_enabled_p ())
2473 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2474 "use not simple.\n");
2475 return false;
2478 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2479 if (rhs_vectype && !useless_type_conversion_p (vectype, rhs_vectype))
2481 if (dump_enabled_p ())
2482 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2483 "incompatible vector types.\n");
2484 return false;
2487 *rhs_dt_out = rhs_dt;
2488 *rhs_vectype_out = rhs_vectype;
2489 if (rhs_dt == vect_constant_def || rhs_dt == vect_external_def)
2490 *vls_type_out = VLS_STORE_INVARIANT;
2491 else
2492 *vls_type_out = VLS_STORE;
2493 return true;
2496 /* Build an all-ones vector mask of type MASKTYPE while vectorizing STMT.
2497 Note that we support masks with floating-point type, in which case the
2498 floats are interpreted as a bitmask. */
2500 static tree
2501 vect_build_all_ones_mask (gimple *stmt, tree masktype)
2503 if (TREE_CODE (masktype) == INTEGER_TYPE)
2504 return build_int_cst (masktype, -1);
2505 else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
2507 tree mask = build_int_cst (TREE_TYPE (masktype), -1);
2508 mask = build_vector_from_val (masktype, mask);
2509 return vect_init_vector (stmt, mask, masktype, NULL);
2511 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
2513 REAL_VALUE_TYPE r;
2514 long tmp[6];
2515 for (int j = 0; j < 6; ++j)
2516 tmp[j] = -1;
2517 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
2518 tree mask = build_real (TREE_TYPE (masktype), r);
2519 mask = build_vector_from_val (masktype, mask);
2520 return vect_init_vector (stmt, mask, masktype, NULL);
2522 gcc_unreachable ();
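/* Illustrative sketch in plain C (standalone): for a float mask
   element, "all ones" means a value whose bit pattern is entirely 1s
   (which happens to be a NaN); the mask is interpreted bitwise, not
   numerically.  This assumes IEEE-754 binary32 floats and uses memcpy
   for the type pun.  */
#include <assert.h>
#include <math.h>
#include <stdint.h>
#include <string.h>

int
main (void)
{
  uint32_t bits = UINT32_MAX;	/* every mask bit set in this lane */
  float lane;
  memcpy (&lane, &bits, sizeof lane);
  assert (isnan (lane));	/* numerically it is a NaN ...     */
  uint32_t back;
  memcpy (&back, &lane, sizeof back);
  assert (back == UINT32_MAX);	/* ... but its bits are all ones.  */
  return 0;
}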
2525 /* Build an all-zero merge value of type VECTYPE while vectorizing
2526 STMT as a gather load. */
2528 static tree
2529 vect_build_zero_merge_argument (gimple *stmt, tree vectype)
2531 tree merge;
2532 if (TREE_CODE (TREE_TYPE (vectype)) == INTEGER_TYPE)
2533 merge = build_int_cst (TREE_TYPE (vectype), 0);
2534 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (vectype)))
2536 REAL_VALUE_TYPE r;
2537 long tmp[6];
2538 for (int j = 0; j < 6; ++j)
2539 tmp[j] = 0;
2540 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (vectype)));
2541 merge = build_real (TREE_TYPE (vectype), r);
2543 else
2544 gcc_unreachable ();
2545 merge = build_vector_from_val (vectype, merge);
2546 return vect_init_vector (stmt, merge, vectype, NULL);
2549 /* Build a gather load call while vectorizing STMT. Insert new instructions
2550 before GSI and add them to VEC_STMT. GS_INFO describes the gather load
2551 operation. If the load is conditional, MASK is the unvectorized
2552 condition and MASK_DT is its definition type, otherwise MASK is null. */
2554 static void
2555 vect_build_gather_load_calls (gimple *stmt, gimple_stmt_iterator *gsi,
2556 gimple **vec_stmt, gather_scatter_info *gs_info,
2557 tree mask, vect_def_type mask_dt)
2559 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2560 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2561 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2562 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2563 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2564 int ncopies = vect_get_num_copies (loop_vinfo, vectype);
2565 edge pe = loop_preheader_edge (loop);
2566 enum { NARROW, NONE, WIDEN } modifier;
2567 poly_uint64 gather_off_nunits
2568 = TYPE_VECTOR_SUBPARTS (gs_info->offset_vectype);
2570 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info->decl));
2571 tree rettype = TREE_TYPE (TREE_TYPE (gs_info->decl));
2572 tree srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2573 tree ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2574 tree idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2575 tree masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2576 tree scaletype = TREE_VALUE (arglist);
2577 gcc_checking_assert (types_compatible_p (srctype, rettype)
2578 && (!mask || types_compatible_p (srctype, masktype)));
2580 tree perm_mask = NULL_TREE;
2581 tree mask_perm_mask = NULL_TREE;
2582 if (known_eq (nunits, gather_off_nunits))
2583 modifier = NONE;
2584 else if (known_eq (nunits * 2, gather_off_nunits))
2586 modifier = WIDEN;
2588 /* Currently widening gathers and scatters are only supported for
2589 fixed-length vectors. */
2590 int count = gather_off_nunits.to_constant ();
2591 vec_perm_builder sel (count, count, 1);
2592 for (int i = 0; i < count; ++i)
2593 sel.quick_push (i | (count / 2));
2595 vec_perm_indices indices (sel, 1, count);
2596 perm_mask = vect_gen_perm_mask_checked (gs_info->offset_vectype,
2597 indices);
2599 else if (known_eq (nunits, gather_off_nunits * 2))
2601 modifier = NARROW;
2603 /* Currently narrowing gathers and scatters are only supported for
2604 fixed-length vectors. */
2605 int count = nunits.to_constant ();
2606 vec_perm_builder sel (count, count, 1);
2607 sel.quick_grow (count);
2608 for (int i = 0; i < count; ++i)
2609 sel[i] = i < count / 2 ? i : i + count / 2;
2610 vec_perm_indices indices (sel, 2, count);
2611 perm_mask = vect_gen_perm_mask_checked (vectype, indices);
2613 ncopies *= 2;
2615 if (mask)
2617 for (int i = 0; i < count; ++i)
2618 sel[i] = i | (count / 2);
2619 indices.new_vector (sel, 2, count);
2620 mask_perm_mask = vect_gen_perm_mask_checked (masktype, indices);
2623 else
2624 gcc_unreachable ();
2626 tree vec_dest = vect_create_destination_var (gimple_get_lhs (stmt),
2627 vectype);
2629 tree ptr = fold_convert (ptrtype, gs_info->base);
2630 if (!is_gimple_min_invariant (ptr))
2632 gimple_seq seq;
2633 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
2634 basic_block new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
2635 gcc_assert (!new_bb);
2638 tree scale = build_int_cst (scaletype, gs_info->scale);
2640 tree vec_oprnd0 = NULL_TREE;
2641 tree vec_mask = NULL_TREE;
2642 tree src_op = NULL_TREE;
2643 tree mask_op = NULL_TREE;
2644 tree prev_res = NULL_TREE;
2645 stmt_vec_info prev_stmt_info = NULL;
2647 if (!mask)
2649 src_op = vect_build_zero_merge_argument (stmt, rettype);
2650 mask_op = vect_build_all_ones_mask (stmt, masktype);
2653 for (int j = 0; j < ncopies; ++j)
2655 tree op, var;
2656 gimple *new_stmt;
2657 if (modifier == WIDEN && (j & 1))
2658 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
2659 perm_mask, stmt, gsi);
2660 else if (j == 0)
2661 op = vec_oprnd0
2662 = vect_get_vec_def_for_operand (gs_info->offset, stmt);
2663 else
2664 op = vec_oprnd0
2665 = vect_get_vec_def_for_stmt_copy (gs_info->offset_dt, vec_oprnd0);
2667 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
2669 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
2670 TYPE_VECTOR_SUBPARTS (idxtype)));
2671 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
2672 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
2673 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
2674 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2675 op = var;
2678 if (mask)
2680 if (mask_perm_mask && (j & 1))
2681 mask_op = permute_vec_elements (mask_op, mask_op,
2682 mask_perm_mask, stmt, gsi);
2683 else
2685 if (j == 0)
2686 vec_mask = vect_get_vec_def_for_operand (mask, stmt);
2687 else
2688 vec_mask = vect_get_vec_def_for_stmt_copy (mask_dt, vec_mask);
2690 mask_op = vec_mask;
2691 if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
2693 gcc_assert
2694 (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op)),
2695 TYPE_VECTOR_SUBPARTS (masktype)));
2696 var = vect_get_new_ssa_name (masktype, vect_simple_var);
2697 mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
2698 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR,
2699 mask_op);
2700 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2701 mask_op = var;
2704 src_op = mask_op;
2707 new_stmt = gimple_build_call (gs_info->decl, 5, src_op, ptr, op,
2708 mask_op, scale);
2710 if (!useless_type_conversion_p (vectype, rettype))
2712 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype),
2713 TYPE_VECTOR_SUBPARTS (rettype)));
2714 op = vect_get_new_ssa_name (rettype, vect_simple_var);
2715 gimple_call_set_lhs (new_stmt, op);
2716 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2717 var = make_ssa_name (vec_dest);
2718 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
2719 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
2721 else
2723 var = make_ssa_name (vec_dest, new_stmt);
2724 gimple_call_set_lhs (new_stmt, var);
2727 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2729 if (modifier == NARROW)
2731 if ((j & 1) == 0)
2733 prev_res = var;
2734 continue;
2736 var = permute_vec_elements (prev_res, var, perm_mask, stmt, gsi);
2737 new_stmt = SSA_NAME_DEF_STMT (var);
2740 if (prev_stmt_info == NULL)
2741 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2742 else
2743 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2744 prev_stmt_info = vinfo_for_stmt (new_stmt);
2748 /* Prepare the base and offset in GS_INFO for vectorization.
2749 Set *DATAREF_PTR to the loop-invariant base address and *VEC_OFFSET
2750 to the vectorized offset argument for the first copy of STMT. STMT
2751 is the statement described by GS_INFO and LOOP is the containing loop. */
2753 static void
2754 vect_get_gather_scatter_ops (struct loop *loop, gimple *stmt,
2755 gather_scatter_info *gs_info,
2756 tree *dataref_ptr, tree *vec_offset)
2758 gimple_seq stmts = NULL;
2759 *dataref_ptr = force_gimple_operand (gs_info->base, &stmts, true, NULL_TREE);
2760 if (stmts != NULL)
2762 basic_block new_bb;
2763 edge pe = loop_preheader_edge (loop);
2764 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
2765 gcc_assert (!new_bb);
2767 tree offset_type = TREE_TYPE (gs_info->offset);
2768 tree offset_vectype = get_vectype_for_scalar_type (offset_type);
2769 *vec_offset = vect_get_vec_def_for_operand (gs_info->offset, stmt,
2770 offset_vectype);
2773 /* Prepare to implement a grouped or strided load or store using
2774 the gather load or scatter store operation described by GS_INFO.
2775 STMT is the load or store statement.
2777 Set *DATAREF_BUMP to the amount that should be added to the base
2778 address after each copy of the vectorized statement. Set *VEC_OFFSET
2779 to an invariant offset vector in which element I has the value
2780 I * DR_STEP / SCALE. */
2782 static void
2783 vect_get_strided_load_store_ops (gimple *stmt, loop_vec_info loop_vinfo,
2784 gather_scatter_info *gs_info,
2785 tree *dataref_bump, tree *vec_offset)
2787 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2788 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
2789 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2790 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2791 gimple_seq stmts;
2793 tree bump = size_binop (MULT_EXPR,
2794 fold_convert (sizetype, DR_STEP (dr)),
2795 size_int (TYPE_VECTOR_SUBPARTS (vectype)));
2796 *dataref_bump = force_gimple_operand (bump, &stmts, true, NULL_TREE);
2797 if (stmts)
2798 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
2800 /* The offset given in GS_INFO can have pointer type, so use the element
2801 type of the vector instead. */
2802 tree offset_type = TREE_TYPE (gs_info->offset);
2803 tree offset_vectype = get_vectype_for_scalar_type (offset_type);
2804 offset_type = TREE_TYPE (offset_vectype);
2806 /* Calculate X = DR_STEP / SCALE and convert it to the appropriate type. */
2807 tree step = size_binop (EXACT_DIV_EXPR, DR_STEP (dr),
2808 ssize_int (gs_info->scale));
2809 step = fold_convert (offset_type, step);
2810 step = force_gimple_operand (step, &stmts, true, NULL_TREE);
2812 /* Create {0, X, X*2, X*3, ...}. */
2813 *vec_offset = gimple_build (&stmts, VEC_SERIES_EXPR, offset_vectype,
2814 build_zero_cst (offset_type), step);
2815 if (stmts)
2816 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
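/* Illustrative sketch in plain C (standalone) of the addressing
   scheme above: with X = DR_STEP / SCALE, lane I of copy J accesses
   BASE + J * (DR_STEP * NUNITS) + offset[I] * SCALE, which is exactly
   BASE + (J * NUNITS + I) * DR_STEP, i.e. the address the scalar loop
   would use.  The sizes below are invented.  */
#include <assert.h>

int
main (void)
{
  long base = 1000, step = 12, scale = 4;
  int nunits = 4;
  long x = step / scale;		/* element of the series        */
  long bump = step * nunits;		/* *DATAREF_BUMP                */
  long offset[4];
  for (int i = 0; i < nunits; i++)	/* *VEC_OFFSET: {0, X, 2X, 3X}  */
    offset[i] = i * x;

  for (int j = 0; j < 3; j++)		/* three vector copies          */
    for (int i = 0; i < nunits; i++)
      assert (base + j * bump + offset[i] * scale
	      == base + (long) (j * nunits + i) * step);
  return 0;
}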
2819 /* Return the amount that should be added to a vector pointer to move
2820 to the next or previous copy of AGGR_TYPE. DR is the data reference
2821 being vectorized and MEMORY_ACCESS_TYPE describes the type of
2822 vectorization. */
2824 static tree
2825 vect_get_data_ptr_increment (data_reference *dr, tree aggr_type,
2826 vect_memory_access_type memory_access_type)
2828 if (memory_access_type == VMAT_INVARIANT)
2829 return size_zero_node;
2831 tree iv_step = TYPE_SIZE_UNIT (aggr_type);
2832 tree step = vect_dr_behavior (dr)->step;
2833 if (tree_int_cst_sgn (step) == -1)
2834 iv_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv_step), iv_step);
2835 return iv_step;
2838 /* Check and perform vectorization of BUILT_IN_BSWAP{16,32,64}. */
2840 static bool
2841 vectorizable_bswap (gimple *stmt, gimple_stmt_iterator *gsi,
2842 gimple **vec_stmt, slp_tree slp_node,
2843 tree vectype_in, enum vect_def_type *dt)
2845 tree op, vectype;
2846 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2847 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2848 unsigned ncopies;
2849 unsigned HOST_WIDE_INT nunits, num_bytes;
2851 op = gimple_call_arg (stmt, 0);
2852 vectype = STMT_VINFO_VECTYPE (stmt_info);
2854 if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nunits))
2855 return false;
2857 /* Multiple types in SLP are handled by creating the appropriate number of
2858 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2859 case of SLP. */
2860 if (slp_node)
2861 ncopies = 1;
2862 else
2863 ncopies = vect_get_num_copies (loop_vinfo, vectype);
2865 gcc_assert (ncopies >= 1);
2867 tree char_vectype = get_same_sized_vectype (char_type_node, vectype_in);
2868 if (! char_vectype)
2869 return false;
2871 if (!TYPE_VECTOR_SUBPARTS (char_vectype).is_constant (&num_bytes))
2872 return false;
2874 unsigned word_bytes = num_bytes / nunits;
2876 /* The encoding uses one stepped pattern for each byte in the word. */
2877 vec_perm_builder elts (num_bytes, word_bytes, 3);
2878 for (unsigned i = 0; i < 3; ++i)
2879 for (unsigned j = 0; j < word_bytes; ++j)
2880 elts.quick_push ((i + 1) * word_bytes - j - 1);
2882 vec_perm_indices indices (elts, 1, num_bytes);
2883 if (!can_vec_perm_const_p (TYPE_MODE (char_vectype), indices))
2884 return false;
2886 if (! vec_stmt)
2888 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2889 if (dump_enabled_p ())
2890 dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_bswap ==="
2891 "\n");
2892 if (! slp_node)
2894 add_stmt_cost (stmt_info->vinfo->target_cost_data,
2895 1, vector_stmt, stmt_info, 0, vect_prologue);
2896 add_stmt_cost (stmt_info->vinfo->target_cost_data,
2897 ncopies, vec_perm, stmt_info, 0, vect_body);
2899 return true;
2902 tree bswap_vconst = vec_perm_indices_to_tree (char_vectype, indices);
2904 /* Transform. */
2905 vec<tree> vec_oprnds = vNULL;
2906 gimple *new_stmt = NULL;
2907 stmt_vec_info prev_stmt_info = NULL;
2908 for (unsigned j = 0; j < ncopies; j++)
2910 /* Handle uses. */
2911 if (j == 0)
2912 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
2913 else
2914 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
2916 /* Arguments are ready. Create the new vector stmt. */
2917 unsigned i;
2918 tree vop;
2919 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
2921 tree tem = make_ssa_name (char_vectype);
2922 new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
2923 char_vectype, vop));
2924 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2925 tree tem2 = make_ssa_name (char_vectype);
2926 new_stmt = gimple_build_assign (tem2, VEC_PERM_EXPR,
2927 tem, tem, bswap_vconst);
2928 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2929 tem = make_ssa_name (vectype);
2930 new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
2931 vectype, tem2));
2932 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2933 if (slp_node)
2934 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2937 if (slp_node)
2938 continue;
2940 if (j == 0)
2941 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2942 else
2943 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2945 prev_stmt_info = vinfo_for_stmt (new_stmt);
2948 vec_oprnds.release ();
2949 return true;
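/* Illustrative sketch in plain C (standalone): the index pushed above
   for word W, byte J is (W + 1) * WORD_BYTES - J - 1, i.e. the bytes
   of each word are selected in reverse order.  Applying that
   permutation to the byte image of a vector therefore byte-swaps
   every element, as the comparison with a hand-written bswap32 shows.
   The values are invented and the check is endian-independent.  */
#include <assert.h>
#include <stdint.h>
#include <string.h>

int
main (void)
{
  enum { WORD_BYTES = 4, NUNITS = 2, NUM_BYTES = WORD_BYTES * NUNITS };
  uint32_t in[NUNITS] = { 0x01020304u, 0xa1b2c3d4u };
  unsigned char src[NUM_BYTES], dst[NUM_BYTES];
  memcpy (src, in, sizeof src);

  for (int w = 0; w < NUNITS; w++)	/* the VEC_PERM_EXPR selector */
    for (int j = 0; j < WORD_BYTES; j++)
      dst[w * WORD_BYTES + j] = src[(w + 1) * WORD_BYTES - j - 1];

  uint32_t out[NUNITS], expect[NUNITS];
  memcpy (out, dst, sizeof out);
  for (int w = 0; w < NUNITS; w++)
    expect[w] = (in[w] >> 24) | ((in[w] >> 8) & 0xff00u)
		| ((in[w] << 8) & 0xff0000u) | (in[w] << 24);
  assert (memcmp (out, expect, sizeof out) == 0);
  return 0;
}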
2952 /* Return true if vector types VECTYPE_IN and VECTYPE_OUT have
2953 integer elements and if we can narrow VECTYPE_IN to VECTYPE_OUT
2954 in a single step. On success, store the binary pack code in
2955 *CONVERT_CODE. */
2957 static bool
2958 simple_integer_narrowing (tree vectype_out, tree vectype_in,
2959 tree_code *convert_code)
2961 if (!INTEGRAL_TYPE_P (TREE_TYPE (vectype_out))
2962 || !INTEGRAL_TYPE_P (TREE_TYPE (vectype_in)))
2963 return false;
2965 tree_code code;
2966 int multi_step_cvt = 0;
2967 auto_vec <tree, 8> interm_types;
2968 if (!supportable_narrowing_operation (NOP_EXPR, vectype_out, vectype_in,
2969 &code, &multi_step_cvt,
2970 &interm_types)
2971 || multi_step_cvt)
2972 return false;
2974 *convert_code = code;
2975 return true;
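/* Illustrative sketch in plain C (standalone) of the single-step
   narrowing that the NARROW call path relies on: two input-width
   vectors (4 ints each) are packed into one output vector (8 shorts)
   by truncating every element.  The element order of a real pack
   operation is target-defined; types and values here are invented.  */
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  int32_t lo[4] = { 1, -2, 70000, 3 };
  int32_t hi[4] = { 4, 5, -6, 131071 };
  int16_t packed[8];

  for (int i = 0; i < 4; i++)		/* the binary "pack" operation */
    {
      packed[i] = (int16_t) lo[i];	/* truncate the first operand   */
      packed[i + 4] = (int16_t) hi[i];	/* ... and the second operand   */
    }

  assert (packed[0] == 1 && packed[1] == -2);
  assert (packed[2] == (int16_t) 70000);	/* modular truncation    */
  assert (packed[7] == (int16_t) 131071);
  return 0;
}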
2978 /* Function vectorizable_call.
2980 Check if GS performs a function call that can be vectorized.
2981 If VEC_STMT is also passed, vectorize GS: create a vectorized
2982 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
2983 Return FALSE if not a vectorizable stmt, TRUE otherwise. */
2985 static bool
2986 vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
2987 slp_tree slp_node)
2989 gcall *stmt;
2990 tree vec_dest;
2991 tree scalar_dest;
2992 tree op, type;
2993 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
2994 stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info;
2995 tree vectype_out, vectype_in;
2996 poly_uint64 nunits_in;
2997 poly_uint64 nunits_out;
2998 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2999 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3000 vec_info *vinfo = stmt_info->vinfo;
3001 tree fndecl, new_temp, rhs_type;
3002 gimple *def_stmt;
3003 enum vect_def_type dt[3]
3004 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
3005 int ndts = 3;
3006 gimple *new_stmt = NULL;
3007 int ncopies, j;
3008 vec<tree> vargs = vNULL;
3009 enum { NARROW, NONE, WIDEN } modifier;
3010 size_t i, nargs;
3011 tree lhs;
3013 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3014 return false;
3016 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
3017 && ! vec_stmt)
3018 return false;
3020 /* Is GS a vectorizable call? */
3021 stmt = dyn_cast <gcall *> (gs);
3022 if (!stmt)
3023 return false;
3025 if (gimple_call_internal_p (stmt)
3026 && (internal_load_fn_p (gimple_call_internal_fn (stmt))
3027 || internal_store_fn_p (gimple_call_internal_fn (stmt))))
3028 /* Handled by vectorizable_load and vectorizable_store. */
3029 return false;
3031 if (gimple_call_lhs (stmt) == NULL_TREE
3032 || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
3033 return false;
3035 gcc_checking_assert (!stmt_can_throw_internal (stmt));
3037 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3039 /* Process function arguments. */
3040 rhs_type = NULL_TREE;
3041 vectype_in = NULL_TREE;
3042 nargs = gimple_call_num_args (stmt);
3044 /* Bail out if the function has more than three arguments; we do not have
3045 interesting builtin functions to vectorize with more than two arguments
3046 except for fma. Having no arguments is also not good. */
3047 if (nargs == 0 || nargs > 3)
3048 return false;
3050 /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic. */
3051 if (gimple_call_internal_p (stmt)
3052 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
3054 nargs = 0;
3055 rhs_type = unsigned_type_node;
3058 for (i = 0; i < nargs; i++)
3060 tree opvectype;
3062 op = gimple_call_arg (stmt, i);
3064 /* We can only handle calls with arguments of the same type. */
3065 if (rhs_type
3066 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
3068 if (dump_enabled_p ())
3069 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3070 "argument types differ.\n");
3071 return false;
3073 if (!rhs_type)
3074 rhs_type = TREE_TYPE (op);
3076 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[i], &opvectype))
3078 if (dump_enabled_p ())
3079 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3080 "use not simple.\n");
3081 return false;
3084 if (!vectype_in)
3085 vectype_in = opvectype;
3086 else if (opvectype
3087 && opvectype != vectype_in)
3089 if (dump_enabled_p ())
3090 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3091 "argument vector types differ.\n");
3092 return false;
3095 /* If all arguments are external or constant defs use a vector type with
3096 the same size as the output vector type. */
3097 if (!vectype_in)
3098 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
3099 if (vec_stmt)
3100 gcc_assert (vectype_in);
3101 if (!vectype_in)
3103 if (dump_enabled_p ())
3105 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3106 "no vectype for scalar type ");
3107 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
3108 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3111 return false;
3114 /* FORNOW */
3115 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
3116 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
3117 if (known_eq (nunits_in * 2, nunits_out))
3118 modifier = NARROW;
3119 else if (known_eq (nunits_out, nunits_in))
3120 modifier = NONE;
3121 else if (known_eq (nunits_out * 2, nunits_in))
3122 modifier = WIDEN;
3123 else
3124 return false;
3126 /* We only handle functions that do not read or clobber memory. */
3127 if (gimple_vuse (stmt))
3129 if (dump_enabled_p ())
3130 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3131 "function reads from or writes to memory.\n");
3132 return false;
3135 /* For now, we only vectorize functions if a target-specific builtin
3136 is available. TODO -- in some cases, it might be profitable to
3137 insert the calls for pieces of the vector, in order to be able
3138 to vectorize other operations in the loop. */
3139 fndecl = NULL_TREE;
3140 internal_fn ifn = IFN_LAST;
3141 combined_fn cfn = gimple_call_combined_fn (stmt);
3142 tree callee = gimple_call_fndecl (stmt);
3144 /* First try using an internal function. */
3145 tree_code convert_code = ERROR_MARK;
3146 if (cfn != CFN_LAST
3147 && (modifier == NONE
3148 || (modifier == NARROW
3149 && simple_integer_narrowing (vectype_out, vectype_in,
3150 &convert_code))))
3151 ifn = vectorizable_internal_function (cfn, callee, vectype_out,
3152 vectype_in);
3154 /* If that fails, try asking for a target-specific built-in function. */
3155 if (ifn == IFN_LAST)
3157 if (cfn != CFN_LAST)
3158 fndecl = targetm.vectorize.builtin_vectorized_function
3159 (cfn, vectype_out, vectype_in);
3160 else if (callee)
3161 fndecl = targetm.vectorize.builtin_md_vectorized_function
3162 (callee, vectype_out, vectype_in);
3165 if (ifn == IFN_LAST && !fndecl)
3167 if (cfn == CFN_GOMP_SIMD_LANE
3168 && !slp_node
3169 && loop_vinfo
3170 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
3171 && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
3172 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
3173 == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
3175 /* We can handle IFN_GOMP_SIMD_LANE by returning a
3176 { 0, 1, 2, ... vf - 1 } vector. */
3177 gcc_assert (nargs == 0);
3179 else if (modifier == NONE
3180 && (gimple_call_builtin_p (stmt, BUILT_IN_BSWAP16)
3181 || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP32)
3182 || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP64)))
3183 return vectorizable_bswap (stmt, gsi, vec_stmt, slp_node,
3184 vectype_in, dt);
3185 else
3187 if (dump_enabled_p ())
3188 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3189 "function is not vectorizable.\n");
3190 return false;
3194 if (slp_node)
3195 ncopies = 1;
3196 else if (modifier == NARROW && ifn == IFN_LAST)
3197 ncopies = vect_get_num_copies (loop_vinfo, vectype_out);
3198 else
3199 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
3201 /* Sanity check: make sure that at least one copy of the vectorized stmt
3202 needs to be generated. */
3203 gcc_assert (ncopies >= 1);
3205 if (!vec_stmt) /* transformation not required. */
3207 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
3208 if (dump_enabled_p ())
3209 dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
3210 "\n");
3211 if (!slp_node)
3213 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
3214 if (ifn != IFN_LAST && modifier == NARROW && !slp_node)
3215 add_stmt_cost (stmt_info->vinfo->target_cost_data, ncopies / 2,
3216 vec_promote_demote, stmt_info, 0, vect_body);
3219 return true;
3222 /* Transform. */
3224 if (dump_enabled_p ())
3225 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
3227 /* Handle def. */
3228 scalar_dest = gimple_call_lhs (stmt);
3229 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
3231 prev_stmt_info = NULL;
3232 if (modifier == NONE || ifn != IFN_LAST)
3234 tree prev_res = NULL_TREE;
3235 for (j = 0; j < ncopies; ++j)
3237 /* Build argument list for the vectorized call. */
3238 if (j == 0)
3239 vargs.create (nargs);
3240 else
3241 vargs.truncate (0);
3243 if (slp_node)
3245 auto_vec<vec<tree> > vec_defs (nargs);
3246 vec<tree> vec_oprnds0;
3248 for (i = 0; i < nargs; i++)
3249 vargs.quick_push (gimple_call_arg (stmt, i));
3250 vect_get_slp_defs (vargs, slp_node, &vec_defs);
3251 vec_oprnds0 = vec_defs[0];
3253 /* Arguments are ready. Create the new vector stmt. */
3254 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
3256 size_t k;
3257 for (k = 0; k < nargs; k++)
3259 vec<tree> vec_oprndsk = vec_defs[k];
3260 vargs[k] = vec_oprndsk[i];
3262 if (modifier == NARROW)
3264 tree half_res = make_ssa_name (vectype_in);
3265 gcall *call
3266 = gimple_build_call_internal_vec (ifn, vargs);
3267 gimple_call_set_lhs (call, half_res);
3268 gimple_call_set_nothrow (call, true);
3269 new_stmt = call;
3270 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3271 if ((i & 1) == 0)
3273 prev_res = half_res;
3274 continue;
3276 new_temp = make_ssa_name (vec_dest);
3277 new_stmt = gimple_build_assign (new_temp, convert_code,
3278 prev_res, half_res);
3280 else
3282 gcall *call;
3283 if (ifn != IFN_LAST)
3284 call = gimple_build_call_internal_vec (ifn, vargs);
3285 else
3286 call = gimple_build_call_vec (fndecl, vargs);
3287 new_temp = make_ssa_name (vec_dest, call);
3288 gimple_call_set_lhs (call, new_temp);
3289 gimple_call_set_nothrow (call, true);
3290 new_stmt = call;
3292 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3293 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3296 for (i = 0; i < nargs; i++)
3298 vec<tree> vec_oprndsi = vec_defs[i];
3299 vec_oprndsi.release ();
3301 continue;
3304 for (i = 0; i < nargs; i++)
3306 op = gimple_call_arg (stmt, i);
3307 if (j == 0)
3308 vec_oprnd0
3309 = vect_get_vec_def_for_operand (op, stmt);
3310 else
3312 vec_oprnd0 = gimple_call_arg (new_stmt, i);
3313 vec_oprnd0
3314 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
3317 vargs.quick_push (vec_oprnd0);
3320 if (gimple_call_internal_p (stmt)
3321 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
3323 tree cst = build_index_vector (vectype_out, j * nunits_out, 1);
3324 tree new_var
3325 = vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_");
3326 gimple *init_stmt = gimple_build_assign (new_var, cst);
3327 vect_init_vector_1 (stmt, init_stmt, NULL);
3328 new_temp = make_ssa_name (vec_dest);
3329 new_stmt = gimple_build_assign (new_temp, new_var);
3331 else if (modifier == NARROW)
3333 tree half_res = make_ssa_name (vectype_in);
3334 gcall *call = gimple_build_call_internal_vec (ifn, vargs);
3335 gimple_call_set_lhs (call, half_res);
3336 gimple_call_set_nothrow (call, true);
3337 new_stmt = call;
3338 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3339 if ((j & 1) == 0)
3341 prev_res = half_res;
3342 continue;
3344 new_temp = make_ssa_name (vec_dest);
3345 new_stmt = gimple_build_assign (new_temp, convert_code,
3346 prev_res, half_res);
3348 else
3350 gcall *call;
3351 if (ifn != IFN_LAST)
3352 call = gimple_build_call_internal_vec (ifn, vargs);
3353 else
3354 call = gimple_build_call_vec (fndecl, vargs);
3355 new_temp = make_ssa_name (vec_dest, new_stmt);
3356 gimple_call_set_lhs (call, new_temp);
3357 gimple_call_set_nothrow (call, true);
3358 new_stmt = call;
3360 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3362 if (j == (modifier == NARROW ? 1 : 0))
3363 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3364 else
3365 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3367 prev_stmt_info = vinfo_for_stmt (new_stmt);
3370 else if (modifier == NARROW)
3372 for (j = 0; j < ncopies; ++j)
3374 /* Build argument list for the vectorized call. */
3375 if (j == 0)
3376 vargs.create (nargs * 2);
3377 else
3378 vargs.truncate (0);
3380 if (slp_node)
3382 auto_vec<vec<tree> > vec_defs (nargs);
3383 vec<tree> vec_oprnds0;
3385 for (i = 0; i < nargs; i++)
3386 vargs.quick_push (gimple_call_arg (stmt, i));
3387 vect_get_slp_defs (vargs, slp_node, &vec_defs);
3388 vec_oprnds0 = vec_defs[0];
3390 /* Arguments are ready. Create the new vector stmt. */
3391 for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
3393 size_t k;
3394 vargs.truncate (0);
3395 for (k = 0; k < nargs; k++)
3397 vec<tree> vec_oprndsk = vec_defs[k];
3398 vargs.quick_push (vec_oprndsk[i]);
3399 vargs.quick_push (vec_oprndsk[i + 1]);
3401 gcall *call;
3402 if (ifn != IFN_LAST)
3403 call = gimple_build_call_internal_vec (ifn, vargs);
3404 else
3405 call = gimple_build_call_vec (fndecl, vargs);
3406 new_temp = make_ssa_name (vec_dest, call);
3407 gimple_call_set_lhs (call, new_temp);
3408 gimple_call_set_nothrow (call, true);
3409 new_stmt = call;
3410 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3411 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3414 for (i = 0; i < nargs; i++)
3416 vec<tree> vec_oprndsi = vec_defs[i];
3417 vec_oprndsi.release ();
3419 continue;
3422 for (i = 0; i < nargs; i++)
3424 op = gimple_call_arg (stmt, i);
3425 if (j == 0)
3427 vec_oprnd0
3428 = vect_get_vec_def_for_operand (op, stmt);
3429 vec_oprnd1
3430 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
3432 else
3434 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
3435 vec_oprnd0
3436 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
3437 vec_oprnd1
3438 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
3441 vargs.quick_push (vec_oprnd0);
3442 vargs.quick_push (vec_oprnd1);
3445 new_stmt = gimple_build_call_vec (fndecl, vargs);
3446 new_temp = make_ssa_name (vec_dest, new_stmt);
3447 gimple_call_set_lhs (new_stmt, new_temp);
3448 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3450 if (j == 0)
3451 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3452 else
3453 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3455 prev_stmt_info = vinfo_for_stmt (new_stmt);
3458 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3460 else
3461 /* No current target implements this case. */
3462 return false;
3464 vargs.release ();
3466 /* The call in STMT might prevent it from being removed in dce.
3467 However, we cannot remove it here, due to the way the ssa name
3468 it defines is mapped to the new definition. So just replace the
3469 rhs of the statement with something harmless. */
3471 if (slp_node)
3472 return true;
3474 type = TREE_TYPE (scalar_dest);
3475 if (is_pattern_stmt_p (stmt_info))
3476 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
3477 else
3478 lhs = gimple_call_lhs (stmt);
3480 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
3481 set_vinfo_for_stmt (new_stmt, stmt_info);
3482 set_vinfo_for_stmt (stmt, NULL);
3483 STMT_VINFO_STMT (stmt_info) = new_stmt;
3484 gsi_replace (gsi, new_stmt, false);
3486 return true;
3490 struct simd_call_arg_info
3492 tree vectype;
3493 tree op;
3494 HOST_WIDE_INT linear_step;
3495 enum vect_def_type dt;
3496 unsigned int align;
3497 bool simd_lane_linear;
3500 /* Helper function of vectorizable_simd_clone_call. If OP, an SSA_NAME,
3501 is linear within the simd lane (but not within the whole loop), note it in
3502 *ARGINFO. */
3504 static void
3505 vect_simd_lane_linear (tree op, struct loop *loop,
3506 struct simd_call_arg_info *arginfo)
3508 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
3510 if (!is_gimple_assign (def_stmt)
3511 || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
3512 || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
3513 return;
3515 tree base = gimple_assign_rhs1 (def_stmt);
3516 HOST_WIDE_INT linear_step = 0;
3517 tree v = gimple_assign_rhs2 (def_stmt);
3518 while (TREE_CODE (v) == SSA_NAME)
3520 tree t;
3521 def_stmt = SSA_NAME_DEF_STMT (v);
3522 if (is_gimple_assign (def_stmt))
3523 switch (gimple_assign_rhs_code (def_stmt))
3525 case PLUS_EXPR:
3526 t = gimple_assign_rhs2 (def_stmt);
3527 if (linear_step || TREE_CODE (t) != INTEGER_CST)
3528 return;
3529 base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
3530 v = gimple_assign_rhs1 (def_stmt);
3531 continue;
3532 case MULT_EXPR:
3533 t = gimple_assign_rhs2 (def_stmt);
3534 if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
3535 return;
3536 linear_step = tree_to_shwi (t);
3537 v = gimple_assign_rhs1 (def_stmt);
3538 continue;
3539 CASE_CONVERT:
3540 t = gimple_assign_rhs1 (def_stmt);
3541 if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
3542 || (TYPE_PRECISION (TREE_TYPE (v))
3543 < TYPE_PRECISION (TREE_TYPE (t))))
3544 return;
3545 if (!linear_step)
3546 linear_step = 1;
3547 v = t;
3548 continue;
3549 default:
3550 return;
3552 else if (gimple_call_internal_p (def_stmt, IFN_GOMP_SIMD_LANE)
3553 && loop->simduid
3554 && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
3555 && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
3556 == loop->simduid))
3558 if (!linear_step)
3559 linear_step = 1;
3560 arginfo->linear_step = linear_step;
3561 arginfo->op = base;
3562 arginfo->simd_lane_linear = true;
3563 return;
3568 /* Return the number of elements in vector type VECTYPE, which is associated
3569 with a SIMD clone. At present these vectors always have a constant
3570 length. */
3572 static unsigned HOST_WIDE_INT
3573 simd_clone_subparts (tree vectype)
3575 return TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
3578 /* Function vectorizable_simd_clone_call.
3580 Check if STMT performs a function call that can be vectorized
3581 by calling a simd clone of the function.
3582 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3583 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3584 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3586 static bool
3587 vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
3588 gimple **vec_stmt, slp_tree slp_node)
3590 tree vec_dest;
3591 tree scalar_dest;
3592 tree op, type;
3593 tree vec_oprnd0 = NULL_TREE;
3594 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
3595 tree vectype;
3596 unsigned int nunits;
3597 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3598 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3599 vec_info *vinfo = stmt_info->vinfo;
3600 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
3601 tree fndecl, new_temp;
3602 gimple *def_stmt;
3603 gimple *new_stmt = NULL;
3604 int ncopies, j;
3605 auto_vec<simd_call_arg_info> arginfo;
3606 vec<tree> vargs = vNULL;
3607 size_t i, nargs;
3608 tree lhs, rtype, ratype;
3609 vec<constructor_elt, va_gc> *ret_ctor_elts = NULL;
3611 /* Is STMT a vectorizable call? */
3612 if (!is_gimple_call (stmt))
3613 return false;
3615 fndecl = gimple_call_fndecl (stmt);
3616 if (fndecl == NULL_TREE)
3617 return false;
3619 struct cgraph_node *node = cgraph_node::get (fndecl);
3620 if (node == NULL || node->simd_clones == NULL)
3621 return false;
3623 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3624 return false;
3626 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
3627 && ! vec_stmt)
3628 return false;
3630 if (gimple_call_lhs (stmt)
3631 && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
3632 return false;
3634 gcc_checking_assert (!stmt_can_throw_internal (stmt));
3636 vectype = STMT_VINFO_VECTYPE (stmt_info);
3638 if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
3639 return false;
3641 /* FORNOW */
3642 if (slp_node)
3643 return false;
3645 /* Process function arguments. */
3646 nargs = gimple_call_num_args (stmt);
3648 /* Bail out if the function has zero arguments. */
3649 if (nargs == 0)
3650 return false;
3652 arginfo.reserve (nargs, true);
3654 for (i = 0; i < nargs; i++)
3656 simd_call_arg_info thisarginfo;
3657 affine_iv iv;
3659 thisarginfo.linear_step = 0;
3660 thisarginfo.align = 0;
3661 thisarginfo.op = NULL_TREE;
3662 thisarginfo.simd_lane_linear = false;
3664 op = gimple_call_arg (stmt, i);
3665 if (!vect_is_simple_use (op, vinfo, &def_stmt, &thisarginfo.dt,
3666 &thisarginfo.vectype)
3667 || thisarginfo.dt == vect_uninitialized_def)
3669 if (dump_enabled_p ())
3670 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3671 "use not simple.\n");
3672 return false;
3675 if (thisarginfo.dt == vect_constant_def
3676 || thisarginfo.dt == vect_external_def)
3677 gcc_assert (thisarginfo.vectype == NULL_TREE);
3678 else
3679 gcc_assert (thisarginfo.vectype != NULL_TREE);
3681 /* For linear arguments, the analyze phase should have saved
3682 the base and step in STMT_VINFO_SIMD_CLONE_INFO. */
3683 if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
3684 && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
3686 gcc_assert (vec_stmt);
3687 thisarginfo.linear_step
3688 = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
3689 thisarginfo.op
3690 = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
3691 thisarginfo.simd_lane_linear
3692 = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
3693 == boolean_true_node);
3694 /* If the loop has been peeled for alignment, we need to adjust the base accordingly. */
3695 tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
3696 tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
3697 if (n1 != n2 && !thisarginfo.simd_lane_linear)
3699 tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
3700 tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
3701 tree opt = TREE_TYPE (thisarginfo.op);
3702 bias = fold_convert (TREE_TYPE (step), bias);
3703 bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
3704 thisarginfo.op
3705 = fold_build2 (POINTER_TYPE_P (opt)
3706 ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
3707 thisarginfo.op, bias);
3710 else if (!vec_stmt
3711 && thisarginfo.dt != vect_constant_def
3712 && thisarginfo.dt != vect_external_def
3713 && loop_vinfo
3714 && TREE_CODE (op) == SSA_NAME
3715 && simple_iv (loop, loop_containing_stmt (stmt), op,
3716 &iv, false)
3717 && tree_fits_shwi_p (iv.step))
3719 thisarginfo.linear_step = tree_to_shwi (iv.step);
3720 thisarginfo.op = iv.base;
3722 else if ((thisarginfo.dt == vect_constant_def
3723 || thisarginfo.dt == vect_external_def)
3724 && POINTER_TYPE_P (TREE_TYPE (op)))
3725 thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
3726 /* Addresses of array elements indexed by GOMP_SIMD_LANE are
3727 linear too. */
3728 if (POINTER_TYPE_P (TREE_TYPE (op))
3729 && !thisarginfo.linear_step
3730 && !vec_stmt
3731 && thisarginfo.dt != vect_constant_def
3732 && thisarginfo.dt != vect_external_def
3733 && loop_vinfo
3734 && !slp_node
3735 && TREE_CODE (op) == SSA_NAME)
3736 vect_simd_lane_linear (op, loop, &thisarginfo);
3738 arginfo.quick_push (thisarginfo);
3741 unsigned HOST_WIDE_INT vf;
3742 if (!LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
3744 if (dump_enabled_p ())
3745 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3746 "not considering SIMD clones; not yet supported"
3747 " for variable-width vectors.\n");
3748 return false;
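/* Choose the SIMD clone to call: if the analysis phase already recorded
   one, reuse it; otherwise score every clone of NODE with a badness value
   (penalising a simdlen smaller than VF, inbranch clones, target costs and
   per-argument mismatches) and pick the clone with the lowest badness.  */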
3751 unsigned int badness = 0;
3752 struct cgraph_node *bestn = NULL;
3753 if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
3754 bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
3755 else
3756 for (struct cgraph_node *n = node->simd_clones; n != NULL;
3757 n = n->simdclone->next_clone)
3759 unsigned int this_badness = 0;
3760 if (n->simdclone->simdlen > vf
3761 || n->simdclone->nargs != nargs)
3762 continue;
3763 if (n->simdclone->simdlen < vf)
3764 this_badness += (exact_log2 (vf)
3765 - exact_log2 (n->simdclone->simdlen)) * 1024;
3766 if (n->simdclone->inbranch)
3767 this_badness += 2048;
3768 int target_badness = targetm.simd_clone.usable (n);
3769 if (target_badness < 0)
3770 continue;
3771 this_badness += target_badness * 512;
3772 /* FORNOW: Have to add code to add the mask argument. */
3773 if (n->simdclone->inbranch)
3774 continue;
3775 for (i = 0; i < nargs; i++)
3777 switch (n->simdclone->args[i].arg_type)
3779 case SIMD_CLONE_ARG_TYPE_VECTOR:
3780 if (!useless_type_conversion_p
3781 (n->simdclone->args[i].orig_type,
3782 TREE_TYPE (gimple_call_arg (stmt, i))))
3783 i = -1;
3784 else if (arginfo[i].dt == vect_constant_def
3785 || arginfo[i].dt == vect_external_def
3786 || arginfo[i].linear_step)
3787 this_badness += 64;
3788 break;
3789 case SIMD_CLONE_ARG_TYPE_UNIFORM:
3790 if (arginfo[i].dt != vect_constant_def
3791 && arginfo[i].dt != vect_external_def)
3792 i = -1;
3793 break;
3794 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
3795 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
3796 if (arginfo[i].dt == vect_constant_def
3797 || arginfo[i].dt == vect_external_def
3798 || (arginfo[i].linear_step
3799 != n->simdclone->args[i].linear_step))
3800 i = -1;
3801 break;
3802 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
3803 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
3804 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
3805 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
3806 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
3807 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
3808 /* FORNOW */
3809 i = -1;
3810 break;
3811 case SIMD_CLONE_ARG_TYPE_MASK:
3812 gcc_unreachable ();
3814 if (i == (size_t) -1)
3815 break;
3816 if (n->simdclone->args[i].alignment > arginfo[i].align)
3818 i = -1;
3819 break;
3821 if (arginfo[i].align)
3822 this_badness += (exact_log2 (arginfo[i].align)
3823 - exact_log2 (n->simdclone->args[i].alignment));
3825 if (i == (size_t) -1)
3826 continue;
3827 if (bestn == NULL || this_badness < badness)
3829 bestn = n;
3830 badness = this_badness;
3834 if (bestn == NULL)
3835 return false;
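/* Vector arguments whose scalar operand is constant or loop-invariant
   still need a vector type of their own; give up if no such vectype
   exists or if it has more elements than the clone's simdlen.  */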
3837 for (i = 0; i < nargs; i++)
3838 if ((arginfo[i].dt == vect_constant_def
3839 || arginfo[i].dt == vect_external_def)
3840 && bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
3842 arginfo[i].vectype
3843 = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
3844 i)));
3845 if (arginfo[i].vectype == NULL
3846 || (simd_clone_subparts (arginfo[i].vectype)
3847 > bestn->simdclone->simdlen))
3848 return false;
3851 fndecl = bestn->decl;
3852 nunits = bestn->simdclone->simdlen;
3853 ncopies = vf / nunits;
3855 /* If the function isn't const, only allow it in simd loops where the user
3856 has asserted that at least nunits consecutive iterations can be
3857 performed using SIMD instructions. */
3858 if ((loop == NULL || (unsigned) loop->safelen < nunits)
3859 && gimple_vuse (stmt))
3860 return false;
3862 /* Sanity check: make sure that at least one copy of the vectorized stmt
3863 needs to be generated. */
3864 gcc_assert (ncopies >= 1);
3866 if (!vec_stmt) /* transformation not required. */
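/* Analysis only: record the selected clone and, for linear arguments,
   their base, step and simd-lane-linear flag, so that the transform
   phase can reconstruct them without re-analysing the operands.  */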
3868 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
3869 for (i = 0; i < nargs; i++)
3870 if ((bestn->simdclone->args[i].arg_type
3871 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
3872 || (bestn->simdclone->args[i].arg_type
3873 == SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP))
3875 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
3876 + 1);
3877 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
3878 tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
3879 ? size_type_node : TREE_TYPE (arginfo[i].op);
3880 tree ls = build_int_cst (lst, arginfo[i].linear_step);
3881 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
3882 tree sll = arginfo[i].simd_lane_linear
3883 ? boolean_true_node : boolean_false_node;
3884 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
3886 STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
3887 if (dump_enabled_p ())
3888 dump_printf_loc (MSG_NOTE, vect_location,
3889 "=== vectorizable_simd_clone_call ===\n");
3890 /* vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
3891 return true;
3894 /* Transform. */
3896 if (dump_enabled_p ())
3897 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
3899 /* Handle def. */
3900 scalar_dest = gimple_call_lhs (stmt);
3901 vec_dest = NULL_TREE;
3902 rtype = NULL_TREE;
3903 ratype = NULL_TREE;
3904 if (scalar_dest)
3906 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3907 rtype = TREE_TYPE (TREE_TYPE (fndecl));
3908 if (TREE_CODE (rtype) == ARRAY_TYPE)
3910 ratype = rtype;
3911 rtype = TREE_TYPE (ratype);
3915 prev_stmt_info = NULL;
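/* Generate NCOPIES vectorized calls to the chosen clone; each iteration
   builds the vector argument list for one copy and emits the call.  */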
3916 for (j = 0; j < ncopies; ++j)
3918 /* Build argument list for the vectorized call. */
3919 if (j == 0)
3920 vargs.create (nargs);
3921 else
3922 vargs.truncate (0);
3924 for (i = 0; i < nargs; i++)
3926 unsigned int k, l, m, o;
3927 tree atype;
3928 op = gimple_call_arg (stmt, i);
3929 switch (bestn->simdclone->args[i].arg_type)
3931 case SIMD_CLONE_ARG_TYPE_VECTOR:
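/* The clone's vector argument type may be narrower or wider than the
   vector type of this operand: narrower arguments are filled by taking
   BIT_FIELD_REF pieces of the vector defs, wider ones by combining
   several defs into a CONSTRUCTOR.  */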
3932 atype = bestn->simdclone->args[i].vector_type;
3933 o = nunits / simd_clone_subparts (atype);
3934 for (m = j * o; m < (j + 1) * o; m++)
3936 if (simd_clone_subparts (atype)
3937 < simd_clone_subparts (arginfo[i].vectype))
3939 poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
3940 k = (simd_clone_subparts (arginfo[i].vectype)
3941 / simd_clone_subparts (atype));
3942 gcc_assert ((k & (k - 1)) == 0);
3943 if (m == 0)
3944 vec_oprnd0
3945 = vect_get_vec_def_for_operand (op, stmt);
3946 else
3948 vec_oprnd0 = arginfo[i].op;
3949 if ((m & (k - 1)) == 0)
3950 vec_oprnd0
3951 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
3952 vec_oprnd0);
3954 arginfo[i].op = vec_oprnd0;
3955 vec_oprnd0
3956 = build3 (BIT_FIELD_REF, atype, vec_oprnd0,
3957 bitsize_int (prec),
3958 bitsize_int ((m & (k - 1)) * prec));
3959 new_stmt
3960 = gimple_build_assign (make_ssa_name (atype),
3961 vec_oprnd0);
3962 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3963 vargs.safe_push (gimple_assign_lhs (new_stmt));
3965 else
3967 k = (simd_clone_subparts (atype)
3968 / simd_clone_subparts (arginfo[i].vectype));
3969 gcc_assert ((k & (k - 1)) == 0);
3970 vec<constructor_elt, va_gc> *ctor_elts;
3971 if (k != 1)
3972 vec_alloc (ctor_elts, k);
3973 else
3974 ctor_elts = NULL;
3975 for (l = 0; l < k; l++)
3977 if (m == 0 && l == 0)
3978 vec_oprnd0
3979 = vect_get_vec_def_for_operand (op, stmt);
3980 else
3981 vec_oprnd0
3982 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
3983 arginfo[i].op);
3984 arginfo[i].op = vec_oprnd0;
3985 if (k == 1)
3986 break;
3987 CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
3988 vec_oprnd0);
3990 if (k == 1)
3991 vargs.safe_push (vec_oprnd0);
3992 else
3994 vec_oprnd0 = build_constructor (atype, ctor_elts);
3995 new_stmt
3996 = gimple_build_assign (make_ssa_name (atype),
3997 vec_oprnd0);
3998 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3999 vargs.safe_push (gimple_assign_lhs (new_stmt));
4003 break;
4004 case SIMD_CLONE_ARG_TYPE_UNIFORM:
4005 vargs.safe_push (op);
4006 break;
4007 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
4008 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
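/* Linear arguments are passed as scalars.  Unless the argument is
   simd-lane linear, the first copy creates a PHI in the loop header that
   advances by STEP * NCOPIES * NUNITS each iteration; later copies add
   STEP * J * NUNITS to that PHI result.  */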
4009 if (j == 0)
4011 gimple_seq stmts;
4012 arginfo[i].op
4013 = force_gimple_operand (arginfo[i].op, &stmts, true,
4014 NULL_TREE);
4015 if (stmts != NULL)
4017 basic_block new_bb;
4018 edge pe = loop_preheader_edge (loop);
4019 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
4020 gcc_assert (!new_bb);
4022 if (arginfo[i].simd_lane_linear)
4024 vargs.safe_push (arginfo[i].op);
4025 break;
4027 tree phi_res = copy_ssa_name (op);
4028 gphi *new_phi = create_phi_node (phi_res, loop->header);
4029 set_vinfo_for_stmt (new_phi,
4030 new_stmt_vec_info (new_phi, loop_vinfo));
4031 add_phi_arg (new_phi, arginfo[i].op,
4032 loop_preheader_edge (loop), UNKNOWN_LOCATION);
4033 enum tree_code code
4034 = POINTER_TYPE_P (TREE_TYPE (op))
4035 ? POINTER_PLUS_EXPR : PLUS_EXPR;
4036 tree type = POINTER_TYPE_P (TREE_TYPE (op))
4037 ? sizetype : TREE_TYPE (op);
4038 widest_int cst
4039 = wi::mul (bestn->simdclone->args[i].linear_step,
4040 ncopies * nunits);
4041 tree tcst = wide_int_to_tree (type, cst);
4042 tree phi_arg = copy_ssa_name (op);
4043 new_stmt
4044 = gimple_build_assign (phi_arg, code, phi_res, tcst);
4045 gimple_stmt_iterator si = gsi_after_labels (loop->header);
4046 gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
4047 set_vinfo_for_stmt (new_stmt,
4048 new_stmt_vec_info (new_stmt, loop_vinfo));
4049 add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
4050 UNKNOWN_LOCATION);
4051 arginfo[i].op = phi_res;
4052 vargs.safe_push (phi_res);
4054 else
4056 enum tree_code code
4057 = POINTER_TYPE_P (TREE_TYPE (op))
4058 ? POINTER_PLUS_EXPR : PLUS_EXPR;
4059 tree type = POINTER_TYPE_P (TREE_TYPE (op))
4060 ? sizetype : TREE_TYPE (op);
4061 widest_int cst
4062 = wi::mul (bestn->simdclone->args[i].linear_step,
4063 j * nunits);
4064 tree tcst = wide_int_to_tree (type, cst);
4065 new_temp = make_ssa_name (TREE_TYPE (op));
4066 new_stmt = gimple_build_assign (new_temp, code,
4067 arginfo[i].op, tcst);
4068 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4069 vargs.safe_push (new_temp);
4071 break;
4072 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
4073 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
4074 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
4075 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
4076 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
4077 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
4078 default:
4079 gcc_unreachable ();
4083 new_stmt = gimple_build_call_vec (fndecl, vargs);
4084 if (vec_dest)
4086 gcc_assert (ratype || simd_clone_subparts (rtype) == nunits);
4087 if (ratype)
4088 new_temp = create_tmp_var (ratype);
4089 else if (simd_clone_subparts (vectype)
4090 == simd_clone_subparts (rtype))
4091 new_temp = make_ssa_name (vec_dest, new_stmt);
4092 else
4093 new_temp = make_ssa_name (rtype, new_stmt);
4094 gimple_call_set_lhs (new_stmt, new_temp);
4096 vect_finish_stmt_generation (stmt, new_stmt, gsi);
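/* Bring the call result back to the loop's vector type: split a wider
   (or array-typed) return value into vectype-sized pieces, or collect
   several narrower results into a CONSTRUCTOR, clobbering any temporary
   return array once it has been read.  */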
4098 if (vec_dest)
4100 if (simd_clone_subparts (vectype) < nunits)
4102 unsigned int k, l;
4103 poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
4104 poly_uint64 bytes = GET_MODE_SIZE (TYPE_MODE (vectype));
4105 k = nunits / simd_clone_subparts (vectype);
4106 gcc_assert ((k & (k - 1)) == 0);
4107 for (l = 0; l < k; l++)
4109 tree t;
4110 if (ratype)
4112 t = build_fold_addr_expr (new_temp);
4113 t = build2 (MEM_REF, vectype, t,
4114 build_int_cst (TREE_TYPE (t), l * bytes));
4116 else
4117 t = build3 (BIT_FIELD_REF, vectype, new_temp,
4118 bitsize_int (prec), bitsize_int (l * prec));
4119 new_stmt
4120 = gimple_build_assign (make_ssa_name (vectype), t);
4121 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4122 if (j == 0 && l == 0)
4123 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4124 else
4125 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4127 prev_stmt_info = vinfo_for_stmt (new_stmt);
4130 if (ratype)
4132 tree clobber = build_constructor (ratype, NULL);
4133 TREE_THIS_VOLATILE (clobber) = 1;
4134 new_stmt = gimple_build_assign (new_temp, clobber);
4135 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4137 continue;
4139 else if (simd_clone_subparts (vectype) > nunits)
4141 unsigned int k = (simd_clone_subparts (vectype)
4142 / simd_clone_subparts (rtype));
4143 gcc_assert ((k & (k - 1)) == 0);
4144 if ((j & (k - 1)) == 0)
4145 vec_alloc (ret_ctor_elts, k);
4146 if (ratype)
4148 unsigned int m, o = nunits / simd_clone_subparts (rtype);
4149 for (m = 0; m < o; m++)
4151 tree tem = build4 (ARRAY_REF, rtype, new_temp,
4152 size_int (m), NULL_TREE, NULL_TREE);
4153 new_stmt
4154 = gimple_build_assign (make_ssa_name (rtype), tem);
4155 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4156 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
4157 gimple_assign_lhs (new_stmt));
4159 tree clobber = build_constructor (ratype, NULL);
4160 TREE_THIS_VOLATILE (clobber) = 1;
4161 new_stmt = gimple_build_assign (new_temp, clobber);
4162 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4164 else
4165 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
4166 if ((j & (k - 1)) != k - 1)
4167 continue;
4168 vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
4169 new_stmt
4170 = gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
4171 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4173 if ((unsigned) j == k - 1)
4174 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4175 else
4176 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4178 prev_stmt_info = vinfo_for_stmt (new_stmt);
4179 continue;
4181 else if (ratype)
4183 tree t = build_fold_addr_expr (new_temp);
4184 t = build2 (MEM_REF, vectype, t,
4185 build_int_cst (TREE_TYPE (t), 0));
4186 new_stmt
4187 = gimple_build_assign (make_ssa_name (vec_dest), t);
4188 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4189 tree clobber = build_constructor (ratype, NULL);
4190 TREE_THIS_VOLATILE (clobber) = 1;
4191 vect_finish_stmt_generation (stmt,
4192 gimple_build_assign (new_temp,
4193 clobber), gsi);
4197 if (j == 0)
4198 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4199 else
4200 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4202 prev_stmt_info = vinfo_for_stmt (new_stmt);
4205 vargs.release ();
4207 /* The call in STMT might prevent it from being removed in dce.
4208 However, we cannot remove it here, due to the way the ssa name
4209 it defines is mapped to the new definition. So just replace the
4210 rhs of the statement with something harmless. */
4212 if (slp_node)
4213 return true;
4215 if (scalar_dest)
4217 type = TREE_TYPE (scalar_dest);
4218 if (is_pattern_stmt_p (stmt_info))
4219 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
4220 else
4221 lhs = gimple_call_lhs (stmt);
4222 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
4224 else
4225 new_stmt = gimple_build_nop ();
4226 set_vinfo_for_stmt (new_stmt, stmt_info);
4227 set_vinfo_for_stmt (stmt, NULL);
4228 STMT_VINFO_STMT (stmt_info) = new_stmt;
4229 gsi_replace (gsi, new_stmt, true);
4230 unlink_stmt_vdef (stmt);
4232 return true;
4236 /* Function vect_gen_widened_results_half
4238 Create a vector stmt whose code, number of arguments, and result
4239 variable are CODE, OP_TYPE, and VEC_DEST, and whose arguments are
4240 VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at BSI.
4241 In the case that CODE is a CALL_EXPR, this means that a call to DECL
4242 needs to be created (DECL is a function-decl of a target-builtin).
4243 STMT is the original scalar stmt that we are vectorizing. */
4245 static gimple *
4246 vect_gen_widened_results_half (enum tree_code code,
4247 tree decl,
4248 tree vec_oprnd0, tree vec_oprnd1, int op_type,
4249 tree vec_dest, gimple_stmt_iterator *gsi,
4250 gimple *stmt)
4252 gimple *new_stmt;
4253 tree new_temp;
4255 /* Generate half of the widened result: */
4256 if (code == CALL_EXPR)
4258 /* Target specific support */
4259 if (op_type == binary_op)
4260 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
4261 else
4262 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
4263 new_temp = make_ssa_name (vec_dest, new_stmt);
4264 gimple_call_set_lhs (new_stmt, new_temp);
4266 else
4268 /* Generic support */
4269 gcc_assert (op_type == TREE_CODE_LENGTH (code));
4270 if (op_type != binary_op)
4271 vec_oprnd1 = NULL;
4272 new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
4273 new_temp = make_ssa_name (vec_dest, new_stmt);
4274 gimple_assign_set_lhs (new_stmt, new_temp);
4276 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4278 return new_stmt;
4282 /* Get vectorized definitions for loop-based vectorization. For the first
4283 operand we call vect_get_vec_def_for_operand() (with OPRND containing
4284 the scalar operand), and for the rest we get a copy with
4285 vect_get_vec_def_for_stmt_copy() using the previous vector definition
4286 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
4287 The vectors are collected into VEC_OPRNDS. */
4289 static void
4290 vect_get_loop_based_defs (tree *oprnd, gimple *stmt, enum vect_def_type dt,
4291 vec<tree> *vec_oprnds, int multi_step_cvt)
4293 tree vec_oprnd;
4295 /* Get first vector operand. */
4296 /* All the vector operands except the very first one (that is scalar oprnd)
4297 are stmt copies. */
4298 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
4299 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt);
4300 else
4301 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
4303 vec_oprnds->quick_push (vec_oprnd);
4305 /* Get second vector operand. */
4306 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
4307 vec_oprnds->quick_push (vec_oprnd);
4309 *oprnd = vec_oprnd;
4311 /* For conversion in multiple steps, continue to get operands
4312 recursively. */
4313 if (multi_step_cvt)
4314 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
4318 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
4319 For multi-step conversions store the resulting vectors and call the function
4320 recursively. */
4322 static void
4323 vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
4324 int multi_step_cvt, gimple *stmt,
4325 vec<tree> vec_dsts,
4326 gimple_stmt_iterator *gsi,
4327 slp_tree slp_node, enum tree_code code,
4328 stmt_vec_info *prev_stmt_info)
4330 unsigned int i;
4331 tree vop0, vop1, new_tmp, vec_dest;
4332 gimple *new_stmt;
4333 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4335 vec_dest = vec_dsts.pop ();
4337 for (i = 0; i < vec_oprnds->length (); i += 2)
4339 /* Create demotion operation. */
4340 vop0 = (*vec_oprnds)[i];
4341 vop1 = (*vec_oprnds)[i + 1];
4342 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
4343 new_tmp = make_ssa_name (vec_dest, new_stmt);
4344 gimple_assign_set_lhs (new_stmt, new_tmp);
4345 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4347 if (multi_step_cvt)
4348 /* Store the resulting vector for next recursive call. */
4349 (*vec_oprnds)[i/2] = new_tmp;
4350 else
4352 /* This is the last step of the conversion sequence. Store the
4353 vectors in SLP_NODE or in vector info of the scalar statement
4354 (or in STMT_VINFO_RELATED_STMT chain). */
4355 if (slp_node)
4356 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4357 else
4359 if (!*prev_stmt_info)
4360 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
4361 else
4362 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
4364 *prev_stmt_info = vinfo_for_stmt (new_stmt);
4369 /* For multi-step demotion operations we first generate demotion operations
4370 from the source type to the intermediate types, and then combine the
4371 results (stored in VEC_OPRNDS) in demotion operation to the destination
4372 type. */
4373 if (multi_step_cvt)
4375 /* At each level of recursion we have half of the operands we had at the
4376 previous level. */
4377 vec_oprnds->truncate ((i+1)/2);
4378 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
4379 stmt, vec_dsts, gsi, slp_node,
4380 VEC_PACK_TRUNC_EXPR,
4381 prev_stmt_info);
4384 vec_dsts.quick_push (vec_dest);
4388 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
4389 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
4390 the resulting vectors and call the function recursively. */
4392 static void
4393 vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
4394 vec<tree> *vec_oprnds1,
4395 gimple *stmt, tree vec_dest,
4396 gimple_stmt_iterator *gsi,
4397 enum tree_code code1,
4398 enum tree_code code2, tree decl1,
4399 tree decl2, int op_type)
4401 int i;
4402 tree vop0, vop1, new_tmp1, new_tmp2;
4403 gimple *new_stmt1, *new_stmt2;
4404 vec<tree> vec_tmp = vNULL;
4406 vec_tmp.create (vec_oprnds0->length () * 2);
4407 FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
4409 if (op_type == binary_op)
4410 vop1 = (*vec_oprnds1)[i];
4411 else
4412 vop1 = NULL_TREE;
4414 /* Generate the two halves of promotion operation. */
4415 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
4416 op_type, vec_dest, gsi, stmt);
4417 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
4418 op_type, vec_dest, gsi, stmt);
4419 if (is_gimple_call (new_stmt1))
4421 new_tmp1 = gimple_call_lhs (new_stmt1);
4422 new_tmp2 = gimple_call_lhs (new_stmt2);
4424 else
4426 new_tmp1 = gimple_assign_lhs (new_stmt1);
4427 new_tmp2 = gimple_assign_lhs (new_stmt2);
4430 /* Store the results for the next step. */
4431 vec_tmp.quick_push (new_tmp1);
4432 vec_tmp.quick_push (new_tmp2);
4435 vec_oprnds0->release ();
4436 *vec_oprnds0 = vec_tmp;
4440 /* Check if STMT performs a conversion operation, that can be vectorized.
4441 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4442 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4443 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4445 static bool
4446 vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
4447 gimple **vec_stmt, slp_tree slp_node)
4449 tree vec_dest;
4450 tree scalar_dest;
4451 tree op0, op1 = NULL_TREE;
4452 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
4453 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4454 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4455 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
4456 enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
4457 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
4458 tree new_temp;
4459 gimple *def_stmt;
4460 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4461 int ndts = 2;
4462 gimple *new_stmt = NULL;
4463 stmt_vec_info prev_stmt_info;
4464 poly_uint64 nunits_in;
4465 poly_uint64 nunits_out;
4466 tree vectype_out, vectype_in;
4467 int ncopies, i, j;
4468 tree lhs_type, rhs_type;
4469 enum { NARROW, NONE, WIDEN } modifier;
4470 vec<tree> vec_oprnds0 = vNULL;
4471 vec<tree> vec_oprnds1 = vNULL;
4472 tree vop0;
4473 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4474 vec_info *vinfo = stmt_info->vinfo;
4475 int multi_step_cvt = 0;
4476 vec<tree> interm_types = vNULL;
4477 tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
4478 int op_type;
4479 unsigned short fltsz;
4481 /* Is STMT a vectorizable conversion? */
4483 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4484 return false;
4486 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4487 && ! vec_stmt)
4488 return false;
4490 if (!is_gimple_assign (stmt))
4491 return false;
4493 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4494 return false;
4496 code = gimple_assign_rhs_code (stmt);
4497 if (!CONVERT_EXPR_CODE_P (code)
4498 && code != FIX_TRUNC_EXPR
4499 && code != FLOAT_EXPR
4500 && code != WIDEN_MULT_EXPR
4501 && code != WIDEN_LSHIFT_EXPR)
4502 return false;
4504 op_type = TREE_CODE_LENGTH (code);
4506 /* Check types of lhs and rhs. */
4507 scalar_dest = gimple_assign_lhs (stmt);
4508 lhs_type = TREE_TYPE (scalar_dest);
4509 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4511 op0 = gimple_assign_rhs1 (stmt);
4512 rhs_type = TREE_TYPE (op0);
4514 if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
4515 && !((INTEGRAL_TYPE_P (lhs_type)
4516 && INTEGRAL_TYPE_P (rhs_type))
4517 || (SCALAR_FLOAT_TYPE_P (lhs_type)
4518 && SCALAR_FLOAT_TYPE_P (rhs_type))))
4519 return false;
4521 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
4522 && ((INTEGRAL_TYPE_P (lhs_type)
4523 && !type_has_mode_precision_p (lhs_type))
4524 || (INTEGRAL_TYPE_P (rhs_type)
4525 && !type_has_mode_precision_p (rhs_type))))
4527 if (dump_enabled_p ())
4528 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4529 "type conversion to/from bit-precision unsupported."
4530 "\n");
4531 return false;
4534 /* Check the operands of the operation. */
4535 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype_in))
4537 if (dump_enabled_p ())
4538 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4539 "use not simple.\n");
4540 return false;
4542 if (op_type == binary_op)
4544 bool ok;
4546 op1 = gimple_assign_rhs2 (stmt);
4547 gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
4548 /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
4549 OP1. */
4550 if (CONSTANT_CLASS_P (op0))
4551 ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &vectype_in);
4552 else
4553 ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]);
4555 if (!ok)
4557 if (dump_enabled_p ())
4558 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4559 "use not simple.\n");
4560 return false;
4564 /* If op0 is an external or constant defs use a vector type of
4565 the same size as the output vector type. */
4566 if (!vectype_in)
4567 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
4568 if (vec_stmt)
4569 gcc_assert (vectype_in);
4570 if (!vectype_in)
4572 if (dump_enabled_p ())
4574 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4575 "no vectype for scalar type ");
4576 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
4577 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4580 return false;
4583 if (VECTOR_BOOLEAN_TYPE_P (vectype_out)
4584 && !VECTOR_BOOLEAN_TYPE_P (vectype_in))
4586 if (dump_enabled_p ())
4588 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4589 "can't convert between boolean and non "
4590 "boolean vectors");
4591 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
4592 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4595 return false;
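/* Classify the conversion by element counts: NONE when input and output
   vectors have the same number of elements, NARROW when the output has a
   multiple of the input's elements (demotion), WIDEN otherwise
   (promotion).  */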
4598 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
4599 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4600 if (known_eq (nunits_out, nunits_in))
4601 modifier = NONE;
4602 else if (multiple_p (nunits_out, nunits_in))
4603 modifier = NARROW;
4604 else
4606 gcc_checking_assert (multiple_p (nunits_in, nunits_out));
4607 modifier = WIDEN;
4610 /* Multiple types in SLP are handled by creating the appropriate number of
4611 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4612 case of SLP. */
4613 if (slp_node)
4614 ncopies = 1;
4615 else if (modifier == NARROW)
4616 ncopies = vect_get_num_copies (loop_vinfo, vectype_out);
4617 else
4618 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
4620 /* Sanity check: make sure that at least one copy of the vectorized stmt
4621 needs to be generated. */
4622 gcc_assert (ncopies >= 1);
4624 bool found_mode = false;
4625 scalar_mode lhs_mode = SCALAR_TYPE_MODE (lhs_type);
4626 scalar_mode rhs_mode = SCALAR_TYPE_MODE (rhs_type);
4627 opt_scalar_mode rhs_mode_iter;
4629 /* Supportable by target? */
4630 switch (modifier)
4632 case NONE:
4633 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
4634 return false;
4635 if (supportable_convert_operation (code, vectype_out, vectype_in,
4636 &decl1, &code1))
4637 break;
4638 /* FALLTHRU */
4639 unsupported:
4640 if (dump_enabled_p ())
4641 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4642 "conversion not supported by target.\n");
4643 return false;
4645 case WIDEN:
4646 if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
4647 &code1, &code2, &multi_step_cvt,
4648 &interm_types))
4650 /* Binary widening operation can only be supported directly by the
4651 architecture. */
4652 gcc_assert (!(multi_step_cvt && op_type == binary_op));
4653 break;
4656 if (code != FLOAT_EXPR
4657 || GET_MODE_SIZE (lhs_mode) <= GET_MODE_SIZE (rhs_mode))
4658 goto unsupported;
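/* If the target cannot handle the widening FLOAT_EXPR directly, try a
   two-stage scheme: widen the integer input to an intermediate integer
   type first, then convert that intermediate type to the float result.  */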
4660 fltsz = GET_MODE_SIZE (lhs_mode);
4661 FOR_EACH_2XWIDER_MODE (rhs_mode_iter, rhs_mode)
4663 rhs_mode = rhs_mode_iter.require ();
4664 if (GET_MODE_SIZE (rhs_mode) > fltsz)
4665 break;
4667 cvt_type
4668 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
4669 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
4670 if (cvt_type == NULL_TREE)
4671 goto unsupported;
4673 if (GET_MODE_SIZE (rhs_mode) == fltsz)
4675 if (!supportable_convert_operation (code, vectype_out,
4676 cvt_type, &decl1, &codecvt1))
4677 goto unsupported;
4679 else if (!supportable_widening_operation (code, stmt, vectype_out,
4680 cvt_type, &codecvt1,
4681 &codecvt2, &multi_step_cvt,
4682 &interm_types))
4683 continue;
4684 else
4685 gcc_assert (multi_step_cvt == 0);
4687 if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
4688 vectype_in, &code1, &code2,
4689 &multi_step_cvt, &interm_types))
4691 found_mode = true;
4692 break;
4696 if (!found_mode)
4697 goto unsupported;
4699 if (GET_MODE_SIZE (rhs_mode) == fltsz)
4700 codecvt2 = ERROR_MARK;
4701 else
4703 multi_step_cvt++;
4704 interm_types.safe_push (cvt_type);
4705 cvt_type = NULL_TREE;
4707 break;
4709 case NARROW:
4710 gcc_assert (op_type == unary_op);
4711 if (supportable_narrowing_operation (code, vectype_out, vectype_in,
4712 &code1, &multi_step_cvt,
4713 &interm_types))
4714 break;
4716 if (code != FIX_TRUNC_EXPR
4717 || GET_MODE_SIZE (lhs_mode) >= GET_MODE_SIZE (rhs_mode))
4718 goto unsupported;
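/* Likewise for an unsupported narrowing FIX_TRUNC_EXPR: first convert
   the float input to an integer type of the same width, then narrow that
   integer result to the destination type.  */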
4720 cvt_type
4721 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
4722 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
4723 if (cvt_type == NULL_TREE)
4724 goto unsupported;
4725 if (!supportable_convert_operation (code, cvt_type, vectype_in,
4726 &decl1, &codecvt1))
4727 goto unsupported;
4728 if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
4729 &code1, &multi_step_cvt,
4730 &interm_types))
4731 break;
4732 goto unsupported;
4734 default:
4735 gcc_unreachable ();
4738 if (!vec_stmt) /* transformation not required. */
4740 if (dump_enabled_p ())
4741 dump_printf_loc (MSG_NOTE, vect_location,
4742 "=== vectorizable_conversion ===\n");
4743 if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
4745 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
4746 if (!slp_node)
4747 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
4749 else if (modifier == NARROW)
4751 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
4752 if (!slp_node)
4753 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
4755 else
4757 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
4758 if (!slp_node)
4759 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
4761 interm_types.release ();
4762 return true;
4765 /* Transform. */
4766 if (dump_enabled_p ())
4767 dump_printf_loc (MSG_NOTE, vect_location,
4768 "transform conversion. ncopies = %d.\n", ncopies);
4770 if (op_type == binary_op)
4772 if (CONSTANT_CLASS_P (op0))
4773 op0 = fold_convert (TREE_TYPE (op1), op0);
4774 else if (CONSTANT_CLASS_P (op1))
4775 op1 = fold_convert (TREE_TYPE (op0), op1);
4778 /* In case of multi-step conversion, we first generate conversion operations
4779 to the intermediate types, and then from those types to the final one.
4780 We create vector destinations for the intermediate type (TYPES) received
4781 from supportable_*_operation, and store them in the correct order
4782 for future use in vect_create_vectorized_*_stmts (). */
4783 auto_vec<tree> vec_dsts (multi_step_cvt + 1);
4784 vec_dest = vect_create_destination_var (scalar_dest,
4785 (cvt_type && modifier == WIDEN)
4786 ? cvt_type : vectype_out);
4787 vec_dsts.quick_push (vec_dest);
4789 if (multi_step_cvt)
4791 for (i = interm_types.length () - 1;
4792 interm_types.iterate (i, &intermediate_type); i--)
4794 vec_dest = vect_create_destination_var (scalar_dest,
4795 intermediate_type);
4796 vec_dsts.quick_push (vec_dest);
4800 if (cvt_type)
4801 vec_dest = vect_create_destination_var (scalar_dest,
4802 modifier == WIDEN
4803 ? vectype_out : cvt_type);
4805 if (!slp_node)
4807 if (modifier == WIDEN)
4809 vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
4810 if (op_type == binary_op)
4811 vec_oprnds1.create (1);
4813 else if (modifier == NARROW)
4814 vec_oprnds0.create (
4815 2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
4817 else if (code == WIDEN_LSHIFT_EXPR)
4818 vec_oprnds1.create (slp_node->vec_stmts_size);
4820 last_oprnd = op0;
4821 prev_stmt_info = NULL;
4822 switch (modifier)
4824 case NONE:
4825 for (j = 0; j < ncopies; j++)
4827 if (j == 0)
4828 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
4829 else
4830 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
4832 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4834 /* Arguments are ready, create the new vector stmt. */
4835 if (code1 == CALL_EXPR)
4837 new_stmt = gimple_build_call (decl1, 1, vop0);
4838 new_temp = make_ssa_name (vec_dest, new_stmt);
4839 gimple_call_set_lhs (new_stmt, new_temp);
4841 else
4843 gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
4844 new_stmt = gimple_build_assign (vec_dest, code1, vop0);
4845 new_temp = make_ssa_name (vec_dest, new_stmt);
4846 gimple_assign_set_lhs (new_stmt, new_temp);
4849 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4850 if (slp_node)
4851 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4852 else
4854 if (!prev_stmt_info)
4855 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4856 else
4857 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4858 prev_stmt_info = vinfo_for_stmt (new_stmt);
4862 break;
4864 case WIDEN:
4865 /* In case the vectorization factor (VF) is bigger than the number
4866 of elements that we can fit in a vectype (nunits), we have to
4867 generate more than one vector stmt, i.e. we need to "unroll"
4868 the vector stmt by a factor VF/nunits. */
4869 for (j = 0; j < ncopies; j++)
4871 /* Handle uses. */
4872 if (j == 0)
4874 if (slp_node)
4876 if (code == WIDEN_LSHIFT_EXPR)
4878 unsigned int k;
4880 vec_oprnd1 = op1;
4881 /* Store vec_oprnd1 for every vector stmt to be created
4882 for SLP_NODE. We check during the analysis that all
4883 the shift arguments are the same. */
4884 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
4885 vec_oprnds1.quick_push (vec_oprnd1);
4887 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4888 slp_node);
4890 else
4891 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
4892 &vec_oprnds1, slp_node);
4894 else
4896 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt);
4897 vec_oprnds0.quick_push (vec_oprnd0);
4898 if (op_type == binary_op)
4900 if (code == WIDEN_LSHIFT_EXPR)
4901 vec_oprnd1 = op1;
4902 else
4903 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt);
4904 vec_oprnds1.quick_push (vec_oprnd1);
4908 else
4910 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
4911 vec_oprnds0.truncate (0);
4912 vec_oprnds0.quick_push (vec_oprnd0);
4913 if (op_type == binary_op)
4915 if (code == WIDEN_LSHIFT_EXPR)
4916 vec_oprnd1 = op1;
4917 else
4918 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
4919 vec_oprnd1);
4920 vec_oprnds1.truncate (0);
4921 vec_oprnds1.quick_push (vec_oprnd1);
4925 /* Arguments are ready. Create the new vector stmts. */
4926 for (i = multi_step_cvt; i >= 0; i--)
4928 tree this_dest = vec_dsts[i];
4929 enum tree_code c1 = code1, c2 = code2;
4930 if (i == 0 && codecvt2 != ERROR_MARK)
4932 c1 = codecvt1;
4933 c2 = codecvt2;
4935 vect_create_vectorized_promotion_stmts (&vec_oprnds0,
4936 &vec_oprnds1,
4937 stmt, this_dest, gsi,
4938 c1, c2, decl1, decl2,
4939 op_type);
4942 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4944 if (cvt_type)
4946 if (codecvt1 == CALL_EXPR)
4948 new_stmt = gimple_build_call (decl1, 1, vop0);
4949 new_temp = make_ssa_name (vec_dest, new_stmt);
4950 gimple_call_set_lhs (new_stmt, new_temp);
4952 else
4954 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
4955 new_temp = make_ssa_name (vec_dest);
4956 new_stmt = gimple_build_assign (new_temp, codecvt1,
4957 vop0);
4960 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4962 else
4963 new_stmt = SSA_NAME_DEF_STMT (vop0);
4965 if (slp_node)
4966 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4967 else
4969 if (!prev_stmt_info)
4970 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
4971 else
4972 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4973 prev_stmt_info = vinfo_for_stmt (new_stmt);
4978 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4979 break;
4981 case NARROW:
4982 /* In case the vectorization factor (VF) is bigger than the number
4983 of elements that we can fit in a vectype (nunits), we have to
4984 generate more than one vector stmt, i.e. we need to "unroll"
4985 the vector stmt by a factor VF/nunits. */
4986 for (j = 0; j < ncopies; j++)
4988 /* Handle uses. */
4989 if (slp_node)
4990 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4991 slp_node);
4992 else
4994 vec_oprnds0.truncate (0);
4995 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
4996 vect_pow2 (multi_step_cvt) - 1);
4999 /* Arguments are ready. Create the new vector stmts. */
5000 if (cvt_type)
5001 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
5003 if (codecvt1 == CALL_EXPR)
5005 new_stmt = gimple_build_call (decl1, 1, vop0);
5006 new_temp = make_ssa_name (vec_dest, new_stmt);
5007 gimple_call_set_lhs (new_stmt, new_temp);
5009 else
5011 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
5012 new_temp = make_ssa_name (vec_dest);
5013 new_stmt = gimple_build_assign (new_temp, codecvt1,
5014 vop0);
5017 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5018 vec_oprnds0[i] = new_temp;
5021 vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
5022 stmt, vec_dsts, gsi,
5023 slp_node, code1,
5024 &prev_stmt_info);
5027 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
5028 break;
5031 vec_oprnds0.release ();
5032 vec_oprnds1.release ();
5033 interm_types.release ();
5035 return true;
5039 /* Function vectorizable_assignment.
5041 Check if STMT performs an assignment (copy) that can be vectorized.
5042 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5043 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
5044 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5046 static bool
5047 vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
5048 gimple **vec_stmt, slp_tree slp_node)
5050 tree vec_dest;
5051 tree scalar_dest;
5052 tree op;
5053 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5054 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5055 tree new_temp;
5056 gimple *def_stmt;
5057 enum vect_def_type dt[1] = {vect_unknown_def_type};
5058 int ndts = 1;
5059 int ncopies;
5060 int i, j;
5061 vec<tree> vec_oprnds = vNULL;
5062 tree vop;
5063 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5064 vec_info *vinfo = stmt_info->vinfo;
5065 gimple *new_stmt = NULL;
5066 stmt_vec_info prev_stmt_info = NULL;
5067 enum tree_code code;
5068 tree vectype_in;
5070 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5071 return false;
5073 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5074 && ! vec_stmt)
5075 return false;
5077 /* Is vectorizable assignment? */
5078 if (!is_gimple_assign (stmt))
5079 return false;
5081 scalar_dest = gimple_assign_lhs (stmt);
5082 if (TREE_CODE (scalar_dest) != SSA_NAME)
5083 return false;
5085 code = gimple_assign_rhs_code (stmt);
5086 if (gimple_assign_single_p (stmt)
5087 || code == PAREN_EXPR
5088 || CONVERT_EXPR_CODE_P (code))
5089 op = gimple_assign_rhs1 (stmt);
5090 else
5091 return false;
5093 if (code == VIEW_CONVERT_EXPR)
5094 op = TREE_OPERAND (op, 0);
5096 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5097 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
5099 /* Multiple types in SLP are handled by creating the appropriate number of
5100 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5101 case of SLP. */
5102 if (slp_node)
5103 ncopies = 1;
5104 else
5105 ncopies = vect_get_num_copies (loop_vinfo, vectype);
5107 gcc_assert (ncopies >= 1);
5109 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[0], &vectype_in))
5111 if (dump_enabled_p ())
5112 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5113 "use not simple.\n");
5114 return false;
5117 /* We can handle NOP_EXPR conversions that do not change the number
5118 of elements or the vector size. */
5119 if ((CONVERT_EXPR_CODE_P (code)
5120 || code == VIEW_CONVERT_EXPR)
5121 && (!vectype_in
5122 || maybe_ne (TYPE_VECTOR_SUBPARTS (vectype_in), nunits)
5123 || maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype)),
5124 GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
5125 return false;
5127 /* We do not handle bit-precision changes. */
5128 if ((CONVERT_EXPR_CODE_P (code)
5129 || code == VIEW_CONVERT_EXPR)
5130 && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
5131 && (!type_has_mode_precision_p (TREE_TYPE (scalar_dest))
5132 || !type_has_mode_precision_p (TREE_TYPE (op)))
5133 /* But a conversion that does not change the bit-pattern is ok. */
5134 && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
5135 > TYPE_PRECISION (TREE_TYPE (op)))
5136 && TYPE_UNSIGNED (TREE_TYPE (op)))
5137 /* Conversion between boolean types of different sizes is
5138 a simple assignment in case their vectypes are the same
5139 boolean vector type. */
5140 && (!VECTOR_BOOLEAN_TYPE_P (vectype)
5141 || !VECTOR_BOOLEAN_TYPE_P (vectype_in)))
5143 if (dump_enabled_p ())
5144 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5145 "type conversion to/from bit-precision "
5146 "unsupported.\n");
5147 return false;
5150 if (!vec_stmt) /* transformation not required. */
5152 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
5153 if (dump_enabled_p ())
5154 dump_printf_loc (MSG_NOTE, vect_location,
5155 "=== vectorizable_assignment ===\n");
5156 if (!slp_node)
5157 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
5158 return true;
5161 /* Transform. */
5162 if (dump_enabled_p ())
5163 dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");
5165 /* Handle def. */
5166 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5168 /* Handle use. */
5169 for (j = 0; j < ncopies; j++)
5171 /* Handle uses. */
5172 if (j == 0)
5173 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
5174 else
5175 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
5177 /* Arguments are ready. Create the new vector stmt. */
5178 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
5180 if (CONVERT_EXPR_CODE_P (code)
5181 || code == VIEW_CONVERT_EXPR)
5182 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
5183 new_stmt = gimple_build_assign (vec_dest, vop);
5184 new_temp = make_ssa_name (vec_dest, new_stmt);
5185 gimple_assign_set_lhs (new_stmt, new_temp);
5186 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5187 if (slp_node)
5188 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
5191 if (slp_node)
5192 continue;
5194 if (j == 0)
5195 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5196 else
5197 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5199 prev_stmt_info = vinfo_for_stmt (new_stmt);
5202 vec_oprnds.release ();
5203 return true;
5207 /* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
5208 either as shift by a scalar or by a vector. */
5210 bool
5211 vect_supportable_shift (enum tree_code code, tree scalar_type)
5214 machine_mode vec_mode;
5215 optab optab;
5216 int icode;
5217 tree vectype;
5219 vectype = get_vectype_for_scalar_type (scalar_type);
5220 if (!vectype)
5221 return false;
5223 optab = optab_for_tree_code (code, vectype, optab_scalar);
5224 if (!optab
5225 || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
5227 optab = optab_for_tree_code (code, vectype, optab_vector);
5228 if (!optab
5229 || (optab_handler (optab, TYPE_MODE (vectype))
5230 == CODE_FOR_nothing))
5231 return false;
5234 vec_mode = TYPE_MODE (vectype);
5235 icode = (int) optab_handler (optab, vec_mode);
5236 if (icode == CODE_FOR_nothing)
5237 return false;
5239 return true;
5243 /* Function vectorizable_shift.
5245 Check if STMT performs a shift operation that can be vectorized.
5246 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5247 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
5248 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5250 static bool
5251 vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
5252 gimple **vec_stmt, slp_tree slp_node)
5254 tree vec_dest;
5255 tree scalar_dest;
5256 tree op0, op1 = NULL;
5257 tree vec_oprnd1 = NULL_TREE;
5258 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5259 tree vectype;
5260 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5261 enum tree_code code;
5262 machine_mode vec_mode;
5263 tree new_temp;
5264 optab optab;
5265 int icode;
5266 machine_mode optab_op2_mode;
5267 gimple *def_stmt;
5268 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
5269 int ndts = 2;
5270 gimple *new_stmt = NULL;
5271 stmt_vec_info prev_stmt_info;
5272 poly_uint64 nunits_in;
5273 poly_uint64 nunits_out;
5274 tree vectype_out;
5275 tree op1_vectype;
5276 int ncopies;
5277 int j, i;
5278 vec<tree> vec_oprnds0 = vNULL;
5279 vec<tree> vec_oprnds1 = vNULL;
5280 tree vop0, vop1;
5281 unsigned int k;
5282 bool scalar_shift_arg = true;
5283 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5284 vec_info *vinfo = stmt_info->vinfo;
5286 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5287 return false;
5289 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5290 && ! vec_stmt)
5291 return false;
5293 /* Is STMT a vectorizable binary/unary operation? */
5294 if (!is_gimple_assign (stmt))
5295 return false;
5297 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
5298 return false;
5300 code = gimple_assign_rhs_code (stmt);
5302 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
5303 || code == RROTATE_EXPR))
5304 return false;
5306 scalar_dest = gimple_assign_lhs (stmt);
5307 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
5308 if (!type_has_mode_precision_p (TREE_TYPE (scalar_dest)))
5310 if (dump_enabled_p ())
5311 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5312 "bit-precision shifts not supported.\n");
5313 return false;
5316 op0 = gimple_assign_rhs1 (stmt);
5317 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
5319 if (dump_enabled_p ())
5320 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5321 "use not simple.\n");
5322 return false;
5324 /* If op0 is an external or constant def use a vector type with
5325 the same size as the output vector type. */
5326 if (!vectype)
5327 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
5328 if (vec_stmt)
5329 gcc_assert (vectype);
5330 if (!vectype)
5332 if (dump_enabled_p ())
5333 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5334 "no vectype for scalar type\n");
5335 return false;
5338 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
5339 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
5340 if (maybe_ne (nunits_out, nunits_in))
5341 return false;
5343 op1 = gimple_assign_rhs2 (stmt);
5344 if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &op1_vectype))
5346 if (dump_enabled_p ())
5347 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5348 "use not simple.\n");
5349 return false;
5352 /* Multiple types in SLP are handled by creating the appropriate number of
5353 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5354 case of SLP. */
5355 if (slp_node)
5356 ncopies = 1;
5357 else
5358 ncopies = vect_get_num_copies (loop_vinfo, vectype);
5360 gcc_assert (ncopies >= 1);
5362 /* Determine whether the shift amount is a vector, or scalar. If the
5363 shift/rotate amount is a vector, use the vector/vector shift optabs. */
5365 if ((dt[1] == vect_internal_def
5366 || dt[1] == vect_induction_def)
5367 && !slp_node)
5368 scalar_shift_arg = false;
5369 else if (dt[1] == vect_constant_def
5370 || dt[1] == vect_external_def
5371 || dt[1] == vect_internal_def)
5373 /* In SLP, we need to check whether the shift count is the same
5374 for every statement; in loops, if it is a constant or invariant,
5375 it is always a scalar shift. */
5376 if (slp_node)
5378 vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
5379 gimple *slpstmt;
5381 FOR_EACH_VEC_ELT (stmts, k, slpstmt)
5382 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
5383 scalar_shift_arg = false;
5386 /* If the shift amount is computed by a pattern stmt we cannot
5387 use the scalar amount directly thus give up and use a vector
5388 shift. */
5389 if (dt[1] == vect_internal_def)
5391 gimple *def = SSA_NAME_DEF_STMT (op1);
5392 if (is_pattern_stmt_p (vinfo_for_stmt (def)))
5393 scalar_shift_arg = false;
5396 else
5398 if (dump_enabled_p ())
5399 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5400 "operand mode requires invariant argument.\n");
5401 return false;
5404 /* Vector shifted by vector. */
5405 if (!scalar_shift_arg)
5407 optab = optab_for_tree_code (code, vectype, optab_vector);
5408 if (dump_enabled_p ())
5409 dump_printf_loc (MSG_NOTE, vect_location,
5410 "vector/vector shift/rotate found.\n");
5412 if (!op1_vectype)
5413 op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
5414 if (op1_vectype == NULL_TREE
5415 || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
5417 if (dump_enabled_p ())
5418 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5419 "unusable type for last operand in"
5420 " vector/vector shift/rotate.\n");
5421 return false;
5424 /* See if the machine has a vector-shifted-by-scalar insn and, if not,
5425 see if it has a vector-shifted-by-vector insn. */
5426 else
5428 optab = optab_for_tree_code (code, vectype, optab_scalar);
5429 if (optab
5430 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
5432 if (dump_enabled_p ())
5433 dump_printf_loc (MSG_NOTE, vect_location,
5434 "vector/scalar shift/rotate found.\n");
5436 else
5438 optab = optab_for_tree_code (code, vectype, optab_vector);
5439 if (optab
5440 && (optab_handler (optab, TYPE_MODE (vectype))
5441 != CODE_FOR_nothing))
5443 scalar_shift_arg = false;
5445 if (dump_enabled_p ())
5446 dump_printf_loc (MSG_NOTE, vect_location,
5447 "vector/vector shift/rotate found.\n");
5449 /* Unlike the other binary operators, shifts/rotates have
5450 the rhs being int, instead of the same type as the lhs,
5451 so make sure the scalar is the right type if we are
5452 dealing with vectors of long long/long/short/char. */
5453 if (dt[1] == vect_constant_def)
5454 op1 = fold_convert (TREE_TYPE (vectype), op1);
5455 else if (!useless_type_conversion_p (TREE_TYPE (vectype),
5456 TREE_TYPE (op1)))
5458 if (slp_node
5459 && TYPE_MODE (TREE_TYPE (vectype))
5460 != TYPE_MODE (TREE_TYPE (op1)))
5462 if (dump_enabled_p ())
5463 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5464 "unusable type for last operand in"
5465 " vector/vector shift/rotate.\n");
5466 return false;
5468 if (vec_stmt && !slp_node)
5470 op1 = fold_convert (TREE_TYPE (vectype), op1);
5471 op1 = vect_init_vector (stmt, op1,
5472 TREE_TYPE (vectype), NULL);
5479 /* Supportable by target? */
5480 if (!optab)
5482 if (dump_enabled_p ())
5483 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5484 "no optab.\n");
5485 return false;
5487 vec_mode = TYPE_MODE (vectype);
5488 icode = (int) optab_handler (optab, vec_mode);
5489 if (icode == CODE_FOR_nothing)
5491 if (dump_enabled_p ())
5492 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5493 "op not supported by target.\n");
5494 /* Check only during analysis. */
5495 if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
5496 || (!vec_stmt
5497 && !vect_worthwhile_without_simd_p (vinfo, code)))
5498 return false;
5499 if (dump_enabled_p ())
5500 dump_printf_loc (MSG_NOTE, vect_location,
5501 "proceeding using word mode.\n");
5504 /* Worthwhile without SIMD support? Check only during analysis. */
5505 if (!vec_stmt
5506 && !VECTOR_MODE_P (TYPE_MODE (vectype))
5507 && !vect_worthwhile_without_simd_p (vinfo, code))
5509 if (dump_enabled_p ())
5510 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5511 "not worthwhile without SIMD support.\n");
5512 return false;
5515 if (!vec_stmt) /* transformation not required. */
5517 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
5518 if (dump_enabled_p ())
5519 dump_printf_loc (MSG_NOTE, vect_location,
5520 "=== vectorizable_shift ===\n");
5521 if (!slp_node)
5522 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
5523 return true;
5526 /* Transform. */
5528 if (dump_enabled_p ())
5529 dump_printf_loc (MSG_NOTE, vect_location,
5530 "transform binary/unary operation.\n");
5532 /* Handle def. */
5533 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5535 prev_stmt_info = NULL;
5536 for (j = 0; j < ncopies; j++)
5538 /* Handle uses. */
5539 if (j == 0)
5541 if (scalar_shift_arg)
5543 /* Vector shl and shr insn patterns can be defined with scalar
5544 operand 2 (shift operand). In this case, use constant or loop
5545 invariant op1 directly, without extending it to vector mode
5546 first. */
5547 optab_op2_mode = insn_data[icode].operand[2].mode;
5548 if (!VECTOR_MODE_P (optab_op2_mode))
5550 if (dump_enabled_p ())
5551 dump_printf_loc (MSG_NOTE, vect_location,
5552 "operand 1 using scalar mode.\n");
5553 vec_oprnd1 = op1;
5554 vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
5555 vec_oprnds1.quick_push (vec_oprnd1);
5556 if (slp_node)
5558 /* Store vec_oprnd1 for every vector stmt to be created
5559 for SLP_NODE. We check during the analysis that all
5560 the shift arguments are the same.
5561 TODO: Allow different constants for different vector
5562 stmts generated for an SLP instance. */
5563 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
5564 vec_oprnds1.quick_push (vec_oprnd1);
5569 /* vec_oprnd1 is available if operand 1 should be of a scalar type
5570 (a special case for certain kinds of vector shifts); otherwise,
5571 operand 1 should be of a vector type (the usual case). */
5572 if (vec_oprnd1)
5573 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
5574 slp_node);
5575 else
5576 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
5577 slp_node);
5579 else
5580 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
5582 /* Arguments are ready. Create the new vector stmt. */
5583 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
5585 vop1 = vec_oprnds1[i];
5586 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
5587 new_temp = make_ssa_name (vec_dest, new_stmt);
5588 gimple_assign_set_lhs (new_stmt, new_temp);
5589 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5590 if (slp_node)
5591 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
5594 if (slp_node)
5595 continue;
5597 if (j == 0)
5598 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5599 else
5600 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5601 prev_stmt_info = vinfo_for_stmt (new_stmt);
5604 vec_oprnds0.release ();
5605 vec_oprnds1.release ();
5607 return true;
5611 /* Function vectorizable_operation.
5613 Check if STMT performs a binary, unary or ternary operation that can
5614 be vectorized.
5615 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5616 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
5617 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5619 static bool
5620 vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
5621 gimple **vec_stmt, slp_tree slp_node)
5623 tree vec_dest;
5624 tree scalar_dest;
5625 tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
5626 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5627 tree vectype;
5628 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5629 enum tree_code code, orig_code;
5630 machine_mode vec_mode;
5631 tree new_temp;
5632 int op_type;
5633 optab optab;
5634 bool target_support_p;
5635 gimple *def_stmt;
5636 enum vect_def_type dt[3]
5637 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
5638 int ndts = 3;
5639 gimple *new_stmt = NULL;
5640 stmt_vec_info prev_stmt_info;
5641 poly_uint64 nunits_in;
5642 poly_uint64 nunits_out;
5643 tree vectype_out;
5644 int ncopies;
5645 int j, i;
5646 vec<tree> vec_oprnds0 = vNULL;
5647 vec<tree> vec_oprnds1 = vNULL;
5648 vec<tree> vec_oprnds2 = vNULL;
5649 tree vop0, vop1, vop2;
5650 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5651 vec_info *vinfo = stmt_info->vinfo;
5653 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5654 return false;
5656 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5657 && ! vec_stmt)
5658 return false;
5660 /* Is STMT a vectorizable binary/unary operation? */
5661 if (!is_gimple_assign (stmt))
5662 return false;
5664 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
5665 return false;
5667 orig_code = code = gimple_assign_rhs_code (stmt);
5669 /* For pointer addition and subtraction, we should use the normal
5670 plus and minus for the vector operation. */
5671 if (code == POINTER_PLUS_EXPR)
5672 code = PLUS_EXPR;
5673 if (code == POINTER_DIFF_EXPR)
5674 code = MINUS_EXPR;
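/* For illustration (example only), GIMPLE statements such as

     q_1 = p_2 + 16;        <-- POINTER_PLUS_EXPR
     d_3 = p_2 - q_1;       <-- POINTER_DIFF_EXPR

   are vectorized as element-wise PLUS_EXPR / MINUS_EXPR on vectors of
   pointer-sized unsigned elements; the POINTER_DIFF_EXPR result is
   additionally VIEW_CONVERTed to the signed result type further down.  */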
5676 /* Support only unary, binary or ternary operations. */
5677 op_type = TREE_CODE_LENGTH (code);
5678 if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
5680 if (dump_enabled_p ())
5681 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5682 "num. args = %d (not unary/binary/ternary op).\n",
5683 op_type);
5684 return false;
5687 scalar_dest = gimple_assign_lhs (stmt);
5688 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
5690 /* Most operations cannot handle bit-precision types without extra
5691 truncations. */
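/* For example (illustrative only): an addition carried out in a 13-bit
   integral type, as can arise from bit-field accesses, would need an
   extra truncation after every vector operation to keep the excess bits
   well-defined, so it is rejected here; the bitwise AND/IOR/XOR
   exceptions below never produce such excess bits.  */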
5692 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
5693 && !type_has_mode_precision_p (TREE_TYPE (scalar_dest))
5694 /* Exceptions are bitwise binary operations. */
5695 && code != BIT_IOR_EXPR
5696 && code != BIT_XOR_EXPR
5697 && code != BIT_AND_EXPR)
5699 if (dump_enabled_p ())
5700 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5701 "bit-precision arithmetic not supported.\n");
5702 return false;
5705 op0 = gimple_assign_rhs1 (stmt);
5706 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
5708 if (dump_enabled_p ())
5709 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5710 "use not simple.\n");
5711 return false;
5713 /* If op0 is an external or constant def use a vector type with
5714 the same size as the output vector type. */
5715 if (!vectype)
5717 /* For boolean type we cannot determine vectype by
5718 invariant value (don't know whether it is a vector
5719 of booleans or vector of integers). We use output
5720 vectype because operations on boolean don't change
5721 type. */
5722 if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op0)))
5724 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (scalar_dest)))
5726 if (dump_enabled_p ())
5727 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5728 "not supported operation on bool value.\n");
5729 return false;
5731 vectype = vectype_out;
5733 else
5734 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
5736 if (vec_stmt)
5737 gcc_assert (vectype);
5738 if (!vectype)
5740 if (dump_enabled_p ())
5742 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5743 "no vectype for scalar type ");
5744 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
5745 TREE_TYPE (op0));
5746 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
5749 return false;
5752 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
5753 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
5754 if (maybe_ne (nunits_out, nunits_in))
5755 return false;
5757 if (op_type == binary_op || op_type == ternary_op)
5759 op1 = gimple_assign_rhs2 (stmt);
5760 if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]))
5762 if (dump_enabled_p ())
5763 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5764 "use not simple.\n");
5765 return false;
5768 if (op_type == ternary_op)
5770 op2 = gimple_assign_rhs3 (stmt);
5771 if (!vect_is_simple_use (op2, vinfo, &def_stmt, &dt[2]))
5773 if (dump_enabled_p ())
5774 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5775 "use not simple.\n");
5776 return false;
5780 /* Multiple types in SLP are handled by creating the appropriate number of
5781 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5782 case of SLP. */
5783 if (slp_node)
5784 ncopies = 1;
5785 else
5786 ncopies = vect_get_num_copies (loop_vinfo, vectype);
5788 gcc_assert (ncopies >= 1);
5790 /* Shifts are handled in vectorizable_shift (). */
5791 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
5792 || code == RROTATE_EXPR)
5793 return false;
5795 /* Supportable by target? */
5797 vec_mode = TYPE_MODE (vectype);
5798 if (code == MULT_HIGHPART_EXPR)
5799 target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
5800 else
5802 optab = optab_for_tree_code (code, vectype, optab_default);
5803 if (!optab)
5805 if (dump_enabled_p ())
5806 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5807 "no optab.\n");
5808 return false;
5810 target_support_p = (optab_handler (optab, vec_mode)
5811 != CODE_FOR_nothing);
5814 if (!target_support_p)
5816 if (dump_enabled_p ())
5817 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5818 "op not supported by target.\n");
5819 /* Check only during analysis. */
5820 if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
5821 || (!vec_stmt && !vect_worthwhile_without_simd_p (vinfo, code)))
5822 return false;
5823 if (dump_enabled_p ())
5824 dump_printf_loc (MSG_NOTE, vect_location,
5825 "proceeding using word mode.\n");
5828 /* Worthwhile without SIMD support? Check only during analysis. */
5829 if (!VECTOR_MODE_P (vec_mode)
5830 && !vec_stmt
5831 && !vect_worthwhile_without_simd_p (vinfo, code))
5833 if (dump_enabled_p ())
5834 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5835 "not worthwhile without SIMD support.\n");
5836 return false;
5839 if (!vec_stmt) /* transformation not required. */
5841 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
5842 if (dump_enabled_p ())
5843 dump_printf_loc (MSG_NOTE, vect_location,
5844 "=== vectorizable_operation ===\n");
5845 if (!slp_node)
5846 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
5847 return true;
5850 /* Transform. */
5852 if (dump_enabled_p ())
5853 dump_printf_loc (MSG_NOTE, vect_location,
5854 "transform binary/unary operation.\n");
5856 /* Handle def. */
5857 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5859 /* POINTER_DIFF_EXPR has pointer arguments which are vectorized as
5860 vectors with unsigned elements, but the result is signed. So, we
5861 need to compute the MINUS_EXPR into vectype temporary and
5862 VIEW_CONVERT_EXPR it into the final vectype_out result. */
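/* For illustration, the statements generated below for this case look
   roughly like (SSA names purely illustrative):

     vect_tmp_1  = vect_p_2 - vect_q_3;                         // MINUS_EXPR on VECTYPE
     vect_diff_4 = VIEW_CONVERT_EXPR<VECTYPE_OUT>(vect_tmp_1);  // signed result  */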
5863 tree vec_cvt_dest = NULL_TREE;
5864 if (orig_code == POINTER_DIFF_EXPR)
5865 vec_cvt_dest = vect_create_destination_var (scalar_dest, vectype_out);
5867 /* In case the vectorization factor (VF) is bigger than the number
5868 of elements that we can fit in a vectype (nunits), we have to generate
5869 more than one vector stmt - i.e - we need to "unroll" the
5870 vector stmt by a factor VF/nunits. In doing so, we record a pointer
5871 from one copy of the vector stmt to the next, in the field
5872 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
5873 stages to find the correct vector defs to be used when vectorizing
5874 stmts that use the defs of the current stmt. The example below
5875 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
5876 we need to create 4 vectorized stmts):
5878 before vectorization:
5879 RELATED_STMT VEC_STMT
5880 S1: x = memref - -
5881 S2: z = x + 1 - -
5883 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
5884 there):
5885 RELATED_STMT VEC_STMT
5886 VS1_0: vx0 = memref0 VS1_1 -
5887 VS1_1: vx1 = memref1 VS1_2 -
5888 VS1_2: vx2 = memref2 VS1_3 -
5889 VS1_3: vx3 = memref3 - -
5890 S1: x = load - VS1_0
5891 S2: z = x + 1 - -
5893 step2: vectorize stmt S2 (done here):
5894 To vectorize stmt S2 we first need to find the relevant vector
5895 def for the first operand 'x'. This is, as usual, obtained from
5896 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
5897 that defines 'x' (S1). This way we find the stmt VS1_0, and the
5898 relevant vector def 'vx0'. Having found 'vx0' we can generate
5899 the vector stmt VS2_0, and as usual, record it in the
5900 STMT_VINFO_VEC_STMT of stmt S2.
5901 When creating the second copy (VS2_1), we obtain the relevant vector
5902 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
5903 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
5904 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
5905 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
5906 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
5907 chain of stmts and pointers:
5908 RELATED_STMT VEC_STMT
5909 VS1_0: vx0 = memref0 VS1_1 -
5910 VS1_1: vx1 = memref1 VS1_2 -
5911 VS1_2: vx2 = memref2 VS1_3 -
5912 VS1_3: vx3 = memref3 - -
5913 S1: x = load - VS1_0
5914 VS2_0: vz0 = vx0 + v1 VS2_1 -
5915 VS2_1: vz1 = vx1 + v1 VS2_2 -
5916 VS2_2: vz2 = vx2 + v1 VS2_3 -
5917 VS2_3: vz3 = vx3 + v1 - -
5918 S2: z = x + 1 - VS2_0 */
5920 prev_stmt_info = NULL;
5921 for (j = 0; j < ncopies; j++)
5923 /* Handle uses. */
5924 if (j == 0)
5926 if (op_type == binary_op || op_type == ternary_op)
5927 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
5928 slp_node);
5929 else
5930 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
5931 slp_node);
5932 if (op_type == ternary_op)
5933 vect_get_vec_defs (op2, NULL_TREE, stmt, &vec_oprnds2, NULL,
5934 slp_node);
5936 else
5938 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
5939 if (op_type == ternary_op)
5941 tree vec_oprnd = vec_oprnds2.pop ();
5942 vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
5943 vec_oprnd));
5947 /* Arguments are ready. Create the new vector stmt. */
5948 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
5950 vop1 = ((op_type == binary_op || op_type == ternary_op)
5951 ? vec_oprnds1[i] : NULL_TREE);
5952 vop2 = ((op_type == ternary_op)
5953 ? vec_oprnds2[i] : NULL_TREE);
5954 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2);
5955 new_temp = make_ssa_name (vec_dest, new_stmt);
5956 gimple_assign_set_lhs (new_stmt, new_temp);
5957 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5958 if (vec_cvt_dest)
5960 new_temp = build1 (VIEW_CONVERT_EXPR, vectype_out, new_temp);
5961 new_stmt = gimple_build_assign (vec_cvt_dest, VIEW_CONVERT_EXPR,
5962 new_temp);
5963 new_temp = make_ssa_name (vec_cvt_dest, new_stmt);
5964 gimple_assign_set_lhs (new_stmt, new_temp);
5965 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5967 if (slp_node)
5968 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
5971 if (slp_node)
5972 continue;
5974 if (j == 0)
5975 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5976 else
5977 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5978 prev_stmt_info = vinfo_for_stmt (new_stmt);
5981 vec_oprnds0.release ();
5982 vec_oprnds1.release ();
5983 vec_oprnds2.release ();
5985 return true;
5988 /* A helper function to ensure data reference DR's base alignment. */
5990 static void
5991 ensure_base_align (struct data_reference *dr)
5993 if (!dr->aux)
5994 return;
5996 if (DR_VECT_AUX (dr)->base_misaligned)
5998 tree base_decl = DR_VECT_AUX (dr)->base_decl;
6000 unsigned int align_base_to = DR_TARGET_ALIGNMENT (dr) * BITS_PER_UNIT;
6002 if (decl_in_symtab_p (base_decl))
6003 symtab_node::get (base_decl)->increase_alignment (align_base_to);
6004 else
6006 SET_DECL_ALIGN (base_decl, align_base_to);
6007 DECL_USER_ALIGN (base_decl) = 1;
6009 DR_VECT_AUX (dr)->base_misaligned = false;
6014 /* Function get_group_alias_ptr_type.
6016 Return the alias type for the group starting at FIRST_STMT. */
6018 static tree
6019 get_group_alias_ptr_type (gimple *first_stmt)
6021 struct data_reference *first_dr, *next_dr;
6022 gimple *next_stmt;
6024 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
6025 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first_stmt));
6026 while (next_stmt)
6028 next_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (next_stmt));
6029 if (get_alias_set (DR_REF (first_dr))
6030 != get_alias_set (DR_REF (next_dr)))
6032 if (dump_enabled_p ())
6033 dump_printf_loc (MSG_NOTE, vect_location,
6034 "conflicting alias set types.\n");
6035 return ptr_type_node;
6037 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
6039 return reference_alias_ptr_type (DR_REF (first_dr));
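/* For example (illustrative only): if one store of an interleaved group
   writes a float member and another an int member of the same struct,
   their alias sets differ and the group conservatively uses
   ptr_type_node, i.e. alias set zero, as its common alias pointer type.  */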
6043 /* Function vectorizable_store.
6045 Check if STMT defines a non-scalar data-ref (array/pointer/structure) that
6046 can be vectorized.
6047 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
6048 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
6049 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
6051 static bool
6052 vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
6053 slp_tree slp_node)
6055 tree data_ref;
6056 tree op;
6057 tree vec_oprnd = NULL_TREE;
6058 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
6059 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
6060 tree elem_type;
6061 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6062 struct loop *loop = NULL;
6063 machine_mode vec_mode;
6064 tree dummy;
6065 enum dr_alignment_support alignment_support_scheme;
6066 gimple *def_stmt;
6067 enum vect_def_type rhs_dt = vect_unknown_def_type;
6068 enum vect_def_type mask_dt = vect_unknown_def_type;
6069 stmt_vec_info prev_stmt_info = NULL;
6070 tree dataref_ptr = NULL_TREE;
6071 tree dataref_offset = NULL_TREE;
6072 gimple *ptr_incr = NULL;
6073 int ncopies;
6074 int j;
6075 gimple *next_stmt, *first_stmt;
6076 bool grouped_store;
6077 unsigned int group_size, i;
6078 vec<tree> oprnds = vNULL;
6079 vec<tree> result_chain = vNULL;
6080 bool inv_p;
6081 tree offset = NULL_TREE;
6082 vec<tree> vec_oprnds = vNULL;
6083 bool slp = (slp_node != NULL);
6084 unsigned int vec_num;
6085 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
6086 vec_info *vinfo = stmt_info->vinfo;
6087 tree aggr_type;
6088 gather_scatter_info gs_info;
6089 gimple *new_stmt;
6090 poly_uint64 vf;
6091 vec_load_store_type vls_type;
6092 tree ref_type;
6094 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
6095 return false;
6097 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
6098 && ! vec_stmt)
6099 return false;
6101 /* Is vectorizable store? */
6103 tree mask = NULL_TREE, mask_vectype = NULL_TREE;
6104 if (is_gimple_assign (stmt))
6106 tree scalar_dest = gimple_assign_lhs (stmt);
6107 if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
6108 && is_pattern_stmt_p (stmt_info))
6109 scalar_dest = TREE_OPERAND (scalar_dest, 0);
6110 if (TREE_CODE (scalar_dest) != ARRAY_REF
6111 && TREE_CODE (scalar_dest) != BIT_FIELD_REF
6112 && TREE_CODE (scalar_dest) != INDIRECT_REF
6113 && TREE_CODE (scalar_dest) != COMPONENT_REF
6114 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
6115 && TREE_CODE (scalar_dest) != REALPART_EXPR
6116 && TREE_CODE (scalar_dest) != MEM_REF)
6117 return false;
6119 else
6121 gcall *call = dyn_cast <gcall *> (stmt);
6122 if (!call || !gimple_call_internal_p (call))
6123 return false;
6125 internal_fn ifn = gimple_call_internal_fn (call);
6126 if (!internal_store_fn_p (ifn))
6127 return false;
6129 if (slp_node != NULL)
6131 if (dump_enabled_p ())
6132 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6133 "SLP of masked stores not supported.\n");
6134 return false;
6137 int mask_index = internal_fn_mask_index (ifn);
6138 if (mask_index >= 0)
6140 mask = gimple_call_arg (call, mask_index);
6141 if (!vect_check_load_store_mask (stmt, mask, &mask_dt,
6142 &mask_vectype))
6143 return false;
6147 op = vect_get_store_rhs (stmt);
6149 /* Cannot have hybrid store SLP -- that would mean storing to the
6150 same location twice. */
6151 gcc_assert (slp == PURE_SLP_STMT (stmt_info));
6153 tree vectype = STMT_VINFO_VECTYPE (stmt_info), rhs_vectype = NULL_TREE;
6154 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
6156 if (loop_vinfo)
6158 loop = LOOP_VINFO_LOOP (loop_vinfo);
6159 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
6161 else
6162 vf = 1;
6164 /* Multiple types in SLP are handled by creating the appropriate number of
6165 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
6166 case of SLP. */
6167 if (slp)
6168 ncopies = 1;
6169 else
6170 ncopies = vect_get_num_copies (loop_vinfo, vectype);
6172 gcc_assert (ncopies >= 1);
6174 /* FORNOW. This restriction should be relaxed. */
6175 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
6177 if (dump_enabled_p ())
6178 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6179 "multiple types in nested loop.\n");
6180 return false;
6183 if (!vect_check_store_rhs (stmt, op, &rhs_dt, &rhs_vectype, &vls_type))
6184 return false;
6186 elem_type = TREE_TYPE (vectype);
6187 vec_mode = TYPE_MODE (vectype);
6189 if (!STMT_VINFO_DATA_REF (stmt_info))
6190 return false;
6192 vect_memory_access_type memory_access_type;
6193 if (!get_load_store_type (stmt, vectype, slp, mask, vls_type, ncopies,
6194 &memory_access_type, &gs_info))
6195 return false;
6197 if (mask)
6199 if (memory_access_type == VMAT_CONTIGUOUS)
6201 if (!VECTOR_MODE_P (vec_mode)
6202 || !can_vec_mask_load_store_p (vec_mode,
6203 TYPE_MODE (mask_vectype), false))
6204 return false;
6206 else if (memory_access_type != VMAT_LOAD_STORE_LANES
6207 && (memory_access_type != VMAT_GATHER_SCATTER || gs_info.decl))
6209 if (dump_enabled_p ())
6210 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6211 "unsupported access type for masked store.\n");
6212 return false;
6215 else
6217 /* FORNOW. In some cases can vectorize even if data-type not supported
6218 (e.g. - array initialization with 0). */
6219 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
6220 return false;
6223 grouped_store = (STMT_VINFO_GROUPED_ACCESS (stmt_info)
6224 && memory_access_type != VMAT_GATHER_SCATTER
6225 && (slp || memory_access_type != VMAT_CONTIGUOUS));
6226 if (grouped_store)
6228 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
6229 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
6230 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
6232 else
6234 first_stmt = stmt;
6235 first_dr = dr;
6236 group_size = vec_num = 1;
6239 if (!vec_stmt) /* transformation not required. */
6241 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
6243 if (loop_vinfo
6244 && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
6245 check_load_store_masking (loop_vinfo, vectype, vls_type, group_size,
6246 memory_access_type, &gs_info);
6248 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
6249 /* The SLP costs are calculated during SLP analysis. */
6250 if (!slp_node)
6251 vect_model_store_cost (stmt_info, ncopies, memory_access_type,
6252 vls_type, NULL, NULL, NULL);
6253 return true;
6255 gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
6257 /* Transform. */
6259 ensure_base_align (dr);
6261 if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl)
6263 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, src;
6264 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
6265 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
6266 tree ptr, mask, var, scale, perm_mask = NULL_TREE;
6267 edge pe = loop_preheader_edge (loop);
6268 gimple_seq seq;
6269 basic_block new_bb;
6270 enum { NARROW, NONE, WIDEN } modifier;
6271 poly_uint64 scatter_off_nunits
6272 = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);
6274 if (known_eq (nunits, scatter_off_nunits))
6275 modifier = NONE;
6276 else if (known_eq (nunits * 2, scatter_off_nunits))
6278 modifier = WIDEN;
6280 /* Currently gathers and scatters are only supported for
6281 fixed-length vectors. */
6282 unsigned int count = scatter_off_nunits.to_constant ();
6283 vec_perm_builder sel (count, count, 1);
6284 for (i = 0; i < (unsigned int) count; ++i)
6285 sel.quick_push (i | (count / 2));
6287 vec_perm_indices indices (sel, 1, count);
6288 perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype,
6289 indices);
6290 gcc_assert (perm_mask != NULL_TREE);
6292 else if (known_eq (nunits, scatter_off_nunits * 2))
6294 modifier = NARROW;
6296 /* Currently gathers and scatters are only supported for
6297 fixed-length vectors. */
6298 unsigned int count = nunits.to_constant ();
6299 vec_perm_builder sel (count, count, 1);
6300 for (i = 0; i < (unsigned int) count; ++i)
6301 sel.quick_push (i | (count / 2));
6303 vec_perm_indices indices (sel, 2, count);
6304 perm_mask = vect_gen_perm_mask_checked (vectype, indices);
6305 gcc_assert (perm_mask != NULL_TREE);
6306 ncopies *= 2;
6308 else
6309 gcc_unreachable ();
6311 rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
6312 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6313 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6314 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6315 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6316 scaletype = TREE_VALUE (arglist);
6318 gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
6319 && TREE_CODE (rettype) == VOID_TYPE);
6321 ptr = fold_convert (ptrtype, gs_info.base);
6322 if (!is_gimple_min_invariant (ptr))
6324 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
6325 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
6326 gcc_assert (!new_bb);
6329 /* Currently we support only unconditional scatter stores,
6330 so mask should be all ones. */
6331 mask = build_int_cst (masktype, -1);
6332 mask = vect_init_vector (stmt, mask, masktype, NULL);
6334 scale = build_int_cst (scaletype, gs_info.scale);
6336 prev_stmt_info = NULL;
6337 for (j = 0; j < ncopies; ++j)
6339 if (j == 0)
6341 src = vec_oprnd1
6342 = vect_get_vec_def_for_operand (op, stmt);
6343 op = vec_oprnd0
6344 = vect_get_vec_def_for_operand (gs_info.offset, stmt);
6346 else if (modifier != NONE && (j & 1))
6348 if (modifier == WIDEN)
6350 src = vec_oprnd1
6351 = vect_get_vec_def_for_stmt_copy (rhs_dt, vec_oprnd1);
6352 op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
6353 stmt, gsi);
6355 else if (modifier == NARROW)
6357 src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask,
6358 stmt, gsi);
6359 op = vec_oprnd0
6360 = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt,
6361 vec_oprnd0);
6363 else
6364 gcc_unreachable ();
6366 else
6368 src = vec_oprnd1
6369 = vect_get_vec_def_for_stmt_copy (rhs_dt, vec_oprnd1);
6370 op = vec_oprnd0
6371 = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt,
6372 vec_oprnd0);
6375 if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
6377 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src)),
6378 TYPE_VECTOR_SUBPARTS (srctype)));
6379 var = vect_get_new_ssa_name (srctype, vect_simple_var);
6380 src = build1 (VIEW_CONVERT_EXPR, srctype, src);
6381 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
6382 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6383 src = var;
6386 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
6388 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
6389 TYPE_VECTOR_SUBPARTS (idxtype)));
6390 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
6391 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
6392 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
6393 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6394 op = var;
6397 new_stmt
6398 = gimple_build_call (gs_info.decl, 5, ptr, mask, op, src, scale);
6400 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6402 if (prev_stmt_info == NULL)
6403 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6404 else
6405 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6406 prev_stmt_info = vinfo_for_stmt (new_stmt);
6408 return true;
6411 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
6413 gimple *group_stmt = GROUP_FIRST_ELEMENT (stmt_info);
6414 GROUP_STORE_COUNT (vinfo_for_stmt (group_stmt))++;
6417 if (grouped_store)
6419 /* FORNOW */
6420 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
6422 /* We vectorize all the stmts of the interleaving group when we
6423 reach the last stmt in the group. */
6424 if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
6425 < GROUP_SIZE (vinfo_for_stmt (first_stmt))
6426 && !slp)
6428 *vec_stmt = NULL;
6429 return true;
6432 if (slp)
6434 grouped_store = false;
6435 /* VEC_NUM is the number of vect stmts to be created for this
6436 group. */
6437 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6438 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
6439 gcc_assert (GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt)) == first_stmt);
6440 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
6441 op = vect_get_store_rhs (first_stmt);
6443 else
6444 /* VEC_NUM is the number of vect stmts to be created for this
6445 group. */
6446 vec_num = group_size;
6448 ref_type = get_group_alias_ptr_type (first_stmt);
6450 else
6451 ref_type = reference_alias_ptr_type (DR_REF (first_dr));
6453 if (dump_enabled_p ())
6454 dump_printf_loc (MSG_NOTE, vect_location,
6455 "transform store. ncopies = %d\n", ncopies);
6457 if (memory_access_type == VMAT_ELEMENTWISE
6458 || memory_access_type == VMAT_STRIDED_SLP)
6460 gimple_stmt_iterator incr_gsi;
6461 bool insert_after;
6462 gimple *incr;
6463 tree offvar;
6464 tree ivstep;
6465 tree running_off;
6466 tree stride_base, stride_step, alias_off;
6467 tree vec_oprnd;
6468 unsigned int g;
6469 /* Checked by get_load_store_type. */
6470 unsigned int const_nunits = nunits.to_constant ();
6472 gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
6473 gcc_assert (!nested_in_vect_loop_p (loop, stmt));
6475 stride_base
6476 = fold_build_pointer_plus
6477 (DR_BASE_ADDRESS (first_dr),
6478 size_binop (PLUS_EXPR,
6479 convert_to_ptrofftype (DR_OFFSET (first_dr)),
6480 convert_to_ptrofftype (DR_INIT (first_dr))));
6481 stride_step = fold_convert (sizetype, DR_STEP (first_dr));
6483 /* For a store with loop-invariant (but other than power-of-2)
6484 stride (i.e. not a grouped access) like so:
6486 for (i = 0; i < n; i += stride)
6487 array[i] = ...;
6489 we generate a new induction variable and new stores from
6490 the components of the (vectorized) rhs:
6492 for (j = 0; ; j += VF*stride)
6493 vectemp = ...;
6494 tmp1 = vectemp[0];
6495 array[j] = tmp1;
6496 tmp2 = vectemp[1];
6497 array[j + stride] = tmp2;
     ...  */
6501 unsigned nstores = const_nunits;
6502 unsigned lnel = 1;
6503 tree ltype = elem_type;
6504 tree lvectype = vectype;
6505 if (slp)
6507 if (group_size < const_nunits
6508 && const_nunits % group_size == 0)
6510 nstores = const_nunits / group_size;
6511 lnel = group_size;
6512 ltype = build_vector_type (elem_type, group_size);
6513 lvectype = vectype;
6515 /* First check if vec_extract optab doesn't support extraction
6516 of vector elts directly. */
6517 scalar_mode elmode = SCALAR_TYPE_MODE (elem_type);
6518 machine_mode vmode;
6519 if (!mode_for_vector (elmode, group_size).exists (&vmode)
6520 || !VECTOR_MODE_P (vmode)
6521 || !targetm.vector_mode_supported_p (vmode)
6522 || (convert_optab_handler (vec_extract_optab,
6523 TYPE_MODE (vectype), vmode)
6524 == CODE_FOR_nothing))
6526 /* Try to avoid emitting an extract of vector elements
6527 by performing the extracts using an integer type of the
6528 same size, extracting from a vector of those and then
6529 re-interpreting it as the original vector type if
6530 supported. */
6531 unsigned lsize
6532 = group_size * GET_MODE_BITSIZE (elmode);
6533 elmode = int_mode_for_size (lsize, 0).require ();
6534 unsigned int lnunits = const_nunits / group_size;
6535 /* If we can't construct such a vector, fall back to
6536 element extracts from the original vector type and
6537 element size stores. */
6538 if (mode_for_vector (elmode, lnunits).exists (&vmode)
6539 && VECTOR_MODE_P (vmode)
6540 && targetm.vector_mode_supported_p (vmode)
6541 && (convert_optab_handler (vec_extract_optab,
6542 vmode, elmode)
6543 != CODE_FOR_nothing))
6545 nstores = lnunits;
6546 lnel = group_size;
6547 ltype = build_nonstandard_integer_type (lsize, 1);
6548 lvectype = build_vector_type (ltype, nstores);
6550 /* Else fall back to vector extraction anyway.
6551 Fewer stores are more important than avoiding spilling
6552 of the vector we extract from. Compared to the
6553 construction case in vectorizable_load no store-forwarding
6554 issue exists here for reasonable archs. */
6557 else if (group_size >= const_nunits
6558 && group_size % const_nunits == 0)
6560 nstores = 1;
6561 lnel = const_nunits;
6562 ltype = vectype;
6563 lvectype = vectype;
6565 ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
6566 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
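	  /* A concrete example of the selection above (illustrative only):
	     for an SLP group of 2 float stores with V4SF vectors,
	     group_size (2) divides const_nunits (4), so each V4SF value is
	     stored as two V2SF halves (nstores = 2, lnel = 2), or as two
	     64-bit integers extracted from a V2DI view of it when V2SF
	     extraction is not supported by the target.  */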
6569 ivstep = stride_step;
6570 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
6571 build_int_cst (TREE_TYPE (ivstep), vf));
6573 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
6575 stride_base = cse_and_gimplify_to_preheader (loop_vinfo, stride_base);
6576 ivstep = cse_and_gimplify_to_preheader (loop_vinfo, ivstep);
6577 create_iv (stride_base, ivstep, NULL,
6578 loop, &incr_gsi, insert_after,
6579 &offvar, NULL);
6580 incr = gsi_stmt (incr_gsi);
6581 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
6583 stride_step = cse_and_gimplify_to_preheader (loop_vinfo, stride_step);
6585 prev_stmt_info = NULL;
6586 alias_off = build_int_cst (ref_type, 0);
6587 next_stmt = first_stmt;
6588 for (g = 0; g < group_size; g++)
6590 running_off = offvar;
6591 if (g)
6593 tree size = TYPE_SIZE_UNIT (ltype);
6594 tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
6595 size);
6596 tree newoff = copy_ssa_name (running_off, NULL);
6597 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6598 running_off, pos);
6599 vect_finish_stmt_generation (stmt, incr, gsi);
6600 running_off = newoff;
6602 unsigned int group_el = 0;
6603 unsigned HOST_WIDE_INT
6604 elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
6605 for (j = 0; j < ncopies; j++)
6607 /* We've set op and dt above, from vect_get_store_rhs,
6608 and first_stmt == stmt. */
6609 if (j == 0)
6611 if (slp)
6613 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL,
6614 slp_node);
6615 vec_oprnd = vec_oprnds[0];
6617 else
6619 op = vect_get_store_rhs (next_stmt);
6620 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
6623 else
6625 if (slp)
6626 vec_oprnd = vec_oprnds[j];
6627 else
6629 vect_is_simple_use (op, vinfo, &def_stmt, &rhs_dt);
6630 vec_oprnd = vect_get_vec_def_for_stmt_copy (rhs_dt,
6631 vec_oprnd);
6634 /* Pun the vector to extract from if necessary. */
6635 if (lvectype != vectype)
6637 tree tem = make_ssa_name (lvectype);
6638 gimple *pun
6639 = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
6640 lvectype, vec_oprnd));
6641 vect_finish_stmt_generation (stmt, pun, gsi);
6642 vec_oprnd = tem;
6644 for (i = 0; i < nstores; i++)
6646 tree newref, newoff;
6647 gimple *incr, *assign;
6648 tree size = TYPE_SIZE (ltype);
6649 /* Extract the i'th component. */
6650 tree pos = fold_build2 (MULT_EXPR, bitsizetype,
6651 bitsize_int (i), size);
6652 tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
6653 size, pos);
6655 elem = force_gimple_operand_gsi (gsi, elem, true,
6656 NULL_TREE, true,
6657 GSI_SAME_STMT);
6659 tree this_off = build_int_cst (TREE_TYPE (alias_off),
6660 group_el * elsz);
6661 newref = build2 (MEM_REF, ltype,
6662 running_off, this_off);
6664 /* And store it to *running_off. */
6665 assign = gimple_build_assign (newref, elem);
6666 vect_finish_stmt_generation (stmt, assign, gsi);
6668 group_el += lnel;
6669 if (! slp
6670 || group_el == group_size)
6672 newoff = copy_ssa_name (running_off, NULL);
6673 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6674 running_off, stride_step);
6675 vect_finish_stmt_generation (stmt, incr, gsi);
6677 running_off = newoff;
6678 group_el = 0;
6680 if (g == group_size - 1
6681 && !slp)
6683 if (j == 0 && i == 0)
6684 STMT_VINFO_VEC_STMT (stmt_info)
6685 = *vec_stmt = assign;
6686 else
6687 STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign;
6688 prev_stmt_info = vinfo_for_stmt (assign);
6692 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
6693 if (slp)
6694 break;
6697 vec_oprnds.release ();
6698 return true;
6701 auto_vec<tree> dr_chain (group_size);
6702 oprnds.create (group_size);
6704 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
6705 gcc_assert (alignment_support_scheme);
6706 bool masked_loop_p = (loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
6707 /* Targets with store-lane instructions must not require explicit
6708 realignment. vect_supportable_dr_alignment always returns either
6709 dr_aligned or dr_unaligned_supported for masked operations. */
6710 gcc_assert ((memory_access_type != VMAT_LOAD_STORE_LANES
6711 && !mask
6712 && !masked_loop_p)
6713 || alignment_support_scheme == dr_aligned
6714 || alignment_support_scheme == dr_unaligned_supported);
6716 if (memory_access_type == VMAT_CONTIGUOUS_DOWN
6717 || memory_access_type == VMAT_CONTIGUOUS_REVERSE)
6718 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
6720 tree bump;
6721 tree vec_offset = NULL_TREE;
6722 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
6724 aggr_type = NULL_TREE;
6725 bump = NULL_TREE;
6727 else if (memory_access_type == VMAT_GATHER_SCATTER)
6729 aggr_type = elem_type;
6730 vect_get_strided_load_store_ops (stmt, loop_vinfo, &gs_info,
6731 &bump, &vec_offset);
6733 else
6735 if (memory_access_type == VMAT_LOAD_STORE_LANES)
6736 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
6737 else
6738 aggr_type = vectype;
6739 bump = vect_get_data_ptr_increment (dr, aggr_type, memory_access_type);
6742 if (mask)
6743 LOOP_VINFO_HAS_MASK_STORE (loop_vinfo) = true;
6745 /* In case the vectorization factor (VF) is bigger than the number
6746 of elements that we can fit in a vectype (nunits), we have to generate
6747 more than one vector stmt - i.e - we need to "unroll" the
6748 vector stmt by a factor VF/nunits. For more details see documentation in
6749 vect_get_vec_def_for_copy_stmt. */
6751 /* In case of interleaving (non-unit grouped access):
6753 S1: &base + 2 = x2
6754 S2: &base = x0
6755 S3: &base + 1 = x1
6756 S4: &base + 3 = x3
6758 We create vectorized stores starting from base address (the access of the
6759 first stmt in the chain (S2 in the above example), when the last store stmt
6760 of the chain (S4) is reached:
6762 VS1: &base = vx2
6763 VS2: &base + vec_size*1 = vx0
6764 VS3: &base + vec_size*2 = vx1
6765 VS4: &base + vec_size*3 = vx3
6767 Then permutation statements are generated:
6769 VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
6770 VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
6773 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
6774 (the order of the data-refs in the output of vect_permute_store_chain
6775 corresponds to the order of scalar stmts in the interleaving chain - see
6776 the documentation of vect_permute_store_chain()).
6778 In case of both multiple types and interleaving, above vector stores and
6779 permutation stmts are created for every copy. The result vector stmts are
6780 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
6781 STMT_VINFO_RELATED_STMT for the next copies. */
6784 prev_stmt_info = NULL;
6785 tree vec_mask = NULL_TREE;
6786 vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
6787 for (j = 0; j < ncopies; j++)
6790 if (j == 0)
6792 if (slp)
6794 /* Get vectorized arguments for SLP_NODE. */
6795 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
6796 NULL, slp_node);
6798 vec_oprnd = vec_oprnds[0];
6800 else
6802 /* For interleaved stores we collect vectorized defs for all the
6803 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
6804 used as an input to vect_permute_store_chain(), and OPRNDS as
6805 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
6807 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
6808 OPRNDS are of size 1. */
6809 next_stmt = first_stmt;
6810 for (i = 0; i < group_size; i++)
6812 /* Since gaps are not supported for interleaved stores,
6813 GROUP_SIZE is the exact number of stmts in the chain.
6814 Therefore, NEXT_STMT can't be NULL_TREE. In case that
6815 there is no interleaving, GROUP_SIZE is 1, and only one
6816 iteration of the loop will be executed. */
6817 op = vect_get_store_rhs (next_stmt);
6818 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
6819 dr_chain.quick_push (vec_oprnd);
6820 oprnds.quick_push (vec_oprnd);
6821 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
6823 if (mask)
6824 vec_mask = vect_get_vec_def_for_operand (mask, stmt,
6825 mask_vectype);
6828 /* We should have caught mismatched types earlier. */
6829 gcc_assert (useless_type_conversion_p (vectype,
6830 TREE_TYPE (vec_oprnd)));
6831 bool simd_lane_access_p
6832 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
6833 if (simd_lane_access_p
6834 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
6835 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
6836 && integer_zerop (DR_OFFSET (first_dr))
6837 && integer_zerop (DR_INIT (first_dr))
6838 && alias_sets_conflict_p (get_alias_set (aggr_type),
6839 get_alias_set (TREE_TYPE (ref_type))))
6841 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
6842 dataref_offset = build_int_cst (ref_type, 0);
6843 inv_p = false;
6845 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
6847 vect_get_gather_scatter_ops (loop, stmt, &gs_info,
6848 &dataref_ptr, &vec_offset);
6849 inv_p = false;
6851 else
6852 dataref_ptr
6853 = vect_create_data_ref_ptr (first_stmt, aggr_type,
6854 simd_lane_access_p ? loop : NULL,
6855 offset, &dummy, gsi, &ptr_incr,
6856 simd_lane_access_p, &inv_p,
6857 NULL_TREE, bump);
6858 gcc_assert (bb_vinfo || !inv_p);
6860 else
6862 /* For interleaved stores we created vectorized defs for all the
6863 defs stored in OPRNDS in the previous iteration (previous copy).
6864 DR_CHAIN is then used as an input to vect_permute_store_chain(),
6865 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
6866 next copy.
6867 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
6868 OPRNDS are of size 1. */
6869 for (i = 0; i < group_size; i++)
6871 op = oprnds[i];
6872 vect_is_simple_use (op, vinfo, &def_stmt, &rhs_dt);
6873 vec_oprnd = vect_get_vec_def_for_stmt_copy (rhs_dt, op);
6874 dr_chain[i] = vec_oprnd;
6875 oprnds[i] = vec_oprnd;
6877 if (mask)
6878 vec_mask = vect_get_vec_def_for_stmt_copy (mask_dt, vec_mask);
6879 if (dataref_offset)
6880 dataref_offset
6881 = int_const_binop (PLUS_EXPR, dataref_offset, bump);
6882 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
6883 vec_offset = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt,
6884 vec_offset);
6885 else
6886 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
6887 bump);
6890 if (memory_access_type == VMAT_LOAD_STORE_LANES)
6892 tree vec_array;
6894 /* Combine all the vectors into an array. */
6895 vec_array = create_vector_array (vectype, vec_num);
6896 for (i = 0; i < vec_num; i++)
6898 vec_oprnd = dr_chain[i];
6899 write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
6902 tree final_mask = NULL;
6903 if (masked_loop_p)
6904 final_mask = vect_get_loop_mask (gsi, masks, ncopies, vectype, j);
6905 if (vec_mask)
6906 final_mask = prepare_load_store_mask (mask_vectype, final_mask,
6907 vec_mask, gsi);
6909 gcall *call;
6910 if (final_mask)
6912 /* Emit:
6913 MASK_STORE_LANES (DATAREF_PTR, ALIAS_PTR, VEC_MASK,
6914 VEC_ARRAY). */
6915 unsigned int align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
6916 tree alias_ptr = build_int_cst (ref_type, align);
6917 call = gimple_build_call_internal (IFN_MASK_STORE_LANES, 4,
6918 dataref_ptr, alias_ptr,
6919 final_mask, vec_array);
6921 else
6923 /* Emit:
6924 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
6925 data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
6926 call = gimple_build_call_internal (IFN_STORE_LANES, 1,
6927 vec_array);
6928 gimple_call_set_lhs (call, data_ref);
6930 gimple_call_set_nothrow (call, true);
6931 new_stmt = call;
6932 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6934 else
6936 new_stmt = NULL;
6937 if (grouped_store)
6939 if (j == 0)
6940 result_chain.create (group_size);
6941 /* Permute. */
6942 vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
6943 &result_chain);
6946 next_stmt = first_stmt;
6947 for (i = 0; i < vec_num; i++)
6949 unsigned align, misalign;
6951 tree final_mask = NULL_TREE;
6952 if (masked_loop_p)
6953 final_mask = vect_get_loop_mask (gsi, masks, vec_num * ncopies,
6954 vectype, vec_num * j + i);
6955 if (vec_mask)
6956 final_mask = prepare_load_store_mask (mask_vectype, final_mask,
6957 vec_mask, gsi);
6959 if (memory_access_type == VMAT_GATHER_SCATTER)
6961 tree scale = size_int (gs_info.scale);
6962 gcall *call;
6963 if (masked_loop_p)
6964 call = gimple_build_call_internal
6965 (IFN_MASK_SCATTER_STORE, 5, dataref_ptr, vec_offset,
6966 scale, vec_oprnd, final_mask);
6967 else
6968 call = gimple_build_call_internal
6969 (IFN_SCATTER_STORE, 4, dataref_ptr, vec_offset,
6970 scale, vec_oprnd);
6971 gimple_call_set_nothrow (call, true);
6972 new_stmt = call;
6973 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6974 break;
6977 if (i > 0)
6978 /* Bump the vector pointer. */
6979 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
6980 stmt, bump);
6982 if (slp)
6983 vec_oprnd = vec_oprnds[i];
6984 else if (grouped_store)
6985 /* For grouped stores vectorized defs are interleaved in
6986 vect_permute_store_chain(). */
6987 vec_oprnd = result_chain[i];
6989 align = DR_TARGET_ALIGNMENT (first_dr);
6990 if (aligned_access_p (first_dr))
6991 misalign = 0;
6992 else if (DR_MISALIGNMENT (first_dr) == -1)
6994 align = dr_alignment (vect_dr_behavior (first_dr));
6995 misalign = 0;
6997 else
6998 misalign = DR_MISALIGNMENT (first_dr);
6999 if (dataref_offset == NULL_TREE
7000 && TREE_CODE (dataref_ptr) == SSA_NAME)
7001 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
7002 misalign);
7004 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
7006 tree perm_mask = perm_mask_for_reverse (vectype);
7007 tree perm_dest
7008 = vect_create_destination_var (vect_get_store_rhs (stmt),
7009 vectype);
7010 tree new_temp = make_ssa_name (perm_dest);
7012 /* Generate the permute statement. */
7013 gimple *perm_stmt
7014 = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
7015 vec_oprnd, perm_mask);
7016 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
7018 perm_stmt = SSA_NAME_DEF_STMT (new_temp);
7019 vec_oprnd = new_temp;
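		  /* For example (illustrative only): when the scalar loop
		     walks the array with a negative step, a V4SI operand is
		     reversed here with a { 3, 2, 1, 0 } permutation before
		     being stored as a contiguous vector.  */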
7022 /* Arguments are ready. Create the new vector stmt. */
7023 if (final_mask)
7025 align = least_bit_hwi (misalign | align);
7026 tree ptr = build_int_cst (ref_type, align);
7027 gcall *call
7028 = gimple_build_call_internal (IFN_MASK_STORE, 4,
7029 dataref_ptr, ptr,
7030 final_mask, vec_oprnd);
7031 gimple_call_set_nothrow (call, true);
7032 new_stmt = call;
7034 else
7036 data_ref = fold_build2 (MEM_REF, vectype,
7037 dataref_ptr,
7038 dataref_offset
7039 ? dataref_offset
7040 : build_int_cst (ref_type, 0));
7041 if (aligned_access_p (first_dr))
7043 else if (DR_MISALIGNMENT (first_dr) == -1)
7044 TREE_TYPE (data_ref)
7045 = build_aligned_type (TREE_TYPE (data_ref),
7046 align * BITS_PER_UNIT);
7047 else
7048 TREE_TYPE (data_ref)
7049 = build_aligned_type (TREE_TYPE (data_ref),
7050 TYPE_ALIGN (elem_type));
7051 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
7053 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7055 if (slp)
7056 continue;
7058 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
7059 if (!next_stmt)
7060 break;
7063 if (!slp)
7065 if (j == 0)
7066 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7067 else
7068 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7069 prev_stmt_info = vinfo_for_stmt (new_stmt);
7073 oprnds.release ();
7074 result_chain.release ();
7075 vec_oprnds.release ();
7077 return true;
7080 /* Given a vector type VECTYPE, turns permutation SEL into the equivalent
7081 VECTOR_CST mask. No checks are made that the target platform supports the
7082 mask, so callers may wish to test can_vec_perm_const_p separately, or use
7083 vect_gen_perm_mask_checked. */
7085 tree
7086 vect_gen_perm_mask_any (tree vectype, const vec_perm_indices &sel)
7088 tree mask_type;
7090 poly_uint64 nunits = sel.length ();
7091 gcc_assert (known_eq (nunits, TYPE_VECTOR_SUBPARTS (vectype)));
7093 mask_type = build_vector_type (ssizetype, nunits);
7094 return vec_perm_indices_to_tree (mask_type, sel);
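/* A minimal usage sketch (hypothetical, mirroring the fixed-length
   gather/scatter code earlier in this file); COUNT, VECTYPE and MASK are
   illustrative local names, not part of this function:

     vec_perm_builder sel (count, count, 1);
     for (unsigned int i = 0; i < count; ++i)
       sel.quick_push (count - 1 - i);		/- element reversal -/
     vec_perm_indices indices (sel, 1, count);
     tree mask = vect_gen_perm_mask_checked (vectype, indices);  */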
7097 /* Checked version of vect_gen_perm_mask_any. Asserts can_vec_perm_const_p,
7098 i.e. that the target supports the pattern _for arbitrary input vectors_. */
7100 tree
7101 vect_gen_perm_mask_checked (tree vectype, const vec_perm_indices &sel)
7103 gcc_assert (can_vec_perm_const_p (TYPE_MODE (vectype), sel));
7104 return vect_gen_perm_mask_any (vectype, sel);
7107 /* Given vector variables X and Y that were generated for the scalar
7108 STMT, generate instructions to permute the vector elements of X and Y
7109 using permutation mask MASK_VEC, insert them at *GSI, and return the
7110 permuted vector variable. */
7112 static tree
7113 permute_vec_elements (tree x, tree y, tree mask_vec, gimple *stmt,
7114 gimple_stmt_iterator *gsi)
7116 tree vectype = TREE_TYPE (x);
7117 tree perm_dest, data_ref;
7118 gimple *perm_stmt;
7120 tree scalar_dest = gimple_get_lhs (stmt);
7121 if (TREE_CODE (scalar_dest) == SSA_NAME)
7122 perm_dest = vect_create_destination_var (scalar_dest, vectype);
7123 else
7124 perm_dest = vect_get_new_vect_var (vectype, vect_simple_var, NULL);
7125 data_ref = make_ssa_name (perm_dest);
7127 /* Generate the permute statement. */
7128 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
7129 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
7131 return data_ref;
7134 /* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
7135 inserting them on the loop's preheader edge. Returns true if we
7136 were successful in doing so (and thus STMT can then be moved),
7137 otherwise returns false. */
7139 static bool
7140 hoist_defs_of_uses (gimple *stmt, struct loop *loop)
7142 ssa_op_iter i;
7143 tree op;
7144 bool any = false;
7146 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
7148 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
7149 if (!gimple_nop_p (def_stmt)
7150 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
7152 /* Make sure we don't need to recurse. While we could do
7153 so in simple cases, when there are more complex use webs
7154 we don't have an easy way to preserve stmt order to fulfil
7155 dependencies within them. */
7156 tree op2;
7157 ssa_op_iter i2;
7158 if (gimple_code (def_stmt) == GIMPLE_PHI)
7159 return false;
7160 FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
7162 gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
7163 if (!gimple_nop_p (def_stmt2)
7164 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
7165 return false;
7167 any = true;
7171 if (!any)
7172 return true;
7174 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
7176 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
7177 if (!gimple_nop_p (def_stmt)
7178 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
7180 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
7181 gsi_remove (&gsi, false);
7182 gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
7186 return true;
7189 /* vectorizable_load.
7191 Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
7192 can be vectorized.
7193 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
7194 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
7195 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
7197 static bool
7198 vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
7199 slp_tree slp_node, slp_instance slp_node_instance)
7201 tree scalar_dest;
7202 tree vec_dest = NULL;
7203 tree data_ref = NULL;
7204 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7205 stmt_vec_info prev_stmt_info;
7206 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7207 struct loop *loop = NULL;
7208 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
7209 bool nested_in_vect_loop = false;
7210 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
7211 tree elem_type;
7212 tree new_temp;
7213 machine_mode mode;
7214 gimple *new_stmt = NULL;
7215 tree dummy;
7216 enum dr_alignment_support alignment_support_scheme;
7217 tree dataref_ptr = NULL_TREE;
7218 tree dataref_offset = NULL_TREE;
7219 gimple *ptr_incr = NULL;
7220 int ncopies;
7221 int i, j;
7222 unsigned int group_size;
7223 poly_uint64 group_gap_adj;
7224 tree msq = NULL_TREE, lsq;
7225 tree offset = NULL_TREE;
7226 tree byte_offset = NULL_TREE;
7227 tree realignment_token = NULL_TREE;
7228 gphi *phi = NULL;
7229 vec<tree> dr_chain = vNULL;
7230 bool grouped_load = false;
7231 gimple *first_stmt;
7232 gimple *first_stmt_for_drptr = NULL;
7233 bool inv_p;
7234 bool compute_in_loop = false;
7235 struct loop *at_loop;
7236 int vec_num;
7237 bool slp = (slp_node != NULL);
7238 bool slp_perm = false;
7239 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7240 poly_uint64 vf;
7241 tree aggr_type;
7242 gather_scatter_info gs_info;
7243 vec_info *vinfo = stmt_info->vinfo;
7244 tree ref_type;
7245 enum vect_def_type mask_dt = vect_unknown_def_type;
7247 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
7248 return false;
7250 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
7251 && ! vec_stmt)
7252 return false;
7254 tree mask = NULL_TREE, mask_vectype = NULL_TREE;
7255 if (is_gimple_assign (stmt))
7257 scalar_dest = gimple_assign_lhs (stmt);
7258 if (TREE_CODE (scalar_dest) != SSA_NAME)
7259 return false;
7261 tree_code code = gimple_assign_rhs_code (stmt);
7262 if (code != ARRAY_REF
7263 && code != BIT_FIELD_REF
7264 && code != INDIRECT_REF
7265 && code != COMPONENT_REF
7266 && code != IMAGPART_EXPR
7267 && code != REALPART_EXPR
7268 && code != MEM_REF
7269 && TREE_CODE_CLASS (code) != tcc_declaration)
7270 return false;
7272 else
7274 gcall *call = dyn_cast <gcall *> (stmt);
7275 if (!call || !gimple_call_internal_p (call))
7276 return false;
7278 internal_fn ifn = gimple_call_internal_fn (call);
7279 if (!internal_load_fn_p (ifn))
7280 return false;
7282 scalar_dest = gimple_call_lhs (call);
7283 if (!scalar_dest)
7284 return false;
7286 if (slp_node != NULL)
7288 if (dump_enabled_p ())
7289 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7290 "SLP of masked loads not supported.\n");
7291 return false;
7294 int mask_index = internal_fn_mask_index (ifn);
7295 if (mask_index >= 0)
7297 mask = gimple_call_arg (call, mask_index);
7298 if (!vect_check_load_store_mask (stmt, mask, &mask_dt,
7299 &mask_vectype))
7300 return false;
7304 if (!STMT_VINFO_DATA_REF (stmt_info))
7305 return false;
7307 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7308 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
7310 if (loop_vinfo)
7312 loop = LOOP_VINFO_LOOP (loop_vinfo);
7313 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
7314 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
7316 else
7317 vf = 1;
7319 /* Multiple types in SLP are handled by creating the appropriate number of
7320 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
7321 case of SLP. */
7322 if (slp)
7323 ncopies = 1;
7324 else
7325 ncopies = vect_get_num_copies (loop_vinfo, vectype);
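/* For example, in the non-SLP case a V4SI vectype with a vectorization
   factor of 8 yields NCOPIES == 2, i.e. two vector loads per scalar load.  */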
7327 gcc_assert (ncopies >= 1);
7329 /* FORNOW. This restriction should be relaxed. */
7330 if (nested_in_vect_loop && ncopies > 1)
7332 if (dump_enabled_p ())
7333 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7334 "multiple types in nested loop.\n");
7335 return false;
7338 /* Invalidate assumptions made by dependence analysis when vectorization
7339 on the unrolled body effectively re-orders stmts. */
7340 if (ncopies > 1
7341 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
7342 && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
7343 STMT_VINFO_MIN_NEG_DIST (stmt_info)))
7345 if (dump_enabled_p ())
7346 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7347 "cannot perform implicit CSE when unrolling "
7348 "with negative dependence distance\n");
7349 return false;
7352 elem_type = TREE_TYPE (vectype);
7353 mode = TYPE_MODE (vectype);
7355 /* FORNOW. In some cases we can vectorize even if the data type is not
7356 supported (e.g. data copies). */
7357 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
7359 if (dump_enabled_p ())
7360 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7361 "Aligned load, but unsupported type.\n");
7362 return false;
7365 /* Check if the load is a part of an interleaving chain. */
7366 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
7368 grouped_load = true;
7369 /* FORNOW */
7370 gcc_assert (!nested_in_vect_loop);
7371 gcc_assert (!STMT_VINFO_GATHER_SCATTER_P (stmt_info));
7373 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
7374 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
7376 if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
7377 slp_perm = true;
7379 /* Invalidate assumptions made by dependence analysis when vectorization
7380 on the unrolled body effectively re-orders stmts. */
7381 if (!PURE_SLP_STMT (stmt_info)
7382 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
7383 && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
7384 STMT_VINFO_MIN_NEG_DIST (stmt_info)))
7386 if (dump_enabled_p ())
7387 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7388 "cannot perform implicit CSE when performing "
7389 "group loads with negative dependence distance\n");
7390 return false;
7393 /* Similarly, when the stmt is a load that is both part of an SLP
7394 instance and a loop-vectorized stmt via the same-dr mechanism,
7395 we have to give up. */
7396 if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)
7397 && (STMT_SLP_TYPE (stmt_info)
7398 != STMT_SLP_TYPE (vinfo_for_stmt
7399 (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)))))
7401 if (dump_enabled_p ())
7402 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7403 "conflicting SLP types for CSEd load\n");
7404 return false;
7407 else
7408 group_size = 1;
7410 vect_memory_access_type memory_access_type;
7411 if (!get_load_store_type (stmt, vectype, slp, mask, VLS_LOAD, ncopies,
7412 &memory_access_type, &gs_info))
7413 return false;
7415 if (mask)
7417 if (memory_access_type == VMAT_CONTIGUOUS)
7419 machine_mode vec_mode = TYPE_MODE (vectype);
7420 if (!VECTOR_MODE_P (vec_mode)
7421 || !can_vec_mask_load_store_p (vec_mode,
7422 TYPE_MODE (mask_vectype), true))
7423 return false;
7425 else if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl)
7427 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
7428 tree masktype
7429 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
7430 if (TREE_CODE (masktype) == INTEGER_TYPE)
7432 if (dump_enabled_p ())
7433 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7434 "masked gather with integer mask not"
7435 " supported.");
7436 return false;
7439 else if (memory_access_type != VMAT_LOAD_STORE_LANES
7440 && memory_access_type != VMAT_GATHER_SCATTER)
7442 if (dump_enabled_p ())
7443 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7444 "unsupported access type for masked load.\n");
7445 return false;
7449 if (!vec_stmt) /* transformation not required. */
7451 if (!slp)
7452 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
7454 if (loop_vinfo
7455 && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
7456 check_load_store_masking (loop_vinfo, vectype, VLS_LOAD, group_size,
7457 memory_access_type, &gs_info);
7459 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
7460 /* The SLP costs are calculated during SLP analysis. */
7461 if (! slp_node)
7462 vect_model_load_cost (stmt_info, ncopies, memory_access_type,
7463 NULL, NULL, NULL);
7464 return true;
7467 if (!slp)
7468 gcc_assert (memory_access_type
7469 == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
7471 if (dump_enabled_p ())
7472 dump_printf_loc (MSG_NOTE, vect_location,
7473 "transform load. ncopies = %d\n", ncopies);
7475 /* Transform. */
7477 ensure_base_align (dr);
7479 if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl)
7481 vect_build_gather_load_calls (stmt, gsi, vec_stmt, &gs_info, mask,
7482 mask_dt);
7483 return true;
7486 if (memory_access_type == VMAT_ELEMENTWISE
7487 || memory_access_type == VMAT_STRIDED_SLP)
7489 gimple_stmt_iterator incr_gsi;
7490 bool insert_after;
7491 gimple *incr;
7492 tree offvar;
7493 tree ivstep;
7494 tree running_off;
7495 vec<constructor_elt, va_gc> *v = NULL;
7496 tree stride_base, stride_step, alias_off;
7497 /* Checked by get_load_store_type. */
7498 unsigned int const_nunits = nunits.to_constant ();
7499 unsigned HOST_WIDE_INT cst_offset = 0;
7501 gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
7502 gcc_assert (!nested_in_vect_loop);
7504 if (grouped_load)
7506 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
7507 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
7509 else
7511 first_stmt = stmt;
7512 first_dr = dr;
7514 if (slp && grouped_load)
7516 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
7517 ref_type = get_group_alias_ptr_type (first_stmt);
7519 else
7521 if (grouped_load)
7522 cst_offset
7523 = (tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)))
7524 * vect_get_place_in_interleaving_chain (stmt, first_stmt));
7525 group_size = 1;
7526 ref_type = reference_alias_ptr_type (DR_REF (dr));
7529 stride_base
7530 = fold_build_pointer_plus
7531 (DR_BASE_ADDRESS (first_dr),
7532 size_binop (PLUS_EXPR,
7533 convert_to_ptrofftype (DR_OFFSET (first_dr)),
7534 convert_to_ptrofftype (DR_INIT (first_dr))));
7535 stride_step = fold_convert (sizetype, DR_STEP (first_dr));
7537 /* For a load with a loop-invariant (but non-power-of-2) stride
7538 (i.e. not a grouped access) like so:
7540 for (i = 0; i < n; i += stride)
7541 ... = array[i];
7543 we generate a new induction variable and new accesses to
7544 form a new vector (or vectors, depending on ncopies):
7546 for (j = 0; ; j += VF*stride)
7547 tmp1 = array[j];
7548 tmp2 = array[j + stride];
7550 vectemp = {tmp1, tmp2, ...}
7553 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
7554 build_int_cst (TREE_TYPE (stride_step), vf));
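/* The induction variable created below therefore steps by VF * DR_STEP
   bytes per iteration of the vectorized loop, matching the VF scalar
   iterations it replaces.  */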
7556 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
7558 stride_base = cse_and_gimplify_to_preheader (loop_vinfo, stride_base);
7559 ivstep = cse_and_gimplify_to_preheader (loop_vinfo, ivstep);
7560 create_iv (stride_base, ivstep, NULL,
7561 loop, &incr_gsi, insert_after,
7562 &offvar, NULL);
7563 incr = gsi_stmt (incr_gsi);
7564 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
7566 stride_step = cse_and_gimplify_to_preheader (loop_vinfo, stride_step);
7568 prev_stmt_info = NULL;
7569 running_off = offvar;
7570 alias_off = build_int_cst (ref_type, 0);
7571 int nloads = const_nunits;
7572 int lnel = 1;
7573 tree ltype = TREE_TYPE (vectype);
7574 tree lvectype = vectype;
7575 auto_vec<tree> dr_chain;
7576 if (memory_access_type == VMAT_STRIDED_SLP)
7578 if (group_size < const_nunits)
7580 /* First check if vec_init optab supports construction from
7581 vector elts directly. */
7582 scalar_mode elmode = SCALAR_TYPE_MODE (TREE_TYPE (vectype));
7583 machine_mode vmode;
7584 if (mode_for_vector (elmode, group_size).exists (&vmode)
7585 && VECTOR_MODE_P (vmode)
7586 && targetm.vector_mode_supported_p (vmode)
7587 && (convert_optab_handler (vec_init_optab,
7588 TYPE_MODE (vectype), vmode)
7589 != CODE_FOR_nothing))
7591 nloads = const_nunits / group_size;
7592 lnel = group_size;
7593 ltype = build_vector_type (TREE_TYPE (vectype), group_size);
7595 else
7597 /* Otherwise avoid emitting a constructor of vector elements
7598 by performing the loads using an integer type of the same
7599 size, constructing a vector of those and then
7600 re-interpreting it as the original vector type.
7601 This avoids a huge runtime penalty due to the general
7602 inability to perform store forwarding from smaller stores
7603 to a larger load. */
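/* For example, if a group of two 'short' elements cannot be loaded as a
   two-element vector directly, each group is loaded as one 32-bit
   integer and a vector of those integers is later view-converted back
   to the original vector type.  */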
7604 unsigned lsize
7605 = group_size * TYPE_PRECISION (TREE_TYPE (vectype));
7606 elmode = int_mode_for_size (lsize, 0).require ();
7607 unsigned int lnunits = const_nunits / group_size;
7608 /* If we can't construct such a vector fall back to
7609 element loads of the original vector type. */
7610 if (mode_for_vector (elmode, lnunits).exists (&vmode)
7611 && VECTOR_MODE_P (vmode)
7612 && targetm.vector_mode_supported_p (vmode)
7613 && (convert_optab_handler (vec_init_optab, vmode, elmode)
7614 != CODE_FOR_nothing))
7616 nloads = lnunits;
7617 lnel = group_size;
7618 ltype = build_nonstandard_integer_type (lsize, 1);
7619 lvectype = build_vector_type (ltype, nloads);
7623 else
7625 nloads = 1;
7626 lnel = const_nunits;
7627 ltype = vectype;
7629 ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
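/* At this point each copy is assembled from NLOADS loads of type LTYPE,
   each covering LNEL group elements; the results form a vector of type
   LVECTYPE, which is view-converted to VECTYPE below if the two differ.  */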
7631 if (slp)
7633 /* For SLP permutation support we need to load the whole group,
7634 not only the number of vector stmts the permutation result
7635 fits in. */
7636 if (slp_perm)
7638 /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for
7639 variable VF. */
7640 unsigned int const_vf = vf.to_constant ();
7641 ncopies = CEIL (group_size * const_vf, const_nunits);
7642 dr_chain.create (ncopies);
7644 else
7645 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7647 unsigned int group_el = 0;
7648 unsigned HOST_WIDE_INT
7649 elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
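/* Emit the loads copy by copy, bumping RUNNING_OFF by STRIDE_STEP
   whenever a full group of GROUP_SIZE elements has been read.  */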
7650 for (j = 0; j < ncopies; j++)
7652 if (nloads > 1)
7653 vec_alloc (v, nloads);
7654 for (i = 0; i < nloads; i++)
7656 tree this_off = build_int_cst (TREE_TYPE (alias_off),
7657 group_el * elsz + cst_offset);
7658 new_stmt = gimple_build_assign (make_ssa_name (ltype),
7659 build2 (MEM_REF, ltype,
7660 running_off, this_off));
7661 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7662 if (nloads > 1)
7663 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE,
7664 gimple_assign_lhs (new_stmt));
7666 group_el += lnel;
7667 if (! slp
7668 || group_el == group_size)
7670 tree newoff = copy_ssa_name (running_off);
7671 gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
7672 running_off, stride_step);
7673 vect_finish_stmt_generation (stmt, incr, gsi);
7675 running_off = newoff;
7676 group_el = 0;
7679 if (nloads > 1)
7681 tree vec_inv = build_constructor (lvectype, v);
7682 new_temp = vect_init_vector (stmt, vec_inv, lvectype, gsi);
7683 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7684 if (lvectype != vectype)
7686 new_stmt = gimple_build_assign (make_ssa_name (vectype),
7687 VIEW_CONVERT_EXPR,
7688 build1 (VIEW_CONVERT_EXPR,
7689 vectype, new_temp));
7690 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7694 if (slp)
7696 if (slp_perm)
7697 dr_chain.quick_push (gimple_assign_lhs (new_stmt));
7698 else
7699 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7701 else
7703 if (j == 0)
7704 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7705 else
7706 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7707 prev_stmt_info = vinfo_for_stmt (new_stmt);
7710 if (slp_perm)
7712 unsigned n_perms;
7713 vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
7714 slp_node_instance, false, &n_perms);
7716 return true;
7719 if (memory_access_type == VMAT_GATHER_SCATTER
7720 || (!slp && memory_access_type == VMAT_CONTIGUOUS))
7721 grouped_load = false;
7723 if (grouped_load)
7725 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
7726 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
7727 /* For SLP vectorization we directly vectorize a subchain
7728 without permutation. */
7729 if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
7730 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
7731 /* For BB vectorization always use the first stmt to base
7732 the data ref pointer on. */
7733 if (bb_vinfo)
7734 first_stmt_for_drptr = SLP_TREE_SCALAR_STMTS (slp_node)[0];
7736 /* Check if the chain of loads is already vectorized. */
7737 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
7738 /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
7739 ??? But we can only do so if there is exactly one
7740 as we have no way to get at the rest. Leave the CSE
7741 opportunity alone.
7742 ??? With the group load eventually participating
7743 in multiple different permutations (having multiple
7744 slp nodes which refer to the same group) the CSE
7745 is even wrong code. See PR56270. */
7746 && !slp)
7748 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
7749 return true;
7751 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
7752 group_gap_adj = 0;
7754 /* VEC_NUM is the number of vect stmts to be created for this group. */
7755 if (slp)
7757 grouped_load = false;
7758 /* For SLP permutation support we need to load the whole group,
7759 not only the number of vector stmts the permutation result
7760 fits in. */
7761 if (slp_perm)
7763 /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for
7764 variable VF. */
7765 unsigned int const_vf = vf.to_constant ();
7766 unsigned int const_nunits = nunits.to_constant ();
7767 vec_num = CEIL (group_size * const_vf, const_nunits);
7768 group_gap_adj = vf * group_size - nunits * vec_num;
7770 else
7772 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7773 group_gap_adj
7774 = group_size - SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
7777 else
7778 vec_num = group_size;
7780 ref_type = get_group_alias_ptr_type (first_stmt);
7782 else
7784 first_stmt = stmt;
7785 first_dr = dr;
7786 group_size = vec_num = 1;
7787 group_gap_adj = 0;
7788 ref_type = reference_alias_ptr_type (DR_REF (first_dr));
7791 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
7792 gcc_assert (alignment_support_scheme);
7793 bool masked_loop_p = (loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
7794 /* Targets with load-lanes instructions must not require explicit
7795 realignment. vect_supportable_dr_alignment always returns either
7796 dr_aligned or dr_unaligned_supported for masked operations. */
7797 gcc_assert ((memory_access_type != VMAT_LOAD_STORE_LANES
7798 && !mask
7799 && !masked_loop_p)
7800 || alignment_support_scheme == dr_aligned
7801 || alignment_support_scheme == dr_unaligned_supported);
7803 /* In case the vectorization factor (VF) is bigger than the number
7804 of elements that we can fit in a vectype (nunits), we have to generate
7805 more than one vector stmt, i.e. we need to "unroll" the
7806 vector stmt by a factor VF/nunits. In doing so, we record a pointer
7807 from one copy of the vector stmt to the next, in the field
7808 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
7809 stages to find the correct vector defs to be used when vectorizing
7810 stmts that use the defs of the current stmt. The example below
7811 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
7812 need to create 4 vectorized stmts):
7814 before vectorization:
7815 RELATED_STMT VEC_STMT
7816 S1: x = memref - -
7817 S2: z = x + 1 - -
7819 step 1: vectorize stmt S1:
7820 We first create the vector stmt VS1_0, and, as usual, record a
7821 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
7822 Next, we create the vector stmt VS1_1, and record a pointer to
7823 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
7824 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
7825 stmts and pointers:
7826 RELATED_STMT VEC_STMT
7827 VS1_0: vx0 = memref0 VS1_1 -
7828 VS1_1: vx1 = memref1 VS1_2 -
7829 VS1_2: vx2 = memref2 VS1_3 -
7830 VS1_3: vx3 = memref3 - -
7831 S1: x = load - VS1_0
7832 S2: z = x + 1 - -
7834 See in documentation in vect_get_vec_def_for_stmt_copy for how the
7835 information we recorded in RELATED_STMT field is used to vectorize
7836 stmt S2. */
7838 /* In case of interleaving (non-unit grouped access):
7840 S1: x2 = &base + 2
7841 S2: x0 = &base
7842 S3: x1 = &base + 1
7843 S4: x3 = &base + 3
7845 Vectorized loads are created in the order of memory accesses
7846 starting from the access of the first stmt of the chain:
7848 VS1: vx0 = &base
7849 VS2: vx1 = &base + vec_size*1
7850 VS3: vx3 = &base + vec_size*2
7851 VS4: vx4 = &base + vec_size*3
7853 Then permutation statements are generated:
7855 VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
7856 VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
7859 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
7860 (the order of the data-refs in the output of vect_permute_load_chain
7861 corresponds to the order of scalar stmts in the interleaving chain - see
7862 the documentation of vect_permute_load_chain()).
7863 The generation of permutation stmts and recording them in
7864 STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().
7866 In case of both multiple types and interleaving, the vector loads and
7867 permutation stmts above are created for every copy. The result vector
7868 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
7869 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
7871 /* If the data reference is aligned (dr_aligned) or potentially unaligned
7872 on a target that supports unaligned accesses (dr_unaligned_supported)
7873 we generate the following code:
7874 p = initial_addr;
7875 indx = 0;
7876 loop {
7877 p = p + indx * vectype_size;
7878 vec_dest = *(p);
7879 indx = indx + 1;
7882 Otherwise, the data reference is potentially unaligned on a target that
7883 does not support unaligned accesses (dr_explicit_realign_optimized) -
7884 then generate the following code, in which the data in each iteration is
7885 obtained by two vector loads, one from the previous iteration, and one
7886 from the current iteration:
7887 p1 = initial_addr;
7888 msq_init = *(floor(p1))
7889 p2 = initial_addr + VS - 1;
7890 realignment_token = call target_builtin;
7891 indx = 0;
7892 loop {
7893 p2 = p2 + indx * vectype_size
7894 lsq = *(floor(p2))
7895 vec_dest = realign_load (msq, lsq, realignment_token)
7896 indx = indx + 1;
7897 msq = lsq;
7898 } */
7900 /* If the misalignment remains the same throughout the execution of the
7901 loop, we can create the init_addr and permutation mask at the loop
7902 preheader. Otherwise, it needs to be created inside the loop.
7903 This can only occur when vectorizing memory accesses in the inner-loop
7904 nested within an outer-loop that is being vectorized. */
7906 if (nested_in_vect_loop
7907 && !multiple_p (DR_STEP_ALIGNMENT (dr),
7908 GET_MODE_SIZE (TYPE_MODE (vectype))))
7910 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
7911 compute_in_loop = true;
7914 if ((alignment_support_scheme == dr_explicit_realign_optimized
7915 || alignment_support_scheme == dr_explicit_realign)
7916 && !compute_in_loop)
7918 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
7919 alignment_support_scheme, NULL_TREE,
7920 &at_loop);
7921 if (alignment_support_scheme == dr_explicit_realign_optimized)
7923 phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
7924 byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
7925 size_one_node);
7928 else
7929 at_loop = loop;
7931 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
7932 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
7934 tree bump;
7935 tree vec_offset = NULL_TREE;
7936 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
7938 aggr_type = NULL_TREE;
7939 bump = NULL_TREE;
7941 else if (memory_access_type == VMAT_GATHER_SCATTER)
7943 aggr_type = elem_type;
7944 vect_get_strided_load_store_ops (stmt, loop_vinfo, &gs_info,
7945 &bump, &vec_offset);
7947 else
7949 if (memory_access_type == VMAT_LOAD_STORE_LANES)
7950 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
7951 else
7952 aggr_type = vectype;
7953 bump = vect_get_data_ptr_increment (dr, aggr_type, memory_access_type);
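/* BUMP is (roughly) the number of bytes by which the data-ref pointer
   advances between copies: one vector for contiguous accesses, or the
   whole VEC_NUM-vector array for load-lanes, adjusted for reverse
   accesses.  */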
7956 tree vec_mask = NULL_TREE;
7957 prev_stmt_info = NULL;
7958 poly_uint64 group_elt = 0;
7959 vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
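/* Main transformation loop, one iteration per copy: create or bump the
   data-ref pointer, emit the vector load(s), apply realignment, reversal
   or permutation as required, and record the result either in the SLP
   node or in the RELATED_STMT chain.  */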
7960 for (j = 0; j < ncopies; j++)
7962 /* 1. Create the vector or array pointer update chain. */
7963 if (j == 0)
7965 bool simd_lane_access_p
7966 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
7967 if (simd_lane_access_p
7968 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
7969 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
7970 && integer_zerop (DR_OFFSET (first_dr))
7971 && integer_zerop (DR_INIT (first_dr))
7972 && alias_sets_conflict_p (get_alias_set (aggr_type),
7973 get_alias_set (TREE_TYPE (ref_type)))
7974 && (alignment_support_scheme == dr_aligned
7975 || alignment_support_scheme == dr_unaligned_supported))
7977 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
7978 dataref_offset = build_int_cst (ref_type, 0);
7979 inv_p = false;
7981 else if (first_stmt_for_drptr
7982 && first_stmt != first_stmt_for_drptr)
7984 dataref_ptr
7985 = vect_create_data_ref_ptr (first_stmt_for_drptr, aggr_type,
7986 at_loop, offset, &dummy, gsi,
7987 &ptr_incr, simd_lane_access_p,
7988 &inv_p, byte_offset, bump);
7989 /* Adjust the pointer by the difference to first_stmt. */
7990 data_reference_p ptrdr
7991 = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt_for_drptr));
7992 tree diff = fold_convert (sizetype,
7993 size_binop (MINUS_EXPR,
7994 DR_INIT (first_dr),
7995 DR_INIT (ptrdr)));
7996 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7997 stmt, diff);
7999 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
8001 vect_get_gather_scatter_ops (loop, stmt, &gs_info,
8002 &dataref_ptr, &vec_offset);
8003 inv_p = false;
8005 else
8006 dataref_ptr
8007 = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
8008 offset, &dummy, gsi, &ptr_incr,
8009 simd_lane_access_p, &inv_p,
8010 byte_offset, bump);
8011 if (mask)
8012 vec_mask = vect_get_vec_def_for_operand (mask, stmt,
8013 mask_vectype);
8015 else
8017 if (dataref_offset)
8018 dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
8019 bump);
8020 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
8021 vec_offset = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt,
8022 vec_offset);
8023 else
8024 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
8025 stmt, bump);
8026 if (mask)
8027 vec_mask = vect_get_vec_def_for_stmt_copy (mask_dt, vec_mask);
8030 if (grouped_load || slp_perm)
8031 dr_chain.create (vec_num);
8033 if (memory_access_type == VMAT_LOAD_STORE_LANES)
8035 tree vec_array;
8037 vec_array = create_vector_array (vectype, vec_num);
8039 tree final_mask = NULL_TREE;
8040 if (masked_loop_p)
8041 final_mask = vect_get_loop_mask (gsi, masks, ncopies, vectype, j);
8042 if (vec_mask)
8043 final_mask = prepare_load_store_mask (mask_vectype, final_mask,
8044 vec_mask, gsi);
8046 gcall *call;
8047 if (final_mask)
8049 /* Emit:
8050 VEC_ARRAY = MASK_LOAD_LANES (DATAREF_PTR, ALIAS_PTR,
8051 VEC_MASK). */
8052 unsigned int align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
8053 tree alias_ptr = build_int_cst (ref_type, align);
8054 call = gimple_build_call_internal (IFN_MASK_LOAD_LANES, 3,
8055 dataref_ptr, alias_ptr,
8056 final_mask);
8058 else
8060 /* Emit:
8061 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
8062 data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
8063 call = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
8065 gimple_call_set_lhs (call, vec_array);
8066 gimple_call_set_nothrow (call, true);
8067 new_stmt = call;
8068 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8070 /* Extract each vector into an SSA_NAME. */
8071 for (i = 0; i < vec_num; i++)
8073 new_temp = read_vector_array (stmt, gsi, scalar_dest,
8074 vec_array, i);
8075 dr_chain.quick_push (new_temp);
8078 /* Record the mapping between SSA_NAMEs and statements. */
8079 vect_record_grouped_load_vectors (stmt, dr_chain);
8081 else
8083 for (i = 0; i < vec_num; i++)
8085 tree final_mask = NULL_TREE;
8086 if (masked_loop_p
8087 && memory_access_type != VMAT_INVARIANT)
8088 final_mask = vect_get_loop_mask (gsi, masks, vec_num * ncopies,
8089 vectype, vec_num * j + i);
8090 if (vec_mask)
8091 final_mask = prepare_load_store_mask (mask_vectype, final_mask,
8092 vec_mask, gsi);
8094 if (i > 0)
8095 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
8096 stmt, bump);
8098 /* 2. Create the vector-load in the loop. */
8099 switch (alignment_support_scheme)
8101 case dr_aligned:
8102 case dr_unaligned_supported:
8104 unsigned int align, misalign;
8106 if (memory_access_type == VMAT_GATHER_SCATTER)
8108 tree scale = size_int (gs_info.scale);
8109 gcall *call;
8110 if (masked_loop_p)
8111 call = gimple_build_call_internal
8112 (IFN_MASK_GATHER_LOAD, 4, dataref_ptr,
8113 vec_offset, scale, final_mask);
8114 else
8115 call = gimple_build_call_internal
8116 (IFN_GATHER_LOAD, 3, dataref_ptr,
8117 vec_offset, scale);
8118 gimple_call_set_nothrow (call, true);
8119 new_stmt = call;
8120 data_ref = NULL_TREE;
8121 break;
8124 align = DR_TARGET_ALIGNMENT (dr);
8125 if (alignment_support_scheme == dr_aligned)
8127 gcc_assert (aligned_access_p (first_dr));
8128 misalign = 0;
8130 else if (DR_MISALIGNMENT (first_dr) == -1)
8132 align = dr_alignment (vect_dr_behavior (first_dr));
8133 misalign = 0;
8135 else
8136 misalign = DR_MISALIGNMENT (first_dr);
8137 if (dataref_offset == NULL_TREE
8138 && TREE_CODE (dataref_ptr) == SSA_NAME)
8139 set_ptr_info_alignment (get_ptr_info (dataref_ptr),
8140 align, misalign);
8142 if (final_mask)
8144 align = least_bit_hwi (misalign | align);
8145 tree ptr = build_int_cst (ref_type, align);
8146 gcall *call
8147 = gimple_build_call_internal (IFN_MASK_LOAD, 3,
8148 dataref_ptr, ptr,
8149 final_mask);
8150 gimple_call_set_nothrow (call, true);
8151 new_stmt = call;
8152 data_ref = NULL_TREE;
8154 else
8156 data_ref
8157 = fold_build2 (MEM_REF, vectype, dataref_ptr,
8158 dataref_offset
8159 ? dataref_offset
8160 : build_int_cst (ref_type, 0));
8161 if (alignment_support_scheme == dr_aligned)
8163 else if (DR_MISALIGNMENT (first_dr) == -1)
8164 TREE_TYPE (data_ref)
8165 = build_aligned_type (TREE_TYPE (data_ref),
8166 align * BITS_PER_UNIT);
8167 else
8168 TREE_TYPE (data_ref)
8169 = build_aligned_type (TREE_TYPE (data_ref),
8170 TYPE_ALIGN (elem_type));
8172 break;
8174 case dr_explicit_realign:
8176 tree ptr, bump;
8178 tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));
8180 if (compute_in_loop)
8181 msq = vect_setup_realignment (first_stmt, gsi,
8182 &realignment_token,
8183 dr_explicit_realign,
8184 dataref_ptr, NULL);
8186 if (TREE_CODE (dataref_ptr) == SSA_NAME)
8187 ptr = copy_ssa_name (dataref_ptr);
8188 else
8189 ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
8190 unsigned int align = DR_TARGET_ALIGNMENT (first_dr);
8191 new_stmt = gimple_build_assign
8192 (ptr, BIT_AND_EXPR, dataref_ptr,
8193 build_int_cst
8194 (TREE_TYPE (dataref_ptr),
8195 -(HOST_WIDE_INT) align));
8196 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8197 data_ref
8198 = build2 (MEM_REF, vectype, ptr,
8199 build_int_cst (ref_type, 0));
8200 vec_dest = vect_create_destination_var (scalar_dest,
8201 vectype);
8202 new_stmt = gimple_build_assign (vec_dest, data_ref);
8203 new_temp = make_ssa_name (vec_dest, new_stmt);
8204 gimple_assign_set_lhs (new_stmt, new_temp);
8205 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
8206 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
8207 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8208 msq = new_temp;
8210 bump = size_binop (MULT_EXPR, vs,
8211 TYPE_SIZE_UNIT (elem_type));
8212 bump = size_binop (MINUS_EXPR, bump, size_one_node);
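/* The second load comes from the last target-aligned vector overlapping
   the access: advance the pointer by vector-size - 1 bytes and align it
   back down below.  */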
8213 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
8214 new_stmt = gimple_build_assign
8215 (NULL_TREE, BIT_AND_EXPR, ptr,
8216 build_int_cst
8217 (TREE_TYPE (ptr), -(HOST_WIDE_INT) align));
8218 ptr = copy_ssa_name (ptr, new_stmt);
8219 gimple_assign_set_lhs (new_stmt, ptr);
8220 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8221 data_ref
8222 = build2 (MEM_REF, vectype, ptr,
8223 build_int_cst (ref_type, 0));
8224 break;
8226 case dr_explicit_realign_optimized:
8228 if (TREE_CODE (dataref_ptr) == SSA_NAME)
8229 new_temp = copy_ssa_name (dataref_ptr);
8230 else
8231 new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
8232 unsigned int align = DR_TARGET_ALIGNMENT (first_dr);
8233 new_stmt = gimple_build_assign
8234 (new_temp, BIT_AND_EXPR, dataref_ptr,
8235 build_int_cst (TREE_TYPE (dataref_ptr),
8236 -(HOST_WIDE_INT) align));
8237 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8238 data_ref
8239 = build2 (MEM_REF, vectype, new_temp,
8240 build_int_cst (ref_type, 0));
8241 break;
8243 default:
8244 gcc_unreachable ();
8246 vec_dest = vect_create_destination_var (scalar_dest, vectype);
8247 /* DATA_REF is null if we've already built the statement. */
8248 if (data_ref)
8249 new_stmt = gimple_build_assign (vec_dest, data_ref);
8250 new_temp = make_ssa_name (vec_dest, new_stmt);
8251 gimple_set_lhs (new_stmt, new_temp);
8252 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8254 /* 3. Handle explicit realignment if necessary/supported.
8255 Create in loop:
8256 vec_dest = realign_load (msq, lsq, realignment_token) */
8257 if (alignment_support_scheme == dr_explicit_realign_optimized
8258 || alignment_support_scheme == dr_explicit_realign)
8260 lsq = gimple_assign_lhs (new_stmt);
8261 if (!realignment_token)
8262 realignment_token = dataref_ptr;
8263 vec_dest = vect_create_destination_var (scalar_dest, vectype);
8264 new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
8265 msq, lsq, realignment_token);
8266 new_temp = make_ssa_name (vec_dest, new_stmt);
8267 gimple_assign_set_lhs (new_stmt, new_temp);
8268 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8270 if (alignment_support_scheme == dr_explicit_realign_optimized)
8272 gcc_assert (phi);
8273 if (i == vec_num - 1 && j == ncopies - 1)
8274 add_phi_arg (phi, lsq,
8275 loop_latch_edge (containing_loop),
8276 UNKNOWN_LOCATION);
8277 msq = lsq;
8281 /* 4. Handle invariant-load. */
8282 if (inv_p && !bb_vinfo)
8284 gcc_assert (!grouped_load);
8285 /* If we have versioned for aliasing or the loop doesn't
8286 have any data dependencies that would preclude this,
8287 then we are sure this is a loop invariant load and
8288 thus we can insert it on the preheader edge. */
8289 if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
8290 && !nested_in_vect_loop
8291 && hoist_defs_of_uses (stmt, loop))
8293 if (dump_enabled_p ())
8295 dump_printf_loc (MSG_NOTE, vect_location,
8296 "hoisting out of the vectorized "
8297 "loop: ");
8298 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
8300 tree tem = copy_ssa_name (scalar_dest);
8301 gsi_insert_on_edge_immediate
8302 (loop_preheader_edge (loop),
8303 gimple_build_assign (tem,
8304 unshare_expr
8305 (gimple_assign_rhs1 (stmt))));
8306 new_temp = vect_init_vector (stmt, tem, vectype, NULL);
8307 new_stmt = SSA_NAME_DEF_STMT (new_temp);
8308 set_vinfo_for_stmt (new_stmt,
8309 new_stmt_vec_info (new_stmt, vinfo));
8311 else
8313 gimple_stmt_iterator gsi2 = *gsi;
8314 gsi_next (&gsi2);
8315 new_temp = vect_init_vector (stmt, scalar_dest,
8316 vectype, &gsi2);
8317 new_stmt = SSA_NAME_DEF_STMT (new_temp);
8321 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
8323 tree perm_mask = perm_mask_for_reverse (vectype);
8324 new_temp = permute_vec_elements (new_temp, new_temp,
8325 perm_mask, stmt, gsi);
8326 new_stmt = SSA_NAME_DEF_STMT (new_temp);
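/* A negative-step access was loaded from the lowest address it touches,
   so the permutation above restores the original element order.  */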
8329 /* Collect vector loads and later create their permutation in
8330 vect_transform_grouped_load (). */
8331 if (grouped_load || slp_perm)
8332 dr_chain.quick_push (new_temp);
8334 /* Store vector loads in the corresponding SLP_NODE. */
8335 if (slp && !slp_perm)
8336 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
8338 /* With an SLP permutation we load the gaps as well; without one
8339 we need to skip the gaps once we have fully loaded all the
8340 elements. group_gap_adj is GROUP_SIZE here. */
8341 group_elt += nunits;
8342 if (maybe_ne (group_gap_adj, 0U)
8343 && !slp_perm
8344 && known_eq (group_elt, group_size - group_gap_adj))
8346 poly_wide_int bump_val
8347 = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
8348 * group_gap_adj);
8349 tree bump = wide_int_to_tree (sizetype, bump_val);
8350 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
8351 stmt, bump);
8352 group_elt = 0;
8355 /* Bump the vector pointer to account for a gap or for excess
8356 elements loaded for a permuted SLP load. */
8357 if (maybe_ne (group_gap_adj, 0U) && slp_perm)
8359 poly_wide_int bump_val
8360 = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
8361 * group_gap_adj);
8362 tree bump = wide_int_to_tree (sizetype, bump_val);
8363 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
8364 stmt, bump);
8368 if (slp && !slp_perm)
8369 continue;
8371 if (slp_perm)
8373 unsigned n_perms;
8374 if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
8375 slp_node_instance, false,
8376 &n_perms))
8378 dr_chain.release ();
8379 return false;
8382 else
8384 if (grouped_load)
8386 if (memory_access_type != VMAT_LOAD_STORE_LANES)
8387 vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
8388 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
8390 else
8392 if (j == 0)
8393 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
8394 else
8395 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
8396 prev_stmt_info = vinfo_for_stmt (new_stmt);
8399 dr_chain.release ();
8402 return true;
8405 /* Function vect_is_simple_cond.
8407 Input:
8408 LOOP - the loop that is being vectorized.
8409 COND - Condition that is checked for simple use.
8411 Output:
8412 *COMP_VECTYPE - the vector type for the comparison.
8413 *DTS - The def types for the arguments of the comparison
8415 Returns whether a COND can be vectorized. Checks whether
8416 condition operands are supportable using vect_is_simple_use. */
8418 static bool
8419 vect_is_simple_cond (tree cond, vec_info *vinfo,
8420 tree *comp_vectype, enum vect_def_type *dts,
8421 tree vectype)
8423 tree lhs, rhs;
8424 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
8426 /* Mask case. */
8427 if (TREE_CODE (cond) == SSA_NAME
8428 && VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (cond)))
8430 gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (cond);
8431 if (!vect_is_simple_use (cond, vinfo, &lhs_def_stmt,
8432 &dts[0], comp_vectype)
8433 || !*comp_vectype
8434 || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype))
8435 return false;
8436 return true;
8439 if (!COMPARISON_CLASS_P (cond))
8440 return false;
8442 lhs = TREE_OPERAND (cond, 0);
8443 rhs = TREE_OPERAND (cond, 1);
8445 if (TREE_CODE (lhs) == SSA_NAME)
8447 gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
8448 if (!vect_is_simple_use (lhs, vinfo, &lhs_def_stmt, &dts[0], &vectype1))
8449 return false;
8451 else if (TREE_CODE (lhs) == INTEGER_CST || TREE_CODE (lhs) == REAL_CST
8452 || TREE_CODE (lhs) == FIXED_CST)
8453 dts[0] = vect_constant_def;
8454 else
8455 return false;
8457 if (TREE_CODE (rhs) == SSA_NAME)
8459 gimple *rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
8460 if (!vect_is_simple_use (rhs, vinfo, &rhs_def_stmt, &dts[1], &vectype2))
8461 return false;
8463 else if (TREE_CODE (rhs) == INTEGER_CST || TREE_CODE (rhs) == REAL_CST
8464 || TREE_CODE (rhs) == FIXED_CST)
8465 dts[1] = vect_constant_def;
8466 else
8467 return false;
8469 if (vectype1 && vectype2
8470 && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1),
8471 TYPE_VECTOR_SUBPARTS (vectype2)))
8472 return false;
8474 *comp_vectype = vectype1 ? vectype1 : vectype2;
8475 /* Invariant comparison. */
8476 if (! *comp_vectype)
8478 tree scalar_type = TREE_TYPE (lhs);
8479 /* If we can widen the comparison to match vectype do so. */
8480 if (INTEGRAL_TYPE_P (scalar_type)
8481 && tree_int_cst_lt (TYPE_SIZE (scalar_type),
8482 TYPE_SIZE (TREE_TYPE (vectype))))
8483 scalar_type = build_nonstandard_integer_type
8484 (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (vectype))),
8485 TYPE_UNSIGNED (scalar_type));
8486 *comp_vectype = get_vectype_for_scalar_type (scalar_type);
8489 return true;
8492 /* vectorizable_condition.
8494 Check if STMT is a conditional modify expression that can be vectorized.
8495 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
8496 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
8497 at GSI.
8499 When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
8500 to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
8501 the else clause if it is 2).
8503 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
8505 bool
8506 vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
8507 gimple **vec_stmt, tree reduc_def, int reduc_index,
8508 slp_tree slp_node)
8510 tree scalar_dest = NULL_TREE;
8511 tree vec_dest = NULL_TREE;
8512 tree cond_expr, cond_expr0 = NULL_TREE, cond_expr1 = NULL_TREE;
8513 tree then_clause, else_clause;
8514 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8515 tree comp_vectype = NULL_TREE;
8516 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
8517 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
8518 tree vec_compare;
8519 tree new_temp;
8520 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
8521 enum vect_def_type dts[4]
8522 = {vect_unknown_def_type, vect_unknown_def_type,
8523 vect_unknown_def_type, vect_unknown_def_type};
8524 int ndts = 4;
8525 int ncopies;
8526 enum tree_code code, cond_code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
8527 stmt_vec_info prev_stmt_info = NULL;
8528 int i, j;
8529 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
8530 vec<tree> vec_oprnds0 = vNULL;
8531 vec<tree> vec_oprnds1 = vNULL;
8532 vec<tree> vec_oprnds2 = vNULL;
8533 vec<tree> vec_oprnds3 = vNULL;
8534 tree vec_cmp_type;
8535 bool masked = false;
8537 if (reduc_index && STMT_SLP_TYPE (stmt_info))
8538 return false;
8540 vect_reduction_type reduction_type
8541 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info);
8542 if (reduction_type == TREE_CODE_REDUCTION)
8544 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
8545 return false;
8547 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
8548 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
8549 && reduc_def))
8550 return false;
8552 /* FORNOW: not yet supported. */
8553 if (STMT_VINFO_LIVE_P (stmt_info))
8555 if (dump_enabled_p ())
8556 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8557 "value used after loop.\n");
8558 return false;
8562 /* Is vectorizable conditional operation? */
8563 if (!is_gimple_assign (stmt))
8564 return false;
8566 code = gimple_assign_rhs_code (stmt);
8568 if (code != COND_EXPR)
8569 return false;
8571 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
8572 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
8574 if (slp_node)
8575 ncopies = 1;
8576 else
8577 ncopies = vect_get_num_copies (loop_vinfo, vectype);
8579 gcc_assert (ncopies >= 1);
8580 if (reduc_index && ncopies > 1)
8581 return false; /* FORNOW */
8583 cond_expr = gimple_assign_rhs1 (stmt);
8584 then_clause = gimple_assign_rhs2 (stmt);
8585 else_clause = gimple_assign_rhs3 (stmt);
8587 if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo,
8588 &comp_vectype, &dts[0], vectype)
8589 || !comp_vectype)
8590 return false;
8592 gimple *def_stmt;
8593 if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &def_stmt, &dts[2],
8594 &vectype1))
8595 return false;
8596 if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &def_stmt, &dts[3],
8597 &vectype2))
8598 return false;
8600 if (vectype1 && !useless_type_conversion_p (vectype, vectype1))
8601 return false;
8603 if (vectype2 && !useless_type_conversion_p (vectype, vectype2))
8604 return false;
8606 masked = !COMPARISON_CLASS_P (cond_expr);
8607 vec_cmp_type = build_same_sized_truth_vector_type (comp_vectype);
8609 if (vec_cmp_type == NULL_TREE)
8610 return false;
8612 cond_code = TREE_CODE (cond_expr);
8613 if (!masked)
8615 cond_expr0 = TREE_OPERAND (cond_expr, 0);
8616 cond_expr1 = TREE_OPERAND (cond_expr, 1);
8619 if (!masked && VECTOR_BOOLEAN_TYPE_P (comp_vectype))
8621 /* Boolean values may have another representation in vectors
8622 and therefore we prefer bit operations over comparison for
8623 them (which also works for scalar masks). We store opcodes
8624 to use in bitop1 and bitop2. Statement is vectorized as
8625 BITOP2 (rhs1 BITOP1 rhs2) or rhs1 BITOP2 (BITOP1 rhs2)
8626 depending on bitop1 and bitop2 arity. */
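/* For example, the mask condition x > y ? a : b is emitted as
   (x & ~y) ? a : b, and x == y ? a : b as (x ^ y) ? b : a.  */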
8627 switch (cond_code)
8629 case GT_EXPR:
8630 bitop1 = BIT_NOT_EXPR;
8631 bitop2 = BIT_AND_EXPR;
8632 break;
8633 case GE_EXPR:
8634 bitop1 = BIT_NOT_EXPR;
8635 bitop2 = BIT_IOR_EXPR;
8636 break;
8637 case LT_EXPR:
8638 bitop1 = BIT_NOT_EXPR;
8639 bitop2 = BIT_AND_EXPR;
8640 std::swap (cond_expr0, cond_expr1);
8641 break;
8642 case LE_EXPR:
8643 bitop1 = BIT_NOT_EXPR;
8644 bitop2 = BIT_IOR_EXPR;
8645 std::swap (cond_expr0, cond_expr1);
8646 break;
8647 case NE_EXPR:
8648 bitop1 = BIT_XOR_EXPR;
8649 break;
8650 case EQ_EXPR:
8651 bitop1 = BIT_XOR_EXPR;
8652 bitop2 = BIT_NOT_EXPR;
8653 break;
8654 default:
8655 return false;
8657 cond_code = SSA_NAME;
8660 if (!vec_stmt)
8662 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
8663 if (bitop1 != NOP_EXPR)
8665 machine_mode mode = TYPE_MODE (comp_vectype);
8666 optab optab;
8668 optab = optab_for_tree_code (bitop1, comp_vectype, optab_default);
8669 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8670 return false;
8672 if (bitop2 != NOP_EXPR)
8674 optab = optab_for_tree_code (bitop2, comp_vectype,
8675 optab_default);
8676 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8677 return false;
8680 if (expand_vec_cond_expr_p (vectype, comp_vectype,
8681 cond_code))
8683 if (!slp_node)
8684 vect_model_simple_cost (stmt_info, ncopies, dts, ndts, NULL, NULL);
8685 return true;
8687 return false;
8690 /* Transform. */
8692 if (!slp_node)
8694 vec_oprnds0.create (1);
8695 vec_oprnds1.create (1);
8696 vec_oprnds2.create (1);
8697 vec_oprnds3.create (1);
8700 /* Handle def. */
8701 scalar_dest = gimple_assign_lhs (stmt);
8702 if (reduction_type != EXTRACT_LAST_REDUCTION)
8703 vec_dest = vect_create_destination_var (scalar_dest, vectype);
8705 /* Handle cond expr. */
8706 for (j = 0; j < ncopies; j++)
8708 gimple *new_stmt = NULL;
8709 if (j == 0)
8711 if (slp_node)
8713 auto_vec<tree, 4> ops;
8714 auto_vec<vec<tree>, 4> vec_defs;
8716 if (masked)
8717 ops.safe_push (cond_expr);
8718 else
8720 ops.safe_push (cond_expr0);
8721 ops.safe_push (cond_expr1);
8723 ops.safe_push (then_clause);
8724 ops.safe_push (else_clause);
8725 vect_get_slp_defs (ops, slp_node, &vec_defs);
8726 vec_oprnds3 = vec_defs.pop ();
8727 vec_oprnds2 = vec_defs.pop ();
8728 if (!masked)
8729 vec_oprnds1 = vec_defs.pop ();
8730 vec_oprnds0 = vec_defs.pop ();
8732 else
8734 gimple *gtemp;
8735 if (masked)
8737 vec_cond_lhs
8738 = vect_get_vec_def_for_operand (cond_expr, stmt,
8739 comp_vectype);
8740 vect_is_simple_use (cond_expr, stmt_info->vinfo,
8741 &gtemp, &dts[0]);
8743 else
8745 vec_cond_lhs
8746 = vect_get_vec_def_for_operand (cond_expr0,
8747 stmt, comp_vectype);
8748 vect_is_simple_use (cond_expr0, loop_vinfo, &gtemp, &dts[0]);
8750 vec_cond_rhs
8751 = vect_get_vec_def_for_operand (cond_expr1,
8752 stmt, comp_vectype);
8753 vect_is_simple_use (cond_expr1, loop_vinfo, &gtemp, &dts[1]);
8755 if (reduc_index == 1)
8756 vec_then_clause = reduc_def;
8757 else
8759 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
8760 stmt);
8761 vect_is_simple_use (then_clause, loop_vinfo,
8762 &gtemp, &dts[2]);
8764 if (reduc_index == 2)
8765 vec_else_clause = reduc_def;
8766 else
8768 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
8769 stmt);
8770 vect_is_simple_use (else_clause, loop_vinfo, &gtemp, &dts[3]);
8774 else
8776 vec_cond_lhs
8777 = vect_get_vec_def_for_stmt_copy (dts[0],
8778 vec_oprnds0.pop ());
8779 if (!masked)
8780 vec_cond_rhs
8781 = vect_get_vec_def_for_stmt_copy (dts[1],
8782 vec_oprnds1.pop ());
8784 vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
8785 vec_oprnds2.pop ());
8786 vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
8787 vec_oprnds3.pop ());
8790 if (!slp_node)
8792 vec_oprnds0.quick_push (vec_cond_lhs);
8793 if (!masked)
8794 vec_oprnds1.quick_push (vec_cond_rhs);
8795 vec_oprnds2.quick_push (vec_then_clause);
8796 vec_oprnds3.quick_push (vec_else_clause);
8799 /* Arguments are ready. Create the new vector stmt. */
8800 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
8802 vec_then_clause = vec_oprnds2[i];
8803 vec_else_clause = vec_oprnds3[i];
8805 if (masked)
8806 vec_compare = vec_cond_lhs;
8807 else
8809 vec_cond_rhs = vec_oprnds1[i];
8810 if (bitop1 == NOP_EXPR)
8811 vec_compare = build2 (cond_code, vec_cmp_type,
8812 vec_cond_lhs, vec_cond_rhs);
8813 else
8815 new_temp = make_ssa_name (vec_cmp_type);
8816 if (bitop1 == BIT_NOT_EXPR)
8817 new_stmt = gimple_build_assign (new_temp, bitop1,
8818 vec_cond_rhs);
8819 else
8820 new_stmt
8821 = gimple_build_assign (new_temp, bitop1, vec_cond_lhs,
8822 vec_cond_rhs);
8823 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8824 if (bitop2 == NOP_EXPR)
8825 vec_compare = new_temp;
8826 else if (bitop2 == BIT_NOT_EXPR)
8828 /* Instead of doing ~x ? y : z do x ? z : y. */
8829 vec_compare = new_temp;
8830 std::swap (vec_then_clause, vec_else_clause);
8832 else
8834 vec_compare = make_ssa_name (vec_cmp_type);
8835 new_stmt
8836 = gimple_build_assign (vec_compare, bitop2,
8837 vec_cond_lhs, new_temp);
8838 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8842 if (reduction_type == EXTRACT_LAST_REDUCTION)
8844 if (!is_gimple_val (vec_compare))
8846 tree vec_compare_name = make_ssa_name (vec_cmp_type);
8847 new_stmt = gimple_build_assign (vec_compare_name,
8848 vec_compare);
8849 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8850 vec_compare = vec_compare_name;
8852 gcc_assert (reduc_index == 2);
8853 new_stmt = gimple_build_call_internal
8854 (IFN_FOLD_EXTRACT_LAST, 3, else_clause, vec_compare,
8855 vec_then_clause);
8856 gimple_call_set_lhs (new_stmt, scalar_dest);
8857 SSA_NAME_DEF_STMT (scalar_dest) = new_stmt;
8858 if (stmt == gsi_stmt (*gsi))
8859 vect_finish_replace_stmt (stmt, new_stmt);
8860 else
8862 /* In this case we're moving the definition to later in the
8863 block. That doesn't matter because the only uses of the
8864 lhs are in phi statements. */
8865 gimple_stmt_iterator old_gsi = gsi_for_stmt (stmt);
8866 gsi_remove (&old_gsi, true);
8867 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8870 else
8872 new_temp = make_ssa_name (vec_dest);
8873 new_stmt = gimple_build_assign (new_temp, VEC_COND_EXPR,
8874 vec_compare, vec_then_clause,
8875 vec_else_clause);
8876 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8878 if (slp_node)
8879 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
8882 if (slp_node)
8883 continue;
8885 if (j == 0)
8886 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
8887 else
8888 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
8890 prev_stmt_info = vinfo_for_stmt (new_stmt);
8893 vec_oprnds0.release ();
8894 vec_oprnds1.release ();
8895 vec_oprnds2.release ();
8896 vec_oprnds3.release ();
8898 return true;
8901 /* vectorizable_comparison.
8903 Check if STMT is a comparison expression that can be vectorized.
8904 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
8905 comparison, put it in VEC_STMT, and insert it at GSI.
8907 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
8909 static bool
8910 vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
8911 gimple **vec_stmt, tree reduc_def,
8912 slp_tree slp_node)
8914 tree lhs, rhs1, rhs2;
8915 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8916 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
8917 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
8918 tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE;
8919 tree new_temp;
8920 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
8921 enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type};
8922 int ndts = 2;
8923 poly_uint64 nunits;
8924 int ncopies;
8925 enum tree_code code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
8926 stmt_vec_info prev_stmt_info = NULL;
8927 int i, j;
8928 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
8929 vec<tree> vec_oprnds0 = vNULL;
8930 vec<tree> vec_oprnds1 = vNULL;
8931 gimple *def_stmt;
8932 tree mask_type;
8933 tree mask;
8935 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
8936 return false;
8938 if (!vectype || !VECTOR_BOOLEAN_TYPE_P (vectype))
8939 return false;
8941 mask_type = vectype;
8942 nunits = TYPE_VECTOR_SUBPARTS (vectype);
8944 if (slp_node)
8945 ncopies = 1;
8946 else
8947 ncopies = vect_get_num_copies (loop_vinfo, vectype);
8949 gcc_assert (ncopies >= 1);
8950 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
8951 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
8952 && reduc_def))
8953 return false;
8955 if (STMT_VINFO_LIVE_P (stmt_info))
8957 if (dump_enabled_p ())
8958 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8959 "value used after loop.\n");
8960 return false;
8963 if (!is_gimple_assign (stmt))
8964 return false;
8966 code = gimple_assign_rhs_code (stmt);
8968 if (TREE_CODE_CLASS (code) != tcc_comparison)
8969 return false;
8971 rhs1 = gimple_assign_rhs1 (stmt);
8972 rhs2 = gimple_assign_rhs2 (stmt);
8974 if (!vect_is_simple_use (rhs1, stmt_info->vinfo, &def_stmt,
8975 &dts[0], &vectype1))
8976 return false;
8978 if (!vect_is_simple_use (rhs2, stmt_info->vinfo, &def_stmt,
8979 &dts[1], &vectype2))
8980 return false;
8982 if (vectype1 && vectype2
8983 && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1),
8984 TYPE_VECTOR_SUBPARTS (vectype2)))
8985 return false;
8987 vectype = vectype1 ? vectype1 : vectype2;
8989 /* Invariant comparison. */
8990 if (!vectype)
8992 vectype = get_vectype_for_scalar_type (TREE_TYPE (rhs1));
8993 if (maybe_ne (TYPE_VECTOR_SUBPARTS (vectype), nunits))
8994 return false;
8996 else if (maybe_ne (nunits, TYPE_VECTOR_SUBPARTS (vectype)))
8997 return false;
8999 /* Can't compare mask and non-mask types. */
9000 if (vectype1 && vectype2
9001 && (VECTOR_BOOLEAN_TYPE_P (vectype1) ^ VECTOR_BOOLEAN_TYPE_P (vectype2)))
9002 return false;
9004 /* Boolean values may have another representation in vectors
9005 and therefore we prefer bit operations over comparison for
9006 them (which also works for scalar masks). We store opcodes
9007 to use in bitop1 and bitop2. Statement is vectorized as
9008 BITOP2 (rhs1 BITOP1 rhs2) or
9009 rhs1 BITOP2 (BITOP1 rhs2)
9010 depending on bitop1 and bitop2 arity. */
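/* For example, the mask comparison x > y is emitted as x & ~y, and
   x == y as ~(x ^ y).  */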
9011 if (VECTOR_BOOLEAN_TYPE_P (vectype))
9013 if (code == GT_EXPR)
9015 bitop1 = BIT_NOT_EXPR;
9016 bitop2 = BIT_AND_EXPR;
9018 else if (code == GE_EXPR)
9020 bitop1 = BIT_NOT_EXPR;
9021 bitop2 = BIT_IOR_EXPR;
9023 else if (code == LT_EXPR)
9025 bitop1 = BIT_NOT_EXPR;
9026 bitop2 = BIT_AND_EXPR;
9027 std::swap (rhs1, rhs2);
9028 std::swap (dts[0], dts[1]);
9030 else if (code == LE_EXPR)
9032 bitop1 = BIT_NOT_EXPR;
9033 bitop2 = BIT_IOR_EXPR;
9034 std::swap (rhs1, rhs2);
9035 std::swap (dts[0], dts[1]);
9037 else
9039 bitop1 = BIT_XOR_EXPR;
9040 if (code == EQ_EXPR)
9041 bitop2 = BIT_NOT_EXPR;
9045 if (!vec_stmt)
9047 STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type;
9048 if (!slp_node)
9049 vect_model_simple_cost (stmt_info, ncopies * (1 + (bitop2 != NOP_EXPR)),
9050 dts, ndts, NULL, NULL);
9051 if (bitop1 == NOP_EXPR)
9052 return expand_vec_cmp_expr_p (vectype, mask_type, code);
9053 else
9055 machine_mode mode = TYPE_MODE (vectype);
9056 optab optab;
9058 optab = optab_for_tree_code (bitop1, vectype, optab_default);
9059 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
9060 return false;
9062 if (bitop2 != NOP_EXPR)
9064 optab = optab_for_tree_code (bitop2, vectype, optab_default);
9065 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
9066 return false;
9068 return true;
9072 /* Transform. */
9073 if (!slp_node)
9075 vec_oprnds0.create (1);
9076 vec_oprnds1.create (1);
9079 /* Handle def. */
9080 lhs = gimple_assign_lhs (stmt);
9081 mask = vect_create_destination_var (lhs, mask_type);
9083 /* Handle cmp expr. */
9084 for (j = 0; j < ncopies; j++)
9086 gassign *new_stmt = NULL;
9087 if (j == 0)
9089 if (slp_node)
9091 auto_vec<tree, 2> ops;
9092 auto_vec<vec<tree>, 2> vec_defs;
9094 ops.safe_push (rhs1);
9095 ops.safe_push (rhs2);
9096 vect_get_slp_defs (ops, slp_node, &vec_defs);
9097 vec_oprnds1 = vec_defs.pop ();
9098 vec_oprnds0 = vec_defs.pop ();
9100 else
9102 vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt, vectype);
9103 vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt, vectype);
9106 else
9108 vec_rhs1 = vect_get_vec_def_for_stmt_copy (dts[0],
9109 vec_oprnds0.pop ());
9110 vec_rhs2 = vect_get_vec_def_for_stmt_copy (dts[1],
9111 vec_oprnds1.pop ());
9114 if (!slp_node)
9116 vec_oprnds0.quick_push (vec_rhs1);
9117 vec_oprnds1.quick_push (vec_rhs2);
9120 /* Arguments are ready. Create the new vector stmt. */
9121 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_rhs1)
9123 vec_rhs2 = vec_oprnds1[i];
9125 new_temp = make_ssa_name (mask);
9126 if (bitop1 == NOP_EXPR)
9128 new_stmt = gimple_build_assign (new_temp, code,
9129 vec_rhs1, vec_rhs2);
9130 vect_finish_stmt_generation (stmt, new_stmt, gsi);
9132 else
9134 if (bitop1 == BIT_NOT_EXPR)
9135 new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs2);
9136 else
9137 new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs1,
9138 vec_rhs2);
9139 vect_finish_stmt_generation (stmt, new_stmt, gsi);
9140 if (bitop2 != NOP_EXPR)
9142 tree res = make_ssa_name (mask);
9143 if (bitop2 == BIT_NOT_EXPR)
9144 new_stmt = gimple_build_assign (res, bitop2, new_temp);
9145 else
9146 new_stmt = gimple_build_assign (res, bitop2, vec_rhs1,
9147 new_temp);
9148 vect_finish_stmt_generation (stmt, new_stmt, gsi);
9151 if (slp_node)
9152 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
9155 if (slp_node)
9156 continue;
9158 if (j == 0)
9159 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
9160 else
9161 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
9163 prev_stmt_info = vinfo_for_stmt (new_stmt);
9166 vec_oprnds0.release ();
9167 vec_oprnds1.release ();
9169 return true;
9172 /* If SLP_NODE is nonnull, return true if vectorizable_live_operation
9173 can handle all live statements in the node. Otherwise return true
9174 if STMT is not live or if vectorizable_live_operation can handle it.
9175 GSI and VEC_STMT are as for vectorizable_live_operation. */
9177 static bool
9178 can_vectorize_live_stmts (gimple *stmt, gimple_stmt_iterator *gsi,
9179 slp_tree slp_node, gimple **vec_stmt)
9181 if (slp_node)
9183 gimple *slp_stmt;
9184 unsigned int i;
9185 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node), i, slp_stmt)
9187 stmt_vec_info slp_stmt_info = vinfo_for_stmt (slp_stmt);
9188 if (STMT_VINFO_LIVE_P (slp_stmt_info)
9189 && !vectorizable_live_operation (slp_stmt, gsi, slp_node, i,
9190 vec_stmt))
9191 return false;
9194 else if (STMT_VINFO_LIVE_P (vinfo_for_stmt (stmt))
9195 && !vectorizable_live_operation (stmt, gsi, slp_node, -1, vec_stmt))
9196 return false;
9198 return true;
9201 /* Make sure the statement is vectorizable. */
9203 bool
9204 vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
9205 slp_instance node_instance)
9207 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
9208 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
9209 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
9210 bool ok;
9211 gimple *pattern_stmt;
9212 gimple_seq pattern_def_seq;
9214 if (dump_enabled_p ())
9216 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
9217 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
9220 if (gimple_has_volatile_ops (stmt))
9222 if (dump_enabled_p ())
9223 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9224 "not vectorized: stmt has volatile operands\n");
9226 return false;
9229 /* Skip stmts that do not need to be vectorized. In loops this is expected
9230 to include:
9231 - the COND_EXPR which is the loop exit condition
9232 - any LABEL_EXPRs in the loop
9233 - computations that are used only for array indexing or loop control.
9234 In basic blocks we only analyze statements that are a part of some SLP
9235 instance; therefore, all the statements are relevant.
9237 The pattern statement needs to be analyzed instead of the original statement
9238 if the original statement is not relevant. Otherwise, we analyze both
9239 statements. In basic blocks we are called from some SLP instance
9240 traversal; we don't analyze pattern stmts here, since the pattern stmts
9241 will already be part of the SLP instance. */
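   /* For example (purely illustrative), in a loop like

	  for (i = 0; i < n; i++)
	    a[i] = b[i] + 1;

      the exit test "i < n" and the increment of "i" are used only for
      loop control and array indexing, so they are skipped here rather
      than vectorized themselves.  */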
9243 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
9244 if (!STMT_VINFO_RELEVANT_P (stmt_info)
9245 && !STMT_VINFO_LIVE_P (stmt_info))
9247 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
9248 && pattern_stmt
9249 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
9250 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
9252 /* Analyze PATTERN_STMT instead of the original stmt. */
9253 stmt = pattern_stmt;
9254 stmt_info = vinfo_for_stmt (pattern_stmt);
9255 if (dump_enabled_p ())
9257 dump_printf_loc (MSG_NOTE, vect_location,
9258 "==> examining pattern statement: ");
9259 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
9262 else
9264 if (dump_enabled_p ())
9265 dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");
9267 return true;
9270 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
9271 && node == NULL
9272 && pattern_stmt
9273 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
9274 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
9276 /* Analyze PATTERN_STMT too. */
9277 if (dump_enabled_p ())
9279 dump_printf_loc (MSG_NOTE, vect_location,
9280 "==> examining pattern statement: ");
9281 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
9284 if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node,
9285 node_instance))
9286 return false;
9289 if (is_pattern_stmt_p (stmt_info)
9290 && node == NULL
9291 && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
9293 gimple_stmt_iterator si;
9295 for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
9297 gimple *pattern_def_stmt = gsi_stmt (si);
9298 if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
9299 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
9301 /* Analyze def stmt of STMT if it's a pattern stmt. */
9302 if (dump_enabled_p ())
9304 dump_printf_loc (MSG_NOTE, vect_location,
9305 "==> examining pattern def statement: ");
9306 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
9309 if (!vect_analyze_stmt (pattern_def_stmt,
9310 need_to_vectorize, node, node_instance))
9311 return false;
9316 switch (STMT_VINFO_DEF_TYPE (stmt_info))
9318 case vect_internal_def:
9319 break;
9321 case vect_reduction_def:
9322 case vect_nested_cycle:
9323 gcc_assert (!bb_vinfo
9324 && (relevance == vect_used_in_outer
9325 || relevance == vect_used_in_outer_by_reduction
9326 || relevance == vect_used_by_reduction
9327 || relevance == vect_unused_in_scope
9328 || relevance == vect_used_only_live));
9329 break;
9331 case vect_induction_def:
9332 gcc_assert (!bb_vinfo);
9333 break;
9335 case vect_constant_def:
9336 case vect_external_def:
9337 case vect_unknown_def_type:
9338 default:
9339 gcc_unreachable ();
9342 if (STMT_VINFO_RELEVANT_P (stmt_info))
9344 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
9345 gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
9346 || (is_gimple_call (stmt)
9347 && gimple_call_lhs (stmt) == NULL_TREE));
9348 *need_to_vectorize = true;
9351 if (PURE_SLP_STMT (stmt_info) && !node)
9353 dump_printf_loc (MSG_NOTE, vect_location,
9354 "handled only by SLP analysis\n");
9355 return true;
9358 ok = true;
9359 if (!bb_vinfo
9360 && (STMT_VINFO_RELEVANT_P (stmt_info)
9361 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
9362 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
9363 || vectorizable_conversion (stmt, NULL, NULL, node)
9364 || vectorizable_shift (stmt, NULL, NULL, node)
9365 || vectorizable_operation (stmt, NULL, NULL, node)
9366 || vectorizable_assignment (stmt, NULL, NULL, node)
9367 || vectorizable_load (stmt, NULL, NULL, node, NULL)
9368 || vectorizable_call (stmt, NULL, NULL, node)
9369 || vectorizable_store (stmt, NULL, NULL, node)
9370 || vectorizable_reduction (stmt, NULL, NULL, node, node_instance)
9371 || vectorizable_induction (stmt, NULL, NULL, node)
9372 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
9373 || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
9374 else
9376 if (bb_vinfo)
9377 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
9378 || vectorizable_conversion (stmt, NULL, NULL, node)
9379 || vectorizable_shift (stmt, NULL, NULL, node)
9380 || vectorizable_operation (stmt, NULL, NULL, node)
9381 || vectorizable_assignment (stmt, NULL, NULL, node)
9382 || vectorizable_load (stmt, NULL, NULL, node, NULL)
9383 || vectorizable_call (stmt, NULL, NULL, node)
9384 || vectorizable_store (stmt, NULL, NULL, node)
9385 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
9386 || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
9389 if (!ok)
9391 if (dump_enabled_p ())
9393 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9394 "not vectorized: relevant stmt not ");
9395 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
9396 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
9399 return false;
9402 if (bb_vinfo)
9403 return true;
9405 /* Stmts that are (also) "live" (i.e. that are used outside of the loop)
9406 need extra handling, except for vectorizable reductions. */
9407 if (STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
9408 && !can_vectorize_live_stmts (stmt, NULL, node, NULL))
9410 if (dump_enabled_p ())
9412 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9413 "not vectorized: live stmt not supported: ");
9414 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
9417 return false;
9420 return true;
9424 /* Function vect_transform_stmt.
9426 Create a vectorized stmt to replace STMT, and insert it at BSI. */
9428 bool
9429 vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
9430 bool *grouped_store, slp_tree slp_node,
9431 slp_instance slp_node_instance)
9433 bool is_store = false;
9434 gimple *vec_stmt = NULL;
9435 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
9436 bool done;
9438 gcc_assert (slp_node || !PURE_SLP_STMT (stmt_info));
9439 gimple *old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
9441 bool nested_p = (STMT_VINFO_LOOP_VINFO (stmt_info)
9442 && nested_in_vect_loop_p
9443 (LOOP_VINFO_LOOP (STMT_VINFO_LOOP_VINFO (stmt_info)),
9444 stmt));
9446 switch (STMT_VINFO_TYPE (stmt_info))
9448 case type_demotion_vec_info_type:
9449 case type_promotion_vec_info_type:
9450 case type_conversion_vec_info_type:
9451 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
9452 gcc_assert (done);
9453 break;
9455 case induc_vec_info_type:
9456 done = vectorizable_induction (stmt, gsi, &vec_stmt, slp_node);
9457 gcc_assert (done);
9458 break;
9460 case shift_vec_info_type:
9461 done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
9462 gcc_assert (done);
9463 break;
9465 case op_vec_info_type:
9466 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
9467 gcc_assert (done);
9468 break;
9470 case assignment_vec_info_type:
9471 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
9472 gcc_assert (done);
9473 break;
9475 case load_vec_info_type:
9476 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
9477 slp_node_instance);
9478 gcc_assert (done);
9479 break;
9481 case store_vec_info_type:
9482 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
9483 gcc_assert (done);
9484 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
9486 /* In case of interleaving, the whole chain is vectorized when the
9487 last store in the chain is reached. Store stmts before the last
9488 one are skipped, and their vec_stmt_info shouldn't be freed
9489 meanwhile. */
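	 /* A hypothetical illustration: for a group of two interleaved stores

		p[2*i]     = x;
		p[2*i + 1] = y;

	    transforming the first store only records progress; the actual
	    interleaved vector stores are emitted once the second (last)
	    member of the group is reached.  */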
9490 *grouped_store = true;
9491 stmt_vec_info group_info
9492 = vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info));
9493 if (GROUP_STORE_COUNT (group_info) == GROUP_SIZE (group_info))
9494 is_store = true;
9496 else
9497 is_store = true;
9498 break;
9500 case condition_vec_info_type:
9501 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
9502 gcc_assert (done);
9503 break;
9505 case comparison_vec_info_type:
9506 done = vectorizable_comparison (stmt, gsi, &vec_stmt, NULL, slp_node);
9507 gcc_assert (done);
9508 break;
9510 case call_vec_info_type:
9511 done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
9512 stmt = gsi_stmt (*gsi);
9513 break;
9515 case call_simd_clone_vec_info_type:
9516 done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
9517 stmt = gsi_stmt (*gsi);
9518 break;
9520 case reduc_vec_info_type:
9521 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node,
9522 slp_node_instance);
9523 gcc_assert (done);
9524 break;
9526 default:
9527 if (!STMT_VINFO_LIVE_P (stmt_info))
9529 if (dump_enabled_p ())
9530 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9531 "stmt not supported.\n");
9532 gcc_unreachable ();
9536 /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
9537 This would break hybrid SLP vectorization. */
9538 if (slp_node)
9539 gcc_assert (!vec_stmt
9540 && STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt);
9542 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
9543 is being vectorized, but outside the immediately enclosing loop. */
9544 if (vec_stmt
9545 && nested_p
9546 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
9547 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
9548 || STMT_VINFO_RELEVANT (stmt_info) ==
9549 vect_used_in_outer_by_reduction))
9551 struct loop *innerloop = LOOP_VINFO_LOOP (
9552 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
9553 imm_use_iterator imm_iter;
9554 use_operand_p use_p;
9555 tree scalar_dest;
9556 gimple *exit_phi;
9558 if (dump_enabled_p ())
9559 dump_printf_loc (MSG_NOTE, vect_location,
9560 "Record the vdef for outer-loop vectorization.\n");
9562 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
9563 (to be used when vectorizing outer-loop stmts that use the DEF of
9564 STMT). */
9565 if (gimple_code (stmt) == GIMPLE_PHI)
9566 scalar_dest = PHI_RESULT (stmt);
9567 else
9568 scalar_dest = gimple_assign_lhs (stmt);
9570 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
9572 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
9574 exit_phi = USE_STMT (use_p);
9575 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
9580 /* Handle stmts whose DEF is used outside the loop-nest that is
9581 being vectorized. */
9582 if (STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
9584 done = can_vectorize_live_stmts (stmt, gsi, slp_node, &vec_stmt);
9585 gcc_assert (done);
9588 if (vec_stmt)
9589 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
9591 return is_store;
9595 /* Remove a group of stores (for SLP or interleaving), free their
9596 stmt_vec_info. */
9598 void
9599 vect_remove_stores (gimple *first_stmt)
9601 gimple *next = first_stmt;
9602 gimple *tmp;
9603 gimple_stmt_iterator next_si;
9605 while (next)
9607 stmt_vec_info stmt_info = vinfo_for_stmt (next);
9609 tmp = GROUP_NEXT_ELEMENT (stmt_info);
9610 if (is_pattern_stmt_p (stmt_info))
9611 next = STMT_VINFO_RELATED_STMT (stmt_info);
9612 /* Free the attached stmt_vec_info and remove the stmt. */
9613 next_si = gsi_for_stmt (next);
9614 unlink_stmt_vdef (next);
9615 gsi_remove (&next_si, true);
9616 release_defs (next);
9617 free_stmt_vec_info (next);
9618 next = tmp;
9623 /* Function new_stmt_vec_info.
9625 Create and initialize a new stmt_vec_info struct for STMT. */
9627 stmt_vec_info
9628 new_stmt_vec_info (gimple *stmt, vec_info *vinfo)
9630 stmt_vec_info res;
9631 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
9633 STMT_VINFO_TYPE (res) = undef_vec_info_type;
9634 STMT_VINFO_STMT (res) = stmt;
9635 res->vinfo = vinfo;
9636 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
9637 STMT_VINFO_LIVE_P (res) = false;
9638 STMT_VINFO_VECTYPE (res) = NULL;
9639 STMT_VINFO_VEC_STMT (res) = NULL;
9640 STMT_VINFO_VECTORIZABLE (res) = true;
9641 STMT_VINFO_IN_PATTERN_P (res) = false;
9642 STMT_VINFO_RELATED_STMT (res) = NULL;
9643 STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
9644 STMT_VINFO_DATA_REF (res) = NULL;
9645 STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION;
9646 STMT_VINFO_VEC_CONST_COND_REDUC_CODE (res) = ERROR_MARK;
9648 if (gimple_code (stmt) == GIMPLE_PHI
9649 && is_loop_header_bb_p (gimple_bb (stmt)))
9650 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
9651 else
9652 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
9654 STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
9655 STMT_SLP_TYPE (res) = loop_vect;
9656 STMT_VINFO_NUM_SLP_USES (res) = 0;
9658 GROUP_FIRST_ELEMENT (res) = NULL;
9659 GROUP_NEXT_ELEMENT (res) = NULL;
9660 GROUP_SIZE (res) = 0;
9661 GROUP_STORE_COUNT (res) = 0;
9662 GROUP_GAP (res) = 0;
9663 GROUP_SAME_DR_STMT (res) = NULL;
9665 return res;
9669 /* Create the vector that holds the stmt_vec_info structs. */
9671 void
9672 init_stmt_vec_info_vec (void)
9674 gcc_assert (!stmt_vec_info_vec.exists ());
9675 stmt_vec_info_vec.create (50);
9679 /* Free the stmt_vec_info vector, including the stmt_vec_infos it still holds. */
9681 void
9682 free_stmt_vec_info_vec (void)
9684 unsigned int i;
9685 stmt_vec_info info;
9686 FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
9687 if (info != NULL)
9688 free_stmt_vec_info (STMT_VINFO_STMT (info));
9689 gcc_assert (stmt_vec_info_vec.exists ());
9690 stmt_vec_info_vec.release ();
9694 /* Free stmt vectorization related info. */
9696 void
9697 free_stmt_vec_info (gimple *stmt)
9699 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
9701 if (!stmt_info)
9702 return;
9704 /* Check if this statement has a related "pattern stmt"
9705 (introduced by the vectorizer during the pattern recognition
9706 pass). Free the pattern's stmt_vec_info and the def stmts' stmt_vec_info
9707 too. */
9708 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
9710 stmt_vec_info patt_info
9711 = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
9712 if (patt_info)
9714 gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
9715 gimple *patt_stmt = STMT_VINFO_STMT (patt_info);
9716 gimple_set_bb (patt_stmt, NULL);
9717 tree lhs = gimple_get_lhs (patt_stmt);
9718 if (lhs && TREE_CODE (lhs) == SSA_NAME)
9719 release_ssa_name (lhs);
9720 if (seq)
9722 gimple_stmt_iterator si;
9723 for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
9725 gimple *seq_stmt = gsi_stmt (si);
9726 gimple_set_bb (seq_stmt, NULL);
9727 lhs = gimple_get_lhs (seq_stmt);
9728 if (lhs && TREE_CODE (lhs) == SSA_NAME)
9729 release_ssa_name (lhs);
9730 free_stmt_vec_info (seq_stmt);
9733 free_stmt_vec_info (patt_stmt);
9737 STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
9738 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
9739 set_vinfo_for_stmt (stmt, NULL);
9740 free (stmt_info);
9744 /* Function get_vectype_for_scalar_type_and_size.
9746 Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
9747 by the target. */
9749 tree
9750 get_vectype_for_scalar_type_and_size (tree scalar_type, poly_uint64 size)
9752 tree orig_scalar_type = scalar_type;
9753 scalar_mode inner_mode;
9754 machine_mode simd_mode;
9755 poly_uint64 nunits;
9756 tree vectype;
9758 if (!is_int_mode (TYPE_MODE (scalar_type), &inner_mode)
9759 && !is_float_mode (TYPE_MODE (scalar_type), &inner_mode))
9760 return NULL_TREE;
9762 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
9764 /* For vector types of elements whose mode precision doesn't
9765 match their type's precision we use an element type of mode
9766 precision. The vectorization routines will have to make sure
9767 they support the proper result truncation/extension.
9768 We also make sure to build vector types with INTEGER_TYPE
9769 component type only. */
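  /* For instance (an illustrative assumption about the front end's types):
     an ENUMERAL_TYPE with SImode, or a bit-field type whose precision is
     smaller than its mode, would be replaced here by

	 build_nonstandard_integer_type (32, TYPE_UNSIGNED (scalar_type))

     (respectively 8 for a QImode bit-field), so that vector elements are
     always INTEGER_TYPEs of full mode precision.  */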
9770 if (INTEGRAL_TYPE_P (scalar_type)
9771 && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
9772 || TREE_CODE (scalar_type) != INTEGER_TYPE))
9773 scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
9774 TYPE_UNSIGNED (scalar_type));
9776 /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
9777 When the component mode passes the above test simply use a type
9778 corresponding to that mode. The theory is that any use that
9779 would cause problems with this will disable vectorization anyway. */
9780 else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
9781 && !INTEGRAL_TYPE_P (scalar_type))
9782 scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);
9784 /* We can't build a vector type of elements with alignment bigger than
9785 their size. */
9786 else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
9787 scalar_type = lang_hooks.types.type_for_mode (inner_mode,
9788 TYPE_UNSIGNED (scalar_type));
9790 /* If we fell back to using the mode, fail if there was
9791 no scalar type for it. */
9792 if (scalar_type == NULL_TREE)
9793 return NULL_TREE;
9795 /* If no size was supplied, use the mode the target prefers. Otherwise
9796 look up a vector mode of the specified size. */
9797 if (known_eq (size, 0U))
9798 simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
9799 else if (!multiple_p (size, nbytes, &nunits)
9800 || !mode_for_vector (inner_mode, nunits).exists (&simd_mode))
9801 return NULL_TREE;
9802 /* NOTE: nunits == 1 is allowed to support single element vector types. */
9803 if (!multiple_p (GET_MODE_SIZE (simd_mode), nbytes, &nunits))
9804 return NULL_TREE;
9806 vectype = build_vector_type (scalar_type, nunits);
9808 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
9809 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
9810 return NULL_TREE;
9812 /* Re-attach the address-space qualifier if we canonicalized the scalar
9813 type. */
9814 if (TYPE_ADDR_SPACE (orig_scalar_type) != TYPE_ADDR_SPACE (vectype))
9815 return build_qualified_type
9816 (vectype, KEEP_QUAL_ADDR_SPACE (TYPE_QUALS (orig_scalar_type)));
9818 return vectype;
9821 poly_uint64 current_vector_size;
9823 /* Function get_vectype_for_scalar_type.
9825 Returns the vector type corresponding to SCALAR_TYPE as supported
9826 by the target. */
9828 tree
9829 get_vectype_for_scalar_type (tree scalar_type)
9831 tree vectype;
9832 vectype = get_vectype_for_scalar_type_and_size (scalar_type,
9833 current_vector_size);
9834 if (vectype
9835 && known_eq (current_vector_size, 0U))
9836 current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
9837 return vectype;
9840 /* Function get_mask_type_for_scalar_type.
9842 Returns the mask type corresponding to the result of a comparison
9843 of vectors of the specified SCALAR_TYPE, as supported by the target. */
9845 tree
9846 get_mask_type_for_scalar_type (tree scalar_type)
9848 tree vectype = get_vectype_for_scalar_type (scalar_type);
9850 if (!vectype)
9851 return NULL;
9853 return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype),
9854 current_vector_size);
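/* A hypothetical usage sketch, assuming a target whose preferred SIMD mode
   for SImode is V4SImode (so current_vector_size ends up as 16 bytes):

       tree vt = get_vectype_for_scalar_type (integer_type_node);
	 => "vector(4) int"
       tree mt = get_mask_type_for_scalar_type (integer_type_node);
	 => the corresponding 4-element boolean (mask) vector type.  */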
9857 /* Function get_same_sized_vectype
9859 Returns a vector type corresponding to SCALAR_TYPE with the same size
9860 as VECTOR_TYPE, if supported by the target. */
9862 tree
9863 get_same_sized_vectype (tree scalar_type, tree vector_type)
9865 if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type))
9866 return build_same_sized_truth_vector_type (vector_type);
9868 return get_vectype_for_scalar_type_and_size
9869 (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
9872 /* Function vect_is_simple_use.
9874 Input:
9875 VINFO - the vect info of the loop or basic block that is being vectorized.
9876 OPERAND - operand in the loop or bb.
9877 Output:
9878 DEF_STMT - the defining stmt in case OPERAND is an SSA_NAME.
9879 DT - the type of definition
9881 Returns whether a stmt with OPERAND can be vectorized.
9882 For loops, supportable operands are constants, loop invariants, and operands
9883 that are defined by the current iteration of the loop. Unsupportable
9884 operands are those that are defined by a previous iteration of the loop (as
9885 is the case in reduction/induction computations).
9886 For basic blocks, supportable operands are constants and bb invariants.
9887 For now, operands defined outside the basic block are not supported. */
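/* A minimal, hypothetical caller sketch (the real callers are the
   vectorizable_* routines in this file), where OP is a scalar operand of
   the statement being analyzed and VINFO is the enclosing vec_info:

       gimple *def_stmt;
       enum vect_def_type dt;
       if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
	 return false;

   On success *DT classifies the definition and, for SSA names defined in
   the region, *DEF_STMT points at the defining statement.  */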
9889 bool
9890 vect_is_simple_use (tree operand, vec_info *vinfo,
9891 gimple **def_stmt, enum vect_def_type *dt)
9893 *def_stmt = NULL;
9894 *dt = vect_unknown_def_type;
9896 if (dump_enabled_p ())
9898 dump_printf_loc (MSG_NOTE, vect_location,
9899 "vect_is_simple_use: operand ");
9900 dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
9901 dump_printf (MSG_NOTE, "\n");
9904 if (CONSTANT_CLASS_P (operand))
9906 *dt = vect_constant_def;
9907 return true;
9910 if (is_gimple_min_invariant (operand))
9912 *dt = vect_external_def;
9913 return true;
9916 if (TREE_CODE (operand) != SSA_NAME)
9918 if (dump_enabled_p ())
9919 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9920 "not ssa-name.\n");
9921 return false;
9924 if (SSA_NAME_IS_DEFAULT_DEF (operand))
9926 *dt = vect_external_def;
9927 return true;
9930 *def_stmt = SSA_NAME_DEF_STMT (operand);
9931 if (dump_enabled_p ())
9933 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
9934 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
9937 if (! vect_stmt_in_region_p (vinfo, *def_stmt))
9938 *dt = vect_external_def;
9939 else
9941 stmt_vec_info stmt_vinfo = vinfo_for_stmt (*def_stmt);
9942 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
9945 if (dump_enabled_p ())
9947 dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
9948 switch (*dt)
9950 case vect_uninitialized_def:
9951 dump_printf (MSG_NOTE, "uninitialized\n");
9952 break;
9953 case vect_constant_def:
9954 dump_printf (MSG_NOTE, "constant\n");
9955 break;
9956 case vect_external_def:
9957 dump_printf (MSG_NOTE, "external\n");
9958 break;
9959 case vect_internal_def:
9960 dump_printf (MSG_NOTE, "internal\n");
9961 break;
9962 case vect_induction_def:
9963 dump_printf (MSG_NOTE, "induction\n");
9964 break;
9965 case vect_reduction_def:
9966 dump_printf (MSG_NOTE, "reduction\n");
9967 break;
9968 case vect_double_reduction_def:
9969 dump_printf (MSG_NOTE, "double reduction\n");
9970 break;
9971 case vect_nested_cycle:
9972 dump_printf (MSG_NOTE, "nested cycle\n");
9973 break;
9974 case vect_unknown_def_type:
9975 dump_printf (MSG_NOTE, "unknown\n");
9976 break;
9980 if (*dt == vect_unknown_def_type)
9982 if (dump_enabled_p ())
9983 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9984 "Unsupported pattern.\n");
9985 return false;
9988 switch (gimple_code (*def_stmt))
9990 case GIMPLE_PHI:
9991 case GIMPLE_ASSIGN:
9992 case GIMPLE_CALL:
9993 break;
9994 default:
9995 if (dump_enabled_p ())
9996 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9997 "unsupported defining stmt:\n");
9998 return false;
10001 return true;
10004 /* Function vect_is_simple_use.
10006 Same as vect_is_simple_use but also determines the vector operand
10007 type of OPERAND and stores it to *VECTYPE. If the definition of
10008 OPERAND is vect_uninitialized_def, vect_constant_def or
10009 vect_external_def, *VECTYPE will be set to NULL_TREE and the caller
10010 is responsible for computing the best-suited vector type for the
10011 scalar operand. */
10013 bool
10014 vect_is_simple_use (tree operand, vec_info *vinfo,
10015 gimple **def_stmt, enum vect_def_type *dt, tree *vectype)
10017 if (!vect_is_simple_use (operand, vinfo, def_stmt, dt))
10018 return false;
10020 /* Now get a vector type if the def is internal, otherwise supply
10021 NULL_TREE and leave it up to the caller to figure out a proper
10022 type for the use stmt. */
10023 if (*dt == vect_internal_def
10024 || *dt == vect_induction_def
10025 || *dt == vect_reduction_def
10026 || *dt == vect_double_reduction_def
10027 || *dt == vect_nested_cycle)
10029 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
10031 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
10032 && !STMT_VINFO_RELEVANT (stmt_info)
10033 && !STMT_VINFO_LIVE_P (stmt_info))
10034 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
10036 *vectype = STMT_VINFO_VECTYPE (stmt_info);
10037 gcc_assert (*vectype != NULL_TREE);
10039 else if (*dt == vect_uninitialized_def
10040 || *dt == vect_constant_def
10041 || *dt == vect_external_def)
10042 *vectype = NULL_TREE;
10043 else
10044 gcc_unreachable ();
10046 return true;
10050 /* Function supportable_widening_operation
10052 Check whether an operation represented by the code CODE is a
10053 widening operation that is supported by the target platform in
10054 vector form (i.e., when operating on arguments of type VECTYPE_IN
10055 producing a result of type VECTYPE_OUT).
10057 Widening operations we currently support are NOP (CONVERT), FLOAT
10058 and WIDEN_MULT. This function checks if these operations are supported
10059 by the target platform either directly (via vector tree-codes), or via
10060 target builtins.
10062 Output:
10063 - CODE1 and CODE2 are codes of vector operations to be used when
10064 vectorizing the operation, if available.
10065 - MULTI_STEP_CVT determines the number of required intermediate steps in
10066 case of multi-step conversion (like char->short->int - in that case
10067 MULTI_STEP_CVT will be 1).
10068 - INTERM_TYPES contains the intermediate type required to perform the
10069 widening operation (short in the above example). */
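/* A hypothetical caller sketch for a char -> int widening conversion,
   assuming a target that only provides single-step unpacking:

       enum tree_code code1, code2;
       int multi_step_cvt = 0;
       vec<tree> interm_types = vNULL;
       bool ok = supportable_widening_operation (NOP_EXPR, stmt, vectype_out,
						 vectype_in, &code1, &code2,
						 &multi_step_cvt,
						 &interm_types);

   On success CODE1/CODE2 would be e.g. VEC_UNPACK_LO_EXPR/VEC_UNPACK_HI_EXPR,
   MULTI_STEP_CVT would be 1 and INTERM_TYPES would hold the vector type of
   short used for the intermediate step (the caller releases INTERM_TYPES
   when done).  */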
10071 bool
10072 supportable_widening_operation (enum tree_code code, gimple *stmt,
10073 tree vectype_out, tree vectype_in,
10074 enum tree_code *code1, enum tree_code *code2,
10075 int *multi_step_cvt,
10076 vec<tree> *interm_types)
10078 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
10079 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
10080 struct loop *vect_loop = NULL;
10081 machine_mode vec_mode;
10082 enum insn_code icode1, icode2;
10083 optab optab1, optab2;
10084 tree vectype = vectype_in;
10085 tree wide_vectype = vectype_out;
10086 enum tree_code c1, c2;
10087 int i;
10088 tree prev_type, intermediate_type;
10089 machine_mode intermediate_mode, prev_mode;
10090 optab optab3, optab4;
10092 *multi_step_cvt = 0;
10093 if (loop_info)
10094 vect_loop = LOOP_VINFO_LOOP (loop_info);
10096 switch (code)
10098 case WIDEN_MULT_EXPR:
10099 /* The result of a vectorized widening operation usually requires
10100 two vectors (because the widened results do not fit into one vector).
10101 The generated vector results would normally be expected to be
10102 generated in the same order as in the original scalar computation,
10103 i.e. if 8 results are generated in each vector iteration, they are
10104 to be organized as follows:
10105 vect1: [res1,res2,res3,res4],
10106 vect2: [res5,res6,res7,res8].
10108 However, in the special case that the result of the widening
10109 operation is used in a reduction computation only, the order doesn't
10110 matter (because when vectorizing a reduction we change the order of
10111 the computation). Some targets can take advantage of this and
10112 generate more efficient code. For example, targets like Altivec,
10113 that support widen_mult using a sequence of {mult_even,mult_odd}
10114 generate the following vectors:
10115 vect1: [res1,res3,res5,res7],
10116 vect2: [res2,res4,res6,res8].
10118 When vectorizing outer-loops, we execute the inner-loop sequentially
10119 (each vectorized inner-loop iteration contributes to VF outer-loop
10120 iterations in parallel). We therefore don't allow changing the
10121 order of the computation in the inner-loop during outer-loop
10122 vectorization. */
10123 /* TODO: Another case in which order doesn't *really* matter is when we
10124 widen and then contract again, e.g. (short)((int)x * y >> 8).
10125 Normally, pack_trunc performs an even/odd permute, whereas the
10126 repack from an even/odd expansion would be an interleave, which
10127 would be significantly simpler for e.g. AVX2. */
10128 /* In any case, in order to avoid duplicating the code below, recurse
10129 on VEC_WIDEN_MULT_EVEN_EXPR. If it succeeds, all the return values
10130 are properly set up for the caller. If we fail, we'll continue with
10131 a VEC_WIDEN_MULT_LO/HI_EXPR check. */
10132 if (vect_loop
10133 && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
10134 && !nested_in_vect_loop_p (vect_loop, stmt)
10135 && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
10136 stmt, vectype_out, vectype_in,
10137 code1, code2, multi_step_cvt,
10138 interm_types))
10140 /* Elements in a vector with vect_used_by_reduction property cannot
10141 be reordered if the use chain with this property does not have the
10142 same operation. One such example is s += a * b, where elements
10143 in a and b cannot be reordered. Here we check if the vector defined
10144 by STMT is only directly used in the reduction statement. */
10145 tree lhs = gimple_assign_lhs (stmt);
10146 use_operand_p dummy;
10147 gimple *use_stmt;
10148 stmt_vec_info use_stmt_info = NULL;
10149 if (single_imm_use (lhs, &dummy, &use_stmt)
10150 && (use_stmt_info = vinfo_for_stmt (use_stmt))
10151 && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
10152 return true;
10154 c1 = VEC_WIDEN_MULT_LO_EXPR;
10155 c2 = VEC_WIDEN_MULT_HI_EXPR;
10156 break;
10158 case DOT_PROD_EXPR:
10159 c1 = DOT_PROD_EXPR;
10160 c2 = DOT_PROD_EXPR;
10161 break;
10163 case SAD_EXPR:
10164 c1 = SAD_EXPR;
10165 c2 = SAD_EXPR;
10166 break;
10168 case VEC_WIDEN_MULT_EVEN_EXPR:
10169 /* Support the recursion induced just above. */
10170 c1 = VEC_WIDEN_MULT_EVEN_EXPR;
10171 c2 = VEC_WIDEN_MULT_ODD_EXPR;
10172 break;
10174 case WIDEN_LSHIFT_EXPR:
10175 c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
10176 c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
10177 break;
10179 CASE_CONVERT:
10180 c1 = VEC_UNPACK_LO_EXPR;
10181 c2 = VEC_UNPACK_HI_EXPR;
10182 break;
10184 case FLOAT_EXPR:
10185 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
10186 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
10187 break;
10189 case FIX_TRUNC_EXPR:
10190 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
10191 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
10192 computing the operation. */
10193 return false;
10195 default:
10196 gcc_unreachable ();
10199 if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
10200 std::swap (c1, c2);
10202 if (code == FIX_TRUNC_EXPR)
10204 /* The signedness is determined from the output operand. */
10205 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
10206 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
10208 else
10210 optab1 = optab_for_tree_code (c1, vectype, optab_default);
10211 optab2 = optab_for_tree_code (c2, vectype, optab_default);
10214 if (!optab1 || !optab2)
10215 return false;
10217 vec_mode = TYPE_MODE (vectype);
10218 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
10219 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
10220 return false;
10222 *code1 = c1;
10223 *code2 = c2;
10225 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
10226 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
10227 /* For scalar masks we may have different boolean
10228 vector types having the same QImode. Thus we
10229 add an additional check on the number of elements. */
10230 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
10231 || known_eq (TYPE_VECTOR_SUBPARTS (vectype),
10232 TYPE_VECTOR_SUBPARTS (wide_vectype) * 2));
10234 /* Check if it's a multi-step conversion that can be done using intermediate
10235 types. */
10237 prev_type = vectype;
10238 prev_mode = vec_mode;
10240 if (!CONVERT_EXPR_CODE_P (code))
10241 return false;
10243 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
10244 intermediate steps in the promotion sequence. We try
10245 MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
10246 not. */
10247 interm_types->create (MAX_INTERM_CVT_STEPS);
10248 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
10250 intermediate_mode = insn_data[icode1].operand[0].mode;
10251 if (VECTOR_BOOLEAN_TYPE_P (prev_type))
10253 intermediate_type = vect_halve_mask_nunits (prev_type);
10254 if (intermediate_mode != TYPE_MODE (intermediate_type))
10255 return false;
10257 else
10258 intermediate_type
10259 = lang_hooks.types.type_for_mode (intermediate_mode,
10260 TYPE_UNSIGNED (prev_type));
10262 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
10263 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
10265 if (!optab3 || !optab4
10266 || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
10267 || insn_data[icode1].operand[0].mode != intermediate_mode
10268 || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
10269 || insn_data[icode2].operand[0].mode != intermediate_mode
10270 || ((icode1 = optab_handler (optab3, intermediate_mode))
10271 == CODE_FOR_nothing)
10272 || ((icode2 = optab_handler (optab4, intermediate_mode))
10273 == CODE_FOR_nothing))
10274 break;
10276 interm_types->quick_push (intermediate_type);
10277 (*multi_step_cvt)++;
10279 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
10280 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
10281 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
10282 || known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type),
10283 TYPE_VECTOR_SUBPARTS (wide_vectype) * 2));
10285 prev_type = intermediate_type;
10286 prev_mode = intermediate_mode;
10289 interm_types->release ();
10290 return false;
10294 /* Function supportable_narrowing_operation
10296 Check whether an operation represented by the code CODE is a
10297 narrowing operation that is supported by the target platform in
10298 vector form (i.e., when operating on arguments of type VECTYPE_IN
10299 and producing a result of type VECTYPE_OUT).
10301 Narrowing operations we currently support are NOP (CONVERT) and
10302 FIX_TRUNC. This function checks if these operations are supported by
10303 the target platform directly via vector tree-codes.
10305 Output:
10306 - CODE1 is the code of a vector operation to be used when
10307 vectorizing the operation, if available.
10308 - MULTI_STEP_CVT determines the number of required intermediate steps in
10309 case of multi-step conversion (like int->short->char - in that case
10310 MULTI_STEP_CVT will be 1).
10311 - INTERM_TYPES contains the intermediate type required to perform the
10312 narrowing operation (short in the above example). */
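/* A hypothetical caller sketch for an int -> char narrowing conversion,
   assuming a target that only packs one step at a time:

       enum tree_code code1;
       int multi_step_cvt = 0;
       vec<tree> interm_types = vNULL;
       bool ok = supportable_narrowing_operation (NOP_EXPR, vectype_out,
						  vectype_in, &code1,
						  &multi_step_cvt,
						  &interm_types);

   On success CODE1 would be VEC_PACK_TRUNC_EXPR, MULTI_STEP_CVT would be 1
   and INTERM_TYPES would hold the vector type of short used for the
   intermediate int -> short step.  */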
10314 bool
10315 supportable_narrowing_operation (enum tree_code code,
10316 tree vectype_out, tree vectype_in,
10317 enum tree_code *code1, int *multi_step_cvt,
10318 vec<tree> *interm_types)
10320 machine_mode vec_mode;
10321 enum insn_code icode1;
10322 optab optab1, interm_optab;
10323 tree vectype = vectype_in;
10324 tree narrow_vectype = vectype_out;
10325 enum tree_code c1;
10326 tree intermediate_type, prev_type;
10327 machine_mode intermediate_mode, prev_mode;
10328 int i;
10329 bool uns;
10331 *multi_step_cvt = 0;
10332 switch (code)
10334 CASE_CONVERT:
10335 c1 = VEC_PACK_TRUNC_EXPR;
10336 break;
10338 case FIX_TRUNC_EXPR:
10339 c1 = VEC_PACK_FIX_TRUNC_EXPR;
10340 break;
10342 case FLOAT_EXPR:
10343 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
10344 tree code and optabs used for computing the operation. */
10345 return false;
10347 default:
10348 gcc_unreachable ();
10351 if (code == FIX_TRUNC_EXPR)
10352 /* The signedness is determined from the output operand. */
10353 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
10354 else
10355 optab1 = optab_for_tree_code (c1, vectype, optab_default);
10357 if (!optab1)
10358 return false;
10360 vec_mode = TYPE_MODE (vectype);
10361 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
10362 return false;
10364 *code1 = c1;
10366 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
10367 /* For scalar masks we may have different boolean
10368 vector types having the same QImode. Thus we
10369 add an additional check on the number of elements. */
10370 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
10371 || known_eq (TYPE_VECTOR_SUBPARTS (vectype) * 2,
10372 TYPE_VECTOR_SUBPARTS (narrow_vectype)));
10374 /* Check if it's a multi-step conversion that can be done using intermediate
10375 types. */
10376 prev_mode = vec_mode;
10377 prev_type = vectype;
10378 if (code == FIX_TRUNC_EXPR)
10379 uns = TYPE_UNSIGNED (vectype_out);
10380 else
10381 uns = TYPE_UNSIGNED (vectype);
10383 /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
10384 conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
10385 costly than signed. */
10386 if (code == FIX_TRUNC_EXPR && uns)
10388 enum insn_code icode2;
10390 intermediate_type
10391 = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
10392 interm_optab
10393 = optab_for_tree_code (c1, intermediate_type, optab_default);
10394 if (interm_optab != unknown_optab
10395 && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
10396 && insn_data[icode1].operand[0].mode
10397 == insn_data[icode2].operand[0].mode)
10399 uns = false;
10400 optab1 = interm_optab;
10401 icode1 = icode2;
10405 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
10406 intermediate steps in the narrowing sequence. We try
10407 MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not. */
10408 interm_types->create (MAX_INTERM_CVT_STEPS);
10409 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
10411 intermediate_mode = insn_data[icode1].operand[0].mode;
10412 if (VECTOR_BOOLEAN_TYPE_P (prev_type))
10414 intermediate_type = vect_double_mask_nunits (prev_type);
10415 if (intermediate_mode != TYPE_MODE (intermediate_type))
10416 return false;
10418 else
10419 intermediate_type
10420 = lang_hooks.types.type_for_mode (intermediate_mode, uns);
10421 interm_optab
10422 = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
10423 optab_default);
10424 if (!interm_optab
10425 || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
10426 || insn_data[icode1].operand[0].mode != intermediate_mode
10427 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
10428 == CODE_FOR_nothing))
10429 break;
10431 interm_types->quick_push (intermediate_type);
10432 (*multi_step_cvt)++;
10434 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
10435 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
10436 || known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type) * 2,
10437 TYPE_VECTOR_SUBPARTS (narrow_vectype)));
10439 prev_mode = intermediate_mode;
10440 prev_type = intermediate_type;
10441 optab1 = interm_optab;
10444 interm_types->release ();
10445 return false;
10448 /* Generate and return a statement that sets vector mask MASK such that
10449 MASK[I] is true iff J + START_INDEX < END_INDEX for all J <= I. */
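/* A worked example (illustrative only): with a 4-lane MASK, START_INDEX 6
   and END_INDEX 9 the generated IFN_WHILE_ULT call yields

       MASK = { 6 < 9, 7 < 9, 8 < 9, 9 < 9 } = { 1, 1, 1, 0 },

   i.e. the first END_INDEX - START_INDEX lanes are set and the remaining
   lanes are clear, which is what fully-masked loops use to handle a final
   partial vector iteration.  */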
10451 gcall *
10452 vect_gen_while (tree mask, tree start_index, tree end_index)
10454 tree cmp_type = TREE_TYPE (start_index);
10455 tree mask_type = TREE_TYPE (mask);
10456 gcc_checking_assert (direct_internal_fn_supported_p (IFN_WHILE_ULT,
10457 cmp_type, mask_type,
10458 OPTIMIZE_FOR_SPEED));
10459 gcall *call = gimple_build_call_internal (IFN_WHILE_ULT, 3,
10460 start_index, end_index,
10461 build_zero_cst (mask_type));
10462 gimple_call_set_lhs (call, mask);
10463 return call;
10466 /* Generate a vector mask of type MASK_TYPE for which index I is false iff
10467 J + START_INDEX < END_INDEX for all J <= I. Add the statements to SEQ. */
10469 tree
10470 vect_gen_while_not (gimple_seq *seq, tree mask_type, tree start_index,
10471 tree end_index)
10473 tree tmp = make_ssa_name (mask_type);
10474 gcall *call = vect_gen_while (tmp, start_index, end_index);
10475 gimple_seq_add_stmt (seq, call);
10476 return gimple_build (seq, BIT_NOT_EXPR, mask_type, tmp);