gcc/tree-vect-stmts.c
1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003-2018 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
4 and Ira Rosen <irar@il.ibm.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "ssa.h"
31 #include "optabs-tree.h"
32 #include "insn-config.h"
33 #include "recog.h" /* FIXME: for insn_data */
34 #include "cgraph.h"
35 #include "dumpfile.h"
36 #include "alias.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
39 #include "tree-eh.h"
40 #include "gimplify.h"
41 #include "gimple-iterator.h"
42 #include "gimplify-me.h"
43 #include "tree-cfg.h"
44 #include "tree-ssa-loop-manip.h"
45 #include "cfgloop.h"
46 #include "tree-ssa-loop.h"
47 #include "tree-scalar-evolution.h"
48 #include "tree-vectorizer.h"
49 #include "builtins.h"
50 #include "internal-fn.h"
51 #include "tree-vector-builder.h"
52 #include "vec-perm-indices.h"
53 #include "tree-ssa-loop-niter.h"
54 #include "gimple-fold.h"
56 /* For lang_hooks.types.type_for_mode. */
57 #include "langhooks.h"
59 /* Return the vectorized type for the given statement. */
61 tree
62 stmt_vectype (struct _stmt_vec_info *stmt_info)
64 return STMT_VINFO_VECTYPE (stmt_info);
67 /* Return TRUE iff the given statement is in an inner loop relative to
68 the loop being vectorized. */
69 bool
70 stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
72 gimple *stmt = STMT_VINFO_STMT (stmt_info);
73 basic_block bb = gimple_bb (stmt);
74 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
75 struct loop* loop;
77 if (!loop_vinfo)
78 return false;
80 loop = LOOP_VINFO_LOOP (loop_vinfo);
82 return (bb->loop_father == loop->inner);
85 /* Record the cost of a statement, either by directly informing the
86 target model or by saving it in a vector for later processing.
87 Return a preliminary estimate of the statement's cost. */
89 unsigned
90 record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
91 enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
92 int misalign, enum vect_cost_model_location where)
94 if ((kind == vector_load || kind == unaligned_load)
95 && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
96 kind = vector_gather_load;
97 if ((kind == vector_store || kind == unaligned_store)
98 && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
99 kind = vector_scatter_store;
100 if (body_cost_vec)
102 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
103 stmt_info_for_cost si = { count, kind,
104 stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
105 misalign };
106 body_cost_vec->safe_push (si);
107 return (unsigned)
108 (builtin_vectorization_cost (kind, vectype, misalign) * count);
110 else
111 return add_stmt_cost (stmt_info->vinfo->target_cost_data,
112 count, kind, stmt_info, misalign, where);
115 /* Return a variable of type ELEM_TYPE[NELEMS]. */
117 static tree
118 create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
120 return create_tmp_var (build_array_type_nelts (elem_type, nelems),
121 "vect_array");
124 /* ARRAY is an array of vectors created by create_vector_array.
125 Return an SSA_NAME for the vector in index N. The reference
126 is part of the vectorization of STMT and the vector is associated
127 with scalar destination SCALAR_DEST. */
129 static tree
130 read_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
131 tree array, unsigned HOST_WIDE_INT n)
133 tree vect_type, vect, vect_name, array_ref;
134 gimple *new_stmt;
136 gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
137 vect_type = TREE_TYPE (TREE_TYPE (array));
138 vect = vect_create_destination_var (scalar_dest, vect_type);
139 array_ref = build4 (ARRAY_REF, vect_type, array,
140 build_int_cst (size_type_node, n),
141 NULL_TREE, NULL_TREE);
143 new_stmt = gimple_build_assign (vect, array_ref);
144 vect_name = make_ssa_name (vect, new_stmt);
145 gimple_assign_set_lhs (new_stmt, vect_name);
146 vect_finish_stmt_generation (stmt, new_stmt, gsi);
148 return vect_name;
151 /* ARRAY is an array of vectors created by create_vector_array.
152 Emit code to store SSA_NAME VECT in index N of the array.
153 The store is part of the vectorization of STMT. */
155 static void
156 write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect,
157 tree array, unsigned HOST_WIDE_INT n)
159 tree array_ref;
160 gimple *new_stmt;
162 array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
163 build_int_cst (size_type_node, n),
164 NULL_TREE, NULL_TREE);
166 new_stmt = gimple_build_assign (array_ref, vect);
167 vect_finish_stmt_generation (stmt, new_stmt, gsi);
170 /* PTR is a pointer to an array of type TYPE. Return a representation
171 of *PTR. The memory reference replaces those in FIRST_DR
172 (and its group). */
174 static tree
175 create_array_ref (tree type, tree ptr, tree alias_ptr_type)
177 tree mem_ref;
179 mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
180 /* Arrays have the same alignment as their type. */
181 set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
182 return mem_ref;
185 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
187 /* Function vect_mark_relevant.
189 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
191 static void
192 vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
193 enum vect_relevant relevant, bool live_p)
195 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
196 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
197 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
198 gimple *pattern_stmt;
200 if (dump_enabled_p ())
202 dump_printf_loc (MSG_NOTE, vect_location,
203 "mark relevant %d, live %d: ", relevant, live_p);
204 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
207 /* If this stmt is an original stmt in a pattern, we might need to mark its
208 related pattern stmt instead of the original stmt. However, such stmts
209 may have their own uses that are not in any pattern, in such cases the
210 stmt itself should be marked. */
211 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
213 /* This is the last stmt in a sequence that was detected as a
214 pattern that can potentially be vectorized. Don't mark the stmt
215 as relevant/live because it's not going to be vectorized.
216 Instead mark the pattern-stmt that replaces it. */
218 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
220 if (dump_enabled_p ())
221 dump_printf_loc (MSG_NOTE, vect_location,
222 "last stmt in pattern. don't mark"
223 " relevant/live.\n");
224 stmt_info = vinfo_for_stmt (pattern_stmt);
225 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
226 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
227 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
228 stmt = pattern_stmt;
231 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
232 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
233 STMT_VINFO_RELEVANT (stmt_info) = relevant;
235 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
236 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
238 if (dump_enabled_p ())
239 dump_printf_loc (MSG_NOTE, vect_location,
240 "already marked relevant/live.\n");
241 return;
244 worklist->safe_push (stmt);
248 /* Function is_simple_and_all_uses_invariant
250 Return true if STMT is simple and all uses of it are invariant. */
252 bool
253 is_simple_and_all_uses_invariant (gimple *stmt, loop_vec_info loop_vinfo)
255 tree op;
256 gimple *def_stmt;
257 ssa_op_iter iter;
259 if (!is_gimple_assign (stmt))
260 return false;
262 FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
264 enum vect_def_type dt = vect_uninitialized_def;
266 if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt))
268 if (dump_enabled_p ())
269 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
270 "use not simple.\n");
271 return false;
274 if (dt != vect_external_def && dt != vect_constant_def)
275 return false;
277 return true;
280 /* Function vect_stmt_relevant_p.
282    Return true if STMT, in the loop represented by LOOP_VINFO, is
283 "relevant for vectorization".
285 A stmt is considered "relevant for vectorization" if:
286 - it has uses outside the loop.
287 - it has vdefs (it alters memory).
288    - it is a control stmt in the loop (except for the exit condition).
290 CHECKME: what other side effects would the vectorizer allow? */
292 static bool
293 vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
294 enum vect_relevant *relevant, bool *live_p)
296 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
297 ssa_op_iter op_iter;
298 imm_use_iterator imm_iter;
299 use_operand_p use_p;
300 def_operand_p def_p;
302 *relevant = vect_unused_in_scope;
303 *live_p = false;
305 /* cond stmt other than loop exit cond. */
306 if (is_ctrl_stmt (stmt)
307 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
308 != loop_exit_ctrl_vec_info_type)
309 *relevant = vect_used_in_scope;
311 /* changing memory. */
312 if (gimple_code (stmt) != GIMPLE_PHI)
313 if (gimple_vdef (stmt)
314 && !gimple_clobber_p (stmt))
316 if (dump_enabled_p ())
317 dump_printf_loc (MSG_NOTE, vect_location,
318 "vec_stmt_relevant_p: stmt has vdefs.\n");
319 *relevant = vect_used_in_scope;
322 /* uses outside the loop. */
323 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
325 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
327 basic_block bb = gimple_bb (USE_STMT (use_p));
328 if (!flow_bb_inside_loop_p (loop, bb))
330 if (dump_enabled_p ())
331 dump_printf_loc (MSG_NOTE, vect_location,
332 "vec_stmt_relevant_p: used out of loop.\n");
334 if (is_gimple_debug (USE_STMT (use_p)))
335 continue;
337 /* We expect all such uses to be in the loop exit phis
338 (because of loop closed form) */
339 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
340 gcc_assert (bb == single_exit (loop)->dest);
342 *live_p = true;
347 if (*live_p && *relevant == vect_unused_in_scope
348 && !is_simple_and_all_uses_invariant (stmt, loop_vinfo))
350 if (dump_enabled_p ())
351 dump_printf_loc (MSG_NOTE, vect_location,
352 "vec_stmt_relevant_p: stmt live but not relevant.\n");
353 *relevant = vect_used_only_live;
356 return (*live_p || *relevant);
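/* Two illustrative cases (hypothetical loop, not from any testcase): a
   store such as "a[i_3] = t_5" alters memory, so the vdef check above sets
   *relevant = vect_used_in_scope; a summation whose result is only read
   after the loop (through the loop-closed PHI) sets *live_p = true and,
   unless it is invariant, ends up marked vect_used_only_live.  */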
360 /* Function exist_non_indexing_operands_for_use_p
362 USE is one of the uses attached to STMT. Check if USE is
363 used in STMT for anything other than indexing an array. */
365 static bool
366 exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)
368 tree operand;
369 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
371 /* USE corresponds to some operand in STMT. If there is no data
372 reference in STMT, then any operand that corresponds to USE
373 is not indexing an array. */
374 if (!STMT_VINFO_DATA_REF (stmt_info))
375 return true;
377   /* STMT has a data_ref.  FORNOW this means that it's of one of
378 the following forms:
379 -1- ARRAY_REF = var
380 -2- var = ARRAY_REF
381 (This should have been verified in analyze_data_refs).
383 'var' in the second case corresponds to a def, not a use,
384 so USE cannot correspond to any operands that are not used
385 for array indexing.
387 Therefore, all we need to check is if STMT falls into the
388 first case, and whether var corresponds to USE. */
390 if (!gimple_assign_copy_p (stmt))
392 if (is_gimple_call (stmt)
393 && gimple_call_internal_p (stmt))
395 internal_fn ifn = gimple_call_internal_fn (stmt);
396 int mask_index = internal_fn_mask_index (ifn);
397 if (mask_index >= 0
398 && use == gimple_call_arg (stmt, mask_index))
399 return true;
400 int stored_value_index = internal_fn_stored_value_index (ifn);
401 if (stored_value_index >= 0
402 && use == gimple_call_arg (stmt, stored_value_index))
403 return true;
404 if (internal_gather_scatter_fn_p (ifn)
405 && use == gimple_call_arg (stmt, 1))
406 return true;
408 return false;
411 if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
412 return false;
413 operand = gimple_assign_rhs1 (stmt);
414 if (TREE_CODE (operand) != SSA_NAME)
415 return false;
417 if (operand == use)
418 return true;
420 return false;
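/* Example (hypothetical GIMPLE names, for illustration only): for the
   store "a[i_5] = x_3" the use x_3 is the copied rhs, so the function
   returns true for it, while i_5 only feeds the array index and the
   function returns false for that use.  */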
425 Function process_use.
427 Inputs:
428 - a USE in STMT in a loop represented by LOOP_VINFO
429 - RELEVANT - enum value to be set in the STMT_VINFO of the stmt
430 that defined USE. This is done by calling mark_relevant and passing it
431 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
432 - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
433 be performed.
435 Outputs:
436 Generally, LIVE_P and RELEVANT are used to define the liveness and
437 relevance info of the DEF_STMT of this USE:
438 STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
439 STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
440 Exceptions:
441 - case 1: If USE is used only for address computations (e.g. array indexing),
442 which does not need to be directly vectorized, then the liveness/relevance
443 of the respective DEF_STMT is left unchanged.
444 - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
445    skip DEF_STMT because it has already been processed.
446 - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
447 be modified accordingly.
449 Return true if everything is as expected. Return false otherwise. */
451 static bool
452 process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo,
453 enum vect_relevant relevant, vec<gimple *> *worklist,
454 bool force)
456 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
457 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
458 stmt_vec_info dstmt_vinfo;
459 basic_block bb, def_bb;
460 gimple *def_stmt;
461 enum vect_def_type dt;
463 /* case 1: we are only interested in uses that need to be vectorized. Uses
464 that are used for address computation are not considered relevant. */
465 if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
466 return true;
468 if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt))
470 if (dump_enabled_p ())
471 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
472 "not vectorized: unsupported use in stmt.\n");
473 return false;
476 if (!def_stmt || gimple_nop_p (def_stmt))
477 return true;
479 def_bb = gimple_bb (def_stmt);
480 if (!flow_bb_inside_loop_p (loop, def_bb))
482 if (dump_enabled_p ())
483 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
484 return true;
487 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
488 DEF_STMT must have already been processed, because this should be the
489 only way that STMT, which is a reduction-phi, was put in the worklist,
490 as there should be no other uses for DEF_STMT in the loop. So we just
491 check that everything is as expected, and we are done. */
492 dstmt_vinfo = vinfo_for_stmt (def_stmt);
493 bb = gimple_bb (stmt);
494 if (gimple_code (stmt) == GIMPLE_PHI
495 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
496 && gimple_code (def_stmt) != GIMPLE_PHI
497 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
498 && bb->loop_father == def_bb->loop_father)
500 if (dump_enabled_p ())
501 dump_printf_loc (MSG_NOTE, vect_location,
502 "reduc-stmt defining reduc-phi in the same nest.\n");
503 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
504 dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
505 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
506 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
507 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
508 return true;
511 /* case 3a: outer-loop stmt defining an inner-loop stmt:
512 outer-loop-header-bb:
513 d = def_stmt
514 inner-loop:
515 stmt # use (d)
516 outer-loop-tail-bb:
517 ... */
518 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
520 if (dump_enabled_p ())
521 dump_printf_loc (MSG_NOTE, vect_location,
522 "outer-loop def-stmt defining inner-loop stmt.\n");
524 switch (relevant)
526 case vect_unused_in_scope:
527 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
528 vect_used_in_scope : vect_unused_in_scope;
529 break;
531 case vect_used_in_outer_by_reduction:
532 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
533 relevant = vect_used_by_reduction;
534 break;
536 case vect_used_in_outer:
537 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
538 relevant = vect_used_in_scope;
539 break;
541 case vect_used_in_scope:
542 break;
544 default:
545 gcc_unreachable ();
549 /* case 3b: inner-loop stmt defining an outer-loop stmt:
550 outer-loop-header-bb:
552 inner-loop:
553 d = def_stmt
554 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
555 stmt # use (d) */
556 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
558 if (dump_enabled_p ())
559 dump_printf_loc (MSG_NOTE, vect_location,
560 "inner-loop def-stmt defining outer-loop stmt.\n");
562 switch (relevant)
564 case vect_unused_in_scope:
565 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
566 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
567 vect_used_in_outer_by_reduction : vect_unused_in_scope;
568 break;
570 case vect_used_by_reduction:
571 case vect_used_only_live:
572 relevant = vect_used_in_outer_by_reduction;
573 break;
575 case vect_used_in_scope:
576 relevant = vect_used_in_outer;
577 break;
579 default:
580 gcc_unreachable ();
583 /* We are also not interested in uses on loop PHI backedges that are
584 inductions. Otherwise we'll needlessly vectorize the IV increment
585 and cause hybrid SLP for SLP inductions. Unless the PHI is live
586 of course. */
587 else if (gimple_code (stmt) == GIMPLE_PHI
588 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_induction_def
589 && ! STMT_VINFO_LIVE_P (stmt_vinfo)
590 && (PHI_ARG_DEF_FROM_EDGE (stmt, loop_latch_edge (bb->loop_father))
591 == use))
593 if (dump_enabled_p ())
594 dump_printf_loc (MSG_NOTE, vect_location,
595 "induction value on backedge.\n");
596 return true;
600 vect_mark_relevant (worklist, def_stmt, relevant, false);
601 return true;
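/* A short worked example of case 3b above (hypothetical nest): if the
   inner loop computes d and the use of d sits in the outer-loop tail bb
   with RELEVANT == vect_used_in_scope, the switch translates it to
   vect_used_in_outer before vect_mark_relevant is called on DEF_STMT.  */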
605 /* Function vect_mark_stmts_to_be_vectorized.
607 Not all stmts in the loop need to be vectorized. For example:
609 for i...
610 for j...
611 1. T0 = i + j
612 2. T1 = a[T0]
614 3. j = j + 1
616    Stmts 1 and 3 do not need to be vectorized, because loop control and
617 addressing of vectorized data-refs are handled differently.
619 This pass detects such stmts. */
621 bool
622 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
624 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
625 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
626 unsigned int nbbs = loop->num_nodes;
627 gimple_stmt_iterator si;
628 gimple *stmt;
629 unsigned int i;
630 stmt_vec_info stmt_vinfo;
631 basic_block bb;
632 gimple *phi;
633 bool live_p;
634 enum vect_relevant relevant;
636 if (dump_enabled_p ())
637 dump_printf_loc (MSG_NOTE, vect_location,
638 "=== vect_mark_stmts_to_be_vectorized ===\n");
640 auto_vec<gimple *, 64> worklist;
642 /* 1. Init worklist. */
643 for (i = 0; i < nbbs; i++)
645 bb = bbs[i];
646 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
648 phi = gsi_stmt (si);
649 if (dump_enabled_p ())
651 dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
652 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
655 if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
656 vect_mark_relevant (&worklist, phi, relevant, live_p);
658 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
660 stmt = gsi_stmt (si);
661 if (dump_enabled_p ())
663 dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
664 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
667 if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
668 vect_mark_relevant (&worklist, stmt, relevant, live_p);
672 /* 2. Process_worklist */
673 while (worklist.length () > 0)
675 use_operand_p use_p;
676 ssa_op_iter iter;
678 stmt = worklist.pop ();
679 if (dump_enabled_p ())
681 dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
682 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
685 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
686 (DEF_STMT) as relevant/irrelevant according to the relevance property
687 of STMT. */
688 stmt_vinfo = vinfo_for_stmt (stmt);
689 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
691 /* Generally, the relevance property of STMT (in STMT_VINFO_RELEVANT) is
692 propagated as is to the DEF_STMTs of its USEs.
694 One exception is when STMT has been identified as defining a reduction
695 variable; in this case we set the relevance to vect_used_by_reduction.
696 This is because we distinguish between two kinds of relevant stmts -
697 those that are used by a reduction computation, and those that are
698 (also) used by a regular computation. This allows us later on to
699 identify stmts that are used solely by a reduction, and therefore the
700 order of the results that they produce does not have to be kept. */
702 switch (STMT_VINFO_DEF_TYPE (stmt_vinfo))
704 case vect_reduction_def:
705 gcc_assert (relevant != vect_unused_in_scope);
706 if (relevant != vect_unused_in_scope
707 && relevant != vect_used_in_scope
708 && relevant != vect_used_by_reduction
709 && relevant != vect_used_only_live)
711 if (dump_enabled_p ())
712 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
713 "unsupported use of reduction.\n");
714 return false;
716 break;
718 case vect_nested_cycle:
719 if (relevant != vect_unused_in_scope
720 && relevant != vect_used_in_outer_by_reduction
721 && relevant != vect_used_in_outer)
723 if (dump_enabled_p ())
724 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
725 "unsupported use of nested cycle.\n");
727 return false;
729 break;
731 case vect_double_reduction_def:
732 if (relevant != vect_unused_in_scope
733 && relevant != vect_used_by_reduction
734 && relevant != vect_used_only_live)
736 if (dump_enabled_p ())
737 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
738 "unsupported use of double reduction.\n");
740 return false;
742 break;
744 default:
745 break;
748 if (is_pattern_stmt_p (stmt_vinfo))
750 /* Pattern statements are not inserted into the code, so
751 FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
752 have to scan the RHS or function arguments instead. */
753 if (is_gimple_assign (stmt))
755 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
756 tree op = gimple_assign_rhs1 (stmt);
758 i = 1;
759 if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
761 if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
762 relevant, &worklist, false)
763 || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
764 relevant, &worklist, false))
765 return false;
766 i = 2;
768 for (; i < gimple_num_ops (stmt); i++)
770 op = gimple_op (stmt, i);
771 if (TREE_CODE (op) == SSA_NAME
772 && !process_use (stmt, op, loop_vinfo, relevant,
773 &worklist, false))
774 return false;
777 else if (is_gimple_call (stmt))
779 for (i = 0; i < gimple_call_num_args (stmt); i++)
781 tree arg = gimple_call_arg (stmt, i);
782 if (!process_use (stmt, arg, loop_vinfo, relevant,
783 &worklist, false))
784 return false;
788 else
789 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
791 tree op = USE_FROM_PTR (use_p);
792 if (!process_use (stmt, op, loop_vinfo, relevant,
793 &worklist, false))
794 return false;
797 if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
799 gather_scatter_info gs_info;
800 if (!vect_check_gather_scatter (stmt, loop_vinfo, &gs_info))
801 gcc_unreachable ();
802 if (!process_use (stmt, gs_info.offset, loop_vinfo, relevant,
803 &worklist, true))
804 return false;
806 } /* while worklist */
808 return true;
812 /* Function vect_model_simple_cost.
814 Models cost for simple operations, i.e. those that only emit ncopies of a
815 single op. Right now, this does not account for multiple insns that could
816 be generated for the single vector op. We will handle that shortly. */
818 void
819 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
820 enum vect_def_type *dt,
821 int ndts,
822 stmt_vector_for_cost *prologue_cost_vec,
823 stmt_vector_for_cost *body_cost_vec)
825 int i;
826 int inside_cost = 0, prologue_cost = 0;
828 /* The SLP costs were already calculated during SLP tree build. */
829 if (PURE_SLP_STMT (stmt_info))
830 return;
832   /* Cost the "broadcast" of a scalar operand into a vector operand.
833 Use scalar_to_vec to cost the broadcast, as elsewhere in the vector
834 cost model. */
835 for (i = 0; i < ndts; i++)
836 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
837 prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
838 stmt_info, 0, vect_prologue);
840 /* Pass the inside-of-loop statements to the target-specific cost model. */
841 inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
842 stmt_info, 0, vect_body);
844 if (dump_enabled_p ())
845 dump_printf_loc (MSG_NOTE, vect_location,
846 "vect_model_simple_cost: inside_cost = %d, "
847 "prologue_cost = %d .\n", inside_cost, prologue_cost);
851 /* Model cost for type demotion and promotion operations. PWR is normally
852 zero for single-step promotions and demotions. It will be one if
853 two-step promotion/demotion is required, and so on. Each additional
854 step doubles the number of instructions required. */
856 static void
857 vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
858 enum vect_def_type *dt, int pwr)
860 int i, tmp;
861 int inside_cost = 0, prologue_cost = 0;
862 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
863 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
864 void *target_cost_data;
866 /* The SLP costs were already calculated during SLP tree build. */
867 if (PURE_SLP_STMT (stmt_info))
868 return;
870 if (loop_vinfo)
871 target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
872 else
873 target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);
875 for (i = 0; i < pwr + 1; i++)
877 tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
878 (i + 1) : i;
879 inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
880 vec_promote_demote, stmt_info, 0,
881 vect_body);
884 /* FORNOW: Assuming maximum 2 args per stmts. */
885 for (i = 0; i < 2; i++)
886 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
887 prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
888 stmt_info, 0, vect_prologue);
890 if (dump_enabled_p ())
891 dump_printf_loc (MSG_NOTE, vect_location,
892 "vect_model_promotion_demotion_cost: inside_cost = %d, "
893 "prologue_cost = %d .\n", inside_cost, prologue_cost);
896 /* Function vect_model_store_cost
898 Models cost for stores. In the case of grouped accesses, one access
899 has the overhead of the grouped access attributed to it. */
901 void
902 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
903 vect_memory_access_type memory_access_type,
904 vec_load_store_type vls_type, slp_tree slp_node,
905 stmt_vector_for_cost *prologue_cost_vec,
906 stmt_vector_for_cost *body_cost_vec)
908 unsigned int inside_cost = 0, prologue_cost = 0;
909 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
910 gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
911 bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);
913 if (vls_type == VLS_STORE_INVARIANT)
914 prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
915 stmt_info, 0, vect_prologue);
917 /* Grouped stores update all elements in the group at once,
918 so we want the DR for the first statement. */
919 if (!slp_node && grouped_access_p)
921 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
922 dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
925 /* True if we should include any once-per-group costs as well as
926 the cost of the statement itself. For SLP we only get called
927 once per group anyhow. */
928 bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));
930 /* We assume that the cost of a single store-lanes instruction is
931 equivalent to the cost of GROUP_SIZE separate stores. If a grouped
932 access is instead being provided by a permute-and-store operation,
933 include the cost of the permutes. */
934 if (first_stmt_p
935 && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
937   /* Uses high and low interleave or shuffle operations for each
938 needed permute. */
939 int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
940 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
941 inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
942 stmt_info, 0, vect_body);
944 if (dump_enabled_p ())
945 dump_printf_loc (MSG_NOTE, vect_location,
946 "vect_model_store_cost: strided group_size = %d .\n",
947 group_size);
950 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
951 /* Costs of the stores. */
952 if (memory_access_type == VMAT_ELEMENTWISE
953 || memory_access_type == VMAT_GATHER_SCATTER)
955 /* N scalar stores plus extracting the elements. */
956 unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
957 inside_cost += record_stmt_cost (body_cost_vec,
958 ncopies * assumed_nunits,
959 scalar_store, stmt_info, 0, vect_body);
961 else
962 vect_get_store_cost (dr, ncopies, &inside_cost, body_cost_vec);
964 if (memory_access_type == VMAT_ELEMENTWISE
965 || memory_access_type == VMAT_STRIDED_SLP)
967 /* N scalar stores plus extracting the elements. */
968 unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
969 inside_cost += record_stmt_cost (body_cost_vec,
970 ncopies * assumed_nunits,
971 vec_to_scalar, stmt_info, 0, vect_body);
974 if (dump_enabled_p ())
975 dump_printf_loc (MSG_NOTE, vect_location,
976 "vect_model_store_cost: inside_cost = %d, "
977 "prologue_cost = %d .\n", inside_cost, prologue_cost);
981 /* Calculate cost of DR's memory access. */
982 void
983 vect_get_store_cost (struct data_reference *dr, int ncopies,
984 unsigned int *inside_cost,
985 stmt_vector_for_cost *body_cost_vec)
987 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
988 gimple *stmt = DR_STMT (dr);
989 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
991 switch (alignment_support_scheme)
993 case dr_aligned:
995 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
996 vector_store, stmt_info, 0,
997 vect_body);
999 if (dump_enabled_p ())
1000 dump_printf_loc (MSG_NOTE, vect_location,
1001 "vect_model_store_cost: aligned.\n");
1002 break;
1005 case dr_unaligned_supported:
1007 /* Here, we assign an additional cost for the unaligned store. */
1008 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1009 unaligned_store, stmt_info,
1010 DR_MISALIGNMENT (dr), vect_body);
1011 if (dump_enabled_p ())
1012 dump_printf_loc (MSG_NOTE, vect_location,
1013 "vect_model_store_cost: unaligned supported by "
1014 "hardware.\n");
1015 break;
1018 case dr_unaligned_unsupported:
1020 *inside_cost = VECT_MAX_COST;
1022 if (dump_enabled_p ())
1023 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1024 "vect_model_store_cost: unsupported access.\n");
1025 break;
1028 default:
1029 gcc_unreachable ();
1034 /* Function vect_model_load_cost
1036 Models cost for loads. In the case of grouped accesses, one access has
1037 the overhead of the grouped access attributed to it. Since unaligned
1038 accesses are supported for loads, we also account for the costs of the
1039 access scheme chosen. */
1041 void
1042 vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
1043 vect_memory_access_type memory_access_type,
1044 slp_tree slp_node,
1045 stmt_vector_for_cost *prologue_cost_vec,
1046 stmt_vector_for_cost *body_cost_vec)
1048 gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
1049 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
1050 unsigned int inside_cost = 0, prologue_cost = 0;
1051 bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);
1053 /* Grouped loads read all elements in the group at once,
1054 so we want the DR for the first statement. */
1055 if (!slp_node && grouped_access_p)
1057 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
1058 dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
1061 /* True if we should include any once-per-group costs as well as
1062 the cost of the statement itself. For SLP we only get called
1063 once per group anyhow. */
1064 bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));
1066 /* We assume that the cost of a single load-lanes instruction is
1067 equivalent to the cost of GROUP_SIZE separate loads. If a grouped
1068 access is instead being provided by a load-and-permute operation,
1069 include the cost of the permutes. */
1070 if (first_stmt_p
1071 && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
1073   /* Uses even and odd extract operations or shuffle operations
1074 for each needed permute. */
1075 int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
1076 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
1077 inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
1078 stmt_info, 0, vect_body);
1080 if (dump_enabled_p ())
1081 dump_printf_loc (MSG_NOTE, vect_location,
1082 "vect_model_load_cost: strided group_size = %d .\n",
1083 group_size);
1086 /* The loads themselves. */
1087 if (memory_access_type == VMAT_ELEMENTWISE
1088 || memory_access_type == VMAT_GATHER_SCATTER)
1090 /* N scalar loads plus gathering them into a vector. */
1091 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1092 unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
1093 inside_cost += record_stmt_cost (body_cost_vec,
1094 ncopies * assumed_nunits,
1095 scalar_load, stmt_info, 0, vect_body);
1097 else
1098 vect_get_load_cost (dr, ncopies, first_stmt_p,
1099 &inside_cost, &prologue_cost,
1100 prologue_cost_vec, body_cost_vec, true);
1101 if (memory_access_type == VMAT_ELEMENTWISE
1102 || memory_access_type == VMAT_STRIDED_SLP)
1103 inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
1104 stmt_info, 0, vect_body);
1106 if (dump_enabled_p ())
1107 dump_printf_loc (MSG_NOTE, vect_location,
1108 "vect_model_load_cost: inside_cost = %d, "
1109 "prologue_cost = %d .\n", inside_cost, prologue_cost);
1113 /* Calculate cost of DR's memory access. */
1114 void
1115 vect_get_load_cost (struct data_reference *dr, int ncopies,
1116 bool add_realign_cost, unsigned int *inside_cost,
1117 unsigned int *prologue_cost,
1118 stmt_vector_for_cost *prologue_cost_vec,
1119 stmt_vector_for_cost *body_cost_vec,
1120 bool record_prologue_costs)
1122 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
1123 gimple *stmt = DR_STMT (dr);
1124 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1126 switch (alignment_support_scheme)
1128 case dr_aligned:
1130 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1131 stmt_info, 0, vect_body);
1133 if (dump_enabled_p ())
1134 dump_printf_loc (MSG_NOTE, vect_location,
1135 "vect_model_load_cost: aligned.\n");
1137 break;
1139 case dr_unaligned_supported:
1141 /* Here, we assign an additional cost for the unaligned load. */
1142 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1143 unaligned_load, stmt_info,
1144 DR_MISALIGNMENT (dr), vect_body);
1146 if (dump_enabled_p ())
1147 dump_printf_loc (MSG_NOTE, vect_location,
1148 "vect_model_load_cost: unaligned supported by "
1149 "hardware.\n");
1151 break;
1153 case dr_explicit_realign:
1155 *inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
1156 vector_load, stmt_info, 0, vect_body);
1157 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1158 vec_perm, stmt_info, 0, vect_body);
1160 /* FIXME: If the misalignment remains fixed across the iterations of
1161 the containing loop, the following cost should be added to the
1162 prologue costs. */
1163 if (targetm.vectorize.builtin_mask_for_load)
1164 *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
1165 stmt_info, 0, vect_body);
1167 if (dump_enabled_p ())
1168 dump_printf_loc (MSG_NOTE, vect_location,
1169 "vect_model_load_cost: explicit realign\n");
1171 break;
1173 case dr_explicit_realign_optimized:
1175 if (dump_enabled_p ())
1176 dump_printf_loc (MSG_NOTE, vect_location,
1177 "vect_model_load_cost: unaligned software "
1178 "pipelined.\n");
1180 /* Unaligned software pipeline has a load of an address, an initial
1181 load, and possibly a mask operation to "prime" the loop. However,
1182 if this is an access in a group of loads, which provide grouped
1183 access, then the above cost should only be considered for one
1184 access in the group. Inside the loop, there is a load op
1185 and a realignment op. */
1187 if (add_realign_cost && record_prologue_costs)
1189 *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
1190 vector_stmt, stmt_info,
1191 0, vect_prologue);
1192 if (targetm.vectorize.builtin_mask_for_load)
1193 *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
1194 vector_stmt, stmt_info,
1195 0, vect_prologue);
1198 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1199 stmt_info, 0, vect_body);
1200 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
1201 stmt_info, 0, vect_body);
1203 if (dump_enabled_p ())
1204 dump_printf_loc (MSG_NOTE, vect_location,
1205 "vect_model_load_cost: explicit realign optimized"
1206 "\n");
1208 break;
1211 case dr_unaligned_unsupported:
1213 *inside_cost = VECT_MAX_COST;
1215 if (dump_enabled_p ())
1216 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1217 "vect_model_load_cost: unsupported access.\n");
1218 break;
1221 default:
1222 gcc_unreachable ();
1226 /* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
1227 the loop preheader for the vectorized stmt STMT. */
1229 static void
1230 vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
1232 if (gsi)
1233 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1234 else
1236 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1237 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1239 if (loop_vinfo)
1241 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1242 basic_block new_bb;
1243 edge pe;
1245 if (nested_in_vect_loop_p (loop, stmt))
1246 loop = loop->inner;
1248 pe = loop_preheader_edge (loop);
1249 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
1250 gcc_assert (!new_bb);
1252 else
1254 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
1255 basic_block bb;
1256 gimple_stmt_iterator gsi_bb_start;
1258 gcc_assert (bb_vinfo);
1259 bb = BB_VINFO_BB (bb_vinfo);
1260 gsi_bb_start = gsi_after_labels (bb);
1261 gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
1265 if (dump_enabled_p ())
1267 dump_printf_loc (MSG_NOTE, vect_location,
1268 "created new init_stmt: ");
1269 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
1273 /* Function vect_init_vector.
1275 Insert a new stmt (INIT_STMT) that initializes a new variable of type
1276    TYPE with the value VAL.  If TYPE is a vector type and VAL does not have
1277    a vector type, a vector with all elements equal to VAL is created first.
1278    Place the initialization at GSI if it is not NULL.  Otherwise, place the
1279 initialization at the loop preheader.
1280 Return the DEF of INIT_STMT.
1281 It will be used in the vectorization of STMT. */
1283 tree
1284 vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi)
1286 gimple *init_stmt;
1287 tree new_temp;
1289   /* We abuse this function to push something to an SSA name with initial 'val'.  */
1290 if (! useless_type_conversion_p (type, TREE_TYPE (val)))
1292 gcc_assert (TREE_CODE (type) == VECTOR_TYPE);
1293 if (! types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
1295	  /* A scalar boolean value should be transformed into an
1296	     all-zeros or all-ones value before building a vector.  */
1297 if (VECTOR_BOOLEAN_TYPE_P (type))
1299 tree true_val = build_all_ones_cst (TREE_TYPE (type));
1300 tree false_val = build_zero_cst (TREE_TYPE (type));
1302 if (CONSTANT_CLASS_P (val))
1303 val = integer_zerop (val) ? false_val : true_val;
1304 else
1306 new_temp = make_ssa_name (TREE_TYPE (type));
1307 init_stmt = gimple_build_assign (new_temp, COND_EXPR,
1308 val, true_val, false_val);
1309 vect_init_vector_1 (stmt, init_stmt, gsi);
1310 val = new_temp;
1313 else if (CONSTANT_CLASS_P (val))
1314 val = fold_convert (TREE_TYPE (type), val);
1315 else
1317 new_temp = make_ssa_name (TREE_TYPE (type));
1318 if (! INTEGRAL_TYPE_P (TREE_TYPE (val)))
1319 init_stmt = gimple_build_assign (new_temp,
1320 fold_build1 (VIEW_CONVERT_EXPR,
1321 TREE_TYPE (type),
1322 val));
1323 else
1324 init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
1325 vect_init_vector_1 (stmt, init_stmt, gsi);
1326 val = new_temp;
1329 val = build_vector_from_val (type, val);
1332 new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
1333 init_stmt = gimple_build_assign (new_temp, val);
1334 vect_init_vector_1 (stmt, init_stmt, gsi);
1335 return new_temp;
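/* Illustrative sketch (hypothetical SSA names): called with VAL == x_1 of
   type int and TYPE == vector(4) int, the function emits something like

     cst_2 = {x_1, x_1, x_1, x_1};

   at GSI (or in the loop preheader when GSI is NULL) and returns cst_2.  */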
1338 /* Function vect_get_vec_def_for_operand_1.
1340 For a defining stmt DEF_STMT of a scalar stmt, return a vector def with type
1341 DT that will be used in the vectorized stmt. */
1343 tree
1344 vect_get_vec_def_for_operand_1 (gimple *def_stmt, enum vect_def_type dt)
1346 tree vec_oprnd;
1347 gimple *vec_stmt;
1348 stmt_vec_info def_stmt_info = NULL;
1350 switch (dt)
1352 /* operand is a constant or a loop invariant. */
1353 case vect_constant_def:
1354 case vect_external_def:
1355 /* Code should use vect_get_vec_def_for_operand. */
1356 gcc_unreachable ();
1358 /* operand is defined inside the loop. */
1359 case vect_internal_def:
1361 /* Get the def from the vectorized stmt. */
1362 def_stmt_info = vinfo_for_stmt (def_stmt);
1364 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1365 /* Get vectorized pattern statement. */
1366 if (!vec_stmt
1367 && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
1368 && !STMT_VINFO_RELEVANT (def_stmt_info))
1369 vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
1370 STMT_VINFO_RELATED_STMT (def_stmt_info)));
1371 gcc_assert (vec_stmt);
1372 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1373 vec_oprnd = PHI_RESULT (vec_stmt);
1374 else if (is_gimple_call (vec_stmt))
1375 vec_oprnd = gimple_call_lhs (vec_stmt);
1376 else
1377 vec_oprnd = gimple_assign_lhs (vec_stmt);
1378 return vec_oprnd;
1381 /* operand is defined by a loop header phi. */
1382 case vect_reduction_def:
1383 case vect_double_reduction_def:
1384 case vect_nested_cycle:
1385 case vect_induction_def:
1387 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1389 /* Get the def from the vectorized stmt. */
1390 def_stmt_info = vinfo_for_stmt (def_stmt);
1391 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1392 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1393 vec_oprnd = PHI_RESULT (vec_stmt);
1394 else
1395 vec_oprnd = gimple_get_lhs (vec_stmt);
1396 return vec_oprnd;
1399 default:
1400 gcc_unreachable ();
1405 /* Function vect_get_vec_def_for_operand.
1407 OP is an operand in STMT. This function returns a (vector) def that will be
1408 used in the vectorized stmt for STMT.
1410 In the case that OP is an SSA_NAME which is defined in the loop, then
1411 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
1413 In case OP is an invariant or constant, a new stmt that creates a vector def
1414 needs to be introduced. VECTYPE may be used to specify a required type for
1415 vector invariant. */
1417 tree
1418 vect_get_vec_def_for_operand (tree op, gimple *stmt, tree vectype)
1420 gimple *def_stmt;
1421 enum vect_def_type dt;
1422 bool is_simple_use;
1423 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1424 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1426 if (dump_enabled_p ())
1428 dump_printf_loc (MSG_NOTE, vect_location,
1429 "vect_get_vec_def_for_operand: ");
1430 dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
1431 dump_printf (MSG_NOTE, "\n");
1434 is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt);
1435 gcc_assert (is_simple_use);
1436 if (def_stmt && dump_enabled_p ())
1438 dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = ");
1439 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
1442 if (dt == vect_constant_def || dt == vect_external_def)
1444 tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
1445 tree vector_type;
1447 if (vectype)
1448 vector_type = vectype;
1449 else if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op))
1450 && VECTOR_BOOLEAN_TYPE_P (stmt_vectype))
1451 vector_type = build_same_sized_truth_vector_type (stmt_vectype);
1452 else
1453 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
1455 gcc_assert (vector_type);
1456 return vect_init_vector (stmt, op, vector_type, NULL);
1458 else
1459 return vect_get_vec_def_for_operand_1 (def_stmt, dt);
1463 /* Function vect_get_vec_def_for_stmt_copy
1465 Return a vector-def for an operand. This function is used when the
1466 vectorized stmt to be created (by the caller to this function) is a "copy"
1467 created in case the vectorized result cannot fit in one vector, and several
1468 copies of the vector-stmt are required. In this case the vector-def is
1469 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1470 of the stmt that defines VEC_OPRND.
1471 DT is the type of the vector def VEC_OPRND.
1473 Context:
1474 In case the vectorization factor (VF) is bigger than the number
1475 of elements that can fit in a vectype (nunits), we have to generate
1476 more than one vector stmt to vectorize the scalar stmt. This situation
1477 arises when there are multiple data-types operated upon in the loop; the
1478 smallest data-type determines the VF, and as a result, when vectorizing
1479 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1480 vector stmt (each computing a vector of 'nunits' results, and together
1481 computing 'VF' results in each iteration). This function is called when
1482 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1483 which VF=16 and nunits=4, so the number of copies required is 4):
1485 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1487 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1488 VS1.1: vx.1 = memref1 VS1.2
1489 VS1.2: vx.2 = memref2 VS1.3
1490 VS1.3: vx.3 = memref3
1492 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1493 VSnew.1: vz1 = vx.1 + ... VSnew.2
1494 VSnew.2: vz2 = vx.2 + ... VSnew.3
1495 VSnew.3: vz3 = vx.3 + ...
1497 The vectorization of S1 is explained in vectorizable_load.
1498 The vectorization of S2:
1499 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1500 the function 'vect_get_vec_def_for_operand' is called to
1501 get the relevant vector-def for each operand of S2. For operand x it
1502 returns the vector-def 'vx.0'.
1504 To create the remaining copies of the vector-stmt (VSnew.j), this
1505 function is called to get the relevant vector-def for each operand. It is
1506 obtained from the respective VS1.j stmt, which is recorded in the
1507 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1509 For example, to obtain the vector-def 'vx.1' in order to create the
1510 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1511 Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
1512 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1513 and return its def ('vx.1').
1514 Overall, to create the above sequence this function will be called 3 times:
1515 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1516 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1517 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1519 tree
1520 vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1522 gimple *vec_stmt_for_operand;
1523 stmt_vec_info def_stmt_info;
1525 /* Do nothing; can reuse same def. */
1526 if (dt == vect_external_def || dt == vect_constant_def )
1527 return vec_oprnd;
1529 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1530 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1531 gcc_assert (def_stmt_info);
1532 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1533 gcc_assert (vec_stmt_for_operand);
1534 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1535 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1536 else
1537 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1538 return vec_oprnd;
1542 /* Get vectorized definitions for the operands to create a copy of an original
1543 stmt. See vect_get_vec_def_for_stmt_copy () for details. */
1545 void
1546 vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
1547 vec<tree> *vec_oprnds0,
1548 vec<tree> *vec_oprnds1)
1550 tree vec_oprnd = vec_oprnds0->pop ();
1552 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
1553 vec_oprnds0->quick_push (vec_oprnd);
1555 if (vec_oprnds1 && vec_oprnds1->length ())
1557 vec_oprnd = vec_oprnds1->pop ();
1558 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
1559 vec_oprnds1->quick_push (vec_oprnd);
1564 /* Get vectorized definitions for OP0 and OP1. */
1566 void
1567 vect_get_vec_defs (tree op0, tree op1, gimple *stmt,
1568 vec<tree> *vec_oprnds0,
1569 vec<tree> *vec_oprnds1,
1570 slp_tree slp_node)
1572 if (slp_node)
1574 int nops = (op1 == NULL_TREE) ? 1 : 2;
1575 auto_vec<tree> ops (nops);
1576 auto_vec<vec<tree> > vec_defs (nops);
1578 ops.quick_push (op0);
1579 if (op1)
1580 ops.quick_push (op1);
1582 vect_get_slp_defs (ops, slp_node, &vec_defs);
1584 *vec_oprnds0 = vec_defs[0];
1585 if (op1)
1586 *vec_oprnds1 = vec_defs[1];
1588 else
1590 tree vec_oprnd;
1592 vec_oprnds0->create (1);
1593 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt);
1594 vec_oprnds0->quick_push (vec_oprnd);
1596 if (op1)
1598 vec_oprnds1->create (1);
1599 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt);
1600 vec_oprnds1->quick_push (vec_oprnd);
1605 /* Helper function called by vect_finish_replace_stmt and
1606 vect_finish_stmt_generation. Set the location of the new
1607 statement and create a stmt_vec_info for it. */
1609 static void
1610 vect_finish_stmt_generation_1 (gimple *stmt, gimple *vec_stmt)
1612 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1613 vec_info *vinfo = stmt_info->vinfo;
1615 set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo));
1617 if (dump_enabled_p ())
1619 dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
1620 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
1623 gimple_set_location (vec_stmt, gimple_location (stmt));
1625 /* While EH edges will generally prevent vectorization, stmt might
1626 e.g. be in a must-not-throw region. Ensure newly created stmts
1627 that could throw are part of the same region. */
1628 int lp_nr = lookup_stmt_eh_lp (stmt);
1629 if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
1630 add_stmt_to_eh_lp (vec_stmt, lp_nr);
1633 /* Replace the scalar statement STMT with a new vector statement VEC_STMT,
1634 which sets the same scalar result as STMT did. */
1636 void
1637 vect_finish_replace_stmt (gimple *stmt, gimple *vec_stmt)
1639 gcc_assert (gimple_get_lhs (stmt) == gimple_get_lhs (vec_stmt));
1641 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
1642 gsi_replace (&gsi, vec_stmt, false);
1644 vect_finish_stmt_generation_1 (stmt, vec_stmt);
1647 /* Function vect_finish_stmt_generation.
1649 Insert a new stmt. */
1651 void
1652 vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt,
1653 gimple_stmt_iterator *gsi)
1655 gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
1657 if (!gsi_end_p (*gsi)
1658 && gimple_has_mem_ops (vec_stmt))
1660 gimple *at_stmt = gsi_stmt (*gsi);
1661 tree vuse = gimple_vuse (at_stmt);
1662 if (vuse && TREE_CODE (vuse) == SSA_NAME)
1664 tree vdef = gimple_vdef (at_stmt);
1665 gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
1666 /* If we have an SSA vuse and insert a store, update virtual
1667 SSA form to avoid triggering the renamer. Do so only
1668 if we can easily see all uses - which is what almost always
1669 happens with the way vectorized stmts are inserted. */
1670 if ((vdef && TREE_CODE (vdef) == SSA_NAME)
1671 && ((is_gimple_assign (vec_stmt)
1672 && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
1673 || (is_gimple_call (vec_stmt)
1674 && !(gimple_call_flags (vec_stmt)
1675 & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
1677 tree new_vdef = copy_ssa_name (vuse, vec_stmt);
1678 gimple_set_vdef (vec_stmt, new_vdef);
1679 SET_USE (gimple_vuse_op (at_stmt), new_vdef);
1683 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1684 vect_finish_stmt_generation_1 (stmt, vec_stmt);
1687 /* We want to vectorize a call to combined function CFN with function
1688 decl FNDECL, using VECTYPE_OUT as the type of the output and VECTYPE_IN
1689 as the types of all inputs. Check whether this is possible using
1690 an internal function, returning its code if so or IFN_LAST if not. */
1692 static internal_fn
1693 vectorizable_internal_function (combined_fn cfn, tree fndecl,
1694 tree vectype_out, tree vectype_in)
1696 internal_fn ifn;
1697 if (internal_fn_p (cfn))
1698 ifn = as_internal_fn (cfn);
1699 else
1700 ifn = associated_internal_fn (fndecl);
1701 if (ifn != IFN_LAST && direct_internal_fn_p (ifn))
1703 const direct_internal_fn_info &info = direct_internal_fn (ifn);
1704 if (info.vectorizable)
1706 tree type0 = (info.type0 < 0 ? vectype_out : vectype_in);
1707 tree type1 = (info.type1 < 0 ? vectype_out : vectype_in);
1708 if (direct_internal_fn_supported_p (ifn, tree_pair (type0, type1),
1709 OPTIMIZE_FOR_SPEED))
1710 return ifn;
1713 return IFN_LAST;
1717 static tree permute_vec_elements (tree, tree, tree, gimple *,
1718 gimple_stmt_iterator *);
1720 /* Check whether a load or store statement in the loop described by
1721 LOOP_VINFO is possible in a fully-masked loop. This is testing
1722 whether the vectorizer pass has the appropriate support, as well as
1723 whether the target does.
1725 VLS_TYPE says whether the statement is a load or store and VECTYPE
1726 is the type of the vector being loaded or stored. MEMORY_ACCESS_TYPE
1727 says how the load or store is going to be implemented and GROUP_SIZE
1728 is the number of load or store statements in the containing group.
1729 If the access is a gather load or scatter store, GS_INFO describes
1730 its arguments.
1732 Clear LOOP_VINFO_CAN_FULLY_MASK_P if a fully-masked loop is not
1733 supported, otherwise record the required mask types. */
1735 static void
1736 check_load_store_masking (loop_vec_info loop_vinfo, tree vectype,
1737 vec_load_store_type vls_type, int group_size,
1738 vect_memory_access_type memory_access_type,
1739 gather_scatter_info *gs_info)
1741 /* Invariant loads need no special support. */
1742 if (memory_access_type == VMAT_INVARIANT)
1743 return;
1745 vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
1746 machine_mode vecmode = TYPE_MODE (vectype);
1747 bool is_load = (vls_type == VLS_LOAD);
1748 if (memory_access_type == VMAT_LOAD_STORE_LANES)
1750 if (is_load
1751 ? !vect_load_lanes_supported (vectype, group_size, true)
1752 : !vect_store_lanes_supported (vectype, group_size, true))
1754 if (dump_enabled_p ())
1755 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1756 "can't use a fully-masked loop because the"
1757 " target doesn't have an appropriate masked"
1758 " load/store-lanes instruction.\n");
1759 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
1760 return;
1762 unsigned int ncopies = vect_get_num_copies (loop_vinfo, vectype);
1763 vect_record_loop_mask (loop_vinfo, masks, ncopies, vectype);
1764 return;
1767 if (memory_access_type == VMAT_GATHER_SCATTER)
1769 internal_fn ifn = (is_load
1770 ? IFN_MASK_GATHER_LOAD
1771 : IFN_MASK_SCATTER_STORE);
1772 tree offset_type = TREE_TYPE (gs_info->offset);
1773 if (!internal_gather_scatter_fn_supported_p (ifn, vectype,
1774 gs_info->memory_type,
1775 TYPE_SIGN (offset_type),
1776 gs_info->scale))
1778 if (dump_enabled_p ())
1779 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1780 "can't use a fully-masked loop because the"
1781 " target doesn't have an appropriate masked"
1782 " gather load or scatter store instruction.\n");
1783 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
1784 return;
1786 unsigned int ncopies = vect_get_num_copies (loop_vinfo, vectype);
1787 vect_record_loop_mask (loop_vinfo, masks, ncopies, vectype);
1788 return;
1791 if (memory_access_type != VMAT_CONTIGUOUS
1792 && memory_access_type != VMAT_CONTIGUOUS_PERMUTE)
1794 /* Element X of the data must come from iteration i * VF + X of the
1795 scalar loop. We need more work to support other mappings. */
1796 if (dump_enabled_p ())
1797 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1798 "can't use a fully-masked loop because an access"
1799 " isn't contiguous.\n");
1800 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
1801 return;
1804 machine_mode mask_mode;
1805 if (!(targetm.vectorize.get_mask_mode
1806 (GET_MODE_NUNITS (vecmode),
1807 GET_MODE_SIZE (vecmode)).exists (&mask_mode))
1808 || !can_vec_mask_load_store_p (vecmode, mask_mode, is_load))
1810 if (dump_enabled_p ())
1811 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1812 "can't use a fully-masked loop because the target"
1813 " doesn't have the appropriate masked load or"
1814 " store.\n");
1815 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
1816 return;
1818 /* We might load more scalars than we need for permuting SLP loads.
1819 We checked in get_group_load_store_type that the extra elements
1820 don't leak into a new vector. */
1821 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
1822 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1823 unsigned int nvectors;
1824 if (can_div_away_from_zero_p (group_size * vf, nunits, &nvectors))
1825 vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype);
1826 else
1827 gcc_unreachable ();
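/* A minimal standalone sketch (not vectorizer code; the helper name is
   hypothetical): the mask count recorded above is a ceiling division,
   i.e. the number of NUNITS-element vectors needed to cover
   GROUP_SIZE * VF scalar elements.  */
static unsigned
sketch_mask_vector_count (unsigned group_size, unsigned vf, unsigned nunits)
{
  /* Round up, matching division that rounds away from zero for
     positive values.  */
  return (group_size * vf + nunits - 1) / nunits;
}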
1830 /* Return the mask input to a masked load or store. VEC_MASK is the vectorized
1831 form of the scalar mask condition and LOOP_MASK, if nonnull, is the mask
1832 that needs to be applied to all loads and stores in a vectorized loop.
1833 Return VEC_MASK if LOOP_MASK is null, otherwise return VEC_MASK & LOOP_MASK.
1835 MASK_TYPE is the type of both masks. If new statements are needed,
1836 insert them before GSI. */
1838 static tree
1839 prepare_load_store_mask (tree mask_type, tree loop_mask, tree vec_mask,
1840 gimple_stmt_iterator *gsi)
1842 gcc_assert (useless_type_conversion_p (mask_type, TREE_TYPE (vec_mask)));
1843 if (!loop_mask)
1844 return vec_mask;
1846 gcc_assert (TREE_TYPE (loop_mask) == mask_type);
1847 tree and_res = make_temp_ssa_name (mask_type, NULL, "vec_mask_and");
1848 gimple *and_stmt = gimple_build_assign (and_res, BIT_AND_EXPR,
1849 vec_mask, loop_mask);
1850 gsi_insert_before (gsi, and_stmt, GSI_SAME_STMT);
1851 return and_res;
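/* A minimal standalone sketch (not vectorizer code; names are made up):
   at the element level the statement built above computes a bitwise AND,
   so an element is accessed only when both the scalar condition mask and
   the loop control mask are true.  Masks are modelled here as arrays of
   0/1 flags.  */
static void
sketch_combine_masks (const unsigned char vec_mask[],
                      const unsigned char loop_mask[],
                      unsigned char out[], unsigned n)
{
  for (unsigned i = 0; i < n; ++i)
    out[i] = vec_mask[i] & loop_mask[i];   /* VEC_MASK & LOOP_MASK */
}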
1854 /* Determine whether we can use a gather load or scatter store to vectorize
1855 strided load or store STMT by truncating the current offset to a smaller
1856 width. We need to be able to construct an offset vector:
1858 { 0, X, X*2, X*3, ... }
1860 without loss of precision, where X is STMT's DR_STEP.
1862 Return true if this is possible, describing the gather load or scatter
1863 store in GS_INFO. MASKED_P is true if the load or store is conditional. */
1865 static bool
1866 vect_truncate_gather_scatter_offset (gimple *stmt, loop_vec_info loop_vinfo,
1867 bool masked_p,
1868 gather_scatter_info *gs_info)
1870 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1871 data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
1872 tree step = DR_STEP (dr);
1873 if (TREE_CODE (step) != INTEGER_CST)
1875 /* ??? Perhaps we could use range information here? */
1876 if (dump_enabled_p ())
1877 dump_printf_loc (MSG_NOTE, vect_location,
1878 "cannot truncate variable step.\n");
1879 return false;
1882 /* Get the number of bits in an element. */
1883 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1884 scalar_mode element_mode = SCALAR_TYPE_MODE (TREE_TYPE (vectype));
1885 unsigned int element_bits = GET_MODE_BITSIZE (element_mode);
1887 /* Set COUNT to the upper limit on the number of elements - 1.
1888 Start with the maximum vectorization factor. */
1889 unsigned HOST_WIDE_INT count = vect_max_vf (loop_vinfo) - 1;
1891 /* Try lowering COUNT to the number of scalar latch iterations. */
1892 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1893 widest_int max_iters;
1894 if (max_loop_iterations (loop, &max_iters)
1895 && max_iters < count)
1896 count = max_iters.to_shwi ();
1898 /* Try scales of 1 and the element size. */
1899 int scales[] = { 1, vect_get_scalar_dr_size (dr) };
1900 bool overflow_p = false;
1901 for (int i = 0; i < 2; ++i)
1903 int scale = scales[i];
1904 widest_int factor;
1905 if (!wi::multiple_of_p (wi::to_widest (step), scale, SIGNED, &factor))
1906 continue;
1908 /* See whether we can calculate COUNT * STEP / SCALE
1909 in ELEMENT_BITS bits. */
1910 widest_int range = wi::mul (count, factor, SIGNED, &overflow_p);
1911 if (overflow_p)
1912 continue;
1913 signop sign = range >= 0 ? UNSIGNED : SIGNED;
1914 if (wi::min_precision (range, sign) > element_bits)
1916 overflow_p = true;
1917 continue;
1920 /* See whether the target supports the operation. */
1921 tree memory_type = TREE_TYPE (DR_REF (dr));
1922 if (!vect_gather_scatter_fn_p (DR_IS_READ (dr), masked_p, vectype,
1923 memory_type, element_bits, sign, scale,
1924 &gs_info->ifn, &gs_info->element_type))
1925 continue;
1927 tree offset_type = build_nonstandard_integer_type (element_bits,
1928 sign == UNSIGNED);
1930 gs_info->decl = NULL_TREE;
1931 /* Logically the sum of DR_BASE_ADDRESS, DR_INIT and DR_OFFSET,
1932 but we don't need to store that here. */
1933 gs_info->base = NULL_TREE;
1934 gs_info->offset = fold_convert (offset_type, step);
1935 gs_info->offset_dt = vect_constant_def;
1936 gs_info->offset_vectype = NULL_TREE;
1937 gs_info->scale = scale;
1938 gs_info->memory_type = memory_type;
1939 return true;
1942 if (overflow_p && dump_enabled_p ())
1943 dump_printf_loc (MSG_NOTE, vect_location,
1944 "truncating gather/scatter offset to %d bits"
1945 " might change its value.\n", element_bits);
1947 return false;
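/* A minimal standalone sketch of the precision test above (not vectorizer
   code; it models widest_int with 64-bit arithmetic and assumes the
   product does not overflow 64 bits).  STEP must be a multiple of SCALE;
   the largest offset needed is COUNT * STEP / SCALE, and it must be
   representable in ELEMENT_BITS bits, unsigned if non-negative and
   signed otherwise.  */
static int
sketch_offset_fits_p (long long step, long long scale,
                      long long count, unsigned element_bits)
{
  if (scale == 0 || step % scale != 0)
    return 0;
  long long range = count * (step / scale);
  if (element_bits >= 64)
    return 1;
  if (range >= 0)
    return (unsigned long long) range < (1ULL << element_bits);
  return range >= -(long long) (1ULL << (element_bits - 1));
}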
1950 /* Return true if we can use gather/scatter internal functions to
1951 vectorize STMT, which is a grouped or strided load or store.
1952 MASKED_P is true if load or store is conditional. When returning
1953 true, fill in GS_INFO with the information required to perform the
1954 operation. */
1956 static bool
1957 vect_use_strided_gather_scatters_p (gimple *stmt, loop_vec_info loop_vinfo,
1958 bool masked_p,
1959 gather_scatter_info *gs_info)
1961 if (!vect_check_gather_scatter (stmt, loop_vinfo, gs_info)
1962 || gs_info->decl)
1963 return vect_truncate_gather_scatter_offset (stmt, loop_vinfo,
1964 masked_p, gs_info);
1966 scalar_mode element_mode = SCALAR_TYPE_MODE (gs_info->element_type);
1967 unsigned int element_bits = GET_MODE_BITSIZE (element_mode);
1968 tree offset_type = TREE_TYPE (gs_info->offset);
1969 unsigned int offset_bits = TYPE_PRECISION (offset_type);
1971 /* Enforced by vect_check_gather_scatter. */
1972 gcc_assert (element_bits >= offset_bits);
1974 /* If the elements are wider than the offset, convert the offset to the
1975 same width, without changing its sign. */
1976 if (element_bits > offset_bits)
1978 bool unsigned_p = TYPE_UNSIGNED (offset_type);
1979 offset_type = build_nonstandard_integer_type (element_bits, unsigned_p);
1980 gs_info->offset = fold_convert (offset_type, gs_info->offset);
1983 if (dump_enabled_p ())
1984 dump_printf_loc (MSG_NOTE, vect_location,
1985 "using gather/scatter for strided/grouped access,"
1986 " scale = %d\n", gs_info->scale);
1988 return true;
1991 /* STMT is a non-strided load or store, meaning that it accesses
1992 elements with a known constant step. Return -1 if that step
1993 is negative, 0 if it is zero, and 1 if it is greater than zero. */
1995 static int
1996 compare_step_with_zero (gimple *stmt)
1998 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1999 data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
2000 return tree_int_cst_compare (vect_dr_behavior (dr)->step,
2001 size_zero_node);
2004 /* If the target supports a permute mask that reverses the elements in
2005 a vector of type VECTYPE, return that mask, otherwise return null. */
2007 static tree
2008 perm_mask_for_reverse (tree vectype)
2010 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2012 /* The encoding has a single stepped pattern. */
2013 vec_perm_builder sel (nunits, 1, 3);
2014 for (int i = 0; i < 3; ++i)
2015 sel.quick_push (nunits - 1 - i);
2017 vec_perm_indices indices (sel, 1, nunits);
2018 if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
2019 return NULL_TREE;
2020 return vect_gen_perm_mask_checked (vectype, indices);
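/* A minimal standalone sketch (not vectorizer code): the permutation that
   perm_mask_for_reverse encodes.  For an N-element vector the selector is
   { N-1, N-2, ..., 1, 0 }; the builder above records only the first three
   elements of that single stepped pattern and vec_perm_indices extends it
   to the full vector.  */
static void
sketch_reverse_selector (unsigned nunits, unsigned sel[])
{
  for (unsigned i = 0; i < nunits; ++i)
    sel[i] = nunits - 1 - i;
}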
2023 /* STMT is either a masked or unconditional store. Return the value
2024 being stored. */
2026 tree
2027 vect_get_store_rhs (gimple *stmt)
2029 if (gassign *assign = dyn_cast <gassign *> (stmt))
2031 gcc_assert (gimple_assign_single_p (assign));
2032 return gimple_assign_rhs1 (assign);
2034 if (gcall *call = dyn_cast <gcall *> (stmt))
2036 internal_fn ifn = gimple_call_internal_fn (call);
2037 int index = internal_fn_stored_value_index (ifn);
2038 gcc_assert (index >= 0);
2039 return gimple_call_arg (stmt, index);
2041 gcc_unreachable ();
2044 /* A subroutine of get_load_store_type, with a subset of the same
2045 arguments. Handle the case where STMT is part of a grouped load
2046 or store.
2048 For stores, the statements in the group are all consecutive
2049 and there is no gap at the end. For loads, the statements in the
2050 group might not be consecutive; there can be gaps between statements
2051 as well as at the end. */
2053 static bool
2054 get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
2055 bool masked_p, vec_load_store_type vls_type,
2056 vect_memory_access_type *memory_access_type,
2057 gather_scatter_info *gs_info)
2059 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2060 vec_info *vinfo = stmt_info->vinfo;
2061 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2062 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
2063 gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
2064 data_reference *first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
2065 unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
2066 bool single_element_p = (stmt == first_stmt
2067 && !GROUP_NEXT_ELEMENT (stmt_info));
2068 unsigned HOST_WIDE_INT gap = GROUP_GAP (vinfo_for_stmt (first_stmt));
2069 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2071 /* True if the vectorized statements would access beyond the last
2072 statement in the group. */
2073 bool overrun_p = false;
2075 /* True if we can cope with such overrun by peeling for gaps, so that
2076 there is at least one final scalar iteration after the vector loop. */
2077 bool can_overrun_p = (!masked_p
2078 && vls_type == VLS_LOAD
2079 && loop_vinfo
2080 && !loop->inner);
2082 /* There can only be a gap at the end of the group if the stride is
2083 known at compile time. */
2084 gcc_assert (!STMT_VINFO_STRIDED_P (stmt_info) || gap == 0);
2086 /* Stores can't yet have gaps. */
2087 gcc_assert (slp || vls_type == VLS_LOAD || gap == 0);
2089 if (slp)
2091 if (STMT_VINFO_STRIDED_P (stmt_info))
2093 /* Try to use consecutive accesses of GROUP_SIZE elements,
2094 separated by the stride, until we have a complete vector.
2095 Fall back to scalar accesses if that isn't possible. */
2096 if (multiple_p (nunits, group_size))
2097 *memory_access_type = VMAT_STRIDED_SLP;
2098 else
2099 *memory_access_type = VMAT_ELEMENTWISE;
2101 else
2103 overrun_p = loop_vinfo && gap != 0;
2104 if (overrun_p && vls_type != VLS_LOAD)
2106 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2107 "Grouped store with gaps requires"
2108 " non-consecutive accesses\n");
2109 return false;
2111 /* An overrun is fine if the trailing elements are smaller
2112 than the alignment boundary B. Every vector access will
2113 be a multiple of B and so we are guaranteed to access a
2114 non-gap element in the same B-sized block. */
2115 if (overrun_p
2116 && gap < (vect_known_alignment_in_bytes (first_dr)
2117 / vect_get_scalar_dr_size (first_dr)))
2118 overrun_p = false;
2119 if (overrun_p && !can_overrun_p)
2121 if (dump_enabled_p ())
2122 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2123 "Peeling for outer loop is not supported\n");
2124 return false;
2126 *memory_access_type = VMAT_CONTIGUOUS;
2129 else
2131 /* We can always handle this case using elementwise accesses,
2132 but see if something more efficient is available. */
2133 *memory_access_type = VMAT_ELEMENTWISE;
2135 /* If there is a gap at the end of the group then these optimizations
2136 would access excess elements in the last iteration. */
2137 bool would_overrun_p = (gap != 0);
2138 /* An overrun is fine if the trailing elements are smaller than the
2139 alignment boundary B. Every vector access will be a multiple of B
2140 and so we are guaranteed to access a non-gap element in the
2141 same B-sized block. */
2142 if (would_overrun_p
2143 && !masked_p
2144 && gap < (vect_known_alignment_in_bytes (first_dr)
2145 / vect_get_scalar_dr_size (first_dr)))
2146 would_overrun_p = false;
2148 if (!STMT_VINFO_STRIDED_P (stmt_info)
2149 && (can_overrun_p || !would_overrun_p)
2150 && compare_step_with_zero (stmt) > 0)
2152 /* First cope with the degenerate case of a single-element
2153 vector. */
2154 if (known_eq (TYPE_VECTOR_SUBPARTS (vectype), 1U))
2155 *memory_access_type = VMAT_CONTIGUOUS;
2157 /* Otherwise try using LOAD/STORE_LANES. */
2158 if (*memory_access_type == VMAT_ELEMENTWISE
2159 && (vls_type == VLS_LOAD
2160 ? vect_load_lanes_supported (vectype, group_size, masked_p)
2161 : vect_store_lanes_supported (vectype, group_size,
2162 masked_p)))
2164 *memory_access_type = VMAT_LOAD_STORE_LANES;
2165 overrun_p = would_overrun_p;
2168 /* If that fails, try using permuting loads. */
2169 if (*memory_access_type == VMAT_ELEMENTWISE
2170 && (vls_type == VLS_LOAD
2171 ? vect_grouped_load_supported (vectype, single_element_p,
2172 group_size)
2173 : vect_grouped_store_supported (vectype, group_size)))
2175 *memory_access_type = VMAT_CONTIGUOUS_PERMUTE;
2176 overrun_p = would_overrun_p;
2180 /* As a last resort, try using a gather load or scatter store.
2182 ??? Although the code can handle all group sizes correctly,
2183 it probably isn't a win to use separate strided accesses based
2184 on nearby locations. Or, even if it's a win over scalar code,
2185 it might not be a win over vectorizing at a lower VF, if that
2186 allows us to use contiguous accesses. */
2187 if (*memory_access_type == VMAT_ELEMENTWISE
2188 && single_element_p
2189 && loop_vinfo
2190 && vect_use_strided_gather_scatters_p (stmt, loop_vinfo,
2191 masked_p, gs_info))
2192 *memory_access_type = VMAT_GATHER_SCATTER;
2195 if (vls_type != VLS_LOAD && first_stmt == stmt)
2197 /* STMT is the leader of the group. Check the operands of all the
2198 stmts of the group. */
2199 gimple *next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
2200 while (next_stmt)
2202 tree op = vect_get_store_rhs (next_stmt);
2203 gimple *def_stmt;
2204 enum vect_def_type dt;
2205 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
2207 if (dump_enabled_p ())
2208 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2209 "use not simple.\n");
2210 return false;
2212 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
2216 if (overrun_p)
2218 gcc_assert (can_overrun_p);
2219 if (dump_enabled_p ())
2220 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2221 "Data access with gaps requires scalar "
2222 "epilogue loop\n");
2223 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
2226 return true;
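/* A minimal standalone sketch (not vectorizer code; the helper name is
   made up) of the overrun test used twice above: reading past the last
   group member is treated as safe when the access is known to be aligned
   to B bytes and the trailing gap covers fewer than B / element_size
   elements, since every vector access then stays inside an aligned
   B-sized block that also contains a real element.  */
static int
sketch_overrun_is_safe_p (unsigned gap_elements,
                          unsigned known_alignment_bytes,
                          unsigned element_size_bytes)
{
  return gap_elements < known_alignment_bytes / element_size_bytes;
}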
2229 /* A subroutine of get_load_store_type, with a subset of the same
2230 arguments. Handle the case where STMT is a load or store that
2231 accesses consecutive elements with a negative step. */
2233 static vect_memory_access_type
2234 get_negative_load_store_type (gimple *stmt, tree vectype,
2235 vec_load_store_type vls_type,
2236 unsigned int ncopies)
2238 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2239 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
2240 dr_alignment_support alignment_support_scheme;
2242 if (ncopies > 1)
2244 if (dump_enabled_p ())
2245 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2246 "multiple types with negative step.\n");
2247 return VMAT_ELEMENTWISE;
2250 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
2251 if (alignment_support_scheme != dr_aligned
2252 && alignment_support_scheme != dr_unaligned_supported)
2254 if (dump_enabled_p ())
2255 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2256 "negative step but alignment required.\n");
2257 return VMAT_ELEMENTWISE;
2260 if (vls_type == VLS_STORE_INVARIANT)
2262 if (dump_enabled_p ())
2263 dump_printf_loc (MSG_NOTE, vect_location,
2264 "negative step with invariant source;"
2265 " no permute needed.\n");
2266 return VMAT_CONTIGUOUS_DOWN;
2269 if (!perm_mask_for_reverse (vectype))
2271 if (dump_enabled_p ())
2272 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2273 "negative step and reversing not supported.\n");
2274 return VMAT_ELEMENTWISE;
2277 return VMAT_CONTIGUOUS_REVERSE;
2280 /* Analyze load or store statement STMT of type VLS_TYPE. Return true
2281 if there is a memory access type that the vectorized form can use,
2282 storing it in *MEMORY_ACCESS_TYPE if so. If we decide to use gathers
2283 or scatters, fill in GS_INFO accordingly.
2285 SLP says whether we're performing SLP rather than loop vectorization.
2286 MASKED_P is true if the statement is conditional on a vectorized mask.
2287 VECTYPE is the vector type that the vectorized statements will use.
2288 NCOPIES is the number of vector statements that will be needed. */
2290 static bool
2291 get_load_store_type (gimple *stmt, tree vectype, bool slp, bool masked_p,
2292 vec_load_store_type vls_type, unsigned int ncopies,
2293 vect_memory_access_type *memory_access_type,
2294 gather_scatter_info *gs_info)
2296 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2297 vec_info *vinfo = stmt_info->vinfo;
2298 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2299 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2300 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
2302 *memory_access_type = VMAT_GATHER_SCATTER;
2303 gimple *def_stmt;
2304 if (!vect_check_gather_scatter (stmt, loop_vinfo, gs_info))
2305 gcc_unreachable ();
2306 else if (!vect_is_simple_use (gs_info->offset, vinfo, &def_stmt,
2307 &gs_info->offset_dt,
2308 &gs_info->offset_vectype))
2310 if (dump_enabled_p ())
2311 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2312 "%s index use not simple.\n",
2313 vls_type == VLS_LOAD ? "gather" : "scatter");
2314 return false;
2317 else if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
2319 if (!get_group_load_store_type (stmt, vectype, slp, masked_p, vls_type,
2320 memory_access_type, gs_info))
2321 return false;
2323 else if (STMT_VINFO_STRIDED_P (stmt_info))
2325 gcc_assert (!slp);
2326 if (loop_vinfo
2327 && vect_use_strided_gather_scatters_p (stmt, loop_vinfo,
2328 masked_p, gs_info))
2329 *memory_access_type = VMAT_GATHER_SCATTER;
2330 else
2331 *memory_access_type = VMAT_ELEMENTWISE;
2333 else
2335 int cmp = compare_step_with_zero (stmt);
2336 if (cmp < 0)
2337 *memory_access_type = get_negative_load_store_type
2338 (stmt, vectype, vls_type, ncopies);
2339 else if (cmp == 0)
2341 gcc_assert (vls_type == VLS_LOAD);
2342 *memory_access_type = VMAT_INVARIANT;
2344 else
2345 *memory_access_type = VMAT_CONTIGUOUS;
2348 if ((*memory_access_type == VMAT_ELEMENTWISE
2349 || *memory_access_type == VMAT_STRIDED_SLP)
2350 && !nunits.is_constant ())
2352 if (dump_enabled_p ())
2353 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2354 "Not using elementwise accesses due to variable "
2355 "vectorization factor.\n");
2356 return false;
2359 /* FIXME: At the moment the cost model seems to underestimate the
2360 cost of using elementwise accesses. This check preserves the
2361 traditional behavior until that can be fixed. */
2362 if (*memory_access_type == VMAT_ELEMENTWISE
2363 && !STMT_VINFO_STRIDED_P (stmt_info)
2364 && !(stmt == GROUP_FIRST_ELEMENT (stmt_info)
2365 && !GROUP_NEXT_ELEMENT (stmt_info)
2366 && !pow2p_hwi (GROUP_SIZE (stmt_info))))
2368 if (dump_enabled_p ())
2369 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2370 "not falling back to elementwise accesses\n");
2371 return false;
2373 return true;
2376 /* Return true if boolean argument MASK is suitable for vectorizing
2377 conditional load or store STMT. When returning true, store the type
2378 of the definition in *MASK_DT_OUT and the type of the vectorized mask
2379 in *MASK_VECTYPE_OUT. */
2381 static bool
2382 vect_check_load_store_mask (gimple *stmt, tree mask,
2383 vect_def_type *mask_dt_out,
2384 tree *mask_vectype_out)
2386 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (mask)))
2388 if (dump_enabled_p ())
2389 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2390 "mask argument is not a boolean.\n");
2391 return false;
2394 if (TREE_CODE (mask) != SSA_NAME)
2396 if (dump_enabled_p ())
2397 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2398 "mask argument is not an SSA name.\n");
2399 return false;
2402 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2403 gimple *def_stmt;
2404 enum vect_def_type mask_dt;
2405 tree mask_vectype;
2406 if (!vect_is_simple_use (mask, stmt_info->vinfo, &def_stmt, &mask_dt,
2407 &mask_vectype))
2409 if (dump_enabled_p ())
2410 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2411 "mask use not simple.\n");
2412 return false;
2415 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2416 if (!mask_vectype)
2417 mask_vectype = get_mask_type_for_scalar_type (TREE_TYPE (vectype));
2419 if (!mask_vectype || !VECTOR_BOOLEAN_TYPE_P (mask_vectype))
2421 if (dump_enabled_p ())
2422 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2423 "could not find an appropriate vector mask type.\n");
2424 return false;
2427 if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_vectype),
2428 TYPE_VECTOR_SUBPARTS (vectype)))
2430 if (dump_enabled_p ())
2432 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2433 "vector mask type ");
2434 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, mask_vectype);
2435 dump_printf (MSG_MISSED_OPTIMIZATION,
2436 " does not match vector data type ");
2437 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, vectype);
2438 dump_printf (MSG_MISSED_OPTIMIZATION, ".\n");
2440 return false;
2443 *mask_dt_out = mask_dt;
2444 *mask_vectype_out = mask_vectype;
2445 return true;
2448 /* Return true if stored value RHS is suitable for vectorizing store
2449 statement STMT. When returning true, store the type of the
2450 definition in *RHS_DT_OUT, the type of the vectorized store value in
2451 *RHS_VECTYPE_OUT and the type of the store in *VLS_TYPE_OUT. */
2453 static bool
2454 vect_check_store_rhs (gimple *stmt, tree rhs, vect_def_type *rhs_dt_out,
2455 tree *rhs_vectype_out, vec_load_store_type *vls_type_out)
2457 /* In the case this is a store from a constant make sure
2458 native_encode_expr can handle it. */
2459 if (CONSTANT_CLASS_P (rhs) && native_encode_expr (rhs, NULL, 64) == 0)
2461 if (dump_enabled_p ())
2462 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2463 "cannot encode constant as a byte sequence.\n");
2464 return false;
2467 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2468 gimple *def_stmt;
2469 enum vect_def_type rhs_dt;
2470 tree rhs_vectype;
2471 if (!vect_is_simple_use (rhs, stmt_info->vinfo, &def_stmt, &rhs_dt,
2472 &rhs_vectype))
2474 if (dump_enabled_p ())
2475 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2476 "use not simple.\n");
2477 return false;
2480 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2481 if (rhs_vectype && !useless_type_conversion_p (vectype, rhs_vectype))
2483 if (dump_enabled_p ())
2484 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2485 "incompatible vector types.\n");
2486 return false;
2489 *rhs_dt_out = rhs_dt;
2490 *rhs_vectype_out = rhs_vectype;
2491 if (rhs_dt == vect_constant_def || rhs_dt == vect_external_def)
2492 *vls_type_out = VLS_STORE_INVARIANT;
2493 else
2494 *vls_type_out = VLS_STORE;
2495 return true;
2498 /* Build an all-ones vector mask of type MASKTYPE while vectorizing STMT.
2499 Note that we support masks with floating-point type, in which case the
2500 floats are interpreted as a bitmask. */
2502 static tree
2503 vect_build_all_ones_mask (gimple *stmt, tree masktype)
2505 if (TREE_CODE (masktype) == INTEGER_TYPE)
2506 return build_int_cst (masktype, -1);
2507 else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
2509 tree mask = build_int_cst (TREE_TYPE (masktype), -1);
2510 mask = build_vector_from_val (masktype, mask);
2511 return vect_init_vector (stmt, mask, masktype, NULL);
2513 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
2515 REAL_VALUE_TYPE r;
2516 long tmp[6];
2517 for (int j = 0; j < 6; ++j)
2518 tmp[j] = -1;
2519 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
2520 tree mask = build_real (TREE_TYPE (masktype), r);
2521 mask = build_vector_from_val (masktype, mask);
2522 return vect_init_vector (stmt, mask, masktype, NULL);
2524 gcc_unreachable ();
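/* A minimal standalone sketch (not vectorizer code): what an "all-ones"
   floating-point mask element means.  The float is never used
   arithmetically; only its bit pattern matters, so the value is built by
   reinterpreting a word with every bit set.  Assumes 32-bit float and
   32-bit unsigned int.  */
static float
sketch_all_ones_float_mask (void)
{
  union { unsigned int bits; float value; } u;
  u.bits = 0xffffffffu;   /* every mask bit set */
  return u.value;         /* a NaN pattern, used only as a bitmask */
}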
2527 /* Build an all-zero merge value of type VECTYPE while vectorizing
2528 STMT as a gather load. */
2530 static tree
2531 vect_build_zero_merge_argument (gimple *stmt, tree vectype)
2533 tree merge;
2534 if (TREE_CODE (TREE_TYPE (vectype)) == INTEGER_TYPE)
2535 merge = build_int_cst (TREE_TYPE (vectype), 0);
2536 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (vectype)))
2538 REAL_VALUE_TYPE r;
2539 long tmp[6];
2540 for (int j = 0; j < 6; ++j)
2541 tmp[j] = 0;
2542 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (vectype)));
2543 merge = build_real (TREE_TYPE (vectype), r);
2545 else
2546 gcc_unreachable ();
2547 merge = build_vector_from_val (vectype, merge);
2548 return vect_init_vector (stmt, merge, vectype, NULL);
2551 /* Build a gather load call while vectorizing STMT. Insert new instructions
2552 before GSI and add them to VEC_STMT. GS_INFO describes the gather load
2553 operation. If the load is conditional, MASK is the unvectorized
2554 condition and MASK_DT is its definition type, otherwise MASK is null. */
2556 static void
2557 vect_build_gather_load_calls (gimple *stmt, gimple_stmt_iterator *gsi,
2558 gimple **vec_stmt, gather_scatter_info *gs_info,
2559 tree mask, vect_def_type mask_dt)
2561 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2562 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2563 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2564 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2565 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2566 int ncopies = vect_get_num_copies (loop_vinfo, vectype);
2567 edge pe = loop_preheader_edge (loop);
2568 enum { NARROW, NONE, WIDEN } modifier;
2569 poly_uint64 gather_off_nunits
2570 = TYPE_VECTOR_SUBPARTS (gs_info->offset_vectype);
2572 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info->decl));
2573 tree rettype = TREE_TYPE (TREE_TYPE (gs_info->decl));
2574 tree srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2575 tree ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2576 tree idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2577 tree masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2578 tree scaletype = TREE_VALUE (arglist);
2579 gcc_checking_assert (types_compatible_p (srctype, rettype)
2580 && (!mask || types_compatible_p (srctype, masktype)));
2582 tree perm_mask = NULL_TREE;
2583 tree mask_perm_mask = NULL_TREE;
2584 if (known_eq (nunits, gather_off_nunits))
2585 modifier = NONE;
2586 else if (known_eq (nunits * 2, gather_off_nunits))
2588 modifier = WIDEN;
2590 /* Currently widening gathers and scatters are only supported for
2591 fixed-length vectors. */
2592 int count = gather_off_nunits.to_constant ();
2593 vec_perm_builder sel (count, count, 1);
2594 for (int i = 0; i < count; ++i)
2595 sel.quick_push (i | (count / 2));
2597 vec_perm_indices indices (sel, 1, count);
2598 perm_mask = vect_gen_perm_mask_checked (gs_info->offset_vectype,
2599 indices);
2601 else if (known_eq (nunits, gather_off_nunits * 2))
2603 modifier = NARROW;
2605 /* Currently narrowing gathers and scatters are only supported for
2606 fixed-length vectors. */
2607 int count = nunits.to_constant ();
2608 vec_perm_builder sel (count, count, 1);
2609 sel.quick_grow (count);
2610 for (int i = 0; i < count; ++i)
2611 sel[i] = i < count / 2 ? i : i + count / 2;
2612 vec_perm_indices indices (sel, 2, count);
2613 perm_mask = vect_gen_perm_mask_checked (vectype, indices);
2615 ncopies *= 2;
2617 if (mask)
2619 for (int i = 0; i < count; ++i)
2620 sel[i] = i | (count / 2);
2621 indices.new_vector (sel, 2, count);
2622 mask_perm_mask = vect_gen_perm_mask_checked (masktype, indices);
2625 else
2626 gcc_unreachable ();
2628 tree vec_dest = vect_create_destination_var (gimple_get_lhs (stmt),
2629 vectype);
2631 tree ptr = fold_convert (ptrtype, gs_info->base);
2632 if (!is_gimple_min_invariant (ptr))
2634 gimple_seq seq;
2635 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
2636 basic_block new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
2637 gcc_assert (!new_bb);
2640 tree scale = build_int_cst (scaletype, gs_info->scale);
2642 tree vec_oprnd0 = NULL_TREE;
2643 tree vec_mask = NULL_TREE;
2644 tree src_op = NULL_TREE;
2645 tree mask_op = NULL_TREE;
2646 tree prev_res = NULL_TREE;
2647 stmt_vec_info prev_stmt_info = NULL;
2649 if (!mask)
2651 src_op = vect_build_zero_merge_argument (stmt, rettype);
2652 mask_op = vect_build_all_ones_mask (stmt, masktype);
2655 for (int j = 0; j < ncopies; ++j)
2657 tree op, var;
2658 gimple *new_stmt;
2659 if (modifier == WIDEN && (j & 1))
2660 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
2661 perm_mask, stmt, gsi);
2662 else if (j == 0)
2663 op = vec_oprnd0
2664 = vect_get_vec_def_for_operand (gs_info->offset, stmt);
2665 else
2666 op = vec_oprnd0
2667 = vect_get_vec_def_for_stmt_copy (gs_info->offset_dt, vec_oprnd0);
2669 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
2671 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
2672 TYPE_VECTOR_SUBPARTS (idxtype)));
2673 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
2674 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
2675 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
2676 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2677 op = var;
2680 if (mask)
2682 if (mask_perm_mask && (j & 1))
2683 mask_op = permute_vec_elements (mask_op, mask_op,
2684 mask_perm_mask, stmt, gsi);
2685 else
2687 if (j == 0)
2688 vec_mask = vect_get_vec_def_for_operand (mask, stmt);
2689 else
2690 vec_mask = vect_get_vec_def_for_stmt_copy (mask_dt, vec_mask);
2692 mask_op = vec_mask;
2693 if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
2695 gcc_assert
2696 (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op)),
2697 TYPE_VECTOR_SUBPARTS (masktype)));
2698 var = vect_get_new_ssa_name (masktype, vect_simple_var);
2699 mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
2700 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR,
2701 mask_op);
2702 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2703 mask_op = var;
2706 src_op = mask_op;
2709 new_stmt = gimple_build_call (gs_info->decl, 5, src_op, ptr, op,
2710 mask_op, scale);
2712 if (!useless_type_conversion_p (vectype, rettype))
2714 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype),
2715 TYPE_VECTOR_SUBPARTS (rettype)));
2716 op = vect_get_new_ssa_name (rettype, vect_simple_var);
2717 gimple_call_set_lhs (new_stmt, op);
2718 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2719 var = make_ssa_name (vec_dest);
2720 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
2721 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
2723 else
2725 var = make_ssa_name (vec_dest, new_stmt);
2726 gimple_call_set_lhs (new_stmt, var);
2729 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2731 if (modifier == NARROW)
2733 if ((j & 1) == 0)
2735 prev_res = var;
2736 continue;
2738 var = permute_vec_elements (prev_res, var, perm_mask, stmt, gsi);
2739 new_stmt = SSA_NAME_DEF_STMT (var);
2742 if (prev_stmt_info == NULL)
2743 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2744 else
2745 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2746 prev_stmt_info = vinfo_for_stmt (new_stmt);
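/* A minimal standalone sketch (not vectorizer code): the selector
   arithmetic used for the WIDEN and NARROW cases above, written out for
   a concrete COUNT.  With COUNT == 8, the WIDEN selector i | (COUNT / 2)
   is { 4, 5, 6, 7, 4, 5, 6, 7 } (the high half of the offset vector,
   repeated), and the NARROW selector i < COUNT/2 ? i : i + COUNT/2 is
   { 0, 1, 2, 3, 8, 9, 10, 11 } (the low half of each of the two input
   vectors).  */
static void
sketch_gather_selectors (unsigned count, unsigned widen_sel[],
                         unsigned narrow_sel[])
{
  for (unsigned i = 0; i < count; ++i)
    {
      widen_sel[i] = i | (count / 2);
      narrow_sel[i] = i < count / 2 ? i : i + count / 2;
    }
}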
2750 /* Prepare the base and offset in GS_INFO for vectorization.
2751 Set *DATAREF_PTR to the loop-invariant base address and *VEC_OFFSET
2752 to the vectorized offset argument for the first copy of STMT. STMT
2753 is the statement described by GS_INFO and LOOP is the containing loop. */
2755 static void
2756 vect_get_gather_scatter_ops (struct loop *loop, gimple *stmt,
2757 gather_scatter_info *gs_info,
2758 tree *dataref_ptr, tree *vec_offset)
2760 gimple_seq stmts = NULL;
2761 *dataref_ptr = force_gimple_operand (gs_info->base, &stmts, true, NULL_TREE);
2762 if (stmts != NULL)
2764 basic_block new_bb;
2765 edge pe = loop_preheader_edge (loop);
2766 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
2767 gcc_assert (!new_bb);
2769 tree offset_type = TREE_TYPE (gs_info->offset);
2770 tree offset_vectype = get_vectype_for_scalar_type (offset_type);
2771 *vec_offset = vect_get_vec_def_for_operand (gs_info->offset, stmt,
2772 offset_vectype);
2775 /* Prepare to implement a grouped or strided load or store using
2776 the gather load or scatter store operation described by GS_INFO.
2777 STMT is the load or store statement.
2779 Set *DATAREF_BUMP to the amount that should be added to the base
2780 address after each copy of the vectorized statement. Set *VEC_OFFSET
2781 to an invariant offset vector in which element I has the value
2782 I * DR_STEP / SCALE. */
2784 static void
2785 vect_get_strided_load_store_ops (gimple *stmt, loop_vec_info loop_vinfo,
2786 gather_scatter_info *gs_info,
2787 tree *dataref_bump, tree *vec_offset)
2789 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2790 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
2791 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2792 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2793 gimple_seq stmts;
2795 tree bump = size_binop (MULT_EXPR,
2796 fold_convert (sizetype, DR_STEP (dr)),
2797 size_int (TYPE_VECTOR_SUBPARTS (vectype)));
2798 *dataref_bump = force_gimple_operand (bump, &stmts, true, NULL_TREE);
2799 if (stmts)
2800 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
2802 /* The offset given in GS_INFO can have pointer type, so use the element
2803 type of the vector instead. */
2804 tree offset_type = TREE_TYPE (gs_info->offset);
2805 tree offset_vectype = get_vectype_for_scalar_type (offset_type);
2806 offset_type = TREE_TYPE (offset_vectype);
2808 /* Calculate X = DR_STEP / SCALE and convert it to the appropriate type. */
2809 tree step = size_binop (EXACT_DIV_EXPR, DR_STEP (dr),
2810 ssize_int (gs_info->scale));
2811 step = fold_convert (offset_type, step);
2812 step = force_gimple_operand (step, &stmts, true, NULL_TREE);
2814 /* Create {0, X, X*2, X*3, ...}. */
2815 *vec_offset = gimple_build (&stmts, VEC_SERIES_EXPR, offset_vectype,
2816 build_zero_cst (offset_type), step);
2817 if (stmts)
2818 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
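/* A minimal standalone sketch (not vectorizer code; it uses plain 64-bit
   integers instead of trees): the two quantities built above, for a
   hypothetical DR_STEP of 12 bytes, SCALE 4 and a 4-element vector.  The
   base pointer is bumped by DR_STEP * NUNITS = 48 bytes per copy, and the
   invariant offset vector holds I * DR_STEP / SCALE, i.e. { 0, 3, 6, 9 }.  */
static void
sketch_strided_gather_ops (long long dr_step, long long scale,
                           unsigned nunits, long long *bump,
                           long long offsets[])
{
  *bump = dr_step * (long long) nunits;
  for (unsigned i = 0; i < nunits; ++i)
    offsets[i] = (long long) i * (dr_step / scale);
}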
2821 /* Return the amount that should be added to a vector pointer to move
2822 to the next or previous copy of AGGR_TYPE. DR is the data reference
2823 being vectorized and MEMORY_ACCESS_TYPE describes the type of
2824 vectorization. */
2826 static tree
2827 vect_get_data_ptr_increment (data_reference *dr, tree aggr_type,
2828 vect_memory_access_type memory_access_type)
2830 if (memory_access_type == VMAT_INVARIANT)
2831 return size_zero_node;
2833 tree iv_step = TYPE_SIZE_UNIT (aggr_type);
2834 tree step = vect_dr_behavior (dr)->step;
2835 if (tree_int_cst_sgn (step) == -1)
2836 iv_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv_step), iv_step);
2837 return iv_step;
2840 /* Check and perform vectorization of BUILT_IN_BSWAP{16,32,64}. */
2842 static bool
2843 vectorizable_bswap (gimple *stmt, gimple_stmt_iterator *gsi,
2844 gimple **vec_stmt, slp_tree slp_node,
2845 tree vectype_in, enum vect_def_type *dt)
2847 tree op, vectype;
2848 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2849 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2850 unsigned ncopies;
2851 unsigned HOST_WIDE_INT nunits, num_bytes;
2853 op = gimple_call_arg (stmt, 0);
2854 vectype = STMT_VINFO_VECTYPE (stmt_info);
2856 if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nunits))
2857 return false;
2859 /* Multiple types in SLP are handled by creating the appropriate number of
2860 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2861 case of SLP. */
2862 if (slp_node)
2863 ncopies = 1;
2864 else
2865 ncopies = vect_get_num_copies (loop_vinfo, vectype);
2867 gcc_assert (ncopies >= 1);
2869 tree char_vectype = get_same_sized_vectype (char_type_node, vectype_in);
2870 if (! char_vectype)
2871 return false;
2873 if (!TYPE_VECTOR_SUBPARTS (char_vectype).is_constant (&num_bytes))
2874 return false;
2876 unsigned word_bytes = num_bytes / nunits;
2878 /* The encoding uses one stepped pattern for each byte in the word. */
2879 vec_perm_builder elts (num_bytes, word_bytes, 3);
2880 for (unsigned i = 0; i < 3; ++i)
2881 for (unsigned j = 0; j < word_bytes; ++j)
2882 elts.quick_push ((i + 1) * word_bytes - j - 1);
2884 vec_perm_indices indices (elts, 1, num_bytes);
2885 if (!can_vec_perm_const_p (TYPE_MODE (char_vectype), indices))
2886 return false;
2888 if (! vec_stmt)
2890 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2891 if (dump_enabled_p ())
2892 dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_bswap ==="
2893 "\n");
2894 if (! PURE_SLP_STMT (stmt_info))
2896 add_stmt_cost (stmt_info->vinfo->target_cost_data,
2897 1, vector_stmt, stmt_info, 0, vect_prologue);
2898 add_stmt_cost (stmt_info->vinfo->target_cost_data,
2899 ncopies, vec_perm, stmt_info, 0, vect_body);
2901 return true;
2904 tree bswap_vconst = vec_perm_indices_to_tree (char_vectype, indices);
2906 /* Transform. */
2907 vec<tree> vec_oprnds = vNULL;
2908 gimple *new_stmt = NULL;
2909 stmt_vec_info prev_stmt_info = NULL;
2910 for (unsigned j = 0; j < ncopies; j++)
2912 /* Handle uses. */
2913 if (j == 0)
2914 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
2915 else
2916 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
2918 /* Arguments are ready. Create the new vector stmt. */
2919 unsigned i;
2920 tree vop;
2921 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
2923 tree tem = make_ssa_name (char_vectype);
2924 new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
2925 char_vectype, vop));
2926 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2927 tree tem2 = make_ssa_name (char_vectype);
2928 new_stmt = gimple_build_assign (tem2, VEC_PERM_EXPR,
2929 tem, tem, bswap_vconst);
2930 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2931 tem = make_ssa_name (vectype);
2932 new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
2933 vectype, tem2));
2934 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2935 if (slp_node)
2936 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2939 if (slp_node)
2940 continue;
2942 if (j == 0)
2943 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2944 else
2945 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2947 prev_stmt_info = vinfo_for_stmt (new_stmt);
2950 vec_oprnds.release ();
2951 return true;
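/* A minimal standalone sketch (not vectorizer code): the byte selector
   that the permutation above expands to.  For BUILT_IN_BSWAP32 on a
   16-byte vector (WORD_BYTES == 4, NUM_BYTES == 16) the full selector is
   { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 }, i.e. the
   bytes of each word reversed in place.  */
static void
sketch_bswap_selector (unsigned num_bytes, unsigned word_bytes,
                       unsigned sel[])
{
  for (unsigned word = 0; word < num_bytes / word_bytes; ++word)
    for (unsigned j = 0; j < word_bytes; ++j)
      sel[word * word_bytes + j] = (word + 1) * word_bytes - j - 1;
}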
2954 /* Return true if vector types VECTYPE_IN and VECTYPE_OUT have
2955 integer elements and if we can narrow VECTYPE_IN to VECTYPE_OUT
2956 in a single step. On success, store the binary pack code in
2957 *CONVERT_CODE. */
2959 static bool
2960 simple_integer_narrowing (tree vectype_out, tree vectype_in,
2961 tree_code *convert_code)
2963 if (!INTEGRAL_TYPE_P (TREE_TYPE (vectype_out))
2964 || !INTEGRAL_TYPE_P (TREE_TYPE (vectype_in)))
2965 return false;
2967 tree_code code;
2968 int multi_step_cvt = 0;
2969 auto_vec <tree, 8> interm_types;
2970 if (!supportable_narrowing_operation (NOP_EXPR, vectype_out, vectype_in,
2971 &code, &multi_step_cvt,
2972 &interm_types)
2973 || multi_step_cvt)
2974 return false;
2976 *convert_code = code;
2977 return true;
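/* A minimal standalone sketch (not vectorizer code): the effect of the
   single-step pack that simple_integer_narrowing looks for, modelled for
   int -> short.  Two vectors of N wide elements are truncated and
   concatenated into one vector of 2N narrow elements; the order in which
   the two halves are placed is target-dependent, so the order below is
   arbitrary.  */
static void
sketch_narrowing_pack (const int in0[], const int in1[], unsigned n,
                       short out[])
{
  for (unsigned i = 0; i < n; ++i)
    {
      out[i] = (short) in0[i];       /* truncate to the narrow type */
      out[n + i] = (short) in1[i];
    }
}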
2980 /* Function vectorizable_call.
2982 Check if GS performs a function call that can be vectorized.
2983 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2984 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2985 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2987 static bool
2988 vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
2989 slp_tree slp_node)
2991 gcall *stmt;
2992 tree vec_dest;
2993 tree scalar_dest;
2994 tree op, type;
2995 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
2996 stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info;
2997 tree vectype_out, vectype_in;
2998 poly_uint64 nunits_in;
2999 poly_uint64 nunits_out;
3000 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3001 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3002 vec_info *vinfo = stmt_info->vinfo;
3003 tree fndecl, new_temp, rhs_type;
3004 gimple *def_stmt;
3005 enum vect_def_type dt[3]
3006 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
3007 int ndts = 3;
3008 gimple *new_stmt = NULL;
3009 int ncopies, j;
3010 vec<tree> vargs = vNULL;
3011 enum { NARROW, NONE, WIDEN } modifier;
3012 size_t i, nargs;
3013 tree lhs;
3015 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3016 return false;
3018 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
3019 && ! vec_stmt)
3020 return false;
3022 /* Is GS a vectorizable call? */
3023 stmt = dyn_cast <gcall *> (gs);
3024 if (!stmt)
3025 return false;
3027 if (gimple_call_internal_p (stmt)
3028 && (internal_load_fn_p (gimple_call_internal_fn (stmt))
3029 || internal_store_fn_p (gimple_call_internal_fn (stmt))))
3030 /* Handled by vectorizable_load and vectorizable_store. */
3031 return false;
3033 if (gimple_call_lhs (stmt) == NULL_TREE
3034 || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
3035 return false;
3037 gcc_checking_assert (!stmt_can_throw_internal (stmt));
3039 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3041 /* Process function arguments. */
3042 rhs_type = NULL_TREE;
3043 vectype_in = NULL_TREE;
3044 nargs = gimple_call_num_args (stmt);
3046 /* Bail out if the function has more than three arguments, we do not have
3047 interesting builtin functions to vectorize with more than two arguments
3048 except for fma. No arguments is also not good. */
3049 if (nargs == 0 || nargs > 3)
3050 return false;
3052 /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic. */
3053 if (gimple_call_internal_p (stmt)
3054 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
3056 nargs = 0;
3057 rhs_type = unsigned_type_node;
3060 for (i = 0; i < nargs; i++)
3062 tree opvectype;
3064 op = gimple_call_arg (stmt, i);
3066 /* We can only handle calls with arguments of the same type. */
3067 if (rhs_type
3068 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
3070 if (dump_enabled_p ())
3071 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3072 "argument types differ.\n");
3073 return false;
3075 if (!rhs_type)
3076 rhs_type = TREE_TYPE (op);
3078 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[i], &opvectype))
3080 if (dump_enabled_p ())
3081 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3082 "use not simple.\n");
3083 return false;
3086 if (!vectype_in)
3087 vectype_in = opvectype;
3088 else if (opvectype
3089 && opvectype != vectype_in)
3091 if (dump_enabled_p ())
3092 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3093 "argument vector types differ.\n");
3094 return false;
3097 /* If all arguments are external or constant defs use a vector type with
3098 the same size as the output vector type. */
3099 if (!vectype_in)
3100 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
3101 if (vec_stmt)
3102 gcc_assert (vectype_in);
3103 if (!vectype_in)
3105 if (dump_enabled_p ())
3107 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3108 "no vectype for scalar type ");
3109 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
3110 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3113 return false;
3116 /* FORNOW */
3117 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
3118 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
3119 if (known_eq (nunits_in * 2, nunits_out))
3120 modifier = NARROW;
3121 else if (known_eq (nunits_out, nunits_in))
3122 modifier = NONE;
3123 else if (known_eq (nunits_out * 2, nunits_in))
3124 modifier = WIDEN;
3125 else
3126 return false;
3128 /* We only handle functions that do not read or clobber memory. */
3129 if (gimple_vuse (stmt))
3131 if (dump_enabled_p ())
3132 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3133 "function reads from or writes to memory.\n");
3134 return false;
3137 /* For now, we only vectorize functions if a target specific builtin
3138 is available. TODO -- in some cases, it might be profitable to
3139 insert the calls for pieces of the vector, in order to be able
3140 to vectorize other operations in the loop. */
3141 fndecl = NULL_TREE;
3142 internal_fn ifn = IFN_LAST;
3143 combined_fn cfn = gimple_call_combined_fn (stmt);
3144 tree callee = gimple_call_fndecl (stmt);
3146 /* First try using an internal function. */
3147 tree_code convert_code = ERROR_MARK;
3148 if (cfn != CFN_LAST
3149 && (modifier == NONE
3150 || (modifier == NARROW
3151 && simple_integer_narrowing (vectype_out, vectype_in,
3152 &convert_code))))
3153 ifn = vectorizable_internal_function (cfn, callee, vectype_out,
3154 vectype_in);
3156 /* If that fails, try asking for a target-specific built-in function. */
3157 if (ifn == IFN_LAST)
3159 if (cfn != CFN_LAST)
3160 fndecl = targetm.vectorize.builtin_vectorized_function
3161 (cfn, vectype_out, vectype_in);
3162 else if (callee)
3163 fndecl = targetm.vectorize.builtin_md_vectorized_function
3164 (callee, vectype_out, vectype_in);
3167 if (ifn == IFN_LAST && !fndecl)
3169 if (cfn == CFN_GOMP_SIMD_LANE
3170 && !slp_node
3171 && loop_vinfo
3172 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
3173 && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
3174 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
3175 == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
3177 /* We can handle IFN_GOMP_SIMD_LANE by returning a
3178 { 0, 1, 2, ... vf - 1 } vector. */
3179 gcc_assert (nargs == 0);
3181 else if (modifier == NONE
3182 && (gimple_call_builtin_p (stmt, BUILT_IN_BSWAP16)
3183 || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP32)
3184 || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP64)))
3185 return vectorizable_bswap (stmt, gsi, vec_stmt, slp_node,
3186 vectype_in, dt);
3187 else
3189 if (dump_enabled_p ())
3190 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3191 "function is not vectorizable.\n");
3192 return false;
3196 if (slp_node)
3197 ncopies = 1;
3198 else if (modifier == NARROW && ifn == IFN_LAST)
3199 ncopies = vect_get_num_copies (loop_vinfo, vectype_out);
3200 else
3201 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
3203 /* Sanity check: make sure that at least one copy of the vectorized stmt
3204 needs to be generated. */
3205 gcc_assert (ncopies >= 1);
3207 if (!vec_stmt) /* transformation not required. */
3209 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
3210 if (dump_enabled_p ())
3211 dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
3212 "\n");
3213 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
3214 if (ifn != IFN_LAST && modifier == NARROW && !slp_node)
3215 add_stmt_cost (stmt_info->vinfo->target_cost_data, ncopies / 2,
3216 vec_promote_demote, stmt_info, 0, vect_body);
3218 return true;
3221 /* Transform. */
3223 if (dump_enabled_p ())
3224 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
3226 /* Handle def. */
3227 scalar_dest = gimple_call_lhs (stmt);
3228 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
3230 prev_stmt_info = NULL;
3231 if (modifier == NONE || ifn != IFN_LAST)
3233 tree prev_res = NULL_TREE;
3234 for (j = 0; j < ncopies; ++j)
3236 /* Build argument list for the vectorized call. */
3237 if (j == 0)
3238 vargs.create (nargs);
3239 else
3240 vargs.truncate (0);
3242 if (slp_node)
3244 auto_vec<vec<tree> > vec_defs (nargs);
3245 vec<tree> vec_oprnds0;
3247 for (i = 0; i < nargs; i++)
3248 vargs.quick_push (gimple_call_arg (stmt, i));
3249 vect_get_slp_defs (vargs, slp_node, &vec_defs);
3250 vec_oprnds0 = vec_defs[0];
3252 /* Arguments are ready. Create the new vector stmt. */
3253 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
3255 size_t k;
3256 for (k = 0; k < nargs; k++)
3258 vec<tree> vec_oprndsk = vec_defs[k];
3259 vargs[k] = vec_oprndsk[i];
3261 if (modifier == NARROW)
3263 tree half_res = make_ssa_name (vectype_in);
3264 gcall *call
3265 = gimple_build_call_internal_vec (ifn, vargs);
3266 gimple_call_set_lhs (call, half_res);
3267 gimple_call_set_nothrow (call, true);
3268 new_stmt = call;
3269 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3270 if ((i & 1) == 0)
3272 prev_res = half_res;
3273 continue;
3275 new_temp = make_ssa_name (vec_dest);
3276 new_stmt = gimple_build_assign (new_temp, convert_code,
3277 prev_res, half_res);
3279 else
3281 gcall *call;
3282 if (ifn != IFN_LAST)
3283 call = gimple_build_call_internal_vec (ifn, vargs);
3284 else
3285 call = gimple_build_call_vec (fndecl, vargs);
3286 new_temp = make_ssa_name (vec_dest, call);
3287 gimple_call_set_lhs (call, new_temp);
3288 gimple_call_set_nothrow (call, true);
3289 new_stmt = call;
3291 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3292 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3295 for (i = 0; i < nargs; i++)
3297 vec<tree> vec_oprndsi = vec_defs[i];
3298 vec_oprndsi.release ();
3300 continue;
3303 for (i = 0; i < nargs; i++)
3305 op = gimple_call_arg (stmt, i);
3306 if (j == 0)
3307 vec_oprnd0
3308 = vect_get_vec_def_for_operand (op, stmt);
3309 else
3311 vec_oprnd0 = gimple_call_arg (new_stmt, i);
3312 vec_oprnd0
3313 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
3316 vargs.quick_push (vec_oprnd0);
3319 if (gimple_call_internal_p (stmt)
3320 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
3322 tree cst = build_index_vector (vectype_out, j * nunits_out, 1);
3323 tree new_var
3324 = vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_");
3325 gimple *init_stmt = gimple_build_assign (new_var, cst);
3326 vect_init_vector_1 (stmt, init_stmt, NULL);
3327 new_temp = make_ssa_name (vec_dest);
3328 new_stmt = gimple_build_assign (new_temp, new_var);
3330 else if (modifier == NARROW)
3332 tree half_res = make_ssa_name (vectype_in);
3333 gcall *call = gimple_build_call_internal_vec (ifn, vargs);
3334 gimple_call_set_lhs (call, half_res);
3335 gimple_call_set_nothrow (call, true);
3336 new_stmt = call;
3337 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3338 if ((j & 1) == 0)
3340 prev_res = half_res;
3341 continue;
3343 new_temp = make_ssa_name (vec_dest);
3344 new_stmt = gimple_build_assign (new_temp, convert_code,
3345 prev_res, half_res);
3347 else
3349 gcall *call;
3350 if (ifn != IFN_LAST)
3351 call = gimple_build_call_internal_vec (ifn, vargs);
3352 else
3353 call = gimple_build_call_vec (fndecl, vargs);
3354 new_temp = make_ssa_name (vec_dest, new_stmt);
3355 gimple_call_set_lhs (call, new_temp);
3356 gimple_call_set_nothrow (call, true);
3357 new_stmt = call;
3359 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3361 if (j == (modifier == NARROW ? 1 : 0))
3362 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3363 else
3364 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3366 prev_stmt_info = vinfo_for_stmt (new_stmt);
3369 else if (modifier == NARROW)
3371 for (j = 0; j < ncopies; ++j)
3373 /* Build argument list for the vectorized call. */
3374 if (j == 0)
3375 vargs.create (nargs * 2);
3376 else
3377 vargs.truncate (0);
3379 if (slp_node)
3381 auto_vec<vec<tree> > vec_defs (nargs);
3382 vec<tree> vec_oprnds0;
3384 for (i = 0; i < nargs; i++)
3385 vargs.quick_push (gimple_call_arg (stmt, i));
3386 vect_get_slp_defs (vargs, slp_node, &vec_defs);
3387 vec_oprnds0 = vec_defs[0];
3389 /* Arguments are ready. Create the new vector stmt. */
3390 for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
3392 size_t k;
3393 vargs.truncate (0);
3394 for (k = 0; k < nargs; k++)
3396 vec<tree> vec_oprndsk = vec_defs[k];
3397 vargs.quick_push (vec_oprndsk[i]);
3398 vargs.quick_push (vec_oprndsk[i + 1]);
3400 gcall *call;
3401 if (ifn != IFN_LAST)
3402 call = gimple_build_call_internal_vec (ifn, vargs);
3403 else
3404 call = gimple_build_call_vec (fndecl, vargs);
3405 new_temp = make_ssa_name (vec_dest, call);
3406 gimple_call_set_lhs (call, new_temp);
3407 gimple_call_set_nothrow (call, true);
3408 new_stmt = call;
3409 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3410 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3413 for (i = 0; i < nargs; i++)
3415 vec<tree> vec_oprndsi = vec_defs[i];
3416 vec_oprndsi.release ();
3418 continue;
3421 for (i = 0; i < nargs; i++)
3423 op = gimple_call_arg (stmt, i);
3424 if (j == 0)
3426 vec_oprnd0
3427 = vect_get_vec_def_for_operand (op, stmt);
3428 vec_oprnd1
3429 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
3431 else
3433 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
3434 vec_oprnd0
3435 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
3436 vec_oprnd1
3437 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
3440 vargs.quick_push (vec_oprnd0);
3441 vargs.quick_push (vec_oprnd1);
3444 new_stmt = gimple_build_call_vec (fndecl, vargs);
3445 new_temp = make_ssa_name (vec_dest, new_stmt);
3446 gimple_call_set_lhs (new_stmt, new_temp);
3447 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3449 if (j == 0)
3450 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3451 else
3452 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3454 prev_stmt_info = vinfo_for_stmt (new_stmt);
3457 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3459 else
3460 /* No current target implements this case. */
3461 return false;
3463 vargs.release ();
3465 /* The call in STMT might prevent it from being removed in dce.
3466 We however cannot remove it here, due to the way the ssa name
3467 it defines is mapped to the new definition. So just replace the
3468 rhs of the statement with something harmless. */
3470 if (slp_node)
3471 return true;
3473 type = TREE_TYPE (scalar_dest);
3474 if (is_pattern_stmt_p (stmt_info))
3475 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
3476 else
3477 lhs = gimple_call_lhs (stmt);
3479 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
3480 set_vinfo_for_stmt (new_stmt, stmt_info);
3481 set_vinfo_for_stmt (stmt, NULL);
3482 STMT_VINFO_STMT (stmt_info) = new_stmt;
3483 gsi_replace (gsi, new_stmt, false);
3485 return true;
3489 struct simd_call_arg_info
3491 tree vectype;
3492 tree op;
3493 HOST_WIDE_INT linear_step;
3494 enum vect_def_type dt;
3495 unsigned int align;
3496 bool simd_lane_linear;
3499 /* Helper function of vectorizable_simd_clone_call. If OP, an SSA_NAME,
3500 is linear within simd lane (but not within whole loop), note it in
3501 *ARGINFO. */
3503 static void
3504 vect_simd_lane_linear (tree op, struct loop *loop,
3505 struct simd_call_arg_info *arginfo)
3507 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
3509 if (!is_gimple_assign (def_stmt)
3510 || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
3511 || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
3512 return;
3514 tree base = gimple_assign_rhs1 (def_stmt);
3515 HOST_WIDE_INT linear_step = 0;
3516 tree v = gimple_assign_rhs2 (def_stmt);
3517 while (TREE_CODE (v) == SSA_NAME)
3519 tree t;
3520 def_stmt = SSA_NAME_DEF_STMT (v);
3521 if (is_gimple_assign (def_stmt))
3522 switch (gimple_assign_rhs_code (def_stmt))
3524 case PLUS_EXPR:
3525 t = gimple_assign_rhs2 (def_stmt);
3526 if (linear_step || TREE_CODE (t) != INTEGER_CST)
3527 return;
3528 base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
3529 v = gimple_assign_rhs1 (def_stmt);
3530 continue;
3531 case MULT_EXPR:
3532 t = gimple_assign_rhs2 (def_stmt);
3533 if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
3534 return;
3535 linear_step = tree_to_shwi (t);
3536 v = gimple_assign_rhs1 (def_stmt);
3537 continue;
3538 CASE_CONVERT:
3539 t = gimple_assign_rhs1 (def_stmt);
3540 if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
3541 || (TYPE_PRECISION (TREE_TYPE (v))
3542 < TYPE_PRECISION (TREE_TYPE (t))))
3543 return;
3544 if (!linear_step)
3545 linear_step = 1;
3546 v = t;
3547 continue;
3548 default:
3549 return;
3551 else if (gimple_call_internal_p (def_stmt, IFN_GOMP_SIMD_LANE)
3552 && loop->simduid
3553 && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
3554 && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
3555 == loop->simduid))
3557 if (!linear_step)
3558 linear_step = 1;
3559 arginfo->linear_step = linear_step;
3560 arginfo->op = base;
3561 arginfo->simd_lane_linear = true;
3562 return;
3567 /* Return the number of elements in vector type VECTYPE, which is associated
3568 with a SIMD clone. At present these vectors always have a constant
3569 length. */
3571 static unsigned HOST_WIDE_INT
3572 simd_clone_subparts (tree vectype)
3574 return TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
3577 /* Function vectorizable_simd_clone_call.
3579 Check if STMT performs a function call that can be vectorized
3580 by calling a simd clone of the function.
3581 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3582 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3583 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
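/* As a rough illustration (assuming the target provides SIMD clones for
   the callee), a scalar loop such as

       #pragma omp declare simd
       float foo (float);
       ...
       for (i = 0; i < n; i++)
         a[i] = foo (b[i]);

   has its call to foo replaced by a call to one of foo's SIMD clones,
   which takes a vector of b elements and returns a vector of results;
   the code below picks the most suitable clone and builds its argument
   list.  */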
3585 static bool
3586 vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
3587 gimple **vec_stmt, slp_tree slp_node)
3589 tree vec_dest;
3590 tree scalar_dest;
3591 tree op, type;
3592 tree vec_oprnd0 = NULL_TREE;
3593 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
3594 tree vectype;
3595 unsigned int nunits;
3596 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3597 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3598 vec_info *vinfo = stmt_info->vinfo;
3599 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
3600 tree fndecl, new_temp;
3601 gimple *def_stmt;
3602 gimple *new_stmt = NULL;
3603 int ncopies, j;
3604 auto_vec<simd_call_arg_info> arginfo;
3605 vec<tree> vargs = vNULL;
3606 size_t i, nargs;
3607 tree lhs, rtype, ratype;
3608 vec<constructor_elt, va_gc> *ret_ctor_elts = NULL;
3610 /* Is STMT a vectorizable call? */
3611 if (!is_gimple_call (stmt))
3612 return false;
3614 fndecl = gimple_call_fndecl (stmt);
3615 if (fndecl == NULL_TREE)
3616 return false;
3618 struct cgraph_node *node = cgraph_node::get (fndecl);
3619 if (node == NULL || node->simd_clones == NULL)
3620 return false;
3622 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3623 return false;
3625 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
3626 && ! vec_stmt)
3627 return false;
3629 if (gimple_call_lhs (stmt)
3630 && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
3631 return false;
3633 gcc_checking_assert (!stmt_can_throw_internal (stmt));
3635 vectype = STMT_VINFO_VECTYPE (stmt_info);
3637 if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
3638 return false;
3640 /* FORNOW */
3641 if (slp_node)
3642 return false;
3644 /* Process function arguments. */
3645 nargs = gimple_call_num_args (stmt);
3647 /* Bail out if the function has zero arguments. */
3648 if (nargs == 0)
3649 return false;
3651 arginfo.reserve (nargs, true);
3653 for (i = 0; i < nargs; i++)
3655 simd_call_arg_info thisarginfo;
3656 affine_iv iv;
3658 thisarginfo.linear_step = 0;
3659 thisarginfo.align = 0;
3660 thisarginfo.op = NULL_TREE;
3661 thisarginfo.simd_lane_linear = false;
3663 op = gimple_call_arg (stmt, i);
3664 if (!vect_is_simple_use (op, vinfo, &def_stmt, &thisarginfo.dt,
3665 &thisarginfo.vectype)
3666 || thisarginfo.dt == vect_uninitialized_def)
3668 if (dump_enabled_p ())
3669 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3670 "use not simple.\n");
3671 return false;
3674 if (thisarginfo.dt == vect_constant_def
3675 || thisarginfo.dt == vect_external_def)
3676 gcc_assert (thisarginfo.vectype == NULL_TREE);
3677 else
3678 gcc_assert (thisarginfo.vectype != NULL_TREE);
3680 /* For linear arguments, the analysis phase should have saved
3681 the base and step in STMT_VINFO_SIMD_CLONE_INFO. */
3682 if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
3683 && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
3685 gcc_assert (vec_stmt);
3686 thisarginfo.linear_step
3687 = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
3688 thisarginfo.op
3689 = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
3690 thisarginfo.simd_lane_linear
3691 = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
3692 == boolean_true_node);
3693 /* If the loop has been peeled for alignment, adjust the base accordingly. */
3694 tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
3695 tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
3696 if (n1 != n2 && !thisarginfo.simd_lane_linear)
3698 tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
3699 tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
3700 tree opt = TREE_TYPE (thisarginfo.op);
3701 bias = fold_convert (TREE_TYPE (step), bias);
3702 bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
3703 thisarginfo.op
3704 = fold_build2 (POINTER_TYPE_P (opt)
3705 ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
3706 thisarginfo.op, bias);
3709 else if (!vec_stmt
3710 && thisarginfo.dt != vect_constant_def
3711 && thisarginfo.dt != vect_external_def
3712 && loop_vinfo
3713 && TREE_CODE (op) == SSA_NAME
3714 && simple_iv (loop, loop_containing_stmt (stmt), op,
3715 &iv, false)
3716 && tree_fits_shwi_p (iv.step))
3718 thisarginfo.linear_step = tree_to_shwi (iv.step);
3719 thisarginfo.op = iv.base;
3721 else if ((thisarginfo.dt == vect_constant_def
3722 || thisarginfo.dt == vect_external_def)
3723 && POINTER_TYPE_P (TREE_TYPE (op)))
3724 thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
3725 /* Addresses of array elements indexed by GOMP_SIMD_LANE are
3726 linear too. */
3727 if (POINTER_TYPE_P (TREE_TYPE (op))
3728 && !thisarginfo.linear_step
3729 && !vec_stmt
3730 && thisarginfo.dt != vect_constant_def
3731 && thisarginfo.dt != vect_external_def
3732 && loop_vinfo
3733 && !slp_node
3734 && TREE_CODE (op) == SSA_NAME)
3735 vect_simd_lane_linear (op, loop, &thisarginfo);
3737 arginfo.quick_push (thisarginfo);
3740 unsigned HOST_WIDE_INT vf;
3741 if (!LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
3743 if (dump_enabled_p ())
3744 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3745 "not considering SIMD clones; not yet supported"
3746 " for variable-width vectors.\n");
3747 return false;
3750 unsigned int badness = 0;
3751 struct cgraph_node *bestn = NULL;
3752 if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
3753 bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
3754 else
3755 for (struct cgraph_node *n = node->simd_clones; n != NULL;
3756 n = n->simdclone->next_clone)
3758 unsigned int this_badness = 0;
3759 if (n->simdclone->simdlen > vf
3760 || n->simdclone->nargs != nargs)
3761 continue;
3762 if (n->simdclone->simdlen < vf)
3763 this_badness += (exact_log2 (vf)
3764 - exact_log2 (n->simdclone->simdlen)) * 1024;
3765 if (n->simdclone->inbranch)
3766 this_badness += 2048;
3767 int target_badness = targetm.simd_clone.usable (n);
3768 if (target_badness < 0)
3769 continue;
3770 this_badness += target_badness * 512;
3771 /* FORNOW: Have to add code to add the mask argument. */
3772 if (n->simdclone->inbranch)
3773 continue;
3774 for (i = 0; i < nargs; i++)
3776 switch (n->simdclone->args[i].arg_type)
3778 case SIMD_CLONE_ARG_TYPE_VECTOR:
3779 if (!useless_type_conversion_p
3780 (n->simdclone->args[i].orig_type,
3781 TREE_TYPE (gimple_call_arg (stmt, i))))
3782 i = -1;
3783 else if (arginfo[i].dt == vect_constant_def
3784 || arginfo[i].dt == vect_external_def
3785 || arginfo[i].linear_step)
3786 this_badness += 64;
3787 break;
3788 case SIMD_CLONE_ARG_TYPE_UNIFORM:
3789 if (arginfo[i].dt != vect_constant_def
3790 && arginfo[i].dt != vect_external_def)
3791 i = -1;
3792 break;
3793 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
3794 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
3795 if (arginfo[i].dt == vect_constant_def
3796 || arginfo[i].dt == vect_external_def
3797 || (arginfo[i].linear_step
3798 != n->simdclone->args[i].linear_step))
3799 i = -1;
3800 break;
3801 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
3802 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
3803 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
3804 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
3805 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
3806 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
3807 /* FORNOW */
3808 i = -1;
3809 break;
3810 case SIMD_CLONE_ARG_TYPE_MASK:
3811 gcc_unreachable ();
3813 if (i == (size_t) -1)
3814 break;
3815 if (n->simdclone->args[i].alignment > arginfo[i].align)
3817 i = -1;
3818 break;
3820 if (arginfo[i].align)
3821 this_badness += (exact_log2 (arginfo[i].align)
3822 - exact_log2 (n->simdclone->args[i].alignment));
3824 if (i == (size_t) -1)
3825 continue;
3826 if (bestn == NULL || this_badness < badness)
3828 bestn = n;
3829 badness = this_badness;
3833 if (bestn == NULL)
3834 return false;
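/* A rough worked example of the badness scoring above (illustrative
   numbers): with vf == 8, a clone with simdlen == 8 starts at 0, a clone
   with simdlen == 4 gets (log2 (8) - log2 (4)) * 1024 == 1024 added, an
   inbranch clone gets a further 2048 (and is in fact skipped for now),
   and any non-zero target-reported badness adds 512 per unit.  The clone
   with the smallest total is kept in BESTN.  */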
3836 for (i = 0; i < nargs; i++)
3837 if ((arginfo[i].dt == vect_constant_def
3838 || arginfo[i].dt == vect_external_def)
3839 && bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
3841 arginfo[i].vectype
3842 = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
3843 i)));
3844 if (arginfo[i].vectype == NULL
3845 || (simd_clone_subparts (arginfo[i].vectype)
3846 > bestn->simdclone->simdlen))
3847 return false;
3850 fndecl = bestn->decl;
3851 nunits = bestn->simdclone->simdlen;
3852 ncopies = vf / nunits;
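/* E.g. (illustrative) with vf == 8 and a chosen clone whose simdlen is 4,
   ncopies == 2, so two clone calls are emitted for each scalar call.  */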
3854 /* If the function isn't const, only allow it in simd loops where the
3855 user has asserted that at least nunits consecutive iterations can be
3856 performed using SIMD instructions. */
3857 if ((loop == NULL || (unsigned) loop->safelen < nunits)
3858 && gimple_vuse (stmt))
3859 return false;
3861 /* Sanity check: make sure that at least one copy of the vectorized stmt
3862 needs to be generated. */
3863 gcc_assert (ncopies >= 1);
3865 if (!vec_stmt) /* transformation not required. */
3867 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
3868 for (i = 0; i < nargs; i++)
3869 if ((bestn->simdclone->args[i].arg_type
3870 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
3871 || (bestn->simdclone->args[i].arg_type
3872 == SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP))
3874 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
3875 + 1);
3876 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
3877 tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
3878 ? size_type_node : TREE_TYPE (arginfo[i].op);
3879 tree ls = build_int_cst (lst, arginfo[i].linear_step);
3880 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
3881 tree sll = arginfo[i].simd_lane_linear
3882 ? boolean_true_node : boolean_false_node;
3883 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
3885 STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
3886 if (dump_enabled_p ())
3887 dump_printf_loc (MSG_NOTE, vect_location,
3888 "=== vectorizable_simd_clone_call ===\n");
3889 /* vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
3890 return true;
3893 /* Transform. */
3895 if (dump_enabled_p ())
3896 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
3898 /* Handle def. */
3899 scalar_dest = gimple_call_lhs (stmt);
3900 vec_dest = NULL_TREE;
3901 rtype = NULL_TREE;
3902 ratype = NULL_TREE;
3903 if (scalar_dest)
3905 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3906 rtype = TREE_TYPE (TREE_TYPE (fndecl));
3907 if (TREE_CODE (rtype) == ARRAY_TYPE)
3909 ratype = rtype;
3910 rtype = TREE_TYPE (ratype);
3914 prev_stmt_info = NULL;
3915 for (j = 0; j < ncopies; ++j)
3917 /* Build argument list for the vectorized call. */
3918 if (j == 0)
3919 vargs.create (nargs);
3920 else
3921 vargs.truncate (0);
3923 for (i = 0; i < nargs; i++)
3925 unsigned int k, l, m, o;
3926 tree atype;
3927 op = gimple_call_arg (stmt, i);
3928 switch (bestn->simdclone->args[i].arg_type)
3930 case SIMD_CLONE_ARG_TYPE_VECTOR:
3931 atype = bestn->simdclone->args[i].vector_type;
3932 o = nunits / simd_clone_subparts (atype);
3933 for (m = j * o; m < (j + 1) * o; m++)
3935 if (simd_clone_subparts (atype)
3936 < simd_clone_subparts (arginfo[i].vectype))
3938 poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
3939 k = (simd_clone_subparts (arginfo[i].vectype)
3940 / simd_clone_subparts (atype));
3941 gcc_assert ((k & (k - 1)) == 0);
3942 if (m == 0)
3943 vec_oprnd0
3944 = vect_get_vec_def_for_operand (op, stmt);
3945 else
3947 vec_oprnd0 = arginfo[i].op;
3948 if ((m & (k - 1)) == 0)
3949 vec_oprnd0
3950 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
3951 vec_oprnd0);
3953 arginfo[i].op = vec_oprnd0;
3954 vec_oprnd0
3955 = build3 (BIT_FIELD_REF, atype, vec_oprnd0,
3956 bitsize_int (prec),
3957 bitsize_int ((m & (k - 1)) * prec));
3958 new_stmt
3959 = gimple_build_assign (make_ssa_name (atype),
3960 vec_oprnd0);
3961 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3962 vargs.safe_push (gimple_assign_lhs (new_stmt));
3964 else
3966 k = (simd_clone_subparts (atype)
3967 / simd_clone_subparts (arginfo[i].vectype));
3968 gcc_assert ((k & (k - 1)) == 0);
3969 vec<constructor_elt, va_gc> *ctor_elts;
3970 if (k != 1)
3971 vec_alloc (ctor_elts, k);
3972 else
3973 ctor_elts = NULL;
3974 for (l = 0; l < k; l++)
3976 if (m == 0 && l == 0)
3977 vec_oprnd0
3978 = vect_get_vec_def_for_operand (op, stmt);
3979 else
3980 vec_oprnd0
3981 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
3982 arginfo[i].op);
3983 arginfo[i].op = vec_oprnd0;
3984 if (k == 1)
3985 break;
3986 CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
3987 vec_oprnd0);
3989 if (k == 1)
3990 vargs.safe_push (vec_oprnd0);
3991 else
3993 vec_oprnd0 = build_constructor (atype, ctor_elts);
3994 new_stmt
3995 = gimple_build_assign (make_ssa_name (atype),
3996 vec_oprnd0);
3997 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3998 vargs.safe_push (gimple_assign_lhs (new_stmt));
4002 break;
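/* In the SIMD_CLONE_ARG_TYPE_VECTOR case above, the argument vectors are
   adapted to what the clone expects (a sketch, with illustrative types):
   if the clone takes V4SF while the loop operates on V8SF, each loop
   vector is split into pieces with BIT_FIELD_REFs; if the clone instead
   takes the wider type, several defs are combined into one argument with
   a CONSTRUCTOR.  */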
4003 case SIMD_CLONE_ARG_TYPE_UNIFORM:
4004 vargs.safe_push (op);
4005 break;
4006 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
4007 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
4008 if (j == 0)
4010 gimple_seq stmts;
4011 arginfo[i].op
4012 = force_gimple_operand (arginfo[i].op, &stmts, true,
4013 NULL_TREE);
4014 if (stmts != NULL)
4016 basic_block new_bb;
4017 edge pe = loop_preheader_edge (loop);
4018 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
4019 gcc_assert (!new_bb);
4021 if (arginfo[i].simd_lane_linear)
4023 vargs.safe_push (arginfo[i].op);
4024 break;
4026 tree phi_res = copy_ssa_name (op);
4027 gphi *new_phi = create_phi_node (phi_res, loop->header);
4028 set_vinfo_for_stmt (new_phi,
4029 new_stmt_vec_info (new_phi, loop_vinfo));
4030 add_phi_arg (new_phi, arginfo[i].op,
4031 loop_preheader_edge (loop), UNKNOWN_LOCATION);
4032 enum tree_code code
4033 = POINTER_TYPE_P (TREE_TYPE (op))
4034 ? POINTER_PLUS_EXPR : PLUS_EXPR;
4035 tree type = POINTER_TYPE_P (TREE_TYPE (op))
4036 ? sizetype : TREE_TYPE (op);
4037 widest_int cst
4038 = wi::mul (bestn->simdclone->args[i].linear_step,
4039 ncopies * nunits);
4040 tree tcst = wide_int_to_tree (type, cst);
4041 tree phi_arg = copy_ssa_name (op);
4042 new_stmt
4043 = gimple_build_assign (phi_arg, code, phi_res, tcst);
4044 gimple_stmt_iterator si = gsi_after_labels (loop->header);
4045 gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
4046 set_vinfo_for_stmt (new_stmt,
4047 new_stmt_vec_info (new_stmt, loop_vinfo));
4048 add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
4049 UNKNOWN_LOCATION);
4050 arginfo[i].op = phi_res;
4051 vargs.safe_push (phi_res);
4053 else
4055 enum tree_code code
4056 = POINTER_TYPE_P (TREE_TYPE (op))
4057 ? POINTER_PLUS_EXPR : PLUS_EXPR;
4058 tree type = POINTER_TYPE_P (TREE_TYPE (op))
4059 ? sizetype : TREE_TYPE (op);
4060 widest_int cst
4061 = wi::mul (bestn->simdclone->args[i].linear_step,
4062 j * nunits);
4063 tree tcst = wide_int_to_tree (type, cst);
4064 new_temp = make_ssa_name (TREE_TYPE (op));
4065 new_stmt = gimple_build_assign (new_temp, code,
4066 arginfo[i].op, tcst);
4067 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4068 vargs.safe_push (new_temp);
4070 break;
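/* For the linear-step argument cases above (roughly): the first copy
   creates a PHI in the loop header that starts at the base value and is
   incremented by linear_step * ncopies * nunits on the latch edge, and
   each later copy j within the same iteration adds linear_step * j *
   nunits to that PHI result before passing it to the clone.  */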
4071 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
4072 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
4073 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
4074 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
4075 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
4076 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
4077 default:
4078 gcc_unreachable ();
4082 new_stmt = gimple_build_call_vec (fndecl, vargs);
4083 if (vec_dest)
4085 gcc_assert (ratype || simd_clone_subparts (rtype) == nunits);
4086 if (ratype)
4087 new_temp = create_tmp_var (ratype);
4088 else if (simd_clone_subparts (vectype)
4089 == simd_clone_subparts (rtype))
4090 new_temp = make_ssa_name (vec_dest, new_stmt);
4091 else
4092 new_temp = make_ssa_name (rtype, new_stmt);
4093 gimple_call_set_lhs (new_stmt, new_temp);
4095 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4097 if (vec_dest)
4099 if (simd_clone_subparts (vectype) < nunits)
4101 unsigned int k, l;
4102 poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
4103 poly_uint64 bytes = GET_MODE_SIZE (TYPE_MODE (vectype));
4104 k = nunits / simd_clone_subparts (vectype);
4105 gcc_assert ((k & (k - 1)) == 0);
4106 for (l = 0; l < k; l++)
4108 tree t;
4109 if (ratype)
4111 t = build_fold_addr_expr (new_temp);
4112 t = build2 (MEM_REF, vectype, t,
4113 build_int_cst (TREE_TYPE (t), l * bytes));
4115 else
4116 t = build3 (BIT_FIELD_REF, vectype, new_temp,
4117 bitsize_int (prec), bitsize_int (l * prec));
4118 new_stmt
4119 = gimple_build_assign (make_ssa_name (vectype), t);
4120 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4121 if (j == 0 && l == 0)
4122 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4123 else
4124 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4126 prev_stmt_info = vinfo_for_stmt (new_stmt);
4129 if (ratype)
4131 tree clobber = build_constructor (ratype, NULL);
4132 TREE_THIS_VOLATILE (clobber) = 1;
4133 new_stmt = gimple_build_assign (new_temp, clobber);
4134 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4136 continue;
4138 else if (simd_clone_subparts (vectype) > nunits)
4140 unsigned int k = (simd_clone_subparts (vectype)
4141 / simd_clone_subparts (rtype));
4142 gcc_assert ((k & (k - 1)) == 0);
4143 if ((j & (k - 1)) == 0)
4144 vec_alloc (ret_ctor_elts, k);
4145 if (ratype)
4147 unsigned int m, o = nunits / simd_clone_subparts (rtype);
4148 for (m = 0; m < o; m++)
4150 tree tem = build4 (ARRAY_REF, rtype, new_temp,
4151 size_int (m), NULL_TREE, NULL_TREE);
4152 new_stmt
4153 = gimple_build_assign (make_ssa_name (rtype), tem);
4154 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4155 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
4156 gimple_assign_lhs (new_stmt));
4158 tree clobber = build_constructor (ratype, NULL);
4159 TREE_THIS_VOLATILE (clobber) = 1;
4160 new_stmt = gimple_build_assign (new_temp, clobber);
4161 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4163 else
4164 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
4165 if ((j & (k - 1)) != k - 1)
4166 continue;
4167 vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
4168 new_stmt
4169 = gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
4170 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4172 if ((unsigned) j == k - 1)
4173 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4174 else
4175 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4177 prev_stmt_info = vinfo_for_stmt (new_stmt);
4178 continue;
4180 else if (ratype)
4182 tree t = build_fold_addr_expr (new_temp);
4183 t = build2 (MEM_REF, vectype, t,
4184 build_int_cst (TREE_TYPE (t), 0));
4185 new_stmt
4186 = gimple_build_assign (make_ssa_name (vec_dest), t);
4187 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4188 tree clobber = build_constructor (ratype, NULL);
4189 TREE_THIS_VOLATILE (clobber) = 1;
4190 vect_finish_stmt_generation (stmt,
4191 gimple_build_assign (new_temp,
4192 clobber), gsi);
4196 if (j == 0)
4197 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4198 else
4199 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4201 prev_stmt_info = vinfo_for_stmt (new_stmt);
4204 vargs.release ();
4206 /* The call in STMT might prevent it from being removed in DCE.
4207 However, we cannot remove it here, due to the way the SSA name
4208 it defines is mapped to the new definition. So just replace
4209 the rhs of the statement with something harmless. */
4211 if (slp_node)
4212 return true;
4214 if (scalar_dest)
4216 type = TREE_TYPE (scalar_dest);
4217 if (is_pattern_stmt_p (stmt_info))
4218 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
4219 else
4220 lhs = gimple_call_lhs (stmt);
4221 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
4223 else
4224 new_stmt = gimple_build_nop ();
4225 set_vinfo_for_stmt (new_stmt, stmt_info);
4226 set_vinfo_for_stmt (stmt, NULL);
4227 STMT_VINFO_STMT (stmt_info) = new_stmt;
4228 gsi_replace (gsi, new_stmt, true);
4229 unlink_stmt_vdef (stmt);
4231 return true;
4235 /* Function vect_gen_widened_results_half
4237 Create a vector stmt whose code, number of arguments, and result
4238 variable are CODE, OP_TYPE, and VEC_DEST respectively, and whose
4239 arguments are VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be
4240 inserted at GSI. In the case that CODE is a CALL_EXPR, this means that
4241 a call to DECL needs to be created (DECL is a function-decl of a target
4242 builtin). STMT is the original scalar stmt that we are vectorizing. */
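/* For example (illustrative types), widening a V8HI operand into V4SI
   results is done in two halves: one stmt computes the low four lanes
   and a second stmt the high four, each producing a V4SI value; this
   helper emits a single such half, either as an assignment with CODE
   (e.g. VEC_UNPACK_LO_EXPR) or as a call to a target builtin DECL.  */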
4244 static gimple *
4245 vect_gen_widened_results_half (enum tree_code code,
4246 tree decl,
4247 tree vec_oprnd0, tree vec_oprnd1, int op_type,
4248 tree vec_dest, gimple_stmt_iterator *gsi,
4249 gimple *stmt)
4251 gimple *new_stmt;
4252 tree new_temp;
4254 /* Generate half of the widened result: */
4255 if (code == CALL_EXPR)
4257 /* Target specific support */
4258 if (op_type == binary_op)
4259 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
4260 else
4261 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
4262 new_temp = make_ssa_name (vec_dest, new_stmt);
4263 gimple_call_set_lhs (new_stmt, new_temp);
4265 else
4267 /* Generic support */
4268 gcc_assert (op_type == TREE_CODE_LENGTH (code));
4269 if (op_type != binary_op)
4270 vec_oprnd1 = NULL;
4271 new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
4272 new_temp = make_ssa_name (vec_dest, new_stmt);
4273 gimple_assign_set_lhs (new_stmt, new_temp);
4275 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4277 return new_stmt;
4281 /* Get vectorized definitions for loop-based vectorization. For the first
4282 operand we call vect_get_vec_def_for_operand() (with OPRND containing
4283 the scalar operand), and for the rest we get a copy with
4284 vect_get_vec_def_for_stmt_copy() using the previous vector definition
4285 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
4286 The vectors are collected into VEC_OPRNDS. */
4288 static void
4289 vect_get_loop_based_defs (tree *oprnd, gimple *stmt, enum vect_def_type dt,
4290 vec<tree> *vec_oprnds, int multi_step_cvt)
4292 tree vec_oprnd;
4294 /* Get first vector operand. */
4295 /* All the vector operands except the very first one (that is, the
4296 scalar operand) are stmt copies. */
4297 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
4298 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt);
4299 else
4300 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
4302 vec_oprnds->quick_push (vec_oprnd);
4304 /* Get second vector operand. */
4305 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
4306 vec_oprnds->quick_push (vec_oprnd);
4308 *oprnd = vec_oprnd;
4310 /* For conversion in multiple steps, continue to get operands
4311 recursively. */
4312 if (multi_step_cvt)
4313 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
4317 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
4318 For multi-step conversions store the resulting vectors and call the function
4319 recursively. */
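/* E.g. (illustrative types), narrowing four V4SI operands to V8HI:
   adjacent pairs are combined with a packing code such as
   VEC_PACK_TRUNC_EXPR into two V8HI vectors, and a multi-step
   conversion then recurses on those intermediate results.  */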
4321 static void
4322 vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
4323 int multi_step_cvt, gimple *stmt,
4324 vec<tree> vec_dsts,
4325 gimple_stmt_iterator *gsi,
4326 slp_tree slp_node, enum tree_code code,
4327 stmt_vec_info *prev_stmt_info)
4329 unsigned int i;
4330 tree vop0, vop1, new_tmp, vec_dest;
4331 gimple *new_stmt;
4332 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4334 vec_dest = vec_dsts.pop ();
4336 for (i = 0; i < vec_oprnds->length (); i += 2)
4338 /* Create demotion operation. */
4339 vop0 = (*vec_oprnds)[i];
4340 vop1 = (*vec_oprnds)[i + 1];
4341 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
4342 new_tmp = make_ssa_name (vec_dest, new_stmt);
4343 gimple_assign_set_lhs (new_stmt, new_tmp);
4344 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4346 if (multi_step_cvt)
4347 /* Store the resulting vector for next recursive call. */
4348 (*vec_oprnds)[i/2] = new_tmp;
4349 else
4351 /* This is the last step of the conversion sequence. Store the
4352 vectors in SLP_NODE or in the vector info of the scalar statement
4353 (or in the STMT_VINFO_RELATED_STMT chain). */
4354 if (slp_node)
4355 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4356 else
4358 if (!*prev_stmt_info)
4359 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
4360 else
4361 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
4363 *prev_stmt_info = vinfo_for_stmt (new_stmt);
4368 /* For multi-step demotion operations we first generate demotion operations
4369 from the source type to the intermediate types, and then combine the
4370 results (stored in VEC_OPRNDS) with a demotion operation to the
4371 destination type. */
4372 if (multi_step_cvt)
4374 /* At each level of recursion we have half of the operands we had at the
4375 previous level. */
4376 vec_oprnds->truncate ((i+1)/2);
4377 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
4378 stmt, vec_dsts, gsi, slp_node,
4379 VEC_PACK_TRUNC_EXPR,
4380 prev_stmt_info);
4383 vec_dsts.quick_push (vec_dest);
4387 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
4388 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
4389 the resulting vectors and call the function recursively. */
4391 static void
4392 vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
4393 vec<tree> *vec_oprnds1,
4394 gimple *stmt, tree vec_dest,
4395 gimple_stmt_iterator *gsi,
4396 enum tree_code code1,
4397 enum tree_code code2, tree decl1,
4398 tree decl2, int op_type)
4400 int i;
4401 tree vop0, vop1, new_tmp1, new_tmp2;
4402 gimple *new_stmt1, *new_stmt2;
4403 vec<tree> vec_tmp = vNULL;
4405 vec_tmp.create (vec_oprnds0->length () * 2);
4406 FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
4408 if (op_type == binary_op)
4409 vop1 = (*vec_oprnds1)[i];
4410 else
4411 vop1 = NULL_TREE;
4413 /* Generate the two halves of promotion operation. */
4414 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
4415 op_type, vec_dest, gsi, stmt);
4416 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
4417 op_type, vec_dest, gsi, stmt);
4418 if (is_gimple_call (new_stmt1))
4420 new_tmp1 = gimple_call_lhs (new_stmt1);
4421 new_tmp2 = gimple_call_lhs (new_stmt2);
4423 else
4425 new_tmp1 = gimple_assign_lhs (new_stmt1);
4426 new_tmp2 = gimple_assign_lhs (new_stmt2);
4429 /* Store the results for the next step. */
4430 vec_tmp.quick_push (new_tmp1);
4431 vec_tmp.quick_push (new_tmp2);
4434 vec_oprnds0->release ();
4435 *vec_oprnds0 = vec_tmp;
4439 /* Check if STMT performs a conversion operation that can be vectorized.
4440 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4441 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4442 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
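/* Illustrative examples of the three shapes handled below: an int <->
   float conversion with equal lane counts is a NONE conversion handled
   by a single stmt per copy; converting int to double is a WIDEN
   conversion, where each input vector produces two result vectors;
   converting int to short is a NARROW conversion, where two input
   vectors are packed into one result.  */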
4444 static bool
4445 vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
4446 gimple **vec_stmt, slp_tree slp_node)
4448 tree vec_dest;
4449 tree scalar_dest;
4450 tree op0, op1 = NULL_TREE;
4451 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
4452 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4453 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4454 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
4455 enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
4456 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
4457 tree new_temp;
4458 gimple *def_stmt;
4459 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4460 int ndts = 2;
4461 gimple *new_stmt = NULL;
4462 stmt_vec_info prev_stmt_info;
4463 poly_uint64 nunits_in;
4464 poly_uint64 nunits_out;
4465 tree vectype_out, vectype_in;
4466 int ncopies, i, j;
4467 tree lhs_type, rhs_type;
4468 enum { NARROW, NONE, WIDEN } modifier;
4469 vec<tree> vec_oprnds0 = vNULL;
4470 vec<tree> vec_oprnds1 = vNULL;
4471 tree vop0;
4472 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4473 vec_info *vinfo = stmt_info->vinfo;
4474 int multi_step_cvt = 0;
4475 vec<tree> interm_types = vNULL;
4476 tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
4477 int op_type;
4478 unsigned short fltsz;
4480 /* Is STMT a vectorizable conversion? */
4482 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4483 return false;
4485 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4486 && ! vec_stmt)
4487 return false;
4489 if (!is_gimple_assign (stmt))
4490 return false;
4492 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4493 return false;
4495 code = gimple_assign_rhs_code (stmt);
4496 if (!CONVERT_EXPR_CODE_P (code)
4497 && code != FIX_TRUNC_EXPR
4498 && code != FLOAT_EXPR
4499 && code != WIDEN_MULT_EXPR
4500 && code != WIDEN_LSHIFT_EXPR)
4501 return false;
4503 op_type = TREE_CODE_LENGTH (code);
4505 /* Check types of lhs and rhs. */
4506 scalar_dest = gimple_assign_lhs (stmt);
4507 lhs_type = TREE_TYPE (scalar_dest);
4508 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4510 op0 = gimple_assign_rhs1 (stmt);
4511 rhs_type = TREE_TYPE (op0);
4513 if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
4514 && !((INTEGRAL_TYPE_P (lhs_type)
4515 && INTEGRAL_TYPE_P (rhs_type))
4516 || (SCALAR_FLOAT_TYPE_P (lhs_type)
4517 && SCALAR_FLOAT_TYPE_P (rhs_type))))
4518 return false;
4520 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
4521 && ((INTEGRAL_TYPE_P (lhs_type)
4522 && !type_has_mode_precision_p (lhs_type))
4523 || (INTEGRAL_TYPE_P (rhs_type)
4524 && !type_has_mode_precision_p (rhs_type))))
4526 if (dump_enabled_p ())
4527 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4528 "type conversion to/from bit-precision unsupported."
4529 "\n");
4530 return false;
4533 /* Check the operands of the operation. */
4534 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype_in))
4536 if (dump_enabled_p ())
4537 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4538 "use not simple.\n");
4539 return false;
4541 if (op_type == binary_op)
4543 bool ok;
4545 op1 = gimple_assign_rhs2 (stmt);
4546 gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
4547 /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
4548 OP1. */
4549 if (CONSTANT_CLASS_P (op0))
4550 ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &vectype_in);
4551 else
4552 ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]);
4554 if (!ok)
4556 if (dump_enabled_p ())
4557 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4558 "use not simple.\n");
4559 return false;
4563 /* If op0 is an external or constant def, use a vector type of
4564 the same size as the output vector type. */
4565 if (!vectype_in)
4566 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
4567 if (vec_stmt)
4568 gcc_assert (vectype_in);
4569 if (!vectype_in)
4571 if (dump_enabled_p ())
4573 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4574 "no vectype for scalar type ");
4575 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
4576 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4579 return false;
4582 if (VECTOR_BOOLEAN_TYPE_P (vectype_out)
4583 && !VECTOR_BOOLEAN_TYPE_P (vectype_in))
4585 if (dump_enabled_p ())
4587 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4588 "can't convert between boolean and non "
4589 "boolean vectors");
4590 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
4591 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4594 return false;
4597 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
4598 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4599 if (known_eq (nunits_out, nunits_in))
4600 modifier = NONE;
4601 else if (multiple_p (nunits_out, nunits_in))
4602 modifier = NARROW;
4603 else
4605 gcc_checking_assert (multiple_p (nunits_in, nunits_out));
4606 modifier = WIDEN;
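/* E.g. (illustrative lane counts): converting V8HI inputs to V4SI
   outputs gives nunits_in == 8 and nunits_out == 4, hence WIDEN (each
   input vector yields two output vectors); the reverse direction is
   NARROW, and equal lane counts mean NONE.  */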
4609 /* Multiple types in SLP are handled by creating the appropriate number of
4610 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4611 case of SLP. */
4612 if (slp_node)
4613 ncopies = 1;
4614 else if (modifier == NARROW)
4615 ncopies = vect_get_num_copies (loop_vinfo, vectype_out);
4616 else
4617 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
4619 /* Sanity check: make sure that at least one copy of the vectorized stmt
4620 needs to be generated. */
4621 gcc_assert (ncopies >= 1);
4623 bool found_mode = false;
4624 scalar_mode lhs_mode = SCALAR_TYPE_MODE (lhs_type);
4625 scalar_mode rhs_mode = SCALAR_TYPE_MODE (rhs_type);
4626 opt_scalar_mode rhs_mode_iter;
4628 /* Supportable by target? */
4629 switch (modifier)
4631 case NONE:
4632 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
4633 return false;
4634 if (supportable_convert_operation (code, vectype_out, vectype_in,
4635 &decl1, &code1))
4636 break;
4637 /* FALLTHRU */
4638 unsupported:
4639 if (dump_enabled_p ())
4640 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4641 "conversion not supported by target.\n");
4642 return false;
4644 case WIDEN:
4645 if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
4646 &code1, &code2, &multi_step_cvt,
4647 &interm_types))
4649 /* Binary widening operation can only be supported directly by the
4650 architecture. */
4651 gcc_assert (!(multi_step_cvt && op_type == binary_op));
4652 break;
4655 if (code != FLOAT_EXPR
4656 || GET_MODE_SIZE (lhs_mode) <= GET_MODE_SIZE (rhs_mode))
4657 goto unsupported;
4659 fltsz = GET_MODE_SIZE (lhs_mode);
4660 FOR_EACH_2XWIDER_MODE (rhs_mode_iter, rhs_mode)
4662 rhs_mode = rhs_mode_iter.require ();
4663 if (GET_MODE_SIZE (rhs_mode) > fltsz)
4664 break;
4666 cvt_type
4667 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
4668 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
4669 if (cvt_type == NULL_TREE)
4670 goto unsupported;
4672 if (GET_MODE_SIZE (rhs_mode) == fltsz)
4674 if (!supportable_convert_operation (code, vectype_out,
4675 cvt_type, &decl1, &codecvt1))
4676 goto unsupported;
4678 else if (!supportable_widening_operation (code, stmt, vectype_out,
4679 cvt_type, &codecvt1,
4680 &codecvt2, &multi_step_cvt,
4681 &interm_types))
4682 continue;
4683 else
4684 gcc_assert (multi_step_cvt == 0);
4686 if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
4687 vectype_in, &code1, &code2,
4688 &multi_step_cvt, &interm_types))
4690 found_mode = true;
4691 break;
4695 if (!found_mode)
4696 goto unsupported;
4698 if (GET_MODE_SIZE (rhs_mode) == fltsz)
4699 codecvt2 = ERROR_MARK;
4700 else
4702 multi_step_cvt++;
4703 interm_types.safe_push (cvt_type);
4704 cvt_type = NULL_TREE;
4706 break;
4708 case NARROW:
4709 gcc_assert (op_type == unary_op);
4710 if (supportable_narrowing_operation (code, vectype_out, vectype_in,
4711 &code1, &multi_step_cvt,
4712 &interm_types))
4713 break;
4715 if (code != FIX_TRUNC_EXPR
4716 || GET_MODE_SIZE (lhs_mode) >= GET_MODE_SIZE (rhs_mode))
4717 goto unsupported;
4719 cvt_type
4720 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
4721 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
4722 if (cvt_type == NULL_TREE)
4723 goto unsupported;
4724 if (!supportable_convert_operation (code, cvt_type, vectype_in,
4725 &decl1, &codecvt1))
4726 goto unsupported;
4727 if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
4728 &code1, &multi_step_cvt,
4729 &interm_types))
4730 break;
4731 goto unsupported;
4733 default:
4734 gcc_unreachable ();
4737 if (!vec_stmt) /* transformation not required. */
4739 if (dump_enabled_p ())
4740 dump_printf_loc (MSG_NOTE, vect_location,
4741 "=== vectorizable_conversion ===\n");
4742 if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
4744 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
4745 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
4747 else if (modifier == NARROW)
4749 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
4750 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
4752 else
4754 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
4755 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
4757 interm_types.release ();
4758 return true;
4761 /* Transform. */
4762 if (dump_enabled_p ())
4763 dump_printf_loc (MSG_NOTE, vect_location,
4764 "transform conversion. ncopies = %d.\n", ncopies);
4766 if (op_type == binary_op)
4768 if (CONSTANT_CLASS_P (op0))
4769 op0 = fold_convert (TREE_TYPE (op1), op0);
4770 else if (CONSTANT_CLASS_P (op1))
4771 op1 = fold_convert (TREE_TYPE (op0), op1);
4774 /* In case of multi-step conversion, we first generate conversion operations
4775 to the intermediate types, and then from those types to the final one.
4776 We create vector destinations for the intermediate type (TYPES) received
4777 from supportable_*_operation, and store them in the correct order
4778 for future use in vect_create_vectorized_*_stmts (). */
4779 auto_vec<tree> vec_dsts (multi_step_cvt + 1);
4780 vec_dest = vect_create_destination_var (scalar_dest,
4781 (cvt_type && modifier == WIDEN)
4782 ? cvt_type : vectype_out);
4783 vec_dsts.quick_push (vec_dest);
4785 if (multi_step_cvt)
4787 for (i = interm_types.length () - 1;
4788 interm_types.iterate (i, &intermediate_type); i--)
4790 vec_dest = vect_create_destination_var (scalar_dest,
4791 intermediate_type);
4792 vec_dsts.quick_push (vec_dest);
4796 if (cvt_type)
4797 vec_dest = vect_create_destination_var (scalar_dest,
4798 modifier == WIDEN
4799 ? vectype_out : cvt_type);
4801 if (!slp_node)
4803 if (modifier == WIDEN)
4805 vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
4806 if (op_type == binary_op)
4807 vec_oprnds1.create (1);
4809 else if (modifier == NARROW)
4810 vec_oprnds0.create (
4811 2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
4813 else if (code == WIDEN_LSHIFT_EXPR)
4814 vec_oprnds1.create (slp_node->vec_stmts_size);
4816 last_oprnd = op0;
4817 prev_stmt_info = NULL;
4818 switch (modifier)
4820 case NONE:
4821 for (j = 0; j < ncopies; j++)
4823 if (j == 0)
4824 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
4825 else
4826 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
4828 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4830 /* Arguments are ready, create the new vector stmt. */
4831 if (code1 == CALL_EXPR)
4833 new_stmt = gimple_build_call (decl1, 1, vop0);
4834 new_temp = make_ssa_name (vec_dest, new_stmt);
4835 gimple_call_set_lhs (new_stmt, new_temp);
4837 else
4839 gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
4840 new_stmt = gimple_build_assign (vec_dest, code1, vop0);
4841 new_temp = make_ssa_name (vec_dest, new_stmt);
4842 gimple_assign_set_lhs (new_stmt, new_temp);
4845 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4846 if (slp_node)
4847 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4848 else
4850 if (!prev_stmt_info)
4851 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4852 else
4853 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4854 prev_stmt_info = vinfo_for_stmt (new_stmt);
4858 break;
4860 case WIDEN:
4861 /* In case the vectorization factor (VF) is bigger than the number
4862 of elements that we can fit in a vectype (nunits), we have to
4863 generate more than one vector stmt - i.e., we need to "unroll"
4864 the vector stmt by a factor of VF/nunits. */
4865 for (j = 0; j < ncopies; j++)
4867 /* Handle uses. */
4868 if (j == 0)
4870 if (slp_node)
4872 if (code == WIDEN_LSHIFT_EXPR)
4874 unsigned int k;
4876 vec_oprnd1 = op1;
4877 /* Store vec_oprnd1 for every vector stmt to be created
4878 for SLP_NODE. We check during the analysis that all
4879 the shift arguments are the same. */
4880 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
4881 vec_oprnds1.quick_push (vec_oprnd1);
4883 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4884 slp_node);
4886 else
4887 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
4888 &vec_oprnds1, slp_node);
4890 else
4892 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt);
4893 vec_oprnds0.quick_push (vec_oprnd0);
4894 if (op_type == binary_op)
4896 if (code == WIDEN_LSHIFT_EXPR)
4897 vec_oprnd1 = op1;
4898 else
4899 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt);
4900 vec_oprnds1.quick_push (vec_oprnd1);
4904 else
4906 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
4907 vec_oprnds0.truncate (0);
4908 vec_oprnds0.quick_push (vec_oprnd0);
4909 if (op_type == binary_op)
4911 if (code == WIDEN_LSHIFT_EXPR)
4912 vec_oprnd1 = op1;
4913 else
4914 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
4915 vec_oprnd1);
4916 vec_oprnds1.truncate (0);
4917 vec_oprnds1.quick_push (vec_oprnd1);
4921 /* Arguments are ready. Create the new vector stmts. */
4922 for (i = multi_step_cvt; i >= 0; i--)
4924 tree this_dest = vec_dsts[i];
4925 enum tree_code c1 = code1, c2 = code2;
4926 if (i == 0 && codecvt2 != ERROR_MARK)
4928 c1 = codecvt1;
4929 c2 = codecvt2;
4931 vect_create_vectorized_promotion_stmts (&vec_oprnds0,
4932 &vec_oprnds1,
4933 stmt, this_dest, gsi,
4934 c1, c2, decl1, decl2,
4935 op_type);
4938 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4940 if (cvt_type)
4942 if (codecvt1 == CALL_EXPR)
4944 new_stmt = gimple_build_call (decl1, 1, vop0);
4945 new_temp = make_ssa_name (vec_dest, new_stmt);
4946 gimple_call_set_lhs (new_stmt, new_temp);
4948 else
4950 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
4951 new_temp = make_ssa_name (vec_dest);
4952 new_stmt = gimple_build_assign (new_temp, codecvt1,
4953 vop0);
4956 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4958 else
4959 new_stmt = SSA_NAME_DEF_STMT (vop0);
4961 if (slp_node)
4962 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4963 else
4965 if (!prev_stmt_info)
4966 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
4967 else
4968 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4969 prev_stmt_info = vinfo_for_stmt (new_stmt);
4974 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4975 break;
4977 case NARROW:
4978 /* In case the vectorization factor (VF) is bigger than the number
4979 of elements that we can fit in a vectype (nunits), we have to
4980 generate more than one vector stmt - i.e., we need to "unroll"
4981 the vector stmt by a factor of VF/nunits. */
4982 for (j = 0; j < ncopies; j++)
4984 /* Handle uses. */
4985 if (slp_node)
4986 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4987 slp_node);
4988 else
4990 vec_oprnds0.truncate (0);
4991 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
4992 vect_pow2 (multi_step_cvt) - 1);
4995 /* Arguments are ready. Create the new vector stmts. */
4996 if (cvt_type)
4997 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4999 if (codecvt1 == CALL_EXPR)
5001 new_stmt = gimple_build_call (decl1, 1, vop0);
5002 new_temp = make_ssa_name (vec_dest, new_stmt);
5003 gimple_call_set_lhs (new_stmt, new_temp);
5005 else
5007 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
5008 new_temp = make_ssa_name (vec_dest);
5009 new_stmt = gimple_build_assign (new_temp, codecvt1,
5010 vop0);
5013 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5014 vec_oprnds0[i] = new_temp;
5017 vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
5018 stmt, vec_dsts, gsi,
5019 slp_node, code1,
5020 &prev_stmt_info);
5023 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
5024 break;
5027 vec_oprnds0.release ();
5028 vec_oprnds1.release ();
5029 interm_types.release ();
5031 return true;
5035 /* Function vectorizable_assignment.
5037 Check if STMT performs an assignment (copy) that can be vectorized.
5038 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5039 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
5040 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
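/* Illustrative: a plain SSA copy, a PAREN_EXPR, or a NOP_EXPR /
   VIEW_CONVERT_EXPR that changes neither the number of lanes nor the
   vector size is vectorized here as a simple vector copy, going through
   a VIEW_CONVERT_EXPR on the vector operand when the types differ.  */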
5042 static bool
5043 vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
5044 gimple **vec_stmt, slp_tree slp_node)
5046 tree vec_dest;
5047 tree scalar_dest;
5048 tree op;
5049 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5050 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5051 tree new_temp;
5052 gimple *def_stmt;
5053 enum vect_def_type dt[1] = {vect_unknown_def_type};
5054 int ndts = 1;
5055 int ncopies;
5056 int i, j;
5057 vec<tree> vec_oprnds = vNULL;
5058 tree vop;
5059 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5060 vec_info *vinfo = stmt_info->vinfo;
5061 gimple *new_stmt = NULL;
5062 stmt_vec_info prev_stmt_info = NULL;
5063 enum tree_code code;
5064 tree vectype_in;
5066 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5067 return false;
5069 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5070 && ! vec_stmt)
5071 return false;
5073 /* Is STMT a vectorizable assignment? */
5074 if (!is_gimple_assign (stmt))
5075 return false;
5077 scalar_dest = gimple_assign_lhs (stmt);
5078 if (TREE_CODE (scalar_dest) != SSA_NAME)
5079 return false;
5081 code = gimple_assign_rhs_code (stmt);
5082 if (gimple_assign_single_p (stmt)
5083 || code == PAREN_EXPR
5084 || CONVERT_EXPR_CODE_P (code))
5085 op = gimple_assign_rhs1 (stmt);
5086 else
5087 return false;
5089 if (code == VIEW_CONVERT_EXPR)
5090 op = TREE_OPERAND (op, 0);
5092 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5093 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
5095 /* Multiple types in SLP are handled by creating the appropriate number of
5096 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5097 case of SLP. */
5098 if (slp_node)
5099 ncopies = 1;
5100 else
5101 ncopies = vect_get_num_copies (loop_vinfo, vectype);
5103 gcc_assert (ncopies >= 1);
5105 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[0], &vectype_in))
5107 if (dump_enabled_p ())
5108 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5109 "use not simple.\n");
5110 return false;
5113 /* We can handle NOP_EXPR conversions that do not change the number
5114 of elements or the vector size. */
5115 if ((CONVERT_EXPR_CODE_P (code)
5116 || code == VIEW_CONVERT_EXPR)
5117 && (!vectype_in
5118 || maybe_ne (TYPE_VECTOR_SUBPARTS (vectype_in), nunits)
5119 || maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype)),
5120 GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
5121 return false;
5123 /* We do not handle bit-precision changes. */
5124 if ((CONVERT_EXPR_CODE_P (code)
5125 || code == VIEW_CONVERT_EXPR)
5126 && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
5127 && (!type_has_mode_precision_p (TREE_TYPE (scalar_dest))
5128 || !type_has_mode_precision_p (TREE_TYPE (op)))
5129 /* But a conversion that does not change the bit-pattern is ok. */
5130 && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
5131 > TYPE_PRECISION (TREE_TYPE (op)))
5132 && TYPE_UNSIGNED (TREE_TYPE (op)))
5133 /* Conversion between boolean types of different sizes is
5134 a simple assignment in case their vectypes are the same
5135 boolean vectors. */
5136 && (!VECTOR_BOOLEAN_TYPE_P (vectype)
5137 || !VECTOR_BOOLEAN_TYPE_P (vectype_in)))
5139 if (dump_enabled_p ())
5140 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5141 "type conversion to/from bit-precision "
5142 "unsupported.\n");
5143 return false;
5146 if (!vec_stmt) /* transformation not required. */
5148 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
5149 if (dump_enabled_p ())
5150 dump_printf_loc (MSG_NOTE, vect_location,
5151 "=== vectorizable_assignment ===\n");
5152 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
5153 return true;
5156 /* Transform. */
5157 if (dump_enabled_p ())
5158 dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");
5160 /* Handle def. */
5161 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5163 /* Handle use. */
5164 for (j = 0; j < ncopies; j++)
5166 /* Handle uses. */
5167 if (j == 0)
5168 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
5169 else
5170 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
5172 /* Arguments are ready. Create the new vector stmt. */
5173 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
5175 if (CONVERT_EXPR_CODE_P (code)
5176 || code == VIEW_CONVERT_EXPR)
5177 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
5178 new_stmt = gimple_build_assign (vec_dest, vop);
5179 new_temp = make_ssa_name (vec_dest, new_stmt);
5180 gimple_assign_set_lhs (new_stmt, new_temp);
5181 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5182 if (slp_node)
5183 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
5186 if (slp_node)
5187 continue;
5189 if (j == 0)
5190 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5191 else
5192 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5194 prev_stmt_info = vinfo_for_stmt (new_stmt);
5197 vec_oprnds.release ();
5198 return true;
5202 /* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
5203 either as a shift by a scalar or as a shift by a vector. */
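/* For example (illustrative): for a left shift of ints, this returns
   true if the target implements either the vector-shift-by-scalar optab
   or the vector-shift-by-vector optab for the corresponding vector
   mode, which is the check performed below.  */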
5205 bool
5206 vect_supportable_shift (enum tree_code code, tree scalar_type)
5209 machine_mode vec_mode;
5210 optab optab;
5211 int icode;
5212 tree vectype;
5214 vectype = get_vectype_for_scalar_type (scalar_type);
5215 if (!vectype)
5216 return false;
5218 optab = optab_for_tree_code (code, vectype, optab_scalar);
5219 if (!optab
5220 || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
5222 optab = optab_for_tree_code (code, vectype, optab_vector);
5223 if (!optab
5224 || (optab_handler (optab, TYPE_MODE (vectype))
5225 == CODE_FOR_nothing))
5226 return false;
5229 vec_mode = TYPE_MODE (vectype);
5230 icode = (int) optab_handler (optab, vec_mode);
5231 if (icode == CODE_FOR_nothing)
5232 return false;
5234 return true;
5238 /* Function vectorizable_shift.
5240 Check if STMT performs a shift operation that can be vectorized.
5241 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5242 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
5243 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
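/* Illustrative: a[i] << 3, with a loop-invariant shift amount, can use
   the target's vector-shifted-by-scalar pattern; a[i] << b[i], with a
   per-lane amount, needs the vector-shifted-by-vector form.  The code
   below decides which form applies and whether the target supports it.  */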
5245 static bool
5246 vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
5247 gimple **vec_stmt, slp_tree slp_node)
5249 tree vec_dest;
5250 tree scalar_dest;
5251 tree op0, op1 = NULL;
5252 tree vec_oprnd1 = NULL_TREE;
5253 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5254 tree vectype;
5255 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5256 enum tree_code code;
5257 machine_mode vec_mode;
5258 tree new_temp;
5259 optab optab;
5260 int icode;
5261 machine_mode optab_op2_mode;
5262 gimple *def_stmt;
5263 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
5264 int ndts = 2;
5265 gimple *new_stmt = NULL;
5266 stmt_vec_info prev_stmt_info;
5267 poly_uint64 nunits_in;
5268 poly_uint64 nunits_out;
5269 tree vectype_out;
5270 tree op1_vectype;
5271 int ncopies;
5272 int j, i;
5273 vec<tree> vec_oprnds0 = vNULL;
5274 vec<tree> vec_oprnds1 = vNULL;
5275 tree vop0, vop1;
5276 unsigned int k;
5277 bool scalar_shift_arg = true;
5278 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5279 vec_info *vinfo = stmt_info->vinfo;
5281 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5282 return false;
5284 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5285 && ! vec_stmt)
5286 return false;
5288 /* Is STMT a vectorizable binary/unary operation? */
5289 if (!is_gimple_assign (stmt))
5290 return false;
5292 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
5293 return false;
5295 code = gimple_assign_rhs_code (stmt);
5297 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
5298 || code == RROTATE_EXPR))
5299 return false;
5301 scalar_dest = gimple_assign_lhs (stmt);
5302 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
5303 if (!type_has_mode_precision_p (TREE_TYPE (scalar_dest)))
5305 if (dump_enabled_p ())
5306 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5307 "bit-precision shifts not supported.\n");
5308 return false;
5311 op0 = gimple_assign_rhs1 (stmt);
5312 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
5314 if (dump_enabled_p ())
5315 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5316 "use not simple.\n");
5317 return false;
5319 /* If op0 is an external or constant def, use a vector type with
5320 the same size as the output vector type. */
5321 if (!vectype)
5322 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
5323 if (vec_stmt)
5324 gcc_assert (vectype);
5325 if (!vectype)
5327 if (dump_enabled_p ())
5328 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5329 "no vectype for scalar type\n");
5330 return false;
5333 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
5334 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
5335 if (maybe_ne (nunits_out, nunits_in))
5336 return false;
5338 op1 = gimple_assign_rhs2 (stmt);
5339 if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &op1_vectype))
5341 if (dump_enabled_p ())
5342 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5343 "use not simple.\n");
5344 return false;
5347 /* Multiple types in SLP are handled by creating the appropriate number of
5348 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5349 case of SLP. */
5350 if (slp_node)
5351 ncopies = 1;
5352 else
5353 ncopies = vect_get_num_copies (loop_vinfo, vectype);
5355 gcc_assert (ncopies >= 1);
5357 /* Determine whether the shift amount is a vector or a scalar. If the
5358 shift/rotate amount is a vector, use the vector/vector shift optabs. */
5360 if ((dt[1] == vect_internal_def
5361 || dt[1] == vect_induction_def)
5362 && !slp_node)
5363 scalar_shift_arg = false;
5364 else if (dt[1] == vect_constant_def
5365 || dt[1] == vect_external_def
5366 || dt[1] == vect_internal_def)
5368 /* In SLP, we need to check whether the shift count is the same
5369 for all statements; in loops, if it is a constant or invariant,
5370 it is always a scalar shift. */
5371 if (slp_node)
5373 vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
5374 gimple *slpstmt;
5376 FOR_EACH_VEC_ELT (stmts, k, slpstmt)
5377 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
5378 scalar_shift_arg = false;
5381 /* If the shift amount is computed by a pattern stmt, we cannot
5382 use the scalar amount directly, so give up and use a vector
5383 shift. */
5384 if (dt[1] == vect_internal_def)
5386 gimple *def = SSA_NAME_DEF_STMT (op1);
5387 if (is_pattern_stmt_p (vinfo_for_stmt (def)))
5388 scalar_shift_arg = false;
5391 else
5393 if (dump_enabled_p ())
5394 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5395 "operand mode requires invariant argument.\n");
5396 return false;
5399 /* Vector shifted by vector. */
5400 if (!scalar_shift_arg)
5402 optab = optab_for_tree_code (code, vectype, optab_vector);
5403 if (dump_enabled_p ())
5404 dump_printf_loc (MSG_NOTE, vect_location,
5405 "vector/vector shift/rotate found.\n");
5407 if (!op1_vectype)
5408 op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
5409 if (op1_vectype == NULL_TREE
5410 || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
5412 if (dump_enabled_p ())
5413 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5414 "unusable type for last operand in"
5415 " vector/vector shift/rotate.\n");
5416 return false;
5419 /* See if the machine has a vector-shifted-by-scalar insn, and if not,
5420 see if it has a vector-shifted-by-vector insn. */
5421 else
5423 optab = optab_for_tree_code (code, vectype, optab_scalar);
5424 if (optab
5425 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
5427 if (dump_enabled_p ())
5428 dump_printf_loc (MSG_NOTE, vect_location,
5429 "vector/scalar shift/rotate found.\n");
5431 else
5433 optab = optab_for_tree_code (code, vectype, optab_vector);
5434 if (optab
5435 && (optab_handler (optab, TYPE_MODE (vectype))
5436 != CODE_FOR_nothing))
5438 scalar_shift_arg = false;
5440 if (dump_enabled_p ())
5441 dump_printf_loc (MSG_NOTE, vect_location,
5442 "vector/vector shift/rotate found.\n");
5444 /* Unlike the other binary operators, shifts/rotates have
5445 an int rhs rather than one of the same type as the lhs,
5446 so make sure the scalar is the right type if we are
5447 dealing with vectors of long long/long/short/char. */
5448 if (dt[1] == vect_constant_def)
5449 op1 = fold_convert (TREE_TYPE (vectype), op1);
5450 else if (!useless_type_conversion_p (TREE_TYPE (vectype),
5451 TREE_TYPE (op1)))
5453 if (slp_node
5454 && TYPE_MODE (TREE_TYPE (vectype))
5455 != TYPE_MODE (TREE_TYPE (op1)))
5457 if (dump_enabled_p ())
5458 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5459 "unusable type for last operand in"
5460 " vector/vector shift/rotate.\n");
5461 return false;
5463 if (vec_stmt && !slp_node)
5465 op1 = fold_convert (TREE_TYPE (vectype), op1);
5466 op1 = vect_init_vector (stmt, op1,
5467 TREE_TYPE (vectype), NULL);
5474 /* Supportable by target? */
5475 if (!optab)
5477 if (dump_enabled_p ())
5478 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5479 "no optab.\n");
5480 return false;
5482 vec_mode = TYPE_MODE (vectype);
5483 icode = (int) optab_handler (optab, vec_mode);
5484 if (icode == CODE_FOR_nothing)
5486 if (dump_enabled_p ())
5487 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5488 "op not supported by target.\n");
5489 /* Check only during analysis. */
5490 if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
5491 || (!vec_stmt
5492 && !vect_worthwhile_without_simd_p (vinfo, code)))
5493 return false;
5494 if (dump_enabled_p ())
5495 dump_printf_loc (MSG_NOTE, vect_location,
5496 "proceeding using word mode.\n");
5499 /* Worthwhile without SIMD support? Check only during analysis. */
5500 if (!vec_stmt
5501 && !VECTOR_MODE_P (TYPE_MODE (vectype))
5502 && !vect_worthwhile_without_simd_p (vinfo, code))
5504 if (dump_enabled_p ())
5505 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5506 "not worthwhile without SIMD support.\n");
5507 return false;
5510 if (!vec_stmt) /* transformation not required. */
5512 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
5513 if (dump_enabled_p ())
5514 dump_printf_loc (MSG_NOTE, vect_location,
5515 "=== vectorizable_shift ===\n");
5516 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
5517 return true;
5520 /* Transform. */
5522 if (dump_enabled_p ())
5523 dump_printf_loc (MSG_NOTE, vect_location,
5524 "transform binary/unary operation.\n");
5526 /* Handle def. */
5527 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5529 prev_stmt_info = NULL;
5530 for (j = 0; j < ncopies; j++)
5532 /* Handle uses. */
5533 if (j == 0)
5535 if (scalar_shift_arg)
5537 /* Vector shl and shr insn patterns can be defined with a scalar
5538 operand 2 (the shift operand). In this case, use the constant or
5539 loop-invariant op1 directly, without extending it to vector mode
5540 first. */
5541 optab_op2_mode = insn_data[icode].operand[2].mode;
5542 if (!VECTOR_MODE_P (optab_op2_mode))
5544 if (dump_enabled_p ())
5545 dump_printf_loc (MSG_NOTE, vect_location,
5546 "operand 1 using scalar mode.\n");
5547 vec_oprnd1 = op1;
5548 vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
5549 vec_oprnds1.quick_push (vec_oprnd1);
5550 if (slp_node)
5552 /* Store vec_oprnd1 for every vector stmt to be created
5553 for SLP_NODE. We check during the analysis that all
5554 the shift arguments are the same.
5555 TODO: Allow different constants for different vector
5556 stmts generated for an SLP instance. */
5557 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
5558 vec_oprnds1.quick_push (vec_oprnd1);
5563 /* vec_oprnd1 is available if operand 1 should be of a scalar type
5564 (a special case for certain kinds of vector shifts); otherwise,
5565 operand 1 should be of a vector type (the usual case). */
5566 if (vec_oprnd1)
5567 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
5568 slp_node);
5569 else
5570 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
5571 slp_node);
5573 else
5574 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
5576 /* Arguments are ready. Create the new vector stmt. */
5577 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
5579 vop1 = vec_oprnds1[i];
5580 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
5581 new_temp = make_ssa_name (vec_dest, new_stmt);
5582 gimple_assign_set_lhs (new_stmt, new_temp);
5583 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5584 if (slp_node)
5585 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
5588 if (slp_node)
5589 continue;
5591 if (j == 0)
5592 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5593 else
5594 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5595 prev_stmt_info = vinfo_for_stmt (new_stmt);
5598 vec_oprnds0.release ();
5599 vec_oprnds1.release ();
5601 return true;
5605 /* Function vectorizable_operation.
5607 Check if STMT performs a binary, unary or ternary operation that can
5608 be vectorized.
5609 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5610 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
5611 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5613 static bool
5614 vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
5615 gimple **vec_stmt, slp_tree slp_node)
5617 tree vec_dest;
5618 tree scalar_dest;
5619 tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
5620 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5621 tree vectype;
5622 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5623 enum tree_code code, orig_code;
5624 machine_mode vec_mode;
5625 tree new_temp;
5626 int op_type;
5627 optab optab;
5628 bool target_support_p;
5629 gimple *def_stmt;
5630 enum vect_def_type dt[3]
5631 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
5632 int ndts = 3;
5633 gimple *new_stmt = NULL;
5634 stmt_vec_info prev_stmt_info;
5635 poly_uint64 nunits_in;
5636 poly_uint64 nunits_out;
5637 tree vectype_out;
5638 int ncopies;
5639 int j, i;
5640 vec<tree> vec_oprnds0 = vNULL;
5641 vec<tree> vec_oprnds1 = vNULL;
5642 vec<tree> vec_oprnds2 = vNULL;
5643 tree vop0, vop1, vop2;
5644 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5645 vec_info *vinfo = stmt_info->vinfo;
5647 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5648 return false;
5650 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5651 && ! vec_stmt)
5652 return false;
5654 /* Is STMT a vectorizable binary/unary operation? */
5655 if (!is_gimple_assign (stmt))
5656 return false;
5658 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
5659 return false;
5661 orig_code = code = gimple_assign_rhs_code (stmt);
5663 /* For pointer addition and subtraction, we should use the normal
5664 plus and minus for the vector operation. */
5665 if (code == POINTER_PLUS_EXPR)
5666 code = PLUS_EXPR;
5667 if (code == POINTER_DIFF_EXPR)
5668 code = MINUS_EXPR;
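/* For example (illustrative only): a pointer increment "q = p + 4" is a
   POINTER_PLUS_EXPR and is vectorized as a PLUS_EXPR, while a pointer
   subtraction "n = p - q" is a POINTER_DIFF_EXPR and is vectorized as a
   MINUS_EXPR (with the extra VIEW_CONVERT_EXPR handled below).  */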
5670 /* Support only unary, binary and ternary operations. */
5671 op_type = TREE_CODE_LENGTH (code);
5672 if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
5674 if (dump_enabled_p ())
5675 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5676 "num. args = %d (not unary/binary/ternary op).\n",
5677 op_type);
5678 return false;
5681 scalar_dest = gimple_assign_lhs (stmt);
5682 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
5684 /* Most operations cannot handle bit-precision types without extra
5685 truncations. */
5686 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
5687 && !type_has_mode_precision_p (TREE_TYPE (scalar_dest))
5689 /* The exceptions are bitwise binary operations. */
5689 && code != BIT_IOR_EXPR
5690 && code != BIT_XOR_EXPR
5691 && code != BIT_AND_EXPR)
5693 if (dump_enabled_p ())
5694 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5695 "bit-precision arithmetic not supported.\n");
5696 return false;
5699 op0 = gimple_assign_rhs1 (stmt);
5700 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
5702 if (dump_enabled_p ())
5703 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5704 "use not simple.\n");
5705 return false;
5707 /* If op0 is an external or constant def, use a vector type with
5708 the same size as the output vector type. */
5709 if (!vectype)
5711 /* For a boolean type we cannot determine the vectype from an
5712 invariant value (we don't know whether it is a vector
5713 of booleans or a vector of integers). We use the output
5714 vectype because operations on booleans don't change the
5715 type. */
5716 if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op0)))
5718 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (scalar_dest)))
5720 if (dump_enabled_p ())
5721 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5722 "not supported operation on bool value.\n");
5723 return false;
5725 vectype = vectype_out;
5727 else
5728 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
5730 if (vec_stmt)
5731 gcc_assert (vectype);
5732 if (!vectype)
5734 if (dump_enabled_p ())
5736 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5737 "no vectype for scalar type ");
5738 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
5739 TREE_TYPE (op0));
5740 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
5743 return false;
5746 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
5747 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
5748 if (maybe_ne (nunits_out, nunits_in))
5749 return false;
5751 if (op_type == binary_op || op_type == ternary_op)
5753 op1 = gimple_assign_rhs2 (stmt);
5754 if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]))
5756 if (dump_enabled_p ())
5757 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5758 "use not simple.\n");
5759 return false;
5762 if (op_type == ternary_op)
5764 op2 = gimple_assign_rhs3 (stmt);
5765 if (!vect_is_simple_use (op2, vinfo, &def_stmt, &dt[2]))
5767 if (dump_enabled_p ())
5768 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5769 "use not simple.\n");
5770 return false;
5774 /* Multiple types in SLP are handled by creating the appropriate number of
5775 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5776 case of SLP. */
5777 if (slp_node)
5778 ncopies = 1;
5779 else
5780 ncopies = vect_get_num_copies (loop_vinfo, vectype);
5782 gcc_assert (ncopies >= 1);
5784 /* Shifts are handled in vectorizable_shift (). */
5785 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
5786 || code == RROTATE_EXPR)
5787 return false;
5789 /* Supportable by target? */
5791 vec_mode = TYPE_MODE (vectype);
5792 if (code == MULT_HIGHPART_EXPR)
5793 target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
5794 else
5796 optab = optab_for_tree_code (code, vectype, optab_default);
5797 if (!optab)
5799 if (dump_enabled_p ())
5800 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5801 "no optab.\n");
5802 return false;
5804 target_support_p = (optab_handler (optab, vec_mode)
5805 != CODE_FOR_nothing);
5808 if (!target_support_p)
5810 if (dump_enabled_p ())
5811 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5812 "op not supported by target.\n");
5813 /* Check only during analysis. */
5814 if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
5815 || (!vec_stmt && !vect_worthwhile_without_simd_p (vinfo, code)))
5816 return false;
5817 if (dump_enabled_p ())
5818 dump_printf_loc (MSG_NOTE, vect_location,
5819 "proceeding using word mode.\n");
5822 /* Worthwhile without SIMD support? Check only during analysis. */
5823 if (!VECTOR_MODE_P (vec_mode)
5824 && !vec_stmt
5825 && !vect_worthwhile_without_simd_p (vinfo, code))
5827 if (dump_enabled_p ())
5828 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5829 "not worthwhile without SIMD support.\n");
5830 return false;
5833 if (!vec_stmt) /* transformation not required. */
5835 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
5836 if (dump_enabled_p ())
5837 dump_printf_loc (MSG_NOTE, vect_location,
5838 "=== vectorizable_operation ===\n");
5839 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
5840 return true;
5843 /* Transform. */
5845 if (dump_enabled_p ())
5846 dump_printf_loc (MSG_NOTE, vect_location,
5847 "transform binary/unary operation.\n");
5849 /* Handle def. */
5850 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5852 /* POINTER_DIFF_EXPR has pointer arguments which are vectorized as
5853 vectors with unsigned elements, but the result is signed. So, we
5854 need to compute the MINUS_EXPR into a vectype temporary and
5855 VIEW_CONVERT_EXPR it into the final vectype_out result. */
5856 tree vec_cvt_dest = NULL_TREE;
5857 if (orig_code == POINTER_DIFF_EXPR)
5858 vec_cvt_dest = vect_create_destination_var (scalar_dest, vectype_out);
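/* Sketch of the stmts this produces (the SSA names are illustrative only):

     vect_tmp_1 = vect_p_2 - vect_q_3;                        <-- unsigned vectype
     vect_res_4 = VIEW_CONVERT_EXPR<vectype_out>(vect_tmp_1); <-- signed vectype_out

   This matches the vec_cvt_dest handling in the transform loop below.  */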
5860 /* In case the vectorization factor (VF) is bigger than the number
5861 of elements that we can fit in a vectype (nunits), we have to generate
5862 more than one vector stmt, i.e., we need to "unroll" the
5863 vector stmt by a factor VF/nunits. In doing so, we record a pointer
5864 from one copy of the vector stmt to the next, in the field
5865 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
5866 stages to find the correct vector defs to be used when vectorizing
5867 stmts that use the defs of the current stmt. The example below
5868 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
5869 we need to create 4 vectorized stmts):
5871 before vectorization:
5872 RELATED_STMT VEC_STMT
5873 S1: x = memref - -
5874 S2: z = x + 1 - -
5876 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
5877 there):
5878 RELATED_STMT VEC_STMT
5879 VS1_0: vx0 = memref0 VS1_1 -
5880 VS1_1: vx1 = memref1 VS1_2 -
5881 VS1_2: vx2 = memref2 VS1_3 -
5882 VS1_3: vx3 = memref3 - -
5883 S1: x = load - VS1_0
5884 S2: z = x + 1 - -
5886 step 2: vectorize stmt S2 (done here):
5887 To vectorize stmt S2 we first need to find the relevant vector
5888 def for the first operand 'x'. This is, as usual, obtained from
5889 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
5890 that defines 'x' (S1). This way we find the stmt VS1_0, and the
5891 relevant vector def 'vx0'. Having found 'vx0' we can generate
5892 the vector stmt VS2_0, and as usual, record it in the
5893 STMT_VINFO_VEC_STMT of stmt S2.
5894 When creating the second copy (VS2_1), we obtain the relevant vector
5895 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
5896 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
5897 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
5898 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
5899 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
5900 chain of stmts and pointers:
5901 RELATED_STMT VEC_STMT
5902 VS1_0: vx0 = memref0 VS1_1 -
5903 VS1_1: vx1 = memref1 VS1_2 -
5904 VS1_2: vx2 = memref2 VS1_3 -
5905 VS1_3: vx3 = memref3 - -
5906 S1: x = load - VS1_0
5907 VS2_0: vz0 = vx0 + v1 VS2_1 -
5908 VS2_1: vz1 = vx1 + v1 VS2_2 -
5909 VS2_2: vz2 = vx2 + v1 VS2_3 -
5910 VS2_3: vz3 = vx3 + v1 - -
5911 S2: z = x + 1 - VS2_0 */
5913 prev_stmt_info = NULL;
5914 for (j = 0; j < ncopies; j++)
5916 /* Handle uses. */
5917 if (j == 0)
5919 if (op_type == binary_op || op_type == ternary_op)
5920 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
5921 slp_node);
5922 else
5923 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
5924 slp_node);
5925 if (op_type == ternary_op)
5926 vect_get_vec_defs (op2, NULL_TREE, stmt, &vec_oprnds2, NULL,
5927 slp_node);
5929 else
5931 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
5932 if (op_type == ternary_op)
5934 tree vec_oprnd = vec_oprnds2.pop ();
5935 vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
5936 vec_oprnd));
5940 /* Arguments are ready. Create the new vector stmt. */
5941 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
5943 vop1 = ((op_type == binary_op || op_type == ternary_op)
5944 ? vec_oprnds1[i] : NULL_TREE);
5945 vop2 = ((op_type == ternary_op)
5946 ? vec_oprnds2[i] : NULL_TREE);
5947 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2);
5948 new_temp = make_ssa_name (vec_dest, new_stmt);
5949 gimple_assign_set_lhs (new_stmt, new_temp);
5950 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5951 if (vec_cvt_dest)
5953 new_temp = build1 (VIEW_CONVERT_EXPR, vectype_out, new_temp);
5954 new_stmt = gimple_build_assign (vec_cvt_dest, VIEW_CONVERT_EXPR,
5955 new_temp);
5956 new_temp = make_ssa_name (vec_cvt_dest, new_stmt);
5957 gimple_assign_set_lhs (new_stmt, new_temp);
5958 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5960 if (slp_node)
5961 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
5964 if (slp_node)
5965 continue;
5967 if (j == 0)
5968 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5969 else
5970 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5971 prev_stmt_info = vinfo_for_stmt (new_stmt);
5974 vec_oprnds0.release ();
5975 vec_oprnds1.release ();
5976 vec_oprnds2.release ();
5978 return true;
5981 /* A helper function to ensure data reference DR's base alignment. */
5983 static void
5984 ensure_base_align (struct data_reference *dr)
5986 if (!dr->aux)
5987 return;
5989 if (DR_VECT_AUX (dr)->base_misaligned)
5991 tree base_decl = DR_VECT_AUX (dr)->base_decl;
5993 unsigned int align_base_to = DR_TARGET_ALIGNMENT (dr) * BITS_PER_UNIT;
5995 if (decl_in_symtab_p (base_decl))
5996 symtab_node::get (base_decl)->increase_alignment (align_base_to);
5997 else
5999 SET_DECL_ALIGN (base_decl, align_base_to);
6000 DECL_USER_ALIGN (base_decl) = 1;
6002 DR_VECT_AUX (dr)->base_misaligned = false;
6007 /* Function get_group_alias_ptr_type.
6009 Return the alias type for the group starting at FIRST_STMT. */
6011 static tree
6012 get_group_alias_ptr_type (gimple *first_stmt)
6014 struct data_reference *first_dr, *next_dr;
6015 gimple *next_stmt;
6017 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
6018 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first_stmt));
6019 while (next_stmt)
6021 next_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (next_stmt));
6022 if (get_alias_set (DR_REF (first_dr))
6023 != get_alias_set (DR_REF (next_dr)))
6025 if (dump_enabled_p ())
6026 dump_printf_loc (MSG_NOTE, vect_location,
6027 "conflicting alias set types.\n");
6028 return ptr_type_node;
6030 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
6032 return reference_alias_ptr_type (DR_REF (first_dr));
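/* For example (a hypothetical group): if one store in the interleaving
   chain writes an int field and another writes a float field of the same
   struct, their DR_REFs have different alias sets, so we conservatively
   return ptr_type_node, whose alias set conflicts with everything.  */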
6036 /* Function vectorizable_store.
6038 Check if STMT defines a non-scalar data-ref (array/pointer/structure) that
6039 can be vectorized.
6040 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
6041 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
6042 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
6044 static bool
6045 vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
6046 slp_tree slp_node)
6048 tree data_ref;
6049 tree op;
6050 tree vec_oprnd = NULL_TREE;
6051 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
6052 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
6053 tree elem_type;
6054 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6055 struct loop *loop = NULL;
6056 machine_mode vec_mode;
6057 tree dummy;
6058 enum dr_alignment_support alignment_support_scheme;
6059 gimple *def_stmt;
6060 enum vect_def_type rhs_dt = vect_unknown_def_type;
6061 enum vect_def_type mask_dt = vect_unknown_def_type;
6062 stmt_vec_info prev_stmt_info = NULL;
6063 tree dataref_ptr = NULL_TREE;
6064 tree dataref_offset = NULL_TREE;
6065 gimple *ptr_incr = NULL;
6066 int ncopies;
6067 int j;
6068 gimple *next_stmt, *first_stmt;
6069 bool grouped_store;
6070 unsigned int group_size, i;
6071 vec<tree> oprnds = vNULL;
6072 vec<tree> result_chain = vNULL;
6073 bool inv_p;
6074 tree offset = NULL_TREE;
6075 vec<tree> vec_oprnds = vNULL;
6076 bool slp = (slp_node != NULL);
6077 unsigned int vec_num;
6078 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
6079 vec_info *vinfo = stmt_info->vinfo;
6080 tree aggr_type;
6081 gather_scatter_info gs_info;
6082 gimple *new_stmt;
6083 poly_uint64 vf;
6084 vec_load_store_type vls_type;
6085 tree ref_type;
6087 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
6088 return false;
6090 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
6091 && ! vec_stmt)
6092 return false;
6094 /* Is vectorizable store? */
6096 tree mask = NULL_TREE, mask_vectype = NULL_TREE;
6097 if (is_gimple_assign (stmt))
6099 tree scalar_dest = gimple_assign_lhs (stmt);
6100 if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
6101 && is_pattern_stmt_p (stmt_info))
6102 scalar_dest = TREE_OPERAND (scalar_dest, 0);
6103 if (TREE_CODE (scalar_dest) != ARRAY_REF
6104 && TREE_CODE (scalar_dest) != BIT_FIELD_REF
6105 && TREE_CODE (scalar_dest) != INDIRECT_REF
6106 && TREE_CODE (scalar_dest) != COMPONENT_REF
6107 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
6108 && TREE_CODE (scalar_dest) != REALPART_EXPR
6109 && TREE_CODE (scalar_dest) != MEM_REF)
6110 return false;
6112 else
6114 gcall *call = dyn_cast <gcall *> (stmt);
6115 if (!call || !gimple_call_internal_p (call))
6116 return false;
6118 internal_fn ifn = gimple_call_internal_fn (call);
6119 if (!internal_store_fn_p (ifn))
6120 return false;
6122 if (slp_node != NULL)
6124 if (dump_enabled_p ())
6125 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6126 "SLP of masked stores not supported.\n");
6127 return false;
6130 int mask_index = internal_fn_mask_index (ifn);
6131 if (mask_index >= 0)
6133 mask = gimple_call_arg (call, mask_index);
6134 if (!vect_check_load_store_mask (stmt, mask, &mask_dt,
6135 &mask_vectype))
6136 return false;
6140 op = vect_get_store_rhs (stmt);
6142 /* Cannot have hybrid store SLP -- that would mean storing to the
6143 same location twice. */
6144 gcc_assert (slp == PURE_SLP_STMT (stmt_info));
6146 tree vectype = STMT_VINFO_VECTYPE (stmt_info), rhs_vectype = NULL_TREE;
6147 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
6149 if (loop_vinfo)
6151 loop = LOOP_VINFO_LOOP (loop_vinfo);
6152 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
6154 else
6155 vf = 1;
6157 /* Multiple types in SLP are handled by creating the appropriate number of
6158 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
6159 case of SLP. */
6160 if (slp)
6161 ncopies = 1;
6162 else
6163 ncopies = vect_get_num_copies (loop_vinfo, vectype);
6165 gcc_assert (ncopies >= 1);
6167 /* FORNOW. This restriction should be relaxed. */
6168 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
6170 if (dump_enabled_p ())
6171 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6172 "multiple types in nested loop.\n");
6173 return false;
6176 if (!vect_check_store_rhs (stmt, op, &rhs_dt, &rhs_vectype, &vls_type))
6177 return false;
6179 elem_type = TREE_TYPE (vectype);
6180 vec_mode = TYPE_MODE (vectype);
6182 if (!STMT_VINFO_DATA_REF (stmt_info))
6183 return false;
6185 vect_memory_access_type memory_access_type;
6186 if (!get_load_store_type (stmt, vectype, slp, mask, vls_type, ncopies,
6187 &memory_access_type, &gs_info))
6188 return false;
6190 if (mask)
6192 if (memory_access_type == VMAT_CONTIGUOUS)
6194 if (!VECTOR_MODE_P (vec_mode)
6195 || !can_vec_mask_load_store_p (vec_mode,
6196 TYPE_MODE (mask_vectype), false))
6197 return false;
6199 else if (memory_access_type != VMAT_LOAD_STORE_LANES
6200 && (memory_access_type != VMAT_GATHER_SCATTER || gs_info.decl))
6202 if (dump_enabled_p ())
6203 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6204 "unsupported access type for masked store.\n");
6205 return false;
6208 else
6210 /* FORNOW. In some cases we can vectorize even if the data type is not
6211 supported (e.g. array initialization with 0). */
6212 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
6213 return false;
6216 grouped_store = (STMT_VINFO_GROUPED_ACCESS (stmt_info)
6217 && memory_access_type != VMAT_GATHER_SCATTER
6218 && (slp || memory_access_type != VMAT_CONTIGUOUS));
6219 if (grouped_store)
6221 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
6222 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
6223 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
6225 else
6227 first_stmt = stmt;
6228 first_dr = dr;
6229 group_size = vec_num = 1;
6232 if (!vec_stmt) /* transformation not required. */
6234 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
6236 if (loop_vinfo
6237 && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
6238 check_load_store_masking (loop_vinfo, vectype, vls_type, group_size,
6239 memory_access_type, &gs_info);
6241 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
6242 /* The SLP costs are calculated during SLP analysis. */
6243 if (!PURE_SLP_STMT (stmt_info))
6244 vect_model_store_cost (stmt_info, ncopies, memory_access_type,
6245 vls_type, NULL, NULL, NULL);
6246 return true;
6248 gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
6250 /* Transform. */
6252 ensure_base_align (dr);
6254 if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl)
6256 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, src;
6257 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
6258 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
6259 tree ptr, mask, var, scale, perm_mask = NULL_TREE;
6260 edge pe = loop_preheader_edge (loop);
6261 gimple_seq seq;
6262 basic_block new_bb;
6263 enum { NARROW, NONE, WIDEN } modifier;
6264 poly_uint64 scatter_off_nunits
6265 = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);
6267 if (known_eq (nunits, scatter_off_nunits))
6268 modifier = NONE;
6269 else if (known_eq (nunits * 2, scatter_off_nunits))
6271 modifier = WIDEN;
6273 /* Currently gathers and scatters are only supported for
6274 fixed-length vectors. */
6275 unsigned int count = scatter_off_nunits.to_constant ();
6276 vec_perm_builder sel (count, count, 1);
6277 for (i = 0; i < (unsigned int) count; ++i)
6278 sel.quick_push (i | (count / 2));
6280 vec_perm_indices indices (sel, 1, count);
6281 perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype,
6282 indices);
6283 gcc_assert (perm_mask != NULL_TREE);
6285 else if (known_eq (nunits, scatter_off_nunits * 2))
6287 modifier = NARROW;
6289 /* Currently gathers and scatters are only supported for
6290 fixed-length vectors. */
6291 unsigned int count = nunits.to_constant ();
6292 vec_perm_builder sel (count, count, 1);
6293 for (i = 0; i < (unsigned int) count; ++i)
6294 sel.quick_push (i | (count / 2));
6296 vec_perm_indices indices (sel, 2, count);
6297 perm_mask = vect_gen_perm_mask_checked (vectype, indices);
6298 gcc_assert (perm_mask != NULL_TREE);
6299 ncopies *= 2;
6301 else
6302 gcc_unreachable ();
6304 rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
6305 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6306 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6307 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6308 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6309 scaletype = TREE_VALUE (arglist);
6311 gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
6312 && TREE_CODE (rettype) == VOID_TYPE);
6314 ptr = fold_convert (ptrtype, gs_info.base);
6315 if (!is_gimple_min_invariant (ptr))
6317 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
6318 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
6319 gcc_assert (!new_bb);
6322 /* Currently we support only unconditional scatter stores,
6323 so mask should be all ones. */
6324 mask = build_int_cst (masktype, -1);
6325 mask = vect_init_vector (stmt, mask, masktype, NULL);
6327 scale = build_int_cst (scaletype, gs_info.scale);
6329 prev_stmt_info = NULL;
6330 for (j = 0; j < ncopies; ++j)
6332 if (j == 0)
6334 src = vec_oprnd1
6335 = vect_get_vec_def_for_operand (op, stmt);
6336 op = vec_oprnd0
6337 = vect_get_vec_def_for_operand (gs_info.offset, stmt);
6339 else if (modifier != NONE && (j & 1))
6341 if (modifier == WIDEN)
6343 src = vec_oprnd1
6344 = vect_get_vec_def_for_stmt_copy (rhs_dt, vec_oprnd1);
6345 op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
6346 stmt, gsi);
6348 else if (modifier == NARROW)
6350 src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask,
6351 stmt, gsi);
6352 op = vec_oprnd0
6353 = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt,
6354 vec_oprnd0);
6356 else
6357 gcc_unreachable ();
6359 else
6361 src = vec_oprnd1
6362 = vect_get_vec_def_for_stmt_copy (rhs_dt, vec_oprnd1);
6363 op = vec_oprnd0
6364 = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt,
6365 vec_oprnd0);
6368 if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
6370 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src)),
6371 TYPE_VECTOR_SUBPARTS (srctype)));
6372 var = vect_get_new_ssa_name (srctype, vect_simple_var);
6373 src = build1 (VIEW_CONVERT_EXPR, srctype, src);
6374 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
6375 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6376 src = var;
6379 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
6381 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
6382 TYPE_VECTOR_SUBPARTS (idxtype)));
6383 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
6384 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
6385 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
6386 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6387 op = var;
6390 new_stmt
6391 = gimple_build_call (gs_info.decl, 5, ptr, mask, op, src, scale);
6393 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6395 if (prev_stmt_info == NULL)
6396 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6397 else
6398 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6399 prev_stmt_info = vinfo_for_stmt (new_stmt);
6401 return true;
6404 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
6406 gimple *group_stmt = GROUP_FIRST_ELEMENT (stmt_info);
6407 GROUP_STORE_COUNT (vinfo_for_stmt (group_stmt))++;
6410 if (grouped_store)
6412 /* FORNOW */
6413 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
6415 /* We vectorize all the stmts of the interleaving group when we
6416 reach the last stmt in the group. */
6417 if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
6418 < GROUP_SIZE (vinfo_for_stmt (first_stmt))
6419 && !slp)
6421 *vec_stmt = NULL;
6422 return true;
6425 if (slp)
6427 grouped_store = false;
6428 /* VEC_NUM is the number of vect stmts to be created for this
6429 group. */
6430 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6431 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
6432 gcc_assert (GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt)) == first_stmt);
6433 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
6434 op = vect_get_store_rhs (first_stmt);
6436 else
6437 /* VEC_NUM is the number of vect stmts to be created for this
6438 group. */
6439 vec_num = group_size;
6441 ref_type = get_group_alias_ptr_type (first_stmt);
6443 else
6444 ref_type = reference_alias_ptr_type (DR_REF (first_dr));
6446 if (dump_enabled_p ())
6447 dump_printf_loc (MSG_NOTE, vect_location,
6448 "transform store. ncopies = %d\n", ncopies);
6450 if (memory_access_type == VMAT_ELEMENTWISE
6451 || memory_access_type == VMAT_STRIDED_SLP)
6453 gimple_stmt_iterator incr_gsi;
6454 bool insert_after;
6455 gimple *incr;
6456 tree offvar;
6457 tree ivstep;
6458 tree running_off;
6459 tree stride_base, stride_step, alias_off;
6460 tree vec_oprnd;
6461 unsigned int g;
6462 /* Checked by get_load_store_type. */
6463 unsigned int const_nunits = nunits.to_constant ();
6465 gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
6466 gcc_assert (!nested_in_vect_loop_p (loop, stmt));
6468 stride_base
6469 = fold_build_pointer_plus
6470 (DR_BASE_ADDRESS (first_dr),
6471 size_binop (PLUS_EXPR,
6472 convert_to_ptrofftype (DR_OFFSET (first_dr)),
6473 convert_to_ptrofftype (DR_INIT (first_dr))));
6474 stride_step = fold_convert (sizetype, DR_STEP (first_dr));
6476 /* For a store with loop-invariant (but other than power-of-2)
6477 stride (i.e. not a grouped access) like so:
6479 for (i = 0; i < n; i += stride)
6480 array[i] = ...;
6482 we generate a new induction variable and new stores from
6483 the components of the (vectorized) rhs:
6485 for (j = 0; ; j += VF*stride)
6486 vectemp = ...;
6487 tmp1 = vectemp[0];
6488 array[j] = tmp1;
6489 tmp2 = vectemp[1];
6490 array[j + stride] = tmp2;
6494 unsigned nstores = const_nunits;
6495 unsigned lnel = 1;
6496 tree ltype = elem_type;
6497 tree lvectype = vectype;
6498 if (slp)
6500 if (group_size < const_nunits
6501 && const_nunits % group_size == 0)
6503 nstores = const_nunits / group_size;
6504 lnel = group_size;
6505 ltype = build_vector_type (elem_type, group_size);
6506 lvectype = vectype;
6508 /* First check whether the vec_extract optab supports extraction
6509 of vector elts directly; if it does not, try the fallback below. */
6510 scalar_mode elmode = SCALAR_TYPE_MODE (elem_type);
6511 machine_mode vmode;
6512 if (!mode_for_vector (elmode, group_size).exists (&vmode)
6513 || !VECTOR_MODE_P (vmode)
6514 || !targetm.vector_mode_supported_p (vmode)
6515 || (convert_optab_handler (vec_extract_optab,
6516 TYPE_MODE (vectype), vmode)
6517 == CODE_FOR_nothing))
6519 /* Try to avoid emitting an extract of vector elements
6520 by performing the extracts using an integer type of the
6521 same size, extracting from a vector of those and then
6522 re-interpreting it as the original vector type if
6523 supported. */
6524 unsigned lsize
6525 = group_size * GET_MODE_BITSIZE (elmode);
6526 elmode = int_mode_for_size (lsize, 0).require ();
6527 unsigned int lnunits = const_nunits / group_size;
6528 /* If we can't construct such a vector, fall back to
6529 element extracts from the original vector type and
6530 element-size stores. */
6531 if (mode_for_vector (elmode, lnunits).exists (&vmode)
6532 && VECTOR_MODE_P (vmode)
6533 && targetm.vector_mode_supported_p (vmode)
6534 && (convert_optab_handler (vec_extract_optab,
6535 vmode, elmode)
6536 != CODE_FOR_nothing))
6538 nstores = lnunits;
6539 lnel = group_size;
6540 ltype = build_nonstandard_integer_type (lsize, 1);
6541 lvectype = build_vector_type (ltype, nstores);
6543 /* Else fall back to vector extraction anyway.
6544 Fewer stores are more important than avoiding spilling
6545 of the vector we extract from. Unlike the construction
6546 case in vectorizable_load, no store-forwarding issue
6547 exists here on reasonable archs. */
6550 else if (group_size >= const_nunits
6551 && group_size % const_nunits == 0)
6553 nstores = 1;
6554 lnel = const_nunits;
6555 ltype = vectype;
6556 lvectype = vectype;
6558 ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
6559 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6562 ivstep = stride_step;
6563 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
6564 build_int_cst (TREE_TYPE (ivstep), vf));
6566 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
6568 stride_base = cse_and_gimplify_to_preheader (loop_vinfo, stride_base);
6569 ivstep = cse_and_gimplify_to_preheader (loop_vinfo, ivstep);
6570 create_iv (stride_base, ivstep, NULL,
6571 loop, &incr_gsi, insert_after,
6572 &offvar, NULL);
6573 incr = gsi_stmt (incr_gsi);
6574 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
6576 stride_step = cse_and_gimplify_to_preheader (loop_vinfo, stride_step);
6578 prev_stmt_info = NULL;
6579 alias_off = build_int_cst (ref_type, 0);
6580 next_stmt = first_stmt;
6581 for (g = 0; g < group_size; g++)
6583 running_off = offvar;
6584 if (g)
6586 tree size = TYPE_SIZE_UNIT (ltype);
6587 tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
6588 size);
6589 tree newoff = copy_ssa_name (running_off, NULL);
6590 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6591 running_off, pos);
6592 vect_finish_stmt_generation (stmt, incr, gsi);
6593 running_off = newoff;
6595 unsigned int group_el = 0;
6596 unsigned HOST_WIDE_INT
6597 elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
6598 for (j = 0; j < ncopies; j++)
6600 /* We've set op and dt above, from vect_get_store_rhs,
6601 and first_stmt == stmt. */
6602 if (j == 0)
6604 if (slp)
6606 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL,
6607 slp_node);
6608 vec_oprnd = vec_oprnds[0];
6610 else
6612 op = vect_get_store_rhs (next_stmt);
6613 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
6616 else
6618 if (slp)
6619 vec_oprnd = vec_oprnds[j];
6620 else
6622 vect_is_simple_use (op, vinfo, &def_stmt, &rhs_dt);
6623 vec_oprnd = vect_get_vec_def_for_stmt_copy (rhs_dt,
6624 vec_oprnd);
6627 /* Pun the vector to extract from if necessary. */
6628 if (lvectype != vectype)
6630 tree tem = make_ssa_name (lvectype);
6631 gimple *pun
6632 = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
6633 lvectype, vec_oprnd));
6634 vect_finish_stmt_generation (stmt, pun, gsi);
6635 vec_oprnd = tem;
6637 for (i = 0; i < nstores; i++)
6639 tree newref, newoff;
6640 gimple *incr, *assign;
6641 tree size = TYPE_SIZE (ltype);
6642 /* Extract the i'th component. */
6643 tree pos = fold_build2 (MULT_EXPR, bitsizetype,
6644 bitsize_int (i), size);
6645 tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
6646 size, pos);
6648 elem = force_gimple_operand_gsi (gsi, elem, true,
6649 NULL_TREE, true,
6650 GSI_SAME_STMT);
6652 tree this_off = build_int_cst (TREE_TYPE (alias_off),
6653 group_el * elsz);
6654 newref = build2 (MEM_REF, ltype,
6655 running_off, this_off);
6657 /* And store it to *running_off. */
6658 assign = gimple_build_assign (newref, elem);
6659 vect_finish_stmt_generation (stmt, assign, gsi);
6661 group_el += lnel;
6662 if (! slp
6663 || group_el == group_size)
6665 newoff = copy_ssa_name (running_off, NULL);
6666 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6667 running_off, stride_step);
6668 vect_finish_stmt_generation (stmt, incr, gsi);
6670 running_off = newoff;
6671 group_el = 0;
6673 if (g == group_size - 1
6674 && !slp)
6676 if (j == 0 && i == 0)
6677 STMT_VINFO_VEC_STMT (stmt_info)
6678 = *vec_stmt = assign;
6679 else
6680 STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign;
6681 prev_stmt_info = vinfo_for_stmt (assign);
6685 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
6686 if (slp)
6687 break;
6690 vec_oprnds.release ();
6691 return true;
6694 auto_vec<tree> dr_chain (group_size);
6695 oprnds.create (group_size);
6697 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
6698 gcc_assert (alignment_support_scheme);
6699 bool masked_loop_p = (loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
6700 /* Targets with store-lane instructions must not require explicit
6701 realignment. vect_supportable_dr_alignment always returns either
6702 dr_aligned or dr_unaligned_supported for masked operations. */
6703 gcc_assert ((memory_access_type != VMAT_LOAD_STORE_LANES
6704 && !mask
6705 && !masked_loop_p)
6706 || alignment_support_scheme == dr_aligned
6707 || alignment_support_scheme == dr_unaligned_supported);
6709 if (memory_access_type == VMAT_CONTIGUOUS_DOWN
6710 || memory_access_type == VMAT_CONTIGUOUS_REVERSE)
6711 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
6713 tree bump;
6714 tree vec_offset = NULL_TREE;
6715 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
6717 aggr_type = NULL_TREE;
6718 bump = NULL_TREE;
6720 else if (memory_access_type == VMAT_GATHER_SCATTER)
6722 aggr_type = elem_type;
6723 vect_get_strided_load_store_ops (stmt, loop_vinfo, &gs_info,
6724 &bump, &vec_offset);
6726 else
6728 if (memory_access_type == VMAT_LOAD_STORE_LANES)
6729 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
6730 else
6731 aggr_type = vectype;
6732 bump = vect_get_data_ptr_increment (dr, aggr_type, memory_access_type);
6735 if (mask)
6736 LOOP_VINFO_HAS_MASK_STORE (loop_vinfo) = true;
6738 /* In case the vectorization factor (VF) is bigger than the number
6739 of elements that we can fit in a vectype (nunits), we have to generate
6740 more than one vector stmt, i.e., we need to "unroll" the
6741 vector stmt by a factor VF/nunits. For more details see documentation in
6742 vect_get_vec_def_for_copy_stmt. */
6744 /* In case of interleaving (non-unit grouped access):
6746 S1: &base + 2 = x2
6747 S2: &base = x0
6748 S3: &base + 1 = x1
6749 S4: &base + 3 = x3
6751 We create vectorized stores starting from the base address (the access of
6752 the first stmt in the chain, S2 in the above example) when the last store
6753 stmt of the chain (S4) is reached:
6755 VS1: &base = vx2
6756 VS2: &base + vec_size*1 = vx0
6757 VS3: &base + vec_size*2 = vx1
6758 VS4: &base + vec_size*3 = vx3
6760 Then permutation statements are generated:
6762 VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
6763 VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
6766 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
6767 (the order of the data-refs in the output of vect_permute_store_chain
6768 corresponds to the order of scalar stmts in the interleaving chain - see
6769 the documentation of vect_permute_store_chain()).
6771 In case of both multiple types and interleaving, above vector stores and
6772 permutation stmts are created for every copy. The result vector stmts are
6773 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
6774 STMT_VINFO_RELATED_STMT for the next copies.
6777 prev_stmt_info = NULL;
6778 tree vec_mask = NULL_TREE;
6779 vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
6780 for (j = 0; j < ncopies; j++)
6783 if (j == 0)
6785 if (slp)
6787 /* Get vectorized arguments for SLP_NODE. */
6788 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
6789 NULL, slp_node);
6791 vec_oprnd = vec_oprnds[0];
6793 else
6795 /* For interleaved stores we collect vectorized defs for all the
6796 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
6797 used as an input to vect_permute_store_chain(), and OPRNDS as
6798 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
6800 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
6801 OPRNDS are of size 1. */
6802 next_stmt = first_stmt;
6803 for (i = 0; i < group_size; i++)
6805 /* Since gaps are not supported for interleaved stores,
6806 GROUP_SIZE is the exact number of stmts in the chain.
6807 Therefore, NEXT_STMT can't be NULL_TREE. In case that
6808 there is no interleaving, GROUP_SIZE is 1, and only one
6809 iteration of the loop will be executed. */
6810 op = vect_get_store_rhs (next_stmt);
6811 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
6812 dr_chain.quick_push (vec_oprnd);
6813 oprnds.quick_push (vec_oprnd);
6814 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
6816 if (mask)
6817 vec_mask = vect_get_vec_def_for_operand (mask, stmt,
6818 mask_vectype);
6821 /* We should have caught mismatched types earlier. */
6822 gcc_assert (useless_type_conversion_p (vectype,
6823 TREE_TYPE (vec_oprnd)));
6824 bool simd_lane_access_p
6825 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
6826 if (simd_lane_access_p
6827 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
6828 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
6829 && integer_zerop (DR_OFFSET (first_dr))
6830 && integer_zerop (DR_INIT (first_dr))
6831 && alias_sets_conflict_p (get_alias_set (aggr_type),
6832 get_alias_set (TREE_TYPE (ref_type))))
6834 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
6835 dataref_offset = build_int_cst (ref_type, 0);
6836 inv_p = false;
6838 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
6840 vect_get_gather_scatter_ops (loop, stmt, &gs_info,
6841 &dataref_ptr, &vec_offset);
6842 inv_p = false;
6844 else
6845 dataref_ptr
6846 = vect_create_data_ref_ptr (first_stmt, aggr_type,
6847 simd_lane_access_p ? loop : NULL,
6848 offset, &dummy, gsi, &ptr_incr,
6849 simd_lane_access_p, &inv_p,
6850 NULL_TREE, bump);
6851 gcc_assert (bb_vinfo || !inv_p);
6853 else
6855 /* For interleaved stores we created vectorized defs for all the
6856 defs stored in OPRNDS in the previous iteration (previous copy).
6857 DR_CHAIN is then used as an input to vect_permute_store_chain(),
6858 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
6859 next copy.
6860 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
6861 OPRNDS are of size 1. */
6862 for (i = 0; i < group_size; i++)
6864 op = oprnds[i];
6865 vect_is_simple_use (op, vinfo, &def_stmt, &rhs_dt);
6866 vec_oprnd = vect_get_vec_def_for_stmt_copy (rhs_dt, op);
6867 dr_chain[i] = vec_oprnd;
6868 oprnds[i] = vec_oprnd;
6870 if (mask)
6871 vec_mask = vect_get_vec_def_for_stmt_copy (mask_dt, vec_mask);
6872 if (dataref_offset)
6873 dataref_offset
6874 = int_const_binop (PLUS_EXPR, dataref_offset, bump);
6875 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
6876 vec_offset = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt,
6877 vec_offset);
6878 else
6879 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
6880 bump);
6883 if (memory_access_type == VMAT_LOAD_STORE_LANES)
6885 tree vec_array;
6887 /* Combine all the vectors into an array. */
6888 vec_array = create_vector_array (vectype, vec_num);
6889 for (i = 0; i < vec_num; i++)
6891 vec_oprnd = dr_chain[i];
6892 write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
6895 tree final_mask = NULL;
6896 if (masked_loop_p)
6897 final_mask = vect_get_loop_mask (gsi, masks, ncopies, vectype, j);
6898 if (vec_mask)
6899 final_mask = prepare_load_store_mask (mask_vectype, final_mask,
6900 vec_mask, gsi);
6902 gcall *call;
6903 if (final_mask)
6905 /* Emit:
6906 MASK_STORE_LANES (DATAREF_PTR, ALIAS_PTR, VEC_MASK,
6907 VEC_ARRAY). */
6908 unsigned int align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
6909 tree alias_ptr = build_int_cst (ref_type, align);
6910 call = gimple_build_call_internal (IFN_MASK_STORE_LANES, 4,
6911 dataref_ptr, alias_ptr,
6912 final_mask, vec_array);
6914 else
6916 /* Emit:
6917 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
6918 data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
6919 call = gimple_build_call_internal (IFN_STORE_LANES, 1,
6920 vec_array);
6921 gimple_call_set_lhs (call, data_ref);
6923 gimple_call_set_nothrow (call, true);
6924 new_stmt = call;
6925 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6927 else
6929 new_stmt = NULL;
6930 if (grouped_store)
6932 if (j == 0)
6933 result_chain.create (group_size);
6934 /* Permute. */
6935 vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
6936 &result_chain);
6939 next_stmt = first_stmt;
6940 for (i = 0; i < vec_num; i++)
6942 unsigned align, misalign;
6944 tree final_mask = NULL_TREE;
6945 if (masked_loop_p)
6946 final_mask = vect_get_loop_mask (gsi, masks, vec_num * ncopies,
6947 vectype, vec_num * j + i);
6948 if (vec_mask)
6949 final_mask = prepare_load_store_mask (mask_vectype, final_mask,
6950 vec_mask, gsi);
6952 if (memory_access_type == VMAT_GATHER_SCATTER)
6954 tree scale = size_int (gs_info.scale);
6955 gcall *call;
6956 if (masked_loop_p)
6957 call = gimple_build_call_internal
6958 (IFN_MASK_SCATTER_STORE, 5, dataref_ptr, vec_offset,
6959 scale, vec_oprnd, final_mask);
6960 else
6961 call = gimple_build_call_internal
6962 (IFN_SCATTER_STORE, 4, dataref_ptr, vec_offset,
6963 scale, vec_oprnd);
6964 gimple_call_set_nothrow (call, true);
6965 new_stmt = call;
6966 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6967 break;
6970 if (i > 0)
6971 /* Bump the vector pointer. */
6972 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
6973 stmt, bump);
6975 if (slp)
6976 vec_oprnd = vec_oprnds[i];
6977 else if (grouped_store)
6978 /* For grouped stores vectorized defs are interleaved in
6979 vect_permute_store_chain(). */
6980 vec_oprnd = result_chain[i];
6982 align = DR_TARGET_ALIGNMENT (first_dr);
6983 if (aligned_access_p (first_dr))
6984 misalign = 0;
6985 else if (DR_MISALIGNMENT (first_dr) == -1)
6987 align = dr_alignment (vect_dr_behavior (first_dr));
6988 misalign = 0;
6990 else
6991 misalign = DR_MISALIGNMENT (first_dr);
6992 if (dataref_offset == NULL_TREE
6993 && TREE_CODE (dataref_ptr) == SSA_NAME)
6994 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
6995 misalign);
6997 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
6999 tree perm_mask = perm_mask_for_reverse (vectype);
7000 tree perm_dest
7001 = vect_create_destination_var (vect_get_store_rhs (stmt),
7002 vectype);
7003 tree new_temp = make_ssa_name (perm_dest);
7005 /* Generate the permute statement. */
7006 gimple *perm_stmt
7007 = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
7008 vec_oprnd, perm_mask);
7009 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
7011 perm_stmt = SSA_NAME_DEF_STMT (new_temp);
7012 vec_oprnd = new_temp;
7015 /* Arguments are ready. Create the new vector stmt. */
7016 if (final_mask)
7018 align = least_bit_hwi (misalign | align);
7019 tree ptr = build_int_cst (ref_type, align);
7020 gcall *call
7021 = gimple_build_call_internal (IFN_MASK_STORE, 4,
7022 dataref_ptr, ptr,
7023 final_mask, vec_oprnd);
7024 gimple_call_set_nothrow (call, true);
7025 new_stmt = call;
7027 else
7029 data_ref = fold_build2 (MEM_REF, vectype,
7030 dataref_ptr,
7031 dataref_offset
7032 ? dataref_offset
7033 : build_int_cst (ref_type, 0));
7034 if (aligned_access_p (first_dr))
7036 else if (DR_MISALIGNMENT (first_dr) == -1)
7037 TREE_TYPE (data_ref)
7038 = build_aligned_type (TREE_TYPE (data_ref),
7039 align * BITS_PER_UNIT);
7040 else
7041 TREE_TYPE (data_ref)
7042 = build_aligned_type (TREE_TYPE (data_ref),
7043 TYPE_ALIGN (elem_type));
7044 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
7046 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7048 if (slp)
7049 continue;
7051 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
7052 if (!next_stmt)
7053 break;
7056 if (!slp)
7058 if (j == 0)
7059 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7060 else
7061 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7062 prev_stmt_info = vinfo_for_stmt (new_stmt);
7066 oprnds.release ();
7067 result_chain.release ();
7068 vec_oprnds.release ();
7070 return true;
7073 /* Given a vector type VECTYPE, turn permutation SEL into the equivalent
7074 VECTOR_CST mask. No checks are made that the target platform supports the
7075 mask, so callers may wish to test can_vec_perm_const_p separately, or use
7076 vect_gen_perm_mask_checked. */
7078 tree
7079 vect_gen_perm_mask_any (tree vectype, const vec_perm_indices &sel)
7081 tree mask_type;
7083 poly_uint64 nunits = sel.length ();
7084 gcc_assert (known_eq (nunits, TYPE_VECTOR_SUBPARTS (vectype)));
7086 mask_type = build_vector_type (ssizetype, nunits);
7087 return vec_perm_indices_to_tree (mask_type, sel);
7090 /* Checked version of vect_gen_perm_mask_any. Asserts can_vec_perm_const_p,
7091 i.e. that the target supports the pattern _for arbitrary input vectors_. */
7093 tree
7094 vect_gen_perm_mask_checked (tree vectype, const vec_perm_indices &sel)
7096 gcc_assert (can_vec_perm_const_p (TYPE_MODE (vectype), sel));
7097 return vect_gen_perm_mask_any (vectype, sel);
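/* A rough usage sketch, mirroring the scatter-store WIDEN case earlier in
   this file (VECTYPE and COUNT below are assumed to describe the same
   fixed-length vector):

     vec_perm_builder sel (count, count, 1);
     for (unsigned int i = 0; i < count; ++i)
       sel.quick_push (i | (count / 2));
     vec_perm_indices indices (sel, 1, count);
     tree mask = vect_gen_perm_mask_checked (vectype, indices);

   vect_gen_perm_mask_any would build the same mask without asserting
   target support.  */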
7100 /* Given vector variables X and Y that were generated for the scalar
7101 stmt STMT, generate instructions to permute the vector elements of X
7102 and Y using permutation mask MASK_VEC, insert them at *GSI and return
7103 the permuted vector variable. */
7105 static tree
7106 permute_vec_elements (tree x, tree y, tree mask_vec, gimple *stmt,
7107 gimple_stmt_iterator *gsi)
7109 tree vectype = TREE_TYPE (x);
7110 tree perm_dest, data_ref;
7111 gimple *perm_stmt;
7113 tree scalar_dest = gimple_get_lhs (stmt);
7114 if (TREE_CODE (scalar_dest) == SSA_NAME)
7115 perm_dest = vect_create_destination_var (scalar_dest, vectype);
7116 else
7117 perm_dest = vect_get_new_vect_var (vectype, vect_simple_var, NULL);
7118 data_ref = make_ssa_name (perm_dest);
7120 /* Generate the permute statement. */
7121 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
7122 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
7124 return data_ref;
7127 /* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
7128 inserting them on the loop's preheader edge. Returns true if we
7129 were successful in doing so (and thus STMT can then be moved),
7130 otherwise returns false. */
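/* A small hypothetical example: if STMT is "x_1 = *p_2" and p_2 is defined
   inside LOOP by "p_2 = base_3 + off_4" whose operands are themselves
   defined outside the loop, that single definition is moved to the
   preheader so STMT itself can then be hoisted.  If p_2 were defined by a
   PHI, or by a stmt with further in-loop operands, we return false.  */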
7132 static bool
7133 hoist_defs_of_uses (gimple *stmt, struct loop *loop)
7135 ssa_op_iter i;
7136 tree op;
7137 bool any = false;
7139 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
7141 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
7142 if (!gimple_nop_p (def_stmt)
7143 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
7145 /* Make sure we don't need to recurse. While we could do
7146 so in simple cases, for more complex use webs we don't
7147 have an easy way to preserve stmt order to fulfil
7148 dependencies within them. */
7149 tree op2;
7150 ssa_op_iter i2;
7151 if (gimple_code (def_stmt) == GIMPLE_PHI)
7152 return false;
7153 FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
7155 gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
7156 if (!gimple_nop_p (def_stmt2)
7157 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
7158 return false;
7160 any = true;
7164 if (!any)
7165 return true;
7167 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
7169 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
7170 if (!gimple_nop_p (def_stmt)
7171 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
7173 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
7174 gsi_remove (&gsi, false);
7175 gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
7179 return true;
7182 /* vectorizable_load.
7184 Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
7185 can be vectorized.
7186 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
7187 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
7188 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
7190 static bool
7191 vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
7192 slp_tree slp_node, slp_instance slp_node_instance)
7194 tree scalar_dest;
7195 tree vec_dest = NULL;
7196 tree data_ref = NULL;
7197 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7198 stmt_vec_info prev_stmt_info;
7199 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7200 struct loop *loop = NULL;
7201 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
7202 bool nested_in_vect_loop = false;
7203 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
7204 tree elem_type;
7205 tree new_temp;
7206 machine_mode mode;
7207 gimple *new_stmt = NULL;
7208 tree dummy;
7209 enum dr_alignment_support alignment_support_scheme;
7210 tree dataref_ptr = NULL_TREE;
7211 tree dataref_offset = NULL_TREE;
7212 gimple *ptr_incr = NULL;
7213 int ncopies;
7214 int i, j;
7215 unsigned int group_size;
7216 poly_uint64 group_gap_adj;
7217 tree msq = NULL_TREE, lsq;
7218 tree offset = NULL_TREE;
7219 tree byte_offset = NULL_TREE;
7220 tree realignment_token = NULL_TREE;
7221 gphi *phi = NULL;
7222 vec<tree> dr_chain = vNULL;
7223 bool grouped_load = false;
7224 gimple *first_stmt;
7225 gimple *first_stmt_for_drptr = NULL;
7226 bool inv_p;
7227 bool compute_in_loop = false;
7228 struct loop *at_loop;
7229 int vec_num;
7230 bool slp = (slp_node != NULL);
7231 bool slp_perm = false;
7232 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7233 poly_uint64 vf;
7234 tree aggr_type;
7235 gather_scatter_info gs_info;
7236 vec_info *vinfo = stmt_info->vinfo;
7237 tree ref_type;
7238 enum vect_def_type mask_dt = vect_unknown_def_type;
7240 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
7241 return false;
7243 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
7244 && ! vec_stmt)
7245 return false;
7247 tree mask = NULL_TREE, mask_vectype = NULL_TREE;
7248 if (is_gimple_assign (stmt))
7250 scalar_dest = gimple_assign_lhs (stmt);
7251 if (TREE_CODE (scalar_dest) != SSA_NAME)
7252 return false;
7254 tree_code code = gimple_assign_rhs_code (stmt);
7255 if (code != ARRAY_REF
7256 && code != BIT_FIELD_REF
7257 && code != INDIRECT_REF
7258 && code != COMPONENT_REF
7259 && code != IMAGPART_EXPR
7260 && code != REALPART_EXPR
7261 && code != MEM_REF
7262 && TREE_CODE_CLASS (code) != tcc_declaration)
7263 return false;
7265 else
7267 gcall *call = dyn_cast <gcall *> (stmt);
7268 if (!call || !gimple_call_internal_p (call))
7269 return false;
7271 internal_fn ifn = gimple_call_internal_fn (call);
7272 if (!internal_load_fn_p (ifn))
7273 return false;
7275 scalar_dest = gimple_call_lhs (call);
7276 if (!scalar_dest)
7277 return false;
7279 if (slp_node != NULL)
7281 if (dump_enabled_p ())
7282 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7283 "SLP of masked loads not supported.\n");
7284 return false;
7287 int mask_index = internal_fn_mask_index (ifn);
7288 if (mask_index >= 0)
7290 mask = gimple_call_arg (call, mask_index);
7291 if (!vect_check_load_store_mask (stmt, mask, &mask_dt,
7292 &mask_vectype))
7293 return false;
7297 if (!STMT_VINFO_DATA_REF (stmt_info))
7298 return false;
7300 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7301 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
7303 if (loop_vinfo)
7305 loop = LOOP_VINFO_LOOP (loop_vinfo);
7306 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
7307 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
7309 else
7310 vf = 1;
7312 /* Multiple types in SLP are handled by creating the appropriate number of
7313 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
7314 case of SLP. */
7315 if (slp)
7316 ncopies = 1;
7317 else
7318 ncopies = vect_get_num_copies (loop_vinfo, vectype);
7320 gcc_assert (ncopies >= 1);
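/* For instance (purely illustrative), with a V4SI vectype (nunits == 4)
   and a vectorization factor of 8, the non-SLP path needs
   ncopies == 8 / 4 == 2 copies of each vector load statement.  */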
7322 /* FORNOW. This restriction should be relaxed. */
7323 if (nested_in_vect_loop && ncopies > 1)
7325 if (dump_enabled_p ())
7326 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7327 "multiple types in nested loop.\n");
7328 return false;
7331 /* Invalidate assumptions made by dependence analysis when vectorization
7332 on the unrolled body effectively re-orders stmts. */
7333 if (ncopies > 1
7334 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
7335 && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
7336 STMT_VINFO_MIN_NEG_DIST (stmt_info)))
7338 if (dump_enabled_p ())
7339 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7340 "cannot perform implicit CSE when unrolling "
7341 "with negative dependence distance\n");
7342 return false;
7345 elem_type = TREE_TYPE (vectype);
7346 mode = TYPE_MODE (vectype);
7348 /* FORNOW. In some cases we can vectorize even if the data type is not
7349 supported (e.g. data copies). */
7350 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
7352 if (dump_enabled_p ())
7353 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7354 "Aligned load, but unsupported type.\n");
7355 return false;
7358 /* Check if the load is a part of an interleaving chain. */
7359 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
7361 grouped_load = true;
7362 /* FORNOW */
7363 gcc_assert (!nested_in_vect_loop);
7364 gcc_assert (!STMT_VINFO_GATHER_SCATTER_P (stmt_info));
7366 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
7367 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
7369 if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
7370 slp_perm = true;
7372 /* Invalidate assumptions made by dependence analysis when vectorization
7373 on the unrolled body effectively re-orders stmts. */
7374 if (!PURE_SLP_STMT (stmt_info)
7375 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
7376 && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
7377 STMT_VINFO_MIN_NEG_DIST (stmt_info)))
7379 if (dump_enabled_p ())
7380 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7381 "cannot perform implicit CSE when performing "
7382 "group loads with negative dependence distance\n");
7383 return false;
7386 /* Similarly when the stmt is a load that is both part of a SLP
7387 instance and a loop vectorized stmt via the same-dr mechanism
7388 we have to give up. */
7389 if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)
7390 && (STMT_SLP_TYPE (stmt_info)
7391 != STMT_SLP_TYPE (vinfo_for_stmt
7392 (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)))))
7394 if (dump_enabled_p ())
7395 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7396 "conflicting SLP types for CSEd load\n");
7397 return false;
7400 else
7401 group_size = 1;
7403 vect_memory_access_type memory_access_type;
7404 if (!get_load_store_type (stmt, vectype, slp, mask, VLS_LOAD, ncopies,
7405 &memory_access_type, &gs_info))
7406 return false;
7408 if (mask)
7410 if (memory_access_type == VMAT_CONTIGUOUS)
7412 machine_mode vec_mode = TYPE_MODE (vectype);
7413 if (!VECTOR_MODE_P (vec_mode)
7414 || !can_vec_mask_load_store_p (vec_mode,
7415 TYPE_MODE (mask_vectype), true))
7416 return false;
7418 else if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl)
7420 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
7421 tree masktype
7422 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
7423 if (TREE_CODE (masktype) == INTEGER_TYPE)
7425 if (dump_enabled_p ())
7426 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7427 "masked gather with integer mask not"
7428 " supported.");
7429 return false;
7432 else if (memory_access_type != VMAT_LOAD_STORE_LANES
7433 && memory_access_type != VMAT_GATHER_SCATTER)
7435 if (dump_enabled_p ())
7436 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7437 "unsupported access type for masked load.\n");
7438 return false;
7442 if (!vec_stmt) /* transformation not required. */
7444 if (!slp)
7445 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
7447 if (loop_vinfo
7448 && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
7449 check_load_store_masking (loop_vinfo, vectype, VLS_LOAD, group_size,
7450 memory_access_type, &gs_info);
7452 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
7453 /* The SLP costs are calculated during SLP analysis. */
7454 if (!PURE_SLP_STMT (stmt_info))
7455 vect_model_load_cost (stmt_info, ncopies, memory_access_type,
7456 NULL, NULL, NULL);
7457 return true;
7460 if (!slp)
7461 gcc_assert (memory_access_type
7462 == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
7464 if (dump_enabled_p ())
7465 dump_printf_loc (MSG_NOTE, vect_location,
7466 "transform load. ncopies = %d\n", ncopies);
7468 /* Transform. */
7470 ensure_base_align (dr);
7472 if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl)
7474 vect_build_gather_load_calls (stmt, gsi, vec_stmt, &gs_info, mask,
7475 mask_dt);
7476 return true;
7479 if (memory_access_type == VMAT_ELEMENTWISE
7480 || memory_access_type == VMAT_STRIDED_SLP)
7482 gimple_stmt_iterator incr_gsi;
7483 bool insert_after;
7484 gimple *incr;
7485 tree offvar;
7486 tree ivstep;
7487 tree running_off;
7488 vec<constructor_elt, va_gc> *v = NULL;
7489 tree stride_base, stride_step, alias_off;
7490 /* Checked by get_load_store_type. */
7491 unsigned int const_nunits = nunits.to_constant ();
7492 unsigned HOST_WIDE_INT cst_offset = 0;
7494 gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
7495 gcc_assert (!nested_in_vect_loop);
7497 if (grouped_load)
7499 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
7500 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
7502 else
7504 first_stmt = stmt;
7505 first_dr = dr;
7507 if (slp && grouped_load)
7509 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
7510 ref_type = get_group_alias_ptr_type (first_stmt);
7512 else
7514 if (grouped_load)
7515 cst_offset
7516 = (tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)))
7517 * vect_get_place_in_interleaving_chain (stmt, first_stmt));
7518 group_size = 1;
7519 ref_type = reference_alias_ptr_type (DR_REF (dr));
7522 stride_base
7523 = fold_build_pointer_plus
7524 (DR_BASE_ADDRESS (first_dr),
7525 size_binop (PLUS_EXPR,
7526 convert_to_ptrofftype (DR_OFFSET (first_dr)),
7527 convert_to_ptrofftype (DR_INIT (first_dr))));
7528 stride_step = fold_convert (sizetype, DR_STEP (first_dr));
7530 /* For a load with loop-invariant (but other than power-of-2)
7531 stride (i.e. not a grouped access) like so:
7533 for (i = 0; i < n; i += stride)
7534 ... = array[i];
7536 we generate a new induction variable and new accesses to
7537 form a new vector (or vectors, depending on ncopies):
7539 for (j = 0; ; j += VF*stride)
7540 tmp1 = array[j];
7541 tmp2 = array[j + stride];
7543 vectemp = {tmp1, tmp2, ...}
7546 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
7547 build_int_cst (TREE_TYPE (stride_step), vf));
7549 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
7551 stride_base = cse_and_gimplify_to_preheader (loop_vinfo, stride_base);
7552 ivstep = cse_and_gimplify_to_preheader (loop_vinfo, ivstep);
7553 create_iv (stride_base, ivstep, NULL,
7554 loop, &incr_gsi, insert_after,
7555 &offvar, NULL);
7556 incr = gsi_stmt (incr_gsi);
7557 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
7559 stride_step = cse_and_gimplify_to_preheader (loop_vinfo, stride_step);
7561 prev_stmt_info = NULL;
7562 running_off = offvar;
7563 alias_off = build_int_cst (ref_type, 0);
7564 int nloads = const_nunits;
7565 int lnel = 1;
7566 tree ltype = TREE_TYPE (vectype);
7567 tree lvectype = vectype;
7568 auto_vec<tree> dr_chain;
7569 if (memory_access_type == VMAT_STRIDED_SLP)
7571 if (group_size < const_nunits)
7573 /* First check if vec_init optab supports construction from
7574 vector elts directly. */
7575 scalar_mode elmode = SCALAR_TYPE_MODE (TREE_TYPE (vectype));
7576 machine_mode vmode;
7577 if (mode_for_vector (elmode, group_size).exists (&vmode)
7578 && VECTOR_MODE_P (vmode)
7579 && targetm.vector_mode_supported_p (vmode)
7580 && (convert_optab_handler (vec_init_optab,
7581 TYPE_MODE (vectype), vmode)
7582 != CODE_FOR_nothing))
7584 nloads = const_nunits / group_size;
7585 lnel = group_size;
7586 ltype = build_vector_type (TREE_TYPE (vectype), group_size);
7588 else
7590 /* Otherwise avoid emitting a constructor of vector elements
7591 by performing the loads using an integer type of the same
7592 size, constructing a vector of those and then
7593 re-interpreting it as the original vector type.
7594 This avoids a huge runtime penalty due to the general
7595 inability to perform store forwarding from smaller stores
7596 to a larger load. */
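/* A concrete instance (illustrative, assuming the target supports the
   required vec_init): with a V8HI vectype and group_size == 2, the two
   16-bit elements of each group are loaded as one 32-bit integer, four
   such integers are assembled into a V4SI vector, and the result is
   VIEW_CONVERT_EXPRed back to V8HI.  */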
7597 unsigned lsize
7598 = group_size * TYPE_PRECISION (TREE_TYPE (vectype));
7599 elmode = int_mode_for_size (lsize, 0).require ();
7600 unsigned int lnunits = const_nunits / group_size;
7601 /* If we can't construct such a vector fall back to
7602 element loads of the original vector type. */
7603 if (mode_for_vector (elmode, lnunits).exists (&vmode)
7604 && VECTOR_MODE_P (vmode)
7605 && targetm.vector_mode_supported_p (vmode)
7606 && (convert_optab_handler (vec_init_optab, vmode, elmode)
7607 != CODE_FOR_nothing))
7609 nloads = lnunits;
7610 lnel = group_size;
7611 ltype = build_nonstandard_integer_type (lsize, 1);
7612 lvectype = build_vector_type (ltype, nloads);
7616 else
7618 nloads = 1;
7619 lnel = const_nunits;
7620 ltype = vectype;
7622 ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
7624 if (slp)
7626 /* For SLP permutation support we need to load the whole group,
7627 not only the number of vector stmts the permutation result
7628 fits in. */
7629 if (slp_perm)
7631 /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for
7632 variable VF. */
7633 unsigned int const_vf = vf.to_constant ();
7634 ncopies = CEIL (group_size * const_vf, const_nunits);
7635 dr_chain.create (ncopies);
7637 else
7638 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7640 unsigned int group_el = 0;
7641 unsigned HOST_WIDE_INT
7642 elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
7643 for (j = 0; j < ncopies; j++)
7645 if (nloads > 1)
7646 vec_alloc (v, nloads);
7647 for (i = 0; i < nloads; i++)
7649 tree this_off = build_int_cst (TREE_TYPE (alias_off),
7650 group_el * elsz + cst_offset);
7651 new_stmt = gimple_build_assign (make_ssa_name (ltype),
7652 build2 (MEM_REF, ltype,
7653 running_off, this_off));
7654 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7655 if (nloads > 1)
7656 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE,
7657 gimple_assign_lhs (new_stmt));
7659 group_el += lnel;
7660 if (! slp
7661 || group_el == group_size)
7663 tree newoff = copy_ssa_name (running_off);
7664 gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
7665 running_off, stride_step);
7666 vect_finish_stmt_generation (stmt, incr, gsi);
7668 running_off = newoff;
7669 group_el = 0;
7672 if (nloads > 1)
7674 tree vec_inv = build_constructor (lvectype, v);
7675 new_temp = vect_init_vector (stmt, vec_inv, lvectype, gsi);
7676 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7677 if (lvectype != vectype)
7679 new_stmt = gimple_build_assign (make_ssa_name (vectype),
7680 VIEW_CONVERT_EXPR,
7681 build1 (VIEW_CONVERT_EXPR,
7682 vectype, new_temp));
7683 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7687 if (slp)
7689 if (slp_perm)
7690 dr_chain.quick_push (gimple_assign_lhs (new_stmt));
7691 else
7692 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7694 else
7696 if (j == 0)
7697 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7698 else
7699 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7700 prev_stmt_info = vinfo_for_stmt (new_stmt);
7703 if (slp_perm)
7705 unsigned n_perms;
7706 vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
7707 slp_node_instance, false, &n_perms);
7709 return true;
7712 if (memory_access_type == VMAT_GATHER_SCATTER
7713 || (!slp && memory_access_type == VMAT_CONTIGUOUS))
7714 grouped_load = false;
7716 if (grouped_load)
7718 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
7719 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
7720 /* For SLP vectorization we directly vectorize a subchain
7721 without permutation. */
7722 if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
7723 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
7724 /* For BB vectorization always use the first stmt to base
7725 the data ref pointer on. */
7726 if (bb_vinfo)
7727 first_stmt_for_drptr = SLP_TREE_SCALAR_STMTS (slp_node)[0];
7729 /* Check if the chain of loads is already vectorized. */
7730 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
7731 /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
7732 ??? But we can only do so if there is exactly one
7733 as we have no way to get at the rest. Leave the CSE
7734 opportunity alone.
7735 ??? With the group load eventually participating
7736 in multiple different permutations (having multiple
7737 slp nodes which refer to the same group) the CSE
7738 is even wrong code. See PR56270. */
7739 && !slp)
7741 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
7742 return true;
7744 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
7745 group_gap_adj = 0;
7747 /* VEC_NUM is the number of vect stmts to be created for this group. */
7748 if (slp)
7750 grouped_load = false;
7751 /* For SLP permutation support we need to load the whole group,
7752 not only the number of vector stmts the permutation result
7753 fits in. */
7754 if (slp_perm)
7756 /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for
7757 variable VF. */
7758 unsigned int const_vf = vf.to_constant ();
7759 unsigned int const_nunits = nunits.to_constant ();
7760 vec_num = CEIL (group_size * const_vf, const_nunits);
7761 group_gap_adj = vf * group_size - nunits * vec_num;
7763 else
7765 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7766 group_gap_adj
7767 = group_size - SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
7770 else
7771 vec_num = group_size;
7773 ref_type = get_group_alias_ptr_type (first_stmt);
7775 else
7777 first_stmt = stmt;
7778 first_dr = dr;
7779 group_size = vec_num = 1;
7780 group_gap_adj = 0;
7781 ref_type = reference_alias_ptr_type (DR_REF (first_dr));
7784 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
7785 gcc_assert (alignment_support_scheme);
7786 bool masked_loop_p = (loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
7787 /* Targets with store-lane instructions must not require explicit
7788 realignment. vect_supportable_dr_alignment always returns either
7789 dr_aligned or dr_unaligned_supported for masked operations. */
7790 gcc_assert ((memory_access_type != VMAT_LOAD_STORE_LANES
7791 && !mask
7792 && !masked_loop_p)
7793 || alignment_support_scheme == dr_aligned
7794 || alignment_support_scheme == dr_unaligned_supported);
7796 /* In case the vectorization factor (VF) is bigger than the number
7797 of elements that we can fit in a vectype (nunits), we have to generate
7798 more than one vector stmt - i.e - we need to "unroll" the
7799 vector stmt by a factor VF/nunits. In doing so, we record a pointer
7800 from one copy of the vector stmt to the next, in the field
7801 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
7802 stages to find the correct vector defs to be used when vectorizing
7803 stmts that use the defs of the current stmt. The example below
7804 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
7805 need to create 4 vectorized stmts):
7807 before vectorization:
7808 RELATED_STMT VEC_STMT
7809 S1: x = memref - -
7810 S2: z = x + 1 - -
7812 step 1: vectorize stmt S1:
7813 We first create the vector stmt VS1_0, and, as usual, record a
7814 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
7815 Next, we create the vector stmt VS1_1, and record a pointer to
7816 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
7817 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
7818 stmts and pointers:
7819 RELATED_STMT VEC_STMT
7820 VS1_0: vx0 = memref0 VS1_1 -
7821 VS1_1: vx1 = memref1 VS1_2 -
7822 VS1_2: vx2 = memref2 VS1_3 -
7823 VS1_3: vx3 = memref3 - -
7824 S1: x = load - VS1_0
7825 S2: z = x + 1 - -
7827 See the documentation of vect_get_vec_def_for_stmt_copy for how the
7828 information recorded in the RELATED_STMT field is used to vectorize
7829 stmt S2. */
7831 /* In case of interleaving (non-unit grouped access):
7833 S1: x2 = &base + 2
7834 S2: x0 = &base
7835 S3: x1 = &base + 1
7836 S4: x3 = &base + 3
7838 Vectorized loads are created in the order of memory accesses
7839 starting from the access of the first stmt of the chain:
7841 VS1: vx0 = &base
7842 VS2: vx1 = &base + vec_size*1
7843 VS3: vx3 = &base + vec_size*2
7844 VS4: vx4 = &base + vec_size*3
7846 Then permutation statements are generated:
7848 VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
7849 VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
7852 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
7853 (the order of the data-refs in the output of vect_permute_load_chain
7854 corresponds to the order of scalar stmts in the interleaving chain - see
7855 the documentation of vect_permute_load_chain()).
7856 The generation of permutation stmts and recording them in
7857 STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().
7859 In case of both multiple types and interleaving, the vector loads and
7860 permutation stmts above are created for every copy. The result vector
7861 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
7862 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
7864 /* If the data reference is aligned (dr_aligned) or potentially unaligned
7865 on a target that supports unaligned accesses (dr_unaligned_supported)
7866 we generate the following code:
7867 p = initial_addr;
7868 indx = 0;
7869 loop {
7870 p = p + indx * vectype_size;
7871 vec_dest = *(p);
7872 indx = indx + 1;
7875 Otherwise, the data reference is potentially unaligned on a target that
7876 does not support unaligned accesses (dr_explicit_realign_optimized) -
7877 then generate the following code, in which the data in each iteration is
7878 obtained by two vector loads, one from the previous iteration, and one
7879 from the current iteration:
7880 p1 = initial_addr;
7881 msq_init = *(floor(p1))
7882 p2 = initial_addr + VS - 1;
7883 realignment_token = call target_builtin;
7884 indx = 0;
7885 loop {
7886 p2 = p2 + indx * vectype_size
7887 lsq = *(floor(p2))
7888 vec_dest = realign_load (msq, lsq, realignment_token)
7889 indx = indx + 1;
7890 msq = lsq;
7891 } */
7893 /* If the misalignment remains the same throughout the execution of the
7894 loop, we can create the init_addr and permutation mask at the loop
7895 preheader. Otherwise, it needs to be created inside the loop.
7896 This can only occur when vectorizing memory accesses in the inner-loop
7897 nested within an outer-loop that is being vectorized. */
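/* For instance (an illustrative scenario), when the inner loop walks a
   row of a matrix and the outer, vectorized loop advances to the next
   row by a step that is not a multiple of the vector size, the
   misalignment of the inner-loop access changes from one outer-loop
   iteration to the next, so the realignment data must be computed
   inside the loop rather than in the preheader.  */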
7899 if (nested_in_vect_loop
7900 && !multiple_p (DR_STEP_ALIGNMENT (dr),
7901 GET_MODE_SIZE (TYPE_MODE (vectype))))
7903 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
7904 compute_in_loop = true;
7907 if ((alignment_support_scheme == dr_explicit_realign_optimized
7908 || alignment_support_scheme == dr_explicit_realign)
7909 && !compute_in_loop)
7911 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
7912 alignment_support_scheme, NULL_TREE,
7913 &at_loop);
7914 if (alignment_support_scheme == dr_explicit_realign_optimized)
7916 phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
7917 byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
7918 size_one_node);
7921 else
7922 at_loop = loop;
7924 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
7925 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
7927 tree bump;
7928 tree vec_offset = NULL_TREE;
7929 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
7931 aggr_type = NULL_TREE;
7932 bump = NULL_TREE;
7934 else if (memory_access_type == VMAT_GATHER_SCATTER)
7936 aggr_type = elem_type;
7937 vect_get_strided_load_store_ops (stmt, loop_vinfo, &gs_info,
7938 &bump, &vec_offset);
7940 else
7942 if (memory_access_type == VMAT_LOAD_STORE_LANES)
7943 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
7944 else
7945 aggr_type = vectype;
7946 bump = vect_get_data_ptr_increment (dr, aggr_type, memory_access_type);
7949 tree vec_mask = NULL_TREE;
7950 prev_stmt_info = NULL;
7951 poly_uint64 group_elt = 0;
7952 vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
7953 for (j = 0; j < ncopies; j++)
7955 /* 1. Create the vector or array pointer update chain. */
7956 if (j == 0)
7958 bool simd_lane_access_p
7959 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
7960 if (simd_lane_access_p
7961 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
7962 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
7963 && integer_zerop (DR_OFFSET (first_dr))
7964 && integer_zerop (DR_INIT (first_dr))
7965 && alias_sets_conflict_p (get_alias_set (aggr_type),
7966 get_alias_set (TREE_TYPE (ref_type)))
7967 && (alignment_support_scheme == dr_aligned
7968 || alignment_support_scheme == dr_unaligned_supported))
7970 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
7971 dataref_offset = build_int_cst (ref_type, 0);
7972 inv_p = false;
7974 else if (first_stmt_for_drptr
7975 && first_stmt != first_stmt_for_drptr)
7977 dataref_ptr
7978 = vect_create_data_ref_ptr (first_stmt_for_drptr, aggr_type,
7979 at_loop, offset, &dummy, gsi,
7980 &ptr_incr, simd_lane_access_p,
7981 &inv_p, byte_offset, bump);
7982 /* Adjust the pointer by the difference to first_stmt. */
7983 data_reference_p ptrdr
7984 = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt_for_drptr));
7985 tree diff = fold_convert (sizetype,
7986 size_binop (MINUS_EXPR,
7987 DR_INIT (first_dr),
7988 DR_INIT (ptrdr)));
7989 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7990 stmt, diff);
7992 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
7994 vect_get_gather_scatter_ops (loop, stmt, &gs_info,
7995 &dataref_ptr, &vec_offset);
7996 inv_p = false;
7998 else
7999 dataref_ptr
8000 = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
8001 offset, &dummy, gsi, &ptr_incr,
8002 simd_lane_access_p, &inv_p,
8003 byte_offset, bump);
8004 if (mask)
8005 vec_mask = vect_get_vec_def_for_operand (mask, stmt,
8006 mask_vectype);
8008 else
8010 if (dataref_offset)
8011 dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
8012 bump);
8013 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
8014 vec_offset = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt,
8015 vec_offset);
8016 else
8017 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
8018 stmt, bump);
8019 if (mask)
8020 vec_mask = vect_get_vec_def_for_stmt_copy (mask_dt, vec_mask);
8023 if (grouped_load || slp_perm)
8024 dr_chain.create (vec_num);
8026 if (memory_access_type == VMAT_LOAD_STORE_LANES)
8028 tree vec_array;
8030 vec_array = create_vector_array (vectype, vec_num);
8032 tree final_mask = NULL_TREE;
8033 if (masked_loop_p)
8034 final_mask = vect_get_loop_mask (gsi, masks, ncopies, vectype, j);
8035 if (vec_mask)
8036 final_mask = prepare_load_store_mask (mask_vectype, final_mask,
8037 vec_mask, gsi);
8039 gcall *call;
8040 if (final_mask)
8042 /* Emit:
8043 VEC_ARRAY = MASK_LOAD_LANES (DATAREF_PTR, ALIAS_PTR,
8044 VEC_MASK). */
8045 unsigned int align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
8046 tree alias_ptr = build_int_cst (ref_type, align);
8047 call = gimple_build_call_internal (IFN_MASK_LOAD_LANES, 3,
8048 dataref_ptr, alias_ptr,
8049 final_mask);
8051 else
8053 /* Emit:
8054 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
8055 data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
8056 call = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
8058 gimple_call_set_lhs (call, vec_array);
8059 gimple_call_set_nothrow (call, true);
8060 new_stmt = call;
8061 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8063 /* Extract each vector into an SSA_NAME. */
8064 for (i = 0; i < vec_num; i++)
8066 new_temp = read_vector_array (stmt, gsi, scalar_dest,
8067 vec_array, i);
8068 dr_chain.quick_push (new_temp);
8071 /* Record the mapping between SSA_NAMEs and statements. */
8072 vect_record_grouped_load_vectors (stmt, dr_chain);
8074 else
8076 for (i = 0; i < vec_num; i++)
8078 tree final_mask = NULL_TREE;
8079 if (masked_loop_p
8080 && memory_access_type != VMAT_INVARIANT)
8081 final_mask = vect_get_loop_mask (gsi, masks, vec_num * ncopies,
8082 vectype, vec_num * j + i);
8083 if (vec_mask)
8084 final_mask = prepare_load_store_mask (mask_vectype, final_mask,
8085 vec_mask, gsi);
8087 if (i > 0)
8088 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
8089 stmt, bump);
8091 /* 2. Create the vector-load in the loop. */
8092 switch (alignment_support_scheme)
8094 case dr_aligned:
8095 case dr_unaligned_supported:
8097 unsigned int align, misalign;
8099 if (memory_access_type == VMAT_GATHER_SCATTER)
8101 tree scale = size_int (gs_info.scale);
8102 gcall *call;
8103 if (masked_loop_p)
8104 call = gimple_build_call_internal
8105 (IFN_MASK_GATHER_LOAD, 4, dataref_ptr,
8106 vec_offset, scale, final_mask);
8107 else
8108 call = gimple_build_call_internal
8109 (IFN_GATHER_LOAD, 3, dataref_ptr,
8110 vec_offset, scale);
8111 gimple_call_set_nothrow (call, true);
8112 new_stmt = call;
8113 data_ref = NULL_TREE;
8114 break;
8117 align = DR_TARGET_ALIGNMENT (dr);
8118 if (alignment_support_scheme == dr_aligned)
8120 gcc_assert (aligned_access_p (first_dr));
8121 misalign = 0;
8123 else if (DR_MISALIGNMENT (first_dr) == -1)
8125 align = dr_alignment (vect_dr_behavior (first_dr));
8126 misalign = 0;
8128 else
8129 misalign = DR_MISALIGNMENT (first_dr);
8130 if (dataref_offset == NULL_TREE
8131 && TREE_CODE (dataref_ptr) == SSA_NAME)
8132 set_ptr_info_alignment (get_ptr_info (dataref_ptr),
8133 align, misalign);
8135 if (final_mask)
8137 align = least_bit_hwi (misalign | align);
8138 tree ptr = build_int_cst (ref_type, align);
8139 gcall *call
8140 = gimple_build_call_internal (IFN_MASK_LOAD, 3,
8141 dataref_ptr, ptr,
8142 final_mask);
8143 gimple_call_set_nothrow (call, true);
8144 new_stmt = call;
8145 data_ref = NULL_TREE;
8147 else
8149 data_ref
8150 = fold_build2 (MEM_REF, vectype, dataref_ptr,
8151 dataref_offset
8152 ? dataref_offset
8153 : build_int_cst (ref_type, 0));
8154 if (alignment_support_scheme == dr_aligned)
8155 ;
8156 else if (DR_MISALIGNMENT (first_dr) == -1)
8157 TREE_TYPE (data_ref)
8158 = build_aligned_type (TREE_TYPE (data_ref),
8159 align * BITS_PER_UNIT);
8160 else
8161 TREE_TYPE (data_ref)
8162 = build_aligned_type (TREE_TYPE (data_ref),
8163 TYPE_ALIGN (elem_type));
8165 break;
8167 case dr_explicit_realign:
8169 tree ptr, bump;
8171 tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));
8173 if (compute_in_loop)
8174 msq = vect_setup_realignment (first_stmt, gsi,
8175 &realignment_token,
8176 dr_explicit_realign,
8177 dataref_ptr, NULL);
8179 if (TREE_CODE (dataref_ptr) == SSA_NAME)
8180 ptr = copy_ssa_name (dataref_ptr);
8181 else
8182 ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
8183 unsigned int align = DR_TARGET_ALIGNMENT (first_dr);
8184 new_stmt = gimple_build_assign
8185 (ptr, BIT_AND_EXPR, dataref_ptr,
8186 build_int_cst
8187 (TREE_TYPE (dataref_ptr),
8188 -(HOST_WIDE_INT) align));
8189 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8190 data_ref
8191 = build2 (MEM_REF, vectype, ptr,
8192 build_int_cst (ref_type, 0));
8193 vec_dest = vect_create_destination_var (scalar_dest,
8194 vectype);
8195 new_stmt = gimple_build_assign (vec_dest, data_ref);
8196 new_temp = make_ssa_name (vec_dest, new_stmt);
8197 gimple_assign_set_lhs (new_stmt, new_temp);
8198 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
8199 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
8200 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8201 msq = new_temp;
8203 bump = size_binop (MULT_EXPR, vs,
8204 TYPE_SIZE_UNIT (elem_type));
8205 bump = size_binop (MINUS_EXPR, bump, size_one_node);
8206 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
8207 new_stmt = gimple_build_assign
8208 (NULL_TREE, BIT_AND_EXPR, ptr,
8209 build_int_cst
8210 (TREE_TYPE (ptr), -(HOST_WIDE_INT) align));
8211 ptr = copy_ssa_name (ptr, new_stmt);
8212 gimple_assign_set_lhs (new_stmt, ptr);
8213 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8214 data_ref
8215 = build2 (MEM_REF, vectype, ptr,
8216 build_int_cst (ref_type, 0));
8217 break;
8219 case dr_explicit_realign_optimized:
8221 if (TREE_CODE (dataref_ptr) == SSA_NAME)
8222 new_temp = copy_ssa_name (dataref_ptr);
8223 else
8224 new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
8225 unsigned int align = DR_TARGET_ALIGNMENT (first_dr);
8226 new_stmt = gimple_build_assign
8227 (new_temp, BIT_AND_EXPR, dataref_ptr,
8228 build_int_cst (TREE_TYPE (dataref_ptr),
8229 -(HOST_WIDE_INT) align));
8230 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8231 data_ref
8232 = build2 (MEM_REF, vectype, new_temp,
8233 build_int_cst (ref_type, 0));
8234 break;
8236 default:
8237 gcc_unreachable ();
8239 vec_dest = vect_create_destination_var (scalar_dest, vectype);
8240 /* DATA_REF is null if we've already built the statement. */
8241 if (data_ref)
8242 new_stmt = gimple_build_assign (vec_dest, data_ref);
8243 new_temp = make_ssa_name (vec_dest, new_stmt);
8244 gimple_set_lhs (new_stmt, new_temp);
8245 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8247 /* 3. Handle explicit realignment if necessary/supported.
8248 Create in loop:
8249 vec_dest = realign_load (msq, lsq, realignment_token) */
8250 if (alignment_support_scheme == dr_explicit_realign_optimized
8251 || alignment_support_scheme == dr_explicit_realign)
8253 lsq = gimple_assign_lhs (new_stmt);
8254 if (!realignment_token)
8255 realignment_token = dataref_ptr;
8256 vec_dest = vect_create_destination_var (scalar_dest, vectype);
8257 new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
8258 msq, lsq, realignment_token);
8259 new_temp = make_ssa_name (vec_dest, new_stmt);
8260 gimple_assign_set_lhs (new_stmt, new_temp);
8261 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8263 if (alignment_support_scheme == dr_explicit_realign_optimized)
8265 gcc_assert (phi);
8266 if (i == vec_num - 1 && j == ncopies - 1)
8267 add_phi_arg (phi, lsq,
8268 loop_latch_edge (containing_loop),
8269 UNKNOWN_LOCATION);
8270 msq = lsq;
8274 /* 4. Handle invariant-load. */
8275 if (inv_p && !bb_vinfo)
8277 gcc_assert (!grouped_load);
8278 /* If we have versioned for aliasing or the loop doesn't
8279 have any data dependencies that would preclude this,
8280 then we are sure this is a loop invariant load and
8281 thus we can insert it on the preheader edge. */
8282 if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
8283 && !nested_in_vect_loop
8284 && hoist_defs_of_uses (stmt, loop))
8286 if (dump_enabled_p ())
8288 dump_printf_loc (MSG_NOTE, vect_location,
8289 "hoisting out of the vectorized "
8290 "loop: ");
8291 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
8293 tree tem = copy_ssa_name (scalar_dest);
8294 gsi_insert_on_edge_immediate
8295 (loop_preheader_edge (loop),
8296 gimple_build_assign (tem,
8297 unshare_expr
8298 (gimple_assign_rhs1 (stmt))));
8299 new_temp = vect_init_vector (stmt, tem, vectype, NULL);
8300 new_stmt = SSA_NAME_DEF_STMT (new_temp);
8301 set_vinfo_for_stmt (new_stmt,
8302 new_stmt_vec_info (new_stmt, vinfo));
8304 else
8306 gimple_stmt_iterator gsi2 = *gsi;
8307 gsi_next (&gsi2);
8308 new_temp = vect_init_vector (stmt, scalar_dest,
8309 vectype, &gsi2);
8310 new_stmt = SSA_NAME_DEF_STMT (new_temp);
8314 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
8316 tree perm_mask = perm_mask_for_reverse (vectype);
8317 new_temp = permute_vec_elements (new_temp, new_temp,
8318 perm_mask, stmt, gsi);
8319 new_stmt = SSA_NAME_DEF_STMT (new_temp);
8322 /* Collect vector loads and later create their permutation in
8323 vect_transform_grouped_load (). */
8324 if (grouped_load || slp_perm)
8325 dr_chain.quick_push (new_temp);
8327 /* Store vector loads in the corresponding SLP_NODE. */
8328 if (slp && !slp_perm)
8329 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
8331 /* With an SLP permutation we load the gaps as well; without one
8332 we need to skip the gaps after we manage to fully load
8333 all elements. group_gap_adj is GROUP_SIZE here. */
8334 group_elt += nunits;
8335 if (maybe_ne (group_gap_adj, 0U)
8336 && !slp_perm
8337 && known_eq (group_elt, group_size - group_gap_adj))
8339 poly_wide_int bump_val
8340 = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
8341 * group_gap_adj);
8342 tree bump = wide_int_to_tree (sizetype, bump_val);
8343 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
8344 stmt, bump);
8345 group_elt = 0;
8348 /* Bump the vector pointer to account for a gap or for excess
8349 elements loaded for a permuted SLP load. */
8350 if (maybe_ne (group_gap_adj, 0U) && slp_perm)
8352 poly_wide_int bump_val
8353 = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
8354 * group_gap_adj);
8355 tree bump = wide_int_to_tree (sizetype, bump_val);
8356 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
8357 stmt, bump);
8361 if (slp && !slp_perm)
8362 continue;
8364 if (slp_perm)
8366 unsigned n_perms;
8367 if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
8368 slp_node_instance, false,
8369 &n_perms))
8371 dr_chain.release ();
8372 return false;
8375 else
8377 if (grouped_load)
8379 if (memory_access_type != VMAT_LOAD_STORE_LANES)
8380 vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
8381 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
8383 else
8385 if (j == 0)
8386 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
8387 else
8388 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
8389 prev_stmt_info = vinfo_for_stmt (new_stmt);
8392 dr_chain.release ();
8395 return true;
8398 /* Function vect_is_simple_cond.
8400 Input:
8401 LOOP - the loop that is being vectorized.
8402 COND - Condition that is checked for simple use.
8404 Output:
8405 *COMP_VECTYPE - the vector type for the comparison.
8406 *DTS - The def types for the arguments of the comparison
8408 Returns whether a COND can be vectorized. Checks whether
8409 condition operands are supportable using vect_is_simple_use. */
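/* For instance (illustrative only), COND may be either a mask SSA name
   such as _5 with a scalar boolean type, or an embedded comparison such
   as a_1 < b_2; in the latter case *COMP_VECTYPE is the vector type in
   which a_1 and b_2 will be compared.  */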
8411 static bool
8412 vect_is_simple_cond (tree cond, vec_info *vinfo,
8413 tree *comp_vectype, enum vect_def_type *dts,
8414 tree vectype)
8416 tree lhs, rhs;
8417 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
8419 /* Mask case. */
8420 if (TREE_CODE (cond) == SSA_NAME
8421 && VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (cond)))
8423 gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (cond);
8424 if (!vect_is_simple_use (cond, vinfo, &lhs_def_stmt,
8425 &dts[0], comp_vectype)
8426 || !*comp_vectype
8427 || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype))
8428 return false;
8429 return true;
8432 if (!COMPARISON_CLASS_P (cond))
8433 return false;
8435 lhs = TREE_OPERAND (cond, 0);
8436 rhs = TREE_OPERAND (cond, 1);
8438 if (TREE_CODE (lhs) == SSA_NAME)
8440 gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
8441 if (!vect_is_simple_use (lhs, vinfo, &lhs_def_stmt, &dts[0], &vectype1))
8442 return false;
8444 else if (TREE_CODE (lhs) == INTEGER_CST || TREE_CODE (lhs) == REAL_CST
8445 || TREE_CODE (lhs) == FIXED_CST)
8446 dts[0] = vect_constant_def;
8447 else
8448 return false;
8450 if (TREE_CODE (rhs) == SSA_NAME)
8452 gimple *rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
8453 if (!vect_is_simple_use (rhs, vinfo, &rhs_def_stmt, &dts[1], &vectype2))
8454 return false;
8456 else if (TREE_CODE (rhs) == INTEGER_CST || TREE_CODE (rhs) == REAL_CST
8457 || TREE_CODE (rhs) == FIXED_CST)
8458 dts[1] = vect_constant_def;
8459 else
8460 return false;
8462 if (vectype1 && vectype2
8463 && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1),
8464 TYPE_VECTOR_SUBPARTS (vectype2)))
8465 return false;
8467 *comp_vectype = vectype1 ? vectype1 : vectype2;
8468 /* Invariant comparison. */
8469 if (! *comp_vectype)
8471 tree scalar_type = TREE_TYPE (lhs);
8472 /* If we can widen the comparison to match vectype do so. */
8473 if (INTEGRAL_TYPE_P (scalar_type)
8474 && tree_int_cst_lt (TYPE_SIZE (scalar_type),
8475 TYPE_SIZE (TREE_TYPE (vectype))))
8476 scalar_type = build_nonstandard_integer_type
8477 (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (vectype))),
8478 TYPE_UNSIGNED (scalar_type));
8479 *comp_vectype = get_vectype_for_scalar_type (scalar_type);
8482 return true;
8485 /* vectorizable_condition.
8487 Check if STMT is a conditional modify expression that can be vectorized.
8488 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
8489 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
8490 at GSI.
8492 When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
8493 to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
8494 the else clause if it is 2).
8496 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
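/* Illustrative sketch (operand names invented for exposition): a scalar
   statement

     x_4 = a_1 < b_2 ? c_3 : d_5;

   is vectorized, outside the EXTRACT_LAST_REDUCTION case, roughly as

     vect_x = VEC_COND_EXPR <vect_a < vect_b, vect_c, vect_d>;

   with the embedded comparison replaced by the bit operations selected
   below when the comparison operands are boolean vectors.  */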
8498 bool
8499 vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
8500 gimple **vec_stmt, tree reduc_def, int reduc_index,
8501 slp_tree slp_node)
8503 tree scalar_dest = NULL_TREE;
8504 tree vec_dest = NULL_TREE;
8505 tree cond_expr, cond_expr0 = NULL_TREE, cond_expr1 = NULL_TREE;
8506 tree then_clause, else_clause;
8507 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8508 tree comp_vectype = NULL_TREE;
8509 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
8510 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
8511 tree vec_compare;
8512 tree new_temp;
8513 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
8514 enum vect_def_type dts[4]
8515 = {vect_unknown_def_type, vect_unknown_def_type,
8516 vect_unknown_def_type, vect_unknown_def_type};
8517 int ndts = 4;
8518 int ncopies;
8519 enum tree_code code, cond_code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
8520 stmt_vec_info prev_stmt_info = NULL;
8521 int i, j;
8522 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
8523 vec<tree> vec_oprnds0 = vNULL;
8524 vec<tree> vec_oprnds1 = vNULL;
8525 vec<tree> vec_oprnds2 = vNULL;
8526 vec<tree> vec_oprnds3 = vNULL;
8527 tree vec_cmp_type;
8528 bool masked = false;
8530 if (reduc_index && STMT_SLP_TYPE (stmt_info))
8531 return false;
8533 vect_reduction_type reduction_type
8534 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info);
8535 if (reduction_type == TREE_CODE_REDUCTION)
8537 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
8538 return false;
8540 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
8541 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
8542 && reduc_def))
8543 return false;
8545 /* FORNOW: not yet supported. */
8546 if (STMT_VINFO_LIVE_P (stmt_info))
8548 if (dump_enabled_p ())
8549 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8550 "value used after loop.\n");
8551 return false;
8555 /* Is vectorizable conditional operation? */
8556 if (!is_gimple_assign (stmt))
8557 return false;
8559 code = gimple_assign_rhs_code (stmt);
8561 if (code != COND_EXPR)
8562 return false;
8564 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
8565 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
8567 if (slp_node)
8568 ncopies = 1;
8569 else
8570 ncopies = vect_get_num_copies (loop_vinfo, vectype);
8572 gcc_assert (ncopies >= 1);
8573 if (reduc_index && ncopies > 1)
8574 return false; /* FORNOW */
8576 cond_expr = gimple_assign_rhs1 (stmt);
8577 then_clause = gimple_assign_rhs2 (stmt);
8578 else_clause = gimple_assign_rhs3 (stmt);
8580 if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo,
8581 &comp_vectype, &dts[0], vectype)
8582 || !comp_vectype)
8583 return false;
8585 gimple *def_stmt;
8586 if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &def_stmt, &dts[2],
8587 &vectype1))
8588 return false;
8589 if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &def_stmt, &dts[3],
8590 &vectype2))
8591 return false;
8593 if (vectype1 && !useless_type_conversion_p (vectype, vectype1))
8594 return false;
8596 if (vectype2 && !useless_type_conversion_p (vectype, vectype2))
8597 return false;
8599 masked = !COMPARISON_CLASS_P (cond_expr);
8600 vec_cmp_type = build_same_sized_truth_vector_type (comp_vectype);
8602 if (vec_cmp_type == NULL_TREE)
8603 return false;
8605 cond_code = TREE_CODE (cond_expr);
8606 if (!masked)
8608 cond_expr0 = TREE_OPERAND (cond_expr, 0);
8609 cond_expr1 = TREE_OPERAND (cond_expr, 1);
8612 if (!masked && VECTOR_BOOLEAN_TYPE_P (comp_vectype))
8614 /* Boolean values may have another representation in vectors
8615 and therefore we prefer bit operations over comparison for
8616 them (which also works for scalar masks). We store opcodes
8617 to use in bitop1 and bitop2. Statement is vectorized as
8618 BITOP2 (rhs1 BITOP1 rhs2) or rhs1 BITOP2 (BITOP1 rhs2)
8619 depending on bitop1 and bitop2 arity. */
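/* For example (illustrative), on boolean vector operands a_1 > b_2 is
   rewritten as a_1 & ~b_2: in the GT_EXPR case below bitop1 == BIT_NOT_EXPR
   is applied to the second operand and bitop2 == BIT_AND_EXPR combines the
   result with the first operand.  */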
8620 switch (cond_code)
8622 case GT_EXPR:
8623 bitop1 = BIT_NOT_EXPR;
8624 bitop2 = BIT_AND_EXPR;
8625 break;
8626 case GE_EXPR:
8627 bitop1 = BIT_NOT_EXPR;
8628 bitop2 = BIT_IOR_EXPR;
8629 break;
8630 case LT_EXPR:
8631 bitop1 = BIT_NOT_EXPR;
8632 bitop2 = BIT_AND_EXPR;
8633 std::swap (cond_expr0, cond_expr1);
8634 break;
8635 case LE_EXPR:
8636 bitop1 = BIT_NOT_EXPR;
8637 bitop2 = BIT_IOR_EXPR;
8638 std::swap (cond_expr0, cond_expr1);
8639 break;
8640 case NE_EXPR:
8641 bitop1 = BIT_XOR_EXPR;
8642 break;
8643 case EQ_EXPR:
8644 bitop1 = BIT_XOR_EXPR;
8645 bitop2 = BIT_NOT_EXPR;
8646 break;
8647 default:
8648 return false;
8650 cond_code = SSA_NAME;
8653 if (!vec_stmt)
8655 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
8656 if (bitop1 != NOP_EXPR)
8658 machine_mode mode = TYPE_MODE (comp_vectype);
8659 optab optab;
8661 optab = optab_for_tree_code (bitop1, comp_vectype, optab_default);
8662 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8663 return false;
8665 if (bitop2 != NOP_EXPR)
8667 optab = optab_for_tree_code (bitop2, comp_vectype,
8668 optab_default);
8669 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8670 return false;
8673 if (expand_vec_cond_expr_p (vectype, comp_vectype,
8674 cond_code))
8676 vect_model_simple_cost (stmt_info, ncopies, dts, ndts, NULL, NULL);
8677 return true;
8679 return false;
8682 /* Transform. */
8684 if (!slp_node)
8686 vec_oprnds0.create (1);
8687 vec_oprnds1.create (1);
8688 vec_oprnds2.create (1);
8689 vec_oprnds3.create (1);
8692 /* Handle def. */
8693 scalar_dest = gimple_assign_lhs (stmt);
8694 if (reduction_type != EXTRACT_LAST_REDUCTION)
8695 vec_dest = vect_create_destination_var (scalar_dest, vectype);
8697 /* Handle cond expr. */
8698 for (j = 0; j < ncopies; j++)
8700 gimple *new_stmt = NULL;
8701 if (j == 0)
8703 if (slp_node)
8705 auto_vec<tree, 4> ops;
8706 auto_vec<vec<tree>, 4> vec_defs;
8708 if (masked)
8709 ops.safe_push (cond_expr);
8710 else
8712 ops.safe_push (cond_expr0);
8713 ops.safe_push (cond_expr1);
8715 ops.safe_push (then_clause);
8716 ops.safe_push (else_clause);
8717 vect_get_slp_defs (ops, slp_node, &vec_defs);
8718 vec_oprnds3 = vec_defs.pop ();
8719 vec_oprnds2 = vec_defs.pop ();
8720 if (!masked)
8721 vec_oprnds1 = vec_defs.pop ();
8722 vec_oprnds0 = vec_defs.pop ();
8724 else
8726 gimple *gtemp;
8727 if (masked)
8729 vec_cond_lhs
8730 = vect_get_vec_def_for_operand (cond_expr, stmt,
8731 comp_vectype);
8732 vect_is_simple_use (cond_expr, stmt_info->vinfo,
8733 &gtemp, &dts[0]);
8735 else
8737 vec_cond_lhs
8738 = vect_get_vec_def_for_operand (cond_expr0,
8739 stmt, comp_vectype);
8740 vect_is_simple_use (cond_expr0, loop_vinfo, &gtemp, &dts[0]);
8742 vec_cond_rhs
8743 = vect_get_vec_def_for_operand (cond_expr1,
8744 stmt, comp_vectype);
8745 vect_is_simple_use (cond_expr1, loop_vinfo, &gtemp, &dts[1]);
8747 if (reduc_index == 1)
8748 vec_then_clause = reduc_def;
8749 else
8751 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
8752 stmt);
8753 vect_is_simple_use (then_clause, loop_vinfo,
8754 &gtemp, &dts[2]);
8756 if (reduc_index == 2)
8757 vec_else_clause = reduc_def;
8758 else
8760 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
8761 stmt);
8762 vect_is_simple_use (else_clause, loop_vinfo, &gtemp, &dts[3]);
8766 else
8768 vec_cond_lhs
8769 = vect_get_vec_def_for_stmt_copy (dts[0],
8770 vec_oprnds0.pop ());
8771 if (!masked)
8772 vec_cond_rhs
8773 = vect_get_vec_def_for_stmt_copy (dts[1],
8774 vec_oprnds1.pop ());
8776 vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
8777 vec_oprnds2.pop ());
8778 vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
8779 vec_oprnds3.pop ());
8782 if (!slp_node)
8784 vec_oprnds0.quick_push (vec_cond_lhs);
8785 if (!masked)
8786 vec_oprnds1.quick_push (vec_cond_rhs);
8787 vec_oprnds2.quick_push (vec_then_clause);
8788 vec_oprnds3.quick_push (vec_else_clause);
8791 /* Arguments are ready. Create the new vector stmt. */
8792 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
8794 vec_then_clause = vec_oprnds2[i];
8795 vec_else_clause = vec_oprnds3[i];
8797 if (masked)
8798 vec_compare = vec_cond_lhs;
8799 else
8801 vec_cond_rhs = vec_oprnds1[i];
8802 if (bitop1 == NOP_EXPR)
8803 vec_compare = build2 (cond_code, vec_cmp_type,
8804 vec_cond_lhs, vec_cond_rhs);
8805 else
8807 new_temp = make_ssa_name (vec_cmp_type);
8808 if (bitop1 == BIT_NOT_EXPR)
8809 new_stmt = gimple_build_assign (new_temp, bitop1,
8810 vec_cond_rhs);
8811 else
8812 new_stmt
8813 = gimple_build_assign (new_temp, bitop1, vec_cond_lhs,
8814 vec_cond_rhs);
8815 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8816 if (bitop2 == NOP_EXPR)
8817 vec_compare = new_temp;
8818 else if (bitop2 == BIT_NOT_EXPR)
8820 /* Instead of doing ~x ? y : z do x ? z : y. */
8821 vec_compare = new_temp;
8822 std::swap (vec_then_clause, vec_else_clause);
8824 else
8826 vec_compare = make_ssa_name (vec_cmp_type);
8827 new_stmt
8828 = gimple_build_assign (vec_compare, bitop2,
8829 vec_cond_lhs, new_temp);
8830 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8834 if (reduction_type == EXTRACT_LAST_REDUCTION)
8836 if (!is_gimple_val (vec_compare))
8838 tree vec_compare_name = make_ssa_name (vec_cmp_type);
8839 new_stmt = gimple_build_assign (vec_compare_name,
8840 vec_compare);
8841 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8842 vec_compare = vec_compare_name;
8844 gcc_assert (reduc_index == 2);
8845 new_stmt = gimple_build_call_internal
8846 (IFN_FOLD_EXTRACT_LAST, 3, else_clause, vec_compare,
8847 vec_then_clause);
8848 gimple_call_set_lhs (new_stmt, scalar_dest);
8849 SSA_NAME_DEF_STMT (scalar_dest) = new_stmt;
8850 if (stmt == gsi_stmt (*gsi))
8851 vect_finish_replace_stmt (stmt, new_stmt);
8852 else
8854 /* In this case we're moving the definition to later in the
8855 block. That doesn't matter because the only uses of the
8856 lhs are in phi statements. */
8857 gimple_stmt_iterator old_gsi = gsi_for_stmt (stmt);
8858 gsi_remove (&old_gsi, true);
8859 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8862 else
8864 new_temp = make_ssa_name (vec_dest);
8865 new_stmt = gimple_build_assign (new_temp, VEC_COND_EXPR,
8866 vec_compare, vec_then_clause,
8867 vec_else_clause);
8868 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8870 if (slp_node)
8871 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
8874 if (slp_node)
8875 continue;
8877 if (j == 0)
8878 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
8879 else
8880 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
8882 prev_stmt_info = vinfo_for_stmt (new_stmt);
8885 vec_oprnds0.release ();
8886 vec_oprnds1.release ();
8887 vec_oprnds2.release ();
8888 vec_oprnds3.release ();
8890 return true;
8893 /* vectorizable_comparison.
8895 Check if STMT is a comparison expression that can be vectorized.
8896 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
8897 comparison, put it in VEC_STMT, and insert it at GSI.
8899 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
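/* Illustrative sketch (names invented for exposition): a scalar
   statement

     mask_3 = a_1 > b_2;

   becomes a comparison producing a boolean vector,

     vect_mask = vect_a > vect_b;

   or, when the operands are themselves boolean vectors, the equivalent
   bit operations chosen below (here vect_a & ~vect_b).  */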
8901 static bool
8902 vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
8903 gimple **vec_stmt, tree reduc_def,
8904 slp_tree slp_node)
8906 tree lhs, rhs1, rhs2;
8907 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8908 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
8909 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
8910 tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE;
8911 tree new_temp;
8912 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
8913 enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type};
8914 int ndts = 2;
8915 poly_uint64 nunits;
8916 int ncopies;
8917 enum tree_code code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
8918 stmt_vec_info prev_stmt_info = NULL;
8919 int i, j;
8920 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
8921 vec<tree> vec_oprnds0 = vNULL;
8922 vec<tree> vec_oprnds1 = vNULL;
8923 gimple *def_stmt;
8924 tree mask_type;
8925 tree mask;
8927 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
8928 return false;
8930 if (!vectype || !VECTOR_BOOLEAN_TYPE_P (vectype))
8931 return false;
8933 mask_type = vectype;
8934 nunits = TYPE_VECTOR_SUBPARTS (vectype);
8936 if (slp_node)
8937 ncopies = 1;
8938 else
8939 ncopies = vect_get_num_copies (loop_vinfo, vectype);
8941 gcc_assert (ncopies >= 1);
8942 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
8943 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
8944 && reduc_def))
8945 return false;
8947 if (STMT_VINFO_LIVE_P (stmt_info))
8949 if (dump_enabled_p ())
8950 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8951 "value used after loop.\n");
8952 return false;
8955 if (!is_gimple_assign (stmt))
8956 return false;
8958 code = gimple_assign_rhs_code (stmt);
8960 if (TREE_CODE_CLASS (code) != tcc_comparison)
8961 return false;
8963 rhs1 = gimple_assign_rhs1 (stmt);
8964 rhs2 = gimple_assign_rhs2 (stmt);
8966 if (!vect_is_simple_use (rhs1, stmt_info->vinfo, &def_stmt,
8967 &dts[0], &vectype1))
8968 return false;
8970 if (!vect_is_simple_use (rhs2, stmt_info->vinfo, &def_stmt,
8971 &dts[1], &vectype2))
8972 return false;
8974 if (vectype1 && vectype2
8975 && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1),
8976 TYPE_VECTOR_SUBPARTS (vectype2)))
8977 return false;
8979 vectype = vectype1 ? vectype1 : vectype2;
8981 /* Invariant comparison. */
8982 if (!vectype)
8984 vectype = get_vectype_for_scalar_type (TREE_TYPE (rhs1));
8985 if (maybe_ne (TYPE_VECTOR_SUBPARTS (vectype), nunits))
8986 return false;
8988 else if (maybe_ne (nunits, TYPE_VECTOR_SUBPARTS (vectype)))
8989 return false;
8991 /* Can't compare mask and non-mask types. */
8992 if (vectype1 && vectype2
8993 && (VECTOR_BOOLEAN_TYPE_P (vectype1) ^ VECTOR_BOOLEAN_TYPE_P (vectype2)))
8994 return false;
8996 /* Boolean values may have another representation in vectors
8997 and therefore we prefer bit operations over comparison for
8998 them (which also works for scalar masks). We store opcodes
8999 to use in bitop1 and bitop2. Statement is vectorized as
9000 BITOP2 (rhs1 BITOP1 rhs2) or
9001 rhs1 BITOP2 (BITOP1 rhs2)
9002 depending on bitop1 and bitop2 arity. */
9003 if (VECTOR_BOOLEAN_TYPE_P (vectype))
9005 if (code == GT_EXPR)
9007 bitop1 = BIT_NOT_EXPR;
9008 bitop2 = BIT_AND_EXPR;
9010 else if (code == GE_EXPR)
9012 bitop1 = BIT_NOT_EXPR;
9013 bitop2 = BIT_IOR_EXPR;
9015 else if (code == LT_EXPR)
9017 bitop1 = BIT_NOT_EXPR;
9018 bitop2 = BIT_AND_EXPR;
9019 std::swap (rhs1, rhs2);
9020 std::swap (dts[0], dts[1]);
9022 else if (code == LE_EXPR)
9024 bitop1 = BIT_NOT_EXPR;
9025 bitop2 = BIT_IOR_EXPR;
9026 std::swap (rhs1, rhs2);
9027 std::swap (dts[0], dts[1]);
9029 else
9031 bitop1 = BIT_XOR_EXPR;
9032 if (code == EQ_EXPR)
9033 bitop2 = BIT_NOT_EXPR;
9037 if (!vec_stmt)
9039 STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type;
9040 vect_model_simple_cost (stmt_info, ncopies * (1 + (bitop2 != NOP_EXPR)),
9041 dts, ndts, NULL, NULL);
9042 if (bitop1 == NOP_EXPR)
9043 return expand_vec_cmp_expr_p (vectype, mask_type, code);
9044 else
9046 machine_mode mode = TYPE_MODE (vectype);
9047 optab optab;
9049 optab = optab_for_tree_code (bitop1, vectype, optab_default);
9050 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
9051 return false;
9053 if (bitop2 != NOP_EXPR)
9055 optab = optab_for_tree_code (bitop2, vectype, optab_default);
9056 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
9057 return false;
9059 return true;
9063 /* Transform. */
9064 if (!slp_node)
9066 vec_oprnds0.create (1);
9067 vec_oprnds1.create (1);
9070 /* Handle def. */
9071 lhs = gimple_assign_lhs (stmt);
9072 mask = vect_create_destination_var (lhs, mask_type);
9074 /* Handle cmp expr. */
9075 for (j = 0; j < ncopies; j++)
9077 gassign *new_stmt = NULL;
9078 if (j == 0)
9080 if (slp_node)
9082 auto_vec<tree, 2> ops;
9083 auto_vec<vec<tree>, 2> vec_defs;
9085 ops.safe_push (rhs1);
9086 ops.safe_push (rhs2);
9087 vect_get_slp_defs (ops, slp_node, &vec_defs);
9088 vec_oprnds1 = vec_defs.pop ();
9089 vec_oprnds0 = vec_defs.pop ();
9091 else
9093 vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt, vectype);
9094 vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt, vectype);
9097 else
9099 vec_rhs1 = vect_get_vec_def_for_stmt_copy (dts[0],
9100 vec_oprnds0.pop ());
9101 vec_rhs2 = vect_get_vec_def_for_stmt_copy (dts[1],
9102 vec_oprnds1.pop ());
9105 if (!slp_node)
9107 vec_oprnds0.quick_push (vec_rhs1);
9108 vec_oprnds1.quick_push (vec_rhs2);
9111 /* Arguments are ready. Create the new vector stmt. */
9112 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_rhs1)
9114 vec_rhs2 = vec_oprnds1[i];
9116 new_temp = make_ssa_name (mask);
9117 if (bitop1 == NOP_EXPR)
9119 new_stmt = gimple_build_assign (new_temp, code,
9120 vec_rhs1, vec_rhs2);
9121 vect_finish_stmt_generation (stmt, new_stmt, gsi);
9123 else
9125 if (bitop1 == BIT_NOT_EXPR)
9126 new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs2);
9127 else
9128 new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs1,
9129 vec_rhs2);
9130 vect_finish_stmt_generation (stmt, new_stmt, gsi);
9131 if (bitop2 != NOP_EXPR)
9133 tree res = make_ssa_name (mask);
9134 if (bitop2 == BIT_NOT_EXPR)
9135 new_stmt = gimple_build_assign (res, bitop2, new_temp);
9136 else
9137 new_stmt = gimple_build_assign (res, bitop2, vec_rhs1,
9138 new_temp);
9139 vect_finish_stmt_generation (stmt, new_stmt, gsi);
9142 if (slp_node)
9143 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
9146 if (slp_node)
9147 continue;
9149 if (j == 0)
9150 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
9151 else
9152 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
9154 prev_stmt_info = vinfo_for_stmt (new_stmt);
9157 vec_oprnds0.release ();
9158 vec_oprnds1.release ();
9160 return true;
9163 /* If SLP_NODE is nonnull, return true if vectorizable_live_operation
9164 can handle all live statements in the node. Otherwise return true
9165 if STMT is not live or if vectorizable_live_operation can handle it.
9166 GSI and VEC_STMT are as for vectorizable_live_operation. */
9168 static bool
9169 can_vectorize_live_stmts (gimple *stmt, gimple_stmt_iterator *gsi,
9170 slp_tree slp_node, gimple **vec_stmt)
9172 if (slp_node)
9174 gimple *slp_stmt;
9175 unsigned int i;
9176 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node), i, slp_stmt)
9178 stmt_vec_info slp_stmt_info = vinfo_for_stmt (slp_stmt);
9179 if (STMT_VINFO_LIVE_P (slp_stmt_info)
9180 && !vectorizable_live_operation (slp_stmt, gsi, slp_node, i,
9181 vec_stmt))
9182 return false;
9185 else if (STMT_VINFO_LIVE_P (vinfo_for_stmt (stmt))
9186 && !vectorizable_live_operation (stmt, gsi, slp_node, -1, vec_stmt))
9187 return false;
9189 return true;
9192 /* Make sure the statement is vectorizable. */
9194 bool
9195 vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
9196 slp_instance node_instance)
9198 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
9199 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
9200 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
9201 bool ok;
9202 gimple *pattern_stmt;
9203 gimple_seq pattern_def_seq;
9205 if (dump_enabled_p ())
9207 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
9208 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
9211 if (gimple_has_volatile_ops (stmt))
9213 if (dump_enabled_p ())
9214 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9215 "not vectorized: stmt has volatile operands\n");
9217 return false;
9220 /* Skip stmts that do not need to be vectorized. In loops this is expected
9221 to include:
9222 - the COND_EXPR which is the loop exit condition
9223 - any LABEL_EXPRs in the loop
9224 - computations that are used only for array indexing or loop control.
9225 In basic blocks we only analyze statements that are a part of some SLP
9226 instance; therefore, all the statements are relevant.
9228 A pattern statement needs to be analyzed instead of the original statement
9229 if the original statement is not relevant. Otherwise, we analyze both
9230 statements. In basic blocks we are called from some SLP instance
9231 traversal, so we don't analyze pattern stmts instead; the pattern stmts
9232 are already part of the SLP instance.  */
9234 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
9235 if (!STMT_VINFO_RELEVANT_P (stmt_info)
9236 && !STMT_VINFO_LIVE_P (stmt_info))
9238 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
9239 && pattern_stmt
9240 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
9241 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
9243 /* Analyze PATTERN_STMT instead of the original stmt. */
9244 stmt = pattern_stmt;
9245 stmt_info = vinfo_for_stmt (pattern_stmt);
9246 if (dump_enabled_p ())
9248 dump_printf_loc (MSG_NOTE, vect_location,
9249 "==> examining pattern statement: ");
9250 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
9253 else
9255 if (dump_enabled_p ())
9256 dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");
9258 return true;
9261 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
9262 && node == NULL
9263 && pattern_stmt
9264 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
9265 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
9267 /* Analyze PATTERN_STMT too. */
9268 if (dump_enabled_p ())
9270 dump_printf_loc (MSG_NOTE, vect_location,
9271 "==> examining pattern statement: ");
9272 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
9275 if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node,
9276 node_instance))
9277 return false;
9280 if (is_pattern_stmt_p (stmt_info)
9281 && node == NULL
9282 && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
9284 gimple_stmt_iterator si;
9286 for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
9288 gimple *pattern_def_stmt = gsi_stmt (si);
9289 if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
9290 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
9292 /* Analyze def stmt of STMT if it's a pattern stmt. */
9293 if (dump_enabled_p ())
9295 dump_printf_loc (MSG_NOTE, vect_location,
9296 "==> examining pattern def statement: ");
9297 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
9300 if (!vect_analyze_stmt (pattern_def_stmt,
9301 need_to_vectorize, node, node_instance))
9302 return false;
9307 switch (STMT_VINFO_DEF_TYPE (stmt_info))
9309 case vect_internal_def:
9310 break;
9312 case vect_reduction_def:
9313 case vect_nested_cycle:
9314 gcc_assert (!bb_vinfo
9315 && (relevance == vect_used_in_outer
9316 || relevance == vect_used_in_outer_by_reduction
9317 || relevance == vect_used_by_reduction
9318 || relevance == vect_unused_in_scope
9319 || relevance == vect_used_only_live));
9320 break;
9322 case vect_induction_def:
9323 gcc_assert (!bb_vinfo);
9324 break;
9326 case vect_constant_def:
9327 case vect_external_def:
9328 case vect_unknown_def_type:
9329 default:
9330 gcc_unreachable ();
9333 if (STMT_VINFO_RELEVANT_P (stmt_info))
9335 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
9336 gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
9337 || (is_gimple_call (stmt)
9338 && gimple_call_lhs (stmt) == NULL_TREE));
9339 *need_to_vectorize = true;
9342 if (PURE_SLP_STMT (stmt_info) && !node)
9344 dump_printf_loc (MSG_NOTE, vect_location,
9345 "handled only by SLP analysis\n");
9346 return true;
9349 ok = true;
9350 if (!bb_vinfo
9351 && (STMT_VINFO_RELEVANT_P (stmt_info)
9352 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
9353 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
9354 || vectorizable_conversion (stmt, NULL, NULL, node)
9355 || vectorizable_shift (stmt, NULL, NULL, node)
9356 || vectorizable_operation (stmt, NULL, NULL, node)
9357 || vectorizable_assignment (stmt, NULL, NULL, node)
9358 || vectorizable_load (stmt, NULL, NULL, node, NULL)
9359 || vectorizable_call (stmt, NULL, NULL, node)
9360 || vectorizable_store (stmt, NULL, NULL, node)
9361 || vectorizable_reduction (stmt, NULL, NULL, node, node_instance)
9362 || vectorizable_induction (stmt, NULL, NULL, node)
9363 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
9364 || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
9365 else
9367 if (bb_vinfo)
9368 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
9369 || vectorizable_conversion (stmt, NULL, NULL, node)
9370 || vectorizable_shift (stmt, NULL, NULL, node)
9371 || vectorizable_operation (stmt, NULL, NULL, node)
9372 || vectorizable_assignment (stmt, NULL, NULL, node)
9373 || vectorizable_load (stmt, NULL, NULL, node, NULL)
9374 || vectorizable_call (stmt, NULL, NULL, node)
9375 || vectorizable_store (stmt, NULL, NULL, node)
9376 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
9377 || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
9380 if (!ok)
9382 if (dump_enabled_p ())
9384 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9385 "not vectorized: relevant stmt not ");
9386 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
9387 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
9390 return false;
9393 if (bb_vinfo)
9394 return true;
9396 /* Stmts that are (also) "live" (i.e., used outside the loop)
9397 need extra handling, except for vectorizable reductions. */
9398 if (STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
9399 && !can_vectorize_live_stmts (stmt, NULL, node, NULL))
9401 if (dump_enabled_p ())
9403 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9404 "not vectorized: live stmt not supported: ");
9405 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
9408 return false;
9411 return true;
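
/* An illustration (sketch only; example_relevance is not part of the
   vectorizer): in the loop below the induction variable I and the exit
   test exist only for indexing and loop control, so they are typically
   not marked relevant and are skipped above, whereas the load, add and
   store would each have to pass one of the vectorizable_* checks.  */

static void
example_relevance (int *a, const int *b, int n)
{
  for (int i = 0; i < n; i++)   /* I and I < N: loop control/indexing.  */
    a[i] = b[i] + 1;            /* relevant: load, add, store.  */
}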
9415 /* Function vect_transform_stmt.
9417 Create a vectorized stmt to replace STMT, and insert it at BSI. */
9419 bool
9420 vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
9421 bool *grouped_store, slp_tree slp_node,
9422 slp_instance slp_node_instance)
9424 bool is_store = false;
9425 gimple *vec_stmt = NULL;
9426 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
9427 bool done;
9429 gcc_assert (slp_node || !PURE_SLP_STMT (stmt_info));
9430 gimple *old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
9432 bool nested_p = (STMT_VINFO_LOOP_VINFO (stmt_info)
9433 && nested_in_vect_loop_p
9434 (LOOP_VINFO_LOOP (STMT_VINFO_LOOP_VINFO (stmt_info)),
9435 stmt));
9437 switch (STMT_VINFO_TYPE (stmt_info))
9439 case type_demotion_vec_info_type:
9440 case type_promotion_vec_info_type:
9441 case type_conversion_vec_info_type:
9442 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
9443 gcc_assert (done);
9444 break;
9446 case induc_vec_info_type:
9447 done = vectorizable_induction (stmt, gsi, &vec_stmt, slp_node);
9448 gcc_assert (done);
9449 break;
9451 case shift_vec_info_type:
9452 done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
9453 gcc_assert (done);
9454 break;
9456 case op_vec_info_type:
9457 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
9458 gcc_assert (done);
9459 break;
9461 case assignment_vec_info_type:
9462 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
9463 gcc_assert (done);
9464 break;
9466 case load_vec_info_type:
9467 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
9468 slp_node_instance);
9469 gcc_assert (done);
9470 break;
9472 case store_vec_info_type:
9473 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
9474 gcc_assert (done);
9475 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
9477 /* In case of interleaving, the whole chain is vectorized when the
9478 last store in the chain is reached. Store stmts before the last
9479 one are skipped, and their vec_stmt_info shouldn't be freed
9480 meanwhile. */
9481 *grouped_store = true;
9482 stmt_vec_info group_info
9483 = vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info));
9484 if (GROUP_STORE_COUNT (group_info) == GROUP_SIZE (group_info))
9485 is_store = true;
9487 else
9488 is_store = true;
9489 break;
9491 case condition_vec_info_type:
9492 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
9493 gcc_assert (done);
9494 break;
9496 case comparison_vec_info_type:
9497 done = vectorizable_comparison (stmt, gsi, &vec_stmt, NULL, slp_node);
9498 gcc_assert (done);
9499 break;
9501 case call_vec_info_type:
9502 done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
9503 stmt = gsi_stmt (*gsi);
9504 break;
9506 case call_simd_clone_vec_info_type:
9507 done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
9508 stmt = gsi_stmt (*gsi);
9509 break;
9511 case reduc_vec_info_type:
9512 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node,
9513 slp_node_instance);
9514 gcc_assert (done);
9515 break;
9517 default:
9518 if (!STMT_VINFO_LIVE_P (stmt_info))
9520 if (dump_enabled_p ())
9521 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9522 "stmt not supported.\n");
9523 gcc_unreachable ();
9527 /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT;
9528 overwriting it would break hybrid SLP vectorization.  */
9529 if (slp_node)
9530 gcc_assert (!vec_stmt
9531 && STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt);
9533 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
9534 is being vectorized, but outside the immediately enclosing loop. */
9535 if (vec_stmt
9536 && nested_p
9537 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
9538 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
9539 || STMT_VINFO_RELEVANT (stmt_info) ==
9540 vect_used_in_outer_by_reduction))
9542 struct loop *innerloop = LOOP_VINFO_LOOP (
9543 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
9544 imm_use_iterator imm_iter;
9545 use_operand_p use_p;
9546 tree scalar_dest;
9547 gimple *exit_phi;
9549 if (dump_enabled_p ())
9550 dump_printf_loc (MSG_NOTE, vect_location,
9551 "Record the vdef for outer-loop vectorization.\n");
9553 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
9554 (to be used when vectorizing outer-loop stmts that use the DEF of
9555 STMT). */
9556 if (gimple_code (stmt) == GIMPLE_PHI)
9557 scalar_dest = PHI_RESULT (stmt);
9558 else
9559 scalar_dest = gimple_assign_lhs (stmt);
9561 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
9563 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
9565 exit_phi = USE_STMT (use_p);
9566 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
9571 /* Handle stmts whose DEF is used outside the loop-nest that is
9572 being vectorized. */
9573 if (STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
9575 done = can_vectorize_live_stmts (stmt, gsi, slp_node, &vec_stmt);
9576 gcc_assert (done);
9579 if (vec_stmt)
9580 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
9582 return is_store;
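
/* An illustration (sketch only; example_grouped_store is not part of the
   vectorizer): the two stores below form one interleaved group, and as
   noted in the store_vec_info_type case above the whole chain is only
   emitted in vector form once the last store of the group is reached.  */

static void
example_grouped_store (int *out, const int *a, const int *b, int n)
{
  for (int i = 0; i < n; i++)
    {
      out[2 * i] = a[i];        /* first element of the group  */
      out[2 * i + 1] = b[i];    /* last element: triggers the transform  */
    }
}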
9586 /* Remove a group of stores (for SLP or interleaving), free their
9587 stmt_vec_info. */
9589 void
9590 vect_remove_stores (gimple *first_stmt)
9592 gimple *next = first_stmt;
9593 gimple *tmp;
9594 gimple_stmt_iterator next_si;
9596 while (next)
9598 stmt_vec_info stmt_info = vinfo_for_stmt (next);
9600 tmp = GROUP_NEXT_ELEMENT (stmt_info);
9601 if (is_pattern_stmt_p (stmt_info))
9602 next = STMT_VINFO_RELATED_STMT (stmt_info);
9603 /* Free the attached stmt_vec_info and remove the stmt. */
9604 next_si = gsi_for_stmt (next);
9605 unlink_stmt_vdef (next);
9606 gsi_remove (&next_si, true);
9607 release_defs (next);
9608 free_stmt_vec_info (next);
9609 next = tmp;
9614 /* Function new_stmt_vec_info.
9616 Create and initialize a new stmt_vec_info struct for STMT. */
9618 stmt_vec_info
9619 new_stmt_vec_info (gimple *stmt, vec_info *vinfo)
9621 stmt_vec_info res;
9622 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
9624 STMT_VINFO_TYPE (res) = undef_vec_info_type;
9625 STMT_VINFO_STMT (res) = stmt;
9626 res->vinfo = vinfo;
9627 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
9628 STMT_VINFO_LIVE_P (res) = false;
9629 STMT_VINFO_VECTYPE (res) = NULL;
9630 STMT_VINFO_VEC_STMT (res) = NULL;
9631 STMT_VINFO_VECTORIZABLE (res) = true;
9632 STMT_VINFO_IN_PATTERN_P (res) = false;
9633 STMT_VINFO_RELATED_STMT (res) = NULL;
9634 STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
9635 STMT_VINFO_DATA_REF (res) = NULL;
9636 STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION;
9637 STMT_VINFO_VEC_CONST_COND_REDUC_CODE (res) = ERROR_MARK;
9639 if (gimple_code (stmt) == GIMPLE_PHI
9640 && is_loop_header_bb_p (gimple_bb (stmt)))
9641 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
9642 else
9643 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
9645 STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
9646 STMT_SLP_TYPE (res) = loop_vect;
9647 STMT_VINFO_NUM_SLP_USES (res) = 0;
9649 GROUP_FIRST_ELEMENT (res) = NULL;
9650 GROUP_NEXT_ELEMENT (res) = NULL;
9651 GROUP_SIZE (res) = 0;
9652 GROUP_STORE_COUNT (res) = 0;
9653 GROUP_GAP (res) = 0;
9654 GROUP_SAME_DR_STMT (res) = NULL;
9656 return res;
9660 /* Create the vector for stmt_vec_info.  */
9662 void
9663 init_stmt_vec_info_vec (void)
9665 gcc_assert (!stmt_vec_info_vec.exists ());
9666 stmt_vec_info_vec.create (50);
9670 /* Free the vector for stmt_vec_info and the entries it holds.  */
9672 void
9673 free_stmt_vec_info_vec (void)
9675 unsigned int i;
9676 stmt_vec_info info;
9677 FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
9678 if (info != NULL)
9679 free_stmt_vec_info (STMT_VINFO_STMT (info));
9680 gcc_assert (stmt_vec_info_vec.exists ());
9681 stmt_vec_info_vec.release ();
9685 /* Free stmt vectorization related info. */
9687 void
9688 free_stmt_vec_info (gimple *stmt)
9690 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
9692 if (!stmt_info)
9693 return;
9695 /* Check if this statement has a related "pattern stmt"
9696 (introduced by the vectorizer during the pattern recognition
9697 pass).  Free the pattern's stmt_vec_info and the def stmts' stmt_vec_info
9698 too. */
9699 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
9701 stmt_vec_info patt_info
9702 = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
9703 if (patt_info)
9705 gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
9706 gimple *patt_stmt = STMT_VINFO_STMT (patt_info);
9707 gimple_set_bb (patt_stmt, NULL);
9708 tree lhs = gimple_get_lhs (patt_stmt);
9709 if (lhs && TREE_CODE (lhs) == SSA_NAME)
9710 release_ssa_name (lhs);
9711 if (seq)
9713 gimple_stmt_iterator si;
9714 for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
9716 gimple *seq_stmt = gsi_stmt (si);
9717 gimple_set_bb (seq_stmt, NULL);
9718 lhs = gimple_get_lhs (seq_stmt);
9719 if (lhs && TREE_CODE (lhs) == SSA_NAME)
9720 release_ssa_name (lhs);
9721 free_stmt_vec_info (seq_stmt);
9724 free_stmt_vec_info (patt_stmt);
9728 STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
9729 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
9730 set_vinfo_for_stmt (stmt, NULL);
9731 free (stmt_info);
9735 /* Function get_vectype_for_scalar_type_and_size.
9737 Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
9738 by the target. */
9740 tree
9741 get_vectype_for_scalar_type_and_size (tree scalar_type, poly_uint64 size)
9743 tree orig_scalar_type = scalar_type;
9744 scalar_mode inner_mode;
9745 machine_mode simd_mode;
9746 poly_uint64 nunits;
9747 tree vectype;
9749 if (!is_int_mode (TYPE_MODE (scalar_type), &inner_mode)
9750 && !is_float_mode (TYPE_MODE (scalar_type), &inner_mode))
9751 return NULL_TREE;
9753 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
9755 /* For vector types of elements whose mode precision doesn't
9756 match their type's precision, we use an element type of mode
9757 precision. The vectorization routines will have to make sure
9758 they support the proper result truncation/extension.
9759 We also make sure to build vector types with INTEGER_TYPE
9760 component type only. */
9761 if (INTEGRAL_TYPE_P (scalar_type)
9762 && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
9763 || TREE_CODE (scalar_type) != INTEGER_TYPE))
9764 scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
9765 TYPE_UNSIGNED (scalar_type));
9767 /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
9768 When the component mode passes the above test simply use a type
9769 corresponding to that mode. The theory is that any use that
9770 would cause problems with this will disable vectorization anyway. */
9771 else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
9772 && !INTEGRAL_TYPE_P (scalar_type))
9773 scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);
9775 /* We can't build a vector type of elements with alignment bigger than
9776 their size. */
9777 else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
9778 scalar_type = lang_hooks.types.type_for_mode (inner_mode,
9779 TYPE_UNSIGNED (scalar_type));
9781 /* If we fell back to using the mode, fail if there was
9782 no scalar type for it. */
9783 if (scalar_type == NULL_TREE)
9784 return NULL_TREE;
9786 /* If no size was supplied use the mode the target prefers. Otherwise
9787 look up a vector mode of the specified size.  */
9788 if (known_eq (size, 0U))
9789 simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
9790 else if (!multiple_p (size, nbytes, &nunits)
9791 || !mode_for_vector (inner_mode, nunits).exists (&simd_mode))
9792 return NULL_TREE;
9793 /* NOTE: nunits == 1 is allowed to support single element vector types. */
9794 if (!multiple_p (GET_MODE_SIZE (simd_mode), nbytes, &nunits))
9795 return NULL_TREE;
9797 vectype = build_vector_type (scalar_type, nunits);
9799 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
9800 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
9801 return NULL_TREE;
9803 /* Re-attach the address-space qualifier if we canonicalized the scalar
9804 type. */
9805 if (TYPE_ADDR_SPACE (orig_scalar_type) != TYPE_ADDR_SPACE (vectype))
9806 return build_qualified_type
9807 (vectype, KEEP_QUAL_ADDR_SPACE (TYPE_QUALS (orig_scalar_type)));
9809 return vectype;
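
/* An illustrative use (sketch only; example_int_vectype is not part of
   the vectorizer, and the resulting mode is target dependent): requesting
   vectors of "int" that are 16 bytes wide yields a four-element vector
   type on typical 32-bit-int targets that provide such a mode, and
   NULL_TREE otherwise.  */

static tree
example_int_vectype (void)
{
  return get_vectype_for_scalar_type_and_size (integer_type_node, 16);
}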
9812 poly_uint64 current_vector_size;
9814 /* Function get_vectype_for_scalar_type.
9816 Returns the vector type corresponding to SCALAR_TYPE as supported
9817 by the target. */
9819 tree
9820 get_vectype_for_scalar_type (tree scalar_type)
9822 tree vectype;
9823 vectype = get_vectype_for_scalar_type_and_size (scalar_type,
9824 current_vector_size);
9825 if (vectype
9826 && known_eq (current_vector_size, 0U))
9827 current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
9828 return vectype;
9831 /* Function get_mask_type_for_scalar_type.
9833 Returns the mask type corresponding to a result of comparison
9834 of vectors of specified SCALAR_TYPE as supported by target. */
9836 tree
9837 get_mask_type_for_scalar_type (tree scalar_type)
9839 tree vectype = get_vectype_for_scalar_type (scalar_type);
9841 if (!vectype)
9842 return NULL;
9844 return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype),
9845 current_vector_size);
9848 /* Function get_same_sized_vectype
9850 Returns a vector type corresponding to SCALAR_TYPE of size
9851 VECTOR_TYPE if supported by the target. */
9853 tree
9854 get_same_sized_vectype (tree scalar_type, tree vector_type)
9856 if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type))
9857 return build_same_sized_truth_vector_type (vector_type);
9859 return get_vectype_for_scalar_type_and_size
9860 (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
9863 /* Function vect_is_simple_use.
9865 Input:
9866 VINFO - the vect info of the loop or basic block that is being vectorized.
9867 OPERAND - operand in the loop or bb.
9868 Output:
9869 DEF_STMT - the defining stmt in case OPERAND is an SSA_NAME.
9870 DT - the type of definition
9872 Returns whether a stmt with OPERAND can be vectorized.
9873 For loops, supportable operands are constants, loop invariants, and operands
9874 that are defined by the current iteration of the loop. Unsupportable
9875 operands are those that are defined by a previous iteration of the loop (as
9876 is the case in reduction/induction computations).
9877 For basic blocks, supportable operands are constants and bb invariants.
9878 For now, operands defined outside the basic block are not supported. */
9880 bool
9881 vect_is_simple_use (tree operand, vec_info *vinfo,
9882 gimple **def_stmt, enum vect_def_type *dt)
9884 *def_stmt = NULL;
9885 *dt = vect_unknown_def_type;
9887 if (dump_enabled_p ())
9889 dump_printf_loc (MSG_NOTE, vect_location,
9890 "vect_is_simple_use: operand ");
9891 dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
9892 dump_printf (MSG_NOTE, "\n");
9895 if (CONSTANT_CLASS_P (operand))
9897 *dt = vect_constant_def;
9898 return true;
9901 if (is_gimple_min_invariant (operand))
9903 *dt = vect_external_def;
9904 return true;
9907 if (TREE_CODE (operand) != SSA_NAME)
9909 if (dump_enabled_p ())
9910 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9911 "not ssa-name.\n");
9912 return false;
9915 if (SSA_NAME_IS_DEFAULT_DEF (operand))
9917 *dt = vect_external_def;
9918 return true;
9921 *def_stmt = SSA_NAME_DEF_STMT (operand);
9922 if (dump_enabled_p ())
9924 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
9925 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
9928 if (! vect_stmt_in_region_p (vinfo, *def_stmt))
9929 *dt = vect_external_def;
9930 else
9932 stmt_vec_info stmt_vinfo = vinfo_for_stmt (*def_stmt);
9933 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
9936 if (dump_enabled_p ())
9938 dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
9939 switch (*dt)
9941 case vect_uninitialized_def:
9942 dump_printf (MSG_NOTE, "uninitialized\n");
9943 break;
9944 case vect_constant_def:
9945 dump_printf (MSG_NOTE, "constant\n");
9946 break;
9947 case vect_external_def:
9948 dump_printf (MSG_NOTE, "external\n");
9949 break;
9950 case vect_internal_def:
9951 dump_printf (MSG_NOTE, "internal\n");
9952 break;
9953 case vect_induction_def:
9954 dump_printf (MSG_NOTE, "induction\n");
9955 break;
9956 case vect_reduction_def:
9957 dump_printf (MSG_NOTE, "reduction\n");
9958 break;
9959 case vect_double_reduction_def:
9960 dump_printf (MSG_NOTE, "double reduction\n");
9961 break;
9962 case vect_nested_cycle:
9963 dump_printf (MSG_NOTE, "nested cycle\n");
9964 break;
9965 case vect_unknown_def_type:
9966 dump_printf (MSG_NOTE, "unknown\n");
9967 break;
9971 if (*dt == vect_unknown_def_type)
9973 if (dump_enabled_p ())
9974 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9975 "Unsupported pattern.\n");
9976 return false;
9979 switch (gimple_code (*def_stmt))
9981 case GIMPLE_PHI:
9982 case GIMPLE_ASSIGN:
9983 case GIMPLE_CALL:
9984 break;
9985 default:
9986 if (dump_enabled_p ())
9987 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9988 "unsupported defining stmt:\n");
9989 return false;
9992 return true;
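
/* An illustrative caller (sketch only; example_operand_ok is not part of
   the vectorizer): operands are usually classified with vect_is_simple_use
   before deciding how their vector definitions will be obtained.  */

static bool
example_operand_ok (tree op, vec_info *vinfo)
{
  gimple *def_stmt;
  enum vect_def_type dt;
  if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
    return false;
  /* Constants and external defs are invariant and can be broadcast;
     internal defs come from a statement vectorized in the current
     region.  */
  return (dt == vect_constant_def
	  || dt == vect_external_def
	  || dt == vect_internal_def);
}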
9995 /* Function vect_is_simple_use.
9997 Same as vect_is_simple_use but also determines the vector operand
9998 type of OPERAND and stores it to *VECTYPE. If the definition of
9999 OPERAND is vect_uninitialized_def, vect_constant_def or
10000 vect_external_def, *VECTYPE will be set to NULL_TREE and the caller
10001 is responsible for computing the best suited vector type for the
10002 scalar operand. */
10004 bool
10005 vect_is_simple_use (tree operand, vec_info *vinfo,
10006 gimple **def_stmt, enum vect_def_type *dt, tree *vectype)
10008 if (!vect_is_simple_use (operand, vinfo, def_stmt, dt))
10009 return false;
10011 /* Now get a vector type if the def is internal, otherwise supply
10012 NULL_TREE and leave it up to the caller to figure out a proper
10013 type for the use stmt. */
10014 if (*dt == vect_internal_def
10015 || *dt == vect_induction_def
10016 || *dt == vect_reduction_def
10017 || *dt == vect_double_reduction_def
10018 || *dt == vect_nested_cycle)
10020 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
10022 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
10023 && !STMT_VINFO_RELEVANT (stmt_info)
10024 && !STMT_VINFO_LIVE_P (stmt_info))
10025 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
10027 *vectype = STMT_VINFO_VECTYPE (stmt_info);
10028 gcc_assert (*vectype != NULL_TREE);
10030 else if (*dt == vect_uninitialized_def
10031 || *dt == vect_constant_def
10032 || *dt == vect_external_def)
10033 *vectype = NULL_TREE;
10034 else
10035 gcc_unreachable ();
10037 return true;
10041 /* Function supportable_widening_operation
10043 Check whether an operation represented by the code CODE is a
10044 widening operation that is supported by the target platform in
10045 vector form (i.e., when operating on arguments of type VECTYPE_IN
10046 producing a result of type VECTYPE_OUT).
10048 Widening operations we currently support are NOP (CONVERT), FLOAT
10049 and WIDEN_MULT. This function checks if these operations are supported
10050 by the target platform either directly (via vector tree-codes), or via
10051 target builtins.
10053 Output:
10054 - CODE1 and CODE2 are codes of vector operations to be used when
10055 vectorizing the operation, if available.
10056 - MULTI_STEP_CVT determines the number of required intermediate steps in
10057 case of multi-step conversion (like char->short->int - in that case
10058 MULTI_STEP_CVT will be 1).
10059 - INTERM_TYPES contains the intermediate type required to perform the
10060 widening operation (short in the above example). */
10062 bool
10063 supportable_widening_operation (enum tree_code code, gimple *stmt,
10064 tree vectype_out, tree vectype_in,
10065 enum tree_code *code1, enum tree_code *code2,
10066 int *multi_step_cvt,
10067 vec<tree> *interm_types)
10069 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
10070 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
10071 struct loop *vect_loop = NULL;
10072 machine_mode vec_mode;
10073 enum insn_code icode1, icode2;
10074 optab optab1, optab2;
10075 tree vectype = vectype_in;
10076 tree wide_vectype = vectype_out;
10077 enum tree_code c1, c2;
10078 int i;
10079 tree prev_type, intermediate_type;
10080 machine_mode intermediate_mode, prev_mode;
10081 optab optab3, optab4;
10083 *multi_step_cvt = 0;
10084 if (loop_info)
10085 vect_loop = LOOP_VINFO_LOOP (loop_info);
10087 switch (code)
10089 case WIDEN_MULT_EXPR:
10090 /* The result of a vectorized widening operation usually requires
10091 two vectors (because the widened results do not fit into one vector).
10092 The generated vector results would normally be expected to be
10093 generated in the same order as in the original scalar computation,
10094 i.e. if 8 results are generated in each vector iteration, they are
10095 to be organized as follows:
10096 vect1: [res1,res2,res3,res4],
10097 vect2: [res5,res6,res7,res8].
10099 However, in the special case that the result of the widening
10100 operation is used in a reduction computation only, the order doesn't
10101 matter (because when vectorizing a reduction we change the order of
10102 the computation). Some targets can take advantage of this and
10103 generate more efficient code. For example, targets like Altivec,
10104 that support widen_mult using a sequence of {mult_even,mult_odd}
10105 generate the following vectors:
10106 vect1: [res1,res3,res5,res7],
10107 vect2: [res2,res4,res6,res8].
10109 When vectorizing outer-loops, we execute the inner-loop sequentially
10110 (each vectorized inner-loop iteration contributes to VF outer-loop
10111 iterations in parallel).  We therefore don't allow changing the
10112 order of the computation in the inner-loop during outer-loop
10113 vectorization. */
10114 /* TODO: Another case in which order doesn't *really* matter is when we
10115 widen and then contract again, e.g. (short)((int)x * y >> 8).
10116 Normally, pack_trunc performs an even/odd permute, whereas the
10117 repack from an even/odd expansion would be an interleave, which
10118 would be significantly simpler for e.g. AVX2. */
10119 /* In any case, in order to avoid duplicating the code below, recurse
10120 on VEC_WIDEN_MULT_EVEN_EXPR. If it succeeds, all the return values
10121 are properly set up for the caller. If we fail, we'll continue with
10122 a VEC_WIDEN_MULT_LO/HI_EXPR check. */
10123 if (vect_loop
10124 && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
10125 && !nested_in_vect_loop_p (vect_loop, stmt)
10126 && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
10127 stmt, vectype_out, vectype_in,
10128 code1, code2, multi_step_cvt,
10129 interm_types))
10131 /* Elements in a vector with the vect_used_by_reduction property cannot
10132 be reordered if the use chain with this property does not have the
10133 same operation.  One such example is s += a * b, where elements
10134 in a and b cannot be reordered. Here we check if the vector defined
10135 by STMT is only directly used in the reduction statement. */
10136 tree lhs = gimple_assign_lhs (stmt);
10137 use_operand_p dummy;
10138 gimple *use_stmt;
10139 stmt_vec_info use_stmt_info = NULL;
10140 if (single_imm_use (lhs, &dummy, &use_stmt)
10141 && (use_stmt_info = vinfo_for_stmt (use_stmt))
10142 && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
10143 return true;
10145 c1 = VEC_WIDEN_MULT_LO_EXPR;
10146 c2 = VEC_WIDEN_MULT_HI_EXPR;
10147 break;
10149 case DOT_PROD_EXPR:
10150 c1 = DOT_PROD_EXPR;
10151 c2 = DOT_PROD_EXPR;
10152 break;
10154 case SAD_EXPR:
10155 c1 = SAD_EXPR;
10156 c2 = SAD_EXPR;
10157 break;
10159 case VEC_WIDEN_MULT_EVEN_EXPR:
10160 /* Support the recursion induced just above. */
10161 c1 = VEC_WIDEN_MULT_EVEN_EXPR;
10162 c2 = VEC_WIDEN_MULT_ODD_EXPR;
10163 break;
10165 case WIDEN_LSHIFT_EXPR:
10166 c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
10167 c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
10168 break;
10170 CASE_CONVERT:
10171 c1 = VEC_UNPACK_LO_EXPR;
10172 c2 = VEC_UNPACK_HI_EXPR;
10173 break;
10175 case FLOAT_EXPR:
10176 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
10177 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
10178 break;
10180 case FIX_TRUNC_EXPR:
10181 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
10182 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
10183 computing the operation. */
10184 return false;
10186 default:
10187 gcc_unreachable ();
10190 if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
10191 std::swap (c1, c2);
10193 if (code == FIX_TRUNC_EXPR)
10195 /* The signedness is determined from the output operand.  */
10196 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
10197 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
10199 else
10201 optab1 = optab_for_tree_code (c1, vectype, optab_default);
10202 optab2 = optab_for_tree_code (c2, vectype, optab_default);
10205 if (!optab1 || !optab2)
10206 return false;
10208 vec_mode = TYPE_MODE (vectype);
10209 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
10210 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
10211 return false;
10213 *code1 = c1;
10214 *code2 = c2;
10216 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
10217 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
10218 /* For scalar masks we may have different boolean
10219 vector types having the same QImode. Thus we
10220 add an additional check for the number of elements.  */
10221 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
10222 || known_eq (TYPE_VECTOR_SUBPARTS (vectype),
10223 TYPE_VECTOR_SUBPARTS (wide_vectype) * 2));
10225 /* Check if it's a multi-step conversion that can be done using intermediate
10226 types. */
10228 prev_type = vectype;
10229 prev_mode = vec_mode;
10231 if (!CONVERT_EXPR_CODE_P (code))
10232 return false;
10234 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
10235 intermediate steps in the promotion sequence.  We try
10236 MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
10237 not. */
10238 interm_types->create (MAX_INTERM_CVT_STEPS);
10239 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
10241 intermediate_mode = insn_data[icode1].operand[0].mode;
10242 if (VECTOR_BOOLEAN_TYPE_P (prev_type))
10244 intermediate_type = vect_halve_mask_nunits (prev_type);
10245 if (intermediate_mode != TYPE_MODE (intermediate_type))
10246 return false;
10248 else
10249 intermediate_type
10250 = lang_hooks.types.type_for_mode (intermediate_mode,
10251 TYPE_UNSIGNED (prev_type));
10253 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
10254 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
10256 if (!optab3 || !optab4
10257 || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
10258 || insn_data[icode1].operand[0].mode != intermediate_mode
10259 || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
10260 || insn_data[icode2].operand[0].mode != intermediate_mode
10261 || ((icode1 = optab_handler (optab3, intermediate_mode))
10262 == CODE_FOR_nothing)
10263 || ((icode2 = optab_handler (optab4, intermediate_mode))
10264 == CODE_FOR_nothing))
10265 break;
10267 interm_types->quick_push (intermediate_type);
10268 (*multi_step_cvt)++;
10270 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
10271 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
10272 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
10273 || known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type),
10274 TYPE_VECTOR_SUBPARTS (wide_vectype) * 2));
10276 prev_type = intermediate_type;
10277 prev_mode = intermediate_mode;
10280 interm_types->release ();
10281 return false;
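
/* An illustration (sketch only; example_widen_chain is not part of the
   vectorizer): a conversion that usually needs one intermediate step
   (char -> short -> int), i.e. a successful call above would set
   *MULTI_STEP_CVT to 1 and record the "short" vector type in
   INTERM_TYPES, provided the target supports the unpack operations.  */

static void
example_widen_chain (int *dst, const signed char *src, int n)
{
  for (int i = 0; i < n; i++)
    dst[i] = src[i];            /* char widened to int.  */
}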
10285 /* Function supportable_narrowing_operation
10287 Check whether an operation represented by the code CODE is a
10288 narrowing operation that is supported by the target platform in
10289 vector form (i.e., when operating on arguments of type VECTYPE_IN
10290 and producing a result of type VECTYPE_OUT).
10292 Narrowing operations we currently support are NOP (CONVERT) and
10293 FIX_TRUNC. This function checks if these operations are supported by
10294 the target platform directly via vector tree-codes.
10296 Output:
10297 - CODE1 is the code of a vector operation to be used when
10298 vectorizing the operation, if available.
10299 - MULTI_STEP_CVT determines the number of required intermediate steps in
10300 case of multi-step conversion (like int->short->char - in that case
10301 MULTI_STEP_CVT will be 1).
10302 - INTERM_TYPES contains the intermediate type required to perform the
10303 narrowing operation (short in the above example). */
10305 bool
10306 supportable_narrowing_operation (enum tree_code code,
10307 tree vectype_out, tree vectype_in,
10308 enum tree_code *code1, int *multi_step_cvt,
10309 vec<tree> *interm_types)
10311 machine_mode vec_mode;
10312 enum insn_code icode1;
10313 optab optab1, interm_optab;
10314 tree vectype = vectype_in;
10315 tree narrow_vectype = vectype_out;
10316 enum tree_code c1;
10317 tree intermediate_type, prev_type;
10318 machine_mode intermediate_mode, prev_mode;
10319 int i;
10320 bool uns;
10322 *multi_step_cvt = 0;
10323 switch (code)
10325 CASE_CONVERT:
10326 c1 = VEC_PACK_TRUNC_EXPR;
10327 break;
10329 case FIX_TRUNC_EXPR:
10330 c1 = VEC_PACK_FIX_TRUNC_EXPR;
10331 break;
10333 case FLOAT_EXPR:
10334 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
10335 tree code and optabs used for computing the operation. */
10336 return false;
10338 default:
10339 gcc_unreachable ();
10342 if (code == FIX_TRUNC_EXPR)
10343 /* The signedness is determined from the output operand.  */
10344 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
10345 else
10346 optab1 = optab_for_tree_code (c1, vectype, optab_default);
10348 if (!optab1)
10349 return false;
10351 vec_mode = TYPE_MODE (vectype);
10352 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
10353 return false;
10355 *code1 = c1;
10357 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
10358 /* For scalar masks we may have different boolean
10359 vector types having the same QImode. Thus we
10360 add an additional check for the number of elements.  */
10361 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
10362 || known_eq (TYPE_VECTOR_SUBPARTS (vectype) * 2,
10363 TYPE_VECTOR_SUBPARTS (narrow_vectype)));
10365 /* Check if it's a multi-step conversion that can be done using intermediate
10366 types. */
10367 prev_mode = vec_mode;
10368 prev_type = vectype;
10369 if (code == FIX_TRUNC_EXPR)
10370 uns = TYPE_UNSIGNED (vectype_out);
10371 else
10372 uns = TYPE_UNSIGNED (vectype);
10374 /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
10375 conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
10376 costly than signed. */
10377 if (code == FIX_TRUNC_EXPR && uns)
10379 enum insn_code icode2;
10381 intermediate_type
10382 = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
10383 interm_optab
10384 = optab_for_tree_code (c1, intermediate_type, optab_default);
10385 if (interm_optab != unknown_optab
10386 && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
10387 && insn_data[icode1].operand[0].mode
10388 == insn_data[icode2].operand[0].mode)
10390 uns = false;
10391 optab1 = interm_optab;
10392 icode1 = icode2;
10396 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
10397 intermediate steps in the narrowing sequence.  We try
10398 MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not. */
10399 interm_types->create (MAX_INTERM_CVT_STEPS);
10400 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
10402 intermediate_mode = insn_data[icode1].operand[0].mode;
10403 if (VECTOR_BOOLEAN_TYPE_P (prev_type))
10405 intermediate_type = vect_double_mask_nunits (prev_type);
10406 if (intermediate_mode != TYPE_MODE (intermediate_type))
10407 return false;
10409 else
10410 intermediate_type
10411 = lang_hooks.types.type_for_mode (intermediate_mode, uns);
10412 interm_optab
10413 = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
10414 optab_default);
10415 if (!interm_optab
10416 || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
10417 || insn_data[icode1].operand[0].mode != intermediate_mode
10418 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
10419 == CODE_FOR_nothing))
10420 break;
10422 interm_types->quick_push (intermediate_type);
10423 (*multi_step_cvt)++;
10425 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
10426 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
10427 || known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type) * 2,
10428 TYPE_VECTOR_SUBPARTS (narrow_vectype)));
10430 prev_mode = intermediate_mode;
10431 prev_type = intermediate_type;
10432 optab1 = interm_optab;
10435 interm_types->release ();
10436 return false;
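
/* An illustration (sketch only; example_narrow_chain is not part of the
   vectorizer): the mirror image of the widening example above, a
   conversion that usually needs one intermediate step
   (int -> short -> char), so *MULTI_STEP_CVT would be 1 with the "short"
   vector type in INTERM_TYPES when the packing operations exist.  */

static void
example_narrow_chain (signed char *dst, const int *src, int n)
{
  for (int i = 0; i < n; i++)
    dst[i] = (signed char) src[i];      /* int narrowed to char.  */
}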
10439 /* Generate and return a statement that sets vector mask MASK such that
10440 MASK[I] is true iff J + START_INDEX < END_INDEX for all J <= I. */
10442 gcall *
10443 vect_gen_while (tree mask, tree start_index, tree end_index)
10445 tree cmp_type = TREE_TYPE (start_index);
10446 tree mask_type = TREE_TYPE (mask);
10447 gcc_checking_assert (direct_internal_fn_supported_p (IFN_WHILE_ULT,
10448 cmp_type, mask_type,
10449 OPTIMIZE_FOR_SPEED));
10450 gcall *call = gimple_build_call_internal (IFN_WHILE_ULT, 3,
10451 start_index, end_index,
10452 build_zero_cst (mask_type));
10453 gimple_call_set_lhs (call, mask);
10454 return call;
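
/* An illustration (sketch only; example_lane_active is not part of the
   vectorizer): because MASK[I] requires the condition to hold for every
   J <= I, and J + START_INDEX is largest at J == I, the per-lane value
   reduces to the single scalar comparison below (assuming the index
   arithmetic does not overflow).  */

static inline bool
example_lane_active (unsigned HOST_WIDE_INT lane,
		     unsigned HOST_WIDE_INT start_index,
		     unsigned HOST_WIDE_INT end_index)
{
  /* MASK[lane] as defined for vect_gen_while above.  */
  return lane + start_index < end_index;
}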
10457 /* Generate a vector mask of type MASK_TYPE for which index I is false iff
10458 J + START_INDEX < END_INDEX for all J <= I. Add the statements to SEQ. */
10460 tree
10461 vect_gen_while_not (gimple_seq *seq, tree mask_type, tree start_index,
10462 tree end_index)
10464 tree tmp = make_ssa_name (mask_type);
10465 gcall *call = vect_gen_while (tmp, start_index, end_index);
10466 gimple_seq_add_stmt (seq, call);
10467 return gimple_build (seq, BIT_NOT_EXPR, mask_type, tmp);