[1/n] PR85694: Allow pattern definition statements to be reused
[official-gcc.git] / gcc / tree-vect-stmts.c
blob 047edcdae5f296ccc49f99493c35b9c03c722d43
1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003-2018 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
4 and Ira Rosen <irar@il.ibm.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "ssa.h"
31 #include "optabs-tree.h"
32 #include "insn-config.h"
33 #include "recog.h" /* FIXME: for insn_data */
34 #include "cgraph.h"
35 #include "dumpfile.h"
36 #include "alias.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
39 #include "tree-eh.h"
40 #include "gimplify.h"
41 #include "gimple-iterator.h"
42 #include "gimplify-me.h"
43 #include "tree-cfg.h"
44 #include "tree-ssa-loop-manip.h"
45 #include "cfgloop.h"
46 #include "tree-ssa-loop.h"
47 #include "tree-scalar-evolution.h"
48 #include "tree-vectorizer.h"
49 #include "builtins.h"
50 #include "internal-fn.h"
51 #include "tree-vector-builder.h"
52 #include "vec-perm-indices.h"
53 #include "tree-ssa-loop-niter.h"
54 #include "gimple-fold.h"
56 /* For lang_hooks.types.type_for_mode. */
57 #include "langhooks.h"
59 /* Return the vectorized type for the given statement. */
61 tree
62 stmt_vectype (struct _stmt_vec_info *stmt_info)
64 return STMT_VINFO_VECTYPE (stmt_info);
67 /* Return TRUE iff the given statement is in an inner loop relative to
68 the loop being vectorized. */
69 bool
70 stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
72 gimple *stmt = STMT_VINFO_STMT (stmt_info);
73 basic_block bb = gimple_bb (stmt);
74 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
75 struct loop* loop;
77 if (!loop_vinfo)
78 return false;
80 loop = LOOP_VINFO_LOOP (loop_vinfo);
82 return (bb->loop_father == loop->inner);
85 /* Record the cost of a statement, either by directly informing the
86 target model or by saving it in a vector for later processing.
87 Return a preliminary estimate of the statement's cost. */
89 unsigned
90 record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
91 enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
92 int misalign, enum vect_cost_model_location where)
94 if ((kind == vector_load || kind == unaligned_load)
95 && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
96 kind = vector_gather_load;
97 if ((kind == vector_store || kind == unaligned_store)
98 && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
99 kind = vector_scatter_store;
101 stmt_info_for_cost si = { count, kind, where,
102 stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
103 misalign };
104 body_cost_vec->safe_push (si);
106 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
107 return (unsigned)
108 (builtin_vectorization_cost (kind, vectype, misalign) * count);
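/* Illustrative note (a sketch, not part of the original source): a caller
   that wants to cost two unaligned vector loads would do something like

     unsigned cost = record_stmt_cost (cost_vec, 2, unaligned_load,
                                       stmt_info, DR_MISALIGNMENT (dr),
                                       vect_body);

   The (count, kind, where) triple is queued in COST_VEC for the target
   cost model, and the return value is the preliminary estimate
   2 * builtin_vectorization_cost (unaligned_load, vectype, misalign).  */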
111 /* Return a variable of type ELEM_TYPE[NELEMS]. */
113 static tree
114 create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
116 return create_tmp_var (build_array_type_nelts (elem_type, nelems),
117 "vect_array");
120 /* ARRAY is an array of vectors created by create_vector_array.
121 Return an SSA_NAME for the vector in index N. The reference
122 is part of the vectorization of STMT and the vector is associated
123 with scalar destination SCALAR_DEST. */
125 static tree
126 read_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
127 tree array, unsigned HOST_WIDE_INT n)
129 tree vect_type, vect, vect_name, array_ref;
130 gimple *new_stmt;
132 gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
133 vect_type = TREE_TYPE (TREE_TYPE (array));
134 vect = vect_create_destination_var (scalar_dest, vect_type);
135 array_ref = build4 (ARRAY_REF, vect_type, array,
136 build_int_cst (size_type_node, n),
137 NULL_TREE, NULL_TREE);
139 new_stmt = gimple_build_assign (vect, array_ref);
140 vect_name = make_ssa_name (vect, new_stmt);
141 gimple_assign_set_lhs (new_stmt, vect_name);
142 vect_finish_stmt_generation (stmt, new_stmt, gsi);
144 return vect_name;
147 /* ARRAY is an array of vectors created by create_vector_array.
148 Emit code to store SSA_NAME VECT in index N of the array.
149 The store is part of the vectorization of STMT. */
151 static void
152 write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect,
153 tree array, unsigned HOST_WIDE_INT n)
155 tree array_ref;
156 gimple *new_stmt;
158 array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
159 build_int_cst (size_type_node, n),
160 NULL_TREE, NULL_TREE);
162 new_stmt = gimple_build_assign (array_ref, vect);
163 vect_finish_stmt_generation (stmt, new_stmt, gsi);
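/* For illustration: with N == 2, write_vector_array emits the assignment

     vect_array[2] = vect;

   while read_vector_array above emits the mirror-image load of
   vect_array[N] into a fresh SSA name created from SCALAR_DEST.  */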
166 /* PTR is a pointer to an array of type TYPE. Return a representation
167 of *PTR. The memory reference is created with alias pointer type
168 ALIAS_PTR_TYPE and replaces the references in the original group.  */
170 static tree
171 create_array_ref (tree type, tree ptr, tree alias_ptr_type)
173 tree mem_ref;
175 mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
176 /* Arrays have the same alignment as their type. */
177 set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
178 return mem_ref;
181 /* Add a clobber of variable VAR to the vectorization of STMT.
182 Emit the clobber before *GSI. */
184 static void
185 vect_clobber_variable (gimple *stmt, gimple_stmt_iterator *gsi, tree var)
187 tree clobber = build_clobber (TREE_TYPE (var));
188 gimple *new_stmt = gimple_build_assign (var, clobber);
189 vect_finish_stmt_generation (stmt, new_stmt, gsi);
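/* Illustrative sketch: in GIMPLE dumps the emitted statement appears
   roughly as

     var ={v} {CLOBBER};

   which marks the end of VAR's lifetime so that later passes may reuse
   its stack slot.  */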
192 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
194 /* Function vect_mark_relevant.
196 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
198 static void
199 vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
200 enum vect_relevant relevant, bool live_p)
202 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
203 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
204 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
205 gimple *pattern_stmt;
207 if (dump_enabled_p ())
209 dump_printf_loc (MSG_NOTE, vect_location,
210 "mark relevant %d, live %d: ", relevant, live_p);
211 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
214 /* If this stmt is an original stmt in a pattern, we might need to mark its
215 related pattern stmt instead of the original stmt. However, such stmts
216 may have their own uses that are not in any pattern; in such cases the
217 stmt itself should be marked. */
218 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
220 /* This is the last stmt in a sequence that was detected as a
221 pattern that can potentially be vectorized. Don't mark the stmt
222 as relevant/live because it's not going to be vectorized.
223 Instead mark the pattern-stmt that replaces it. */
225 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
227 if (dump_enabled_p ())
228 dump_printf_loc (MSG_NOTE, vect_location,
229 "last stmt in pattern. don't mark"
230 " relevant/live.\n");
231 stmt_info = vinfo_for_stmt (pattern_stmt);
232 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
233 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
234 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
235 stmt = pattern_stmt;
238 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
239 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
240 STMT_VINFO_RELEVANT (stmt_info) = relevant;
242 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
243 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
245 if (dump_enabled_p ())
246 dump_printf_loc (MSG_NOTE, vect_location,
247 "already marked relevant/live.\n");
248 return;
251 worklist->safe_push (stmt);
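/* Sketch of the redirection above (illustrative, not from this file):
   if the original statement S1 was recognized as part of a pattern and
   replaced by pattern statement S1',

     S1:  x_1 = a_2 w* b_3          <-- STMT_VINFO_IN_PATTERN_P
     S1': pattern stmt related to S1

   then asking to mark S1 actually marks S1' and pushes S1' on the
   worklist, because only S1' will be vectorized.  */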
255 /* Function is_simple_and_all_uses_invariant
257 Return true if STMT is simple and all uses of it are invariant. */
259 bool
260 is_simple_and_all_uses_invariant (gimple *stmt, loop_vec_info loop_vinfo)
262 tree op;
263 gimple *def_stmt;
264 ssa_op_iter iter;
266 if (!is_gimple_assign (stmt))
267 return false;
269 FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
271 enum vect_def_type dt = vect_uninitialized_def;
273 if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt))
275 if (dump_enabled_p ())
276 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
277 "use not simple.\n");
278 return false;
281 if (dt != vect_external_def && dt != vect_constant_def)
282 return false;
284 return true;
287 /* Function vect_stmt_relevant_p.
289 Return true if STMT in loop that is represented by LOOP_VINFO is
290 "relevant for vectorization".
292 A stmt is considered "relevant for vectorization" if:
293 - it has uses outside the loop.
294 - it has vdefs (it alters memory).
295 - it is a control stmt in the loop (other than the exit condition).
297 CHECKME: what other side effects would the vectorizer allow? */
299 static bool
300 vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
301 enum vect_relevant *relevant, bool *live_p)
303 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
304 ssa_op_iter op_iter;
305 imm_use_iterator imm_iter;
306 use_operand_p use_p;
307 def_operand_p def_p;
309 *relevant = vect_unused_in_scope;
310 *live_p = false;
312 /* cond stmt other than loop exit cond. */
313 if (is_ctrl_stmt (stmt)
314 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
315 != loop_exit_ctrl_vec_info_type)
316 *relevant = vect_used_in_scope;
318 /* changing memory. */
319 if (gimple_code (stmt) != GIMPLE_PHI)
320 if (gimple_vdef (stmt)
321 && !gimple_clobber_p (stmt))
323 if (dump_enabled_p ())
324 dump_printf_loc (MSG_NOTE, vect_location,
325 "vec_stmt_relevant_p: stmt has vdefs.\n");
326 *relevant = vect_used_in_scope;
329 /* uses outside the loop. */
330 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
332 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
334 basic_block bb = gimple_bb (USE_STMT (use_p));
335 if (!flow_bb_inside_loop_p (loop, bb))
337 if (dump_enabled_p ())
338 dump_printf_loc (MSG_NOTE, vect_location,
339 "vec_stmt_relevant_p: used out of loop.\n");
341 if (is_gimple_debug (USE_STMT (use_p)))
342 continue;
344 /* We expect all such uses to be in the loop exit phis
345 (because of loop-closed SSA form).  */
346 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
347 gcc_assert (bb == single_exit (loop)->dest);
349 *live_p = true;
354 if (*live_p && *relevant == vect_unused_in_scope
355 && !is_simple_and_all_uses_invariant (stmt, loop_vinfo))
357 if (dump_enabled_p ())
358 dump_printf_loc (MSG_NOTE, vect_location,
359 "vec_stmt_relevant_p: stmt live but not relevant.\n");
360 *relevant = vect_used_only_live;
363 return (*live_p || *relevant);
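/* Worked example (illustrative): in

     for (i = 0; i < n; i++)
       { t = a[i] + b[i];  c[i] = t;  last = t; }

   the store to c[i] has a vdef, so it is marked vect_used_in_scope;
   if LAST is only read after the loop, its defining statement gets
   *live_p set and, not being relevant otherwise, is given
   vect_used_only_live.  */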
367 /* Function exist_non_indexing_operands_for_use_p
369 USE is one of the uses attached to STMT. Check if USE is
370 used in STMT for anything other than indexing an array. */
372 static bool
373 exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)
375 tree operand;
376 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
378 /* USE corresponds to some operand in STMT. If there is no data
379 reference in STMT, then any operand that corresponds to USE
380 is not indexing an array. */
381 if (!STMT_VINFO_DATA_REF (stmt_info))
382 return true;
384 /* STMT has a data_ref. FORNOW this means that it's one of
385 the following forms:
386 -1- ARRAY_REF = var
387 -2- var = ARRAY_REF
388 (This should have been verified in analyze_data_refs).
390 'var' in the second case corresponds to a def, not a use,
391 so USE cannot correspond to any operands that are not used
392 for array indexing.
394 Therefore, all we need to check is if STMT falls into the
395 first case, and whether var corresponds to USE. */
397 if (!gimple_assign_copy_p (stmt))
399 if (is_gimple_call (stmt)
400 && gimple_call_internal_p (stmt))
402 internal_fn ifn = gimple_call_internal_fn (stmt);
403 int mask_index = internal_fn_mask_index (ifn);
404 if (mask_index >= 0
405 && use == gimple_call_arg (stmt, mask_index))
406 return true;
407 int stored_value_index = internal_fn_stored_value_index (ifn);
408 if (stored_value_index >= 0
409 && use == gimple_call_arg (stmt, stored_value_index))
410 return true;
411 if (internal_gather_scatter_fn_p (ifn)
412 && use == gimple_call_arg (stmt, 1))
413 return true;
415 return false;
418 if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
419 return false;
420 operand = gimple_assign_rhs1 (stmt);
421 if (TREE_CODE (operand) != SSA_NAME)
422 return false;
424 if (operand == use)
425 return true;
427 return false;
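/* Example: for the store  a[i_1] = x_2  (which has a data reference),
   a USE of x_2 is the stored value, so the function returns true,
   whereas a USE of i_1 only feeds the array index and the function
   returns false.  */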
432 /* Function process_use.
434 Inputs:
435 - a USE in STMT in a loop represented by LOOP_VINFO
436 - RELEVANT - enum value to be set in the STMT_VINFO of the stmt
437 that defined USE. This is done by calling mark_relevant and passing it
438 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
439 - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
440 be performed.
442 Outputs:
443 Generally, LIVE_P and RELEVANT are used to define the liveness and
444 relevance info of the DEF_STMT of this USE:
445 STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
446 STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
447 Exceptions:
448 - case 1: If USE is used only for address computations (e.g. array indexing),
449 which does not need to be directly vectorized, then the liveness/relevance
450 of the respective DEF_STMT is left unchanged.
451 - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
452 skip DEF_STMT because it has already been processed.
453 - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
454 be modified accordingly.
456 Return true if everything is as expected. Return false otherwise. */
458 static bool
459 process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo,
460 enum vect_relevant relevant, vec<gimple *> *worklist,
461 bool force)
463 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
464 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
465 stmt_vec_info dstmt_vinfo;
466 basic_block bb, def_bb;
467 gimple *def_stmt;
468 enum vect_def_type dt;
470 /* case 1: we are only interested in uses that need to be vectorized. Uses
471 that are used for address computation are not considered relevant. */
472 if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
473 return true;
475 if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt))
477 if (dump_enabled_p ())
478 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
479 "not vectorized: unsupported use in stmt.\n");
480 return false;
483 if (!def_stmt || gimple_nop_p (def_stmt))
484 return true;
486 def_bb = gimple_bb (def_stmt);
487 if (!flow_bb_inside_loop_p (loop, def_bb))
489 if (dump_enabled_p ())
490 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
491 return true;
494 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
495 DEF_STMT must have already been processed, because this should be the
496 only way that STMT, which is a reduction-phi, was put in the worklist,
497 as there should be no other uses for DEF_STMT in the loop. So we just
498 check that everything is as expected, and we are done. */
499 dstmt_vinfo = vinfo_for_stmt (def_stmt);
500 bb = gimple_bb (stmt);
501 if (gimple_code (stmt) == GIMPLE_PHI
502 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
503 && gimple_code (def_stmt) != GIMPLE_PHI
504 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
505 && bb->loop_father == def_bb->loop_father)
507 if (dump_enabled_p ())
508 dump_printf_loc (MSG_NOTE, vect_location,
509 "reduc-stmt defining reduc-phi in the same nest.\n");
510 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
511 dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
512 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
513 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
514 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
515 return true;
518 /* case 3a: outer-loop stmt defining an inner-loop stmt:
519 outer-loop-header-bb:
520 d = def_stmt
521 inner-loop:
522 stmt # use (d)
523 outer-loop-tail-bb:
524 ... */
525 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
527 if (dump_enabled_p ())
528 dump_printf_loc (MSG_NOTE, vect_location,
529 "outer-loop def-stmt defining inner-loop stmt.\n");
531 switch (relevant)
533 case vect_unused_in_scope:
534 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
535 vect_used_in_scope : vect_unused_in_scope;
536 break;
538 case vect_used_in_outer_by_reduction:
539 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
540 relevant = vect_used_by_reduction;
541 break;
543 case vect_used_in_outer:
544 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
545 relevant = vect_used_in_scope;
546 break;
548 case vect_used_in_scope:
549 break;
551 default:
552 gcc_unreachable ();
556 /* case 3b: inner-loop stmt defining an outer-loop stmt:
557 outer-loop-header-bb:
559 inner-loop:
560 d = def_stmt
561 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
562 stmt # use (d) */
563 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
565 if (dump_enabled_p ())
566 dump_printf_loc (MSG_NOTE, vect_location,
567 "inner-loop def-stmt defining outer-loop stmt.\n");
569 switch (relevant)
571 case vect_unused_in_scope:
572 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
573 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
574 vect_used_in_outer_by_reduction : vect_unused_in_scope;
575 break;
577 case vect_used_by_reduction:
578 case vect_used_only_live:
579 relevant = vect_used_in_outer_by_reduction;
580 break;
582 case vect_used_in_scope:
583 relevant = vect_used_in_outer;
584 break;
586 default:
587 gcc_unreachable ();
590 /* We are also not interested in uses on loop PHI backedges that are
591 inductions. Otherwise we'll needlessly vectorize the IV increment
592 and cause hybrid SLP for SLP inductions. Unless the PHI is live
593 of course. */
594 else if (gimple_code (stmt) == GIMPLE_PHI
595 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_induction_def
596 && ! STMT_VINFO_LIVE_P (stmt_vinfo)
597 && (PHI_ARG_DEF_FROM_EDGE (stmt, loop_latch_edge (bb->loop_father))
598 == use))
600 if (dump_enabled_p ())
601 dump_printf_loc (MSG_NOTE, vect_location,
602 "induction value on backedge.\n");
603 return true;
607 vect_mark_relevant (worklist, def_stmt, relevant, false);
608 return true;
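/* Summary of the outer/inner relevance translation above (descriptive):
   in case 3a, vect_used_in_outer becomes vect_used_in_scope and
   vect_used_in_outer_by_reduction becomes vect_used_by_reduction;
   case 3b performs the inverse mapping, turning vect_used_in_scope into
   vect_used_in_outer and vect_used_by_reduction / vect_used_only_live
   into vect_used_in_outer_by_reduction, before DEF_STMT is marked.  */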
612 /* Function vect_mark_stmts_to_be_vectorized.
614 Not all stmts in the loop need to be vectorized. For example:
616 for i...
617 for j...
618 1. T0 = i + j
619 2. T1 = a[T0]
621 3. j = j + 1
623 Stmts 1 and 3 do not need to be vectorized, because loop control and
624 addressing of vectorized data-refs are handled differently.
626 This pass detects such stmts. */
628 bool
629 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
631 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
632 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
633 unsigned int nbbs = loop->num_nodes;
634 gimple_stmt_iterator si;
635 gimple *stmt;
636 unsigned int i;
637 stmt_vec_info stmt_vinfo;
638 basic_block bb;
639 gimple *phi;
640 bool live_p;
641 enum vect_relevant relevant;
643 DUMP_VECT_SCOPE ("vect_mark_stmts_to_be_vectorized");
645 auto_vec<gimple *, 64> worklist;
647 /* 1. Init worklist. */
648 for (i = 0; i < nbbs; i++)
650 bb = bbs[i];
651 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
653 phi = gsi_stmt (si);
654 if (dump_enabled_p ())
656 dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
657 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
660 if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
661 vect_mark_relevant (&worklist, phi, relevant, live_p);
663 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
665 stmt = gsi_stmt (si);
666 if (dump_enabled_p ())
668 dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
669 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
672 if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
673 vect_mark_relevant (&worklist, stmt, relevant, live_p);
677 /* 2. Process_worklist */
678 while (worklist.length () > 0)
680 use_operand_p use_p;
681 ssa_op_iter iter;
683 stmt = worklist.pop ();
684 if (dump_enabled_p ())
686 dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
687 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
690 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
691 (DEF_STMT) as relevant/irrelevant according to the relevance property
692 of STMT. */
693 stmt_vinfo = vinfo_for_stmt (stmt);
694 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
696 /* Generally, the relevance property of STMT (in STMT_VINFO_RELEVANT) is
697 propagated as is to the DEF_STMTs of its USEs.
699 One exception is when STMT has been identified as defining a reduction
700 variable; in this case we set the relevance to vect_used_by_reduction.
701 This is because we distinguish between two kinds of relevant stmts -
702 those that are used by a reduction computation, and those that are
703 (also) used by a regular computation. This allows us later on to
704 identify stmts that are used solely by a reduction, and therefore the
705 order of the results that they produce does not have to be kept. */
707 switch (STMT_VINFO_DEF_TYPE (stmt_vinfo))
709 case vect_reduction_def:
710 gcc_assert (relevant != vect_unused_in_scope);
711 if (relevant != vect_unused_in_scope
712 && relevant != vect_used_in_scope
713 && relevant != vect_used_by_reduction
714 && relevant != vect_used_only_live)
716 if (dump_enabled_p ())
717 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
718 "unsupported use of reduction.\n");
719 return false;
721 break;
723 case vect_nested_cycle:
724 if (relevant != vect_unused_in_scope
725 && relevant != vect_used_in_outer_by_reduction
726 && relevant != vect_used_in_outer)
728 if (dump_enabled_p ())
729 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
730 "unsupported use of nested cycle.\n");
732 return false;
734 break;
736 case vect_double_reduction_def:
737 if (relevant != vect_unused_in_scope
738 && relevant != vect_used_by_reduction
739 && relevant != vect_used_only_live)
741 if (dump_enabled_p ())
742 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
743 "unsupported use of double reduction.\n");
745 return false;
747 break;
749 default:
750 break;
753 if (is_pattern_stmt_p (stmt_vinfo))
755 /* Pattern statements are not inserted into the code, so
756 FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
757 have to scan the RHS or function arguments instead. */
758 if (is_gimple_assign (stmt))
760 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
761 tree op = gimple_assign_rhs1 (stmt);
763 i = 1;
764 if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
766 if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
767 relevant, &worklist, false)
768 || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
769 relevant, &worklist, false))
770 return false;
771 i = 2;
773 for (; i < gimple_num_ops (stmt); i++)
775 op = gimple_op (stmt, i);
776 if (TREE_CODE (op) == SSA_NAME
777 && !process_use (stmt, op, loop_vinfo, relevant,
778 &worklist, false))
779 return false;
782 else if (is_gimple_call (stmt))
784 for (i = 0; i < gimple_call_num_args (stmt); i++)
786 tree arg = gimple_call_arg (stmt, i);
787 if (!process_use (stmt, arg, loop_vinfo, relevant,
788 &worklist, false))
789 return false;
793 else
794 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
796 tree op = USE_FROM_PTR (use_p);
797 if (!process_use (stmt, op, loop_vinfo, relevant,
798 &worklist, false))
799 return false;
802 if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
804 gather_scatter_info gs_info;
805 if (!vect_check_gather_scatter (stmt, loop_vinfo, &gs_info))
806 gcc_unreachable ();
807 if (!process_use (stmt, gs_info.offset, loop_vinfo, relevant,
808 &worklist, true))
809 return false;
811 } /* while worklist */
813 return true;
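/* Tying this back to the example in the header comment: when statement
   2 (the load from a[T0]) is taken off the worklist, process_use sees
   that its use T0 only feeds the address computation, so statement 1 is
   never marked relevant; since nothing relevant uses statement 3 either,
   both are left unvectorized, exactly as described above.  */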
816 /* Compute the prologue cost for invariant or constant operands. */
818 static unsigned
819 vect_prologue_cost_for_slp_op (slp_tree node, stmt_vec_info stmt_info,
820 unsigned opno, enum vect_def_type dt,
821 stmt_vector_for_cost *cost_vec)
823 gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[0];
824 tree op = gimple_op (stmt, opno);
825 unsigned prologue_cost = 0;
827 /* Without looking at the actual initializer, a vector of
828 constants can be implemented as a load from the constant pool.
829 When all elements are the same we can use a splat. */
830 tree vectype = get_vectype_for_scalar_type (TREE_TYPE (op));
831 unsigned group_size = SLP_TREE_SCALAR_STMTS (node).length ();
832 unsigned num_vects_to_check;
833 unsigned HOST_WIDE_INT const_nunits;
834 unsigned nelt_limit;
835 if (TYPE_VECTOR_SUBPARTS (vectype).is_constant (&const_nunits)
836 && ! multiple_p (const_nunits, group_size))
838 num_vects_to_check = SLP_TREE_NUMBER_OF_VEC_STMTS (node);
839 nelt_limit = const_nunits;
841 else
843 /* If either the vector has variable length or the vectors
844 are composed of repeated whole groups we only need to
845 cost construction once. All vectors will be the same. */
846 num_vects_to_check = 1;
847 nelt_limit = group_size;
849 tree elt = NULL_TREE;
850 unsigned nelt = 0;
851 for (unsigned j = 0; j < num_vects_to_check * nelt_limit; ++j)
853 unsigned si = j % group_size;
854 if (nelt == 0)
855 elt = gimple_op (SLP_TREE_SCALAR_STMTS (node)[si], opno);
856 /* ??? We're just tracking whether all operands of a single
857 vector initializer are the same; ideally we'd check if
858 we emitted the same one already. */
859 else if (elt != gimple_op (SLP_TREE_SCALAR_STMTS (node)[si],
860 opno))
861 elt = NULL_TREE;
862 nelt++;
863 if (nelt == nelt_limit)
865 /* ??? We need to pass down stmt_info for a vector type
866 even if it points to the wrong stmt. */
867 prologue_cost += record_stmt_cost
868 (cost_vec, 1,
869 dt == vect_external_def
870 ? (elt ? scalar_to_vec : vec_construct)
871 : vector_load,
872 stmt_info, 0, vect_prologue);
873 nelt = 0;
877 return prologue_cost;
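/* Worked example (illustrative): with group_size == 3 and a vector type
   whose const_nunits is 4, 4 is not a multiple of 3, so every SLP vector
   statement is costed separately with nelt_limit == 4.  If instead
   group_size were 2 (4 being a multiple of 2, each vector holds whole
   groups), or the vector length were variable, a single construction of
   group_size elements is costed and reused for all copies.  */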
880 /* Function vect_model_simple_cost.
882 Models cost for simple operations, i.e. those that only emit ncopies of a
883 single op. Right now, this does not account for multiple insns that could
884 be generated for the single vector op. We will handle that shortly. */
886 static void
887 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
888 enum vect_def_type *dt,
889 int ndts,
890 slp_tree node,
891 stmt_vector_for_cost *cost_vec)
893 int inside_cost = 0, prologue_cost = 0;
895 gcc_assert (cost_vec != NULL);
897 /* ??? Somehow we need to fix this at the callers. */
898 if (node)
899 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (node);
901 if (node)
903 /* Scan operands and account for prologue cost of constants/externals.
904 ??? This over-estimates cost for multiple uses and should be
905 re-engineered. */
906 gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[0];
907 tree lhs = gimple_get_lhs (stmt);
908 for (unsigned i = 0; i < gimple_num_ops (stmt); ++i)
910 tree op = gimple_op (stmt, i);
911 gimple *def_stmt;
912 enum vect_def_type dt;
913 if (!op || op == lhs)
914 continue;
915 if (vect_is_simple_use (op, stmt_info->vinfo, &def_stmt, &dt)
916 && (dt == vect_constant_def || dt == vect_external_def))
917 prologue_cost += vect_prologue_cost_for_slp_op (node, stmt_info,
918 i, dt, cost_vec);
921 else
922 /* Cost the "broadcast" of a scalar operand into a vector operand.
923 Use scalar_to_vec to cost the broadcast, as elsewhere in the vector
924 cost model. */
925 for (int i = 0; i < ndts; i++)
926 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
927 prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec,
928 stmt_info, 0, vect_prologue);
930 /* Adjust for two-operator SLP nodes. */
931 if (node && SLP_TREE_TWO_OPERATORS (node))
933 ncopies *= 2;
934 inside_cost += record_stmt_cost (cost_vec, ncopies, vec_perm,
935 stmt_info, 0, vect_body);
938 /* Pass the inside-of-loop statements to the target-specific cost model. */
939 inside_cost += record_stmt_cost (cost_vec, ncopies, vector_stmt,
940 stmt_info, 0, vect_body);
942 if (dump_enabled_p ())
943 dump_printf_loc (MSG_NOTE, vect_location,
944 "vect_model_simple_cost: inside_cost = %d, "
945 "prologue_cost = %d .\n", inside_cost, prologue_cost);
949 /* Model cost for type demotion and promotion operations. PWR is normally
950 zero for single-step promotions and demotions. It will be one if
951 two-step promotion/demotion is required, and so on. Each additional
952 step doubles the number of instructions required. */
954 static void
955 vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
956 enum vect_def_type *dt, int pwr,
957 stmt_vector_for_cost *cost_vec)
959 int i, tmp;
960 int inside_cost = 0, prologue_cost = 0;
962 for (i = 0; i < pwr + 1; i++)
964 tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
965 (i + 1) : i;
966 inside_cost += record_stmt_cost (cost_vec, vect_pow2 (tmp),
967 vec_promote_demote, stmt_info, 0,
968 vect_body);
971 /* FORNOW: Assuming a maximum of 2 args per stmt. */
972 for (i = 0; i < 2; i++)
973 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
974 prologue_cost += record_stmt_cost (cost_vec, 1, vector_stmt,
975 stmt_info, 0, vect_prologue);
977 if (dump_enabled_p ())
978 dump_printf_loc (MSG_NOTE, vect_location,
979 "vect_model_promotion_demotion_cost: inside_cost = %d, "
980 "prologue_cost = %d .\n", inside_cost, prologue_cost);
983 /* Function vect_model_store_cost
985 Models cost for stores. In the case of grouped accesses, one access
986 has the overhead of the grouped access attributed to it. */
988 static void
989 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
990 enum vect_def_type dt,
991 vect_memory_access_type memory_access_type,
992 vec_load_store_type vls_type, slp_tree slp_node,
993 stmt_vector_for_cost *cost_vec)
995 unsigned int inside_cost = 0, prologue_cost = 0;
996 gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
997 bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);
999 /* ??? Somehow we need to fix this at the callers. */
1000 if (slp_node)
1001 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
1003 if (vls_type == VLS_STORE_INVARIANT)
1005 if (slp_node)
1006 prologue_cost += vect_prologue_cost_for_slp_op (slp_node, stmt_info,
1007 1, dt, cost_vec);
1008 else
1009 prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec,
1010 stmt_info, 0, vect_prologue);
1013 /* Grouped stores update all elements in the group at once,
1014 so we want the DR for the first statement. */
1015 if (!slp_node && grouped_access_p)
1016 first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
1018 /* True if we should include any once-per-group costs as well as
1019 the cost of the statement itself. For SLP we only get called
1020 once per group anyhow. */
1021 bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));
1023 /* We assume that the cost of a single store-lanes instruction is
1024 equivalent to the cost of DR_GROUP_SIZE separate stores. If a grouped
1025 access is instead being provided by a permute-and-store operation,
1026 include the cost of the permutes. */
1027 if (first_stmt_p
1028 && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
1030 /* Uses high and low interleave or shuffle operations for each
1031 needed permute. */
1032 int group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
1033 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
1034 inside_cost = record_stmt_cost (cost_vec, nstmts, vec_perm,
1035 stmt_info, 0, vect_body);
1037 if (dump_enabled_p ())
1038 dump_printf_loc (MSG_NOTE, vect_location,
1039 "vect_model_store_cost: strided group_size = %d .\n",
1040 group_size);
1043 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1044 /* Costs of the stores. */
1045 if (memory_access_type == VMAT_ELEMENTWISE
1046 || memory_access_type == VMAT_GATHER_SCATTER)
1048 /* N scalar stores plus extracting the elements. */
1049 unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
1050 inside_cost += record_stmt_cost (cost_vec,
1051 ncopies * assumed_nunits,
1052 scalar_store, stmt_info, 0, vect_body);
1054 else
1055 vect_get_store_cost (stmt_info, ncopies, &inside_cost, cost_vec);
1057 if (memory_access_type == VMAT_ELEMENTWISE
1058 || memory_access_type == VMAT_STRIDED_SLP)
1060 /* N scalar stores plus extracting the elements. */
1061 unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
1062 inside_cost += record_stmt_cost (cost_vec,
1063 ncopies * assumed_nunits,
1064 vec_to_scalar, stmt_info, 0, vect_body);
1067 if (dump_enabled_p ())
1068 dump_printf_loc (MSG_NOTE, vect_location,
1069 "vect_model_store_cost: inside_cost = %d, "
1070 "prologue_cost = %d .\n", inside_cost, prologue_cost);
1074 /* Calculate cost of DR's memory access. */
1075 void
1076 vect_get_store_cost (stmt_vec_info stmt_info, int ncopies,
1077 unsigned int *inside_cost,
1078 stmt_vector_for_cost *body_cost_vec)
1080 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
1081 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
1083 switch (alignment_support_scheme)
1085 case dr_aligned:
1087 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1088 vector_store, stmt_info, 0,
1089 vect_body);
1091 if (dump_enabled_p ())
1092 dump_printf_loc (MSG_NOTE, vect_location,
1093 "vect_model_store_cost: aligned.\n");
1094 break;
1097 case dr_unaligned_supported:
1099 /* Here, we assign an additional cost for the unaligned store. */
1100 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1101 unaligned_store, stmt_info,
1102 DR_MISALIGNMENT (dr), vect_body);
1103 if (dump_enabled_p ())
1104 dump_printf_loc (MSG_NOTE, vect_location,
1105 "vect_model_store_cost: unaligned supported by "
1106 "hardware.\n");
1107 break;
1110 case dr_unaligned_unsupported:
1112 *inside_cost = VECT_MAX_COST;
1114 if (dump_enabled_p ())
1115 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1116 "vect_model_store_cost: unsupported access.\n");
1117 break;
1120 default:
1121 gcc_unreachable ();
1126 /* Function vect_model_load_cost
1128 Models cost for loads. In the case of grouped accesses, one access has
1129 the overhead of the grouped access attributed to it. Since unaligned
1130 accesses are supported for loads, we also account for the costs of the
1131 access scheme chosen. */
1133 static void
1134 vect_model_load_cost (stmt_vec_info stmt_info, unsigned ncopies,
1135 vect_memory_access_type memory_access_type,
1136 slp_instance instance,
1137 slp_tree slp_node,
1138 stmt_vector_for_cost *cost_vec)
1140 gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
1141 unsigned int inside_cost = 0, prologue_cost = 0;
1142 bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);
1144 gcc_assert (cost_vec);
1146 /* ??? Somehow we need to fix this at the callers. */
1147 if (slp_node)
1148 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
1150 if (slp_node && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
1152 /* If the load is permuted then the alignment is determined by
1153 the first group element not by the first scalar stmt DR. */
1154 gimple *stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
1155 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1156 /* Record the cost for the permutation. */
1157 unsigned n_perms;
1158 unsigned assumed_nunits
1159 = vect_nunits_for_cost (STMT_VINFO_VECTYPE (stmt_info));
1160 unsigned slp_vf = (ncopies * assumed_nunits) / instance->group_size;
1161 vect_transform_slp_perm_load (slp_node, vNULL, NULL,
1162 slp_vf, instance, true,
1163 &n_perms);
1164 inside_cost += record_stmt_cost (cost_vec, n_perms, vec_perm,
1165 stmt_info, 0, vect_body);
1166 /* And adjust the number of loads performed. This handles
1167 redundancies as well as loads that are later dead. */
1168 auto_sbitmap perm (DR_GROUP_SIZE (stmt_info));
1169 bitmap_clear (perm);
1170 for (unsigned i = 0;
1171 i < SLP_TREE_LOAD_PERMUTATION (slp_node).length (); ++i)
1172 bitmap_set_bit (perm, SLP_TREE_LOAD_PERMUTATION (slp_node)[i]);
1173 ncopies = 0;
1174 bool load_seen = false;
1175 for (unsigned i = 0; i < DR_GROUP_SIZE (stmt_info); ++i)
1177 if (i % assumed_nunits == 0)
1179 if (load_seen)
1180 ncopies++;
1181 load_seen = false;
1183 if (bitmap_bit_p (perm, i))
1184 load_seen = true;
1186 if (load_seen)
1187 ncopies++;
1188 gcc_assert (ncopies
1189 <= (DR_GROUP_SIZE (stmt_info) - DR_GROUP_GAP (stmt_info)
1190 + assumed_nunits - 1) / assumed_nunits);
1193 /* Grouped loads read all elements in the group at once,
1194 so we want the DR for the first statement. */
1195 if (!slp_node && grouped_access_p)
1196 first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
1198 /* True if we should include any once-per-group costs as well as
1199 the cost of the statement itself. For SLP we only get called
1200 once per group anyhow. */
1201 bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));
1203 /* We assume that the cost of a single load-lanes instruction is
1204 equivalent to the cost of DR_GROUP_SIZE separate loads. If a grouped
1205 access is instead being provided by a load-and-permute operation,
1206 include the cost of the permutes. */
1207 if (first_stmt_p
1208 && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
1210 /* Uses even and odd extract operations or shuffle operations
1211 for each needed permute. */
1212 int group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
1213 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
1214 inside_cost += record_stmt_cost (cost_vec, nstmts, vec_perm,
1215 stmt_info, 0, vect_body);
1217 if (dump_enabled_p ())
1218 dump_printf_loc (MSG_NOTE, vect_location,
1219 "vect_model_load_cost: strided group_size = %d .\n",
1220 group_size);
1223 /* The loads themselves. */
1224 if (memory_access_type == VMAT_ELEMENTWISE
1225 || memory_access_type == VMAT_GATHER_SCATTER)
1227 /* N scalar loads plus gathering them into a vector. */
1228 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1229 unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
1230 inside_cost += record_stmt_cost (cost_vec,
1231 ncopies * assumed_nunits,
1232 scalar_load, stmt_info, 0, vect_body);
1234 else
1235 vect_get_load_cost (stmt_info, ncopies, first_stmt_p,
1236 &inside_cost, &prologue_cost,
1237 cost_vec, cost_vec, true);
1238 if (memory_access_type == VMAT_ELEMENTWISE
1239 || memory_access_type == VMAT_STRIDED_SLP)
1240 inside_cost += record_stmt_cost (cost_vec, ncopies, vec_construct,
1241 stmt_info, 0, vect_body);
1243 if (dump_enabled_p ())
1244 dump_printf_loc (MSG_NOTE, vect_location,
1245 "vect_model_load_cost: inside_cost = %d, "
1246 "prologue_cost = %d .\n", inside_cost, prologue_cost);
1250 /* Calculate cost of DR's memory access. */
1251 void
1252 vect_get_load_cost (stmt_vec_info stmt_info, int ncopies,
1253 bool add_realign_cost, unsigned int *inside_cost,
1254 unsigned int *prologue_cost,
1255 stmt_vector_for_cost *prologue_cost_vec,
1256 stmt_vector_for_cost *body_cost_vec,
1257 bool record_prologue_costs)
1259 data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
1260 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
1262 switch (alignment_support_scheme)
1264 case dr_aligned:
1266 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1267 stmt_info, 0, vect_body);
1269 if (dump_enabled_p ())
1270 dump_printf_loc (MSG_NOTE, vect_location,
1271 "vect_model_load_cost: aligned.\n");
1273 break;
1275 case dr_unaligned_supported:
1277 /* Here, we assign an additional cost for the unaligned load. */
1278 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1279 unaligned_load, stmt_info,
1280 DR_MISALIGNMENT (dr), vect_body);
1282 if (dump_enabled_p ())
1283 dump_printf_loc (MSG_NOTE, vect_location,
1284 "vect_model_load_cost: unaligned supported by "
1285 "hardware.\n");
1287 break;
1289 case dr_explicit_realign:
1291 *inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
1292 vector_load, stmt_info, 0, vect_body);
1293 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1294 vec_perm, stmt_info, 0, vect_body);
1296 /* FIXME: If the misalignment remains fixed across the iterations of
1297 the containing loop, the following cost should be added to the
1298 prologue costs. */
1299 if (targetm.vectorize.builtin_mask_for_load)
1300 *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
1301 stmt_info, 0, vect_body);
1303 if (dump_enabled_p ())
1304 dump_printf_loc (MSG_NOTE, vect_location,
1305 "vect_model_load_cost: explicit realign\n");
1307 break;
1309 case dr_explicit_realign_optimized:
1311 if (dump_enabled_p ())
1312 dump_printf_loc (MSG_NOTE, vect_location,
1313 "vect_model_load_cost: unaligned software "
1314 "pipelined.\n");
1316 /* Unaligned software pipeline has a load of an address, an initial
1317 load, and possibly a mask operation to "prime" the loop. However,
1318 if this is an access in a group of loads, which provide grouped
1319 access, then the above cost should only be considered for one
1320 access in the group. Inside the loop, there is a load op
1321 and a realignment op. */
1323 if (add_realign_cost && record_prologue_costs)
1325 *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
1326 vector_stmt, stmt_info,
1327 0, vect_prologue);
1328 if (targetm.vectorize.builtin_mask_for_load)
1329 *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
1330 vector_stmt, stmt_info,
1331 0, vect_prologue);
1334 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1335 stmt_info, 0, vect_body);
1336 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
1337 stmt_info, 0, vect_body);
1339 if (dump_enabled_p ())
1340 dump_printf_loc (MSG_NOTE, vect_location,
1341 "vect_model_load_cost: explicit realign optimized"
1342 "\n");
1344 break;
1347 case dr_unaligned_unsupported:
1349 *inside_cost = VECT_MAX_COST;
1351 if (dump_enabled_p ())
1352 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1353 "vect_model_load_cost: unsupported access.\n");
1354 break;
1357 default:
1358 gcc_unreachable ();
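/* Recap of the dr_explicit_realign case above (descriptive): each copy
   is costed as two vector_load operations plus one vec_perm, and a
   single extra vector_stmt is added when the target provides
   builtin_mask_for_load (see the FIXME about moving that cost to the
   prologue).  */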
1362 /* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
1363 the loop preheader for the vectorized stmt STMT. */
1365 static void
1366 vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
1368 if (gsi)
1369 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1370 else
1372 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1373 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1375 if (loop_vinfo)
1377 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1378 basic_block new_bb;
1379 edge pe;
1381 if (nested_in_vect_loop_p (loop, stmt))
1382 loop = loop->inner;
1384 pe = loop_preheader_edge (loop);
1385 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
1386 gcc_assert (!new_bb);
1388 else
1390 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
1391 basic_block bb;
1392 gimple_stmt_iterator gsi_bb_start;
1394 gcc_assert (bb_vinfo);
1395 bb = BB_VINFO_BB (bb_vinfo);
1396 gsi_bb_start = gsi_after_labels (bb);
1397 gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
1401 if (dump_enabled_p ())
1403 dump_printf_loc (MSG_NOTE, vect_location,
1404 "created new init_stmt: ");
1405 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
1409 /* Function vect_init_vector.
1411 Insert a new stmt (INIT_STMT) that initializes a new variable of type
1412 TYPE with the value VAL. If TYPE is a vector type and VAL does not have
1413 vector type, a vector with all elements equal to VAL is created first.
1414 Place the initialization at GSI if it is not NULL. Otherwise, place the
1415 initialization at the loop preheader.
1416 Return the DEF of INIT_STMT.
1417 It will be used in the vectorization of STMT. */
1419 tree
1420 vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi)
1422 gimple *init_stmt;
1423 tree new_temp;
1425 /* We abuse this function to push something to an SSA name with the initial value 'val'. */
1426 if (! useless_type_conversion_p (type, TREE_TYPE (val)))
1428 gcc_assert (TREE_CODE (type) == VECTOR_TYPE);
1429 if (! types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
1431 /* A scalar boolean value should be transformed into an
1432 all-zeros or all-ones value before building a vector. */
1433 if (VECTOR_BOOLEAN_TYPE_P (type))
1435 tree true_val = build_all_ones_cst (TREE_TYPE (type));
1436 tree false_val = build_zero_cst (TREE_TYPE (type));
1438 if (CONSTANT_CLASS_P (val))
1439 val = integer_zerop (val) ? false_val : true_val;
1440 else
1442 new_temp = make_ssa_name (TREE_TYPE (type));
1443 init_stmt = gimple_build_assign (new_temp, COND_EXPR,
1444 val, true_val, false_val);
1445 vect_init_vector_1 (stmt, init_stmt, gsi);
1446 val = new_temp;
1449 else if (CONSTANT_CLASS_P (val))
1450 val = fold_convert (TREE_TYPE (type), val);
1451 else
1453 new_temp = make_ssa_name (TREE_TYPE (type));
1454 if (! INTEGRAL_TYPE_P (TREE_TYPE (val)))
1455 init_stmt = gimple_build_assign (new_temp,
1456 fold_build1 (VIEW_CONVERT_EXPR,
1457 TREE_TYPE (type),
1458 val));
1459 else
1460 init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
1461 vect_init_vector_1 (stmt, init_stmt, gsi);
1462 val = new_temp;
1465 val = build_vector_from_val (type, val);
1468 new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
1469 init_stmt = gimple_build_assign (new_temp, val);
1470 vect_init_vector_1 (stmt, init_stmt, gsi);
1471 return new_temp;
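/* Usage sketch (illustrative): initializing a four-element integer
   vector type from the scalar constant 5 with GSI == NULL builds the
   vector constant {5, 5, 5, 5}, assigns it to a fresh "cst_" SSA name
   in the loop preheader via vect_init_vector_1, and returns that SSA
   name for use in the vectorized statement.  */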
1474 /* Function vect_get_vec_def_for_operand_1.
1476 For a defining stmt DEF_STMT of a scalar stmt, return a vector def with type
1477 DT that will be used in the vectorized stmt. */
1479 tree
1480 vect_get_vec_def_for_operand_1 (gimple *def_stmt, enum vect_def_type dt)
1482 tree vec_oprnd;
1483 gimple *vec_stmt;
1484 stmt_vec_info def_stmt_info = NULL;
1486 switch (dt)
1488 /* operand is a constant or a loop invariant. */
1489 case vect_constant_def:
1490 case vect_external_def:
1491 /* Code should use vect_get_vec_def_for_operand. */
1492 gcc_unreachable ();
1494 /* operand is defined inside the loop. */
1495 case vect_internal_def:
1497 /* Get the def from the vectorized stmt. */
1498 def_stmt_info = vinfo_for_stmt (def_stmt);
1500 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1501 /* Get vectorized pattern statement. */
1502 if (!vec_stmt
1503 && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
1504 && !STMT_VINFO_RELEVANT (def_stmt_info))
1505 vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
1506 STMT_VINFO_RELATED_STMT (def_stmt_info)));
1507 gcc_assert (vec_stmt);
1508 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1509 vec_oprnd = PHI_RESULT (vec_stmt);
1510 else if (is_gimple_call (vec_stmt))
1511 vec_oprnd = gimple_call_lhs (vec_stmt);
1512 else
1513 vec_oprnd = gimple_assign_lhs (vec_stmt);
1514 return vec_oprnd;
1517 /* operand is defined by a loop header phi. */
1518 case vect_reduction_def:
1519 case vect_double_reduction_def:
1520 case vect_nested_cycle:
1521 case vect_induction_def:
1523 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1525 /* Get the def from the vectorized stmt. */
1526 def_stmt_info = vinfo_for_stmt (def_stmt);
1527 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1528 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1529 vec_oprnd = PHI_RESULT (vec_stmt);
1530 else
1531 vec_oprnd = gimple_get_lhs (vec_stmt);
1532 return vec_oprnd;
1535 default:
1536 gcc_unreachable ();
1541 /* Function vect_get_vec_def_for_operand.
1543 OP is an operand in STMT. This function returns a (vector) def that will be
1544 used in the vectorized stmt for STMT.
1546 In the case that OP is an SSA_NAME which is defined in the loop, then
1547 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
1549 In case OP is an invariant or constant, a new stmt that creates a vector def
1550 needs to be introduced. VECTYPE may be used to specify a required type for
1551 vector invariant. */
1553 tree
1554 vect_get_vec_def_for_operand (tree op, gimple *stmt, tree vectype)
1556 gimple *def_stmt;
1557 enum vect_def_type dt;
1558 bool is_simple_use;
1559 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1560 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1562 if (dump_enabled_p ())
1564 dump_printf_loc (MSG_NOTE, vect_location,
1565 "vect_get_vec_def_for_operand: ");
1566 dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
1567 dump_printf (MSG_NOTE, "\n");
1570 is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt);
1571 gcc_assert (is_simple_use);
1572 if (def_stmt && dump_enabled_p ())
1574 dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = ");
1575 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
1578 if (dt == vect_constant_def || dt == vect_external_def)
1580 tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
1581 tree vector_type;
1583 if (vectype)
1584 vector_type = vectype;
1585 else if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op))
1586 && VECTOR_BOOLEAN_TYPE_P (stmt_vectype))
1587 vector_type = build_same_sized_truth_vector_type (stmt_vectype);
1588 else
1589 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
1591 gcc_assert (vector_type);
1592 return vect_init_vector (stmt, op, vector_type, NULL);
1594 else
1595 return vect_get_vec_def_for_operand_1 (def_stmt, dt);
1599 /* Function vect_get_vec_def_for_stmt_copy
1601 Return a vector-def for an operand. This function is used when the
1602 vectorized stmt to be created (by the caller to this function) is a "copy"
1603 created in case the vectorized result cannot fit in one vector, and several
1604 copies of the vector-stmt are required. In this case the vector-def is
1605 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1606 of the stmt that defines VEC_OPRND.
1607 DT is the type of the vector def VEC_OPRND.
1609 Context:
1610 In case the vectorization factor (VF) is bigger than the number
1611 of elements that can fit in a vectype (nunits), we have to generate
1612 more than one vector stmt to vectorize the scalar stmt. This situation
1613 arises when there are multiple data-types operated upon in the loop; the
1614 smallest data-type determines the VF, and as a result, when vectorizing
1615 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1616 vector stmt (each computing a vector of 'nunits' results, and together
1617 computing 'VF' results in each iteration). This function is called when
1618 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1619 which VF=16 and nunits=4, so the number of copies required is 4):
1621 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1623 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1624 VS1.1: vx.1 = memref1 VS1.2
1625 VS1.2: vx.2 = memref2 VS1.3
1626 VS1.3: vx.3 = memref3
1628 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1629 VSnew.1: vz1 = vx.1 + ... VSnew.2
1630 VSnew.2: vz2 = vx.2 + ... VSnew.3
1631 VSnew.3: vz3 = vx.3 + ...
1633 The vectorization of S1 is explained in vectorizable_load.
1634 The vectorization of S2:
1635 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1636 the function 'vect_get_vec_def_for_operand' is called to
1637 get the relevant vector-def for each operand of S2. For operand x it
1638 returns the vector-def 'vx.0'.
1640 To create the remaining copies of the vector-stmt (VSnew.j), this
1641 function is called to get the relevant vector-def for each operand. It is
1642 obtained from the respective VS1.j stmt, which is recorded in the
1643 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1645 For example, to obtain the vector-def 'vx.1' in order to create the
1646 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1647 Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
1648 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1649 and return its def ('vx.1').
1650 Overall, to create the above sequence this function will be called 3 times:
1651 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1652 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1653 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1655 tree
1656 vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1658 gimple *vec_stmt_for_operand;
1659 stmt_vec_info def_stmt_info;
1661 /* Do nothing; can reuse same def. */
1662 if (dt == vect_external_def || dt == vect_constant_def )
1663 return vec_oprnd;
1665 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1666 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1667 gcc_assert (def_stmt_info);
1668 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1669 gcc_assert (vec_stmt_for_operand);
1670 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1671 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1672 else
1673 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1674 return vec_oprnd;
1678 /* Get vectorized definitions for the operands to create a copy of an original
1679 stmt. See vect_get_vec_def_for_stmt_copy () for details. */
1681 void
1682 vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
1683 vec<tree> *vec_oprnds0,
1684 vec<tree> *vec_oprnds1)
1686 tree vec_oprnd = vec_oprnds0->pop ();
1688 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
1689 vec_oprnds0->quick_push (vec_oprnd);
1691 if (vec_oprnds1 && vec_oprnds1->length ())
1693 vec_oprnd = vec_oprnds1->pop ();
1694 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
1695 vec_oprnds1->quick_push (vec_oprnd);
1700 /* Get vectorized definitions for OP0 and OP1. */
1702 void
1703 vect_get_vec_defs (tree op0, tree op1, gimple *stmt,
1704 vec<tree> *vec_oprnds0,
1705 vec<tree> *vec_oprnds1,
1706 slp_tree slp_node)
1708 if (slp_node)
1710 int nops = (op1 == NULL_TREE) ? 1 : 2;
1711 auto_vec<tree> ops (nops);
1712 auto_vec<vec<tree> > vec_defs (nops);
1714 ops.quick_push (op0);
1715 if (op1)
1716 ops.quick_push (op1);
1718 vect_get_slp_defs (ops, slp_node, &vec_defs);
1720 *vec_oprnds0 = vec_defs[0];
1721 if (op1)
1722 *vec_oprnds1 = vec_defs[1];
1724 else
1726 tree vec_oprnd;
1728 vec_oprnds0->create (1);
1729 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt);
1730 vec_oprnds0->quick_push (vec_oprnd);
1732 if (op1)
1734 vec_oprnds1->create (1);
1735 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt);
1736 vec_oprnds1->quick_push (vec_oprnd);
1741 /* Helper function called by vect_finish_replace_stmt and
1742 vect_finish_stmt_generation. Set the location of the new
1743 statement and create a stmt_vec_info for it. */
1745 static void
1746 vect_finish_stmt_generation_1 (gimple *stmt, gimple *vec_stmt)
1748 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1749 vec_info *vinfo = stmt_info->vinfo;
1751 set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo));
1753 if (dump_enabled_p ())
1755 dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
1756 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
1759 gimple_set_location (vec_stmt, gimple_location (stmt));
1761 /* While EH edges will generally prevent vectorization, stmt might
1762 e.g. be in a must-not-throw region. Ensure newly created stmts
1763 that could throw are part of the same region. */
1764 int lp_nr = lookup_stmt_eh_lp (stmt);
1765 if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
1766 add_stmt_to_eh_lp (vec_stmt, lp_nr);
1769 /* Replace the scalar statement STMT with a new vector statement VEC_STMT,
1770 which sets the same scalar result as STMT did. */
1772 void
1773 vect_finish_replace_stmt (gimple *stmt, gimple *vec_stmt)
1775 gcc_assert (gimple_get_lhs (stmt) == gimple_get_lhs (vec_stmt));
1777 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
1778 gsi_replace (&gsi, vec_stmt, false);
1780 vect_finish_stmt_generation_1 (stmt, vec_stmt);
1783 /* Function vect_finish_stmt_generation.
1785 Insert a new stmt. */
1787 void
1788 vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt,
1789 gimple_stmt_iterator *gsi)
1791 gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
1793 if (!gsi_end_p (*gsi)
1794 && gimple_has_mem_ops (vec_stmt))
1796 gimple *at_stmt = gsi_stmt (*gsi);
1797 tree vuse = gimple_vuse (at_stmt);
1798 if (vuse && TREE_CODE (vuse) == SSA_NAME)
1800 tree vdef = gimple_vdef (at_stmt);
1801 gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
1802 /* If we have an SSA vuse and insert a store, update virtual
1803 SSA form to avoid triggering the renamer. Do so only
1804 if we can easily see all uses - which is what almost always
1805 happens with the way vectorized stmts are inserted. */
1806 if ((vdef && TREE_CODE (vdef) == SSA_NAME)
1807 && ((is_gimple_assign (vec_stmt)
1808 && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
1809 || (is_gimple_call (vec_stmt)
1810 && !(gimple_call_flags (vec_stmt)
1811 & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
1813 tree new_vdef = copy_ssa_name (vuse, vec_stmt);
1814 gimple_set_vdef (vec_stmt, new_vdef);
1815 SET_USE (gimple_vuse_op (at_stmt), new_vdef);
1819 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1820 vect_finish_stmt_generation_1 (stmt, vec_stmt);
1823 /* We want to vectorize a call to combined function CFN with function
1824 decl FNDECL, using VECTYPE_OUT as the type of the output and VECTYPE_IN
1825 as the types of all inputs. Check whether this is possible using
1826 an internal function, returning its code if so or IFN_LAST if not. */
1828 static internal_fn
1829 vectorizable_internal_function (combined_fn cfn, tree fndecl,
1830 tree vectype_out, tree vectype_in)
1832 internal_fn ifn;
1833 if (internal_fn_p (cfn))
1834 ifn = as_internal_fn (cfn);
1835 else
1836 ifn = associated_internal_fn (fndecl);
1837 if (ifn != IFN_LAST && direct_internal_fn_p (ifn))
1839 const direct_internal_fn_info &info = direct_internal_fn (ifn);
1840 if (info.vectorizable)
1842 tree type0 = (info.type0 < 0 ? vectype_out : vectype_in);
1843 tree type1 = (info.type1 < 0 ? vectype_out : vectype_in);
1844 if (direct_internal_fn_supported_p (ifn, tree_pair (type0, type1),
1845 OPTIMIZE_FOR_SPEED))
1846 return ifn;
1849 return IFN_LAST;
1853 static tree permute_vec_elements (tree, tree, tree, gimple *,
1854 gimple_stmt_iterator *);
1856 /* Check whether a load or store statement in the loop described by
1857 LOOP_VINFO is possible in a fully-masked loop. This is testing
1858 whether the vectorizer pass has the appropriate support, as well as
1859 whether the target does.
1861 VLS_TYPE says whether the statement is a load or store and VECTYPE
1862 is the type of the vector being loaded or stored. MEMORY_ACCESS_TYPE
1863 says how the load or store is going to be implemented and GROUP_SIZE
1864 is the number of load or store statements in the containing group.
1865 If the access is a gather load or scatter store, GS_INFO describes
1866 its arguments.
1868 Clear LOOP_VINFO_CAN_FULLY_MASK_P if a fully-masked loop is not
1869 supported, otherwise record the required mask types. */
1871 static void
1872 check_load_store_masking (loop_vec_info loop_vinfo, tree vectype,
1873 vec_load_store_type vls_type, int group_size,
1874 vect_memory_access_type memory_access_type,
1875 gather_scatter_info *gs_info)
1877 /* Invariant loads need no special support. */
1878 if (memory_access_type == VMAT_INVARIANT)
1879 return;
1881 vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
1882 machine_mode vecmode = TYPE_MODE (vectype);
1883 bool is_load = (vls_type == VLS_LOAD);
1884 if (memory_access_type == VMAT_LOAD_STORE_LANES)
1886 if (is_load
1887 ? !vect_load_lanes_supported (vectype, group_size, true)
1888 : !vect_store_lanes_supported (vectype, group_size, true))
1890 if (dump_enabled_p ())
1891 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1892 "can't use a fully-masked loop because the"
1893 " target doesn't have an appropriate masked"
1894 " load/store-lanes instruction.\n");
1895 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
1896 return;
1898 unsigned int ncopies = vect_get_num_copies (loop_vinfo, vectype);
1899 vect_record_loop_mask (loop_vinfo, masks, ncopies, vectype);
1900 return;
1903 if (memory_access_type == VMAT_GATHER_SCATTER)
1905 internal_fn ifn = (is_load
1906 ? IFN_MASK_GATHER_LOAD
1907 : IFN_MASK_SCATTER_STORE);
1908 tree offset_type = TREE_TYPE (gs_info->offset);
1909 if (!internal_gather_scatter_fn_supported_p (ifn, vectype,
1910 gs_info->memory_type,
1911 TYPE_SIGN (offset_type),
1912 gs_info->scale))
1914 if (dump_enabled_p ())
1915 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1916 "can't use a fully-masked loop because the"
1917 " target doesn't have an appropriate masked"
1918 " gather load or scatter store instruction.\n");
1919 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
1920 return;
1922 unsigned int ncopies = vect_get_num_copies (loop_vinfo, vectype);
1923 vect_record_loop_mask (loop_vinfo, masks, ncopies, vectype);
1924 return;
1927 if (memory_access_type != VMAT_CONTIGUOUS
1928 && memory_access_type != VMAT_CONTIGUOUS_PERMUTE)
1930 /* Element X of the data must come from iteration i * VF + X of the
1931 scalar loop. We need more work to support other mappings. */
1932 if (dump_enabled_p ())
1933 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1934 "can't use a fully-masked loop because an access"
1935 " isn't contiguous.\n");
1936 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
1937 return;
1940 machine_mode mask_mode;
1941 if (!(targetm.vectorize.get_mask_mode
1942 (GET_MODE_NUNITS (vecmode),
1943 GET_MODE_SIZE (vecmode)).exists (&mask_mode))
1944 || !can_vec_mask_load_store_p (vecmode, mask_mode, is_load))
1946 if (dump_enabled_p ())
1947 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1948 "can't use a fully-masked loop because the target"
1949 " doesn't have the appropriate masked load or"
1950 " store.\n");
1951 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
1952 return;
1954 /* We might load more scalars than we need for permuting SLP loads.
1955 We checked in get_group_load_store_type that the extra elements
1956 don't leak into a new vector. */
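/* For instance, assuming GROUP_SIZE == 3, a vectorization factor of 4 and
   8 elements per vector, each iteration of the vector loop touches
   3 * 4 == 12 scalars and therefore needs ceil (12 / 8) == 2 masks of the
   given VECTYPE.  */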
1957 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
1958 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1959 unsigned int nvectors;
1960 if (can_div_away_from_zero_p (group_size * vf, nunits, &nvectors))
1961 vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype);
1962 else
1963 gcc_unreachable ();
1966 /* Return the mask input to a masked load or store. VEC_MASK is the vectorized
1967 form of the scalar mask condition and LOOP_MASK, if nonnull, is the mask
1968 that needs to be applied to all loads and stores in a vectorized loop.
1969 Return VEC_MASK if LOOP_MASK is null, otherwise return VEC_MASK & LOOP_MASK.
1971 MASK_TYPE is the type of both masks. If new statements are needed,
1972 insert them before GSI. */
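/* For example, with 4-element boolean masks, VEC_MASK == { 1, 1, 0, 1 }
   and LOOP_MASK == { 1, 1, 1, 0 } combine into { 1, 1, 0, 0 }: only lanes
   that are both selected by the scalar condition and active in the current
   iteration of the fully-masked loop are accessed.  */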
1974 static tree
1975 prepare_load_store_mask (tree mask_type, tree loop_mask, tree vec_mask,
1976 gimple_stmt_iterator *gsi)
1978 gcc_assert (useless_type_conversion_p (mask_type, TREE_TYPE (vec_mask)));
1979 if (!loop_mask)
1980 return vec_mask;
1982 gcc_assert (TREE_TYPE (loop_mask) == mask_type);
1983 tree and_res = make_temp_ssa_name (mask_type, NULL, "vec_mask_and");
1984 gimple *and_stmt = gimple_build_assign (and_res, BIT_AND_EXPR,
1985 vec_mask, loop_mask);
1986 gsi_insert_before (gsi, and_stmt, GSI_SAME_STMT);
1987 return and_res;
1990 /* Determine whether we can use a gather load or scatter store to vectorize
1991 strided load or store STMT by truncating the current offset to a smaller
1992 width. We need to be able to construct an offset vector:
1994 { 0, X, X*2, X*3, ... }
1996 without loss of precision, where X is STMT's DR_STEP.
1998 Return true if this is possible, describing the gather load or scatter
1999 store in GS_INFO. MASKED_P is true if the load or store is conditional. */
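/* As an illustration, assume 4-byte elements accessed with DR_STEP == 400.
   Trying SCALE == 4 gives a per-element offset of 400 / 4 == 100, so the
   offset vector is { 0, 100, 200, ... } and its largest element is
   100 * COUNT; as long as that value fits in the 32 bits of the element
   type, the access can be implemented as a gather or scatter.  */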
2001 static bool
2002 vect_truncate_gather_scatter_offset (gimple *stmt, loop_vec_info loop_vinfo,
2003 bool masked_p,
2004 gather_scatter_info *gs_info)
2006 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2007 data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
2008 tree step = DR_STEP (dr);
2009 if (TREE_CODE (step) != INTEGER_CST)
2011 /* ??? Perhaps we could use range information here? */
2012 if (dump_enabled_p ())
2013 dump_printf_loc (MSG_NOTE, vect_location,
2014 "cannot truncate variable step.\n");
2015 return false;
2018 /* Get the number of bits in an element. */
2019 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2020 scalar_mode element_mode = SCALAR_TYPE_MODE (TREE_TYPE (vectype));
2021 unsigned int element_bits = GET_MODE_BITSIZE (element_mode);
2023 /* Set COUNT to the upper limit on the number of elements - 1.
2024 Start with the maximum vectorization factor. */
2025 unsigned HOST_WIDE_INT count = vect_max_vf (loop_vinfo) - 1;
2027 /* Try lowering COUNT to the number of scalar latch iterations. */
2028 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2029 widest_int max_iters;
2030 if (max_loop_iterations (loop, &max_iters)
2031 && max_iters < count)
2032 count = max_iters.to_shwi ();
2034 /* Try scales of 1 and the element size. */
2035 int scales[] = { 1, vect_get_scalar_dr_size (dr) };
2036 bool overflow_p = false;
2037 for (int i = 0; i < 2; ++i)
2039 int scale = scales[i];
2040 widest_int factor;
2041 if (!wi::multiple_of_p (wi::to_widest (step), scale, SIGNED, &factor))
2042 continue;
2044 /* See whether the largest offset we need, COUNT * STEP / SCALE,
2045 fits in ELEMENT_BITS bits. */
2046 widest_int range = wi::mul (count, factor, SIGNED, &overflow_p);
2047 if (overflow_p)
2048 continue;
2049 signop sign = range >= 0 ? UNSIGNED : SIGNED;
2050 if (wi::min_precision (range, sign) > element_bits)
2052 overflow_p = true;
2053 continue;
2056 /* See whether the target supports the operation. */
2057 tree memory_type = TREE_TYPE (DR_REF (dr));
2058 if (!vect_gather_scatter_fn_p (DR_IS_READ (dr), masked_p, vectype,
2059 memory_type, element_bits, sign, scale,
2060 &gs_info->ifn, &gs_info->element_type))
2061 continue;
2063 tree offset_type = build_nonstandard_integer_type (element_bits,
2064 sign == UNSIGNED);
2066 gs_info->decl = NULL_TREE;
2067 /* Logically the sum of DR_BASE_ADDRESS, DR_INIT and DR_OFFSET,
2068 but we don't need to store that here. */
2069 gs_info->base = NULL_TREE;
2070 gs_info->offset = fold_convert (offset_type, step);
2071 gs_info->offset_dt = vect_constant_def;
2072 gs_info->offset_vectype = NULL_TREE;
2073 gs_info->scale = scale;
2074 gs_info->memory_type = memory_type;
2075 return true;
2078 if (overflow_p && dump_enabled_p ())
2079 dump_printf_loc (MSG_NOTE, vect_location,
2080 "truncating gather/scatter offset to %d bits"
2081 " might change its value.\n", element_bits);
2083 return false;
2086 /* Return true if we can use gather/scatter internal functions to
2087 vectorize STMT, which is a grouped or strided load or store.
2088 MASKED_P is true if load or store is conditional. When returning
2089 true, fill in GS_INFO with the information required to perform the
2090 operation. */
2092 static bool
2093 vect_use_strided_gather_scatters_p (gimple *stmt, loop_vec_info loop_vinfo,
2094 bool masked_p,
2095 gather_scatter_info *gs_info)
2097 if (!vect_check_gather_scatter (stmt, loop_vinfo, gs_info)
2098 || gs_info->decl)
2099 return vect_truncate_gather_scatter_offset (stmt, loop_vinfo,
2100 masked_p, gs_info);
2102 scalar_mode element_mode = SCALAR_TYPE_MODE (gs_info->element_type);
2103 unsigned int element_bits = GET_MODE_BITSIZE (element_mode);
2104 tree offset_type = TREE_TYPE (gs_info->offset);
2105 unsigned int offset_bits = TYPE_PRECISION (offset_type);
2107 /* Enforced by vect_check_gather_scatter. */
2108 gcc_assert (element_bits >= offset_bits);
2110 /* If the elements are wider than the offset, convert the offset to the
2111 same width, without changing its sign. */
2112 if (element_bits > offset_bits)
2114 bool unsigned_p = TYPE_UNSIGNED (offset_type);
2115 offset_type = build_nonstandard_integer_type (element_bits, unsigned_p);
2116 gs_info->offset = fold_convert (offset_type, gs_info->offset);
2119 if (dump_enabled_p ())
2120 dump_printf_loc (MSG_NOTE, vect_location,
2121 "using gather/scatter for strided/grouped access,"
2122 " scale = %d\n", gs_info->scale);
2124 return true;
2127 /* STMT is a non-strided load or store, meaning that it accesses
2128 elements with a known constant step. Return -1 if that step
2129 is negative, 0 if it is zero, and 1 if it is greater than zero. */
2131 static int
2132 compare_step_with_zero (gimple *stmt)
2134 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2135 data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
2136 return tree_int_cst_compare (vect_dr_behavior (dr)->step,
2137 size_zero_node);
2140 /* If the target supports a permute mask that reverses the elements in
2141 a vector of type VECTYPE, return that mask, otherwise return null. */
2143 static tree
2144 perm_mask_for_reverse (tree vectype)
2146 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2148 /* The encoding has a single stepped pattern. */
2149 vec_perm_builder sel (nunits, 1, 3);
2150 for (int i = 0; i < 3; ++i)
2151 sel.quick_push (nunits - 1 - i);
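/* For a 4-element vector this encodes the reversing permutation
   { 3, 2, 1, 0 }; the single stepped pattern lets the same encoding
   describe vectors with a variable number of elements.  */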
2153 vec_perm_indices indices (sel, 1, nunits);
2154 if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
2155 return NULL_TREE;
2156 return vect_gen_perm_mask_checked (vectype, indices);
2159 /* STMT is either a masked or unconditional store. Return the value
2160 being stored. */
2162 tree
2163 vect_get_store_rhs (gimple *stmt)
2165 if (gassign *assign = dyn_cast <gassign *> (stmt))
2167 gcc_assert (gimple_assign_single_p (assign));
2168 return gimple_assign_rhs1 (assign);
2170 if (gcall *call = dyn_cast <gcall *> (stmt))
2172 internal_fn ifn = gimple_call_internal_fn (call);
2173 int index = internal_fn_stored_value_index (ifn);
2174 gcc_assert (index >= 0);
2175 return gimple_call_arg (stmt, index);
2177 gcc_unreachable ();
2180 /* A subroutine of get_load_store_type, with a subset of the same
2181 arguments. Handle the case where STMT is part of a grouped load
2182 or store.
2184 For stores, the statements in the group are all consecutive
2185 and there is no gap at the end. For loads, the statements in the
2186 group might not be consecutive; there can be gaps between statements
2187 as well as at the end. */
2189 static bool
2190 get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
2191 bool masked_p, vec_load_store_type vls_type,
2192 vect_memory_access_type *memory_access_type,
2193 gather_scatter_info *gs_info)
2195 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2196 vec_info *vinfo = stmt_info->vinfo;
2197 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2198 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
2199 gimple *first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
2200 data_reference *first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
2201 unsigned int group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
2202 bool single_element_p = (stmt == first_stmt
2203 && !DR_GROUP_NEXT_ELEMENT (stmt_info));
2204 unsigned HOST_WIDE_INT gap = DR_GROUP_GAP (vinfo_for_stmt (first_stmt));
2205 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2207 /* True if the vectorized statements would access beyond the last
2208 statement in the group. */
2209 bool overrun_p = false;
2211 /* True if we can cope with such overrun by peeling for gaps, so that
2212 there is at least one final scalar iteration after the vector loop. */
2213 bool can_overrun_p = (!masked_p
2214 && vls_type == VLS_LOAD
2215 && loop_vinfo
2216 && !loop->inner);
2218 /* There can only be a gap at the end of the group if the stride is
2219 known at compile time. */
2220 gcc_assert (!STMT_VINFO_STRIDED_P (stmt_info) || gap == 0);
2222 /* Stores can't yet have gaps. */
2223 gcc_assert (slp || vls_type == VLS_LOAD || gap == 0);
2225 if (slp)
2227 if (STMT_VINFO_STRIDED_P (stmt_info))
2229 /* Try to use consecutive accesses of DR_GROUP_SIZE elements,
2230 separated by the stride, until we have a complete vector.
2231 Fall back to scalar accesses if that isn't possible. */
2232 if (multiple_p (nunits, group_size))
2233 *memory_access_type = VMAT_STRIDED_SLP;
2234 else
2235 *memory_access_type = VMAT_ELEMENTWISE;
2237 else
2239 overrun_p = loop_vinfo && gap != 0;
2240 if (overrun_p && vls_type != VLS_LOAD)
2242 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2243 "Grouped store with gaps requires"
2244 " non-consecutive accesses\n");
2245 return false;
2247 /* An overrun is fine if the trailing elements are smaller
2248 than the alignment boundary B. Every vector access will
2249 be a multiple of B and so we are guaranteed to access a
2250 non-gap element in the same B-sized block. */
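/* For instance, with 4-byte elements and a data reference known to be
   16-byte aligned, a gap of up to three trailing elements stays inside
   the last 16-byte block that the final vector access is guaranteed to
   touch anyway, so the overrun stays within memory that is accessed
   regardless.  */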
2251 if (overrun_p
2252 && gap < (vect_known_alignment_in_bytes (first_dr)
2253 / vect_get_scalar_dr_size (first_dr)))
2254 overrun_p = false;
2255 if (overrun_p && !can_overrun_p)
2257 if (dump_enabled_p ())
2258 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2259 "Peeling for outer loop is not supported\n");
2260 return false;
2262 *memory_access_type = VMAT_CONTIGUOUS;
2265 else
2267 /* We can always handle this case using elementwise accesses,
2268 but see if something more efficient is available. */
2269 *memory_access_type = VMAT_ELEMENTWISE;
2271 /* If there is a gap at the end of the group then these optimizations
2272 would access excess elements in the last iteration. */
2273 bool would_overrun_p = (gap != 0);
2274 /* An overrun is fine if the trailing elements are smaller than the
2275 alignment boundary B. Every vector access will be a multiple of B
2276 and so we are guaranteed to access a non-gap element in the
2277 same B-sized block. */
2278 if (would_overrun_p
2279 && !masked_p
2280 && gap < (vect_known_alignment_in_bytes (first_dr)
2281 / vect_get_scalar_dr_size (first_dr)))
2282 would_overrun_p = false;
2284 if (!STMT_VINFO_STRIDED_P (stmt_info)
2285 && (can_overrun_p || !would_overrun_p)
2286 && compare_step_with_zero (stmt) > 0)
2288 /* First cope with the degenerate case of a single-element
2289 vector. */
2290 if (known_eq (TYPE_VECTOR_SUBPARTS (vectype), 1U))
2291 *memory_access_type = VMAT_CONTIGUOUS;
2293 /* Otherwise try using LOAD/STORE_LANES. */
2294 if (*memory_access_type == VMAT_ELEMENTWISE
2295 && (vls_type == VLS_LOAD
2296 ? vect_load_lanes_supported (vectype, group_size, masked_p)
2297 : vect_store_lanes_supported (vectype, group_size,
2298 masked_p)))
2300 *memory_access_type = VMAT_LOAD_STORE_LANES;
2301 overrun_p = would_overrun_p;
2304 /* If that fails, try using permuting loads. */
2305 if (*memory_access_type == VMAT_ELEMENTWISE
2306 && (vls_type == VLS_LOAD
2307 ? vect_grouped_load_supported (vectype, single_element_p,
2308 group_size)
2309 : vect_grouped_store_supported (vectype, group_size)))
2311 *memory_access_type = VMAT_CONTIGUOUS_PERMUTE;
2312 overrun_p = would_overrun_p;
2316 /* As a last resort, try using a gather load or scatter store.
2318 ??? Although the code can handle all group sizes correctly,
2319 it probably isn't a win to use separate strided accesses based
2320 on nearby locations. Or, even if it's a win over scalar code,
2321 it might not be a win over vectorizing at a lower VF, if that
2322 allows us to use contiguous accesses. */
2323 if (*memory_access_type == VMAT_ELEMENTWISE
2324 && single_element_p
2325 && loop_vinfo
2326 && vect_use_strided_gather_scatters_p (stmt, loop_vinfo,
2327 masked_p, gs_info))
2328 *memory_access_type = VMAT_GATHER_SCATTER;
2331 if (vls_type != VLS_LOAD && first_stmt == stmt)
2333 /* STMT is the leader of the group. Check the operands of all the
2334 stmts of the group. */
2335 gimple *next_stmt = DR_GROUP_NEXT_ELEMENT (stmt_info);
2336 while (next_stmt)
2338 tree op = vect_get_store_rhs (next_stmt);
2339 gimple *def_stmt;
2340 enum vect_def_type dt;
2341 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
2343 if (dump_enabled_p ())
2344 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2345 "use not simple.\n");
2346 return false;
2348 next_stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
2352 if (overrun_p)
2354 gcc_assert (can_overrun_p);
2355 if (dump_enabled_p ())
2356 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2357 "Data access with gaps requires scalar "
2358 "epilogue loop\n");
2359 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
2362 return true;
2365 /* A subroutine of get_load_store_type, with a subset of the same
2366 arguments. Handle the case where STMT is a load or store that
2367 accesses consecutive elements with a negative step. */
2369 static vect_memory_access_type
2370 get_negative_load_store_type (gimple *stmt, tree vectype,
2371 vec_load_store_type vls_type,
2372 unsigned int ncopies)
2374 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2375 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
2376 dr_alignment_support alignment_support_scheme;
2378 if (ncopies > 1)
2380 if (dump_enabled_p ())
2381 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2382 "multiple types with negative step.\n");
2383 return VMAT_ELEMENTWISE;
2386 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
2387 if (alignment_support_scheme != dr_aligned
2388 && alignment_support_scheme != dr_unaligned_supported)
2390 if (dump_enabled_p ())
2391 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2392 "negative step but alignment required.\n");
2393 return VMAT_ELEMENTWISE;
2396 if (vls_type == VLS_STORE_INVARIANT)
2398 if (dump_enabled_p ())
2399 dump_printf_loc (MSG_NOTE, vect_location,
2400 "negative step with invariant source;"
2401 " no permute needed.\n");
2402 return VMAT_CONTIGUOUS_DOWN;
2405 if (!perm_mask_for_reverse (vectype))
2407 if (dump_enabled_p ())
2408 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2409 "negative step and reversing not supported.\n");
2410 return VMAT_ELEMENTWISE;
2413 return VMAT_CONTIGUOUS_REVERSE;
2416 /* Analyze load or store statement STMT of type VLS_TYPE. Return true
2417 if there is a memory access type that the vectorized form can use,
2418 storing it in *MEMORY_ACCESS_TYPE if so. If we decide to use gathers
2419 or scatters, fill in GS_INFO accordingly.
2421 SLP says whether we're performing SLP rather than loop vectorization.
2422 MASKED_P is true if the statement is conditional on a vectorized mask.
2423 VECTYPE is the vector type that the vectorized statements will use.
2424 NCOPIES is the number of vector statements that will be needed. */
2426 static bool
2427 get_load_store_type (gimple *stmt, tree vectype, bool slp, bool masked_p,
2428 vec_load_store_type vls_type, unsigned int ncopies,
2429 vect_memory_access_type *memory_access_type,
2430 gather_scatter_info *gs_info)
2432 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2433 vec_info *vinfo = stmt_info->vinfo;
2434 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2435 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2436 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
2438 *memory_access_type = VMAT_GATHER_SCATTER;
2439 gimple *def_stmt;
2440 if (!vect_check_gather_scatter (stmt, loop_vinfo, gs_info))
2441 gcc_unreachable ();
2442 else if (!vect_is_simple_use (gs_info->offset, vinfo, &def_stmt,
2443 &gs_info->offset_dt,
2444 &gs_info->offset_vectype))
2446 if (dump_enabled_p ())
2447 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2448 "%s index use not simple.\n",
2449 vls_type == VLS_LOAD ? "gather" : "scatter");
2450 return false;
2453 else if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
2455 if (!get_group_load_store_type (stmt, vectype, slp, masked_p, vls_type,
2456 memory_access_type, gs_info))
2457 return false;
2459 else if (STMT_VINFO_STRIDED_P (stmt_info))
2461 gcc_assert (!slp);
2462 if (loop_vinfo
2463 && vect_use_strided_gather_scatters_p (stmt, loop_vinfo,
2464 masked_p, gs_info))
2465 *memory_access_type = VMAT_GATHER_SCATTER;
2466 else
2467 *memory_access_type = VMAT_ELEMENTWISE;
2469 else
2471 int cmp = compare_step_with_zero (stmt);
2472 if (cmp < 0)
2473 *memory_access_type = get_negative_load_store_type
2474 (stmt, vectype, vls_type, ncopies);
2475 else if (cmp == 0)
2477 gcc_assert (vls_type == VLS_LOAD);
2478 *memory_access_type = VMAT_INVARIANT;
2480 else
2481 *memory_access_type = VMAT_CONTIGUOUS;
2484 if ((*memory_access_type == VMAT_ELEMENTWISE
2485 || *memory_access_type == VMAT_STRIDED_SLP)
2486 && !nunits.is_constant ())
2488 if (dump_enabled_p ())
2489 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2490 "Not using elementwise accesses due to variable "
2491 "vectorization factor.\n");
2492 return false;
2495 /* FIXME: At the moment the cost model seems to underestimate the
2496 cost of using elementwise accesses. This check preserves the
2497 traditional behavior until that can be fixed. */
2498 if (*memory_access_type == VMAT_ELEMENTWISE
2499 && !STMT_VINFO_STRIDED_P (stmt_info)
2500 && !(stmt == DR_GROUP_FIRST_ELEMENT (stmt_info)
2501 && !DR_GROUP_NEXT_ELEMENT (stmt_info)
2502 && !pow2p_hwi (DR_GROUP_SIZE (stmt_info))))
2504 if (dump_enabled_p ())
2505 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2506 "not falling back to elementwise accesses\n");
2507 return false;
2509 return true;
2512 /* Return true if boolean argument MASK is suitable for vectorizing
2513 conditional load or store STMT. When returning true, store the type
2514 of the definition in *MASK_DT_OUT and the type of the vectorized mask
2515 in *MASK_VECTYPE_OUT. */
2517 static bool
2518 vect_check_load_store_mask (gimple *stmt, tree mask,
2519 vect_def_type *mask_dt_out,
2520 tree *mask_vectype_out)
2522 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (mask)))
2524 if (dump_enabled_p ())
2525 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2526 "mask argument is not a boolean.\n");
2527 return false;
2530 if (TREE_CODE (mask) != SSA_NAME)
2532 if (dump_enabled_p ())
2533 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2534 "mask argument is not an SSA name.\n");
2535 return false;
2538 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2539 gimple *def_stmt;
2540 enum vect_def_type mask_dt;
2541 tree mask_vectype;
2542 if (!vect_is_simple_use (mask, stmt_info->vinfo, &def_stmt, &mask_dt,
2543 &mask_vectype))
2545 if (dump_enabled_p ())
2546 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2547 "mask use not simple.\n");
2548 return false;
2551 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2552 if (!mask_vectype)
2553 mask_vectype = get_mask_type_for_scalar_type (TREE_TYPE (vectype));
2555 if (!mask_vectype || !VECTOR_BOOLEAN_TYPE_P (mask_vectype))
2557 if (dump_enabled_p ())
2558 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2559 "could not find an appropriate vector mask type.\n");
2560 return false;
2563 if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_vectype),
2564 TYPE_VECTOR_SUBPARTS (vectype)))
2566 if (dump_enabled_p ())
2568 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2569 "vector mask type ");
2570 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, mask_vectype);
2571 dump_printf (MSG_MISSED_OPTIMIZATION,
2572 " does not match vector data type ");
2573 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, vectype);
2574 dump_printf (MSG_MISSED_OPTIMIZATION, ".\n");
2576 return false;
2579 *mask_dt_out = mask_dt;
2580 *mask_vectype_out = mask_vectype;
2581 return true;
2584 /* Return true if stored value RHS is suitable for vectorizing store
2585 statement STMT. When returning true, store the type of the
2586 definition in *RHS_DT_OUT, the type of the vectorized store value in
2587 *RHS_VECTYPE_OUT and the type of the store in *VLS_TYPE_OUT. */
2589 static bool
2590 vect_check_store_rhs (gimple *stmt, tree rhs, vect_def_type *rhs_dt_out,
2591 tree *rhs_vectype_out, vec_load_store_type *vls_type_out)
2593 /* If this is a store of a constant, make sure
2594 native_encode_expr can handle it. */
2595 if (CONSTANT_CLASS_P (rhs) && native_encode_expr (rhs, NULL, 64) == 0)
2597 if (dump_enabled_p ())
2598 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2599 "cannot encode constant as a byte sequence.\n");
2600 return false;
2603 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2604 gimple *def_stmt;
2605 enum vect_def_type rhs_dt;
2606 tree rhs_vectype;
2607 if (!vect_is_simple_use (rhs, stmt_info->vinfo, &def_stmt, &rhs_dt,
2608 &rhs_vectype))
2610 if (dump_enabled_p ())
2611 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2612 "use not simple.\n");
2613 return false;
2616 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2617 if (rhs_vectype && !useless_type_conversion_p (vectype, rhs_vectype))
2619 if (dump_enabled_p ())
2620 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2621 "incompatible vector types.\n");
2622 return false;
2625 *rhs_dt_out = rhs_dt;
2626 *rhs_vectype_out = rhs_vectype;
2627 if (rhs_dt == vect_constant_def || rhs_dt == vect_external_def)
2628 *vls_type_out = VLS_STORE_INVARIANT;
2629 else
2630 *vls_type_out = VLS_STORE;
2631 return true;
2634 /* Build an all-ones vector mask of type MASKTYPE while vectorizing STMT.
2635 Note that we support masks with floating-point type, in which case the
2636 floats are interpreted as a bitmask. */
2638 static tree
2639 vect_build_all_ones_mask (gimple *stmt, tree masktype)
2641 if (TREE_CODE (masktype) == INTEGER_TYPE)
2642 return build_int_cst (masktype, -1);
2643 else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
2645 tree mask = build_int_cst (TREE_TYPE (masktype), -1);
2646 mask = build_vector_from_val (masktype, mask);
2647 return vect_init_vector (stmt, mask, masktype, NULL);
2649 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
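/* Build a value whose bit pattern is all ones (for IEEE formats this
   happens to be a NaN); since the mask is only interpreted as a bitmask,
   it still selects every lane.  */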
2651 REAL_VALUE_TYPE r;
2652 long tmp[6];
2653 for (int j = 0; j < 6; ++j)
2654 tmp[j] = -1;
2655 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
2656 tree mask = build_real (TREE_TYPE (masktype), r);
2657 mask = build_vector_from_val (masktype, mask);
2658 return vect_init_vector (stmt, mask, masktype, NULL);
2660 gcc_unreachable ();
2663 /* Build an all-zero merge value of type VECTYPE while vectorizing
2664 STMT as a gather load. */
2666 static tree
2667 vect_build_zero_merge_argument (gimple *stmt, tree vectype)
2669 tree merge;
2670 if (TREE_CODE (TREE_TYPE (vectype)) == INTEGER_TYPE)
2671 merge = build_int_cst (TREE_TYPE (vectype), 0);
2672 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (vectype)))
2674 REAL_VALUE_TYPE r;
2675 long tmp[6];
2676 for (int j = 0; j < 6; ++j)
2677 tmp[j] = 0;
2678 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (vectype)));
2679 merge = build_real (TREE_TYPE (vectype), r);
2681 else
2682 gcc_unreachable ();
2683 merge = build_vector_from_val (vectype, merge);
2684 return vect_init_vector (stmt, merge, vectype, NULL);
2687 /* Build a gather load call while vectorizing STMT. Insert new instructions
2688 before GSI and add them to VEC_STMT. GS_INFO describes the gather load
2689 operation. If the load is conditional, MASK is the unvectorized
2690 condition and MASK_DT is its definition type, otherwise MASK is null. */
2692 static void
2693 vect_build_gather_load_calls (gimple *stmt, gimple_stmt_iterator *gsi,
2694 gimple **vec_stmt, gather_scatter_info *gs_info,
2695 tree mask, vect_def_type mask_dt)
2697 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2698 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2699 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2700 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2701 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2702 int ncopies = vect_get_num_copies (loop_vinfo, vectype);
2703 edge pe = loop_preheader_edge (loop);
2704 enum { NARROW, NONE, WIDEN } modifier;
2705 poly_uint64 gather_off_nunits
2706 = TYPE_VECTOR_SUBPARTS (gs_info->offset_vectype);
2708 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info->decl));
2709 tree rettype = TREE_TYPE (TREE_TYPE (gs_info->decl));
2710 tree srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2711 tree ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2712 tree idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2713 tree masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2714 tree scaletype = TREE_VALUE (arglist);
2715 gcc_checking_assert (types_compatible_p (srctype, rettype)
2716 && (!mask || types_compatible_p (srctype, masktype)));
2718 tree perm_mask = NULL_TREE;
2719 tree mask_perm_mask = NULL_TREE;
2720 if (known_eq (nunits, gather_off_nunits))
2721 modifier = NONE;
2722 else if (known_eq (nunits * 2, gather_off_nunits))
2724 modifier = WIDEN;
2726 /* Currently widening gathers and scatters are only supported for
2727 fixed-length vectors. */
2728 int count = gather_off_nunits.to_constant ();
2729 vec_perm_builder sel (count, count, 1);
2730 for (int i = 0; i < count; ++i)
2731 sel.quick_push (i | (count / 2));
2733 vec_perm_indices indices (sel, 1, count);
2734 perm_mask = vect_gen_perm_mask_checked (gs_info->offset_vectype,
2735 indices);
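/* For a gather whose offset vector has twice as many elements as the data
   vector (say four offsets per vector), the selector built above is
   { 2, 3, 2, 3 }: odd-numbered copies reuse the upper half of the same
   offset vector.  */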
2737 else if (known_eq (nunits, gather_off_nunits * 2))
2739 modifier = NARROW;
2741 /* Currently narrowing gathers and scatters are only supported for
2742 fixed-length vectors. */
2743 int count = nunits.to_constant ();
2744 vec_perm_builder sel (count, count, 1);
2745 sel.quick_grow (count);
2746 for (int i = 0; i < count; ++i)
2747 sel[i] = i < count / 2 ? i : i + count / 2;
2748 vec_perm_indices indices (sel, 2, count);
2749 perm_mask = vect_gen_perm_mask_checked (vectype, indices);
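/* With, say, eight elements per data vector the two-input selector is
   { 0, 1, 2, 3, 8, 9, 10, 11 }, picking the low half of each of two
   successive gather results to form one full data vector.  */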
2751 ncopies *= 2;
2753 if (mask)
2755 for (int i = 0; i < count; ++i)
2756 sel[i] = i | (count / 2);
2757 indices.new_vector (sel, 2, count);
2758 mask_perm_mask = vect_gen_perm_mask_checked (masktype, indices);
2761 else
2762 gcc_unreachable ();
2764 tree vec_dest = vect_create_destination_var (gimple_get_lhs (stmt),
2765 vectype);
2767 tree ptr = fold_convert (ptrtype, gs_info->base);
2768 if (!is_gimple_min_invariant (ptr))
2770 gimple_seq seq;
2771 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
2772 basic_block new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
2773 gcc_assert (!new_bb);
2776 tree scale = build_int_cst (scaletype, gs_info->scale);
2778 tree vec_oprnd0 = NULL_TREE;
2779 tree vec_mask = NULL_TREE;
2780 tree src_op = NULL_TREE;
2781 tree mask_op = NULL_TREE;
2782 tree prev_res = NULL_TREE;
2783 stmt_vec_info prev_stmt_info = NULL;
2785 if (!mask)
2787 src_op = vect_build_zero_merge_argument (stmt, rettype);
2788 mask_op = vect_build_all_ones_mask (stmt, masktype);
2791 for (int j = 0; j < ncopies; ++j)
2793 tree op, var;
2794 gimple *new_stmt;
2795 if (modifier == WIDEN && (j & 1))
2796 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
2797 perm_mask, stmt, gsi);
2798 else if (j == 0)
2799 op = vec_oprnd0
2800 = vect_get_vec_def_for_operand (gs_info->offset, stmt);
2801 else
2802 op = vec_oprnd0
2803 = vect_get_vec_def_for_stmt_copy (gs_info->offset_dt, vec_oprnd0);
2805 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
2807 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
2808 TYPE_VECTOR_SUBPARTS (idxtype)));
2809 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
2810 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
2811 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
2812 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2813 op = var;
2816 if (mask)
2818 if (mask_perm_mask && (j & 1))
2819 mask_op = permute_vec_elements (mask_op, mask_op,
2820 mask_perm_mask, stmt, gsi);
2821 else
2823 if (j == 0)
2824 vec_mask = vect_get_vec_def_for_operand (mask, stmt);
2825 else
2826 vec_mask = vect_get_vec_def_for_stmt_copy (mask_dt, vec_mask);
2828 mask_op = vec_mask;
2829 if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
2831 gcc_assert
2832 (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op)),
2833 TYPE_VECTOR_SUBPARTS (masktype)));
2834 var = vect_get_new_ssa_name (masktype, vect_simple_var);
2835 mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
2836 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR,
2837 mask_op);
2838 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2839 mask_op = var;
2842 src_op = mask_op;
2845 new_stmt = gimple_build_call (gs_info->decl, 5, src_op, ptr, op,
2846 mask_op, scale);
2848 if (!useless_type_conversion_p (vectype, rettype))
2850 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype),
2851 TYPE_VECTOR_SUBPARTS (rettype)));
2852 op = vect_get_new_ssa_name (rettype, vect_simple_var);
2853 gimple_call_set_lhs (new_stmt, op);
2854 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2855 var = make_ssa_name (vec_dest);
2856 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
2857 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
2859 else
2861 var = make_ssa_name (vec_dest, new_stmt);
2862 gimple_call_set_lhs (new_stmt, var);
2865 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2867 if (modifier == NARROW)
2869 if ((j & 1) == 0)
2871 prev_res = var;
2872 continue;
2874 var = permute_vec_elements (prev_res, var, perm_mask, stmt, gsi);
2875 new_stmt = SSA_NAME_DEF_STMT (var);
2878 if (prev_stmt_info == NULL)
2879 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2880 else
2881 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2882 prev_stmt_info = vinfo_for_stmt (new_stmt);
2886 /* Prepare the base and offset in GS_INFO for vectorization.
2887 Set *DATAREF_PTR to the loop-invariant base address and *VEC_OFFSET
2888 to the vectorized offset argument for the first copy of STMT. STMT
2889 is the statement described by GS_INFO and LOOP is the containing loop. */
2891 static void
2892 vect_get_gather_scatter_ops (struct loop *loop, gimple *stmt,
2893 gather_scatter_info *gs_info,
2894 tree *dataref_ptr, tree *vec_offset)
2896 gimple_seq stmts = NULL;
2897 *dataref_ptr = force_gimple_operand (gs_info->base, &stmts, true, NULL_TREE);
2898 if (stmts != NULL)
2900 basic_block new_bb;
2901 edge pe = loop_preheader_edge (loop);
2902 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
2903 gcc_assert (!new_bb);
2905 tree offset_type = TREE_TYPE (gs_info->offset);
2906 tree offset_vectype = get_vectype_for_scalar_type (offset_type);
2907 *vec_offset = vect_get_vec_def_for_operand (gs_info->offset, stmt,
2908 offset_vectype);
2911 /* Prepare to implement a grouped or strided load or store using
2912 the gather load or scatter store operation described by GS_INFO.
2913 STMT is the load or store statement.
2915 Set *DATAREF_BUMP to the amount that should be added to the base
2916 address after each copy of the vectorized statement. Set *VEC_OFFSET
2917 to an invariant offset vector in which element I has the value
2918 I * DR_STEP / SCALE. */
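/* For example, vectorizing a stride of 12 bytes with SCALE == 4 gives
   *VEC_OFFSET == { 0, 3, 6, 9, ... }; the gather or scatter multiplies
   each offset by SCALE again, so lane I addresses base + I * 12 as
   required.  */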
2920 static void
2921 vect_get_strided_load_store_ops (gimple *stmt, loop_vec_info loop_vinfo,
2922 gather_scatter_info *gs_info,
2923 tree *dataref_bump, tree *vec_offset)
2925 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2926 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
2927 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2928 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2929 gimple_seq stmts;
2931 tree bump = size_binop (MULT_EXPR,
2932 fold_convert (sizetype, DR_STEP (dr)),
2933 size_int (TYPE_VECTOR_SUBPARTS (vectype)));
2934 *dataref_bump = force_gimple_operand (bump, &stmts, true, NULL_TREE);
2935 if (stmts)
2936 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
2938 /* The offset given in GS_INFO can have pointer type, so use the element
2939 type of the vector instead. */
2940 tree offset_type = TREE_TYPE (gs_info->offset);
2941 tree offset_vectype = get_vectype_for_scalar_type (offset_type);
2942 offset_type = TREE_TYPE (offset_vectype);
2944 /* Calculate X = DR_STEP / SCALE and convert it to the appropriate type. */
2945 tree step = size_binop (EXACT_DIV_EXPR, DR_STEP (dr),
2946 ssize_int (gs_info->scale));
2947 step = fold_convert (offset_type, step);
2948 step = force_gimple_operand (step, &stmts, true, NULL_TREE);
2950 /* Create {0, X, X*2, X*3, ...}. */
2951 *vec_offset = gimple_build (&stmts, VEC_SERIES_EXPR, offset_vectype,
2952 build_zero_cst (offset_type), step);
2953 if (stmts)
2954 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
2957 /* Return the amount that should be added to a vector pointer to move
2958 to the next or previous copy of AGGR_TYPE. DR is the data reference
2959 being vectorized and MEMORY_ACCESS_TYPE describes the type of
2960 vectorization. */
2962 static tree
2963 vect_get_data_ptr_increment (data_reference *dr, tree aggr_type,
2964 vect_memory_access_type memory_access_type)
2966 if (memory_access_type == VMAT_INVARIANT)
2967 return size_zero_node;
2969 tree iv_step = TYPE_SIZE_UNIT (aggr_type);
2970 tree step = vect_dr_behavior (dr)->step;
2971 if (tree_int_cst_sgn (step) == -1)
2972 iv_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv_step), iv_step);
2973 return iv_step;
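/* For instance, if AGGR_TYPE is a 16-byte vector type and the data
   reference has a negative step, the increment is -16: each further copy
   of the vectorized access moves one whole vector backwards.  */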
2976 /* Check and perform vectorization of BUILT_IN_BSWAP{16,32,64}. */
2978 static bool
2979 vectorizable_bswap (gimple *stmt, gimple_stmt_iterator *gsi,
2980 gimple **vec_stmt, slp_tree slp_node,
2981 tree vectype_in, enum vect_def_type *dt,
2982 stmt_vector_for_cost *cost_vec)
2984 tree op, vectype;
2985 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2986 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2987 unsigned ncopies;
2988 unsigned HOST_WIDE_INT nunits, num_bytes;
2990 op = gimple_call_arg (stmt, 0);
2991 vectype = STMT_VINFO_VECTYPE (stmt_info);
2993 if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nunits))
2994 return false;
2996 /* Multiple types in SLP are handled by creating the appropriate number of
2997 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2998 case of SLP. */
2999 if (slp_node)
3000 ncopies = 1;
3001 else
3002 ncopies = vect_get_num_copies (loop_vinfo, vectype);
3004 gcc_assert (ncopies >= 1);
3006 tree char_vectype = get_same_sized_vectype (char_type_node, vectype_in);
3007 if (! char_vectype)
3008 return false;
3010 if (!TYPE_VECTOR_SUBPARTS (char_vectype).is_constant (&num_bytes))
3011 return false;
3013 unsigned word_bytes = num_bytes / nunits;
3015 /* The encoding uses one stepped pattern for each byte in the word. */
3016 vec_perm_builder elts (num_bytes, word_bytes, 3);
3017 for (unsigned i = 0; i < 3; ++i)
3018 for (unsigned j = 0; j < word_bytes; ++j)
3019 elts.quick_push ((i + 1) * word_bytes - j - 1);
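/* E.g. for a 32-bit bswap on 16-byte vectors (WORD_BYTES == 4,
   NUM_BYTES == 16) this builds the byte permutation
   { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 },
   i.e. a reversal of the bytes within each word.  */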
3021 vec_perm_indices indices (elts, 1, num_bytes);
3022 if (!can_vec_perm_const_p (TYPE_MODE (char_vectype), indices))
3023 return false;
3025 if (! vec_stmt)
3027 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
3028 DUMP_VECT_SCOPE ("vectorizable_bswap");
3029 if (! slp_node)
3031 record_stmt_cost (cost_vec,
3032 1, vector_stmt, stmt_info, 0, vect_prologue);
3033 record_stmt_cost (cost_vec,
3034 ncopies, vec_perm, stmt_info, 0, vect_body);
3036 return true;
3039 tree bswap_vconst = vec_perm_indices_to_tree (char_vectype, indices);
3041 /* Transform. */
3042 vec<tree> vec_oprnds = vNULL;
3043 gimple *new_stmt = NULL;
3044 stmt_vec_info prev_stmt_info = NULL;
3045 for (unsigned j = 0; j < ncopies; j++)
3047 /* Handle uses. */
3048 if (j == 0)
3049 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
3050 else
3051 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
3053 /* Arguments are ready. Create the new vector stmt. */
3054 unsigned i;
3055 tree vop;
3056 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
3058 tree tem = make_ssa_name (char_vectype);
3059 new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
3060 char_vectype, vop));
3061 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3062 tree tem2 = make_ssa_name (char_vectype);
3063 new_stmt = gimple_build_assign (tem2, VEC_PERM_EXPR,
3064 tem, tem, bswap_vconst);
3065 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3066 tem = make_ssa_name (vectype);
3067 new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
3068 vectype, tem2));
3069 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3070 if (slp_node)
3071 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3074 if (slp_node)
3075 continue;
3077 if (j == 0)
3078 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3079 else
3080 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3082 prev_stmt_info = vinfo_for_stmt (new_stmt);
3085 vec_oprnds.release ();
3086 return true;
3089 /* Return true if vector types VECTYPE_IN and VECTYPE_OUT have
3090 integer elements and if we can narrow VECTYPE_IN to VECTYPE_OUT
3091 in a single step. On success, store the binary pack code in
3092 *CONVERT_CODE. */
3094 static bool
3095 simple_integer_narrowing (tree vectype_out, tree vectype_in,
3096 tree_code *convert_code)
3098 if (!INTEGRAL_TYPE_P (TREE_TYPE (vectype_out))
3099 || !INTEGRAL_TYPE_P (TREE_TYPE (vectype_in)))
3100 return false;
3102 tree_code code;
3103 int multi_step_cvt = 0;
3104 auto_vec <tree, 8> interm_types;
3105 if (!supportable_narrowing_operation (NOP_EXPR, vectype_out, vectype_in,
3106 &code, &multi_step_cvt,
3107 &interm_types)
3108 || multi_step_cvt)
3109 return false;
3111 *convert_code = code;
3112 return true;
3115 /* Function vectorizable_call.
3117 Check if GS performs a function call that can be vectorized.
3118 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3119 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3120 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3122 static bool
3123 vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
3124 slp_tree slp_node, stmt_vector_for_cost *cost_vec)
3126 gcall *stmt;
3127 tree vec_dest;
3128 tree scalar_dest;
3129 tree op, type;
3130 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
3131 stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info;
3132 tree vectype_out, vectype_in;
3133 poly_uint64 nunits_in;
3134 poly_uint64 nunits_out;
3135 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3136 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3137 vec_info *vinfo = stmt_info->vinfo;
3138 tree fndecl, new_temp, rhs_type;
3139 gimple *def_stmt;
3140 enum vect_def_type dt[3]
3141 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
3142 int ndts = 3;
3143 gimple *new_stmt = NULL;
3144 int ncopies, j;
3145 vec<tree> vargs = vNULL;
3146 enum { NARROW, NONE, WIDEN } modifier;
3147 size_t i, nargs;
3148 tree lhs;
3150 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3151 return false;
3153 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
3154 && ! vec_stmt)
3155 return false;
3157 /* Is GS a vectorizable call? */
3158 stmt = dyn_cast <gcall *> (gs);
3159 if (!stmt)
3160 return false;
3162 if (gimple_call_internal_p (stmt)
3163 && (internal_load_fn_p (gimple_call_internal_fn (stmt))
3164 || internal_store_fn_p (gimple_call_internal_fn (stmt))))
3165 /* Handled by vectorizable_load and vectorizable_store. */
3166 return false;
3168 if (gimple_call_lhs (stmt) == NULL_TREE
3169 || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
3170 return false;
3172 gcc_checking_assert (!stmt_can_throw_internal (stmt));
3174 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3176 /* Process function arguments. */
3177 rhs_type = NULL_TREE;
3178 vectype_in = NULL_TREE;
3179 nargs = gimple_call_num_args (stmt);
3181 /* Bail out if the function has more than three arguments; we do not have
3182 interesting builtin functions to vectorize with more than two arguments
3183 except for fma. Calls with no arguments are not vectorizable either. */
3184 if (nargs == 0 || nargs > 3)
3185 return false;
3187 /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic. */
3188 if (gimple_call_internal_p (stmt)
3189 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
3191 nargs = 0;
3192 rhs_type = unsigned_type_node;
3195 for (i = 0; i < nargs; i++)
3197 tree opvectype;
3199 op = gimple_call_arg (stmt, i);
3201 /* We can only handle calls with arguments of the same type. */
3202 if (rhs_type
3203 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
3205 if (dump_enabled_p ())
3206 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3207 "argument types differ.\n");
3208 return false;
3210 if (!rhs_type)
3211 rhs_type = TREE_TYPE (op);
3213 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[i], &opvectype))
3215 if (dump_enabled_p ())
3216 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3217 "use not simple.\n");
3218 return false;
3221 if (!vectype_in)
3222 vectype_in = opvectype;
3223 else if (opvectype
3224 && opvectype != vectype_in)
3226 if (dump_enabled_p ())
3227 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3228 "argument vector types differ.\n");
3229 return false;
3232 /* If all arguments are external or constant defs use a vector type with
3233 the same size as the output vector type. */
3234 if (!vectype_in)
3235 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
3236 if (vec_stmt)
3237 gcc_assert (vectype_in);
3238 if (!vectype_in)
3240 if (dump_enabled_p ())
3242 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3243 "no vectype for scalar type ");
3244 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
3245 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3248 return false;
3251 /* FORNOW */
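/* For example, a call taking V4SI arguments and producing V8HI results has
   nunits_in == 4 and nunits_out == 8 and is therefore classified as NARROW:
   two argument vectors are consumed for each result vector.  The opposite
   ratio is WIDEN, which no current target implements for calls.  */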
3252 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
3253 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
3254 if (known_eq (nunits_in * 2, nunits_out))
3255 modifier = NARROW;
3256 else if (known_eq (nunits_out, nunits_in))
3257 modifier = NONE;
3258 else if (known_eq (nunits_out * 2, nunits_in))
3259 modifier = WIDEN;
3260 else
3261 return false;
3263 /* We only handle functions that do not read or clobber memory. */
3264 if (gimple_vuse (stmt))
3266 if (dump_enabled_p ())
3267 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3268 "function reads from or writes to memory.\n");
3269 return false;
3272 /* For now, we only vectorize functions if a target specific builtin
3273 is available. TODO -- in some cases, it might be profitable to
3274 insert the calls for pieces of the vector, in order to be able
3275 to vectorize other operations in the loop. */
3276 fndecl = NULL_TREE;
3277 internal_fn ifn = IFN_LAST;
3278 combined_fn cfn = gimple_call_combined_fn (stmt);
3279 tree callee = gimple_call_fndecl (stmt);
3281 /* First try using an internal function. */
3282 tree_code convert_code = ERROR_MARK;
3283 if (cfn != CFN_LAST
3284 && (modifier == NONE
3285 || (modifier == NARROW
3286 && simple_integer_narrowing (vectype_out, vectype_in,
3287 &convert_code))))
3288 ifn = vectorizable_internal_function (cfn, callee, vectype_out,
3289 vectype_in);
3291 /* If that fails, try asking for a target-specific built-in function. */
3292 if (ifn == IFN_LAST)
3294 if (cfn != CFN_LAST)
3295 fndecl = targetm.vectorize.builtin_vectorized_function
3296 (cfn, vectype_out, vectype_in);
3297 else if (callee)
3298 fndecl = targetm.vectorize.builtin_md_vectorized_function
3299 (callee, vectype_out, vectype_in);
3302 if (ifn == IFN_LAST && !fndecl)
3304 if (cfn == CFN_GOMP_SIMD_LANE
3305 && !slp_node
3306 && loop_vinfo
3307 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
3308 && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
3309 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
3310 == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
3312 /* We can handle IFN_GOMP_SIMD_LANE by returning a
3313 { 0, 1, 2, ... vf - 1 } vector. */
3314 gcc_assert (nargs == 0);
3316 else if (modifier == NONE
3317 && (gimple_call_builtin_p (stmt, BUILT_IN_BSWAP16)
3318 || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP32)
3319 || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP64)))
3320 return vectorizable_bswap (stmt, gsi, vec_stmt, slp_node,
3321 vectype_in, dt, cost_vec);
3322 else
3324 if (dump_enabled_p ())
3325 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3326 "function is not vectorizable.\n");
3327 return false;
3331 if (slp_node)
3332 ncopies = 1;
3333 else if (modifier == NARROW && ifn == IFN_LAST)
3334 ncopies = vect_get_num_copies (loop_vinfo, vectype_out);
3335 else
3336 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
3338 /* Sanity check: make sure that at least one copy of the vectorized stmt
3339 needs to be generated. */
3340 gcc_assert (ncopies >= 1);
3342 if (!vec_stmt) /* transformation not required. */
3344 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
3345 DUMP_VECT_SCOPE ("vectorizable_call");
3346 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec);
3347 if (ifn != IFN_LAST && modifier == NARROW && !slp_node)
3348 record_stmt_cost (cost_vec, ncopies / 2,
3349 vec_promote_demote, stmt_info, 0, vect_body);
3351 return true;
3354 /* Transform. */
3356 if (dump_enabled_p ())
3357 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
3359 /* Handle def. */
3360 scalar_dest = gimple_call_lhs (stmt);
3361 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
3363 prev_stmt_info = NULL;
3364 if (modifier == NONE || ifn != IFN_LAST)
3366 tree prev_res = NULL_TREE;
3367 for (j = 0; j < ncopies; ++j)
3369 /* Build argument list for the vectorized call. */
3370 if (j == 0)
3371 vargs.create (nargs);
3372 else
3373 vargs.truncate (0);
3375 if (slp_node)
3377 auto_vec<vec<tree> > vec_defs (nargs);
3378 vec<tree> vec_oprnds0;
3380 for (i = 0; i < nargs; i++)
3381 vargs.quick_push (gimple_call_arg (stmt, i));
3382 vect_get_slp_defs (vargs, slp_node, &vec_defs);
3383 vec_oprnds0 = vec_defs[0];
3385 /* Arguments are ready. Create the new vector stmt. */
3386 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
3388 size_t k;
3389 for (k = 0; k < nargs; k++)
3391 vec<tree> vec_oprndsk = vec_defs[k];
3392 vargs[k] = vec_oprndsk[i];
3394 if (modifier == NARROW)
3396 tree half_res = make_ssa_name (vectype_in);
3397 gcall *call
3398 = gimple_build_call_internal_vec (ifn, vargs);
3399 gimple_call_set_lhs (call, half_res);
3400 gimple_call_set_nothrow (call, true);
3401 new_stmt = call;
3402 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3403 if ((i & 1) == 0)
3405 prev_res = half_res;
3406 continue;
3408 new_temp = make_ssa_name (vec_dest);
3409 new_stmt = gimple_build_assign (new_temp, convert_code,
3410 prev_res, half_res);
3412 else
3414 gcall *call;
3415 if (ifn != IFN_LAST)
3416 call = gimple_build_call_internal_vec (ifn, vargs);
3417 else
3418 call = gimple_build_call_vec (fndecl, vargs);
3419 new_temp = make_ssa_name (vec_dest, call);
3420 gimple_call_set_lhs (call, new_temp);
3421 gimple_call_set_nothrow (call, true);
3422 new_stmt = call;
3424 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3425 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3428 for (i = 0; i < nargs; i++)
3430 vec<tree> vec_oprndsi = vec_defs[i];
3431 vec_oprndsi.release ();
3433 continue;
3436 for (i = 0; i < nargs; i++)
3438 op = gimple_call_arg (stmt, i);
3439 if (j == 0)
3440 vec_oprnd0
3441 = vect_get_vec_def_for_operand (op, stmt);
3442 else
3444 vec_oprnd0 = gimple_call_arg (new_stmt, i);
3445 vec_oprnd0
3446 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
3449 vargs.quick_push (vec_oprnd0);
3452 if (gimple_call_internal_p (stmt)
3453 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
3455 tree cst = build_index_vector (vectype_out, j * nunits_out, 1);
3456 tree new_var
3457 = vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_");
3458 gimple *init_stmt = gimple_build_assign (new_var, cst);
3459 vect_init_vector_1 (stmt, init_stmt, NULL);
3460 new_temp = make_ssa_name (vec_dest);
3461 new_stmt = gimple_build_assign (new_temp, new_var);
3463 else if (modifier == NARROW)
3465 tree half_res = make_ssa_name (vectype_in);
3466 gcall *call = gimple_build_call_internal_vec (ifn, vargs);
3467 gimple_call_set_lhs (call, half_res);
3468 gimple_call_set_nothrow (call, true);
3469 new_stmt = call;
3470 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3471 if ((j & 1) == 0)
3473 prev_res = half_res;
3474 continue;
3476 new_temp = make_ssa_name (vec_dest);
3477 new_stmt = gimple_build_assign (new_temp, convert_code,
3478 prev_res, half_res);
3480 else
3482 gcall *call;
3483 if (ifn != IFN_LAST)
3484 call = gimple_build_call_internal_vec (ifn, vargs);
3485 else
3486 call = gimple_build_call_vec (fndecl, vargs);
3487 new_temp = make_ssa_name (vec_dest, new_stmt);
3488 gimple_call_set_lhs (call, new_temp);
3489 gimple_call_set_nothrow (call, true);
3490 new_stmt = call;
3492 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3494 if (j == (modifier == NARROW ? 1 : 0))
3495 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3496 else
3497 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3499 prev_stmt_info = vinfo_for_stmt (new_stmt);
3502 else if (modifier == NARROW)
3504 for (j = 0; j < ncopies; ++j)
3506 /* Build argument list for the vectorized call. */
3507 if (j == 0)
3508 vargs.create (nargs * 2);
3509 else
3510 vargs.truncate (0);
3512 if (slp_node)
3514 auto_vec<vec<tree> > vec_defs (nargs);
3515 vec<tree> vec_oprnds0;
3517 for (i = 0; i < nargs; i++)
3518 vargs.quick_push (gimple_call_arg (stmt, i));
3519 vect_get_slp_defs (vargs, slp_node, &vec_defs);
3520 vec_oprnds0 = vec_defs[0];
3522 /* Arguments are ready. Create the new vector stmt. */
3523 for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
3525 size_t k;
3526 vargs.truncate (0);
3527 for (k = 0; k < nargs; k++)
3529 vec<tree> vec_oprndsk = vec_defs[k];
3530 vargs.quick_push (vec_oprndsk[i]);
3531 vargs.quick_push (vec_oprndsk[i + 1]);
3533 gcall *call;
3534 if (ifn != IFN_LAST)
3535 call = gimple_build_call_internal_vec (ifn, vargs);
3536 else
3537 call = gimple_build_call_vec (fndecl, vargs);
3538 new_temp = make_ssa_name (vec_dest, call);
3539 gimple_call_set_lhs (call, new_temp);
3540 gimple_call_set_nothrow (call, true);
3541 new_stmt = call;
3542 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3543 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3546 for (i = 0; i < nargs; i++)
3548 vec<tree> vec_oprndsi = vec_defs[i];
3549 vec_oprndsi.release ();
3551 continue;
3554 for (i = 0; i < nargs; i++)
3556 op = gimple_call_arg (stmt, i);
3557 if (j == 0)
3559 vec_oprnd0
3560 = vect_get_vec_def_for_operand (op, stmt);
3561 vec_oprnd1
3562 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
3564 else
3566 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
3567 vec_oprnd0
3568 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
3569 vec_oprnd1
3570 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
3573 vargs.quick_push (vec_oprnd0);
3574 vargs.quick_push (vec_oprnd1);
3577 new_stmt = gimple_build_call_vec (fndecl, vargs);
3578 new_temp = make_ssa_name (vec_dest, new_stmt);
3579 gimple_call_set_lhs (new_stmt, new_temp);
3580 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3582 if (j == 0)
3583 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3584 else
3585 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3587 prev_stmt_info = vinfo_for_stmt (new_stmt);
3590 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3592 else
3593 /* No current target implements this case. */
3594 return false;
3596 vargs.release ();
3598 /* The call in STMT might prevent it from being removed in dce.
3599 We however cannot remove it here, due to the way the ssa name
3600 it defines is mapped to the new definition. So just replace
3601 rhs of the statement with something harmless. */
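/* For example (purely illustrative GIMPLE, not taken from a real
   testcase): if the scalar statement was

     x_5 = some_fn (a_3);

   it is kept but rewritten as

     x_5 = 0;

   which is trivially dead once every use of x_5 has been replaced by
   the vectorized results.  */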
3603 if (slp_node)
3604 return true;
3606 type = TREE_TYPE (scalar_dest);
3607 if (is_pattern_stmt_p (stmt_info))
3608 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
3609 else
3610 lhs = gimple_call_lhs (stmt);
3612 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
3613 set_vinfo_for_stmt (new_stmt, stmt_info);
3614 set_vinfo_for_stmt (stmt, NULL);
3615 STMT_VINFO_STMT (stmt_info) = new_stmt;
3616 gsi_replace (gsi, new_stmt, false);
3618 return true;
3622 struct simd_call_arg_info
3624 tree vectype;
3625 tree op;
3626 HOST_WIDE_INT linear_step;
3627 enum vect_def_type dt;
3628 unsigned int align;
3629 bool simd_lane_linear;
3632 /* Helper function of vectorizable_simd_clone_call. If OP, an SSA_NAME,
3633 is linear within a simd lane (but not within the whole loop), note it in
3634 *ARGINFO. */
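/* A minimal sketch of the pattern recognized here (hypothetical SSA
   names, a byte step of 4):

     _1 = GOMP_SIMD_LANE (simduid.0_9);
     _2 = (sizetype) _1;
     _3 = _2 * 4;
     op_4 = &base + _3;

   OP_4 is not an affine IV of the whole loop, but within one simd
   lane it advances from &base in steps of 4, which is what gets
   recorded in *ARGINFO.  */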
3636 static void
3637 vect_simd_lane_linear (tree op, struct loop *loop,
3638 struct simd_call_arg_info *arginfo)
3640 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
3642 if (!is_gimple_assign (def_stmt)
3643 || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
3644 || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
3645 return;
3647 tree base = gimple_assign_rhs1 (def_stmt);
3648 HOST_WIDE_INT linear_step = 0;
3649 tree v = gimple_assign_rhs2 (def_stmt);
3650 while (TREE_CODE (v) == SSA_NAME)
3652 tree t;
3653 def_stmt = SSA_NAME_DEF_STMT (v);
3654 if (is_gimple_assign (def_stmt))
3655 switch (gimple_assign_rhs_code (def_stmt))
3657 case PLUS_EXPR:
3658 t = gimple_assign_rhs2 (def_stmt);
3659 if (linear_step || TREE_CODE (t) != INTEGER_CST)
3660 return;
3661 base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
3662 v = gimple_assign_rhs1 (def_stmt);
3663 continue;
3664 case MULT_EXPR:
3665 t = gimple_assign_rhs2 (def_stmt);
3666 if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
3667 return;
3668 linear_step = tree_to_shwi (t);
3669 v = gimple_assign_rhs1 (def_stmt);
3670 continue;
3671 CASE_CONVERT:
3672 t = gimple_assign_rhs1 (def_stmt);
3673 if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
3674 || (TYPE_PRECISION (TREE_TYPE (v))
3675 < TYPE_PRECISION (TREE_TYPE (t))))
3676 return;
3677 if (!linear_step)
3678 linear_step = 1;
3679 v = t;
3680 continue;
3681 default:
3682 return;
3684 else if (gimple_call_internal_p (def_stmt, IFN_GOMP_SIMD_LANE)
3685 && loop->simduid
3686 && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
3687 && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
3688 == loop->simduid))
3690 if (!linear_step)
3691 linear_step = 1;
3692 arginfo->linear_step = linear_step;
3693 arginfo->op = base;
3694 arginfo->simd_lane_linear = true;
3695 return;
3700 /* Return the number of elements in vector type VECTYPE, which is associated
3701 with a SIMD clone. At present these vectors always have a constant
3702 length. */
3704 static unsigned HOST_WIDE_INT
3705 simd_clone_subparts (tree vectype)
3707 return TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
3710 /* Function vectorizable_simd_clone_call.
3712 Check if STMT performs a function call that can be vectorized
3713 by calling a simd clone of the function.
3714 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3715 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3716 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
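/* As a hedged illustration of the kind of call handled here (the
   clone name below is only an example of what the target's vector
   ABI mangling might produce):

     #pragma omp declare simd
     int foo (int x);
     ...
     for (i = 0; i < n; i++)
       a[i] = foo (b[i]);

   Given a usable simd clone, the scalar call conceptually becomes

     vect_a = _ZGVbN4v_foo (vect_b);

   with each argument prepared below according to the clone's
   vector/uniform/linear classification.  */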
3718 static bool
3719 vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
3720 gimple **vec_stmt, slp_tree slp_node,
3721 stmt_vector_for_cost *)
3723 tree vec_dest;
3724 tree scalar_dest;
3725 tree op, type;
3726 tree vec_oprnd0 = NULL_TREE;
3727 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
3728 tree vectype;
3729 unsigned int nunits;
3730 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3731 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3732 vec_info *vinfo = stmt_info->vinfo;
3733 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
3734 tree fndecl, new_temp;
3735 gimple *def_stmt;
3736 gimple *new_stmt = NULL;
3737 int ncopies, j;
3738 auto_vec<simd_call_arg_info> arginfo;
3739 vec<tree> vargs = vNULL;
3740 size_t i, nargs;
3741 tree lhs, rtype, ratype;
3742 vec<constructor_elt, va_gc> *ret_ctor_elts = NULL;
3744 /* Is STMT a vectorizable call? */
3745 if (!is_gimple_call (stmt))
3746 return false;
3748 fndecl = gimple_call_fndecl (stmt);
3749 if (fndecl == NULL_TREE)
3750 return false;
3752 struct cgraph_node *node = cgraph_node::get (fndecl);
3753 if (node == NULL || node->simd_clones == NULL)
3754 return false;
3756 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3757 return false;
3759 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
3760 && ! vec_stmt)
3761 return false;
3763 if (gimple_call_lhs (stmt)
3764 && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
3765 return false;
3767 gcc_checking_assert (!stmt_can_throw_internal (stmt));
3769 vectype = STMT_VINFO_VECTYPE (stmt_info);
3771 if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
3772 return false;
3774 /* FORNOW */
3775 if (slp_node)
3776 return false;
3778 /* Process function arguments. */
3779 nargs = gimple_call_num_args (stmt);
3781 /* Bail out if the function has zero arguments. */
3782 if (nargs == 0)
3783 return false;
3785 arginfo.reserve (nargs, true);
3787 for (i = 0; i < nargs; i++)
3789 simd_call_arg_info thisarginfo;
3790 affine_iv iv;
3792 thisarginfo.linear_step = 0;
3793 thisarginfo.align = 0;
3794 thisarginfo.op = NULL_TREE;
3795 thisarginfo.simd_lane_linear = false;
3797 op = gimple_call_arg (stmt, i);
3798 if (!vect_is_simple_use (op, vinfo, &def_stmt, &thisarginfo.dt,
3799 &thisarginfo.vectype)
3800 || thisarginfo.dt == vect_uninitialized_def)
3802 if (dump_enabled_p ())
3803 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3804 "use not simple.\n");
3805 return false;
3808 if (thisarginfo.dt == vect_constant_def
3809 || thisarginfo.dt == vect_external_def)
3810 gcc_assert (thisarginfo.vectype == NULL_TREE);
3811 else
3812 gcc_assert (thisarginfo.vectype != NULL_TREE);
3814 /* For linear arguments, the analyze phase should have saved
3815 the base and step in STMT_VINFO_SIMD_CLONE_INFO. */
3816 if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
3817 && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
3819 gcc_assert (vec_stmt);
3820 thisarginfo.linear_step
3821 = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
3822 thisarginfo.op
3823 = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
3824 thisarginfo.simd_lane_linear
3825 = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
3826 == boolean_true_node);
3827 /* If loop has been peeled for alignment, we need to adjust it. */
3828 tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
3829 tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
3830 if (n1 != n2 && !thisarginfo.simd_lane_linear)
3832 tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
3833 tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
3834 tree opt = TREE_TYPE (thisarginfo.op);
3835 bias = fold_convert (TREE_TYPE (step), bias);
3836 bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
3837 thisarginfo.op
3838 = fold_build2 (POINTER_TYPE_P (opt)
3839 ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
3840 thisarginfo.op, bias);
3843 else if (!vec_stmt
3844 && thisarginfo.dt != vect_constant_def
3845 && thisarginfo.dt != vect_external_def
3846 && loop_vinfo
3847 && TREE_CODE (op) == SSA_NAME
3848 && simple_iv (loop, loop_containing_stmt (stmt), op,
3849 &iv, false)
3850 && tree_fits_shwi_p (iv.step))
3852 thisarginfo.linear_step = tree_to_shwi (iv.step);
3853 thisarginfo.op = iv.base;
3855 else if ((thisarginfo.dt == vect_constant_def
3856 || thisarginfo.dt == vect_external_def)
3857 && POINTER_TYPE_P (TREE_TYPE (op)))
3858 thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
3859 /* Addresses of array elements indexed by GOMP_SIMD_LANE are
3860 linear too. */
3861 if (POINTER_TYPE_P (TREE_TYPE (op))
3862 && !thisarginfo.linear_step
3863 && !vec_stmt
3864 && thisarginfo.dt != vect_constant_def
3865 && thisarginfo.dt != vect_external_def
3866 && loop_vinfo
3867 && !slp_node
3868 && TREE_CODE (op) == SSA_NAME)
3869 vect_simd_lane_linear (op, loop, &thisarginfo);
3871 arginfo.quick_push (thisarginfo);
3874 unsigned HOST_WIDE_INT vf;
3875 if (!LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
3877 if (dump_enabled_p ())
3878 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3879 "not considering SIMD clones; not yet supported"
3880 " for variable-width vectors.\n");
3881 return false;
3884 unsigned int badness = 0;
3885 struct cgraph_node *bestn = NULL;
3886 if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
3887 bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
3888 else
3889 for (struct cgraph_node *n = node->simd_clones; n != NULL;
3890 n = n->simdclone->next_clone)
3892 unsigned int this_badness = 0;
3893 if (n->simdclone->simdlen > vf
3894 || n->simdclone->nargs != nargs)
3895 continue;
3896 if (n->simdclone->simdlen < vf)
3897 this_badness += (exact_log2 (vf)
3898 - exact_log2 (n->simdclone->simdlen)) * 1024;
3899 if (n->simdclone->inbranch)
3900 this_badness += 2048;
3901 int target_badness = targetm.simd_clone.usable (n);
3902 if (target_badness < 0)
3903 continue;
3904 this_badness += target_badness * 512;
3905 /* FORNOW: Have to add code to add the mask argument. */
3906 if (n->simdclone->inbranch)
3907 continue;
3908 for (i = 0; i < nargs; i++)
3910 switch (n->simdclone->args[i].arg_type)
3912 case SIMD_CLONE_ARG_TYPE_VECTOR:
3913 if (!useless_type_conversion_p
3914 (n->simdclone->args[i].orig_type,
3915 TREE_TYPE (gimple_call_arg (stmt, i))))
3916 i = -1;
3917 else if (arginfo[i].dt == vect_constant_def
3918 || arginfo[i].dt == vect_external_def
3919 || arginfo[i].linear_step)
3920 this_badness += 64;
3921 break;
3922 case SIMD_CLONE_ARG_TYPE_UNIFORM:
3923 if (arginfo[i].dt != vect_constant_def
3924 && arginfo[i].dt != vect_external_def)
3925 i = -1;
3926 break;
3927 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
3928 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
3929 if (arginfo[i].dt == vect_constant_def
3930 || arginfo[i].dt == vect_external_def
3931 || (arginfo[i].linear_step
3932 != n->simdclone->args[i].linear_step))
3933 i = -1;
3934 break;
3935 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
3936 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
3937 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
3938 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
3939 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
3940 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
3941 /* FORNOW */
3942 i = -1;
3943 break;
3944 case SIMD_CLONE_ARG_TYPE_MASK:
3945 gcc_unreachable ();
3947 if (i == (size_t) -1)
3948 break;
3949 if (n->simdclone->args[i].alignment > arginfo[i].align)
3951 i = -1;
3952 break;
3954 if (arginfo[i].align)
3955 this_badness += (exact_log2 (arginfo[i].align)
3956 - exact_log2 (n->simdclone->args[i].alignment));
3958 if (i == (size_t) -1)
3959 continue;
3960 if (bestn == NULL || this_badness < badness)
3962 bestn = n;
3963 badness = this_badness;
3967 if (bestn == NULL)
3968 return false;
3970 for (i = 0; i < nargs; i++)
3971 if ((arginfo[i].dt == vect_constant_def
3972 || arginfo[i].dt == vect_external_def)
3973 && bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
3975 arginfo[i].vectype
3976 = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
3977 i)));
3978 if (arginfo[i].vectype == NULL
3979 || (simd_clone_subparts (arginfo[i].vectype)
3980 > bestn->simdclone->simdlen))
3981 return false;
3984 fndecl = bestn->decl;
3985 nunits = bestn->simdclone->simdlen;
3986 ncopies = vf / nunits;
3988 /* If the function isn't const, only allow it in simd loops where the user
3989 has asserted that at least nunits consecutive iterations can be
3990 performed using SIMD instructions. */
3991 if ((loop == NULL || (unsigned) loop->safelen < nunits)
3992 && gimple_vuse (stmt))
3993 return false;
3995 /* Sanity check: make sure that at least one copy of the vectorized stmt
3996 needs to be generated. */
3997 gcc_assert (ncopies >= 1);
3999 if (!vec_stmt) /* transformation not required. */
4001 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
4002 for (i = 0; i < nargs; i++)
4003 if ((bestn->simdclone->args[i].arg_type
4004 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
4005 || (bestn->simdclone->args[i].arg_type
4006 == SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP))
4008 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
4009 + 1);
4010 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
4011 tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
4012 ? size_type_node : TREE_TYPE (arginfo[i].op);
4013 tree ls = build_int_cst (lst, arginfo[i].linear_step);
4014 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
4015 tree sll = arginfo[i].simd_lane_linear
4016 ? boolean_true_node : boolean_false_node;
4017 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
4019 STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
4020 DUMP_VECT_SCOPE ("vectorizable_simd_clone_call");
4021 /* vect_model_simple_cost (stmt_info, ncopies, dt, slp_node, cost_vec); */
4022 return true;
4025 /* Transform. */
4027 if (dump_enabled_p ())
4028 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
4030 /* Handle def. */
4031 scalar_dest = gimple_call_lhs (stmt);
4032 vec_dest = NULL_TREE;
4033 rtype = NULL_TREE;
4034 ratype = NULL_TREE;
4035 if (scalar_dest)
4037 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4038 rtype = TREE_TYPE (TREE_TYPE (fndecl));
4039 if (TREE_CODE (rtype) == ARRAY_TYPE)
4041 ratype = rtype;
4042 rtype = TREE_TYPE (ratype);
4046 prev_stmt_info = NULL;
4047 for (j = 0; j < ncopies; ++j)
4049 /* Build argument list for the vectorized call. */
4050 if (j == 0)
4051 vargs.create (nargs);
4052 else
4053 vargs.truncate (0);
4055 for (i = 0; i < nargs; i++)
4057 unsigned int k, l, m, o;
4058 tree atype;
4059 op = gimple_call_arg (stmt, i);
4060 switch (bestn->simdclone->args[i].arg_type)
4062 case SIMD_CLONE_ARG_TYPE_VECTOR:
4063 atype = bestn->simdclone->args[i].vector_type;
4064 o = nunits / simd_clone_subparts (atype);
4065 for (m = j * o; m < (j + 1) * o; m++)
4067 if (simd_clone_subparts (atype)
4068 < simd_clone_subparts (arginfo[i].vectype))
4070 poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
4071 k = (simd_clone_subparts (arginfo[i].vectype)
4072 / simd_clone_subparts (atype));
4073 gcc_assert ((k & (k - 1)) == 0);
4074 if (m == 0)
4075 vec_oprnd0
4076 = vect_get_vec_def_for_operand (op, stmt);
4077 else
4079 vec_oprnd0 = arginfo[i].op;
4080 if ((m & (k - 1)) == 0)
4081 vec_oprnd0
4082 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
4083 vec_oprnd0);
4085 arginfo[i].op = vec_oprnd0;
4086 vec_oprnd0
4087 = build3 (BIT_FIELD_REF, atype, vec_oprnd0,
4088 bitsize_int (prec),
4089 bitsize_int ((m & (k - 1)) * prec));
4090 new_stmt
4091 = gimple_build_assign (make_ssa_name (atype),
4092 vec_oprnd0);
4093 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4094 vargs.safe_push (gimple_assign_lhs (new_stmt));
4096 else
4098 k = (simd_clone_subparts (atype)
4099 / simd_clone_subparts (arginfo[i].vectype));
4100 gcc_assert ((k & (k - 1)) == 0);
4101 vec<constructor_elt, va_gc> *ctor_elts;
4102 if (k != 1)
4103 vec_alloc (ctor_elts, k);
4104 else
4105 ctor_elts = NULL;
4106 for (l = 0; l < k; l++)
4108 if (m == 0 && l == 0)
4109 vec_oprnd0
4110 = vect_get_vec_def_for_operand (op, stmt);
4111 else
4112 vec_oprnd0
4113 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
4114 arginfo[i].op);
4115 arginfo[i].op = vec_oprnd0;
4116 if (k == 1)
4117 break;
4118 CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
4119 vec_oprnd0);
4121 if (k == 1)
4122 vargs.safe_push (vec_oprnd0);
4123 else
4125 vec_oprnd0 = build_constructor (atype, ctor_elts);
4126 new_stmt
4127 = gimple_build_assign (make_ssa_name (atype),
4128 vec_oprnd0);
4129 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4130 vargs.safe_push (gimple_assign_lhs (new_stmt));
4134 break;
4135 case SIMD_CLONE_ARG_TYPE_UNIFORM:
4136 vargs.safe_push (op);
4137 break;
4138 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
4139 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
4140 if (j == 0)
4142 gimple_seq stmts;
4143 arginfo[i].op
4144 = force_gimple_operand (arginfo[i].op, &stmts, true,
4145 NULL_TREE);
4146 if (stmts != NULL)
4148 basic_block new_bb;
4149 edge pe = loop_preheader_edge (loop);
4150 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
4151 gcc_assert (!new_bb);
4153 if (arginfo[i].simd_lane_linear)
4155 vargs.safe_push (arginfo[i].op);
4156 break;
4158 tree phi_res = copy_ssa_name (op);
4159 gphi *new_phi = create_phi_node (phi_res, loop->header);
4160 set_vinfo_for_stmt (new_phi,
4161 new_stmt_vec_info (new_phi, loop_vinfo));
4162 add_phi_arg (new_phi, arginfo[i].op,
4163 loop_preheader_edge (loop), UNKNOWN_LOCATION);
4164 enum tree_code code
4165 = POINTER_TYPE_P (TREE_TYPE (op))
4166 ? POINTER_PLUS_EXPR : PLUS_EXPR;
4167 tree type = POINTER_TYPE_P (TREE_TYPE (op))
4168 ? sizetype : TREE_TYPE (op);
4169 widest_int cst
4170 = wi::mul (bestn->simdclone->args[i].linear_step,
4171 ncopies * nunits);
4172 tree tcst = wide_int_to_tree (type, cst);
4173 tree phi_arg = copy_ssa_name (op);
4174 new_stmt
4175 = gimple_build_assign (phi_arg, code, phi_res, tcst);
4176 gimple_stmt_iterator si = gsi_after_labels (loop->header);
4177 gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
4178 set_vinfo_for_stmt (new_stmt,
4179 new_stmt_vec_info (new_stmt, loop_vinfo));
4180 add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
4181 UNKNOWN_LOCATION);
4182 arginfo[i].op = phi_res;
4183 vargs.safe_push (phi_res);
4185 else
4187 enum tree_code code
4188 = POINTER_TYPE_P (TREE_TYPE (op))
4189 ? POINTER_PLUS_EXPR : PLUS_EXPR;
4190 tree type = POINTER_TYPE_P (TREE_TYPE (op))
4191 ? sizetype : TREE_TYPE (op);
4192 widest_int cst
4193 = wi::mul (bestn->simdclone->args[i].linear_step,
4194 j * nunits);
4195 tree tcst = wide_int_to_tree (type, cst);
4196 new_temp = make_ssa_name (TREE_TYPE (op));
4197 new_stmt = gimple_build_assign (new_temp, code,
4198 arginfo[i].op, tcst);
4199 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4200 vargs.safe_push (new_temp);
4202 break;
4203 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
4204 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
4205 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
4206 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
4207 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
4208 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
4209 default:
4210 gcc_unreachable ();
4214 new_stmt = gimple_build_call_vec (fndecl, vargs);
4215 if (vec_dest)
4217 gcc_assert (ratype || simd_clone_subparts (rtype) == nunits);
4218 if (ratype)
4219 new_temp = create_tmp_var (ratype);
4220 else if (simd_clone_subparts (vectype)
4221 == simd_clone_subparts (rtype))
4222 new_temp = make_ssa_name (vec_dest, new_stmt);
4223 else
4224 new_temp = make_ssa_name (rtype, new_stmt);
4225 gimple_call_set_lhs (new_stmt, new_temp);
4227 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4229 if (vec_dest)
4231 if (simd_clone_subparts (vectype) < nunits)
4233 unsigned int k, l;
4234 poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
4235 poly_uint64 bytes = GET_MODE_SIZE (TYPE_MODE (vectype));
4236 k = nunits / simd_clone_subparts (vectype);
4237 gcc_assert ((k & (k - 1)) == 0);
4238 for (l = 0; l < k; l++)
4240 tree t;
4241 if (ratype)
4243 t = build_fold_addr_expr (new_temp);
4244 t = build2 (MEM_REF, vectype, t,
4245 build_int_cst (TREE_TYPE (t), l * bytes));
4247 else
4248 t = build3 (BIT_FIELD_REF, vectype, new_temp,
4249 bitsize_int (prec), bitsize_int (l * prec));
4250 new_stmt
4251 = gimple_build_assign (make_ssa_name (vectype), t);
4252 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4253 if (j == 0 && l == 0)
4254 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4255 else
4256 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4258 prev_stmt_info = vinfo_for_stmt (new_stmt);
4261 if (ratype)
4262 vect_clobber_variable (stmt, gsi, new_temp);
4263 continue;
4265 else if (simd_clone_subparts (vectype) > nunits)
4267 unsigned int k = (simd_clone_subparts (vectype)
4268 / simd_clone_subparts (rtype));
4269 gcc_assert ((k & (k - 1)) == 0);
4270 if ((j & (k - 1)) == 0)
4271 vec_alloc (ret_ctor_elts, k);
4272 if (ratype)
4274 unsigned int m, o = nunits / simd_clone_subparts (rtype);
4275 for (m = 0; m < o; m++)
4277 tree tem = build4 (ARRAY_REF, rtype, new_temp,
4278 size_int (m), NULL_TREE, NULL_TREE);
4279 new_stmt
4280 = gimple_build_assign (make_ssa_name (rtype), tem);
4281 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4282 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
4283 gimple_assign_lhs (new_stmt));
4285 vect_clobber_variable (stmt, gsi, new_temp);
4287 else
4288 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
4289 if ((j & (k - 1)) != k - 1)
4290 continue;
4291 vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
4292 new_stmt
4293 = gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
4294 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4296 if ((unsigned) j == k - 1)
4297 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4298 else
4299 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4301 prev_stmt_info = vinfo_for_stmt (new_stmt);
4302 continue;
4304 else if (ratype)
4306 tree t = build_fold_addr_expr (new_temp);
4307 t = build2 (MEM_REF, vectype, t,
4308 build_int_cst (TREE_TYPE (t), 0));
4309 new_stmt
4310 = gimple_build_assign (make_ssa_name (vec_dest), t);
4311 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4312 vect_clobber_variable (stmt, gsi, new_temp);
4316 if (j == 0)
4317 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4318 else
4319 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4321 prev_stmt_info = vinfo_for_stmt (new_stmt);
4324 vargs.release ();
4326 /* The call in STMT might prevent it from being removed in dce.
4327 We however cannot remove it here, due to the way the ssa name
4328 it defines is mapped to the new definition. So just replace
4329 rhs of the statement with something harmless. */
4331 if (slp_node)
4332 return true;
4334 if (scalar_dest)
4336 type = TREE_TYPE (scalar_dest);
4337 if (is_pattern_stmt_p (stmt_info))
4338 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
4339 else
4340 lhs = gimple_call_lhs (stmt);
4341 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
4343 else
4344 new_stmt = gimple_build_nop ();
4345 set_vinfo_for_stmt (new_stmt, stmt_info);
4346 set_vinfo_for_stmt (stmt, NULL);
4347 STMT_VINFO_STMT (stmt_info) = new_stmt;
4348 gsi_replace (gsi, new_stmt, true);
4349 unlink_stmt_vdef (stmt);
4351 return true;
4355 /* Function vect_gen_widened_results_half
4357 Create a vector stmt whose code, number of operands, and result
4358 variable are CODE, OP_TYPE and VEC_DEST respectively, and whose arguments
4359 are VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at BSI.
4360 In the case that CODE is a CALL_EXPR, this means that a call to DECL
4361 needs to be created (DECL is a function-decl of a target-builtin).
4362 STMT is the original scalar stmt that we are vectorizing. */
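/* For instance (illustrative only), when widening with generic tree
   codes the two halves of a promotion are typically

     lo_1 = VEC_UNPACK_LO_EXPR <vect_in>;
     hi_2 = VEC_UNPACK_HI_EXPR <vect_in>;

   and each call to this helper emits one of the two statements.  */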
4364 static gimple *
4365 vect_gen_widened_results_half (enum tree_code code,
4366 tree decl,
4367 tree vec_oprnd0, tree vec_oprnd1, int op_type,
4368 tree vec_dest, gimple_stmt_iterator *gsi,
4369 gimple *stmt)
4371 gimple *new_stmt;
4372 tree new_temp;
4374 /* Generate half of the widened result: */
4375 if (code == CALL_EXPR)
4377 /* Target specific support */
4378 if (op_type == binary_op)
4379 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
4380 else
4381 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
4382 new_temp = make_ssa_name (vec_dest, new_stmt);
4383 gimple_call_set_lhs (new_stmt, new_temp);
4385 else
4387 /* Generic support */
4388 gcc_assert (op_type == TREE_CODE_LENGTH (code));
4389 if (op_type != binary_op)
4390 vec_oprnd1 = NULL;
4391 new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
4392 new_temp = make_ssa_name (vec_dest, new_stmt);
4393 gimple_assign_set_lhs (new_stmt, new_temp);
4395 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4397 return new_stmt;
4401 /* Get vectorized definitions for loop-based vectorization. For the first
4402 operand we call vect_get_vec_def_for_operand() (with OPRND containing
4403 scalar operand), and for the rest we get a copy with
4404 vect_get_vec_def_for_stmt_copy() using the previous vector definition
4405 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
4406 The vectors are collected into VEC_OPRNDS. */
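/* As an illustrative sketch: for a narrowing that needs four input
   vectors per result, the collected VEC_OPRNDS end up as

     { def (OPRND), copy_1, copy_2, copy_3 }

   i.e. the def obtained with vect_get_vec_def_for_operand followed by
   successive defs from vect_get_vec_def_for_stmt_copy.  */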
4408 static void
4409 vect_get_loop_based_defs (tree *oprnd, gimple *stmt, enum vect_def_type dt,
4410 vec<tree> *vec_oprnds, int multi_step_cvt)
4412 tree vec_oprnd;
4414 /* Get first vector operand. */
4415 /* All the vector operands except the very first one (that is, the scalar
4416 oprnd) are stmt copies. */
4417 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
4418 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt);
4419 else
4420 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
4422 vec_oprnds->quick_push (vec_oprnd);
4424 /* Get second vector operand. */
4425 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
4426 vec_oprnds->quick_push (vec_oprnd);
4428 *oprnd = vec_oprnd;
4430 /* For conversion in multiple steps, continue to get operands
4431 recursively. */
4432 if (multi_step_cvt)
4433 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
4437 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
4438 For multi-step conversions store the resulting vectors and call the function
4439 recursively. */
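/* Sketch of the recursion for one intermediate type (hypothetical SSA
   names, plain integer truncation):

     tmp_0 = VEC_PACK_TRUNC_EXPR <in_0, in_1>;
     tmp_1 = VEC_PACK_TRUNC_EXPR <in_2, in_3>;

   tmp_0 and tmp_1 are stored back into VEC_OPRNDS, and the recursive
   call then emits the final

     out_0 = VEC_PACK_TRUNC_EXPR <tmp_0, tmp_1>;  */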
4441 static void
4442 vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
4443 int multi_step_cvt, gimple *stmt,
4444 vec<tree> vec_dsts,
4445 gimple_stmt_iterator *gsi,
4446 slp_tree slp_node, enum tree_code code,
4447 stmt_vec_info *prev_stmt_info)
4449 unsigned int i;
4450 tree vop0, vop1, new_tmp, vec_dest;
4451 gimple *new_stmt;
4452 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4454 vec_dest = vec_dsts.pop ();
4456 for (i = 0; i < vec_oprnds->length (); i += 2)
4458 /* Create demotion operation. */
4459 vop0 = (*vec_oprnds)[i];
4460 vop1 = (*vec_oprnds)[i + 1];
4461 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
4462 new_tmp = make_ssa_name (vec_dest, new_stmt);
4463 gimple_assign_set_lhs (new_stmt, new_tmp);
4464 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4466 if (multi_step_cvt)
4467 /* Store the resulting vector for next recursive call. */
4468 (*vec_oprnds)[i/2] = new_tmp;
4469 else
4471 /* This is the last step of the conversion sequence. Store the
4472 vectors in SLP_NODE or in vector info of the scalar statement
4473 (or in STMT_VINFO_RELATED_STMT chain). */
4474 if (slp_node)
4475 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4476 else
4478 if (!*prev_stmt_info)
4479 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
4480 else
4481 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
4483 *prev_stmt_info = vinfo_for_stmt (new_stmt);
4488 /* For multi-step demotion operations we first generate demotion operations
4489 from the source type to the intermediate types, and then combine the
4490 results (stored in VEC_OPRNDS) with a demotion operation to the destination
4491 type. */
4492 if (multi_step_cvt)
4494 /* At each level of recursion we have half of the operands we had at the
4495 previous level. */
4496 vec_oprnds->truncate ((i+1)/2);
4497 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
4498 stmt, vec_dsts, gsi, slp_node,
4499 VEC_PACK_TRUNC_EXPR,
4500 prev_stmt_info);
4503 vec_dsts.quick_push (vec_dest);
4507 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
4508 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
4509 the resulting vectors and call the function recursively. */
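/* Illustrative sketch (hypothetical SSA names): for a widening
   multiply, each pair of input vectors yields a lo/hi pair such as

     lo_1 = VEC_WIDEN_MULT_LO_EXPR <va_0, vb_0>;
     hi_2 = VEC_WIDEN_MULT_HI_EXPR <va_0, vb_0>;

   and the results replace the contents of VEC_OPRNDS0, ready for the
   caller's next step if the conversion is multi-step.  */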
4511 static void
4512 vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
4513 vec<tree> *vec_oprnds1,
4514 gimple *stmt, tree vec_dest,
4515 gimple_stmt_iterator *gsi,
4516 enum tree_code code1,
4517 enum tree_code code2, tree decl1,
4518 tree decl2, int op_type)
4520 int i;
4521 tree vop0, vop1, new_tmp1, new_tmp2;
4522 gimple *new_stmt1, *new_stmt2;
4523 vec<tree> vec_tmp = vNULL;
4525 vec_tmp.create (vec_oprnds0->length () * 2);
4526 FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
4528 if (op_type == binary_op)
4529 vop1 = (*vec_oprnds1)[i];
4530 else
4531 vop1 = NULL_TREE;
4533 /* Generate the two halves of promotion operation. */
4534 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
4535 op_type, vec_dest, gsi, stmt);
4536 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
4537 op_type, vec_dest, gsi, stmt);
4538 if (is_gimple_call (new_stmt1))
4540 new_tmp1 = gimple_call_lhs (new_stmt1);
4541 new_tmp2 = gimple_call_lhs (new_stmt2);
4543 else
4545 new_tmp1 = gimple_assign_lhs (new_stmt1);
4546 new_tmp2 = gimple_assign_lhs (new_stmt2);
4549 /* Store the results for the next step. */
4550 vec_tmp.quick_push (new_tmp1);
4551 vec_tmp.quick_push (new_tmp2);
4554 vec_oprnds0->release ();
4555 *vec_oprnds0 = vec_tmp;
4559 /* Check if STMT performs a conversion operation, that can be vectorized.
4560 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4561 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4562 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
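/* A minimal illustrative example of a widening conversion handled
   here (assuming int is twice as wide as short on the target):

     short b[N]; int a[N];
     for (i = 0; i < N; i++)
       a[i] = (int) b[i];

   Each vector of shorts is widened into two vectors of ints (the
   WIDEN modifier below); the opposite direction is the NARROW case,
   and same-width conversions such as int <-> float fall under NONE.  */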
4564 static bool
4565 vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
4566 gimple **vec_stmt, slp_tree slp_node,
4567 stmt_vector_for_cost *cost_vec)
4569 tree vec_dest;
4570 tree scalar_dest;
4571 tree op0, op1 = NULL_TREE;
4572 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
4573 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4574 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4575 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
4576 enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
4577 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
4578 tree new_temp;
4579 gimple *def_stmt;
4580 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4581 int ndts = 2;
4582 gimple *new_stmt = NULL;
4583 stmt_vec_info prev_stmt_info;
4584 poly_uint64 nunits_in;
4585 poly_uint64 nunits_out;
4586 tree vectype_out, vectype_in;
4587 int ncopies, i, j;
4588 tree lhs_type, rhs_type;
4589 enum { NARROW, NONE, WIDEN } modifier;
4590 vec<tree> vec_oprnds0 = vNULL;
4591 vec<tree> vec_oprnds1 = vNULL;
4592 tree vop0;
4593 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4594 vec_info *vinfo = stmt_info->vinfo;
4595 int multi_step_cvt = 0;
4596 vec<tree> interm_types = vNULL;
4597 tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
4598 int op_type;
4599 unsigned short fltsz;
4601 /* Is STMT a vectorizable conversion? */
4603 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4604 return false;
4606 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4607 && ! vec_stmt)
4608 return false;
4610 if (!is_gimple_assign (stmt))
4611 return false;
4613 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4614 return false;
4616 code = gimple_assign_rhs_code (stmt);
4617 if (!CONVERT_EXPR_CODE_P (code)
4618 && code != FIX_TRUNC_EXPR
4619 && code != FLOAT_EXPR
4620 && code != WIDEN_MULT_EXPR
4621 && code != WIDEN_LSHIFT_EXPR)
4622 return false;
4624 op_type = TREE_CODE_LENGTH (code);
4626 /* Check types of lhs and rhs. */
4627 scalar_dest = gimple_assign_lhs (stmt);
4628 lhs_type = TREE_TYPE (scalar_dest);
4629 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4631 op0 = gimple_assign_rhs1 (stmt);
4632 rhs_type = TREE_TYPE (op0);
4634 if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
4635 && !((INTEGRAL_TYPE_P (lhs_type)
4636 && INTEGRAL_TYPE_P (rhs_type))
4637 || (SCALAR_FLOAT_TYPE_P (lhs_type)
4638 && SCALAR_FLOAT_TYPE_P (rhs_type))))
4639 return false;
4641 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
4642 && ((INTEGRAL_TYPE_P (lhs_type)
4643 && !type_has_mode_precision_p (lhs_type))
4644 || (INTEGRAL_TYPE_P (rhs_type)
4645 && !type_has_mode_precision_p (rhs_type))))
4647 if (dump_enabled_p ())
4648 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4649 "type conversion to/from bit-precision unsupported."
4650 "\n");
4651 return false;
4654 /* Check the operands of the operation. */
4655 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype_in))
4657 if (dump_enabled_p ())
4658 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4659 "use not simple.\n");
4660 return false;
4662 if (op_type == binary_op)
4664 bool ok;
4666 op1 = gimple_assign_rhs2 (stmt);
4667 gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
4668 /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
4669 OP1. */
4670 if (CONSTANT_CLASS_P (op0))
4671 ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &vectype_in);
4672 else
4673 ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]);
4675 if (!ok)
4677 if (dump_enabled_p ())
4678 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4679 "use not simple.\n");
4680 return false;
4684 /* If op0 is an external or constant def, use a vector type of
4685 the same size as the output vector type. */
4686 if (!vectype_in)
4687 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
4688 if (vec_stmt)
4689 gcc_assert (vectype_in);
4690 if (!vectype_in)
4692 if (dump_enabled_p ())
4694 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4695 "no vectype for scalar type ");
4696 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
4697 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4700 return false;
4703 if (VECTOR_BOOLEAN_TYPE_P (vectype_out)
4704 && !VECTOR_BOOLEAN_TYPE_P (vectype_in))
4706 if (dump_enabled_p ())
4708 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4709 "can't convert between boolean and non "
4710 "boolean vectors");
4711 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
4712 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4715 return false;
4718 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
4719 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4720 if (known_eq (nunits_out, nunits_in))
4721 modifier = NONE;
4722 else if (multiple_p (nunits_out, nunits_in))
4723 modifier = NARROW;
4724 else
4726 gcc_checking_assert (multiple_p (nunits_in, nunits_out));
4727 modifier = WIDEN;
4730 /* Multiple types in SLP are handled by creating the appropriate number of
4731 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4732 case of SLP. */
4733 if (slp_node)
4734 ncopies = 1;
4735 else if (modifier == NARROW)
4736 ncopies = vect_get_num_copies (loop_vinfo, vectype_out);
4737 else
4738 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
4740 /* Sanity check: make sure that at least one copy of the vectorized stmt
4741 needs to be generated. */
4742 gcc_assert (ncopies >= 1);
4744 bool found_mode = false;
4745 scalar_mode lhs_mode = SCALAR_TYPE_MODE (lhs_type);
4746 scalar_mode rhs_mode = SCALAR_TYPE_MODE (rhs_type);
4747 opt_scalar_mode rhs_mode_iter;
4749 /* Supportable by target? */
4750 switch (modifier)
4752 case NONE:
4753 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
4754 return false;
4755 if (supportable_convert_operation (code, vectype_out, vectype_in,
4756 &decl1, &code1))
4757 break;
4758 /* FALLTHRU */
4759 unsupported:
4760 if (dump_enabled_p ())
4761 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4762 "conversion not supported by target.\n");
4763 return false;
4765 case WIDEN:
4766 if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
4767 &code1, &code2, &multi_step_cvt,
4768 &interm_types))
4770 /* Binary widening operation can only be supported directly by the
4771 architecture. */
4772 gcc_assert (!(multi_step_cvt && op_type == binary_op));
4773 break;
4776 if (code != FLOAT_EXPR
4777 || GET_MODE_SIZE (lhs_mode) <= GET_MODE_SIZE (rhs_mode))
4778 goto unsupported;
4780 fltsz = GET_MODE_SIZE (lhs_mode);
4781 FOR_EACH_2XWIDER_MODE (rhs_mode_iter, rhs_mode)
4783 rhs_mode = rhs_mode_iter.require ();
4784 if (GET_MODE_SIZE (rhs_mode) > fltsz)
4785 break;
4787 cvt_type
4788 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
4789 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
4790 if (cvt_type == NULL_TREE)
4791 goto unsupported;
4793 if (GET_MODE_SIZE (rhs_mode) == fltsz)
4795 if (!supportable_convert_operation (code, vectype_out,
4796 cvt_type, &decl1, &codecvt1))
4797 goto unsupported;
4799 else if (!supportable_widening_operation (code, stmt, vectype_out,
4800 cvt_type, &codecvt1,
4801 &codecvt2, &multi_step_cvt,
4802 &interm_types))
4803 continue;
4804 else
4805 gcc_assert (multi_step_cvt == 0);
4807 if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
4808 vectype_in, &code1, &code2,
4809 &multi_step_cvt, &interm_types))
4811 found_mode = true;
4812 break;
4816 if (!found_mode)
4817 goto unsupported;
4819 if (GET_MODE_SIZE (rhs_mode) == fltsz)
4820 codecvt2 = ERROR_MARK;
4821 else
4823 multi_step_cvt++;
4824 interm_types.safe_push (cvt_type);
4825 cvt_type = NULL_TREE;
4827 break;
4829 case NARROW:
4830 gcc_assert (op_type == unary_op);
4831 if (supportable_narrowing_operation (code, vectype_out, vectype_in,
4832 &code1, &multi_step_cvt,
4833 &interm_types))
4834 break;
4836 if (code != FIX_TRUNC_EXPR
4837 || GET_MODE_SIZE (lhs_mode) >= GET_MODE_SIZE (rhs_mode))
4838 goto unsupported;
4840 cvt_type
4841 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
4842 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
4843 if (cvt_type == NULL_TREE)
4844 goto unsupported;
4845 if (!supportable_convert_operation (code, cvt_type, vectype_in,
4846 &decl1, &codecvt1))
4847 goto unsupported;
4848 if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
4849 &code1, &multi_step_cvt,
4850 &interm_types))
4851 break;
4852 goto unsupported;
4854 default:
4855 gcc_unreachable ();
4858 if (!vec_stmt) /* transformation not required. */
4860 DUMP_VECT_SCOPE ("vectorizable_conversion");
4861 if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
4863 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
4864 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node,
4865 cost_vec);
4867 else if (modifier == NARROW)
4869 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
4870 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt,
4871 cost_vec);
4873 else
4875 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
4876 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt,
4877 cost_vec);
4879 interm_types.release ();
4880 return true;
4883 /* Transform. */
4884 if (dump_enabled_p ())
4885 dump_printf_loc (MSG_NOTE, vect_location,
4886 "transform conversion. ncopies = %d.\n", ncopies);
4888 if (op_type == binary_op)
4890 if (CONSTANT_CLASS_P (op0))
4891 op0 = fold_convert (TREE_TYPE (op1), op0);
4892 else if (CONSTANT_CLASS_P (op1))
4893 op1 = fold_convert (TREE_TYPE (op0), op1);
4896 /* In case of multi-step conversion, we first generate conversion operations
4897 to the intermediate types, and then from those types to the final one.
4898 We create vector destinations for the intermediate type (TYPES) received
4899 from supportable_*_operation, and store them in the correct order
4900 for future use in vect_create_vectorized_*_stmts (). */
4901 auto_vec<tree> vec_dsts (multi_step_cvt + 1);
4902 vec_dest = vect_create_destination_var (scalar_dest,
4903 (cvt_type && modifier == WIDEN)
4904 ? cvt_type : vectype_out);
4905 vec_dsts.quick_push (vec_dest);
4907 if (multi_step_cvt)
4909 for (i = interm_types.length () - 1;
4910 interm_types.iterate (i, &intermediate_type); i--)
4912 vec_dest = vect_create_destination_var (scalar_dest,
4913 intermediate_type);
4914 vec_dsts.quick_push (vec_dest);
4918 if (cvt_type)
4919 vec_dest = vect_create_destination_var (scalar_dest,
4920 modifier == WIDEN
4921 ? vectype_out : cvt_type);
4923 if (!slp_node)
4925 if (modifier == WIDEN)
4927 vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
4928 if (op_type == binary_op)
4929 vec_oprnds1.create (1);
4931 else if (modifier == NARROW)
4932 vec_oprnds0.create (
4933 2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
4935 else if (code == WIDEN_LSHIFT_EXPR)
4936 vec_oprnds1.create (slp_node->vec_stmts_size);
4938 last_oprnd = op0;
4939 prev_stmt_info = NULL;
4940 switch (modifier)
4942 case NONE:
4943 for (j = 0; j < ncopies; j++)
4945 if (j == 0)
4946 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
4947 else
4948 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
4950 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4952 /* Arguments are ready. Create the new vector stmt. */
4953 if (code1 == CALL_EXPR)
4955 new_stmt = gimple_build_call (decl1, 1, vop0);
4956 new_temp = make_ssa_name (vec_dest, new_stmt);
4957 gimple_call_set_lhs (new_stmt, new_temp);
4959 else
4961 gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
4962 new_stmt = gimple_build_assign (vec_dest, code1, vop0);
4963 new_temp = make_ssa_name (vec_dest, new_stmt);
4964 gimple_assign_set_lhs (new_stmt, new_temp);
4967 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4968 if (slp_node)
4969 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4970 else
4972 if (!prev_stmt_info)
4973 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4974 else
4975 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4976 prev_stmt_info = vinfo_for_stmt (new_stmt);
4980 break;
4982 case WIDEN:
4983 /* In case the vectorization factor (VF) is bigger than the number
4984 of elements that we can fit in a vectype (nunits), we have to
4985 generate more than one vector stmt, i.e. we need to "unroll"
4986 the vector stmt by a factor VF/nunits. */
4987 for (j = 0; j < ncopies; j++)
4989 /* Handle uses. */
4990 if (j == 0)
4992 if (slp_node)
4994 if (code == WIDEN_LSHIFT_EXPR)
4996 unsigned int k;
4998 vec_oprnd1 = op1;
4999 /* Store vec_oprnd1 for every vector stmt to be created
5000 for SLP_NODE. We check during the analysis that all
5001 the shift arguments are the same. */
5002 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
5003 vec_oprnds1.quick_push (vec_oprnd1);
5005 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
5006 slp_node);
5008 else
5009 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
5010 &vec_oprnds1, slp_node);
5012 else
5014 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt);
5015 vec_oprnds0.quick_push (vec_oprnd0);
5016 if (op_type == binary_op)
5018 if (code == WIDEN_LSHIFT_EXPR)
5019 vec_oprnd1 = op1;
5020 else
5021 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt);
5022 vec_oprnds1.quick_push (vec_oprnd1);
5026 else
5028 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
5029 vec_oprnds0.truncate (0);
5030 vec_oprnds0.quick_push (vec_oprnd0);
5031 if (op_type == binary_op)
5033 if (code == WIDEN_LSHIFT_EXPR)
5034 vec_oprnd1 = op1;
5035 else
5036 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
5037 vec_oprnd1);
5038 vec_oprnds1.truncate (0);
5039 vec_oprnds1.quick_push (vec_oprnd1);
5043 /* Arguments are ready. Create the new vector stmts. */
5044 for (i = multi_step_cvt; i >= 0; i--)
5046 tree this_dest = vec_dsts[i];
5047 enum tree_code c1 = code1, c2 = code2;
5048 if (i == 0 && codecvt2 != ERROR_MARK)
5050 c1 = codecvt1;
5051 c2 = codecvt2;
5053 vect_create_vectorized_promotion_stmts (&vec_oprnds0,
5054 &vec_oprnds1,
5055 stmt, this_dest, gsi,
5056 c1, c2, decl1, decl2,
5057 op_type);
5060 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
5062 if (cvt_type)
5064 if (codecvt1 == CALL_EXPR)
5066 new_stmt = gimple_build_call (decl1, 1, vop0);
5067 new_temp = make_ssa_name (vec_dest, new_stmt);
5068 gimple_call_set_lhs (new_stmt, new_temp);
5070 else
5072 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
5073 new_temp = make_ssa_name (vec_dest);
5074 new_stmt = gimple_build_assign (new_temp, codecvt1,
5075 vop0);
5078 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5080 else
5081 new_stmt = SSA_NAME_DEF_STMT (vop0);
5083 if (slp_node)
5084 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
5085 else
5087 if (!prev_stmt_info)
5088 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
5089 else
5090 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5091 prev_stmt_info = vinfo_for_stmt (new_stmt);
5096 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
5097 break;
5099 case NARROW:
5100 /* In case the vectorization factor (VF) is bigger than the number
5101 of elements that we can fit in a vectype (nunits), we have to
5102 generate more than one vector stmt, i.e. we need to "unroll"
5103 the vector stmt by a factor VF/nunits. */
5104 for (j = 0; j < ncopies; j++)
5106 /* Handle uses. */
5107 if (slp_node)
5108 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
5109 slp_node);
5110 else
5112 vec_oprnds0.truncate (0);
5113 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
5114 vect_pow2 (multi_step_cvt) - 1);
5117 /* Arguments are ready. Create the new vector stmts. */
5118 if (cvt_type)
5119 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
5121 if (codecvt1 == CALL_EXPR)
5123 new_stmt = gimple_build_call (decl1, 1, vop0);
5124 new_temp = make_ssa_name (vec_dest, new_stmt);
5125 gimple_call_set_lhs (new_stmt, new_temp);
5127 else
5129 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
5130 new_temp = make_ssa_name (vec_dest);
5131 new_stmt = gimple_build_assign (new_temp, codecvt1,
5132 vop0);
5135 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5136 vec_oprnds0[i] = new_temp;
5139 vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
5140 stmt, vec_dsts, gsi,
5141 slp_node, code1,
5142 &prev_stmt_info);
5145 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
5146 break;
5149 vec_oprnds0.release ();
5150 vec_oprnds1.release ();
5151 interm_types.release ();
5153 return true;
5157 /* Function vectorizable_assignment.
5159 Check if STMT performs an assignment (copy) that can be vectorized.
5160 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5161 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
5162 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
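/* For example (illustrative), a same-sized no-op conversion such as

     int s[N]; unsigned int u[N];
     for (i = 0; i < N; i++)
       u[i] = (unsigned int) s[i];

   is vectorized as a plain vector copy through a VIEW_CONVERT_EXPR,
   since neither the number of elements nor the vector size changes.  */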
5164 static bool
5165 vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
5166 gimple **vec_stmt, slp_tree slp_node,
5167 stmt_vector_for_cost *cost_vec)
5169 tree vec_dest;
5170 tree scalar_dest;
5171 tree op;
5172 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5173 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5174 tree new_temp;
5175 gimple *def_stmt;
5176 enum vect_def_type dt[1] = {vect_unknown_def_type};
5177 int ndts = 1;
5178 int ncopies;
5179 int i, j;
5180 vec<tree> vec_oprnds = vNULL;
5181 tree vop;
5182 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5183 vec_info *vinfo = stmt_info->vinfo;
5184 gimple *new_stmt = NULL;
5185 stmt_vec_info prev_stmt_info = NULL;
5186 enum tree_code code;
5187 tree vectype_in;
5189 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5190 return false;
5192 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5193 && ! vec_stmt)
5194 return false;
5196 /* Is vectorizable assignment? */
5197 if (!is_gimple_assign (stmt))
5198 return false;
5200 scalar_dest = gimple_assign_lhs (stmt);
5201 if (TREE_CODE (scalar_dest) != SSA_NAME)
5202 return false;
5204 code = gimple_assign_rhs_code (stmt);
5205 if (gimple_assign_single_p (stmt)
5206 || code == PAREN_EXPR
5207 || CONVERT_EXPR_CODE_P (code))
5208 op = gimple_assign_rhs1 (stmt);
5209 else
5210 return false;
5212 if (code == VIEW_CONVERT_EXPR)
5213 op = TREE_OPERAND (op, 0);
5215 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5216 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
5218 /* Multiple types in SLP are handled by creating the appropriate number of
5219 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5220 case of SLP. */
5221 if (slp_node)
5222 ncopies = 1;
5223 else
5224 ncopies = vect_get_num_copies (loop_vinfo, vectype);
5226 gcc_assert (ncopies >= 1);
5228 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[0], &vectype_in))
5230 if (dump_enabled_p ())
5231 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5232 "use not simple.\n");
5233 return false;
5236 /* We can handle NOP_EXPR conversions that do not change the number
5237 of elements or the vector size. */
5238 if ((CONVERT_EXPR_CODE_P (code)
5239 || code == VIEW_CONVERT_EXPR)
5240 && (!vectype_in
5241 || maybe_ne (TYPE_VECTOR_SUBPARTS (vectype_in), nunits)
5242 || maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype)),
5243 GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
5244 return false;
5246 /* We do not handle bit-precision changes. */
5247 if ((CONVERT_EXPR_CODE_P (code)
5248 || code == VIEW_CONVERT_EXPR)
5249 && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
5250 && (!type_has_mode_precision_p (TREE_TYPE (scalar_dest))
5251 || !type_has_mode_precision_p (TREE_TYPE (op)))
5252 /* But a conversion that does not change the bit-pattern is ok. */
5253 && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
5254 > TYPE_PRECISION (TREE_TYPE (op)))
5255 && TYPE_UNSIGNED (TREE_TYPE (op)))
5256 /* Conversion between boolean types of different sizes is
5257 a simple assignment in case their vectypes are the same
5258 boolean vectors. */
5259 && (!VECTOR_BOOLEAN_TYPE_P (vectype)
5260 || !VECTOR_BOOLEAN_TYPE_P (vectype_in)))
5262 if (dump_enabled_p ())
5263 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5264 "type conversion to/from bit-precision "
5265 "unsupported.\n");
5266 return false;
5269 if (!vec_stmt) /* transformation not required. */
5271 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
5272 DUMP_VECT_SCOPE ("vectorizable_assignment");
5273 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec);
5274 return true;
5277 /* Transform. */
5278 if (dump_enabled_p ())
5279 dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");
5281 /* Handle def. */
5282 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5284 /* Handle use. */
5285 for (j = 0; j < ncopies; j++)
5287 /* Handle uses. */
5288 if (j == 0)
5289 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
5290 else
5291 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
5293 /* Arguments are ready. Create the new vector stmt. */
5294 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
5296 if (CONVERT_EXPR_CODE_P (code)
5297 || code == VIEW_CONVERT_EXPR)
5298 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
5299 new_stmt = gimple_build_assign (vec_dest, vop);
5300 new_temp = make_ssa_name (vec_dest, new_stmt);
5301 gimple_assign_set_lhs (new_stmt, new_temp);
5302 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5303 if (slp_node)
5304 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
5307 if (slp_node)
5308 continue;
5310 if (j == 0)
5311 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5312 else
5313 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5315 prev_stmt_info = vinfo_for_stmt (new_stmt);
5318 vec_oprnds.release ();
5319 return true;
5323 /* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
5324 either as shift by a scalar or by a vector. */
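/* A sketch of typical use from a pattern recognizer (hypothetical
   caller, shown only for illustration):

     if (vect_supportable_shift (LSHIFT_EXPR, TREE_TYPE (oprnd0)))
       ... emit a shift in the replacement pattern ...

   The routine first tries the scalar-shift-amount optab and falls
   back to the vector-shift-amount optab.  */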
5326 bool
5327 vect_supportable_shift (enum tree_code code, tree scalar_type)
5330 machine_mode vec_mode;
5331 optab optab;
5332 int icode;
5333 tree vectype;
5335 vectype = get_vectype_for_scalar_type (scalar_type);
5336 if (!vectype)
5337 return false;
5339 optab = optab_for_tree_code (code, vectype, optab_scalar);
5340 if (!optab
5341 || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
5343 optab = optab_for_tree_code (code, vectype, optab_vector);
5344 if (!optab
5345 || (optab_handler (optab, TYPE_MODE (vectype))
5346 == CODE_FOR_nothing))
5347 return false;
5350 vec_mode = TYPE_MODE (vectype);
5351 icode = (int) optab_handler (optab, vec_mode);
5352 if (icode == CODE_FOR_nothing)
5353 return false;
5355 return true;
5359 /* Function vectorizable_shift.
5361 Check if STMT performs a shift operation that can be vectorized.
5362 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5363 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
5364 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
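/* Illustrative examples of the two forms distinguished below
   (hypothetical loops):

     for (i = 0; i < N; i++)
       a[i] = b[i] << 3;        <-- invariant amount: vector/scalar shift

     for (i = 0; i < N; i++)
       a[i] = b[i] << c[i];     <-- per-lane amount: vector/vector shift

   Which optab is actually used also depends on what the target
   supports, as checked below.  */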
5366 static bool
5367 vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
5368 gimple **vec_stmt, slp_tree slp_node,
5369 stmt_vector_for_cost *cost_vec)
5371 tree vec_dest;
5372 tree scalar_dest;
5373 tree op0, op1 = NULL;
5374 tree vec_oprnd1 = NULL_TREE;
5375 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5376 tree vectype;
5377 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5378 enum tree_code code;
5379 machine_mode vec_mode;
5380 tree new_temp;
5381 optab optab;
5382 int icode;
5383 machine_mode optab_op2_mode;
5384 gimple *def_stmt;
5385 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
5386 int ndts = 2;
5387 gimple *new_stmt = NULL;
5388 stmt_vec_info prev_stmt_info;
5389 poly_uint64 nunits_in;
5390 poly_uint64 nunits_out;
5391 tree vectype_out;
5392 tree op1_vectype;
5393 int ncopies;
5394 int j, i;
5395 vec<tree> vec_oprnds0 = vNULL;
5396 vec<tree> vec_oprnds1 = vNULL;
5397 tree vop0, vop1;
5398 unsigned int k;
5399 bool scalar_shift_arg = true;
5400 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5401 vec_info *vinfo = stmt_info->vinfo;
5403 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5404 return false;
5406 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5407 && ! vec_stmt)
5408 return false;
5410 /* Is STMT a vectorizable binary/unary operation? */
5411 if (!is_gimple_assign (stmt))
5412 return false;
5414 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
5415 return false;
5417 code = gimple_assign_rhs_code (stmt);
5419 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
5420 || code == RROTATE_EXPR))
5421 return false;
5423 scalar_dest = gimple_assign_lhs (stmt);
5424 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
5425 if (!type_has_mode_precision_p (TREE_TYPE (scalar_dest)))
5427 if (dump_enabled_p ())
5428 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5429 "bit-precision shifts not supported.\n");
5430 return false;
5433 op0 = gimple_assign_rhs1 (stmt);
5434 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
5436 if (dump_enabled_p ())
5437 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5438 "use not simple.\n");
5439 return false;
5441 /* If op0 is an external or constant def use a vector type with
5442 the same size as the output vector type. */
5443 if (!vectype)
5444 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
5445 if (vec_stmt)
5446 gcc_assert (vectype);
5447 if (!vectype)
5449 if (dump_enabled_p ())
5450 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5451 "no vectype for scalar type\n");
5452 return false;
5455 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
5456 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
5457 if (maybe_ne (nunits_out, nunits_in))
5458 return false;
5460 op1 = gimple_assign_rhs2 (stmt);
5461 if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &op1_vectype))
5463 if (dump_enabled_p ())
5464 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5465 "use not simple.\n");
5466 return false;
5469 /* Multiple types in SLP are handled by creating the appropriate number of
5470 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5471 case of SLP. */
5472 if (slp_node)
5473 ncopies = 1;
5474 else
5475 ncopies = vect_get_num_copies (loop_vinfo, vectype);
5477 gcc_assert (ncopies >= 1);
5479 /* Determine whether the shift amount is a vector or a scalar. If the
5480 shift/rotate amount is a vector, use the vector/vector shift optabs. */
5482 if ((dt[1] == vect_internal_def
5483 || dt[1] == vect_induction_def)
5484 && !slp_node)
5485 scalar_shift_arg = false;
5486 else if (dt[1] == vect_constant_def
5487 || dt[1] == vect_external_def
5488 || dt[1] == vect_internal_def)
5490 /* In SLP, we need to check whether the shift count is the same for
5491 all statements; in loops, if it is a constant or invariant, it is
5492 always a scalar shift. */
5493 if (slp_node)
5495 vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
5496 gimple *slpstmt;
5498 FOR_EACH_VEC_ELT (stmts, k, slpstmt)
5499 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
5500 scalar_shift_arg = false;
5503 /* If the shift amount is computed by a pattern stmt, we cannot
5504 use the scalar amount directly; give up and use a vector
5505 shift instead. */
5506 if (dt[1] == vect_internal_def)
5508 gimple *def = SSA_NAME_DEF_STMT (op1);
5509 if (is_pattern_stmt_p (vinfo_for_stmt (def)))
5510 scalar_shift_arg = false;
5513 else
5515 if (dump_enabled_p ())
5516 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5517 "operand mode requires invariant argument.\n");
5518 return false;
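/* For example, in

     for (i = 0; i < n; i++)
       a[i] = b[i] << c;		(shift by a loop invariant)

   the shift amount stays a scalar, whereas in

     for (i = 0; i < n; i++)
       a[i] = b[i] << c[i];	(shift by a loop-varying value)

   the shift amount is itself vectorized and the vector/vector
   shift optab must be used.  */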
5521 /* Vector shifted by vector. */
5522 if (!scalar_shift_arg)
5524 optab = optab_for_tree_code (code, vectype, optab_vector);
5525 if (dump_enabled_p ())
5526 dump_printf_loc (MSG_NOTE, vect_location,
5527 "vector/vector shift/rotate found.\n");
5529 if (!op1_vectype)
5530 op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
5531 if (op1_vectype == NULL_TREE
5532 || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
5534 if (dump_enabled_p ())
5535 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5536 "unusable type for last operand in"
5537 " vector/vector shift/rotate.\n");
5538 return false;
5541 /* See if the machine has a vector shifted by scalar insn and if not
5542 then see if it has a vector shifted by vector insn. */
5543 else
5545 optab = optab_for_tree_code (code, vectype, optab_scalar);
5546 if (optab
5547 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
5549 if (dump_enabled_p ())
5550 dump_printf_loc (MSG_NOTE, vect_location,
5551 "vector/scalar shift/rotate found.\n");
5553 else
5555 optab = optab_for_tree_code (code, vectype, optab_vector);
5556 if (optab
5557 && (optab_handler (optab, TYPE_MODE (vectype))
5558 != CODE_FOR_nothing))
5560 scalar_shift_arg = false;
5562 if (dump_enabled_p ())
5563 dump_printf_loc (MSG_NOTE, vect_location,
5564 "vector/vector shift/rotate found.\n");
5566 /* Unlike the other binary operators, shifts/rotates have
5567 the rhs of type int rather than the same type as the lhs,
5568 so make sure the scalar is the right type if we are
5569 dealing with vectors of long long/long/short/char. */
5570 if (dt[1] == vect_constant_def)
5571 op1 = fold_convert (TREE_TYPE (vectype), op1);
5572 else if (!useless_type_conversion_p (TREE_TYPE (vectype),
5573 TREE_TYPE (op1)))
5575 if (slp_node
5576 && TYPE_MODE (TREE_TYPE (vectype))
5577 != TYPE_MODE (TREE_TYPE (op1)))
5579 if (dump_enabled_p ())
5580 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5581 "unusable type for last operand in"
5582 " vector/vector shift/rotate.\n");
5583 return false;
5585 if (vec_stmt && !slp_node)
5587 op1 = fold_convert (TREE_TYPE (vectype), op1);
5588 op1 = vect_init_vector (stmt, op1,
5589 TREE_TYPE (vectype), NULL);
5596 /* Supportable by target? */
5597 if (!optab)
5599 if (dump_enabled_p ())
5600 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5601 "no optab.\n");
5602 return false;
5604 vec_mode = TYPE_MODE (vectype);
5605 icode = (int) optab_handler (optab, vec_mode);
5606 if (icode == CODE_FOR_nothing)
5608 if (dump_enabled_p ())
5609 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5610 "op not supported by target.\n");
5611 /* Check only during analysis. */
5612 if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
5613 || (!vec_stmt
5614 && !vect_worthwhile_without_simd_p (vinfo, code)))
5615 return false;
5616 if (dump_enabled_p ())
5617 dump_printf_loc (MSG_NOTE, vect_location,
5618 "proceeding using word mode.\n");
5621 /* Worthwhile without SIMD support? Check only during analysis. */
5622 if (!vec_stmt
5623 && !VECTOR_MODE_P (TYPE_MODE (vectype))
5624 && !vect_worthwhile_without_simd_p (vinfo, code))
5626 if (dump_enabled_p ())
5627 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5628 "not worthwhile without SIMD support.\n");
5629 return false;
5632 if (!vec_stmt) /* transformation not required. */
5634 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
5635 DUMP_VECT_SCOPE ("vectorizable_shift");
5636 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec);
5637 return true;
5640 /* Transform. */
5642 if (dump_enabled_p ())
5643 dump_printf_loc (MSG_NOTE, vect_location,
5644 "transform binary/unary operation.\n");
5646 /* Handle def. */
5647 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5649 prev_stmt_info = NULL;
5650 for (j = 0; j < ncopies; j++)
5652 /* Handle uses. */
5653 if (j == 0)
5655 if (scalar_shift_arg)
5657 /* Vector shl and shr insn patterns can be defined with scalar
5658 operand 2 (shift operand). In this case, use constant or loop
5659 invariant op1 directly, without extending it to vector mode
5660 first. */
5661 optab_op2_mode = insn_data[icode].operand[2].mode;
5662 if (!VECTOR_MODE_P (optab_op2_mode))
5664 if (dump_enabled_p ())
5665 dump_printf_loc (MSG_NOTE, vect_location,
5666 "operand 1 using scalar mode.\n");
5667 vec_oprnd1 = op1;
5668 vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
5669 vec_oprnds1.quick_push (vec_oprnd1);
5670 if (slp_node)
5672 /* Store vec_oprnd1 for every vector stmt to be created
5673 for SLP_NODE. We check during the analysis that all
5674 the shift arguments are the same.
5675 TODO: Allow different constants for different vector
5676 stmts generated for an SLP instance. */
5677 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
5678 vec_oprnds1.quick_push (vec_oprnd1);
5683 /* vec_oprnd1 is available if operand 1 should be of a scalar type
5684 (a special case for certain kinds of vector shifts); otherwise,
5685 operand 1 should be of a vector type (the usual case). */
5686 if (vec_oprnd1)
5687 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
5688 slp_node);
5689 else
5690 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
5691 slp_node);
5693 else
5694 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
5696 /* Arguments are ready. Create the new vector stmt. */
5697 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
5699 vop1 = vec_oprnds1[i];
5700 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
5701 new_temp = make_ssa_name (vec_dest, new_stmt);
5702 gimple_assign_set_lhs (new_stmt, new_temp);
5703 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5704 if (slp_node)
5705 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
5708 if (slp_node)
5709 continue;
5711 if (j == 0)
5712 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5713 else
5714 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5715 prev_stmt_info = vinfo_for_stmt (new_stmt);
5718 vec_oprnds0.release ();
5719 vec_oprnds1.release ();
5721 return true;
5725 /* Function vectorizable_operation.
5727 Check if STMT performs a binary, unary or ternary operation that can
5728 be vectorized.
5729 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5730 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
5731 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5733 static bool
5734 vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
5735 gimple **vec_stmt, slp_tree slp_node,
5736 stmt_vector_for_cost *cost_vec)
5738 tree vec_dest;
5739 tree scalar_dest;
5740 tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
5741 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5742 tree vectype;
5743 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5744 enum tree_code code, orig_code;
5745 machine_mode vec_mode;
5746 tree new_temp;
5747 int op_type;
5748 optab optab;
5749 bool target_support_p;
5750 gimple *def_stmt;
5751 enum vect_def_type dt[3]
5752 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
5753 int ndts = 3;
5754 gimple *new_stmt = NULL;
5755 stmt_vec_info prev_stmt_info;
5756 poly_uint64 nunits_in;
5757 poly_uint64 nunits_out;
5758 tree vectype_out;
5759 int ncopies;
5760 int j, i;
5761 vec<tree> vec_oprnds0 = vNULL;
5762 vec<tree> vec_oprnds1 = vNULL;
5763 vec<tree> vec_oprnds2 = vNULL;
5764 tree vop0, vop1, vop2;
5765 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5766 vec_info *vinfo = stmt_info->vinfo;
5768 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5769 return false;
5771 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5772 && ! vec_stmt)
5773 return false;
5775 /* Is STMT a vectorizable binary/unary operation? */
5776 if (!is_gimple_assign (stmt))
5777 return false;
5779 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
5780 return false;
5782 orig_code = code = gimple_assign_rhs_code (stmt);
5784 /* For pointer addition and subtraction, we should use the normal
5785 plus and minus for the vector operation. */
5786 if (code == POINTER_PLUS_EXPR)
5787 code = PLUS_EXPR;
5788 if (code == POINTER_DIFF_EXPR)
5789 code = MINUS_EXPR;
5791 /* Support only unary, binary and ternary operations. */
5792 op_type = TREE_CODE_LENGTH (code);
5793 if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
5795 if (dump_enabled_p ())
5796 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5797 "num. args = %d (not unary/binary/ternary op).\n",
5798 op_type);
5799 return false;
5802 scalar_dest = gimple_assign_lhs (stmt);
5803 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
5805 /* Most operations cannot handle bit-precision types without extra
5806 truncations. */
5807 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
5808 && !type_has_mode_precision_p (TREE_TYPE (scalar_dest))
5809 /* Exceptions are the bitwise binary operations. */
5810 && code != BIT_IOR_EXPR
5811 && code != BIT_XOR_EXPR
5812 && code != BIT_AND_EXPR)
5814 if (dump_enabled_p ())
5815 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5816 "bit-precision arithmetic not supported.\n");
5817 return false;
5820 op0 = gimple_assign_rhs1 (stmt);
5821 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
5823 if (dump_enabled_p ())
5824 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5825 "use not simple.\n");
5826 return false;
5828 /* If op0 is an external or constant def use a vector type with
5829 the same size as the output vector type. */
5830 if (!vectype)
5832 /* For a boolean type we cannot determine the vectype from an
5833 invariant value (we don't know whether it is a vector
5834 of booleans or a vector of integers). We use the output
5835 vectype because operations on booleans don't change the
5836 type. */
5837 if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op0)))
5839 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (scalar_dest)))
5841 if (dump_enabled_p ())
5842 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5843 "not supported operation on bool value.\n");
5844 return false;
5846 vectype = vectype_out;
5848 else
5849 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
5851 if (vec_stmt)
5852 gcc_assert (vectype);
5853 if (!vectype)
5855 if (dump_enabled_p ())
5857 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5858 "no vectype for scalar type ");
5859 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
5860 TREE_TYPE (op0));
5861 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
5864 return false;
5867 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
5868 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
5869 if (maybe_ne (nunits_out, nunits_in))
5870 return false;
5872 if (op_type == binary_op || op_type == ternary_op)
5874 op1 = gimple_assign_rhs2 (stmt);
5875 if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]))
5877 if (dump_enabled_p ())
5878 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5879 "use not simple.\n");
5880 return false;
5883 if (op_type == ternary_op)
5885 op2 = gimple_assign_rhs3 (stmt);
5886 if (!vect_is_simple_use (op2, vinfo, &def_stmt, &dt[2]))
5888 if (dump_enabled_p ())
5889 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5890 "use not simple.\n");
5891 return false;
5895 /* Multiple types in SLP are handled by creating the appropriate number of
5896 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5897 case of SLP. */
5898 if (slp_node)
5899 ncopies = 1;
5900 else
5901 ncopies = vect_get_num_copies (loop_vinfo, vectype);
5903 gcc_assert (ncopies >= 1);
5905 /* Shifts are handled in vectorizable_shift (). */
5906 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
5907 || code == RROTATE_EXPR)
5908 return false;
5910 /* Supportable by target? */
5912 vec_mode = TYPE_MODE (vectype);
5913 if (code == MULT_HIGHPART_EXPR)
5914 target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
5915 else
5917 optab = optab_for_tree_code (code, vectype, optab_default);
5918 if (!optab)
5920 if (dump_enabled_p ())
5921 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5922 "no optab.\n");
5923 return false;
5925 target_support_p = (optab_handler (optab, vec_mode)
5926 != CODE_FOR_nothing);
5929 if (!target_support_p)
5931 if (dump_enabled_p ())
5932 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5933 "op not supported by target.\n");
5934 /* Check only during analysis. */
5935 if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
5936 || (!vec_stmt && !vect_worthwhile_without_simd_p (vinfo, code)))
5937 return false;
5938 if (dump_enabled_p ())
5939 dump_printf_loc (MSG_NOTE, vect_location,
5940 "proceeding using word mode.\n");
5943 /* Worthwhile without SIMD support? Check only during analysis. */
5944 if (!VECTOR_MODE_P (vec_mode)
5945 && !vec_stmt
5946 && !vect_worthwhile_without_simd_p (vinfo, code))
5948 if (dump_enabled_p ())
5949 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5950 "not worthwhile without SIMD support.\n");
5951 return false;
5954 if (!vec_stmt) /* transformation not required. */
5956 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
5957 DUMP_VECT_SCOPE ("vectorizable_operation");
5958 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec);
5959 return true;
5962 /* Transform. */
5964 if (dump_enabled_p ())
5965 dump_printf_loc (MSG_NOTE, vect_location,
5966 "transform binary/unary operation.\n");
5968 /* POINTER_DIFF_EXPR has pointer arguments which are vectorized as
5969 vectors with unsigned elements, but the result is signed. So, we
5970 need to compute the MINUS_EXPR into a vectype temporary and
5971 VIEW_CONVERT_EXPR it into the final vectype_out result. */
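/* For instance, for

     ptrdiff_t d = p - q;

   the element-wise MINUS_EXPR is carried out in the unsigned vector
   type of the pointer operands and the resulting vector is then
   VIEW_CONVERT_EXPRed to the signed VECTYPE_OUT.  */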
5972 tree vec_cvt_dest = NULL_TREE;
5973 if (orig_code == POINTER_DIFF_EXPR)
5975 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5976 vec_cvt_dest = vect_create_destination_var (scalar_dest, vectype_out);
5978 /* Handle def. */
5979 else
5980 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
5982 /* In case the vectorization factor (VF) is bigger than the number
5983 of elements that we can fit in a vectype (nunits), we have to generate
5984 more than one vector stmt - i.e. we need to "unroll" the
5985 vector stmt by a factor VF/nunits. In doing so, we record a pointer
5986 from one copy of the vector stmt to the next, in the field
5987 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
5988 stages to find the correct vector defs to be used when vectorizing
5989 stmts that use the defs of the current stmt. The example below
5990 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
5991 we need to create 4 vectorized stmts):
5993 before vectorization:
5994 RELATED_STMT VEC_STMT
5995 S1: x = memref - -
5996 S2: z = x + 1 - -
5998 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
5999 there):
6000 RELATED_STMT VEC_STMT
6001 VS1_0: vx0 = memref0 VS1_1 -
6002 VS1_1: vx1 = memref1 VS1_2 -
6003 VS1_2: vx2 = memref2 VS1_3 -
6004 VS1_3: vx3 = memref3 - -
6005 S1: x = load - VS1_0
6006 S2: z = x + 1 - -
6008 step2: vectorize stmt S2 (done here):
6009 To vectorize stmt S2 we first need to find the relevant vector
6010 def for the first operand 'x'. This is, as usual, obtained from
6011 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
6012 that defines 'x' (S1). This way we find the stmt VS1_0, and the
6013 relevant vector def 'vx0'. Having found 'vx0' we can generate
6014 the vector stmt VS2_0, and as usual, record it in the
6015 STMT_VINFO_VEC_STMT of stmt S2.
6016 When creating the second copy (VS2_1), we obtain the relevant vector
6017 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
6018 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
6019 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
6020 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
6021 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
6022 chain of stmts and pointers:
6023 RELATED_STMT VEC_STMT
6024 VS1_0: vx0 = memref0 VS1_1 -
6025 VS1_1: vx1 = memref1 VS1_2 -
6026 VS1_2: vx2 = memref2 VS1_3 -
6027 VS1_3: vx3 = memref3 - -
6028 S1: x = load - VS1_0
6029 VS2_0: vz0 = vx0 + v1 VS2_1 -
6030 VS2_1: vz1 = vx1 + v1 VS2_2 -
6031 VS2_2: vz2 = vx2 + v1 VS2_3 -
6032 VS2_3: vz3 = vx3 + v1 - -
6033 S2: z = x + 1 - VS2_0 */
6035 prev_stmt_info = NULL;
6036 for (j = 0; j < ncopies; j++)
6038 /* Handle uses. */
6039 if (j == 0)
6041 if (op_type == binary_op)
6042 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
6043 slp_node);
6044 else if (op_type == ternary_op)
6046 if (slp_node)
6048 auto_vec<tree> ops(3);
6049 ops.quick_push (op0);
6050 ops.quick_push (op1);
6051 ops.quick_push (op2);
6052 auto_vec<vec<tree> > vec_defs(3);
6053 vect_get_slp_defs (ops, slp_node, &vec_defs);
6054 vec_oprnds0 = vec_defs[0];
6055 vec_oprnds1 = vec_defs[1];
6056 vec_oprnds2 = vec_defs[2];
6058 else
6060 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
6061 NULL);
6062 vect_get_vec_defs (op2, NULL_TREE, stmt, &vec_oprnds2, NULL,
6063 NULL);
6066 else
6067 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
6068 slp_node);
6070 else
6072 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
6073 if (op_type == ternary_op)
6075 tree vec_oprnd = vec_oprnds2.pop ();
6076 vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
6077 vec_oprnd));
6081 /* Arguments are ready. Create the new vector stmt. */
6082 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
6084 vop1 = ((op_type == binary_op || op_type == ternary_op)
6085 ? vec_oprnds1[i] : NULL_TREE);
6086 vop2 = ((op_type == ternary_op)
6087 ? vec_oprnds2[i] : NULL_TREE);
6088 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2);
6089 new_temp = make_ssa_name (vec_dest, new_stmt);
6090 gimple_assign_set_lhs (new_stmt, new_temp);
6091 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6092 if (vec_cvt_dest)
6094 new_temp = build1 (VIEW_CONVERT_EXPR, vectype_out, new_temp);
6095 new_stmt = gimple_build_assign (vec_cvt_dest, VIEW_CONVERT_EXPR,
6096 new_temp);
6097 new_temp = make_ssa_name (vec_cvt_dest, new_stmt);
6098 gimple_assign_set_lhs (new_stmt, new_temp);
6099 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6101 if (slp_node)
6102 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
6105 if (slp_node)
6106 continue;
6108 if (j == 0)
6109 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6110 else
6111 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6112 prev_stmt_info = vinfo_for_stmt (new_stmt);
6115 vec_oprnds0.release ();
6116 vec_oprnds1.release ();
6117 vec_oprnds2.release ();
6119 return true;
6122 /* A helper function to ensure data reference DR's base alignment. */
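/* For example, if the base object of DR is a local or global array
   declared with only its natural alignment but the target prefers
   vector accesses aligned to DR_TARGET_ALIGNMENT, the declaration's
   alignment is raised accordingly (through the symbol table for decls
   that live there, or directly on the DECL otherwise).  */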
6124 static void
6125 ensure_base_align (struct data_reference *dr)
6127 if (!dr->aux)
6128 return;
6130 if (DR_VECT_AUX (dr)->base_misaligned)
6132 tree base_decl = DR_VECT_AUX (dr)->base_decl;
6134 unsigned int align_base_to = DR_TARGET_ALIGNMENT (dr) * BITS_PER_UNIT;
6136 if (decl_in_symtab_p (base_decl))
6137 symtab_node::get (base_decl)->increase_alignment (align_base_to);
6138 else
6140 SET_DECL_ALIGN (base_decl, align_base_to);
6141 DECL_USER_ALIGN (base_decl) = 1;
6143 DR_VECT_AUX (dr)->base_misaligned = false;
6148 /* Function get_group_alias_ptr_type.
6150 Return the alias type for the group starting at FIRST_STMT. */
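/* For example, if the members of an interleaved group access memory
   through references with conflicting alias sets (say an int store
   and a float store interleaved in the same chain), the conservative
   ptr_type_node is returned; otherwise the alias pointer type of the
   first reference is safe to use for the whole group.  */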
6152 static tree
6153 get_group_alias_ptr_type (gimple *first_stmt)
6155 struct data_reference *first_dr, *next_dr;
6156 gimple *next_stmt;
6158 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
6159 next_stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (first_stmt));
6160 while (next_stmt)
6162 next_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (next_stmt));
6163 if (get_alias_set (DR_REF (first_dr))
6164 != get_alias_set (DR_REF (next_dr)))
6166 if (dump_enabled_p ())
6167 dump_printf_loc (MSG_NOTE, vect_location,
6168 "conflicting alias set types.\n");
6169 return ptr_type_node;
6171 next_stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
6173 return reference_alias_ptr_type (DR_REF (first_dr));
6177 /* Function vectorizable_store.
6179 Check if STMT defines a non-scalar data-ref (array/pointer/structure) that
6180 can be vectorized.
6181 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
6182 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
6183 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
6185 static bool
6186 vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
6187 slp_tree slp_node, stmt_vector_for_cost *cost_vec)
6189 tree data_ref;
6190 tree op;
6191 tree vec_oprnd = NULL_TREE;
6192 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
6193 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
6194 tree elem_type;
6195 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6196 struct loop *loop = NULL;
6197 machine_mode vec_mode;
6198 tree dummy;
6199 enum dr_alignment_support alignment_support_scheme;
6200 gimple *def_stmt;
6201 enum vect_def_type rhs_dt = vect_unknown_def_type;
6202 enum vect_def_type mask_dt = vect_unknown_def_type;
6203 stmt_vec_info prev_stmt_info = NULL;
6204 tree dataref_ptr = NULL_TREE;
6205 tree dataref_offset = NULL_TREE;
6206 gimple *ptr_incr = NULL;
6207 int ncopies;
6208 int j;
6209 gimple *next_stmt, *first_stmt;
6210 bool grouped_store;
6211 unsigned int group_size, i;
6212 vec<tree> oprnds = vNULL;
6213 vec<tree> result_chain = vNULL;
6214 bool inv_p;
6215 tree offset = NULL_TREE;
6216 vec<tree> vec_oprnds = vNULL;
6217 bool slp = (slp_node != NULL);
6218 unsigned int vec_num;
6219 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
6220 vec_info *vinfo = stmt_info->vinfo;
6221 tree aggr_type;
6222 gather_scatter_info gs_info;
6223 gimple *new_stmt;
6224 poly_uint64 vf;
6225 vec_load_store_type vls_type;
6226 tree ref_type;
6228 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
6229 return false;
6231 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
6232 && ! vec_stmt)
6233 return false;
6235 /* Is this a vectorizable store? */
6237 tree mask = NULL_TREE, mask_vectype = NULL_TREE;
6238 if (is_gimple_assign (stmt))
6240 tree scalar_dest = gimple_assign_lhs (stmt);
6241 if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
6242 && is_pattern_stmt_p (stmt_info))
6243 scalar_dest = TREE_OPERAND (scalar_dest, 0);
6244 if (TREE_CODE (scalar_dest) != ARRAY_REF
6245 && TREE_CODE (scalar_dest) != BIT_FIELD_REF
6246 && TREE_CODE (scalar_dest) != INDIRECT_REF
6247 && TREE_CODE (scalar_dest) != COMPONENT_REF
6248 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
6249 && TREE_CODE (scalar_dest) != REALPART_EXPR
6250 && TREE_CODE (scalar_dest) != MEM_REF)
6251 return false;
6253 else
6255 gcall *call = dyn_cast <gcall *> (stmt);
6256 if (!call || !gimple_call_internal_p (call))
6257 return false;
6259 internal_fn ifn = gimple_call_internal_fn (call);
6260 if (!internal_store_fn_p (ifn))
6261 return false;
6263 if (slp_node != NULL)
6265 if (dump_enabled_p ())
6266 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6267 "SLP of masked stores not supported.\n");
6268 return false;
6271 int mask_index = internal_fn_mask_index (ifn);
6272 if (mask_index >= 0)
6274 mask = gimple_call_arg (call, mask_index);
6275 if (!vect_check_load_store_mask (stmt, mask, &mask_dt,
6276 &mask_vectype))
6277 return false;
6281 op = vect_get_store_rhs (stmt);
6283 /* Cannot have hybrid store SLP -- that would mean storing to the
6284 same location twice. */
6285 gcc_assert (slp == PURE_SLP_STMT (stmt_info));
6287 tree vectype = STMT_VINFO_VECTYPE (stmt_info), rhs_vectype = NULL_TREE;
6288 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
6290 if (loop_vinfo)
6292 loop = LOOP_VINFO_LOOP (loop_vinfo);
6293 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
6295 else
6296 vf = 1;
6298 /* Multiple types in SLP are handled by creating the appropriate number of
6299 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
6300 case of SLP. */
6301 if (slp)
6302 ncopies = 1;
6303 else
6304 ncopies = vect_get_num_copies (loop_vinfo, vectype);
6306 gcc_assert (ncopies >= 1);
6308 /* FORNOW. This restriction should be relaxed. */
6309 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
6311 if (dump_enabled_p ())
6312 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6313 "multiple types in nested loop.\n");
6314 return false;
6317 if (!vect_check_store_rhs (stmt, op, &rhs_dt, &rhs_vectype, &vls_type))
6318 return false;
6320 elem_type = TREE_TYPE (vectype);
6321 vec_mode = TYPE_MODE (vectype);
6323 if (!STMT_VINFO_DATA_REF (stmt_info))
6324 return false;
6326 vect_memory_access_type memory_access_type;
6327 if (!get_load_store_type (stmt, vectype, slp, mask, vls_type, ncopies,
6328 &memory_access_type, &gs_info))
6329 return false;
6331 if (mask)
6333 if (memory_access_type == VMAT_CONTIGUOUS)
6335 if (!VECTOR_MODE_P (vec_mode)
6336 || !can_vec_mask_load_store_p (vec_mode,
6337 TYPE_MODE (mask_vectype), false))
6338 return false;
6340 else if (memory_access_type != VMAT_LOAD_STORE_LANES
6341 && (memory_access_type != VMAT_GATHER_SCATTER || gs_info.decl))
6343 if (dump_enabled_p ())
6344 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6345 "unsupported access type for masked store.\n");
6346 return false;
6349 else
6351 /* FORNOW. In some cases we can vectorize even if the data type is not
6352 supported (e.g. array initialization with 0). */
6353 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
6354 return false;
6357 grouped_store = (STMT_VINFO_GROUPED_ACCESS (stmt_info)
6358 && memory_access_type != VMAT_GATHER_SCATTER
6359 && (slp || memory_access_type != VMAT_CONTIGUOUS));
6360 if (grouped_store)
6362 first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
6363 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
6364 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
6366 else
6368 first_stmt = stmt;
6369 first_dr = dr;
6370 group_size = vec_num = 1;
6373 if (!vec_stmt) /* transformation not required. */
6375 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
6377 if (loop_vinfo
6378 && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
6379 check_load_store_masking (loop_vinfo, vectype, vls_type, group_size,
6380 memory_access_type, &gs_info);
6382 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
6383 vect_model_store_cost (stmt_info, ncopies, rhs_dt, memory_access_type,
6384 vls_type, slp_node, cost_vec);
6385 return true;
6387 gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
6389 /* Transform. */
6391 ensure_base_align (dr);
6393 if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl)
6395 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, src;
6396 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
6397 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
6398 tree ptr, mask, var, scale, perm_mask = NULL_TREE;
6399 edge pe = loop_preheader_edge (loop);
6400 gimple_seq seq;
6401 basic_block new_bb;
6402 enum { NARROW, NONE, WIDEN } modifier;
6403 poly_uint64 scatter_off_nunits
6404 = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);
6406 if (known_eq (nunits, scatter_off_nunits))
6407 modifier = NONE;
6408 else if (known_eq (nunits * 2, scatter_off_nunits))
6410 modifier = WIDEN;
6412 /* Currently gathers and scatters are only supported for
6413 fixed-length vectors. */
6414 unsigned int count = scatter_off_nunits.to_constant ();
6415 vec_perm_builder sel (count, count, 1);
6416 for (i = 0; i < (unsigned int) count; ++i)
6417 sel.quick_push (i | (count / 2));
6419 vec_perm_indices indices (sel, 1, count);
6420 perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype,
6421 indices);
6422 gcc_assert (perm_mask != NULL_TREE);
6424 else if (known_eq (nunits, scatter_off_nunits * 2))
6426 modifier = NARROW;
6428 /* Currently gathers and scatters are only supported for
6429 fixed-length vectors. */
6430 unsigned int count = nunits.to_constant ();
6431 vec_perm_builder sel (count, count, 1);
6432 for (i = 0; i < (unsigned int) count; ++i)
6433 sel.quick_push (i | (count / 2));
6435 vec_perm_indices indices (sel, 2, count);
6436 perm_mask = vect_gen_perm_mask_checked (vectype, indices);
6437 gcc_assert (perm_mask != NULL_TREE);
6438 ncopies *= 2;
6440 else
6441 gcc_unreachable ();
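/* For example, if the data vector has 4 elements and the builtin's
   offset vector has 8, the modifier is WIDEN; in the opposite case
   (8 data elements, 4 offsets) it is NARROW and NCOPIES is doubled
   so that each scatter handles half of a data vector.  */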
6443 rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
6444 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6445 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6446 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6447 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6448 scaletype = TREE_VALUE (arglist);
6450 gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
6451 && TREE_CODE (rettype) == VOID_TYPE);
6453 ptr = fold_convert (ptrtype, gs_info.base);
6454 if (!is_gimple_min_invariant (ptr))
6456 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
6457 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
6458 gcc_assert (!new_bb);
6461 /* Currently we support only unconditional scatter stores,
6462 so mask should be all ones. */
6463 mask = build_int_cst (masktype, -1);
6464 mask = vect_init_vector (stmt, mask, masktype, NULL);
6466 scale = build_int_cst (scaletype, gs_info.scale);
6468 prev_stmt_info = NULL;
6469 for (j = 0; j < ncopies; ++j)
6471 if (j == 0)
6473 src = vec_oprnd1
6474 = vect_get_vec_def_for_operand (op, stmt);
6475 op = vec_oprnd0
6476 = vect_get_vec_def_for_operand (gs_info.offset, stmt);
6478 else if (modifier != NONE && (j & 1))
6480 if (modifier == WIDEN)
6482 src = vec_oprnd1
6483 = vect_get_vec_def_for_stmt_copy (rhs_dt, vec_oprnd1);
6484 op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
6485 stmt, gsi);
6487 else if (modifier == NARROW)
6489 src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask,
6490 stmt, gsi);
6491 op = vec_oprnd0
6492 = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt,
6493 vec_oprnd0);
6495 else
6496 gcc_unreachable ();
6498 else
6500 src = vec_oprnd1
6501 = vect_get_vec_def_for_stmt_copy (rhs_dt, vec_oprnd1);
6502 op = vec_oprnd0
6503 = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt,
6504 vec_oprnd0);
6507 if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
6509 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src)),
6510 TYPE_VECTOR_SUBPARTS (srctype)));
6511 var = vect_get_new_ssa_name (srctype, vect_simple_var);
6512 src = build1 (VIEW_CONVERT_EXPR, srctype, src);
6513 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
6514 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6515 src = var;
6518 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
6520 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
6521 TYPE_VECTOR_SUBPARTS (idxtype)));
6522 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
6523 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
6524 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
6525 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6526 op = var;
6529 new_stmt
6530 = gimple_build_call (gs_info.decl, 5, ptr, mask, op, src, scale);
6532 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6534 if (prev_stmt_info == NULL)
6535 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6536 else
6537 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6538 prev_stmt_info = vinfo_for_stmt (new_stmt);
6540 return true;
6543 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
6545 gimple *group_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
6546 DR_GROUP_STORE_COUNT (vinfo_for_stmt (group_stmt))++;
6549 if (grouped_store)
6551 /* FORNOW */
6552 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
6554 /* We vectorize all the stmts of the interleaving group when we
6555 reach the last stmt in the group. */
6556 if (DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
6557 < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))
6558 && !slp)
6560 *vec_stmt = NULL;
6561 return true;
6564 if (slp)
6566 grouped_store = false;
6567 /* VEC_NUM is the number of vect stmts to be created for this
6568 group. */
6569 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6570 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
6571 gcc_assert (DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt)) == first_stmt);
6572 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
6573 op = vect_get_store_rhs (first_stmt);
6575 else
6576 /* VEC_NUM is the number of vect stmts to be created for this
6577 group. */
6578 vec_num = group_size;
6580 ref_type = get_group_alias_ptr_type (first_stmt);
6582 else
6583 ref_type = reference_alias_ptr_type (DR_REF (first_dr));
6585 if (dump_enabled_p ())
6586 dump_printf_loc (MSG_NOTE, vect_location,
6587 "transform store. ncopies = %d\n", ncopies);
6589 if (memory_access_type == VMAT_ELEMENTWISE
6590 || memory_access_type == VMAT_STRIDED_SLP)
6592 gimple_stmt_iterator incr_gsi;
6593 bool insert_after;
6594 gimple *incr;
6595 tree offvar;
6596 tree ivstep;
6597 tree running_off;
6598 tree stride_base, stride_step, alias_off;
6599 tree vec_oprnd;
6600 unsigned int g;
6601 /* Checked by get_load_store_type. */
6602 unsigned int const_nunits = nunits.to_constant ();
6604 gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
6605 gcc_assert (!nested_in_vect_loop_p (loop, stmt));
6607 stride_base
6608 = fold_build_pointer_plus
6609 (DR_BASE_ADDRESS (first_dr),
6610 size_binop (PLUS_EXPR,
6611 convert_to_ptrofftype (DR_OFFSET (first_dr)),
6612 convert_to_ptrofftype (DR_INIT (first_dr))));
6613 stride_step = fold_convert (sizetype, DR_STEP (first_dr));
6615 /* For a store with loop-invariant (but other than power-of-2)
6616 stride (i.e. not a grouped access) like so:
6618 for (i = 0; i < n; i += stride)
6619 array[i] = ...;
6621 we generate a new induction variable and new stores from
6622 the components of the (vectorized) rhs:
6624 for (j = 0; ; j += VF*stride)
6625 vectemp = ...;
6626 tmp1 = vectemp[0];
6627 array[j] = tmp1;
6628 tmp2 = vectemp[1];
6629 array[j + stride] = tmp2;
6633 unsigned nstores = const_nunits;
6634 unsigned lnel = 1;
6635 tree ltype = elem_type;
6636 tree lvectype = vectype;
6637 if (slp)
6639 if (group_size < const_nunits
6640 && const_nunits % group_size == 0)
6642 nstores = const_nunits / group_size;
6643 lnel = group_size;
6644 ltype = build_vector_type (elem_type, group_size);
6645 lvectype = vectype;
6647 /* First check if vec_extract optab doesn't support extraction
6648 of vector elts directly. */
6649 scalar_mode elmode = SCALAR_TYPE_MODE (elem_type);
6650 machine_mode vmode;
6651 if (!mode_for_vector (elmode, group_size).exists (&vmode)
6652 || !VECTOR_MODE_P (vmode)
6653 || !targetm.vector_mode_supported_p (vmode)
6654 || (convert_optab_handler (vec_extract_optab,
6655 TYPE_MODE (vectype), vmode)
6656 == CODE_FOR_nothing))
6658 /* Try to avoid emitting an extract of vector elements
6659 by performing the extracts using an integer type of the
6660 same size, extracting from a vector of those and then
6661 re-interpreting it as the original vector type if
6662 supported. */
6663 unsigned lsize
6664 = group_size * GET_MODE_BITSIZE (elmode);
6665 elmode = int_mode_for_size (lsize, 0).require ();
6666 unsigned int lnunits = const_nunits / group_size;
6667 /* If we can't construct such a vector fall back to
6668 element extracts from the original vector type and
6669 element size stores. */
6670 if (mode_for_vector (elmode, lnunits).exists (&vmode)
6671 && VECTOR_MODE_P (vmode)
6672 && targetm.vector_mode_supported_p (vmode)
6673 && (convert_optab_handler (vec_extract_optab,
6674 vmode, elmode)
6675 != CODE_FOR_nothing))
6677 nstores = lnunits;
6678 lnel = group_size;
6679 ltype = build_nonstandard_integer_type (lsize, 1);
6680 lvectype = build_vector_type (ltype, nstores);
6682 /* Else fall back to vector extraction anyway.
6683 Fewer stores are more important than avoiding spilling
6684 of the vector we extract from. Compared to the
6685 construction case in vectorizable_load, no store-forwarding
6686 issue exists here for reasonable archs. */
6689 else if (group_size >= const_nunits
6690 && group_size % const_nunits == 0)
6692 nstores = 1;
6693 lnel = const_nunits;
6694 ltype = vectype;
6695 lvectype = vectype;
6697 ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
6698 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6701 ivstep = stride_step;
6702 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
6703 build_int_cst (TREE_TYPE (ivstep), vf));
6705 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
6707 stride_base = cse_and_gimplify_to_preheader (loop_vinfo, stride_base);
6708 ivstep = cse_and_gimplify_to_preheader (loop_vinfo, ivstep);
6709 create_iv (stride_base, ivstep, NULL,
6710 loop, &incr_gsi, insert_after,
6711 &offvar, NULL);
6712 incr = gsi_stmt (incr_gsi);
6713 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
6715 stride_step = cse_and_gimplify_to_preheader (loop_vinfo, stride_step);
6717 prev_stmt_info = NULL;
6718 alias_off = build_int_cst (ref_type, 0);
6719 next_stmt = first_stmt;
6720 for (g = 0; g < group_size; g++)
6722 running_off = offvar;
6723 if (g)
6725 tree size = TYPE_SIZE_UNIT (ltype);
6726 tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
6727 size);
6728 tree newoff = copy_ssa_name (running_off, NULL);
6729 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6730 running_off, pos);
6731 vect_finish_stmt_generation (stmt, incr, gsi);
6732 running_off = newoff;
6734 unsigned int group_el = 0;
6735 unsigned HOST_WIDE_INT
6736 elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
6737 for (j = 0; j < ncopies; j++)
6739 /* We've set op and dt above, from vect_get_store_rhs,
6740 and first_stmt == stmt. */
6741 if (j == 0)
6743 if (slp)
6745 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL,
6746 slp_node);
6747 vec_oprnd = vec_oprnds[0];
6749 else
6751 op = vect_get_store_rhs (next_stmt);
6752 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
6755 else
6757 if (slp)
6758 vec_oprnd = vec_oprnds[j];
6759 else
6761 vect_is_simple_use (op, vinfo, &def_stmt, &rhs_dt);
6762 vec_oprnd = vect_get_vec_def_for_stmt_copy (rhs_dt,
6763 vec_oprnd);
6766 /* Pun the vector to extract from if necessary. */
6767 if (lvectype != vectype)
6769 tree tem = make_ssa_name (lvectype);
6770 gimple *pun
6771 = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
6772 lvectype, vec_oprnd));
6773 vect_finish_stmt_generation (stmt, pun, gsi);
6774 vec_oprnd = tem;
6776 for (i = 0; i < nstores; i++)
6778 tree newref, newoff;
6779 gimple *incr, *assign;
6780 tree size = TYPE_SIZE (ltype);
6781 /* Extract the i'th component. */
6782 tree pos = fold_build2 (MULT_EXPR, bitsizetype,
6783 bitsize_int (i), size);
6784 tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
6785 size, pos);
6787 elem = force_gimple_operand_gsi (gsi, elem, true,
6788 NULL_TREE, true,
6789 GSI_SAME_STMT);
6791 tree this_off = build_int_cst (TREE_TYPE (alias_off),
6792 group_el * elsz);
6793 newref = build2 (MEM_REF, ltype,
6794 running_off, this_off);
6795 vect_copy_ref_info (newref, DR_REF (first_dr));
6797 /* And store it to *running_off. */
6798 assign = gimple_build_assign (newref, elem);
6799 vect_finish_stmt_generation (stmt, assign, gsi);
6801 group_el += lnel;
6802 if (! slp
6803 || group_el == group_size)
6805 newoff = copy_ssa_name (running_off, NULL);
6806 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6807 running_off, stride_step);
6808 vect_finish_stmt_generation (stmt, incr, gsi);
6810 running_off = newoff;
6811 group_el = 0;
6813 if (g == group_size - 1
6814 && !slp)
6816 if (j == 0 && i == 0)
6817 STMT_VINFO_VEC_STMT (stmt_info)
6818 = *vec_stmt = assign;
6819 else
6820 STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign;
6821 prev_stmt_info = vinfo_for_stmt (assign);
6825 next_stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
6826 if (slp)
6827 break;
6830 vec_oprnds.release ();
6831 return true;
6834 auto_vec<tree> dr_chain (group_size);
6835 oprnds.create (group_size);
6837 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
6838 gcc_assert (alignment_support_scheme);
6839 vec_loop_masks *loop_masks
6840 = (loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
6841 ? &LOOP_VINFO_MASKS (loop_vinfo)
6842 : NULL);
6843 /* Targets with store-lane instructions must not require explicit
6844 realignment. vect_supportable_dr_alignment always returns either
6845 dr_aligned or dr_unaligned_supported for masked operations. */
6846 gcc_assert ((memory_access_type != VMAT_LOAD_STORE_LANES
6847 && !mask
6848 && !loop_masks)
6849 || alignment_support_scheme == dr_aligned
6850 || alignment_support_scheme == dr_unaligned_supported);
6852 if (memory_access_type == VMAT_CONTIGUOUS_DOWN
6853 || memory_access_type == VMAT_CONTIGUOUS_REVERSE)
6854 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
6856 tree bump;
6857 tree vec_offset = NULL_TREE;
6858 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
6860 aggr_type = NULL_TREE;
6861 bump = NULL_TREE;
6863 else if (memory_access_type == VMAT_GATHER_SCATTER)
6865 aggr_type = elem_type;
6866 vect_get_strided_load_store_ops (stmt, loop_vinfo, &gs_info,
6867 &bump, &vec_offset);
6869 else
6871 if (memory_access_type == VMAT_LOAD_STORE_LANES)
6872 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
6873 else
6874 aggr_type = vectype;
6875 bump = vect_get_data_ptr_increment (dr, aggr_type, memory_access_type);
6878 if (mask)
6879 LOOP_VINFO_HAS_MASK_STORE (loop_vinfo) = true;
6881 /* In case the vectorization factor (VF) is bigger than the number
6882 of elements that we can fit in a vectype (nunits), we have to generate
6883 more than one vector stmt - i.e. we need to "unroll" the
6884 vector stmt by a factor VF/nunits. For more details see documentation in
6885 vect_get_vec_def_for_copy_stmt. */
6887 /* In case of interleaving (non-unit grouped access):
6889 S1: &base + 2 = x2
6890 S2: &base = x0
6891 S3: &base + 1 = x1
6892 S4: &base + 3 = x3
6894 We create vectorized stores starting from base address (the access of the
6895 first stmt in the chain (S2 in the above example), when the last store stmt
6896 of the chain (S4) is reached:
6898 VS1: &base = vx2
6899 VS2: &base + vec_size*1 = vx0
6900 VS3: &base + vec_size*2 = vx1
6901 VS4: &base + vec_size*3 = vx3
6903 Then permutation statements are generated:
6905 VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
6906 VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
6909 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
6910 (the order of the data-refs in the output of vect_permute_store_chain
6911 corresponds to the order of scalar stmts in the interleaving chain - see
6912 the documentation of vect_permute_store_chain()).
6914 In case of both multiple types and interleaving, above vector stores and
6915 permutation stmts are created for every copy. The result vector stmts are
6916 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
6917 STMT_VINFO_RELATED_STMT for the next copies.
6920 prev_stmt_info = NULL;
6921 tree vec_mask = NULL_TREE;
6922 for (j = 0; j < ncopies; j++)
6925 if (j == 0)
6927 if (slp)
6929 /* Get vectorized arguments for SLP_NODE. */
6930 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
6931 NULL, slp_node);
6933 vec_oprnd = vec_oprnds[0];
6935 else
6937 /* For interleaved stores we collect vectorized defs for all the
6938 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
6939 used as an input to vect_permute_store_chain(), and OPRNDS as
6940 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
6942 If the store is not grouped, DR_GROUP_SIZE is 1, and DR_CHAIN and
6943 OPRNDS are of size 1. */
6944 next_stmt = first_stmt;
6945 for (i = 0; i < group_size; i++)
6947 /* Since gaps are not supported for interleaved stores,
6948 DR_GROUP_SIZE is the exact number of stmts in the chain.
6949 Therefore, NEXT_STMT can't be NULL_TREE. In case there
6950 is no interleaving, DR_GROUP_SIZE is 1, and only one
6951 iteration of the loop will be executed. */
6952 op = vect_get_store_rhs (next_stmt);
6953 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
6954 dr_chain.quick_push (vec_oprnd);
6955 oprnds.quick_push (vec_oprnd);
6956 next_stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
6958 if (mask)
6959 vec_mask = vect_get_vec_def_for_operand (mask, stmt,
6960 mask_vectype);
6963 /* We should have caught mismatched types earlier. */
6964 gcc_assert (useless_type_conversion_p (vectype,
6965 TREE_TYPE (vec_oprnd)));
6966 bool simd_lane_access_p
6967 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
6968 if (simd_lane_access_p
6969 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
6970 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
6971 && integer_zerop (DR_OFFSET (first_dr))
6972 && integer_zerop (DR_INIT (first_dr))
6973 && alias_sets_conflict_p (get_alias_set (aggr_type),
6974 get_alias_set (TREE_TYPE (ref_type))))
6976 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
6977 dataref_offset = build_int_cst (ref_type, 0);
6978 inv_p = false;
6980 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
6982 vect_get_gather_scatter_ops (loop, stmt, &gs_info,
6983 &dataref_ptr, &vec_offset);
6984 inv_p = false;
6986 else
6987 dataref_ptr
6988 = vect_create_data_ref_ptr (first_stmt, aggr_type,
6989 simd_lane_access_p ? loop : NULL,
6990 offset, &dummy, gsi, &ptr_incr,
6991 simd_lane_access_p, &inv_p,
6992 NULL_TREE, bump);
6993 gcc_assert (bb_vinfo || !inv_p);
6995 else
6997 /* For interleaved stores we created vectorized defs for all the
6998 defs stored in OPRNDS in the previous iteration (previous copy).
6999 DR_CHAIN is then used as an input to vect_permute_store_chain(),
7000 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
7001 next copy.
7002 If the store is not grouped, DR_GROUP_SIZE is 1, and DR_CHAIN and
7003 OPRNDS are of size 1. */
7004 for (i = 0; i < group_size; i++)
7006 op = oprnds[i];
7007 vect_is_simple_use (op, vinfo, &def_stmt, &rhs_dt);
7008 vec_oprnd = vect_get_vec_def_for_stmt_copy (rhs_dt, op);
7009 dr_chain[i] = vec_oprnd;
7010 oprnds[i] = vec_oprnd;
7012 if (mask)
7013 vec_mask = vect_get_vec_def_for_stmt_copy (mask_dt, vec_mask);
7014 if (dataref_offset)
7015 dataref_offset
7016 = int_const_binop (PLUS_EXPR, dataref_offset, bump);
7017 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
7018 vec_offset = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt,
7019 vec_offset);
7020 else
7021 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
7022 bump);
7025 if (memory_access_type == VMAT_LOAD_STORE_LANES)
7027 tree vec_array;
7029 /* Get an array into which we can store the individual vectors. */
7030 vec_array = create_vector_array (vectype, vec_num);
7032 /* Invalidate the current contents of VEC_ARRAY. This should
7033 become an RTL clobber too, which prevents the vector registers
7034 from being upward-exposed. */
7035 vect_clobber_variable (stmt, gsi, vec_array);
7037 /* Store the individual vectors into the array. */
7038 for (i = 0; i < vec_num; i++)
7040 vec_oprnd = dr_chain[i];
7041 write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
7044 tree final_mask = NULL;
7045 if (loop_masks)
7046 final_mask = vect_get_loop_mask (gsi, loop_masks, ncopies,
7047 vectype, j);
7048 if (vec_mask)
7049 final_mask = prepare_load_store_mask (mask_vectype, final_mask,
7050 vec_mask, gsi);
7052 gcall *call;
7053 if (final_mask)
7055 /* Emit:
7056 MASK_STORE_LANES (DATAREF_PTR, ALIAS_PTR, VEC_MASK,
7057 VEC_ARRAY). */
7058 unsigned int align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
7059 tree alias_ptr = build_int_cst (ref_type, align);
7060 call = gimple_build_call_internal (IFN_MASK_STORE_LANES, 4,
7061 dataref_ptr, alias_ptr,
7062 final_mask, vec_array);
7064 else
7066 /* Emit:
7067 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
7068 data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
7069 call = gimple_build_call_internal (IFN_STORE_LANES, 1,
7070 vec_array);
7071 gimple_call_set_lhs (call, data_ref);
7073 gimple_call_set_nothrow (call, true);
7074 new_stmt = call;
7075 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7077 /* Record that VEC_ARRAY is now dead. */
7078 vect_clobber_variable (stmt, gsi, vec_array);
7080 else
7082 new_stmt = NULL;
7083 if (grouped_store)
7085 if (j == 0)
7086 result_chain.create (group_size);
7087 /* Permute. */
7088 vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
7089 &result_chain);
7092 next_stmt = first_stmt;
7093 for (i = 0; i < vec_num; i++)
7095 unsigned align, misalign;
7097 tree final_mask = NULL_TREE;
7098 if (loop_masks)
7099 final_mask = vect_get_loop_mask (gsi, loop_masks,
7100 vec_num * ncopies,
7101 vectype, vec_num * j + i);
7102 if (vec_mask)
7103 final_mask = prepare_load_store_mask (mask_vectype, final_mask,
7104 vec_mask, gsi);
7106 if (memory_access_type == VMAT_GATHER_SCATTER)
7108 tree scale = size_int (gs_info.scale);
7109 gcall *call;
7110 if (loop_masks)
7111 call = gimple_build_call_internal
7112 (IFN_MASK_SCATTER_STORE, 5, dataref_ptr, vec_offset,
7113 scale, vec_oprnd, final_mask);
7114 else
7115 call = gimple_build_call_internal
7116 (IFN_SCATTER_STORE, 4, dataref_ptr, vec_offset,
7117 scale, vec_oprnd);
7118 gimple_call_set_nothrow (call, true);
7119 new_stmt = call;
7120 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7121 break;
7124 if (i > 0)
7125 /* Bump the vector pointer. */
7126 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7127 stmt, bump);
7129 if (slp)
7130 vec_oprnd = vec_oprnds[i];
7131 else if (grouped_store)
7132 /* For grouped stores vectorized defs are interleaved in
7133 vect_permute_store_chain(). */
7134 vec_oprnd = result_chain[i];
7136 align = DR_TARGET_ALIGNMENT (first_dr);
7137 if (aligned_access_p (first_dr))
7138 misalign = 0;
7139 else if (DR_MISALIGNMENT (first_dr) == -1)
7141 align = dr_alignment (vect_dr_behavior (first_dr));
7142 misalign = 0;
7144 else
7145 misalign = DR_MISALIGNMENT (first_dr);
7146 if (dataref_offset == NULL_TREE
7147 && TREE_CODE (dataref_ptr) == SSA_NAME)
7148 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
7149 misalign);
7151 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
7153 tree perm_mask = perm_mask_for_reverse (vectype);
7154 tree perm_dest
7155 = vect_create_destination_var (vect_get_store_rhs (stmt),
7156 vectype);
7157 tree new_temp = make_ssa_name (perm_dest);
7159 /* Generate the permute statement. */
7160 gimple *perm_stmt
7161 = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
7162 vec_oprnd, perm_mask);
7163 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
7165 perm_stmt = SSA_NAME_DEF_STMT (new_temp);
7166 vec_oprnd = new_temp;
7169 /* Arguments are ready. Create the new vector stmt. */
7170 if (final_mask)
7172 align = least_bit_hwi (misalign | align);
7173 tree ptr = build_int_cst (ref_type, align);
7174 gcall *call
7175 = gimple_build_call_internal (IFN_MASK_STORE, 4,
7176 dataref_ptr, ptr,
7177 final_mask, vec_oprnd);
7178 gimple_call_set_nothrow (call, true);
7179 new_stmt = call;
7181 else
7183 data_ref = fold_build2 (MEM_REF, vectype,
7184 dataref_ptr,
7185 dataref_offset
7186 ? dataref_offset
7187 : build_int_cst (ref_type, 0));
7188 if (aligned_access_p (first_dr))
7190 else if (DR_MISALIGNMENT (first_dr) == -1)
7191 TREE_TYPE (data_ref)
7192 = build_aligned_type (TREE_TYPE (data_ref),
7193 align * BITS_PER_UNIT);
7194 else
7195 TREE_TYPE (data_ref)
7196 = build_aligned_type (TREE_TYPE (data_ref),
7197 TYPE_ALIGN (elem_type));
7198 vect_copy_ref_info (data_ref, DR_REF (first_dr));
7199 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
7201 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7203 if (slp)
7204 continue;
7206 next_stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
7207 if (!next_stmt)
7208 break;
7211 if (!slp)
7213 if (j == 0)
7214 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7215 else
7216 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7217 prev_stmt_info = vinfo_for_stmt (new_stmt);
7221 oprnds.release ();
7222 result_chain.release ();
7223 vec_oprnds.release ();
7225 return true;
7228 /* Given a vector type VECTYPE, turns permutation SEL into the equivalent
7229 VECTOR_CST mask. No checks are made that the target platform supports the
7230 mask, so callers may wish to test can_vec_perm_const_p separately, or use
7231 vect_gen_perm_mask_checked. */
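/* For example, for a four-element vector a reversing selection
   {3, 2, 1, 0} is turned into a VECTOR_CST { 3, 2, 1, 0 } whose
   elements have ssizetype.  */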
7233 tree
7234 vect_gen_perm_mask_any (tree vectype, const vec_perm_indices &sel)
7236 tree mask_type;
7238 poly_uint64 nunits = sel.length ();
7239 gcc_assert (known_eq (nunits, TYPE_VECTOR_SUBPARTS (vectype)));
7241 mask_type = build_vector_type (ssizetype, nunits);
7242 return vec_perm_indices_to_tree (mask_type, sel);
7245 /* Checked version of vect_gen_perm_mask_any. Asserts can_vec_perm_const_p,
7246 i.e. that the target supports the pattern _for arbitrary input vectors_. */
7248 tree
7249 vect_gen_perm_mask_checked (tree vectype, const vec_perm_indices &sel)
7251 gcc_assert (can_vec_perm_const_p (TYPE_MODE (vectype), sel));
7252 return vect_gen_perm_mask_any (vectype, sel);
7255 /* Given vector variables X and Y that were generated for the scalar
7256 STMT, generate instructions to permute the vector elements of X and Y
7257 using the permutation mask MASK_VEC, insert them at *GSI and return the
7258 permuted vector variable. */
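/* The generated statement has the form

     perm_dest = VEC_PERM_EXPR <X, Y, MASK_VEC>;

   e.g. with X == Y and MASK_VEC == { 3, 2, 1, 0 } the result is X
   with its four elements in reverse order.  */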
7260 static tree
7261 permute_vec_elements (tree x, tree y, tree mask_vec, gimple *stmt,
7262 gimple_stmt_iterator *gsi)
7264 tree vectype = TREE_TYPE (x);
7265 tree perm_dest, data_ref;
7266 gimple *perm_stmt;
7268 tree scalar_dest = gimple_get_lhs (stmt);
7269 if (TREE_CODE (scalar_dest) == SSA_NAME)
7270 perm_dest = vect_create_destination_var (scalar_dest, vectype);
7271 else
7272 perm_dest = vect_get_new_vect_var (vectype, vect_simple_var, NULL);
7273 data_ref = make_ssa_name (perm_dest);
7275 /* Generate the permute statement. */
7276 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
7277 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
7279 return data_ref;
7282 /* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
7283 inserting them on the loop's preheader edge. Returns true if we
7284 were successful in doing so (and thus STMT can then be moved),
7285 otherwise returns false. */
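/* For example, if STMT is the invariant load  tmp_2 = *p_1;  and p_1
   is defined inside LOOP by  p_1 = &a[4];  whose own operands are all
   loop invariant, the definition of p_1 is inserted on the preheader
   edge so that STMT itself can subsequently be moved out of LOOP.  */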
7287 static bool
7288 hoist_defs_of_uses (gimple *stmt, struct loop *loop)
7290 ssa_op_iter i;
7291 tree op;
7292 bool any = false;
7294 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
7296 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
7297 if (!gimple_nop_p (def_stmt)
7298 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
7300 /* Make sure we don't need to recurse. While we could do
7301 so in simple cases, for more complex use webs
7302 we don't have an easy way to preserve stmt order to fulfil
7303 dependencies within them. */
7304 tree op2;
7305 ssa_op_iter i2;
7306 if (gimple_code (def_stmt) == GIMPLE_PHI)
7307 return false;
7308 FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
7310 gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
7311 if (!gimple_nop_p (def_stmt2)
7312 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
7313 return false;
7315 any = true;
7319 if (!any)
7320 return true;
7322 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
7324 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
7325 if (!gimple_nop_p (def_stmt)
7326 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
7328 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
7329 gsi_remove (&gsi, false);
7330 gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
7334 return true;
7337 /* vectorizable_load.
7339 Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
7340 can be vectorized.
7341 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
7342 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
7343 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
7345 static bool
7346 vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
7347 slp_tree slp_node, slp_instance slp_node_instance,
7348 stmt_vector_for_cost *cost_vec)
7350 tree scalar_dest;
7351 tree vec_dest = NULL;
7352 tree data_ref = NULL;
7353 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7354 stmt_vec_info prev_stmt_info;
7355 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7356 struct loop *loop = NULL;
7357 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
7358 bool nested_in_vect_loop = false;
7359 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
7360 tree elem_type;
7361 tree new_temp;
7362 machine_mode mode;
7363 gimple *new_stmt = NULL;
7364 tree dummy;
7365 enum dr_alignment_support alignment_support_scheme;
7366 tree dataref_ptr = NULL_TREE;
7367 tree dataref_offset = NULL_TREE;
7368 gimple *ptr_incr = NULL;
7369 int ncopies;
7370 int i, j;
7371 unsigned int group_size;
7372 poly_uint64 group_gap_adj;
7373 tree msq = NULL_TREE, lsq;
7374 tree offset = NULL_TREE;
7375 tree byte_offset = NULL_TREE;
7376 tree realignment_token = NULL_TREE;
7377 gphi *phi = NULL;
7378 vec<tree> dr_chain = vNULL;
7379 bool grouped_load = false;
7380 gimple *first_stmt;
7381 gimple *first_stmt_for_drptr = NULL;
7382 bool inv_p;
7383 bool compute_in_loop = false;
7384 struct loop *at_loop;
7385 int vec_num;
7386 bool slp = (slp_node != NULL);
7387 bool slp_perm = false;
7388 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7389 poly_uint64 vf;
7390 tree aggr_type;
7391 gather_scatter_info gs_info;
7392 vec_info *vinfo = stmt_info->vinfo;
7393 tree ref_type;
7394 enum vect_def_type mask_dt = vect_unknown_def_type;
7396 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
7397 return false;
7399 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
7400 && ! vec_stmt)
7401 return false;
7403 tree mask = NULL_TREE, mask_vectype = NULL_TREE;
7404 if (is_gimple_assign (stmt))
7406 scalar_dest = gimple_assign_lhs (stmt);
7407 if (TREE_CODE (scalar_dest) != SSA_NAME)
7408 return false;
7410 tree_code code = gimple_assign_rhs_code (stmt);
7411 if (code != ARRAY_REF
7412 && code != BIT_FIELD_REF
7413 && code != INDIRECT_REF
7414 && code != COMPONENT_REF
7415 && code != IMAGPART_EXPR
7416 && code != REALPART_EXPR
7417 && code != MEM_REF
7418 && TREE_CODE_CLASS (code) != tcc_declaration)
7419 return false;
7421 else
7423 gcall *call = dyn_cast <gcall *> (stmt);
7424 if (!call || !gimple_call_internal_p (call))
7425 return false;
7427 internal_fn ifn = gimple_call_internal_fn (call);
7428 if (!internal_load_fn_p (ifn))
7429 return false;
7431 scalar_dest = gimple_call_lhs (call);
7432 if (!scalar_dest)
7433 return false;
7435 if (slp_node != NULL)
7437 if (dump_enabled_p ())
7438 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7439 "SLP of masked loads not supported.\n");
7440 return false;
7443 int mask_index = internal_fn_mask_index (ifn);
7444 if (mask_index >= 0)
7446 mask = gimple_call_arg (call, mask_index);
7447 if (!vect_check_load_store_mask (stmt, mask, &mask_dt,
7448 &mask_vectype))
7449 return false;
7453 if (!STMT_VINFO_DATA_REF (stmt_info))
7454 return false;
7456 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7457 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
7459 if (loop_vinfo)
7461 loop = LOOP_VINFO_LOOP (loop_vinfo);
7462 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
7463 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
7465 else
7466 vf = 1;
7468 /* Multiple types in SLP are handled by creating the appropriate number of
7469 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
7470 case of SLP. */
7471 if (slp)
7472 ncopies = 1;
7473 else
7474 ncopies = vect_get_num_copies (loop_vinfo, vectype);
7476 gcc_assert (ncopies >= 1);
7478 /* FORNOW. This restriction should be relaxed. */
7479 if (nested_in_vect_loop && ncopies > 1)
7481 if (dump_enabled_p ())
7482 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7483 "multiple types in nested loop.\n");
7484 return false;
7487 /* Invalidate assumptions made by dependence analysis when vectorization
7488 on the unrolled body effectively re-orders stmts. */
7489 if (ncopies > 1
7490 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
7491 && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
7492 STMT_VINFO_MIN_NEG_DIST (stmt_info)))
7494 if (dump_enabled_p ())
7495 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7496 "cannot perform implicit CSE when unrolling "
7497 "with negative dependence distance\n");
7498 return false;
7501 elem_type = TREE_TYPE (vectype);
7502 mode = TYPE_MODE (vectype);
7504 /* FORNOW. In some cases we can vectorize even if the data type is not
7505 supported (e.g. data copies). */
7506 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
7508 if (dump_enabled_p ())
7509 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7510 "Aligned load, but unsupported type.\n");
7511 return false;
7514 /* Check if the load is a part of an interleaving chain. */
7515 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
7517 grouped_load = true;
7518 /* FORNOW */
7519 gcc_assert (!nested_in_vect_loop);
7520 gcc_assert (!STMT_VINFO_GATHER_SCATTER_P (stmt_info));
7522 first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
7523 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
7525 if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
7526 slp_perm = true;
7528 /* Invalidate assumptions made by dependence analysis when vectorization
7529 on the unrolled body effectively re-orders stmts. */
7530 if (!PURE_SLP_STMT (stmt_info)
7531 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
7532 && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
7533 STMT_VINFO_MIN_NEG_DIST (stmt_info)))
7535 if (dump_enabled_p ())
7536 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7537 "cannot perform implicit CSE when performing "
7538 "group loads with negative dependence distance\n");
7539 return false;
7542 /* Similarly, when the stmt is a load that is both part of an SLP
7543 instance and a loop vectorized stmt via the same-dr mechanism,
7544 we have to give up. */
7545 if (DR_GROUP_SAME_DR_STMT (stmt_info)
7546 && (STMT_SLP_TYPE (stmt_info)
7547 != STMT_SLP_TYPE (vinfo_for_stmt
7548 (DR_GROUP_SAME_DR_STMT (stmt_info)))))
7550 if (dump_enabled_p ())
7551 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7552 "conflicting SLP types for CSEd load\n");
7553 return false;
7556 else
7557 group_size = 1;
7559 vect_memory_access_type memory_access_type;
7560 if (!get_load_store_type (stmt, vectype, slp, mask, VLS_LOAD, ncopies,
7561 &memory_access_type, &gs_info))
7562 return false;
7564 if (mask)
7566 if (memory_access_type == VMAT_CONTIGUOUS)
7568 machine_mode vec_mode = TYPE_MODE (vectype);
7569 if (!VECTOR_MODE_P (vec_mode)
7570 || !can_vec_mask_load_store_p (vec_mode,
7571 TYPE_MODE (mask_vectype), true))
7572 return false;
7574 else if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl)
7576 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
7577 tree masktype
7578 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
7579 if (TREE_CODE (masktype) == INTEGER_TYPE)
7581 if (dump_enabled_p ())
7582 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7583 "masked gather with integer mask not"
7584 " supported.");
7585 return false;
7588 else if (memory_access_type != VMAT_LOAD_STORE_LANES
7589 && memory_access_type != VMAT_GATHER_SCATTER)
7591 if (dump_enabled_p ())
7592 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7593 "unsupported access type for masked load.\n");
7594 return false;
7598 if (!vec_stmt) /* transformation not required. */
7600 if (!slp)
7601 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
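/* If the loop may still be fully masked, let check_load_store_masking work
   out whether this load can be supported with loop masks (recording the
   masks it would need, or ruling out full masking otherwise).  */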
7603 if (loop_vinfo
7604 && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
7605 check_load_store_masking (loop_vinfo, vectype, VLS_LOAD, group_size,
7606 memory_access_type, &gs_info);
7608 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
7609 vect_model_load_cost (stmt_info, ncopies, memory_access_type,
7610 slp_node_instance, slp_node, cost_vec);
7611 return true;
7614 if (!slp)
7615 gcc_assert (memory_access_type
7616 == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
7618 if (dump_enabled_p ())
7619 dump_printf_loc (MSG_NOTE, vect_location,
7620 "transform load. ncopies = %d\n", ncopies);
7622 /* Transform. */
7624 ensure_base_align (dr);
7626 if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl)
7628 vect_build_gather_load_calls (stmt, gsi, vec_stmt, &gs_info, mask,
7629 mask_dt);
7630 return true;
7633 if (memory_access_type == VMAT_ELEMENTWISE
7634 || memory_access_type == VMAT_STRIDED_SLP)
7636 gimple_stmt_iterator incr_gsi;
7637 bool insert_after;
7638 gimple *incr;
7639 tree offvar;
7640 tree ivstep;
7641 tree running_off;
7642 vec<constructor_elt, va_gc> *v = NULL;
7643 tree stride_base, stride_step, alias_off;
7644 /* Checked by get_load_store_type. */
7645 unsigned int const_nunits = nunits.to_constant ();
7646 unsigned HOST_WIDE_INT cst_offset = 0;
7648 gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
7649 gcc_assert (!nested_in_vect_loop);
7651 if (grouped_load)
7653 first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
7654 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
7656 else
7658 first_stmt = stmt;
7659 first_dr = dr;
7661 if (slp && grouped_load)
7663 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
7664 ref_type = get_group_alias_ptr_type (first_stmt);
7666 else
7668 if (grouped_load)
7669 cst_offset
7670 = (tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)))
7671 * vect_get_place_in_interleaving_chain (stmt, first_stmt));
7672 group_size = 1;
7673 ref_type = reference_alias_ptr_type (DR_REF (dr));
7676 stride_base
7677 = fold_build_pointer_plus
7678 (DR_BASE_ADDRESS (first_dr),
7679 size_binop (PLUS_EXPR,
7680 convert_to_ptrofftype (DR_OFFSET (first_dr)),
7681 convert_to_ptrofftype (DR_INIT (first_dr))));
7682 stride_step = fold_convert (sizetype, DR_STEP (first_dr));
7684 /* For a load with loop-invariant (but other than power-of-2)
7685 stride (i.e. not a grouped access) like so:
7687 for (i = 0; i < n; i += stride)
7688 ... = array[i];
7690 we generate a new induction variable and new accesses to
7691 form a new vector (or vectors, depending on ncopies):
7693 for (j = 0; ; j += VF*stride)
7694 tmp1 = array[j];
7695 tmp2 = array[j + stride];
7697 vectemp = {tmp1, tmp2, ...}
7700 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
7701 build_int_cst (TREE_TYPE (stride_step), vf));
7703 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
7705 stride_base = cse_and_gimplify_to_preheader (loop_vinfo, stride_base);
7706 ivstep = cse_and_gimplify_to_preheader (loop_vinfo, ivstep);
7707 create_iv (stride_base, ivstep, NULL,
7708 loop, &incr_gsi, insert_after,
7709 &offvar, NULL);
7710 incr = gsi_stmt (incr_gsi);
7711 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
7713 stride_step = cse_and_gimplify_to_preheader (loop_vinfo, stride_step);
7715 prev_stmt_info = NULL;
7716 running_off = offvar;
7717 alias_off = build_int_cst (ref_type, 0);
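/* NLOADS is the number of scalar or sub-vector loads emitted per generated
   vector, LNEL the number of group elements each such load covers, LTYPE the
   type used for each load, and LVECTYPE the vector type built from the loads
   (if it differs from VECTYPE the result is VIEW_CONVERTed back below).  */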
7718 int nloads = const_nunits;
7719 int lnel = 1;
7720 tree ltype = TREE_TYPE (vectype);
7721 tree lvectype = vectype;
7722 auto_vec<tree> dr_chain;
7723 if (memory_access_type == VMAT_STRIDED_SLP)
7725 if (group_size < const_nunits)
7727 /* First check if vec_init optab supports construction from
7728 vector elts directly. */
7729 scalar_mode elmode = SCALAR_TYPE_MODE (TREE_TYPE (vectype));
7730 machine_mode vmode;
7731 if (mode_for_vector (elmode, group_size).exists (&vmode)
7732 && VECTOR_MODE_P (vmode)
7733 && targetm.vector_mode_supported_p (vmode)
7734 && (convert_optab_handler (vec_init_optab,
7735 TYPE_MODE (vectype), vmode)
7736 != CODE_FOR_nothing))
7738 nloads = const_nunits / group_size;
7739 lnel = group_size;
7740 ltype = build_vector_type (TREE_TYPE (vectype), group_size);
7742 else
7744 /* Otherwise avoid emitting a constructor of vector elements
7745 by performing the loads using an integer type of the same
7746 size, constructing a vector of those and then
7747 re-interpreting it as the original vector type.
7748 This avoids a huge runtime penalty due to the general
7749 inability to perform store forwarding from smaller stores
7750 to a larger load. */
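/* Purely as an illustration: for a V8SI vectype with GROUP_SIZE == 2 this
   performs four 64-bit integer loads, builds a vector of four 64-bit
   integers from them and view-converts the result back to V8SI.  */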
7751 unsigned lsize
7752 = group_size * TYPE_PRECISION (TREE_TYPE (vectype));
7753 elmode = int_mode_for_size (lsize, 0).require ();
7754 unsigned int lnunits = const_nunits / group_size;
7755 /* If we can't construct such a vector fall back to
7756 element loads of the original vector type. */
7757 if (mode_for_vector (elmode, lnunits).exists (&vmode)
7758 && VECTOR_MODE_P (vmode)
7759 && targetm.vector_mode_supported_p (vmode)
7760 && (convert_optab_handler (vec_init_optab, vmode, elmode)
7761 != CODE_FOR_nothing))
7763 nloads = lnunits;
7764 lnel = group_size;
7765 ltype = build_nonstandard_integer_type (lsize, 1);
7766 lvectype = build_vector_type (ltype, nloads);
7770 else
7772 nloads = 1;
7773 lnel = const_nunits;
7774 ltype = vectype;
7776 ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
7778 /* For an element-wise access of a single-element vectype, load the whole vector(1) scalar_type directly. */
7779 else if (nloads == 1)
7780 ltype = vectype;
7782 if (slp)
7784 /* For SLP permutation support we need to load the whole group,
7785 not only the number of vector stmts the permutation result
7786 fits in. */
7787 if (slp_perm)
7789 /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for
7790 variable VF. */
7791 unsigned int const_vf = vf.to_constant ();
7792 ncopies = CEIL (group_size * const_vf, const_nunits);
7793 dr_chain.create (ncopies);
7795 else
7796 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7798 unsigned int group_el = 0;
7799 unsigned HOST_WIDE_INT
7800 elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
7801 for (j = 0; j < ncopies; j++)
7803 if (nloads > 1)
7804 vec_alloc (v, nloads);
7805 for (i = 0; i < nloads; i++)
7807 tree this_off = build_int_cst (TREE_TYPE (alias_off),
7808 group_el * elsz + cst_offset);
7809 tree data_ref = build2 (MEM_REF, ltype, running_off, this_off);
7810 vect_copy_ref_info (data_ref, DR_REF (first_dr));
7811 new_stmt = gimple_build_assign (make_ssa_name (ltype), data_ref);
7812 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7813 if (nloads > 1)
7814 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE,
7815 gimple_assign_lhs (new_stmt));
7817 group_el += lnel;
7818 if (! slp
7819 || group_el == group_size)
7821 tree newoff = copy_ssa_name (running_off);
7822 gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
7823 running_off, stride_step);
7824 vect_finish_stmt_generation (stmt, incr, gsi);
7826 running_off = newoff;
7827 group_el = 0;
7830 if (nloads > 1)
7832 tree vec_inv = build_constructor (lvectype, v);
7833 new_temp = vect_init_vector (stmt, vec_inv, lvectype, gsi);
7834 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7835 if (lvectype != vectype)
7837 new_stmt = gimple_build_assign (make_ssa_name (vectype),
7838 VIEW_CONVERT_EXPR,
7839 build1 (VIEW_CONVERT_EXPR,
7840 vectype, new_temp));
7841 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7845 if (slp)
7847 if (slp_perm)
7848 dr_chain.quick_push (gimple_assign_lhs (new_stmt));
7849 else
7850 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7852 else
7854 if (j == 0)
7855 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7856 else
7857 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7858 prev_stmt_info = vinfo_for_stmt (new_stmt);
7861 if (slp_perm)
7863 unsigned n_perms;
7864 vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
7865 slp_node_instance, false, &n_perms);
7867 return true;
7870 if (memory_access_type == VMAT_GATHER_SCATTER
7871 || (!slp && memory_access_type == VMAT_CONTIGUOUS))
7872 grouped_load = false;
7874 if (grouped_load)
7876 first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
7877 group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
7878 /* For SLP vectorization we directly vectorize a subchain
7879 without permutation. */
7880 if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
7881 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
7882 /* For BB vectorization always use the first stmt to base
7883 the data ref pointer on. */
7884 if (bb_vinfo)
7885 first_stmt_for_drptr = SLP_TREE_SCALAR_STMTS (slp_node)[0];
7887 /* Check if the chain of loads is already vectorized. */
7888 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
7889 /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
7890 ??? But we can only do so if there is exactly one
7891 as we have no way to get at the rest. Leave the CSE
7892 opportunity alone.
7893 ??? With the group load eventually participating
7894 in multiple different permutations (having multiple
7895 slp nodes which refer to the same group) the CSE
7896 would even generate wrong code. See PR56270. */
7897 && !slp)
7899 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
7900 return true;
7902 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
7903 group_gap_adj = 0;
7905 /* VEC_NUM is the number of vect stmts to be created for this group. */
7906 if (slp)
7908 grouped_load = false;
7909 /* For SLP permutation support we need to load the whole group,
7910 not only the number of vector stmts the permutation result
7911 fits in. */
7912 if (slp_perm)
7914 /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for
7915 variable VF. */
7916 unsigned int const_vf = vf.to_constant ();
7917 unsigned int const_nunits = nunits.to_constant ();
7918 vec_num = CEIL (group_size * const_vf, const_nunits);
7919 group_gap_adj = vf * group_size - nunits * vec_num;
7921 else
7923 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7924 group_gap_adj
7925 = group_size - SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
7928 else
7929 vec_num = group_size;
7931 ref_type = get_group_alias_ptr_type (first_stmt);
7933 else
7935 first_stmt = stmt;
7936 first_dr = dr;
7937 group_size = vec_num = 1;
7938 group_gap_adj = 0;
7939 ref_type = reference_alias_ptr_type (DR_REF (first_dr));
7942 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
7943 gcc_assert (alignment_support_scheme);
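/* LOOP_MASKS is non-null only when the loop is fully masked; in that case
   the loads generated below are given a loop mask obtained from
   vect_get_loop_mask (combined with the user's mask, if any, by
   prepare_load_store_mask).  */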
7944 vec_loop_masks *loop_masks
7945 = (loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
7946 ? &LOOP_VINFO_MASKS (loop_vinfo)
7947 : NULL);
7948 /* Targets with load-lane instructions must not require explicit
7949 realignment. vect_supportable_dr_alignment always returns either
7950 dr_aligned or dr_unaligned_supported for masked operations. */
7951 gcc_assert ((memory_access_type != VMAT_LOAD_STORE_LANES
7952 && !mask
7953 && !loop_masks)
7954 || alignment_support_scheme == dr_aligned
7955 || alignment_support_scheme == dr_unaligned_supported);
7957 /* In case the vectorization factor (VF) is bigger than the number
7958 of elements that we can fit in a vectype (nunits), we have to generate
7959 more than one vector stmt - i.e. we need to "unroll" the
7960 vector stmt by a factor VF/nunits. In doing so, we record a pointer
7961 from one copy of the vector stmt to the next, in the field
7962 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
7963 stages to find the correct vector defs to be used when vectorizing
7964 stmts that use the defs of the current stmt. The example below
7965 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
7966 need to create 4 vectorized stmts):
7968 before vectorization:
7969 RELATED_STMT VEC_STMT
7970 S1: x = memref - -
7971 S2: z = x + 1 - -
7973 step 1: vectorize stmt S1:
7974 We first create the vector stmt VS1_0, and, as usual, record a
7975 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
7976 Next, we create the vector stmt VS1_1, and record a pointer to
7977 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
7978 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
7979 stmts and pointers:
7980 RELATED_STMT VEC_STMT
7981 VS1_0: vx0 = memref0 VS1_1 -
7982 VS1_1: vx1 = memref1 VS1_2 -
7983 VS1_2: vx2 = memref2 VS1_3 -
7984 VS1_3: vx3 = memref3 - -
7985 S1: x = load - VS1_0
7986 S2: z = x + 1 - -
7988 See the documentation of vect_get_vec_def_for_stmt_copy for how the
7989 information we recorded in the RELATED_STMT field is used to vectorize
7990 stmt S2. */
7992 /* In case of interleaving (non-unit grouped access):
7994 S1: x2 = &base + 2
7995 S2: x0 = &base
7996 S3: x1 = &base + 1
7997 S4: x3 = &base + 3
7999 Vectorized loads are created in the order of memory accesses
8000 starting from the access of the first stmt of the chain:
8002 VS1: vx0 = &base
8003 VS2: vx1 = &base + vec_size*1
8004 VS3: vx2 = &base + vec_size*2
8005 VS4: vx3 = &base + vec_size*3
8007 Then permutation statements are generated:
8009 VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
8010 VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
8013 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
8014 (the order of the data-refs in the output of vect_permute_load_chain
8015 corresponds to the order of scalar stmts in the interleaving chain - see
8016 the documentation of vect_permute_load_chain()).
8017 The generation of permutation stmts and recording them in
8018 STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().
8020 In case of both multiple types and interleaving, the vector loads and
8021 permutation stmts above are created for every copy. The result vector
8022 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
8023 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
8025 /* If the data reference is aligned (dr_aligned) or potentially unaligned
8026 on a target that supports unaligned accesses (dr_unaligned_supported)
8027 we generate the following code:
8028 p = initial_addr;
8029 indx = 0;
8030 loop {
8031 p = p + indx * vectype_size;
8032 vec_dest = *(p);
8033 indx = indx + 1;
8036 Otherwise, the data reference is potentially unaligned on a target that
8037 does not support unaligned accesses (dr_explicit_realign_optimized) -
8038 then generate the following code, in which the data in each iteration is
8039 obtained by two vector loads, one from the previous iteration, and one
8040 from the current iteration:
8041 p1 = initial_addr;
8042 msq_init = *(floor(p1))
8043 p2 = initial_addr + VS - 1;
8044 realignment_token = call target_builtin;
8045 indx = 0;
8046 loop {
8047 p2 = p2 + indx * vectype_size
8048 lsq = *(floor(p2))
8049 vec_dest = realign_load (msq, lsq, realignment_token)
8050 indx = indx + 1;
8051 msq = lsq;
8052 } */
8054 /* If the misalignment remains the same throughout the execution of the
8055 loop, we can create the init_addr and permutation mask at the loop
8056 preheader. Otherwise, it needs to be created inside the loop.
8057 This can only occur when vectorizing memory accesses in the inner-loop
8058 nested within an outer-loop that is being vectorized. */
8060 if (nested_in_vect_loop
8061 && !multiple_p (DR_STEP_ALIGNMENT (dr),
8062 GET_MODE_SIZE (TYPE_MODE (vectype))))
8064 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
8065 compute_in_loop = true;
8068 if ((alignment_support_scheme == dr_explicit_realign_optimized
8069 || alignment_support_scheme == dr_explicit_realign)
8070 && !compute_in_loop)
8072 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
8073 alignment_support_scheme, NULL_TREE,
8074 &at_loop);
8075 if (alignment_support_scheme == dr_explicit_realign_optimized)
8077 phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
8078 byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
8079 size_one_node);
8082 else
8083 at_loop = loop;
8085 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
8086 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
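/* Choose the aggregate type the data-ref pointer advances by, and the
   corresponding bump: nothing for gather/scatter data refs (their pointer
   and offsets come from the gather/scatter info), the element type when a
   strided access is implemented via gather/scatter operations, an array of
   VEC_NUM vectors for load-lanes, and a single vector otherwise.  */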
8088 tree bump;
8089 tree vec_offset = NULL_TREE;
8090 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
8092 aggr_type = NULL_TREE;
8093 bump = NULL_TREE;
8095 else if (memory_access_type == VMAT_GATHER_SCATTER)
8097 aggr_type = elem_type;
8098 vect_get_strided_load_store_ops (stmt, loop_vinfo, &gs_info,
8099 &bump, &vec_offset);
8101 else
8103 if (memory_access_type == VMAT_LOAD_STORE_LANES)
8104 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
8105 else
8106 aggr_type = vectype;
8107 bump = vect_get_data_ptr_increment (dr, aggr_type, memory_access_type);
8110 tree vec_mask = NULL_TREE;
8111 prev_stmt_info = NULL;
8112 poly_uint64 group_elt = 0;
8113 for (j = 0; j < ncopies; j++)
8115 /* 1. Create the vector or array pointer update chain. */
8116 if (j == 0)
8118 bool simd_lane_access_p
8119 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
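/* For SIMD lane accesses whose base is a plain variable with zero offset
   we can address the base directly and track the displacement in
   DATAREF_OFFSET instead of creating a separate data-ref pointer IV.  */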
8120 if (simd_lane_access_p
8121 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
8122 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
8123 && integer_zerop (DR_OFFSET (first_dr))
8124 && integer_zerop (DR_INIT (first_dr))
8125 && alias_sets_conflict_p (get_alias_set (aggr_type),
8126 get_alias_set (TREE_TYPE (ref_type)))
8127 && (alignment_support_scheme == dr_aligned
8128 || alignment_support_scheme == dr_unaligned_supported))
8130 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
8131 dataref_offset = build_int_cst (ref_type, 0);
8132 inv_p = false;
8134 else if (first_stmt_for_drptr
8135 && first_stmt != first_stmt_for_drptr)
8137 dataref_ptr
8138 = vect_create_data_ref_ptr (first_stmt_for_drptr, aggr_type,
8139 at_loop, offset, &dummy, gsi,
8140 &ptr_incr, simd_lane_access_p,
8141 &inv_p, byte_offset, bump);
8142 /* Adjust the pointer by the difference to first_stmt. */
8143 data_reference_p ptrdr
8144 = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt_for_drptr));
8145 tree diff = fold_convert (sizetype,
8146 size_binop (MINUS_EXPR,
8147 DR_INIT (first_dr),
8148 DR_INIT (ptrdr)));
8149 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
8150 stmt, diff);
8152 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
8154 vect_get_gather_scatter_ops (loop, stmt, &gs_info,
8155 &dataref_ptr, &vec_offset);
8156 inv_p = false;
8158 else
8159 dataref_ptr
8160 = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
8161 offset, &dummy, gsi, &ptr_incr,
8162 simd_lane_access_p, &inv_p,
8163 byte_offset, bump);
8164 if (mask)
8165 vec_mask = vect_get_vec_def_for_operand (mask, stmt,
8166 mask_vectype);
8168 else
8170 if (dataref_offset)
8171 dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
8172 bump);
8173 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
8174 vec_offset = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt,
8175 vec_offset);
8176 else
8177 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
8178 stmt, bump);
8179 if (mask)
8180 vec_mask = vect_get_vec_def_for_stmt_copy (mask_dt, vec_mask);
8183 if (grouped_load || slp_perm)
8184 dr_chain.create (vec_num);
8186 if (memory_access_type == VMAT_LOAD_STORE_LANES)
8188 tree vec_array;
8190 vec_array = create_vector_array (vectype, vec_num);
8192 tree final_mask = NULL_TREE;
8193 if (loop_masks)
8194 final_mask = vect_get_loop_mask (gsi, loop_masks, ncopies,
8195 vectype, j);
8196 if (vec_mask)
8197 final_mask = prepare_load_store_mask (mask_vectype, final_mask,
8198 vec_mask, gsi);
8200 gcall *call;
8201 if (final_mask)
8203 /* Emit:
8204 VEC_ARRAY = MASK_LOAD_LANES (DATAREF_PTR, ALIAS_PTR,
8205 VEC_MASK). */
8206 unsigned int align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
8207 tree alias_ptr = build_int_cst (ref_type, align);
8208 call = gimple_build_call_internal (IFN_MASK_LOAD_LANES, 3,
8209 dataref_ptr, alias_ptr,
8210 final_mask);
8212 else
8214 /* Emit:
8215 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
8216 data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
8217 call = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
8219 gimple_call_set_lhs (call, vec_array);
8220 gimple_call_set_nothrow (call, true);
8221 new_stmt = call;
8222 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8224 /* Extract each vector into an SSA_NAME. */
8225 for (i = 0; i < vec_num; i++)
8227 new_temp = read_vector_array (stmt, gsi, scalar_dest,
8228 vec_array, i);
8229 dr_chain.quick_push (new_temp);
8232 /* Record the mapping between SSA_NAMEs and statements. */
8233 vect_record_grouped_load_vectors (stmt, dr_chain);
8235 /* Record that VEC_ARRAY is now dead. */
8236 vect_clobber_variable (stmt, gsi, vec_array);
8238 else
8240 for (i = 0; i < vec_num; i++)
8242 tree final_mask = NULL_TREE;
8243 if (loop_masks
8244 && memory_access_type != VMAT_INVARIANT)
8245 final_mask = vect_get_loop_mask (gsi, loop_masks,
8246 vec_num * ncopies,
8247 vectype, vec_num * j + i);
8248 if (vec_mask)
8249 final_mask = prepare_load_store_mask (mask_vectype, final_mask,
8250 vec_mask, gsi);
8252 if (i > 0)
8253 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
8254 stmt, bump);
8256 /* 2. Create the vector-load in the loop. */
8257 switch (alignment_support_scheme)
8259 case dr_aligned:
8260 case dr_unaligned_supported:
8262 unsigned int align, misalign;
8264 if (memory_access_type == VMAT_GATHER_SCATTER)
8266 tree scale = size_int (gs_info.scale);
8267 gcall *call;
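/* Emit either
     LHS = MASK_GATHER_LOAD (DATAREF_PTR, VEC_OFFSET, SCALE, FINAL_MASK)
   when loop masks are in use, or
     LHS = GATHER_LOAD (DATAREF_PTR, VEC_OFFSET, SCALE)
   otherwise.  */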
8268 if (loop_masks)
8269 call = gimple_build_call_internal
8270 (IFN_MASK_GATHER_LOAD, 4, dataref_ptr,
8271 vec_offset, scale, final_mask);
8272 else
8273 call = gimple_build_call_internal
8274 (IFN_GATHER_LOAD, 3, dataref_ptr,
8275 vec_offset, scale);
8276 gimple_call_set_nothrow (call, true);
8277 new_stmt = call;
8278 data_ref = NULL_TREE;
8279 break;
8282 align = DR_TARGET_ALIGNMENT (dr);
8283 if (alignment_support_scheme == dr_aligned)
8285 gcc_assert (aligned_access_p (first_dr));
8286 misalign = 0;
8288 else if (DR_MISALIGNMENT (first_dr) == -1)
8290 align = dr_alignment (vect_dr_behavior (first_dr));
8291 misalign = 0;
8293 else
8294 misalign = DR_MISALIGNMENT (first_dr);
8295 if (dataref_offset == NULL_TREE
8296 && TREE_CODE (dataref_ptr) == SSA_NAME)
8297 set_ptr_info_alignment (get_ptr_info (dataref_ptr),
8298 align, misalign);
8300 if (final_mask)
8302 align = least_bit_hwi (misalign | align);
8303 tree ptr = build_int_cst (ref_type, align);
8304 gcall *call
8305 = gimple_build_call_internal (IFN_MASK_LOAD, 3,
8306 dataref_ptr, ptr,
8307 final_mask);
8308 gimple_call_set_nothrow (call, true);
8309 new_stmt = call;
8310 data_ref = NULL_TREE;
8312 else
8314 data_ref
8315 = fold_build2 (MEM_REF, vectype, dataref_ptr,
8316 dataref_offset
8317 ? dataref_offset
8318 : build_int_cst (ref_type, 0));
8319 if (alignment_support_scheme == dr_aligned)
8321 else if (DR_MISALIGNMENT (first_dr) == -1)
8322 TREE_TYPE (data_ref)
8323 = build_aligned_type (TREE_TYPE (data_ref),
8324 align * BITS_PER_UNIT);
8325 else
8326 TREE_TYPE (data_ref)
8327 = build_aligned_type (TREE_TYPE (data_ref),
8328 TYPE_ALIGN (elem_type));
8330 break;
8332 case dr_explicit_realign:
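/* Explicit realignment: load the two naturally aligned vectors that
   surround the misaligned access - MSQ from the access address rounded
   down to the target alignment and LSQ from the address of the access's
   last byte rounded down - and combine them further below with a
   REALIGN_LOAD_EXPR using REALIGNMENT_TOKEN.  */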
8334 tree ptr, bump;
8336 tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));
8338 if (compute_in_loop)
8339 msq = vect_setup_realignment (first_stmt, gsi,
8340 &realignment_token,
8341 dr_explicit_realign,
8342 dataref_ptr, NULL);
8344 if (TREE_CODE (dataref_ptr) == SSA_NAME)
8345 ptr = copy_ssa_name (dataref_ptr);
8346 else
8347 ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
8348 unsigned int align = DR_TARGET_ALIGNMENT (first_dr);
8349 new_stmt = gimple_build_assign
8350 (ptr, BIT_AND_EXPR, dataref_ptr,
8351 build_int_cst
8352 (TREE_TYPE (dataref_ptr),
8353 -(HOST_WIDE_INT) align));
8354 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8355 data_ref
8356 = build2 (MEM_REF, vectype, ptr,
8357 build_int_cst (ref_type, 0));
8358 vect_copy_ref_info (data_ref, DR_REF (first_dr));
8359 vec_dest = vect_create_destination_var (scalar_dest,
8360 vectype);
8361 new_stmt = gimple_build_assign (vec_dest, data_ref);
8362 new_temp = make_ssa_name (vec_dest, new_stmt);
8363 gimple_assign_set_lhs (new_stmt, new_temp);
8364 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
8365 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
8366 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8367 msq = new_temp;
8369 bump = size_binop (MULT_EXPR, vs,
8370 TYPE_SIZE_UNIT (elem_type));
8371 bump = size_binop (MINUS_EXPR, bump, size_one_node);
8372 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
8373 new_stmt = gimple_build_assign
8374 (NULL_TREE, BIT_AND_EXPR, ptr,
8375 build_int_cst
8376 (TREE_TYPE (ptr), -(HOST_WIDE_INT) align));
8377 ptr = copy_ssa_name (ptr, new_stmt);
8378 gimple_assign_set_lhs (new_stmt, ptr);
8379 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8380 data_ref
8381 = build2 (MEM_REF, vectype, ptr,
8382 build_int_cst (ref_type, 0));
8383 break;
8385 case dr_explicit_realign_optimized:
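/* Optimized realignment: MSQ was computed outside the loop by
   vect_setup_realignment, so only LSQ needs to be loaded here (from the
   rounded-down address); the REALIGN_LOAD_EXPR and the rotation of MSQ
   through the PHI node happen further below.  */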
8387 if (TREE_CODE (dataref_ptr) == SSA_NAME)
8388 new_temp = copy_ssa_name (dataref_ptr);
8389 else
8390 new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
8391 unsigned int align = DR_TARGET_ALIGNMENT (first_dr);
8392 new_stmt = gimple_build_assign
8393 (new_temp, BIT_AND_EXPR, dataref_ptr,
8394 build_int_cst (TREE_TYPE (dataref_ptr),
8395 -(HOST_WIDE_INT) align));
8396 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8397 data_ref
8398 = build2 (MEM_REF, vectype, new_temp,
8399 build_int_cst (ref_type, 0));
8400 break;
8402 default:
8403 gcc_unreachable ();
8405 vec_dest = vect_create_destination_var (scalar_dest, vectype);
8406 /* DATA_REF is null if we've already built the statement. */
8407 if (data_ref)
8409 vect_copy_ref_info (data_ref, DR_REF (first_dr));
8410 new_stmt = gimple_build_assign (vec_dest, data_ref);
8412 new_temp = make_ssa_name (vec_dest, new_stmt);
8413 gimple_set_lhs (new_stmt, new_temp);
8414 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8416 /* 3. Handle explicit realignment if necessary/supported.
8417 Create in loop:
8418 vec_dest = realign_load (msq, lsq, realignment_token) */
8419 if (alignment_support_scheme == dr_explicit_realign_optimized
8420 || alignment_support_scheme == dr_explicit_realign)
8422 lsq = gimple_assign_lhs (new_stmt);
8423 if (!realignment_token)
8424 realignment_token = dataref_ptr;
8425 vec_dest = vect_create_destination_var (scalar_dest, vectype);
8426 new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
8427 msq, lsq, realignment_token);
8428 new_temp = make_ssa_name (vec_dest, new_stmt);
8429 gimple_assign_set_lhs (new_stmt, new_temp);
8430 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8432 if (alignment_support_scheme == dr_explicit_realign_optimized)
8434 gcc_assert (phi);
8435 if (i == vec_num - 1 && j == ncopies - 1)
8436 add_phi_arg (phi, lsq,
8437 loop_latch_edge (containing_loop),
8438 UNKNOWN_LOCATION);
8439 msq = lsq;
8443 /* 4. Handle invariant-load. */
8444 if (inv_p && !bb_vinfo)
8446 gcc_assert (!grouped_load);
8447 /* If we have versioned for aliasing or the loop doesn't
8448 have any data dependencies that would preclude this,
8449 then we are sure this is a loop invariant load and
8450 thus we can insert it on the preheader edge. */
8451 if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
8452 && !nested_in_vect_loop
8453 && hoist_defs_of_uses (stmt, loop))
8455 if (dump_enabled_p ())
8457 dump_printf_loc (MSG_NOTE, vect_location,
8458 "hoisting out of the vectorized "
8459 "loop: ");
8460 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
8462 tree tem = copy_ssa_name (scalar_dest);
8463 gsi_insert_on_edge_immediate
8464 (loop_preheader_edge (loop),
8465 gimple_build_assign (tem,
8466 unshare_expr
8467 (gimple_assign_rhs1 (stmt))));
8468 new_temp = vect_init_vector (stmt, tem, vectype, NULL);
8469 new_stmt = SSA_NAME_DEF_STMT (new_temp);
8470 set_vinfo_for_stmt (new_stmt,
8471 new_stmt_vec_info (new_stmt, vinfo));
8473 else
8475 gimple_stmt_iterator gsi2 = *gsi;
8476 gsi_next (&gsi2);
8477 new_temp = vect_init_vector (stmt, scalar_dest,
8478 vectype, &gsi2);
8479 new_stmt = SSA_NAME_DEF_STMT (new_temp);
8483 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
8485 tree perm_mask = perm_mask_for_reverse (vectype);
8486 new_temp = permute_vec_elements (new_temp, new_temp,
8487 perm_mask, stmt, gsi);
8488 new_stmt = SSA_NAME_DEF_STMT (new_temp);
8491 /* Collect vector loads and later create their permutation in
8492 vect_transform_grouped_load (). */
8493 if (grouped_load || slp_perm)
8494 dr_chain.quick_push (new_temp);
8496 /* Store vector loads in the corresponding SLP_NODE. */
8497 if (slp && !slp_perm)
8498 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
8500 /* With SLP permutation we load the gaps as well; without it
8501 we need to skip the gaps after we manage to fully load
8502 all elements. group_gap_adj is DR_GROUP_SIZE here. */
8503 group_elt += nunits;
8504 if (maybe_ne (group_gap_adj, 0U)
8505 && !slp_perm
8506 && known_eq (group_elt, group_size - group_gap_adj))
8508 poly_wide_int bump_val
8509 = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
8510 * group_gap_adj);
8511 tree bump = wide_int_to_tree (sizetype, bump_val);
8512 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
8513 stmt, bump);
8514 group_elt = 0;
8517 /* Bump the vector pointer to account for a gap or for excess
8518 elements loaded for a permuted SLP load. */
8519 if (maybe_ne (group_gap_adj, 0U) && slp_perm)
8521 poly_wide_int bump_val
8522 = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
8523 * group_gap_adj);
8524 tree bump = wide_int_to_tree (sizetype, bump_val);
8525 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
8526 stmt, bump);
8530 if (slp && !slp_perm)
8531 continue;
8533 if (slp_perm)
8535 unsigned n_perms;
8536 if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
8537 slp_node_instance, false,
8538 &n_perms))
8540 dr_chain.release ();
8541 return false;
8544 else
8546 if (grouped_load)
8548 if (memory_access_type != VMAT_LOAD_STORE_LANES)
8549 vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
8550 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
8552 else
8554 if (j == 0)
8555 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
8556 else
8557 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
8558 prev_stmt_info = vinfo_for_stmt (new_stmt);
8561 dr_chain.release ();
8564 return true;
8567 /* Function vect_is_simple_cond.
8569 Input:
8570 LOOP - the loop that is being vectorized.
8571 COND - Condition that is checked for simple use.
8573 Output:
8574 *COMP_VECTYPE - the vector type for the comparison.
8575 *DTS - The def types for the arguments of the comparison.
8577 Returns whether a COND can be vectorized. Checks whether
8578 condition operands are supportable using vect_is_simple_use. */
8580 static bool
8581 vect_is_simple_cond (tree cond, vec_info *vinfo,
8582 tree *comp_vectype, enum vect_def_type *dts,
8583 tree vectype)
8585 tree lhs, rhs;
8586 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
8588 /* Mask case. */
8589 if (TREE_CODE (cond) == SSA_NAME
8590 && VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (cond)))
8592 gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (cond);
8593 if (!vect_is_simple_use (cond, vinfo, &lhs_def_stmt,
8594 &dts[0], comp_vectype)
8595 || !*comp_vectype
8596 || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype))
8597 return false;
8598 return true;
8601 if (!COMPARISON_CLASS_P (cond))
8602 return false;
8604 lhs = TREE_OPERAND (cond, 0);
8605 rhs = TREE_OPERAND (cond, 1);
8607 if (TREE_CODE (lhs) == SSA_NAME)
8609 gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
8610 if (!vect_is_simple_use (lhs, vinfo, &lhs_def_stmt, &dts[0], &vectype1))
8611 return false;
8613 else if (TREE_CODE (lhs) == INTEGER_CST || TREE_CODE (lhs) == REAL_CST
8614 || TREE_CODE (lhs) == FIXED_CST)
8615 dts[0] = vect_constant_def;
8616 else
8617 return false;
8619 if (TREE_CODE (rhs) == SSA_NAME)
8621 gimple *rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
8622 if (!vect_is_simple_use (rhs, vinfo, &rhs_def_stmt, &dts[1], &vectype2))
8623 return false;
8625 else if (TREE_CODE (rhs) == INTEGER_CST || TREE_CODE (rhs) == REAL_CST
8626 || TREE_CODE (rhs) == FIXED_CST)
8627 dts[1] = vect_constant_def;
8628 else
8629 return false;
8631 if (vectype1 && vectype2
8632 && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1),
8633 TYPE_VECTOR_SUBPARTS (vectype2)))
8634 return false;
8636 *comp_vectype = vectype1 ? vectype1 : vectype2;
8637 /* Invariant comparison. */
8638 if (! *comp_vectype && vectype)
8640 tree scalar_type = TREE_TYPE (lhs);
8641 /* If we can widen the comparison to match vectype do so. */
8642 if (INTEGRAL_TYPE_P (scalar_type)
8643 && tree_int_cst_lt (TYPE_SIZE (scalar_type),
8644 TYPE_SIZE (TREE_TYPE (vectype))))
8645 scalar_type = build_nonstandard_integer_type
8646 (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (vectype))),
8647 TYPE_UNSIGNED (scalar_type));
8648 *comp_vectype = get_vectype_for_scalar_type (scalar_type);
8651 return true;
8654 /* vectorizable_condition.
8656 Check if STMT is a conditional modify expression that can be vectorized.
8657 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
8658 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
8659 at GSI.
8661 When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
8662 to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
8663 the else clause if it is 2).
8665 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
8667 bool
8668 vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
8669 gimple **vec_stmt, tree reduc_def, int reduc_index,
8670 slp_tree slp_node, stmt_vector_for_cost *cost_vec)
8672 tree scalar_dest = NULL_TREE;
8673 tree vec_dest = NULL_TREE;
8674 tree cond_expr, cond_expr0 = NULL_TREE, cond_expr1 = NULL_TREE;
8675 tree then_clause, else_clause;
8676 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8677 tree comp_vectype = NULL_TREE;
8678 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
8679 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
8680 tree vec_compare;
8681 tree new_temp;
8682 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
8683 enum vect_def_type dts[4]
8684 = {vect_unknown_def_type, vect_unknown_def_type,
8685 vect_unknown_def_type, vect_unknown_def_type};
8686 int ndts = 4;
8687 int ncopies;
8688 enum tree_code code, cond_code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
8689 stmt_vec_info prev_stmt_info = NULL;
8690 int i, j;
8691 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
8692 vec<tree> vec_oprnds0 = vNULL;
8693 vec<tree> vec_oprnds1 = vNULL;
8694 vec<tree> vec_oprnds2 = vNULL;
8695 vec<tree> vec_oprnds3 = vNULL;
8696 tree vec_cmp_type;
8697 bool masked = false;
8699 if (reduc_index && STMT_SLP_TYPE (stmt_info))
8700 return false;
8702 vect_reduction_type reduction_type
8703 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info);
8704 if (reduction_type == TREE_CODE_REDUCTION)
8706 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
8707 return false;
8709 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
8710 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
8711 && reduc_def))
8712 return false;
8714 /* FORNOW: not yet supported. */
8715 if (STMT_VINFO_LIVE_P (stmt_info))
8717 if (dump_enabled_p ())
8718 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8719 "value used after loop.\n");
8720 return false;
8724 /* Is vectorizable conditional operation? */
8725 if (!is_gimple_assign (stmt))
8726 return false;
8728 code = gimple_assign_rhs_code (stmt);
8730 if (code != COND_EXPR)
8731 return false;
8733 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
8734 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
8736 if (slp_node)
8737 ncopies = 1;
8738 else
8739 ncopies = vect_get_num_copies (loop_vinfo, vectype);
8741 gcc_assert (ncopies >= 1);
8742 if (reduc_index && ncopies > 1)
8743 return false; /* FORNOW */
8745 cond_expr = gimple_assign_rhs1 (stmt);
8746 then_clause = gimple_assign_rhs2 (stmt);
8747 else_clause = gimple_assign_rhs3 (stmt);
8749 if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo,
8750 &comp_vectype, &dts[0], slp_node ? NULL : vectype)
8751 || !comp_vectype)
8752 return false;
8754 gimple *def_stmt;
8755 if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &def_stmt, &dts[2],
8756 &vectype1))
8757 return false;
8758 if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &def_stmt, &dts[3],
8759 &vectype2))
8760 return false;
8762 if (vectype1 && !useless_type_conversion_p (vectype, vectype1))
8763 return false;
8765 if (vectype2 && !useless_type_conversion_p (vectype, vectype2))
8766 return false;
8768 masked = !COMPARISON_CLASS_P (cond_expr);
8769 vec_cmp_type = build_same_sized_truth_vector_type (comp_vectype);
8771 if (vec_cmp_type == NULL_TREE)
8772 return false;
8774 cond_code = TREE_CODE (cond_expr);
8775 if (!masked)
8777 cond_expr0 = TREE_OPERAND (cond_expr, 0);
8778 cond_expr1 = TREE_OPERAND (cond_expr, 1);
8781 if (!masked && VECTOR_BOOLEAN_TYPE_P (comp_vectype))
8783 /* Boolean values may have another representation in vectors
8784 and therefore we prefer bit operations over comparison for
8785 them (which also works for scalar masks). We store opcodes
8786 to use in bitop1 and bitop2. Statement is vectorized as
8787 BITOP2 (rhs1 BITOP1 rhs2) or rhs1 BITOP2 (BITOP1 rhs2)
8788 depending on bitop1 and bitop2 arity. */
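/* The mappings below rely on the single-bit boolean identities
   a > b == a & ~b, a >= b == a | ~b, a != b == a ^ b and
   a == b == ~(a ^ b); LT and LE reuse GT and GE with swapped operands.  */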
8789 switch (cond_code)
8791 case GT_EXPR:
8792 bitop1 = BIT_NOT_EXPR;
8793 bitop2 = BIT_AND_EXPR;
8794 break;
8795 case GE_EXPR:
8796 bitop1 = BIT_NOT_EXPR;
8797 bitop2 = BIT_IOR_EXPR;
8798 break;
8799 case LT_EXPR:
8800 bitop1 = BIT_NOT_EXPR;
8801 bitop2 = BIT_AND_EXPR;
8802 std::swap (cond_expr0, cond_expr1);
8803 break;
8804 case LE_EXPR:
8805 bitop1 = BIT_NOT_EXPR;
8806 bitop2 = BIT_IOR_EXPR;
8807 std::swap (cond_expr0, cond_expr1);
8808 break;
8809 case NE_EXPR:
8810 bitop1 = BIT_XOR_EXPR;
8811 break;
8812 case EQ_EXPR:
8813 bitop1 = BIT_XOR_EXPR;
8814 bitop2 = BIT_NOT_EXPR;
8815 break;
8816 default:
8817 return false;
8819 cond_code = SSA_NAME;
8822 if (!vec_stmt)
8824 if (bitop1 != NOP_EXPR)
8826 machine_mode mode = TYPE_MODE (comp_vectype);
8827 optab optab;
8829 optab = optab_for_tree_code (bitop1, comp_vectype, optab_default);
8830 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8831 return false;
8833 if (bitop2 != NOP_EXPR)
8835 optab = optab_for_tree_code (bitop2, comp_vectype,
8836 optab_default);
8837 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8838 return false;
8841 if (expand_vec_cond_expr_p (vectype, comp_vectype,
8842 cond_code))
8844 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
8845 vect_model_simple_cost (stmt_info, ncopies, dts, ndts, slp_node,
8846 cost_vec);
8847 return true;
8849 return false;
8852 /* Transform. */
8854 if (!slp_node)
8856 vec_oprnds0.create (1);
8857 vec_oprnds1.create (1);
8858 vec_oprnds2.create (1);
8859 vec_oprnds3.create (1);
8862 /* Handle def. */
8863 scalar_dest = gimple_assign_lhs (stmt);
8864 if (reduction_type != EXTRACT_LAST_REDUCTION)
8865 vec_dest = vect_create_destination_var (scalar_dest, vectype);
8867 /* Handle cond expr. */
8868 for (j = 0; j < ncopies; j++)
8870 gimple *new_stmt = NULL;
8871 if (j == 0)
8873 if (slp_node)
8875 auto_vec<tree, 4> ops;
8876 auto_vec<vec<tree>, 4> vec_defs;
8878 if (masked)
8879 ops.safe_push (cond_expr);
8880 else
8882 ops.safe_push (cond_expr0);
8883 ops.safe_push (cond_expr1);
8885 ops.safe_push (then_clause);
8886 ops.safe_push (else_clause);
8887 vect_get_slp_defs (ops, slp_node, &vec_defs);
8888 vec_oprnds3 = vec_defs.pop ();
8889 vec_oprnds2 = vec_defs.pop ();
8890 if (!masked)
8891 vec_oprnds1 = vec_defs.pop ();
8892 vec_oprnds0 = vec_defs.pop ();
8894 else
8896 gimple *gtemp;
8897 if (masked)
8899 vec_cond_lhs
8900 = vect_get_vec_def_for_operand (cond_expr, stmt,
8901 comp_vectype);
8902 vect_is_simple_use (cond_expr, stmt_info->vinfo,
8903 &gtemp, &dts[0]);
8905 else
8907 vec_cond_lhs
8908 = vect_get_vec_def_for_operand (cond_expr0,
8909 stmt, comp_vectype);
8910 vect_is_simple_use (cond_expr0, loop_vinfo, &gtemp, &dts[0]);
8912 vec_cond_rhs
8913 = vect_get_vec_def_for_operand (cond_expr1,
8914 stmt, comp_vectype);
8915 vect_is_simple_use (cond_expr1, loop_vinfo, &gtemp, &dts[1]);
8917 if (reduc_index == 1)
8918 vec_then_clause = reduc_def;
8919 else
8921 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
8922 stmt);
8923 vect_is_simple_use (then_clause, loop_vinfo,
8924 &gtemp, &dts[2]);
8926 if (reduc_index == 2)
8927 vec_else_clause = reduc_def;
8928 else
8930 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
8931 stmt);
8932 vect_is_simple_use (else_clause, loop_vinfo, &gtemp, &dts[3]);
8936 else
8938 vec_cond_lhs
8939 = vect_get_vec_def_for_stmt_copy (dts[0],
8940 vec_oprnds0.pop ());
8941 if (!masked)
8942 vec_cond_rhs
8943 = vect_get_vec_def_for_stmt_copy (dts[1],
8944 vec_oprnds1.pop ());
8946 vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
8947 vec_oprnds2.pop ());
8948 vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
8949 vec_oprnds3.pop ());
8952 if (!slp_node)
8954 vec_oprnds0.quick_push (vec_cond_lhs);
8955 if (!masked)
8956 vec_oprnds1.quick_push (vec_cond_rhs);
8957 vec_oprnds2.quick_push (vec_then_clause);
8958 vec_oprnds3.quick_push (vec_else_clause);
8961 /* Arguments are ready. Create the new vector stmt. */
8962 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
8964 vec_then_clause = vec_oprnds2[i];
8965 vec_else_clause = vec_oprnds3[i];
8967 if (masked)
8968 vec_compare = vec_cond_lhs;
8969 else
8971 vec_cond_rhs = vec_oprnds1[i];
8972 if (bitop1 == NOP_EXPR)
8973 vec_compare = build2 (cond_code, vec_cmp_type,
8974 vec_cond_lhs, vec_cond_rhs);
8975 else
8977 new_temp = make_ssa_name (vec_cmp_type);
8978 if (bitop1 == BIT_NOT_EXPR)
8979 new_stmt = gimple_build_assign (new_temp, bitop1,
8980 vec_cond_rhs);
8981 else
8982 new_stmt
8983 = gimple_build_assign (new_temp, bitop1, vec_cond_lhs,
8984 vec_cond_rhs);
8985 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8986 if (bitop2 == NOP_EXPR)
8987 vec_compare = new_temp;
8988 else if (bitop2 == BIT_NOT_EXPR)
8990 /* Instead of doing ~x ? y : z do x ? z : y. */
8991 vec_compare = new_temp;
8992 std::swap (vec_then_clause, vec_else_clause);
8994 else
8996 vec_compare = make_ssa_name (vec_cmp_type);
8997 new_stmt
8998 = gimple_build_assign (vec_compare, bitop2,
8999 vec_cond_lhs, new_temp);
9000 vect_finish_stmt_generation (stmt, new_stmt, gsi);
9004 if (reduction_type == EXTRACT_LAST_REDUCTION)
9006 if (!is_gimple_val (vec_compare))
9008 tree vec_compare_name = make_ssa_name (vec_cmp_type);
9009 new_stmt = gimple_build_assign (vec_compare_name,
9010 vec_compare);
9011 vect_finish_stmt_generation (stmt, new_stmt, gsi);
9012 vec_compare = vec_compare_name;
9014 gcc_assert (reduc_index == 2);
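/* Emit:
     SCALAR_DEST = FOLD_EXTRACT_LAST (ELSE_CLAUSE, VEC_COMPARE, VEC_THEN_CLAUSE)
   which (roughly speaking) yields the last element of VEC_THEN_CLAUSE for
   which VEC_COMPARE is true, or ELSE_CLAUSE if no element is selected.  */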
9015 new_stmt = gimple_build_call_internal
9016 (IFN_FOLD_EXTRACT_LAST, 3, else_clause, vec_compare,
9017 vec_then_clause);
9018 gimple_call_set_lhs (new_stmt, scalar_dest);
9019 SSA_NAME_DEF_STMT (scalar_dest) = new_stmt;
9020 if (stmt == gsi_stmt (*gsi))
9021 vect_finish_replace_stmt (stmt, new_stmt);
9022 else
9024 /* In this case we're moving the definition to later in the
9025 block. That doesn't matter because the only uses of the
9026 lhs are in phi statements. */
9027 gimple_stmt_iterator old_gsi = gsi_for_stmt (stmt);
9028 gsi_remove (&old_gsi, true);
9029 vect_finish_stmt_generation (stmt, new_stmt, gsi);
9032 else
9034 new_temp = make_ssa_name (vec_dest);
9035 new_stmt = gimple_build_assign (new_temp, VEC_COND_EXPR,
9036 vec_compare, vec_then_clause,
9037 vec_else_clause);
9038 vect_finish_stmt_generation (stmt, new_stmt, gsi);
9040 if (slp_node)
9041 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
9044 if (slp_node)
9045 continue;
9047 if (j == 0)
9048 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
9049 else
9050 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
9052 prev_stmt_info = vinfo_for_stmt (new_stmt);
9055 vec_oprnds0.release ();
9056 vec_oprnds1.release ();
9057 vec_oprnds2.release ();
9058 vec_oprnds3.release ();
9060 return true;
9063 /* vectorizable_comparison.
9065 Check if STMT is a comparison expression that can be vectorized.
9066 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
9067 comparison, put it in VEC_STMT, and insert it at GSI.
9069 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
9071 static bool
9072 vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
9073 gimple **vec_stmt, tree reduc_def,
9074 slp_tree slp_node, stmt_vector_for_cost *cost_vec)
9076 tree lhs, rhs1, rhs2;
9077 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
9078 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
9079 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
9080 tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE;
9081 tree new_temp;
9082 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
9083 enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type};
9084 int ndts = 2;
9085 poly_uint64 nunits;
9086 int ncopies;
9087 enum tree_code code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
9088 stmt_vec_info prev_stmt_info = NULL;
9089 int i, j;
9090 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
9091 vec<tree> vec_oprnds0 = vNULL;
9092 vec<tree> vec_oprnds1 = vNULL;
9093 gimple *def_stmt;
9094 tree mask_type;
9095 tree mask;
9097 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
9098 return false;
9100 if (!vectype || !VECTOR_BOOLEAN_TYPE_P (vectype))
9101 return false;
9103 mask_type = vectype;
9104 nunits = TYPE_VECTOR_SUBPARTS (vectype);
9106 if (slp_node)
9107 ncopies = 1;
9108 else
9109 ncopies = vect_get_num_copies (loop_vinfo, vectype);
9111 gcc_assert (ncopies >= 1);
9112 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
9113 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
9114 && reduc_def))
9115 return false;
9117 if (STMT_VINFO_LIVE_P (stmt_info))
9119 if (dump_enabled_p ())
9120 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9121 "value used after loop.\n");
9122 return false;
9125 if (!is_gimple_assign (stmt))
9126 return false;
9128 code = gimple_assign_rhs_code (stmt);
9130 if (TREE_CODE_CLASS (code) != tcc_comparison)
9131 return false;
9133 rhs1 = gimple_assign_rhs1 (stmt);
9134 rhs2 = gimple_assign_rhs2 (stmt);
9136 if (!vect_is_simple_use (rhs1, stmt_info->vinfo, &def_stmt,
9137 &dts[0], &vectype1))
9138 return false;
9140 if (!vect_is_simple_use (rhs2, stmt_info->vinfo, &def_stmt,
9141 &dts[1], &vectype2))
9142 return false;
9144 if (vectype1 && vectype2
9145 && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1),
9146 TYPE_VECTOR_SUBPARTS (vectype2)))
9147 return false;
9149 vectype = vectype1 ? vectype1 : vectype2;
9151 /* Invariant comparison. */
9152 if (!vectype)
9154 vectype = get_vectype_for_scalar_type (TREE_TYPE (rhs1));
9155 if (maybe_ne (TYPE_VECTOR_SUBPARTS (vectype), nunits))
9156 return false;
9158 else if (maybe_ne (nunits, TYPE_VECTOR_SUBPARTS (vectype)))
9159 return false;
9161 /* Can't compare mask and non-mask types. */
9162 if (vectype1 && vectype2
9163 && (VECTOR_BOOLEAN_TYPE_P (vectype1) ^ VECTOR_BOOLEAN_TYPE_P (vectype2)))
9164 return false;
9166 /* Boolean values may have another representation in vectors
9167 and therefore we prefer bit operations over comparison for
9168 them (which also works for scalar masks). We store opcodes
9169 to use in bitop1 and bitop2. Statement is vectorized as
9170 BITOP2 (rhs1 BITOP1 rhs2) or
9171 rhs1 BITOP2 (BITOP1 rhs2)
9172 depending on bitop1 and bitop2 arity. */
9173 if (VECTOR_BOOLEAN_TYPE_P (vectype))
9175 if (code == GT_EXPR)
9177 bitop1 = BIT_NOT_EXPR;
9178 bitop2 = BIT_AND_EXPR;
9180 else if (code == GE_EXPR)
9182 bitop1 = BIT_NOT_EXPR;
9183 bitop2 = BIT_IOR_EXPR;
9185 else if (code == LT_EXPR)
9187 bitop1 = BIT_NOT_EXPR;
9188 bitop2 = BIT_AND_EXPR;
9189 std::swap (rhs1, rhs2);
9190 std::swap (dts[0], dts[1]);
9192 else if (code == LE_EXPR)
9194 bitop1 = BIT_NOT_EXPR;
9195 bitop2 = BIT_IOR_EXPR;
9196 std::swap (rhs1, rhs2);
9197 std::swap (dts[0], dts[1]);
9199 else
9201 bitop1 = BIT_XOR_EXPR;
9202 if (code == EQ_EXPR)
9203 bitop2 = BIT_NOT_EXPR;
9207 if (!vec_stmt)
9209 if (bitop1 == NOP_EXPR)
9211 if (!expand_vec_cmp_expr_p (vectype, mask_type, code))
9212 return false;
9214 else
9216 machine_mode mode = TYPE_MODE (vectype);
9217 optab optab;
9219 optab = optab_for_tree_code (bitop1, vectype, optab_default);
9220 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
9221 return false;
9223 if (bitop2 != NOP_EXPR)
9225 optab = optab_for_tree_code (bitop2, vectype, optab_default);
9226 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
9227 return false;
9231 STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type;
9232 vect_model_simple_cost (stmt_info, ncopies * (1 + (bitop2 != NOP_EXPR)),
9233 dts, ndts, slp_node, cost_vec);
9234 return true;
9237 /* Transform. */
9238 if (!slp_node)
9240 vec_oprnds0.create (1);
9241 vec_oprnds1.create (1);
9244 /* Handle def. */
9245 lhs = gimple_assign_lhs (stmt);
9246 mask = vect_create_destination_var (lhs, mask_type);
9248 /* Handle cmp expr. */
9249 for (j = 0; j < ncopies; j++)
9251 gassign *new_stmt = NULL;
9252 if (j == 0)
9254 if (slp_node)
9256 auto_vec<tree, 2> ops;
9257 auto_vec<vec<tree>, 2> vec_defs;
9259 ops.safe_push (rhs1);
9260 ops.safe_push (rhs2);
9261 vect_get_slp_defs (ops, slp_node, &vec_defs);
9262 vec_oprnds1 = vec_defs.pop ();
9263 vec_oprnds0 = vec_defs.pop ();
9265 else
9267 vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt, vectype);
9268 vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt, vectype);
9271 else
9273 vec_rhs1 = vect_get_vec_def_for_stmt_copy (dts[0],
9274 vec_oprnds0.pop ());
9275 vec_rhs2 = vect_get_vec_def_for_stmt_copy (dts[1],
9276 vec_oprnds1.pop ());
9279 if (!slp_node)
9281 vec_oprnds0.quick_push (vec_rhs1);
9282 vec_oprnds1.quick_push (vec_rhs2);
9285 /* Arguments are ready. Create the new vector stmt. */
9286 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_rhs1)
9288 vec_rhs2 = vec_oprnds1[i];
9290 new_temp = make_ssa_name (mask);
9291 if (bitop1 == NOP_EXPR)
9293 new_stmt = gimple_build_assign (new_temp, code,
9294 vec_rhs1, vec_rhs2);
9295 vect_finish_stmt_generation (stmt, new_stmt, gsi);
9297 else
9299 if (bitop1 == BIT_NOT_EXPR)
9300 new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs2);
9301 else
9302 new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs1,
9303 vec_rhs2);
9304 vect_finish_stmt_generation (stmt, new_stmt, gsi);
9305 if (bitop2 != NOP_EXPR)
9307 tree res = make_ssa_name (mask);
9308 if (bitop2 == BIT_NOT_EXPR)
9309 new_stmt = gimple_build_assign (res, bitop2, new_temp);
9310 else
9311 new_stmt = gimple_build_assign (res, bitop2, vec_rhs1,
9312 new_temp);
9313 vect_finish_stmt_generation (stmt, new_stmt, gsi);
9316 if (slp_node)
9317 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
9320 if (slp_node)
9321 continue;
9323 if (j == 0)
9324 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
9325 else
9326 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
9328 prev_stmt_info = vinfo_for_stmt (new_stmt);
9331 vec_oprnds0.release ();
9332 vec_oprnds1.release ();
9334 return true;
9337 /* If SLP_NODE is nonnull, return true if vectorizable_live_operation
9338 can handle all live statements in the node. Otherwise return true
9339 if STMT is not live or if vectorizable_live_operation can handle it.
9340 GSI and VEC_STMT are as for vectorizable_live_operation. */
9342 static bool
9343 can_vectorize_live_stmts (gimple *stmt, gimple_stmt_iterator *gsi,
9344 slp_tree slp_node, gimple **vec_stmt,
9345 stmt_vector_for_cost *cost_vec)
9347 if (slp_node)
9349 gimple *slp_stmt;
9350 unsigned int i;
9351 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node), i, slp_stmt)
9353 stmt_vec_info slp_stmt_info = vinfo_for_stmt (slp_stmt);
9354 if (STMT_VINFO_LIVE_P (slp_stmt_info)
9355 && !vectorizable_live_operation (slp_stmt, gsi, slp_node, i,
9356 vec_stmt, cost_vec))
9357 return false;
9360 else if (STMT_VINFO_LIVE_P (vinfo_for_stmt (stmt))
9361 && !vectorizable_live_operation (stmt, gsi, slp_node, -1, vec_stmt,
9362 cost_vec))
9363 return false;
9365 return true;
9368 /* Make sure the statement is vectorizable. */
9370 bool
9371 vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
9372 slp_instance node_instance, stmt_vector_for_cost *cost_vec)
9374 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
9375 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
9376 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
9377 bool ok;
9378 gimple *pattern_stmt;
9379 gimple_seq pattern_def_seq;
9381 if (dump_enabled_p ())
9383 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
9384 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
9387 if (gimple_has_volatile_ops (stmt))
9389 if (dump_enabled_p ())
9390 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9391 "not vectorized: stmt has volatile operands\n");
9393 return false;
9396 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
9397 && node == NULL
9398 && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
9400 gimple_stmt_iterator si;
9402 for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
9404 gimple *pattern_def_stmt = gsi_stmt (si);
9405 if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
9406 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
9408 /* Analyze def stmt of STMT if it's a pattern stmt. */
9409 if (dump_enabled_p ())
9411 dump_printf_loc (MSG_NOTE, vect_location,
9412 "==> examining pattern def statement: ");
9413 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
9416 if (!vect_analyze_stmt (pattern_def_stmt,
9417 need_to_vectorize, node, node_instance,
9418 cost_vec))
9419 return false;
9424 /* Skip stmts that do not need to be vectorized. In loops this is expected
9425 to include:
9426 - the COND_EXPR which is the loop exit condition
9427 - any LABEL_EXPRs in the loop
9428 - computations that are used only for array indexing or loop control.
9429 In basic blocks we only analyze statements that are a part of some SLP
9430 instance; therefore, all the statements are relevant.
9432 A pattern statement needs to be analyzed instead of the original statement
9433 if the original statement is not relevant. Otherwise, we analyze both
9434 statements. In basic blocks we are called from some SLP instance
9435 traversal, so we do not analyze pattern stmts instead; the pattern stmts
9436 are already part of the SLP instance. */
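   /* A hypothetical illustration: if the pattern recognizer replaced the
      multiplication in
	 a_int = (int) a_short;
	 b_int = (int) b_short;
	 prod = a_int * b_int;
      by the pattern statement
	 patt_prod = a_short w* b_short;   (WIDEN_MULT_EXPR)
      then, when PROD itself is not relevant, only the pattern statement is
      analyzed below; when PROD is also relevant or live, both are.  */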
9438 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
9439 if (!STMT_VINFO_RELEVANT_P (stmt_info)
9440 && !STMT_VINFO_LIVE_P (stmt_info))
9442 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
9443 && pattern_stmt
9444 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
9445 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
9447 /* Analyze PATTERN_STMT instead of the original stmt. */
9448 stmt = pattern_stmt;
9449 stmt_info = vinfo_for_stmt (pattern_stmt);
9450 if (dump_enabled_p ())
9452 dump_printf_loc (MSG_NOTE, vect_location,
9453 "==> examining pattern statement: ");
9454 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
9457 else
9459 if (dump_enabled_p ())
9460 dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");
9462 return true;
9465 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
9466 && node == NULL
9467 && pattern_stmt
9468 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
9469 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
9471 /* Analyze PATTERN_STMT too. */
9472 if (dump_enabled_p ())
9474 dump_printf_loc (MSG_NOTE, vect_location,
9475 "==> examining pattern statement: ");
9476 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
9479 if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node,
9480 node_instance, cost_vec))
9481 return false;
9484 switch (STMT_VINFO_DEF_TYPE (stmt_info))
9486 case vect_internal_def:
9487 break;
9489 case vect_reduction_def:
9490 case vect_nested_cycle:
9491 gcc_assert (!bb_vinfo
9492 && (relevance == vect_used_in_outer
9493 || relevance == vect_used_in_outer_by_reduction
9494 || relevance == vect_used_by_reduction
9495 || relevance == vect_unused_in_scope
9496 || relevance == vect_used_only_live));
9497 break;
9499 case vect_induction_def:
9500 gcc_assert (!bb_vinfo);
9501 break;
9503 case vect_constant_def:
9504 case vect_external_def:
9505 case vect_unknown_def_type:
9506 default:
9507 gcc_unreachable ();
9510 if (STMT_VINFO_RELEVANT_P (stmt_info))
9512 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
9513 gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
9514 || (is_gimple_call (stmt)
9515 && gimple_call_lhs (stmt) == NULL_TREE));
9516 *need_to_vectorize = true;
9519 if (PURE_SLP_STMT (stmt_info) && !node)
9521 dump_printf_loc (MSG_NOTE, vect_location,
9522 "handled only by SLP analysis\n");
9523 return true;
9526 ok = true;
9527 if (!bb_vinfo
9528 && (STMT_VINFO_RELEVANT_P (stmt_info)
9529 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
9530 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node, cost_vec)
9531 || vectorizable_conversion (stmt, NULL, NULL, node, cost_vec)
9532 || vectorizable_shift (stmt, NULL, NULL, node, cost_vec)
9533 || vectorizable_operation (stmt, NULL, NULL, node, cost_vec)
9534 || vectorizable_assignment (stmt, NULL, NULL, node, cost_vec)
9535 || vectorizable_load (stmt, NULL, NULL, node, node_instance, cost_vec)
9536 || vectorizable_call (stmt, NULL, NULL, node, cost_vec)
9537 || vectorizable_store (stmt, NULL, NULL, node, cost_vec)
9538 || vectorizable_reduction (stmt, NULL, NULL, node, node_instance,
9539 cost_vec)
9540 || vectorizable_induction (stmt, NULL, NULL, node, cost_vec)
9541 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node, cost_vec)
9542 || vectorizable_comparison (stmt, NULL, NULL, NULL, node, cost_vec));
9543 else
9545 if (bb_vinfo)
9546 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node, cost_vec)
9547 || vectorizable_conversion (stmt, NULL, NULL, node, cost_vec)
9548 || vectorizable_shift (stmt, NULL, NULL, node, cost_vec)
9549 || vectorizable_operation (stmt, NULL, NULL, node, cost_vec)
9550 || vectorizable_assignment (stmt, NULL, NULL, node, cost_vec)
9551 || vectorizable_load (stmt, NULL, NULL, node, node_instance,
9552 cost_vec)
9553 || vectorizable_call (stmt, NULL, NULL, node, cost_vec)
9554 || vectorizable_store (stmt, NULL, NULL, node, cost_vec)
9555 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node,
9556 cost_vec)
9557 || vectorizable_comparison (stmt, NULL, NULL, NULL, node,
9558 cost_vec));
9561 if (!ok)
9563 if (dump_enabled_p ())
9565 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9566 "not vectorized: relevant stmt not ");
9567 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
9568 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
9571 return false;
9574 /* Stmts that are (also) "live" (i.e. used outside of the loop)
9575 need extra handling, except for vectorizable reductions. */
9576 if (!bb_vinfo
9577 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
9578 && !can_vectorize_live_stmts (stmt, NULL, node, NULL, cost_vec))
9580 if (dump_enabled_p ())
9582 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9583 "not vectorized: live stmt not supported: ");
9584 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
9587 return false;
9590 return true;
9594 /* Function vect_transform_stmt.
9596 Create a vectorized stmt to replace STMT, and insert it at BSI. */
9598 bool
9599 vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
9600 bool *grouped_store, slp_tree slp_node,
9601 slp_instance slp_node_instance)
9603 bool is_store = false;
9604 gimple *vec_stmt = NULL;
9605 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
9606 bool done;
9608 gcc_assert (slp_node || !PURE_SLP_STMT (stmt_info));
9609 gimple *old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
9611 bool nested_p = (STMT_VINFO_LOOP_VINFO (stmt_info)
9612 && nested_in_vect_loop_p
9613 (LOOP_VINFO_LOOP (STMT_VINFO_LOOP_VINFO (stmt_info)),
9614 stmt));
9616 switch (STMT_VINFO_TYPE (stmt_info))
9618 case type_demotion_vec_info_type:
9619 case type_promotion_vec_info_type:
9620 case type_conversion_vec_info_type:
9621 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node, NULL);
9622 gcc_assert (done);
9623 break;
9625 case induc_vec_info_type:
9626 done = vectorizable_induction (stmt, gsi, &vec_stmt, slp_node, NULL);
9627 gcc_assert (done);
9628 break;
9630 case shift_vec_info_type:
9631 done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node, NULL);
9632 gcc_assert (done);
9633 break;
9635 case op_vec_info_type:
9636 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node, NULL);
9637 gcc_assert (done);
9638 break;
9640 case assignment_vec_info_type:
9641 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node, NULL);
9642 gcc_assert (done);
9643 break;
9645 case load_vec_info_type:
9646 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
9647 slp_node_instance, NULL);
9648 gcc_assert (done);
9649 break;
9651 case store_vec_info_type:
9652 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node, NULL);
9653 gcc_assert (done);
9654 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
9656 /* In case of interleaving, the whole chain is vectorized when the
9657 last store in the chain is reached. Store stmts before the last
9658 one are skipped, and their vec_stmt_info shouldn't be freed
9659 in the meantime. */
9660 *grouped_store = true;
9661 stmt_vec_info group_info
9662 = vinfo_for_stmt (DR_GROUP_FIRST_ELEMENT (stmt_info));
9663 if (DR_GROUP_STORE_COUNT (group_info) == DR_GROUP_SIZE (group_info))
9664 is_store = true;
9666 else
9667 is_store = true;
9668 break;
9670 case condition_vec_info_type:
9671 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node, NULL);
9672 gcc_assert (done);
9673 break;
9675 case comparison_vec_info_type:
9676 done = vectorizable_comparison (stmt, gsi, &vec_stmt, NULL, slp_node, NULL);
9677 gcc_assert (done);
9678 break;
9680 case call_vec_info_type:
9681 done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node, NULL);
9682 stmt = gsi_stmt (*gsi);
9683 break;
9685 case call_simd_clone_vec_info_type:
9686 done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node, NULL);
9687 stmt = gsi_stmt (*gsi);
9688 break;
9690 case reduc_vec_info_type:
9691 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node,
9692 slp_node_instance, NULL);
9693 gcc_assert (done);
9694 break;
9696 default:
9697 if (!STMT_VINFO_LIVE_P (stmt_info))
9699 if (dump_enabled_p ())
9700 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9701 "stmt not supported.\n");
9702 gcc_unreachable ();
9706 /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
9707 This would break hybrid SLP vectorization. */
9708 if (slp_node)
9709 gcc_assert (!vec_stmt
9710 && STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt);
9712 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
9713 is being vectorized, but outside the immediately enclosing loop. */
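   /* A sketch of the situation handled here (outer-loop vectorization):
	 for (i = 0; i < n; i++)
	   {
	     for (j = 0; j < m; j++)
	       diff = a[i][j] - b[i][j];    <-- inner-loop def
	     out[i] = diff;                 <-- used in the outer loop
	   }
      The use of DIFF outside the inner loop goes through an inner-loop exit
      PHI, and that PHI is where the vectorized def is recorded below.  */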
9714 if (vec_stmt
9715 && nested_p
9716 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
9717 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
9718 || STMT_VINFO_RELEVANT (stmt_info) ==
9719 vect_used_in_outer_by_reduction))
9721 struct loop *innerloop = LOOP_VINFO_LOOP (
9722 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
9723 imm_use_iterator imm_iter;
9724 use_operand_p use_p;
9725 tree scalar_dest;
9726 gimple *exit_phi;
9728 if (dump_enabled_p ())
9729 dump_printf_loc (MSG_NOTE, vect_location,
9730 "Record the vdef for outer-loop vectorization.\n");
9732 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
9733 (to be used when vectorizing outer-loop stmts that use the DEF of
9734 STMT). */
9735 if (gimple_code (stmt) == GIMPLE_PHI)
9736 scalar_dest = PHI_RESULT (stmt);
9737 else
9738 scalar_dest = gimple_assign_lhs (stmt);
9740 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
9742 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
9744 exit_phi = USE_STMT (use_p);
9745 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
9750 /* Handle stmts whose DEF is used outside the loop-nest that is
9751 being vectorized. */
9752 if (STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
9754 done = can_vectorize_live_stmts (stmt, gsi, slp_node, &vec_stmt, NULL);
9755 gcc_assert (done);
9758 if (vec_stmt)
9759 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
9761 return is_store;
9765 /* Remove a group of stores (for SLP or interleaving), free their
9766 stmt_vec_info. */
9768 void
9769 vect_remove_stores (gimple *first_stmt)
9771 gimple *next = first_stmt;
9772 gimple *tmp;
9773 gimple_stmt_iterator next_si;
9775 while (next)
9777 stmt_vec_info stmt_info = vinfo_for_stmt (next);
9779 tmp = DR_GROUP_NEXT_ELEMENT (stmt_info);
9780 if (is_pattern_stmt_p (stmt_info))
9781 next = STMT_VINFO_RELATED_STMT (stmt_info);
9782 /* Free the attached stmt_vec_info and remove the stmt. */
9783 next_si = gsi_for_stmt (next);
9784 unlink_stmt_vdef (next);
9785 gsi_remove (&next_si, true);
9786 release_defs (next);
9787 free_stmt_vec_info (next);
9788 next = tmp;
9793 /* Function new_stmt_vec_info.
9795 Create and initialize a new stmt_vec_info struct for STMT. */
9797 stmt_vec_info
9798 new_stmt_vec_info (gimple *stmt, vec_info *vinfo)
9800 stmt_vec_info res;
9801 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
9803 STMT_VINFO_TYPE (res) = undef_vec_info_type;
9804 STMT_VINFO_STMT (res) = stmt;
9805 res->vinfo = vinfo;
9806 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
9807 STMT_VINFO_LIVE_P (res) = false;
9808 STMT_VINFO_VECTYPE (res) = NULL;
9809 STMT_VINFO_VEC_STMT (res) = NULL;
9810 STMT_VINFO_VECTORIZABLE (res) = true;
9811 STMT_VINFO_IN_PATTERN_P (res) = false;
9812 STMT_VINFO_RELATED_STMT (res) = NULL;
9813 STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
9814 STMT_VINFO_DATA_REF (res) = NULL;
9815 STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION;
9816 STMT_VINFO_VEC_CONST_COND_REDUC_CODE (res) = ERROR_MARK;
9818 if (gimple_code (stmt) == GIMPLE_PHI
9819 && is_loop_header_bb_p (gimple_bb (stmt)))
9820 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
9821 else
9822 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
9824 STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
9825 STMT_SLP_TYPE (res) = loop_vect;
9826 STMT_VINFO_NUM_SLP_USES (res) = 0;
9828 res->first_element = NULL; /* GROUP_FIRST_ELEMENT */
9829 res->next_element = NULL; /* GROUP_NEXT_ELEMENT */
9830 res->size = 0; /* GROUP_SIZE */
9831 res->store_count = 0; /* GROUP_STORE_COUNT */
9832 res->gap = 0; /* GROUP_GAP */
9833 res->same_dr_stmt = NULL; /* GROUP_SAME_DR_STMT */
9835 return res;
9839 /* Set the current stmt_vec_info vector to V. */
9841 void
9842 set_stmt_vec_info_vec (vec<stmt_vec_info> *v)
9844 stmt_vec_info_vec = v;
9847 /* Free the stmt_vec_info entries in V and release V. */
9849 void
9850 free_stmt_vec_infos (vec<stmt_vec_info> *v)
9852 unsigned int i;
9853 stmt_vec_info info;
9854 FOR_EACH_VEC_ELT (*v, i, info)
9855 if (info != NULL)
9856 free_stmt_vec_info (STMT_VINFO_STMT (info));
9857 if (v == stmt_vec_info_vec)
9858 stmt_vec_info_vec = NULL;
9859 v->release ();
9863 /* Free stmt vectorization related info. */
9865 void
9866 free_stmt_vec_info (gimple *stmt)
9868 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
9870 if (!stmt_info)
9871 return;
9873 /* Check if this statement has a related "pattern stmt"
9874 (introduced by the vectorizer during the pattern recognition
9875 pass). Free the pattern's stmt_vec_info and its def stmts' stmt_vec_infos
9876 too. */
9877 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
9879 stmt_vec_info patt_info
9880 = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
9881 if (patt_info)
9883 gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
9884 gimple *patt_stmt = STMT_VINFO_STMT (patt_info);
9885 gimple_set_bb (patt_stmt, NULL);
9886 tree lhs = gimple_get_lhs (patt_stmt);
9887 if (lhs && TREE_CODE (lhs) == SSA_NAME)
9888 release_ssa_name (lhs);
9889 if (seq)
9891 gimple_stmt_iterator si;
9892 for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
9894 gimple *seq_stmt = gsi_stmt (si);
9895 gimple_set_bb (seq_stmt, NULL);
9896 lhs = gimple_get_lhs (seq_stmt);
9897 if (lhs && TREE_CODE (lhs) == SSA_NAME)
9898 release_ssa_name (lhs);
9899 free_stmt_vec_info (seq_stmt);
9902 free_stmt_vec_info (patt_stmt);
9906 STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
9907 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
9908 set_vinfo_for_stmt (stmt, NULL);
9909 free (stmt_info);
9913 /* Function get_vectype_for_scalar_type_and_size.
9915 Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
9916 by the target. */
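/* For example (a sketch, assuming a target with 128-bit vectors): with
   SCALAR_TYPE == int and SIZE == 16 this returns a 4-element vector of int;
   with SIZE == 0 the mode chosen by targetm.vectorize.preferred_simd_mode
   is used instead of a fixed size.  */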
9918 tree
9919 get_vectype_for_scalar_type_and_size (tree scalar_type, poly_uint64 size)
9921 tree orig_scalar_type = scalar_type;
9922 scalar_mode inner_mode;
9923 machine_mode simd_mode;
9924 poly_uint64 nunits;
9925 tree vectype;
9927 if (!is_int_mode (TYPE_MODE (scalar_type), &inner_mode)
9928 && !is_float_mode (TYPE_MODE (scalar_type), &inner_mode))
9929 return NULL_TREE;
9931 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
9933 /* For vector types of elements whose mode precision doesn't
9934 match their type's precision, we use an element type of mode
9935 precision. The vectorization routines will have to make sure
9936 they support the proper result truncation/extension.
9937 We also make sure to build vector types with INTEGER_TYPE
9938 component type only. */
9939 if (INTEGRAL_TYPE_P (scalar_type)
9940 && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
9941 || TREE_CODE (scalar_type) != INTEGER_TYPE))
9942 scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
9943 TYPE_UNSIGNED (scalar_type));
9945 /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
9946 When the component mode passes the above test simply use a type
9947 corresponding to that mode. The theory is that any use that
9948 would cause problems with this will disable vectorization anyway. */
9949 else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
9950 && !INTEGRAL_TYPE_P (scalar_type))
9951 scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);
9953 /* We can't build a vector type of elements with alignment bigger than
9954 their size. */
9955 else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
9956 scalar_type = lang_hooks.types.type_for_mode (inner_mode,
9957 TYPE_UNSIGNED (scalar_type));
9959 /* If we fell back to using the mode, fail if there was
9960 no scalar type for it. */
9961 if (scalar_type == NULL_TREE)
9962 return NULL_TREE;
9964 /* If no size was supplied, use the mode the target prefers. Otherwise
9965 look up a vector mode of the specified size.
9966 if (known_eq (size, 0U))
9967 simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
9968 else if (!multiple_p (size, nbytes, &nunits)
9969 || !mode_for_vector (inner_mode, nunits).exists (&simd_mode))
9970 return NULL_TREE;
9971 /* NOTE: nunits == 1 is allowed to support single element vector types. */
9972 if (!multiple_p (GET_MODE_SIZE (simd_mode), nbytes, &nunits))
9973 return NULL_TREE;
9975 vectype = build_vector_type (scalar_type, nunits);
9977 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
9978 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
9979 return NULL_TREE;
9981 /* Re-attach the address-space qualifier if we canonicalized the scalar
9982 type. */
9983 if (TYPE_ADDR_SPACE (orig_scalar_type) != TYPE_ADDR_SPACE (vectype))
9984 return build_qualified_type
9985 (vectype, KEEP_QUAL_ADDR_SPACE (TYPE_QUALS (orig_scalar_type)));
9987 return vectype;
9990 poly_uint64 current_vector_size;
9992 /* Function get_vectype_for_scalar_type.
9994 Returns the vector type corresponding to SCALAR_TYPE as supported
9995 by the target. */
9997 tree
9998 get_vectype_for_scalar_type (tree scalar_type)
10000 tree vectype;
10001 vectype = get_vectype_for_scalar_type_and_size (scalar_type,
10002 current_vector_size);
10003 if (vectype
10004 && known_eq (current_vector_size, 0U))
10005 current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
10006 return vectype;
10009 /* Function get_mask_type_for_scalar_type.
10011 Returns the mask type corresponding to a result of comparison
10012 of vectors of specified SCALAR_TYPE as supported by the target. */
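/* E.g. (sketch): with current_vector_size == 16 bytes and SCALAR_TYPE == int
   the comparison vectors have 4 elements, so this returns the boolean vector
   type built by build_truth_vector_type (4, 16).  */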
10014 tree
10015 get_mask_type_for_scalar_type (tree scalar_type)
10017 tree vectype = get_vectype_for_scalar_type (scalar_type);
10019 if (!vectype)
10020 return NULL;
10022 return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype),
10023 current_vector_size);
10026 /* Function get_same_sized_vectype
10028 Returns a vector type corresponding to SCALAR_TYPE with the same size
10029 as VECTOR_TYPE, if supported by the target. */
10031 tree
10032 get_same_sized_vectype (tree scalar_type, tree vector_type)
10034 if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type))
10035 return build_same_sized_truth_vector_type (vector_type);
10037 return get_vectype_for_scalar_type_and_size
10038 (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
10041 /* Function vect_is_simple_use.
10043 Input:
10044 VINFO - the vect info of the loop or basic block that is being vectorized.
10045 OPERAND - operand in the loop or bb.
10046 Output:
10047 DEF_STMT - the defining stmt in case OPERAND is an SSA_NAME.
10048 DT - the type of definition
10050 Returns whether a stmt with OPERAND can be vectorized.
10051 For loops, supportable operands are constants, loop invariants, and operands
10052 that are defined by the current iteration of the loop. Unsupportable
10053 operands are those that are defined by a previous iteration of the loop (as
10054 is the case in reduction/induction computations).
10055 For basic blocks, supportable operands are constants and bb invariants.
10056 For now, operands defined outside the basic block are not supported. */
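/* A rough illustration of the classification done below: a constant operand
   such as 4 becomes vect_constant_def; an SSA name defined outside the region
   being vectorized (or a default definition) becomes vect_external_def; an
   SSA name defined by a statement inside the region gets that statement's
   STMT_VINFO_DEF_TYPE, e.g. vect_internal_def or vect_reduction_def.  */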
10058 bool
10059 vect_is_simple_use (tree operand, vec_info *vinfo,
10060 gimple **def_stmt, enum vect_def_type *dt)
10062 *def_stmt = NULL;
10063 *dt = vect_unknown_def_type;
10065 if (dump_enabled_p ())
10067 dump_printf_loc (MSG_NOTE, vect_location,
10068 "vect_is_simple_use: operand ");
10069 dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
10070 dump_printf (MSG_NOTE, "\n");
10073 if (CONSTANT_CLASS_P (operand))
10075 *dt = vect_constant_def;
10076 return true;
10079 if (is_gimple_min_invariant (operand))
10081 *dt = vect_external_def;
10082 return true;
10085 if (TREE_CODE (operand) != SSA_NAME)
10087 if (dump_enabled_p ())
10088 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10089 "not ssa-name.\n");
10090 return false;
10093 if (SSA_NAME_IS_DEFAULT_DEF (operand))
10095 *dt = vect_external_def;
10096 return true;
10099 *def_stmt = SSA_NAME_DEF_STMT (operand);
10100 if (dump_enabled_p ())
10102 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
10103 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
10106 if (! vect_stmt_in_region_p (vinfo, *def_stmt))
10107 *dt = vect_external_def;
10108 else
10110 stmt_vec_info stmt_vinfo = vinfo_for_stmt (*def_stmt);
10111 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
10114 if (dump_enabled_p ())
10116 dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
10117 switch (*dt)
10119 case vect_uninitialized_def:
10120 dump_printf (MSG_NOTE, "uninitialized\n");
10121 break;
10122 case vect_constant_def:
10123 dump_printf (MSG_NOTE, "constant\n");
10124 break;
10125 case vect_external_def:
10126 dump_printf (MSG_NOTE, "external\n");
10127 break;
10128 case vect_internal_def:
10129 dump_printf (MSG_NOTE, "internal\n");
10130 break;
10131 case vect_induction_def:
10132 dump_printf (MSG_NOTE, "induction\n");
10133 break;
10134 case vect_reduction_def:
10135 dump_printf (MSG_NOTE, "reduction\n");
10136 break;
10137 case vect_double_reduction_def:
10138 dump_printf (MSG_NOTE, "double reduction\n");
10139 break;
10140 case vect_nested_cycle:
10141 dump_printf (MSG_NOTE, "nested cycle\n");
10142 break;
10143 case vect_unknown_def_type:
10144 dump_printf (MSG_NOTE, "unknown\n");
10145 break;
10149 if (*dt == vect_unknown_def_type)
10151 if (dump_enabled_p ())
10152 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10153 "Unsupported pattern.\n");
10154 return false;
10157 switch (gimple_code (*def_stmt))
10159 case GIMPLE_PHI:
10160 case GIMPLE_ASSIGN:
10161 case GIMPLE_CALL:
10162 break;
10163 default:
10164 if (dump_enabled_p ())
10165 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10166 "unsupported defining stmt:\n");
10167 return false;
10170 return true;
10173 /* Function vect_is_simple_use.
10175 Same as vect_is_simple_use but also determines the vector operand
10176 type of OPERAND and stores it to *VECTYPE. If the definition of
10177 OPERAND is vect_uninitialized_def, vect_constant_def or
10178 vect_external_def, *VECTYPE will be set to NULL_TREE and the caller
10179 is responsible for computing the best-suited vector type for the
10180 scalar operand. */
10182 bool
10183 vect_is_simple_use (tree operand, vec_info *vinfo,
10184 gimple **def_stmt, enum vect_def_type *dt, tree *vectype)
10186 if (!vect_is_simple_use (operand, vinfo, def_stmt, dt))
10187 return false;
10189 /* Now get a vector type if the def is internal, otherwise supply
10190 NULL_TREE and leave it up to the caller to figure out a proper
10191 type for the use stmt. */
10192 if (*dt == vect_internal_def
10193 || *dt == vect_induction_def
10194 || *dt == vect_reduction_def
10195 || *dt == vect_double_reduction_def
10196 || *dt == vect_nested_cycle)
10198 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
10200 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
10201 && !STMT_VINFO_RELEVANT (stmt_info)
10202 && !STMT_VINFO_LIVE_P (stmt_info))
10203 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
10205 *vectype = STMT_VINFO_VECTYPE (stmt_info);
10206 gcc_assert (*vectype != NULL_TREE);
10208 else if (*dt == vect_uninitialized_def
10209 || *dt == vect_constant_def
10210 || *dt == vect_external_def)
10211 *vectype = NULL_TREE;
10212 else
10213 gcc_unreachable ();
10215 return true;
10219 /* Function supportable_widening_operation
10221 Check whether an operation represented by the code CODE is a
10222 widening operation that is supported by the target platform in
10223 vector form (i.e., when operating on arguments of type VECTYPE_IN
10224 producing a result of type VECTYPE_OUT).
10226 Widening operations we currently support are NOP (CONVERT), FLOAT,
10227 FIX_TRUNC and WIDEN_MULT. This function checks if these operations
10228 are supported by the target platform either directly (via vector
10229 tree-codes), or via target builtins.
10231 Output:
10232 - CODE1 and CODE2 are codes of vector operations to be used when
10233 vectorizing the operation, if available.
10234 - MULTI_STEP_CVT determines the number of required intermediate steps in
10235 case of multi-step conversion (like char->short->int - in that case
10236 MULTI_STEP_CVT will be 1).
10237 - INTERM_TYPES contains the intermediate type required to perform the
10238 widening operation (short in the above example). */
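/* A possible outcome (a sketch, assuming 128-bit vectors): a WIDEN_MULT from
   vector short to vector int sets *CODE1/*CODE2 to the LO/HI (or EVEN/ODD)
   variants of VEC_WIDEN_MULT with *MULTI_STEP_CVT == 0, while converting
   vector char all the way to vector int goes through one intermediate
   vector-short step, so *MULTI_STEP_CVT == 1 and *INTERM_TYPES holds that
   intermediate vector type.  */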
10240 bool
10241 supportable_widening_operation (enum tree_code code, gimple *stmt,
10242 tree vectype_out, tree vectype_in,
10243 enum tree_code *code1, enum tree_code *code2,
10244 int *multi_step_cvt,
10245 vec<tree> *interm_types)
10247 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
10248 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
10249 struct loop *vect_loop = NULL;
10250 machine_mode vec_mode;
10251 enum insn_code icode1, icode2;
10252 optab optab1, optab2;
10253 tree vectype = vectype_in;
10254 tree wide_vectype = vectype_out;
10255 enum tree_code c1, c2;
10256 int i;
10257 tree prev_type, intermediate_type;
10258 machine_mode intermediate_mode, prev_mode;
10259 optab optab3, optab4;
10261 *multi_step_cvt = 0;
10262 if (loop_info)
10263 vect_loop = LOOP_VINFO_LOOP (loop_info);
10265 switch (code)
10267 case WIDEN_MULT_EXPR:
10268 /* The result of a vectorized widening operation usually requires
10269 two vectors (because the widened results do not fit into one vector).
10270 The generated vector results would normally be expected to be
10271 generated in the same order as in the original scalar computation,
10272 i.e. if 8 results are generated in each vector iteration, they are
10273 to be organized as follows:
10274 vect1: [res1,res2,res3,res4],
10275 vect2: [res5,res6,res7,res8].
10277 However, in the special case that the result of the widening
10278 operation is used in a reduction computation only, the order doesn't
10279 matter (because when vectorizing a reduction we change the order of
10280 the computation). Some targets can take advantage of this and
10281 generate more efficient code. For example, targets like Altivec,
10282 that support widen_mult using a sequence of {mult_even,mult_odd}
10283 generate the following vectors:
10284 vect1: [res1,res3,res5,res7],
10285 vect2: [res2,res4,res6,res8].
10287 When vectorizing outer-loops, we execute the inner-loop sequentially
10288 (each vectorized inner-loop iteration contributes to VF outer-loop
10289 iterations in parallel). We therefore don't allow changing the
10290 order of the computation in the inner-loop during outer-loop
10291 vectorization. */
10292 /* TODO: Another case in which order doesn't *really* matter is when we
10293 widen and then contract again, e.g. (short)((int)x * y >> 8).
10294 Normally, pack_trunc performs an even/odd permute, whereas the
10295 repack from an even/odd expansion would be an interleave, which
10296 would be significantly simpler for e.g. AVX2. */
10297 /* In any case, in order to avoid duplicating the code below, recurse
10298 on VEC_WIDEN_MULT_EVEN_EXPR. If it succeeds, all the return values
10299 are properly set up for the caller. If we fail, we'll continue with
10300 a VEC_WIDEN_MULT_LO/HI_EXPR check. */
10301 if (vect_loop
10302 && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
10303 && !nested_in_vect_loop_p (vect_loop, stmt)
10304 && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
10305 stmt, vectype_out, vectype_in,
10306 code1, code2, multi_step_cvt,
10307 interm_types))
10309 /* Elements in a vector with vect_used_by_reduction property cannot
10310 be reordered if the use chain with this property does not have the
10311 same operation. One such example is s += a * b, where elements
10312 in a and b cannot be reordered. Here we check if the vector defined
10313 by STMT is only directly used in the reduction statement. */
10314 tree lhs = gimple_assign_lhs (stmt);
10315 use_operand_p dummy;
10316 gimple *use_stmt;
10317 stmt_vec_info use_stmt_info = NULL;
10318 if (single_imm_use (lhs, &dummy, &use_stmt)
10319 && (use_stmt_info = vinfo_for_stmt (use_stmt))
10320 && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
10321 return true;
10323 c1 = VEC_WIDEN_MULT_LO_EXPR;
10324 c2 = VEC_WIDEN_MULT_HI_EXPR;
10325 break;
10327 case DOT_PROD_EXPR:
10328 c1 = DOT_PROD_EXPR;
10329 c2 = DOT_PROD_EXPR;
10330 break;
10332 case SAD_EXPR:
10333 c1 = SAD_EXPR;
10334 c2 = SAD_EXPR;
10335 break;
10337 case VEC_WIDEN_MULT_EVEN_EXPR:
10338 /* Support the recursion induced just above. */
10339 c1 = VEC_WIDEN_MULT_EVEN_EXPR;
10340 c2 = VEC_WIDEN_MULT_ODD_EXPR;
10341 break;
10343 case WIDEN_LSHIFT_EXPR:
10344 c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
10345 c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
10346 break;
10348 CASE_CONVERT:
10349 c1 = VEC_UNPACK_LO_EXPR;
10350 c2 = VEC_UNPACK_HI_EXPR;
10351 break;
10353 case FLOAT_EXPR:
10354 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
10355 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
10356 break;
10358 case FIX_TRUNC_EXPR:
10359 c1 = VEC_UNPACK_FIX_TRUNC_LO_EXPR;
10360 c2 = VEC_UNPACK_FIX_TRUNC_HI_EXPR;
10361 break;
10363 default:
10364 gcc_unreachable ();
10367 if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
10368 std::swap (c1, c2);
10370 if (code == FIX_TRUNC_EXPR)
10372 /* The signedness is determined from the output operand. */
10373 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
10374 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
10376 else
10378 optab1 = optab_for_tree_code (c1, vectype, optab_default);
10379 optab2 = optab_for_tree_code (c2, vectype, optab_default);
10382 if (!optab1 || !optab2)
10383 return false;
10385 vec_mode = TYPE_MODE (vectype);
10386 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
10387 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
10388 return false;
10390 *code1 = c1;
10391 *code2 = c2;
10393 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
10394 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
10395 /* For scalar masks we may have different boolean
10396 vector types having the same QImode. Thus we
10397 add an additional check on the number of elements. */
10398 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
10399 || known_eq (TYPE_VECTOR_SUBPARTS (vectype),
10400 TYPE_VECTOR_SUBPARTS (wide_vectype) * 2));
10402 /* Check if it's a multi-step conversion that can be done using intermediate
10403 types. */
10405 prev_type = vectype;
10406 prev_mode = vec_mode;
10408 if (!CONVERT_EXPR_CODE_P (code))
10409 return false;
10411 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
10412 intermediate steps in the promotion sequence. We try
10413 MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
10414 not. */
10415 interm_types->create (MAX_INTERM_CVT_STEPS);
10416 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
10418 intermediate_mode = insn_data[icode1].operand[0].mode;
10419 if (VECTOR_BOOLEAN_TYPE_P (prev_type))
10421 intermediate_type = vect_halve_mask_nunits (prev_type);
10422 if (intermediate_mode != TYPE_MODE (intermediate_type))
10423 return false;
10425 else
10426 intermediate_type
10427 = lang_hooks.types.type_for_mode (intermediate_mode,
10428 TYPE_UNSIGNED (prev_type));
10430 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
10431 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
10433 if (!optab3 || !optab4
10434 || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
10435 || insn_data[icode1].operand[0].mode != intermediate_mode
10436 || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
10437 || insn_data[icode2].operand[0].mode != intermediate_mode
10438 || ((icode1 = optab_handler (optab3, intermediate_mode))
10439 == CODE_FOR_nothing)
10440 || ((icode2 = optab_handler (optab4, intermediate_mode))
10441 == CODE_FOR_nothing))
10442 break;
10444 interm_types->quick_push (intermediate_type);
10445 (*multi_step_cvt)++;
10447 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
10448 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
10449 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
10450 || known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type),
10451 TYPE_VECTOR_SUBPARTS (wide_vectype) * 2));
10453 prev_type = intermediate_type;
10454 prev_mode = intermediate_mode;
10457 interm_types->release ();
10458 return false;
10462 /* Function supportable_narrowing_operation
10464 Check whether an operation represented by the code CODE is a
10465 narrowing operation that is supported by the target platform in
10466 vector form (i.e., when operating on arguments of type VECTYPE_IN
10467 and producing a result of type VECTYPE_OUT).
10469 Narrowing operations we currently support are NOP (CONVERT), FIX_TRUNC
10470 and FLOAT. This function checks if these operations are supported by
10471 the target platform directly via vector tree-codes.
10473 Output:
10474 - CODE1 is the code of a vector operation to be used when
10475 vectorizing the operation, if available.
10476 - MULTI_STEP_CVT determines the number of required intermediate steps in
10477 case of multi-step conversion (like int->short->char - in that case
10478 MULTI_STEP_CVT will be 1).
10479 - INTERM_TYPES contains the intermediate type required to perform the
10480 narrowing operation (short in the above example). */
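/* Correspondingly (a sketch, assuming 128-bit vectors): truncating vector int
   to vector char uses VEC_PACK_TRUNC_EXPR through one intermediate
   vector-short step, so *CODE1 == VEC_PACK_TRUNC_EXPR, *MULTI_STEP_CVT == 1
   and *INTERM_TYPES holds the intermediate vector type; a single-step
   short -> char truncation has *MULTI_STEP_CVT == 0.  */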
10482 bool
10483 supportable_narrowing_operation (enum tree_code code,
10484 tree vectype_out, tree vectype_in,
10485 enum tree_code *code1, int *multi_step_cvt,
10486 vec<tree> *interm_types)
10488 machine_mode vec_mode;
10489 enum insn_code icode1;
10490 optab optab1, interm_optab;
10491 tree vectype = vectype_in;
10492 tree narrow_vectype = vectype_out;
10493 enum tree_code c1;
10494 tree intermediate_type, prev_type;
10495 machine_mode intermediate_mode, prev_mode;
10496 int i;
10497 bool uns;
10499 *multi_step_cvt = 0;
10500 switch (code)
10502 CASE_CONVERT:
10503 c1 = VEC_PACK_TRUNC_EXPR;
10504 break;
10506 case FIX_TRUNC_EXPR:
10507 c1 = VEC_PACK_FIX_TRUNC_EXPR;
10508 break;
10510 case FLOAT_EXPR:
10511 c1 = VEC_PACK_FLOAT_EXPR;
10512 break;
10514 default:
10515 gcc_unreachable ();
10518 if (code == FIX_TRUNC_EXPR)
10519 /* The signedness is determined from the output operand. */
10520 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
10521 else
10522 optab1 = optab_for_tree_code (c1, vectype, optab_default);
10524 if (!optab1)
10525 return false;
10527 vec_mode = TYPE_MODE (vectype);
10528 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
10529 return false;
10531 *code1 = c1;
10533 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
10534 /* For scalar masks we may have different boolean
10535 vector types having the same QImode. Thus we
10536 add an additional check on the number of elements. */
10537 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
10538 || known_eq (TYPE_VECTOR_SUBPARTS (vectype) * 2,
10539 TYPE_VECTOR_SUBPARTS (narrow_vectype)));
10541 if (code == FLOAT_EXPR)
10542 return false;
10544 /* Check if it's a multi-step conversion that can be done using intermediate
10545 types. */
10546 prev_mode = vec_mode;
10547 prev_type = vectype;
10548 if (code == FIX_TRUNC_EXPR)
10549 uns = TYPE_UNSIGNED (vectype_out);
10550 else
10551 uns = TYPE_UNSIGNED (vectype);
10553 /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
10554 conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
10555 costly than signed. */
10556 if (code == FIX_TRUNC_EXPR && uns)
10558 enum insn_code icode2;
10560 intermediate_type
10561 = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
10562 interm_optab
10563 = optab_for_tree_code (c1, intermediate_type, optab_default);
10564 if (interm_optab != unknown_optab
10565 && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
10566 && insn_data[icode1].operand[0].mode
10567 == insn_data[icode2].operand[0].mode)
10569 uns = false;
10570 optab1 = interm_optab;
10571 icode1 = icode2;
10575 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
10576 intermediate steps in the narrowing sequence. We try
10577 MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not. */
10578 interm_types->create (MAX_INTERM_CVT_STEPS);
10579 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
10581 intermediate_mode = insn_data[icode1].operand[0].mode;
10582 if (VECTOR_BOOLEAN_TYPE_P (prev_type))
10584 intermediate_type = vect_double_mask_nunits (prev_type);
10585 if (intermediate_mode != TYPE_MODE (intermediate_type))
10586 return false;
10588 else
10589 intermediate_type
10590 = lang_hooks.types.type_for_mode (intermediate_mode, uns);
10591 interm_optab
10592 = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
10593 optab_default);
10594 if (!interm_optab
10595 || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
10596 || insn_data[icode1].operand[0].mode != intermediate_mode
10597 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
10598 == CODE_FOR_nothing))
10599 break;
10601 interm_types->quick_push (intermediate_type);
10602 (*multi_step_cvt)++;
10604 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
10605 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
10606 || known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type) * 2,
10607 TYPE_VECTOR_SUBPARTS (narrow_vectype)));
10609 prev_mode = intermediate_mode;
10610 prev_type = intermediate_type;
10611 optab1 = interm_optab;
10614 interm_types->release ();
10615 return false;
10618 /* Generate and return a statement that sets vector mask MASK such that
10619 MASK[I] is true iff J + START_INDEX < END_INDEX for all J <= I. */
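/* For example (illustrative only): with an 8-element mask, START_INDEX == i
   and END_INDEX == n where n - i == 3, the generated call
     mask = .WHILE_ULT (i, n, { 0, ... });
   yields the mask { 1, 1, 1, 0, 0, 0, 0, 0 }.  */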
10621 gcall *
10622 vect_gen_while (tree mask, tree start_index, tree end_index)
10624 tree cmp_type = TREE_TYPE (start_index);
10625 tree mask_type = TREE_TYPE (mask);
10626 gcc_checking_assert (direct_internal_fn_supported_p (IFN_WHILE_ULT,
10627 cmp_type, mask_type,
10628 OPTIMIZE_FOR_SPEED));
10629 gcall *call = gimple_build_call_internal (IFN_WHILE_ULT, 3,
10630 start_index, end_index,
10631 build_zero_cst (mask_type));
10632 gimple_call_set_lhs (call, mask);
10633 return call;
10636 /* Generate a vector mask of type MASK_TYPE for which index I is false iff
10637 J + START_INDEX < END_INDEX for all J <= I. Add the statements to SEQ. */
10639 tree
10640 vect_gen_while_not (gimple_seq *seq, tree mask_type, tree start_index,
10641 tree end_index)
10643 tree tmp = make_ssa_name (mask_type);
10644 gcall *call = vect_gen_while (tmp, start_index, end_index);
10645 gimple_seq_add_stmt (seq, call);
10646 return gimple_build (seq, BIT_NOT_EXPR, mask_type, tmp);
10649 /* Try to compute the vector types required to vectorize STMT_INFO,
10650 returning true on success and false if vectorization isn't possible.
10652 On success:
10654 - Set *STMT_VECTYPE_OUT to:
10655 - NULL_TREE if the statement doesn't need to be vectorized;
10656 - boolean_type_node if the statement is a boolean operation whose
10657 vector type can only be determined once all the other vector types
10658 are known; and
10659 - the equivalent of STMT_VINFO_VECTYPE otherwise.
10661 - Set *NUNITS_VECTYPE_OUT to the vector type that contains the maximum
10662 number of units needed to vectorize STMT_INFO, or NULL_TREE if the
10663 statement does not help to determine the overall number of units. */
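/* For instance (a sketch, assuming 128-bit vectors): for a conversion
   statement int_x = (int) short_y, *STMT_VECTYPE_OUT is the vector-int type
   while *NUNITS_VECTYPE_OUT is the vector type of the smallest scalar type
   involved (vector short, eight units), which is what limits the number of
   units for the statement.  */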
10665 bool
10666 vect_get_vector_types_for_stmt (stmt_vec_info stmt_info,
10667 tree *stmt_vectype_out,
10668 tree *nunits_vectype_out)
10670 gimple *stmt = stmt_info->stmt;
10672 *stmt_vectype_out = NULL_TREE;
10673 *nunits_vectype_out = NULL_TREE;
10675 if (gimple_get_lhs (stmt) == NULL_TREE
10676 /* MASK_STORE has no lhs, but is ok. */
10677 && !gimple_call_internal_p (stmt, IFN_MASK_STORE))
10679 if (is_a <gcall *> (stmt))
10681 /* Ignore calls with no lhs. These must be calls to
10682 #pragma omp simd functions, and what vectorization factor
10683 they really need can't be determined until
10684 vectorizable_simd_clone_call. */
10685 if (dump_enabled_p ())
10686 dump_printf_loc (MSG_NOTE, vect_location,
10687 "defer to SIMD clone analysis.\n");
10688 return true;
10691 if (dump_enabled_p ())
10693 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10694 "not vectorized: irregular stmt.");
10695 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
10697 return false;
10700 if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
10702 if (dump_enabled_p ())
10704 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10705 "not vectorized: vector stmt in loop:");
10706 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
10708 return false;
10711 tree vectype;
10712 tree scalar_type = NULL_TREE;
10713 if (STMT_VINFO_VECTYPE (stmt_info))
10714 *stmt_vectype_out = vectype = STMT_VINFO_VECTYPE (stmt_info);
10715 else
10717 gcc_assert (!STMT_VINFO_DATA_REF (stmt_info));
10718 if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
10719 scalar_type = TREE_TYPE (gimple_call_arg (stmt, 3));
10720 else
10721 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
10723 /* Pure bool ops don't participate in number-of-units computation.
10724 For comparisons use the types being compared. */
10725 if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type)
10726 && is_gimple_assign (stmt)
10727 && gimple_assign_rhs_code (stmt) != COND_EXPR)
10729 *stmt_vectype_out = boolean_type_node;
10731 tree rhs1 = gimple_assign_rhs1 (stmt);
10732 if (TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison
10733 && !VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (rhs1)))
10734 scalar_type = TREE_TYPE (rhs1);
10735 else
10737 if (dump_enabled_p ())
10738 dump_printf_loc (MSG_NOTE, vect_location,
10739 "pure bool operation.\n");
10740 return true;
10744 if (dump_enabled_p ())
10746 dump_printf_loc (MSG_NOTE, vect_location,
10747 "get vectype for scalar type: ");
10748 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
10749 dump_printf (MSG_NOTE, "\n");
10751 vectype = get_vectype_for_scalar_type (scalar_type);
10752 if (!vectype)
10754 if (dump_enabled_p ())
10756 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10757 "not vectorized: unsupported data-type ");
10758 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
10759 scalar_type);
10760 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
10762 return false;
10765 if (!*stmt_vectype_out)
10766 *stmt_vectype_out = vectype;
10768 if (dump_enabled_p ())
10770 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
10771 dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
10772 dump_printf (MSG_NOTE, "\n");
10776 /* Don't try to compute scalar types if the stmt produces a boolean
10777 vector; use the existing vector type instead. */
10778 tree nunits_vectype;
10779 if (VECTOR_BOOLEAN_TYPE_P (vectype))
10780 nunits_vectype = vectype;
10781 else
10783 /* The number of units is set according to the smallest scalar
10784 type (or the largest vector size, but we only support one
10785 vector size per vectorization). */
10786 if (*stmt_vectype_out != boolean_type_node)
10788 HOST_WIDE_INT dummy;
10789 scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy);
10791 if (dump_enabled_p ())
10793 dump_printf_loc (MSG_NOTE, vect_location,
10794 "get vectype for scalar type: ");
10795 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
10796 dump_printf (MSG_NOTE, "\n");
10798 nunits_vectype = get_vectype_for_scalar_type (scalar_type);
10800 if (!nunits_vectype)
10802 if (dump_enabled_p ())
10804 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10805 "not vectorized: unsupported data-type ");
10806 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type);
10807 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
10809 return false;
10812 if (maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype)),
10813 GET_MODE_SIZE (TYPE_MODE (nunits_vectype))))
10815 if (dump_enabled_p ())
10817 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10818 "not vectorized: different sized vector "
10819 "types in statement, ");
10820 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, vectype);
10821 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
10822 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, nunits_vectype);
10823 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
10825 return false;
10828 if (dump_enabled_p ())
10830 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
10831 dump_generic_expr (MSG_NOTE, TDF_SLIM, nunits_vectype);
10832 dump_printf (MSG_NOTE, "\n");
10834 dump_printf_loc (MSG_NOTE, vect_location, "nunits = ");
10835 dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (nunits_vectype));
10836 dump_printf (MSG_NOTE, "\n");
10839 *nunits_vectype_out = nunits_vectype;
10840 return true;
10843 /* Try to determine the correct vector type for STMT_INFO, which is a
10844 statement that produces a scalar boolean result. Return the vector
10845 type on success, otherwise return NULL_TREE. */
10847 tree
10848 vect_get_mask_type_for_stmt (stmt_vec_info stmt_info)
10850 gimple *stmt = stmt_info->stmt;
10851 tree mask_type = NULL;
10852 tree vectype, scalar_type;
10854 if (is_gimple_assign (stmt)
10855 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison
10856 && !VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt))))
10858 scalar_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
10859 mask_type = get_mask_type_for_scalar_type (scalar_type);
10861 if (!mask_type)
10863 if (dump_enabled_p ())
10864 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10865 "not vectorized: unsupported mask\n");
10866 return NULL_TREE;
10869 else
10871 tree rhs;
10872 ssa_op_iter iter;
10873 gimple *def_stmt;
10874 enum vect_def_type dt;
10876 FOR_EACH_SSA_TREE_OPERAND (rhs, stmt, iter, SSA_OP_USE)
10878 if (!vect_is_simple_use (rhs, stmt_info->vinfo,
10879 &def_stmt, &dt, &vectype))
10881 if (dump_enabled_p ())
10883 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10884 "not vectorized: can't compute mask type "
10885 "for statement, ");
10886 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt,
10889 return NULL_TREE;
10892 /* No vectype probably means external definition.
10893 Allow it in case there is another operand which
10894 allows the mask type to be determined. */
10895 if (!vectype)
10896 continue;
10898 if (!mask_type)
10899 mask_type = vectype;
10900 else if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_type),
10901 TYPE_VECTOR_SUBPARTS (vectype)))
10903 if (dump_enabled_p ())
10905 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10906 "not vectorized: different sized masks "
10907 "types in statement, ");
10908 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
10909 mask_type);
10910 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
10911 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
10912 vectype);
10913 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
10915 return NULL_TREE;
10917 else if (VECTOR_BOOLEAN_TYPE_P (mask_type)
10918 != VECTOR_BOOLEAN_TYPE_P (vectype))
10920 if (dump_enabled_p ())
10922 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10923 "not vectorized: mixed mask and "
10924 "nonmask vector types in statement, ");
10925 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
10926 mask_type);
10927 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
10928 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
10929 vectype);
10930 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
10932 return NULL_TREE;
10936 /* We may compare a boolean value loaded as a vector of integers.
10937 Fix mask_type in that case. */
10938 if (mask_type
10939 && !VECTOR_BOOLEAN_TYPE_P (mask_type)
10940 && gimple_code (stmt) == GIMPLE_ASSIGN
10941 && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
10942 mask_type = build_same_sized_truth_vector_type (mask_type);
10945 /* A missing mask_type should mean a loop-invariant predicate.
10946 This is probably a subject for optimization in if-conversion. */
10947 if (!mask_type && dump_enabled_p ())
10949 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10950 "not vectorized: can't compute mask type "
10951 "for statement, ");
10952 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
10954 return mask_type;