gcc/tree-vect-stmts.cc
1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003-2023 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
4 and Ira Rosen <irar@il.ibm.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "ssa.h"
31 #include "optabs-tree.h"
32 #include "insn-config.h"
33 #include "recog.h" /* FIXME: for insn_data */
34 #include "cgraph.h"
35 #include "dumpfile.h"
36 #include "alias.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
39 #include "tree-eh.h"
40 #include "gimplify.h"
41 #include "gimple-iterator.h"
42 #include "gimplify-me.h"
43 #include "tree-cfg.h"
44 #include "tree-ssa-loop-manip.h"
45 #include "cfgloop.h"
46 #include "explow.h"
47 #include "tree-ssa-loop.h"
48 #include "tree-scalar-evolution.h"
49 #include "tree-vectorizer.h"
50 #include "builtins.h"
51 #include "internal-fn.h"
52 #include "tree-vector-builder.h"
53 #include "vec-perm-indices.h"
54 #include "tree-ssa-loop-niter.h"
55 #include "gimple-fold.h"
56 #include "regs.h"
57 #include "attribs.h"
59 /* For lang_hooks.types.type_for_mode. */
60 #include "langhooks.h"
62 /* Return the vectorized type for the given statement. */
64 tree
65 stmt_vectype (class _stmt_vec_info *stmt_info)
67 return STMT_VINFO_VECTYPE (stmt_info);
70 /* Return TRUE iff the given statement is in an inner loop relative to
71 the loop being vectorized. */
72 bool
73 stmt_in_inner_loop_p (vec_info *vinfo, class _stmt_vec_info *stmt_info)
75 gimple *stmt = STMT_VINFO_STMT (stmt_info);
76 basic_block bb = gimple_bb (stmt);
77 loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
78 class loop* loop;
80 if (!loop_vinfo)
81 return false;
83 loop = LOOP_VINFO_LOOP (loop_vinfo);
85 return (bb->loop_father == loop->inner);
88 /* Record the cost of a statement, either by directly informing the
89 target model or by saving it in a vector for later processing.
90 Return a preliminary estimate of the statement's cost. */
92 static unsigned
93 record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
94 enum vect_cost_for_stmt kind,
95 stmt_vec_info stmt_info, slp_tree node,
96 tree vectype, int misalign,
97 enum vect_cost_model_location where)
99 if ((kind == vector_load || kind == unaligned_load)
100 && (stmt_info && STMT_VINFO_GATHER_SCATTER_P (stmt_info)))
101 kind = vector_gather_load;
102 if ((kind == vector_store || kind == unaligned_store)
103 && (stmt_info && STMT_VINFO_GATHER_SCATTER_P (stmt_info)))
104 kind = vector_scatter_store;
106 stmt_info_for_cost si
107 = { count, kind, where, stmt_info, node, vectype, misalign };
108 body_cost_vec->safe_push (si);
110 return (unsigned)
111 (builtin_vectorization_cost (kind, vectype, misalign) * count);
114 unsigned
115 record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
116 enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
117 tree vectype, int misalign,
118 enum vect_cost_model_location where)
120 return record_stmt_cost (body_cost_vec, count, kind, stmt_info, NULL,
121 vectype, misalign, where);
124 unsigned
125 record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
126 enum vect_cost_for_stmt kind, slp_tree node,
127 tree vectype, int misalign,
128 enum vect_cost_model_location where)
130 return record_stmt_cost (body_cost_vec, count, kind, NULL, node,
131 vectype, misalign, where);
134 unsigned
135 record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
136 enum vect_cost_for_stmt kind,
137 enum vect_cost_model_location where)
139 gcc_assert (kind == cond_branch_taken || kind == cond_branch_not_taken
140 || kind == scalar_stmt);
141 return record_stmt_cost (body_cost_vec, count, kind, NULL, NULL,
142 NULL_TREE, 0, where);
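/* Illustrative sketch, assuming cost_vec, stmt_info and vectype are in
   scope: a vectorizable_* routine typically accumulates costs through the
   overloads above, e.g.

     unsigned inside_cost = 0, prologue_cost = 0;
     inside_cost += record_stmt_cost (cost_vec, 1, vector_stmt, stmt_info,
				      vectype, 0, vect_body);
     prologue_cost += record_stmt_cost (cost_vec, 1, cond_branch_taken,
					vect_prologue);

   The return values are only preliminary estimates; the recorded
   stmt_info_for_cost entries are what the target cost model processes
   later.  */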
145 /* Return a variable of type ELEM_TYPE[NELEMS]. */
147 static tree
148 create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
150 return create_tmp_var (build_array_type_nelts (elem_type, nelems),
151 "vect_array");
154 /* ARRAY is an array of vectors created by create_vector_array.
155 Return an SSA_NAME for the vector in index N. The reference
156 is part of the vectorization of STMT_INFO and the vector is associated
157 with scalar destination SCALAR_DEST. */
159 static tree
160 read_vector_array (vec_info *vinfo,
161 stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
162 tree scalar_dest, tree array, unsigned HOST_WIDE_INT n)
164 tree vect_type, vect, vect_name, array_ref;
165 gimple *new_stmt;
167 gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
168 vect_type = TREE_TYPE (TREE_TYPE (array));
169 vect = vect_create_destination_var (scalar_dest, vect_type);
170 array_ref = build4 (ARRAY_REF, vect_type, array,
171 build_int_cst (size_type_node, n),
172 NULL_TREE, NULL_TREE);
174 new_stmt = gimple_build_assign (vect, array_ref);
175 vect_name = make_ssa_name (vect, new_stmt);
176 gimple_assign_set_lhs (new_stmt, vect_name);
177 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
179 return vect_name;
182 /* ARRAY is an array of vectors created by create_vector_array.
183 Emit code to store SSA_NAME VECT in index N of the array.
184 The store is part of the vectorization of STMT_INFO. */
186 static void
187 write_vector_array (vec_info *vinfo,
188 stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
189 tree vect, tree array, unsigned HOST_WIDE_INT n)
191 tree array_ref;
192 gimple *new_stmt;
194 array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
195 build_int_cst (size_type_node, n),
196 NULL_TREE, NULL_TREE);
198 new_stmt = gimple_build_assign (array_ref, vect);
199 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
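/* Illustrative sketch of how the array helpers above compose, for instance
   around an IFN_LOAD_LANES/IFN_STORE_LANES expansion; vinfo, stmt_info, gsi,
   vectype, vec_def and scalar_dest are assumed to be in scope:

     tree vec_array = create_vector_array (vectype, 4);
     write_vector_array (vinfo, stmt_info, gsi, vec_def, vec_array, 0);
     tree elt = read_vector_array (vinfo, stmt_info, gsi, scalar_dest,
				   vec_array, 0);

   with vect_clobber_variable (defined below) used once the array is no
   longer needed.  */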
202 /* PTR is a pointer to an array of type TYPE. Return a representation
203 of *PTR. The memory reference replaces those in FIRST_DR
204 (and its group). */
206 static tree
207 create_array_ref (tree type, tree ptr, tree alias_ptr_type)
209 tree mem_ref;
211 mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
212 /* Arrays have the same alignment as their type. */
213 set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
214 return mem_ref;
217 /* Add a clobber of variable VAR to the vectorization of STMT_INFO.
218 Emit the clobber before *GSI. */
220 static void
221 vect_clobber_variable (vec_info *vinfo, stmt_vec_info stmt_info,
222 gimple_stmt_iterator *gsi, tree var)
224 tree clobber = build_clobber (TREE_TYPE (var));
225 gimple *new_stmt = gimple_build_assign (var, clobber);
226 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
229 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
231 /* Function vect_mark_relevant.
233 Mark STMT_INFO as "relevant for vectorization" and add it to WORKLIST. */
235 static void
236 vect_mark_relevant (vec<stmt_vec_info> *worklist, stmt_vec_info stmt_info,
237 enum vect_relevant relevant, bool live_p)
239 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
240 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
242 if (dump_enabled_p ())
243 dump_printf_loc (MSG_NOTE, vect_location,
244 "mark relevant %d, live %d: %G", relevant, live_p,
245 stmt_info->stmt);
247 /* If this stmt is an original stmt in a pattern, we might need to mark its
248 related pattern stmt instead of the original stmt. However, such stmts
249 may have their own uses that are not in any pattern; in such cases the
250 stmt itself should be marked. */
251 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
253 /* This is the last stmt in a sequence that was detected as a
254 pattern that can potentially be vectorized. Don't mark the stmt
255 as relevant/live because it's not going to be vectorized.
256 Instead mark the pattern-stmt that replaces it. */
258 if (dump_enabled_p ())
259 dump_printf_loc (MSG_NOTE, vect_location,
260 "last stmt in pattern. don't mark"
261 " relevant/live.\n");
262 stmt_vec_info old_stmt_info = stmt_info;
263 stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
264 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == old_stmt_info);
265 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
266 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
269 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
270 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
271 STMT_VINFO_RELEVANT (stmt_info) = relevant;
273 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
274 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
276 if (dump_enabled_p ())
277 dump_printf_loc (MSG_NOTE, vect_location,
278 "already marked relevant/live.\n");
279 return;
282 worklist->safe_push (stmt_info);
286 /* Function is_simple_and_all_uses_invariant
288 Return true if STMT_INFO is simple and all uses of it are invariant. */
290 bool
291 is_simple_and_all_uses_invariant (stmt_vec_info stmt_info,
292 loop_vec_info loop_vinfo)
294 tree op;
295 ssa_op_iter iter;
297 gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
298 if (!stmt)
299 return false;
301 FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
303 enum vect_def_type dt = vect_uninitialized_def;
305 if (!vect_is_simple_use (op, loop_vinfo, &dt))
307 if (dump_enabled_p ())
308 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
309 "use not simple.\n");
310 return false;
313 if (dt != vect_external_def && dt != vect_constant_def)
314 return false;
316 return true;
319 /* Function vect_stmt_relevant_p.
321 Return true if STMT_INFO, in the loop that is represented by LOOP_VINFO,
322 is "relevant for vectorization".
324 A stmt is considered "relevant for vectorization" if:
325 - it has uses outside the loop.
326 - it has vdefs (it alters memory).
327 - control stmts in the loop (except for the exit condition).
329 CHECKME: what other side effects would the vectorizer allow? */
331 static bool
332 vect_stmt_relevant_p (stmt_vec_info stmt_info, loop_vec_info loop_vinfo,
333 enum vect_relevant *relevant, bool *live_p)
335 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
336 ssa_op_iter op_iter;
337 imm_use_iterator imm_iter;
338 use_operand_p use_p;
339 def_operand_p def_p;
341 *relevant = vect_unused_in_scope;
342 *live_p = false;
344 /* cond stmt other than loop exit cond. */
345 if (is_ctrl_stmt (stmt_info->stmt)
346 && STMT_VINFO_TYPE (stmt_info) != loop_exit_ctrl_vec_info_type)
347 *relevant = vect_used_in_scope;
349 /* changing memory. */
350 if (gimple_code (stmt_info->stmt) != GIMPLE_PHI)
351 if (gimple_vdef (stmt_info->stmt)
352 && !gimple_clobber_p (stmt_info->stmt))
354 if (dump_enabled_p ())
355 dump_printf_loc (MSG_NOTE, vect_location,
356 "vec_stmt_relevant_p: stmt has vdefs.\n");
357 *relevant = vect_used_in_scope;
360 /* uses outside the loop. */
361 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt_info->stmt, op_iter, SSA_OP_DEF)
363 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
365 basic_block bb = gimple_bb (USE_STMT (use_p));
366 if (!flow_bb_inside_loop_p (loop, bb))
368 if (is_gimple_debug (USE_STMT (use_p)))
369 continue;
371 if (dump_enabled_p ())
372 dump_printf_loc (MSG_NOTE, vect_location,
373 "vec_stmt_relevant_p: used out of loop.\n");
375 /* We expect all such uses to be in the loop exit phis
376 (because of loop closed form) */
377 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
378 gcc_assert (bb == single_exit (loop)->dest);
380 *live_p = true;
385 if (*live_p && *relevant == vect_unused_in_scope
386 && !is_simple_and_all_uses_invariant (stmt_info, loop_vinfo))
388 if (dump_enabled_p ())
389 dump_printf_loc (MSG_NOTE, vect_location,
390 "vec_stmt_relevant_p: stmt live but not relevant.\n");
391 *relevant = vect_used_only_live;
394 return (*live_p || *relevant);
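/* Concrete illustration of the rules above: in a loop body containing
   a[i] = b[i] + 1 the store has a vdef and is marked vect_used_in_scope,
   while a computation whose only uses are in loop-exit PHIs sets *live_p
   and, if it is neither otherwise relevant nor fully invariant, ends up as
   vect_used_only_live.  */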
398 /* Function exist_non_indexing_operands_for_use_p
400 USE is one of the uses attached to STMT_INFO. Check if USE is
401 used in STMT_INFO for anything other than indexing an array. */
403 static bool
404 exist_non_indexing_operands_for_use_p (tree use, stmt_vec_info stmt_info)
406 tree operand;
408 /* USE corresponds to some operand in STMT. If there is no data
409 reference in STMT, then any operand that corresponds to USE
410 is not indexing an array. */
411 if (!STMT_VINFO_DATA_REF (stmt_info))
412 return true;
414 /* STMT has a data_ref. FORNOW this means that it's of one of
415 the following forms:
416 -1- ARRAY_REF = var
417 -2- var = ARRAY_REF
418 (This should have been verified in analyze_data_refs).
420 'var' in the second case corresponds to a def, not a use,
421 so USE cannot correspond to any operands that are not used
422 for array indexing.
424 Therefore, all we need to check is if STMT falls into the
425 first case, and whether var corresponds to USE. */
427 gassign *assign = dyn_cast <gassign *> (stmt_info->stmt);
428 if (!assign || !gimple_assign_copy_p (assign))
430 gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
431 if (call && gimple_call_internal_p (call))
433 internal_fn ifn = gimple_call_internal_fn (call);
434 int mask_index = internal_fn_mask_index (ifn);
435 if (mask_index >= 0
436 && use == gimple_call_arg (call, mask_index))
437 return true;
438 int stored_value_index = internal_fn_stored_value_index (ifn);
439 if (stored_value_index >= 0
440 && use == gimple_call_arg (call, stored_value_index))
441 return true;
442 if (internal_gather_scatter_fn_p (ifn)
443 && use == gimple_call_arg (call, 1))
444 return true;
446 return false;
449 if (TREE_CODE (gimple_assign_lhs (assign)) == SSA_NAME)
450 return false;
451 operand = gimple_assign_rhs1 (assign);
452 if (TREE_CODE (operand) != SSA_NAME)
453 return false;
455 if (operand == use)
456 return true;
458 return false;
463 /* Function process_use.
465 Inputs:
466 - a USE in STMT_VINFO in a loop represented by LOOP_VINFO
467 - RELEVANT - enum value to be set in the STMT_VINFO of the stmt
468 that defined USE. This is done by calling mark_relevant and passing it
469 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
470 - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
471 be performed.
473 Outputs:
474 Generally, LIVE_P and RELEVANT are used to define the liveness and
475 relevance info of the DEF_STMT of this USE:
476 STMT_VINFO_LIVE_P (DEF_stmt_vinfo) <-- live_p
477 STMT_VINFO_RELEVANT (DEF_stmt_vinfo) <-- relevant
478 Exceptions:
479 - case 1: If USE is used only for address computations (e.g. array indexing),
480 which does not need to be directly vectorized, then the liveness/relevance
481 of the respective DEF_STMT is left unchanged.
482 - case 2: If STMT_VINFO is a reduction phi and DEF_STMT is a reduction stmt,
483 we skip DEF_STMT because it has already been processed.
484 - case 3: If DEF_STMT and STMT_VINFO are in different nests, then
485 "relevant" will be modified accordingly.
487 Return true if everything is as expected. Return false otherwise. */
489 static opt_result
490 process_use (stmt_vec_info stmt_vinfo, tree use, loop_vec_info loop_vinfo,
491 enum vect_relevant relevant, vec<stmt_vec_info> *worklist,
492 bool force)
494 stmt_vec_info dstmt_vinfo;
495 enum vect_def_type dt;
497 /* case 1: we are only interested in uses that need to be vectorized. Uses
498 that are used for address computation are not considered relevant. */
499 if (!force && !exist_non_indexing_operands_for_use_p (use, stmt_vinfo))
500 return opt_result::success ();
502 if (!vect_is_simple_use (use, loop_vinfo, &dt, &dstmt_vinfo))
503 return opt_result::failure_at (stmt_vinfo->stmt,
504 "not vectorized:"
505 " unsupported use in stmt.\n");
507 if (!dstmt_vinfo)
508 return opt_result::success ();
510 basic_block def_bb = gimple_bb (dstmt_vinfo->stmt);
511 basic_block bb = gimple_bb (stmt_vinfo->stmt);
513 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DSTMT_VINFO).
514 We have to force the stmt live since the epilogue loop needs it to
515 continue computing the reduction. */
516 if (gimple_code (stmt_vinfo->stmt) == GIMPLE_PHI
517 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
518 && gimple_code (dstmt_vinfo->stmt) != GIMPLE_PHI
519 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
520 && bb->loop_father == def_bb->loop_father)
522 if (dump_enabled_p ())
523 dump_printf_loc (MSG_NOTE, vect_location,
524 "reduc-stmt defining reduc-phi in the same nest.\n");
525 vect_mark_relevant (worklist, dstmt_vinfo, relevant, true);
526 return opt_result::success ();
529 /* case 3a: outer-loop stmt defining an inner-loop stmt:
530 outer-loop-header-bb:
531 d = dstmt_vinfo
532 inner-loop:
533 stmt # use (d)
534 outer-loop-tail-bb:
535 ... */
536 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
538 if (dump_enabled_p ())
539 dump_printf_loc (MSG_NOTE, vect_location,
540 "outer-loop def-stmt defining inner-loop stmt.\n");
542 switch (relevant)
544 case vect_unused_in_scope:
545 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
546 vect_used_in_scope : vect_unused_in_scope;
547 break;
549 case vect_used_in_outer_by_reduction:
550 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
551 relevant = vect_used_by_reduction;
552 break;
554 case vect_used_in_outer:
555 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
556 relevant = vect_used_in_scope;
557 break;
559 case vect_used_in_scope:
560 break;
562 default:
563 gcc_unreachable ();
567 /* case 3b: inner-loop stmt defining an outer-loop stmt:
568 outer-loop-header-bb:
570 inner-loop:
571 d = dstmt_vinfo
572 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
573 stmt # use (d) */
574 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
576 if (dump_enabled_p ())
577 dump_printf_loc (MSG_NOTE, vect_location,
578 "inner-loop def-stmt defining outer-loop stmt.\n");
580 switch (relevant)
582 case vect_unused_in_scope:
583 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
584 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
585 vect_used_in_outer_by_reduction : vect_unused_in_scope;
586 break;
588 case vect_used_by_reduction:
589 case vect_used_only_live:
590 relevant = vect_used_in_outer_by_reduction;
591 break;
593 case vect_used_in_scope:
594 relevant = vect_used_in_outer;
595 break;
597 default:
598 gcc_unreachable ();
601 /* We are also not interested in uses on loop PHI backedges that are
602 inductions. Otherwise we'll needlessly vectorize the IV increment
603 and cause hybrid SLP for SLP inductions. Unless the PHI is live
604 of course. */
605 else if (gimple_code (stmt_vinfo->stmt) == GIMPLE_PHI
606 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_induction_def
607 && ! STMT_VINFO_LIVE_P (stmt_vinfo)
608 && (PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt,
609 loop_latch_edge (bb->loop_father))
610 == use))
612 if (dump_enabled_p ())
613 dump_printf_loc (MSG_NOTE, vect_location,
614 "induction value on backedge.\n");
615 return opt_result::success ();
619 vect_mark_relevant (worklist, dstmt_vinfo, relevant, false);
620 return opt_result::success ();
624 /* Function vect_mark_stmts_to_be_vectorized.
626 Not all stmts in the loop need to be vectorized. For example:
628 for i...
629 for j...
630 1. T0 = i + j
631 2. T1 = a[T0]
633 3. j = j + 1
635 Stmt 1 and 3 do not need to be vectorized, because loop control and
636 addressing of vectorized data-refs are handled differently.
638 This pass detects such stmts. */
640 opt_result
641 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo, bool *fatal)
643 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
644 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
645 unsigned int nbbs = loop->num_nodes;
646 gimple_stmt_iterator si;
647 unsigned int i;
648 basic_block bb;
649 bool live_p;
650 enum vect_relevant relevant;
652 DUMP_VECT_SCOPE ("vect_mark_stmts_to_be_vectorized");
654 auto_vec<stmt_vec_info, 64> worklist;
656 /* 1. Init worklist. */
657 for (i = 0; i < nbbs; i++)
659 bb = bbs[i];
660 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
662 stmt_vec_info phi_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
663 if (dump_enabled_p ())
664 dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? %G",
665 phi_info->stmt);
667 if (vect_stmt_relevant_p (phi_info, loop_vinfo, &relevant, &live_p))
668 vect_mark_relevant (&worklist, phi_info, relevant, live_p);
670 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
672 if (is_gimple_debug (gsi_stmt (si)))
673 continue;
674 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
675 if (dump_enabled_p ())
676 dump_printf_loc (MSG_NOTE, vect_location,
677 "init: stmt relevant? %G", stmt_info->stmt);
679 if (vect_stmt_relevant_p (stmt_info, loop_vinfo, &relevant, &live_p))
680 vect_mark_relevant (&worklist, stmt_info, relevant, live_p);
684 /* 2. Process_worklist */
685 while (worklist.length () > 0)
687 use_operand_p use_p;
688 ssa_op_iter iter;
690 stmt_vec_info stmt_vinfo = worklist.pop ();
691 if (dump_enabled_p ())
692 dump_printf_loc (MSG_NOTE, vect_location,
693 "worklist: examine stmt: %G", stmt_vinfo->stmt);
695 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
696 (DEF_STMT) as relevant/irrelevant according to the relevance property
697 of STMT. */
698 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
700 /* Generally, the relevance property of STMT (in STMT_VINFO_RELEVANT) is
701 propagated as is to the DEF_STMTs of its USEs.
703 One exception is when STMT has been identified as defining a reduction
704 variable; in this case we set the relevance to vect_used_by_reduction.
705 This is because we distinguish between two kinds of relevant stmts -
706 those that are used by a reduction computation, and those that are
707 (also) used by a regular computation. This allows us later on to
708 identify stmts that are used solely by a reduction, and therefore the
709 order of the results that they produce does not have to be kept. */
711 switch (STMT_VINFO_DEF_TYPE (stmt_vinfo))
713 case vect_reduction_def:
714 gcc_assert (relevant != vect_unused_in_scope);
715 if (relevant != vect_unused_in_scope
716 && relevant != vect_used_in_scope
717 && relevant != vect_used_by_reduction
718 && relevant != vect_used_only_live)
719 return opt_result::failure_at
720 (stmt_vinfo->stmt, "unsupported use of reduction.\n");
721 break;
723 case vect_nested_cycle:
724 if (relevant != vect_unused_in_scope
725 && relevant != vect_used_in_outer_by_reduction
726 && relevant != vect_used_in_outer)
727 return opt_result::failure_at
728 (stmt_vinfo->stmt, "unsupported use of nested cycle.\n");
729 break;
731 case vect_double_reduction_def:
732 if (relevant != vect_unused_in_scope
733 && relevant != vect_used_by_reduction
734 && relevant != vect_used_only_live)
735 return opt_result::failure_at
736 (stmt_vinfo->stmt, "unsupported use of double reduction.\n");
737 break;
739 default:
740 break;
743 if (is_pattern_stmt_p (stmt_vinfo))
745 /* Pattern statements are not inserted into the code, so
746 FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
747 have to scan the RHS or function arguments instead. */
748 if (gassign *assign = dyn_cast <gassign *> (stmt_vinfo->stmt))
750 enum tree_code rhs_code = gimple_assign_rhs_code (assign);
751 tree op = gimple_assign_rhs1 (assign);
753 i = 1;
754 if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
756 opt_result res
757 = process_use (stmt_vinfo, TREE_OPERAND (op, 0),
758 loop_vinfo, relevant, &worklist, false);
759 if (!res)
760 return res;
761 res = process_use (stmt_vinfo, TREE_OPERAND (op, 1),
762 loop_vinfo, relevant, &worklist, false);
763 if (!res)
764 return res;
765 i = 2;
767 for (; i < gimple_num_ops (assign); i++)
769 op = gimple_op (assign, i);
770 if (TREE_CODE (op) == SSA_NAME)
772 opt_result res
773 = process_use (stmt_vinfo, op, loop_vinfo, relevant,
774 &worklist, false);
775 if (!res)
776 return res;
780 else if (gcall *call = dyn_cast <gcall *> (stmt_vinfo->stmt))
782 for (i = 0; i < gimple_call_num_args (call); i++)
784 tree arg = gimple_call_arg (call, i);
785 opt_result res
786 = process_use (stmt_vinfo, arg, loop_vinfo, relevant,
787 &worklist, false);
788 if (!res)
789 return res;
793 else
794 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt_vinfo->stmt, iter, SSA_OP_USE)
796 tree op = USE_FROM_PTR (use_p);
797 opt_result res
798 = process_use (stmt_vinfo, op, loop_vinfo, relevant,
799 &worklist, false);
800 if (!res)
801 return res;
804 if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
806 gather_scatter_info gs_info;
807 if (!vect_check_gather_scatter (stmt_vinfo, loop_vinfo, &gs_info))
808 gcc_unreachable ();
809 opt_result res
810 = process_use (stmt_vinfo, gs_info.offset, loop_vinfo, relevant,
811 &worklist, true);
812 if (!res)
814 if (fatal)
815 *fatal = false;
816 return res;
819 } /* while worklist */
821 return opt_result::success ();
824 /* Function vect_model_simple_cost.
826 Models cost for simple operations, i.e. those that only emit ncopies of a
827 single op. Right now, this does not account for multiple insns that could
828 be generated for the single vector op. We will handle that shortly. */
830 static void
831 vect_model_simple_cost (vec_info *,
832 stmt_vec_info stmt_info, int ncopies,
833 enum vect_def_type *dt,
834 int ndts,
835 slp_tree node,
836 stmt_vector_for_cost *cost_vec,
837 vect_cost_for_stmt kind = vector_stmt)
839 int inside_cost = 0, prologue_cost = 0;
841 gcc_assert (cost_vec != NULL);
843 /* ??? Somehow we need to fix this at the callers. */
844 if (node)
845 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (node);
847 if (!node)
848 /* Cost the "broadcast" of a scalar operand into a vector operand.
849 Use scalar_to_vec to cost the broadcast, as elsewhere in the vector
850 cost model. */
851 for (int i = 0; i < ndts; i++)
852 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
853 prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec,
854 stmt_info, 0, vect_prologue);
856 /* Pass the inside-of-loop statements to the target-specific cost model. */
857 inside_cost += record_stmt_cost (cost_vec, ncopies, kind,
858 stmt_info, 0, vect_body);
860 if (dump_enabled_p ())
861 dump_printf_loc (MSG_NOTE, vect_location,
862 "vect_model_simple_cost: inside_cost = %d, "
863 "prologue_cost = %d .\n", inside_cost, prologue_cost);
867 /* Model cost for type demotion and promotion operations. PWR is
868 normally zero for single-step promotions and demotions. It will be
869 one if two-step promotion/demotion is required, and so on. NCOPIES
870 is the number of vector results (and thus number of instructions)
871 for the narrowest end of the operation chain. Each additional
872 step doubles the number of instructions required. If WIDEN_ARITH
873 is true the stmt is doing widening arithmetic. */
875 static void
876 vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
877 enum vect_def_type *dt,
878 unsigned int ncopies, int pwr,
879 stmt_vector_for_cost *cost_vec,
880 bool widen_arith)
882 int i;
883 int inside_cost = 0, prologue_cost = 0;
885 for (i = 0; i < pwr + 1; i++)
887 inside_cost += record_stmt_cost (cost_vec, ncopies,
888 widen_arith
889 ? vector_stmt : vec_promote_demote,
890 stmt_info, 0, vect_body);
891 ncopies *= 2;
894 /* FORNOW: Assuming maximum 2 args per stmts. */
895 for (i = 0; i < 2; i++)
896 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
897 prologue_cost += record_stmt_cost (cost_vec, 1, vector_stmt,
898 stmt_info, 0, vect_prologue);
900 if (dump_enabled_p ())
901 dump_printf_loc (MSG_NOTE, vect_location,
902 "vect_model_promotion_demotion_cost: inside_cost = %d, "
903 "prologue_cost = %d .\n", inside_cost, prologue_cost);
906 /* Returns true if the current function returns DECL. */
908 static bool
909 cfun_returns (tree decl)
911 edge_iterator ei;
912 edge e;
913 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
915 greturn *ret = safe_dyn_cast <greturn *> (last_stmt (e->src));
916 if (!ret)
917 continue;
918 if (gimple_return_retval (ret) == decl)
919 return true;
920 /* We often end up with an aggregate copy to the result decl,
921 handle that case as well. First skip intermediate clobbers
922 though. */
923 gimple *def = ret;
924 do
925 {
926 def = SSA_NAME_DEF_STMT (gimple_vuse (def));
927 }
928 while (gimple_clobber_p (def));
929 if (is_a <gassign *> (def)
930 && gimple_assign_lhs (def) == gimple_return_retval (ret)
931 && gimple_assign_rhs1 (def) == decl)
932 return true;
934 return false;
937 /* Function vect_model_store_cost
939 Models cost for stores. In the case of grouped accesses, one access
940 has the overhead of the grouped access attributed to it. */
942 static void
943 vect_model_store_cost (vec_info *vinfo, stmt_vec_info stmt_info, int ncopies,
944 vect_memory_access_type memory_access_type,
945 dr_alignment_support alignment_support_scheme,
946 int misalignment,
947 vec_load_store_type vls_type, slp_tree slp_node,
948 stmt_vector_for_cost *cost_vec)
950 unsigned int inside_cost = 0, prologue_cost = 0;
951 stmt_vec_info first_stmt_info = stmt_info;
952 bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);
954 /* ??? Somehow we need to fix this at the callers. */
955 if (slp_node)
956 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
958 if (vls_type == VLS_STORE_INVARIANT)
960 if (!slp_node)
961 prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec,
962 stmt_info, 0, vect_prologue);
965 /* Grouped stores update all elements in the group at once,
966 so we want the DR for the first statement. */
967 if (!slp_node && grouped_access_p)
968 first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
970 /* True if we should include any once-per-group costs as well as
971 the cost of the statement itself. For SLP we only get called
972 once per group anyhow. */
973 bool first_stmt_p = (first_stmt_info == stmt_info);
975 /* We assume that the cost of a single store-lanes instruction is
976 equivalent to the cost of DR_GROUP_SIZE separate stores. If a grouped
977 access is instead being provided by a permute-and-store operation,
978 include the cost of the permutes. */
979 if (first_stmt_p
980 && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
982 /* Uses high and low interleave or shuffle operations for each
983 needed permute. */
984 int group_size = DR_GROUP_SIZE (first_stmt_info);
985 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
986 inside_cost = record_stmt_cost (cost_vec, nstmts, vec_perm,
987 stmt_info, 0, vect_body);
989 if (dump_enabled_p ())
990 dump_printf_loc (MSG_NOTE, vect_location,
991 "vect_model_store_cost: strided group_size = %d .\n",
992 group_size);
995 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
996 /* Costs of the stores. */
997 if (memory_access_type == VMAT_ELEMENTWISE
998 || memory_access_type == VMAT_GATHER_SCATTER)
1000 /* N scalar stores plus extracting the elements. */
1001 unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
1002 inside_cost += record_stmt_cost (cost_vec,
1003 ncopies * assumed_nunits,
1004 scalar_store, stmt_info, 0, vect_body);
1006 else
1007 vect_get_store_cost (vinfo, stmt_info, ncopies, alignment_support_scheme,
1008 misalignment, &inside_cost, cost_vec);
1010 if (memory_access_type == VMAT_ELEMENTWISE
1011 || memory_access_type == VMAT_STRIDED_SLP)
1013 /* N scalar stores plus extracting the elements. */
1014 unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
1015 inside_cost += record_stmt_cost (cost_vec,
1016 ncopies * assumed_nunits,
1017 vec_to_scalar, stmt_info, 0, vect_body);
1020 /* When vectorizing a store into the function result, assign
1021 a penalty if the function returns in a multi-register location.
1022 In this case we assume we'll end up with having to spill the
1023 vector result and do piecewise loads as a conservative estimate. */
1024 tree base = get_base_address (STMT_VINFO_DATA_REF (stmt_info)->ref);
1025 if (base
1026 && (TREE_CODE (base) == RESULT_DECL
1027 || (DECL_P (base) && cfun_returns (base)))
1028 && !aggregate_value_p (base, cfun->decl))
1030 rtx reg = hard_function_value (TREE_TYPE (base), cfun->decl, 0, 1);
1031 /* ??? Handle PARALLEL in some way. */
1032 if (REG_P (reg))
1034 int nregs = hard_regno_nregs (REGNO (reg), GET_MODE (reg));
1035 /* Assume that a single reg-reg move is possible and cheap,
1036 do not account for vector to gp register move cost. */
1037 if (nregs > 1)
1039 /* Spill. */
1040 prologue_cost += record_stmt_cost (cost_vec, ncopies,
1041 vector_store,
1042 stmt_info, 0, vect_epilogue);
1043 /* Loads. */
1044 prologue_cost += record_stmt_cost (cost_vec, ncopies * nregs,
1045 scalar_load,
1046 stmt_info, 0, vect_epilogue);
1051 if (dump_enabled_p ())
1052 dump_printf_loc (MSG_NOTE, vect_location,
1053 "vect_model_store_cost: inside_cost = %d, "
1054 "prologue_cost = %d .\n", inside_cost, prologue_cost);
1058 /* Calculate cost of DR's memory access. */
1059 void
1060 vect_get_store_cost (vec_info *, stmt_vec_info stmt_info, int ncopies,
1061 dr_alignment_support alignment_support_scheme,
1062 int misalignment,
1063 unsigned int *inside_cost,
1064 stmt_vector_for_cost *body_cost_vec)
1066 switch (alignment_support_scheme)
1068 case dr_aligned:
1070 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1071 vector_store, stmt_info, 0,
1072 vect_body);
1074 if (dump_enabled_p ())
1075 dump_printf_loc (MSG_NOTE, vect_location,
1076 "vect_model_store_cost: aligned.\n");
1077 break;
1080 case dr_unaligned_supported:
1082 /* Here, we assign an additional cost for the unaligned store. */
1083 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1084 unaligned_store, stmt_info,
1085 misalignment, vect_body);
1086 if (dump_enabled_p ())
1087 dump_printf_loc (MSG_NOTE, vect_location,
1088 "vect_model_store_cost: unaligned supported by "
1089 "hardware.\n");
1090 break;
1093 case dr_unaligned_unsupported:
1095 *inside_cost = VECT_MAX_COST;
1097 if (dump_enabled_p ())
1098 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1099 "vect_model_store_cost: unsupported access.\n");
1100 break;
1103 default:
1104 gcc_unreachable ();
1109 /* Function vect_model_load_cost
1111 Models cost for loads. In the case of grouped accesses, one access has
1112 the overhead of the grouped access attributed to it. Since unaligned
1113 accesses are supported for loads, we also account for the costs of the
1114 access scheme chosen. */
1116 static void
1117 vect_model_load_cost (vec_info *vinfo,
1118 stmt_vec_info stmt_info, unsigned ncopies, poly_uint64 vf,
1119 vect_memory_access_type memory_access_type,
1120 dr_alignment_support alignment_support_scheme,
1121 int misalignment,
1122 gather_scatter_info *gs_info,
1123 slp_tree slp_node,
1124 stmt_vector_for_cost *cost_vec)
1126 unsigned int inside_cost = 0, prologue_cost = 0;
1127 bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);
1129 gcc_assert (cost_vec);
1131 /* ??? Somehow we need to fix this at the callers. */
1132 if (slp_node)
1133 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
1135 if (slp_node && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
1137 /* If the load is permuted then the alignment is determined by
1138 the first group element not by the first scalar stmt DR. */
1139 stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
1140 /* Record the cost for the permutation. */
1141 unsigned n_perms, n_loads;
1142 vect_transform_slp_perm_load (vinfo, slp_node, vNULL, NULL,
1143 vf, true, &n_perms, &n_loads);
1144 inside_cost += record_stmt_cost (cost_vec, n_perms, vec_perm,
1145 first_stmt_info, 0, vect_body);
1147 /* And adjust the number of loads performed. This handles
1148 redundancies as well as loads that are later dead. */
1149 ncopies = n_loads;
1152 /* Grouped loads read all elements in the group at once,
1153 so we want the DR for the first statement. */
1154 stmt_vec_info first_stmt_info = stmt_info;
1155 if (!slp_node && grouped_access_p)
1156 first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
1158 /* True if we should include any once-per-group costs as well as
1159 the cost of the statement itself. For SLP we only get called
1160 once per group anyhow. */
1161 bool first_stmt_p = (first_stmt_info == stmt_info);
1163 /* An IFN_LOAD_LANES will load all its vector results, regardless of which
1164 ones we actually need. Account for the cost of unused results. */
1165 if (first_stmt_p && !slp_node && memory_access_type == VMAT_LOAD_STORE_LANES)
1167 unsigned int gaps = DR_GROUP_SIZE (first_stmt_info);
1168 stmt_vec_info next_stmt_info = first_stmt_info;
1169 do
1170 {
1171 gaps -= 1;
1172 next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
1173 }
1174 while (next_stmt_info);
1175 if (gaps)
1177 if (dump_enabled_p ())
1178 dump_printf_loc (MSG_NOTE, vect_location,
1179 "vect_model_load_cost: %d unused vectors.\n",
1180 gaps);
1181 vect_get_load_cost (vinfo, stmt_info, ncopies * gaps,
1182 alignment_support_scheme, misalignment, false,
1183 &inside_cost, &prologue_cost,
1184 cost_vec, cost_vec, true);
1188 /* We assume that the cost of a single load-lanes instruction is
1189 equivalent to the cost of DR_GROUP_SIZE separate loads. If a grouped
1190 access is instead being provided by a load-and-permute operation,
1191 include the cost of the permutes. */
1192 if (first_stmt_p
1193 && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
1195 /* Uses even and odd extract operations or shuffle operations
1196 for each needed permute. */
1197 int group_size = DR_GROUP_SIZE (first_stmt_info);
1198 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
1199 inside_cost += record_stmt_cost (cost_vec, nstmts, vec_perm,
1200 stmt_info, 0, vect_body);
1202 if (dump_enabled_p ())
1203 dump_printf_loc (MSG_NOTE, vect_location,
1204 "vect_model_load_cost: strided group_size = %d .\n",
1205 group_size);
1208 /* The loads themselves. */
1209 if (memory_access_type == VMAT_ELEMENTWISE
1210 || memory_access_type == VMAT_GATHER_SCATTER)
1212 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1213 unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
1214 if (memory_access_type == VMAT_GATHER_SCATTER
1215 && gs_info->ifn == IFN_LAST && !gs_info->decl)
1216 /* For emulated gathers N offset vector element extracts
1217 (we assume the scalar scaling and ptr + offset add is consumed by
1218 the load). */
1219 inside_cost += record_stmt_cost (cost_vec, ncopies * assumed_nunits,
1220 vec_to_scalar, stmt_info, 0,
1221 vect_body);
1222 /* N scalar loads plus gathering them into a vector. */
1223 inside_cost += record_stmt_cost (cost_vec,
1224 ncopies * assumed_nunits,
1225 scalar_load, stmt_info, 0, vect_body);
1227 else if (memory_access_type == VMAT_INVARIANT)
1229 /* Invariant loads will ideally be hoisted and splat to a vector. */
1230 prologue_cost += record_stmt_cost (cost_vec, 1,
1231 scalar_load, stmt_info, 0,
1232 vect_prologue);
1233 prologue_cost += record_stmt_cost (cost_vec, 1,
1234 scalar_to_vec, stmt_info, 0,
1235 vect_prologue);
1237 else
1238 vect_get_load_cost (vinfo, stmt_info, ncopies,
1239 alignment_support_scheme, misalignment, first_stmt_p,
1240 &inside_cost, &prologue_cost,
1241 cost_vec, cost_vec, true);
1242 if (memory_access_type == VMAT_ELEMENTWISE
1243 || memory_access_type == VMAT_STRIDED_SLP
1244 || (memory_access_type == VMAT_GATHER_SCATTER
1245 && gs_info->ifn == IFN_LAST && !gs_info->decl))
1246 inside_cost += record_stmt_cost (cost_vec, ncopies, vec_construct,
1247 stmt_info, 0, vect_body);
1249 if (dump_enabled_p ())
1250 dump_printf_loc (MSG_NOTE, vect_location,
1251 "vect_model_load_cost: inside_cost = %d, "
1252 "prologue_cost = %d .\n", inside_cost, prologue_cost);
1256 /* Calculate cost of DR's memory access. */
1257 void
1258 vect_get_load_cost (vec_info *, stmt_vec_info stmt_info, int ncopies,
1259 dr_alignment_support alignment_support_scheme,
1260 int misalignment,
1261 bool add_realign_cost, unsigned int *inside_cost,
1262 unsigned int *prologue_cost,
1263 stmt_vector_for_cost *prologue_cost_vec,
1264 stmt_vector_for_cost *body_cost_vec,
1265 bool record_prologue_costs)
1267 switch (alignment_support_scheme)
1269 case dr_aligned:
1271 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1272 stmt_info, 0, vect_body);
1274 if (dump_enabled_p ())
1275 dump_printf_loc (MSG_NOTE, vect_location,
1276 "vect_model_load_cost: aligned.\n");
1278 break;
1280 case dr_unaligned_supported:
1282 /* Here, we assign an additional cost for the unaligned load. */
1283 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1284 unaligned_load, stmt_info,
1285 misalignment, vect_body);
1287 if (dump_enabled_p ())
1288 dump_printf_loc (MSG_NOTE, vect_location,
1289 "vect_model_load_cost: unaligned supported by "
1290 "hardware.\n");
1292 break;
1294 case dr_explicit_realign:
1296 *inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
1297 vector_load, stmt_info, 0, vect_body);
1298 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1299 vec_perm, stmt_info, 0, vect_body);
1301 /* FIXME: If the misalignment remains fixed across the iterations of
1302 the containing loop, the following cost should be added to the
1303 prologue costs. */
1304 if (targetm.vectorize.builtin_mask_for_load)
1305 *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
1306 stmt_info, 0, vect_body);
1308 if (dump_enabled_p ())
1309 dump_printf_loc (MSG_NOTE, vect_location,
1310 "vect_model_load_cost: explicit realign\n");
1312 break;
1314 case dr_explicit_realign_optimized:
1316 if (dump_enabled_p ())
1317 dump_printf_loc (MSG_NOTE, vect_location,
1318 "vect_model_load_cost: unaligned software "
1319 "pipelined.\n");
1321 /* Unaligned software pipeline has a load of an address, an initial
1322 load, and possibly a mask operation to "prime" the loop. However,
1323 if this is an access in a group of loads, which provide grouped
1324 access, then the above cost should only be considered for one
1325 access in the group. Inside the loop, there is a load op
1326 and a realignment op. */
1328 if (add_realign_cost && record_prologue_costs)
1330 *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
1331 vector_stmt, stmt_info,
1332 0, vect_prologue);
1333 if (targetm.vectorize.builtin_mask_for_load)
1334 *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
1335 vector_stmt, stmt_info,
1336 0, vect_prologue);
1339 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1340 stmt_info, 0, vect_body);
1341 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
1342 stmt_info, 0, vect_body);
1344 if (dump_enabled_p ())
1345 dump_printf_loc (MSG_NOTE, vect_location,
1346 "vect_model_load_cost: explicit realign optimized"
1347 "\n");
1349 break;
1352 case dr_unaligned_unsupported:
1354 *inside_cost = VECT_MAX_COST;
1356 if (dump_enabled_p ())
1357 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1358 "vect_model_load_cost: unsupported access.\n");
1359 break;
1362 default:
1363 gcc_unreachable ();
1367 /* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
1368 the loop preheader for the vectorized stmt STMT_VINFO. */
1370 static void
1371 vect_init_vector_1 (vec_info *vinfo, stmt_vec_info stmt_vinfo, gimple *new_stmt,
1372 gimple_stmt_iterator *gsi)
1374 if (gsi)
1375 vect_finish_stmt_generation (vinfo, stmt_vinfo, new_stmt, gsi);
1376 else
1377 vinfo->insert_on_entry (stmt_vinfo, new_stmt);
1379 if (dump_enabled_p ())
1380 dump_printf_loc (MSG_NOTE, vect_location,
1381 "created new init_stmt: %G", new_stmt);
1384 /* Function vect_init_vector.
1386 Insert a new stmt (INIT_STMT) that initializes a new variable of type
1387 TYPE with the value VAL. If TYPE is a vector type and VAL does not have
1388 vector type a vector with all elements equal to VAL is created first.
1389 Place the initialization at GSI if it is not NULL. Otherwise, place the
1390 initialization at the loop preheader.
1391 Return the DEF of INIT_STMT.
1392 It will be used in the vectorization of STMT_INFO. */
1394 tree
1395 vect_init_vector (vec_info *vinfo, stmt_vec_info stmt_info, tree val, tree type,
1396 gimple_stmt_iterator *gsi)
1398 gimple *init_stmt;
1399 tree new_temp;
1401 /* We abuse this function to push something to an SSA name with initial 'val'. */
1402 if (! useless_type_conversion_p (type, TREE_TYPE (val)))
1404 gcc_assert (TREE_CODE (type) == VECTOR_TYPE);
1405 if (! types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
1407 /* A scalar boolean value should be transformed into an all-zeros
1408 or all-ones value before building a vector. */
1409 if (VECTOR_BOOLEAN_TYPE_P (type))
1411 tree true_val = build_all_ones_cst (TREE_TYPE (type));
1412 tree false_val = build_zero_cst (TREE_TYPE (type));
1414 if (CONSTANT_CLASS_P (val))
1415 val = integer_zerop (val) ? false_val : true_val;
1416 else
1418 new_temp = make_ssa_name (TREE_TYPE (type));
1419 init_stmt = gimple_build_assign (new_temp, COND_EXPR,
1420 val, true_val, false_val);
1421 vect_init_vector_1 (vinfo, stmt_info, init_stmt, gsi);
1422 val = new_temp;
1425 else
1427 gimple_seq stmts = NULL;
1428 if (! INTEGRAL_TYPE_P (TREE_TYPE (val)))
1429 val = gimple_build (&stmts, VIEW_CONVERT_EXPR,
1430 TREE_TYPE (type), val);
1431 else
1432 /* ??? Condition vectorization expects us to do
1433 promotion of invariant/external defs. */
1434 val = gimple_convert (&stmts, TREE_TYPE (type), val);
1435 for (gimple_stmt_iterator gsi2 = gsi_start (stmts);
1436 !gsi_end_p (gsi2); )
1438 init_stmt = gsi_stmt (gsi2);
1439 gsi_remove (&gsi2, false);
1440 vect_init_vector_1 (vinfo, stmt_info, init_stmt, gsi);
1444 val = build_vector_from_val (type, val);
1447 new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
1448 init_stmt = gimple_build_assign (new_temp, val);
1449 vect_init_vector_1 (vinfo, stmt_info, init_stmt, gsi);
1450 return new_temp;
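/* Typical use, assuming OP is an invariant or constant operand and VECTYPE
   the required vector type; a NULL GSI places the generated splat in the
   loop preheader, as done by vect_get_vec_defs_for_operand below:

     tree vop = vect_init_vector (vinfo, stmt_info, op, vectype, NULL);  */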
1454 /* Function vect_get_vec_defs_for_operand.
1456 OP is an operand in STMT_VINFO. This function returns a vector of
1457 NCOPIES defs that will be used in the vectorized stmts for STMT_VINFO.
1459 In the case that OP is an SSA_NAME which is defined in the loop, then
1460 STMT_VINFO_VEC_STMTS of the defining stmt holds the relevant defs.
1462 In case OP is an invariant or constant, a new stmt that creates a vector def
1463 needs to be introduced. VECTYPE may be used to specify a required type for
1464 vector invariant. */
1466 void
1467 vect_get_vec_defs_for_operand (vec_info *vinfo, stmt_vec_info stmt_vinfo,
1468 unsigned ncopies,
1469 tree op, vec<tree> *vec_oprnds, tree vectype)
1471 gimple *def_stmt;
1472 enum vect_def_type dt;
1473 bool is_simple_use;
1474 loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
1476 if (dump_enabled_p ())
1477 dump_printf_loc (MSG_NOTE, vect_location,
1478 "vect_get_vec_defs_for_operand: %T\n", op);
1480 stmt_vec_info def_stmt_info;
1481 is_simple_use = vect_is_simple_use (op, loop_vinfo, &dt,
1482 &def_stmt_info, &def_stmt);
1483 gcc_assert (is_simple_use);
1484 if (def_stmt && dump_enabled_p ())
1485 dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = %G", def_stmt);
1487 vec_oprnds->create (ncopies);
1488 if (dt == vect_constant_def || dt == vect_external_def)
1490 tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
1491 tree vector_type;
1493 if (vectype)
1494 vector_type = vectype;
1495 else if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op))
1496 && VECTOR_BOOLEAN_TYPE_P (stmt_vectype))
1497 vector_type = truth_type_for (stmt_vectype);
1498 else
1499 vector_type = get_vectype_for_scalar_type (loop_vinfo, TREE_TYPE (op));
1501 gcc_assert (vector_type);
1502 tree vop = vect_init_vector (vinfo, stmt_vinfo, op, vector_type, NULL);
1503 while (ncopies--)
1504 vec_oprnds->quick_push (vop);
1506 else
1508 def_stmt_info = vect_stmt_to_vectorize (def_stmt_info);
1509 gcc_assert (STMT_VINFO_VEC_STMTS (def_stmt_info).length () == ncopies);
1510 for (unsigned i = 0; i < ncopies; ++i)
1511 vec_oprnds->quick_push (gimple_get_lhs
1512 (STMT_VINFO_VEC_STMTS (def_stmt_info)[i]));
1517 /* Get vectorized definitions for OP0 and OP1. */
1519 void
1520 vect_get_vec_defs (vec_info *vinfo, stmt_vec_info stmt_info, slp_tree slp_node,
1521 unsigned ncopies,
1522 tree op0, vec<tree> *vec_oprnds0, tree vectype0,
1523 tree op1, vec<tree> *vec_oprnds1, tree vectype1,
1524 tree op2, vec<tree> *vec_oprnds2, tree vectype2,
1525 tree op3, vec<tree> *vec_oprnds3, tree vectype3)
1527 if (slp_node)
1529 if (op0)
1530 vect_get_slp_defs (SLP_TREE_CHILDREN (slp_node)[0], vec_oprnds0);
1531 if (op1)
1532 vect_get_slp_defs (SLP_TREE_CHILDREN (slp_node)[1], vec_oprnds1);
1533 if (op2)
1534 vect_get_slp_defs (SLP_TREE_CHILDREN (slp_node)[2], vec_oprnds2);
1535 if (op3)
1536 vect_get_slp_defs (SLP_TREE_CHILDREN (slp_node)[3], vec_oprnds3);
1538 else
1540 if (op0)
1541 vect_get_vec_defs_for_operand (vinfo, stmt_info, ncopies,
1542 op0, vec_oprnds0, vectype0);
1543 if (op1)
1544 vect_get_vec_defs_for_operand (vinfo, stmt_info, ncopies,
1545 op1, vec_oprnds1, vectype1);
1546 if (op2)
1547 vect_get_vec_defs_for_operand (vinfo, stmt_info, ncopies,
1548 op2, vec_oprnds2, vectype2);
1549 if (op3)
1550 vect_get_vec_defs_for_operand (vinfo, stmt_info, ncopies,
1551 op3, vec_oprnds3, vectype3);
1555 void
1556 vect_get_vec_defs (vec_info *vinfo, stmt_vec_info stmt_info, slp_tree slp_node,
1557 unsigned ncopies,
1558 tree op0, vec<tree> *vec_oprnds0,
1559 tree op1, vec<tree> *vec_oprnds1,
1560 tree op2, vec<tree> *vec_oprnds2,
1561 tree op3, vec<tree> *vec_oprnds3)
1563 vect_get_vec_defs (vinfo, stmt_info, slp_node, ncopies,
1564 op0, vec_oprnds0, NULL_TREE,
1565 op1, vec_oprnds1, NULL_TREE,
1566 op2, vec_oprnds2, NULL_TREE,
1567 op3, vec_oprnds3, NULL_TREE);
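/* Illustrative call from a two-operand vectorizable_* routine, assuming
   op0, op1, slp_node and ncopies are already known; unused operand slots
   are simply passed as NULL:

     auto_vec<tree> vec_oprnds0, vec_oprnds1;
     vect_get_vec_defs (vinfo, stmt_info, slp_node, ncopies,
			op0, &vec_oprnds0, op1, &vec_oprnds1,
			NULL_TREE, NULL, NULL_TREE, NULL);  */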
1570 /* Helper function called by vect_finish_replace_stmt and
1571 vect_finish_stmt_generation. Set the location of the new
1572 statement and create and return a stmt_vec_info for it. */
1574 static void
1575 vect_finish_stmt_generation_1 (vec_info *,
1576 stmt_vec_info stmt_info, gimple *vec_stmt)
1578 if (dump_enabled_p ())
1579 dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: %G", vec_stmt);
1581 if (stmt_info)
1583 gimple_set_location (vec_stmt, gimple_location (stmt_info->stmt));
1585 /* While EH edges will generally prevent vectorization, stmt might
1586 e.g. be in a must-not-throw region. Ensure newly created stmts
1587 that could throw are part of the same region. */
1588 int lp_nr = lookup_stmt_eh_lp (stmt_info->stmt);
1589 if (lp_nr != 0 && stmt_could_throw_p (cfun, vec_stmt))
1590 add_stmt_to_eh_lp (vec_stmt, lp_nr);
1592 else
1593 gcc_assert (!stmt_could_throw_p (cfun, vec_stmt));
1596 /* Replace the scalar statement STMT_INFO with a new vector statement VEC_STMT,
1597 which sets the same scalar result as STMT_INFO did. Create and return a
1598 stmt_vec_info for VEC_STMT. */
1600 void
1601 vect_finish_replace_stmt (vec_info *vinfo,
1602 stmt_vec_info stmt_info, gimple *vec_stmt)
1604 gimple *scalar_stmt = vect_orig_stmt (stmt_info)->stmt;
1605 gcc_assert (gimple_get_lhs (scalar_stmt) == gimple_get_lhs (vec_stmt));
1607 gimple_stmt_iterator gsi = gsi_for_stmt (scalar_stmt);
1608 gsi_replace (&gsi, vec_stmt, true);
1610 vect_finish_stmt_generation_1 (vinfo, stmt_info, vec_stmt);
1613 /* Add VEC_STMT to the vectorized implementation of STMT_INFO and insert it
1614 before *GSI. Create and return a stmt_vec_info for VEC_STMT. */
1616 void
1617 vect_finish_stmt_generation (vec_info *vinfo,
1618 stmt_vec_info stmt_info, gimple *vec_stmt,
1619 gimple_stmt_iterator *gsi)
1621 gcc_assert (!stmt_info || gimple_code (stmt_info->stmt) != GIMPLE_LABEL);
1623 if (!gsi_end_p (*gsi)
1624 && gimple_has_mem_ops (vec_stmt))
1626 gimple *at_stmt = gsi_stmt (*gsi);
1627 tree vuse = gimple_vuse (at_stmt);
1628 if (vuse && TREE_CODE (vuse) == SSA_NAME)
1630 tree vdef = gimple_vdef (at_stmt);
1631 gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
1632 gimple_set_modified (vec_stmt, true);
1633 /* If we have an SSA vuse and insert a store, update virtual
1634 SSA form to avoid triggering the renamer. Do so only
1635 if we can easily see all uses - which is what almost always
1636 happens with the way vectorized stmts are inserted. */
1637 if ((vdef && TREE_CODE (vdef) == SSA_NAME)
1638 && ((is_gimple_assign (vec_stmt)
1639 && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
1640 || (is_gimple_call (vec_stmt)
1641 && (!(gimple_call_flags (vec_stmt)
1642 & (ECF_CONST|ECF_PURE|ECF_NOVOPS))
1643 || (gimple_call_lhs (vec_stmt)
1644 && !is_gimple_reg (gimple_call_lhs (vec_stmt)))))))
1646 tree new_vdef = copy_ssa_name (vuse, vec_stmt);
1647 gimple_set_vdef (vec_stmt, new_vdef);
1648 SET_USE (gimple_vuse_op (at_stmt), new_vdef);
1652 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1653 vect_finish_stmt_generation_1 (vinfo, stmt_info, vec_stmt);
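/* Typical pattern in the vectorizable_* routines, assuming vop0/vop1 are
   vectorized operands and vectype the vector result type:

     tree new_temp = make_ssa_name (vectype);
     gimple *new_stmt = gimple_build_assign (new_temp, PLUS_EXPR, vop0, vop1);
     vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);

   This sets the location from the scalar stmt, propagates EH and virtual
   operand information as shown above, and inserts the stmt before *GSI.  */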
1656 /* We want to vectorize a call to combined function CFN with function
1657 decl FNDECL, using VECTYPE_OUT as the type of the output and VECTYPE_IN
1658 as the types of all inputs. Check whether this is possible using
1659 an internal function, returning its code if so or IFN_LAST if not. */
1661 static internal_fn
1662 vectorizable_internal_function (combined_fn cfn, tree fndecl,
1663 tree vectype_out, tree vectype_in)
1665 internal_fn ifn;
1666 if (internal_fn_p (cfn))
1667 ifn = as_internal_fn (cfn);
1668 else
1669 ifn = associated_internal_fn (fndecl);
1670 if (ifn != IFN_LAST && direct_internal_fn_p (ifn))
1672 const direct_internal_fn_info &info = direct_internal_fn (ifn);
1673 if (info.vectorizable)
1675 tree type0 = (info.type0 < 0 ? vectype_out : vectype_in);
1676 tree type1 = (info.type1 < 0 ? vectype_out : vectype_in);
1677 if (direct_internal_fn_supported_p (ifn, tree_pair (type0, type1),
1678 OPTIMIZE_FOR_SPEED))
1679 return ifn;
1682 return IFN_LAST;
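/* Sketch of the expected calling pattern, with cfn, fndecl and the two
   vector types assumed to be in scope:

     internal_fn ifn = vectorizable_internal_function (cfn, fndecl,
							vectype_out,
							vectype_in);
     if (ifn == IFN_LAST)
       return false;

   i.e. the caller simply bails out when no directly supported internal
   function exists.  */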
1686 static tree permute_vec_elements (vec_info *, tree, tree, tree, stmt_vec_info,
1687 gimple_stmt_iterator *);
1689 /* Check whether a load or store statement in the loop described by
1690 LOOP_VINFO is possible in a loop using partial vectors. This is
1691 testing whether the vectorizer pass has the appropriate support,
1692 as well as whether the target does.
1694 VLS_TYPE says whether the statement is a load or store and VECTYPE
1695 is the type of the vector being loaded or stored. SLP_NODE is the SLP
1696 node that contains the statement, or null if none. MEMORY_ACCESS_TYPE
1697 says how the load or store is going to be implemented and GROUP_SIZE
1698 is the number of load or store statements in the containing group.
1699 If the access is a gather load or scatter store, GS_INFO describes
1700 its arguments. If the load or store is conditional, SCALAR_MASK is the
1701 condition under which it occurs.
1703 Clear LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P if a loop using partial
1704 vectors is not supported, otherwise record the required rgroup control
1705 types. */
1707 static void
1708 check_load_store_for_partial_vectors (loop_vec_info loop_vinfo, tree vectype,
1709 slp_tree slp_node,
1710 vec_load_store_type vls_type,
1711 int group_size,
1712 vect_memory_access_type
1713 memory_access_type,
1714 gather_scatter_info *gs_info,
1715 tree scalar_mask)
1717 /* Invariant loads need no special support. */
1718 if (memory_access_type == VMAT_INVARIANT)
1719 return;
1721 unsigned int nvectors;
1722 if (slp_node)
1723 nvectors = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
1724 else
1725 nvectors = vect_get_num_copies (loop_vinfo, vectype);
1727 vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
1728 machine_mode vecmode = TYPE_MODE (vectype);
1729 bool is_load = (vls_type == VLS_LOAD);
1730 if (memory_access_type == VMAT_LOAD_STORE_LANES)
1732 if (is_load
1733 ? !vect_load_lanes_supported (vectype, group_size, true)
1734 : !vect_store_lanes_supported (vectype, group_size, true))
1736 if (dump_enabled_p ())
1737 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1738 "can't operate on partial vectors because"
1739 " the target doesn't have an appropriate"
1740 " load/store-lanes instruction.\n");
1741 LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) = false;
1742 return;
1744 vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype,
1745 scalar_mask);
1746 return;
1749 if (memory_access_type == VMAT_GATHER_SCATTER)
1751 internal_fn ifn = (is_load
1752 ? IFN_MASK_GATHER_LOAD
1753 : IFN_MASK_SCATTER_STORE);
1754 if (!internal_gather_scatter_fn_supported_p (ifn, vectype,
1755 gs_info->memory_type,
1756 gs_info->offset_vectype,
1757 gs_info->scale))
1759 if (dump_enabled_p ())
1760 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1761 "can't operate on partial vectors because"
1762 " the target doesn't have an appropriate"
1763 " gather load or scatter store instruction.\n");
1764 LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) = false;
1765 return;
1767 vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype,
1768 scalar_mask);
1769 return;
1772 if (memory_access_type != VMAT_CONTIGUOUS
1773 && memory_access_type != VMAT_CONTIGUOUS_PERMUTE)
1775 /* Element X of the data must come from iteration i * VF + X of the
1776 scalar loop. We need more work to support other mappings. */
1777 if (dump_enabled_p ())
1778 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1779 "can't operate on partial vectors because an"
1780 " access isn't contiguous.\n");
1781 LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) = false;
1782 return;
1785 if (!VECTOR_MODE_P (vecmode))
1787 if (dump_enabled_p ())
1788 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1789 "can't operate on partial vectors when emulating"
1790 " vector operations.\n");
1791 LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) = false;
1792 return;
1795 /* We might load more scalars than we need for permuting SLP loads.
1796 We checked in get_group_load_store_type that the extra elements
1797 don't leak into a new vector. */
1798 auto group_memory_nvectors = [](poly_uint64 size, poly_uint64 nunits)
1800 unsigned int nvectors;
1801 if (can_div_away_from_zero_p (size, nunits, &nvectors))
1802 return nvectors;
1803 gcc_unreachable ();
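      /* Illustration of this helper (numbers purely illustrative): with
	 GROUP_SIZE * VF = 12 scalar elements and NUNITS = 8 lanes per
	 vector it returns ceil (12 / 8) = 2 vectors; the 4 excess lanes
	 were already checked not to leak into a further vector.  */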
1806 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
1807 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1808 machine_mode mask_mode;
1809 bool using_partial_vectors_p = false;
1810 if (targetm.vectorize.get_mask_mode (vecmode).exists (&mask_mode)
1811 && can_vec_mask_load_store_p (vecmode, mask_mode, is_load))
1813 nvectors = group_memory_nvectors (group_size * vf, nunits);
1814 vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype, scalar_mask);
1815 using_partial_vectors_p = true;
1818 machine_mode vmode;
1819 if (get_len_load_store_mode (vecmode, is_load).exists (&vmode))
1821 nvectors = group_memory_nvectors (group_size * vf, nunits);
1822 vec_loop_lens *lens = &LOOP_VINFO_LENS (loop_vinfo);
1823 unsigned factor = (vecmode == vmode) ? 1 : GET_MODE_UNIT_SIZE (vecmode);
1824 vect_record_loop_len (loop_vinfo, lens, nvectors, vectype, factor);
1825 using_partial_vectors_p = true;
1828 if (!using_partial_vectors_p)
1830 if (dump_enabled_p ())
1831 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1832 "can't operate on partial vectors because the"
1833 " target doesn't have the appropriate partial"
1834 " vectorization load or store.\n");
1835 LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) = false;
1839 /* Return the mask input to a masked load or store. VEC_MASK is the vectorized
1840 form of the scalar mask condition and LOOP_MASK, if nonnull, is the mask
1841 that needs to be applied to all loads and stores in a vectorized loop.
1842 Return VEC_MASK if LOOP_MASK is null or if VEC_MASK is already masked,
1843 otherwise return VEC_MASK & LOOP_MASK.
1845 MASK_TYPE is the type of both masks. If new statements are needed,
1846 insert them before GSI. */
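   /* A minimal sketch of the result (SSA names purely illustrative): when
      both masks are needed and the pair has not been seen before, this
      emits
	vec_mask_and_7 = vec_mask_5 & loop_mask_6;
      before GSI and returns vec_mask_and_7.  A (VEC_MASK, LOOP_MASK) pair
      already recorded in vec_cond_masked_set causes VEC_MASK to be
      returned unchanged.  */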
1848 static tree
1849 prepare_vec_mask (loop_vec_info loop_vinfo, tree mask_type, tree loop_mask,
1850 tree vec_mask, gimple_stmt_iterator *gsi)
1852 gcc_assert (useless_type_conversion_p (mask_type, TREE_TYPE (vec_mask)));
1853 if (!loop_mask)
1854 return vec_mask;
1856 gcc_assert (TREE_TYPE (loop_mask) == mask_type);
1858 if (loop_vinfo->vec_cond_masked_set.contains ({ vec_mask, loop_mask }))
1859 return vec_mask;
1861 tree and_res = make_temp_ssa_name (mask_type, NULL, "vec_mask_and");
1862 gimple *and_stmt = gimple_build_assign (and_res, BIT_AND_EXPR,
1863 vec_mask, loop_mask);
1865 gsi_insert_before (gsi, and_stmt, GSI_SAME_STMT);
1866 return and_res;
1869 /* Determine whether we can use a gather load or scatter store to vectorize
1870 strided load or store STMT_INFO by truncating the current offset to a
1871 smaller width. We need to be able to construct an offset vector:
1873 { 0, X, X*2, X*3, ... }
1875 without loss of precision, where X is STMT_INFO's DR_STEP.
1877 Return true if this is possible, describing the gather load or scatter
1878 store in GS_INFO. MASKED_P is true if the load or store is conditional. */
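   /* Worked example (numbers purely illustrative): with DR_STEP = 4, a
      scalar DR size of 4 and at most 255 latch iterations, trying
      SCALE = 4 gives X = DR_STEP / SCALE = 1, so the offsets are at most
      255 and fit in an unsigned 8-bit offset type; the target is then
      asked whether it supports a gather/scatter with (at least) 8-bit
      offsets and scale 4.  */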
1880 static bool
1881 vect_truncate_gather_scatter_offset (stmt_vec_info stmt_info,
1882 loop_vec_info loop_vinfo, bool masked_p,
1883 gather_scatter_info *gs_info)
1885 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
1886 data_reference *dr = dr_info->dr;
1887 tree step = DR_STEP (dr);
1888 if (TREE_CODE (step) != INTEGER_CST)
1890 /* ??? Perhaps we could use range information here? */
1891 if (dump_enabled_p ())
1892 dump_printf_loc (MSG_NOTE, vect_location,
1893 "cannot truncate variable step.\n");
1894 return false;
1897 /* Get the number of bits in an element. */
1898 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1899 scalar_mode element_mode = SCALAR_TYPE_MODE (TREE_TYPE (vectype));
1900 unsigned int element_bits = GET_MODE_BITSIZE (element_mode);
1902 /* Set COUNT to the upper limit on the number of elements - 1.
1903 Start with the maximum vectorization factor. */
1904 unsigned HOST_WIDE_INT count = vect_max_vf (loop_vinfo) - 1;
1906 /* Try lowering COUNT to the number of scalar latch iterations. */
1907 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1908 widest_int max_iters;
1909 if (max_loop_iterations (loop, &max_iters)
1910 && max_iters < count)
1911 count = max_iters.to_shwi ();
1913 /* Try scales of 1 and the element size. */
1914 int scales[] = { 1, vect_get_scalar_dr_size (dr_info) };
1915 wi::overflow_type overflow = wi::OVF_NONE;
1916 for (int i = 0; i < 2; ++i)
1918 int scale = scales[i];
1919 widest_int factor;
1920 if (!wi::multiple_of_p (wi::to_widest (step), scale, SIGNED, &factor))
1921 continue;
1923 /* Determine the minimum precision of (COUNT - 1) * STEP / SCALE. */
1924 widest_int range = wi::mul (count, factor, SIGNED, &overflow);
1925 if (overflow)
1926 continue;
1927 signop sign = range >= 0 ? UNSIGNED : SIGNED;
1928 unsigned int min_offset_bits = wi::min_precision (range, sign);
1930 /* Find the narrowest viable offset type. */
1931 unsigned int offset_bits = 1U << ceil_log2 (min_offset_bits);
1932 tree offset_type = build_nonstandard_integer_type (offset_bits,
1933 sign == UNSIGNED);
1935 /* See whether the target supports the operation with an offset
1936 no narrower than OFFSET_TYPE. */
1937 tree memory_type = TREE_TYPE (DR_REF (dr));
1938 if (!vect_gather_scatter_fn_p (loop_vinfo, DR_IS_READ (dr), masked_p,
1939 vectype, memory_type, offset_type, scale,
1940 &gs_info->ifn, &gs_info->offset_vectype)
1941 || gs_info->ifn == IFN_LAST)
1942 continue;
1944 gs_info->decl = NULL_TREE;
1945 /* Logically the sum of DR_BASE_ADDRESS, DR_INIT and DR_OFFSET,
1946 but we don't need to store that here. */
1947 gs_info->base = NULL_TREE;
1948 gs_info->element_type = TREE_TYPE (vectype);
1949 gs_info->offset = fold_convert (offset_type, step);
1950 gs_info->offset_dt = vect_constant_def;
1951 gs_info->scale = scale;
1952 gs_info->memory_type = memory_type;
1953 return true;
1956 if (overflow && dump_enabled_p ())
1957 dump_printf_loc (MSG_NOTE, vect_location,
1958 "truncating gather/scatter offset to %d bits"
1959 " might change its value.\n", element_bits);
1961 return false;
1964 /* Return true if we can use gather/scatter internal functions to
1965 vectorize STMT_INFO, which is a grouped or strided load or store.
1966 MASKED_P is true if load or store is conditional. When returning
1967 true, fill in GS_INFO with the information required to perform the
1968 operation. */
1970 static bool
1971 vect_use_strided_gather_scatters_p (stmt_vec_info stmt_info,
1972 loop_vec_info loop_vinfo, bool masked_p,
1973 gather_scatter_info *gs_info)
1975 if (!vect_check_gather_scatter (stmt_info, loop_vinfo, gs_info)
1976 || gs_info->ifn == IFN_LAST)
1977 return vect_truncate_gather_scatter_offset (stmt_info, loop_vinfo,
1978 masked_p, gs_info);
1980 tree old_offset_type = TREE_TYPE (gs_info->offset);
1981 tree new_offset_type = TREE_TYPE (gs_info->offset_vectype);
1983 gcc_assert (TYPE_PRECISION (new_offset_type)
1984 >= TYPE_PRECISION (old_offset_type));
1985 gs_info->offset = fold_convert (new_offset_type, gs_info->offset);
1987 if (dump_enabled_p ())
1988 dump_printf_loc (MSG_NOTE, vect_location,
1989 "using gather/scatter for strided/grouped access,"
1990 " scale = %d\n", gs_info->scale);
1992 return true;
1995 /* STMT_INFO is a non-strided load or store, meaning that it accesses
1996 elements with a known constant step. Return -1 if that step
1997 is negative, 0 if it is zero, and 1 if it is greater than zero. */
1999 static int
2000 compare_step_with_zero (vec_info *vinfo, stmt_vec_info stmt_info)
2002 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
2003 return tree_int_cst_compare (vect_dr_behavior (vinfo, dr_info)->step,
2004 size_zero_node);
2007 /* If the target supports a permute mask that reverses the elements in
2008 a vector of type VECTYPE, return that mask, otherwise return null. */
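   /* For example, for V8SI the required selector is
      { 7, 6, 5, 4, 3, 2, 1, 0 }; the builder below only records the
      single stepped pattern { 7, 6, 5 } and lets the remaining elements
      be extrapolated from it.  */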
2010 static tree
2011 perm_mask_for_reverse (tree vectype)
2013 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2015 /* The encoding has a single stepped pattern. */
2016 vec_perm_builder sel (nunits, 1, 3);
2017 for (int i = 0; i < 3; ++i)
2018 sel.quick_push (nunits - 1 - i);
2020 vec_perm_indices indices (sel, 1, nunits);
2021 if (!can_vec_perm_const_p (TYPE_MODE (vectype), TYPE_MODE (vectype),
2022 indices))
2023 return NULL_TREE;
2024 return vect_gen_perm_mask_checked (vectype, indices);
2027 /* A subroutine of get_load_store_type, with a subset of the same
2028 arguments. Handle the case where STMT_INFO is a load or store that
2029 accesses consecutive elements with a negative step. Sets *POFFSET
2030 to the offset to be applied to the DR for the first access. */
2032 static vect_memory_access_type
2033 get_negative_load_store_type (vec_info *vinfo,
2034 stmt_vec_info stmt_info, tree vectype,
2035 vec_load_store_type vls_type,
2036 unsigned int ncopies, poly_int64 *poffset)
2038 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
2039 dr_alignment_support alignment_support_scheme;
2041 if (ncopies > 1)
2043 if (dump_enabled_p ())
2044 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2045 "multiple types with negative step.\n");
2046 return VMAT_ELEMENTWISE;
2049 /* For backward-running DRs, the first access in VECTYPE actually is
2050 N-1 elements before the address of the DR. */
2051 *poffset = ((-TYPE_VECTOR_SUBPARTS (vectype) + 1)
2052 * TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (vectype))));
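   /* E.g. for a V4SI access with 4-byte elements this yields
      *POFFSET = (-4 + 1) * 4 = -12, so the (to be reversed) vector load
      starts three elements below the DR's address and covers that element
      together with the three preceding ones.  */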
2054 int misalignment = dr_misalignment (dr_info, vectype, *poffset);
2055 alignment_support_scheme
2056 = vect_supportable_dr_alignment (vinfo, dr_info, vectype, misalignment);
2057 if (alignment_support_scheme != dr_aligned
2058 && alignment_support_scheme != dr_unaligned_supported)
2060 if (dump_enabled_p ())
2061 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2062 "negative step but alignment required.\n");
2063 *poffset = 0;
2064 return VMAT_ELEMENTWISE;
2067 if (vls_type == VLS_STORE_INVARIANT)
2069 if (dump_enabled_p ())
2070 dump_printf_loc (MSG_NOTE, vect_location,
2071 "negative step with invariant source;"
2072 " no permute needed.\n");
2073 return VMAT_CONTIGUOUS_DOWN;
2076 if (!perm_mask_for_reverse (vectype))
2078 if (dump_enabled_p ())
2079 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2080 "negative step and reversing not supported.\n");
2081 *poffset = 0;
2082 return VMAT_ELEMENTWISE;
2085 return VMAT_CONTIGUOUS_REVERSE;
2088 /* STMT_INFO is either a masked or unconditional store. Return the value
2089 being stored. */
2091 tree
2092 vect_get_store_rhs (stmt_vec_info stmt_info)
2094 if (gassign *assign = dyn_cast <gassign *> (stmt_info->stmt))
2096 gcc_assert (gimple_assign_single_p (assign));
2097 return gimple_assign_rhs1 (assign);
2099 if (gcall *call = dyn_cast <gcall *> (stmt_info->stmt))
2101 internal_fn ifn = gimple_call_internal_fn (call);
2102 int index = internal_fn_stored_value_index (ifn);
2103 gcc_assert (index >= 0);
2104 return gimple_call_arg (call, index);
2106 gcc_unreachable ();
2109 /* Function VECTOR_VECTOR_COMPOSITION_TYPE
2111 This function returns a vector type that can be composed from NELTS pieces,
2112 whose type is recorded in PTYPE. VTYPE should be a vector type and has the
2113 same vector size as the returned vector. It first checks whether the target
2114 supports constructing the result from pieces-sized vector modes; if not, it
2115 checks whether a pieces-sized scalar mode can be used instead. It returns
2116 NULL_TREE if no suitable composition can be found.
2118 For example, for (vtype=V16QI, nelts=4), we can probably get:
2119 - V16QI with PTYPE V4QI.
2120 - V4SI with PTYPE SI.
2121 - NULL_TREE. */
2123 static tree
2124 vector_vector_composition_type (tree vtype, poly_uint64 nelts, tree *ptype)
2126 gcc_assert (VECTOR_TYPE_P (vtype));
2127 gcc_assert (known_gt (nelts, 0U));
2129 machine_mode vmode = TYPE_MODE (vtype);
2130 if (!VECTOR_MODE_P (vmode))
2131 return NULL_TREE;
2133 poly_uint64 vbsize = GET_MODE_BITSIZE (vmode);
2134 unsigned int pbsize;
2135 if (constant_multiple_p (vbsize, nelts, &pbsize))
2137 /* First check if vec_init optab supports construction from
2138 vector pieces directly. */
2139 scalar_mode elmode = SCALAR_TYPE_MODE (TREE_TYPE (vtype));
2140 poly_uint64 inelts = pbsize / GET_MODE_BITSIZE (elmode);
2141 machine_mode rmode;
2142 if (related_vector_mode (vmode, elmode, inelts).exists (&rmode)
2143 && (convert_optab_handler (vec_init_optab, vmode, rmode)
2144 != CODE_FOR_nothing))
2146 *ptype = build_vector_type (TREE_TYPE (vtype), inelts);
2147 return vtype;
2150 /* Otherwise check if exists an integer type of the same piece size and
2151 if vec_init optab supports construction from it directly. */
2152 if (int_mode_for_size (pbsize, 0).exists (&elmode)
2153 && related_vector_mode (vmode, elmode, nelts).exists (&rmode)
2154 && (convert_optab_handler (vec_init_optab, rmode, elmode)
2155 != CODE_FOR_nothing))
2157 *ptype = build_nonstandard_integer_type (pbsize, 1);
2158 return build_vector_type (*ptype, nelts);
2162 return NULL_TREE;
2165 /* A subroutine of get_load_store_type, with a subset of the same
2166 arguments. Handle the case where STMT_INFO is part of a grouped load
2167 or store.
2169 For stores, the statements in the group are all consecutive
2170 and there is no gap at the end. For loads, the statements in the
2171 group might not be consecutive; there can be gaps between statements
2172 as well as at the end. */
2174 static bool
2175 get_group_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info,
2176 tree vectype, slp_tree slp_node,
2177 bool masked_p, vec_load_store_type vls_type,
2178 vect_memory_access_type *memory_access_type,
2179 poly_int64 *poffset,
2180 dr_alignment_support *alignment_support_scheme,
2181 int *misalignment,
2182 gather_scatter_info *gs_info)
2184 loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
2185 class loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
2186 stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
2187 dr_vec_info *first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
2188 unsigned int group_size = DR_GROUP_SIZE (first_stmt_info);
2189 bool single_element_p = (stmt_info == first_stmt_info
2190 && !DR_GROUP_NEXT_ELEMENT (stmt_info));
2191 unsigned HOST_WIDE_INT gap = DR_GROUP_GAP (first_stmt_info);
2192 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2194 /* True if the vectorized statements would access beyond the last
2195 statement in the group. */
2196 bool overrun_p = false;
2198 /* True if we can cope with such overrun by peeling for gaps, so that
2199 there is at least one final scalar iteration after the vector loop. */
2200 bool can_overrun_p = (!masked_p
2201 && vls_type == VLS_LOAD
2202 && loop_vinfo
2203 && !loop->inner);
2205 /* There can only be a gap at the end of the group if the stride is
2206 known at compile time. */
2207 gcc_assert (!STMT_VINFO_STRIDED_P (first_stmt_info) || gap == 0);
2209 /* Stores can't yet have gaps. */
2210 gcc_assert (slp_node || vls_type == VLS_LOAD || gap == 0);
2212 if (slp_node)
2214 /* For SLP vectorization we directly vectorize a subchain
2215 without permutation. */
2216 if (! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
2217 first_dr_info
2218 = STMT_VINFO_DR_INFO (SLP_TREE_SCALAR_STMTS (slp_node)[0]);
2219 if (STMT_VINFO_STRIDED_P (first_stmt_info))
2221 /* Try to use consecutive accesses of DR_GROUP_SIZE elements,
2222 separated by the stride, until we have a complete vector.
2223 Fall back to scalar accesses if that isn't possible. */
2224 if (multiple_p (nunits, group_size))
2225 *memory_access_type = VMAT_STRIDED_SLP;
2226 else
2227 *memory_access_type = VMAT_ELEMENTWISE;
2229 else
2231 overrun_p = loop_vinfo && gap != 0;
2232 if (overrun_p && vls_type != VLS_LOAD)
2234 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2235 "Grouped store with gaps requires"
2236 " non-consecutive accesses\n");
2237 return false;
2239 /* An overrun is fine if the trailing elements are smaller
2240 than the alignment boundary B. Every vector access will
2241 be a multiple of B and so we are guaranteed to access a
2242 non-gap element in the same B-sized block. */
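	  /* For instance, with 4-byte elements and a known 16-byte
	     alignment the boundary B spans 4 elements; a gap of up to 3
	     elements then lies in the same 16-byte block as an element we
	     do access, so touching it cannot fault.  */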
2243 if (overrun_p
2244 && gap < (vect_known_alignment_in_bytes (first_dr_info,
2245 vectype)
2246 / vect_get_scalar_dr_size (first_dr_info)))
2247 overrun_p = false;
2249 /* If the gap splits the vector in half and the target
2250 can do half-vector operations, avoid the epilogue peeling
2251 by simply loading only half of the vector. Usually
2252 the construction with an upper zero half will be elided. */
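	  /* For instance, a V4SI load of a group of size 4 with a gap of 2
	     only needs the first two elements; when the checks below
	     succeed they can be loaded as a single half-width piece (see
	     vector_vector_composition_type) and widened with a zero upper
	     half, so no overrun and hence no gap peeling is required.  */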
2253 dr_alignment_support alss;
2254 int misalign = dr_misalignment (first_dr_info, vectype);
2255 tree half_vtype;
2256 if (overrun_p
2257 && !masked_p
2258 && (((alss = vect_supportable_dr_alignment (vinfo, first_dr_info,
2259 vectype, misalign)))
2260 == dr_aligned
2261 || alss == dr_unaligned_supported)
2262 && known_eq (nunits, (group_size - gap) * 2)
2263 && known_eq (nunits, group_size)
2264 && (vector_vector_composition_type (vectype, 2, &half_vtype)
2265 != NULL_TREE))
2266 overrun_p = false;
2268 if (overrun_p && !can_overrun_p)
2270 if (dump_enabled_p ())
2271 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2272 "Peeling for outer loop is not supported\n");
2273 return false;
2275 int cmp = compare_step_with_zero (vinfo, stmt_info);
2276 if (cmp < 0)
2278 if (single_element_p)
2279 /* ??? The VMAT_CONTIGUOUS_REVERSE code generation is
2280 only correct for single element "interleaving" SLP. */
2281 *memory_access_type = get_negative_load_store_type
2282 (vinfo, stmt_info, vectype, vls_type, 1, poffset);
2283 else
2285 /* Try to use consecutive accesses of DR_GROUP_SIZE elements,
2286 separated by the stride, until we have a complete vector.
2287 Fall back to scalar accesses if that isn't possible. */
2288 if (multiple_p (nunits, group_size))
2289 *memory_access_type = VMAT_STRIDED_SLP;
2290 else
2291 *memory_access_type = VMAT_ELEMENTWISE;
2294 else
2296 gcc_assert (!loop_vinfo || cmp > 0);
2297 *memory_access_type = VMAT_CONTIGUOUS;
2300 /* When we have a contiguous access across loop iterations
2301 but the access in the loop doesn't cover the full vector
2302 we can end up with no gap recorded but still excess
2303 elements accessed, see PR103116. Make sure we peel for
2304 gaps if necessary and sufficient and give up if not. */
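	  /* Illustrative numbers: with GROUP_SIZE = 3, VF = 2 and
	     NUNITS = 4 the last vector of an iteration contains
	     (3 * 2) % 4 = 2 elements that belong to it; since 2 + 3 >= 4,
	     peeling a single scalar iteration keeps the final vector
	     access within valid data, which is what the last clause of
	     the condition below checks.  */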
2305 if (loop_vinfo
2306 && *memory_access_type == VMAT_CONTIGUOUS
2307 && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()
2308 && !multiple_p (group_size * LOOP_VINFO_VECT_FACTOR (loop_vinfo),
2309 nunits))
2311 unsigned HOST_WIDE_INT cnunits, cvf;
2312 if (!can_overrun_p
2313 || !nunits.is_constant (&cnunits)
2314 || !LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&cvf)
2315 /* Peeling for gaps assumes that a single scalar iteration
2316 is enough to make sure the last vector iteration doesn't
2317 access excess elements.
2318 ??? Enhancements include peeling multiple iterations
2319 or using masked loads with a static mask. */
2320 || (group_size * cvf) % cnunits + group_size < cnunits)
2322 if (dump_enabled_p ())
2323 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2324 "peeling for gaps insufficient for "
2325 "access\n");
2326 return false;
2328 overrun_p = true;
2332 else
2334 /* We can always handle this case using elementwise accesses,
2335 but see if something more efficient is available. */
2336 *memory_access_type = VMAT_ELEMENTWISE;
2338 /* If there is a gap at the end of the group then these optimizations
2339 would access excess elements in the last iteration. */
2340 bool would_overrun_p = (gap != 0);
2341 /* An overrun is fine if the trailing elements are smaller than the
2342 alignment boundary B. Every vector access will be a multiple of B
2343 and so we are guaranteed to access a non-gap element in the
2344 same B-sized block. */
2345 if (would_overrun_p
2346 && !masked_p
2347 && gap < (vect_known_alignment_in_bytes (first_dr_info, vectype)
2348 / vect_get_scalar_dr_size (first_dr_info)))
2349 would_overrun_p = false;
2351 if (!STMT_VINFO_STRIDED_P (first_stmt_info)
2352 && (can_overrun_p || !would_overrun_p)
2353 && compare_step_with_zero (vinfo, stmt_info) > 0)
2355 /* First cope with the degenerate case of a single-element
2356 vector. */
2357 if (known_eq (TYPE_VECTOR_SUBPARTS (vectype), 1U))
2360 /* Otherwise try using LOAD/STORE_LANES. */
2361 else if (vls_type == VLS_LOAD
2362 ? vect_load_lanes_supported (vectype, group_size, masked_p)
2363 : vect_store_lanes_supported (vectype, group_size,
2364 masked_p))
2366 *memory_access_type = VMAT_LOAD_STORE_LANES;
2367 overrun_p = would_overrun_p;
2370 /* If that fails, try using permuting loads. */
2371 else if (vls_type == VLS_LOAD
2372 ? vect_grouped_load_supported (vectype, single_element_p,
2373 group_size)
2374 : vect_grouped_store_supported (vectype, group_size))
2376 *memory_access_type = VMAT_CONTIGUOUS_PERMUTE;
2377 overrun_p = would_overrun_p;
2381 /* As a last resort, try using a gather load or scatter store.
2383 ??? Although the code can handle all group sizes correctly,
2384 it probably isn't a win to use separate strided accesses based
2385 on nearby locations. Or, even if it's a win over scalar code,
2386 it might not be a win over vectorizing at a lower VF, if that
2387 allows us to use contiguous accesses. */
2388 if (*memory_access_type == VMAT_ELEMENTWISE
2389 && single_element_p
2390 && loop_vinfo
2391 && vect_use_strided_gather_scatters_p (stmt_info, loop_vinfo,
2392 masked_p, gs_info))
2393 *memory_access_type = VMAT_GATHER_SCATTER;
2396 if (*memory_access_type == VMAT_GATHER_SCATTER
2397 || *memory_access_type == VMAT_ELEMENTWISE)
2399 *alignment_support_scheme = dr_unaligned_supported;
2400 *misalignment = DR_MISALIGNMENT_UNKNOWN;
2402 else
2404 *misalignment = dr_misalignment (first_dr_info, vectype, *poffset);
2405 *alignment_support_scheme
2406 = vect_supportable_dr_alignment (vinfo, first_dr_info, vectype,
2407 *misalignment);
2410 if (vls_type != VLS_LOAD && first_stmt_info == stmt_info)
2412 /* STMT is the leader of the group. Check the operands of all the
2413 stmts of the group. */
2414 stmt_vec_info next_stmt_info = DR_GROUP_NEXT_ELEMENT (stmt_info);
2415 while (next_stmt_info)
2417 tree op = vect_get_store_rhs (next_stmt_info);
2418 enum vect_def_type dt;
2419 if (!vect_is_simple_use (op, vinfo, &dt))
2421 if (dump_enabled_p ())
2422 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2423 "use not simple.\n");
2424 return false;
2426 next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
2430 if (overrun_p)
2432 gcc_assert (can_overrun_p);
2433 if (dump_enabled_p ())
2434 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2435 "Data access with gaps requires scalar "
2436 "epilogue loop\n");
2437 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
2440 return true;
2443 /* Analyze load or store statement STMT_INFO of type VLS_TYPE. Return true
2444 if there is a memory access type that the vectorized form can use,
2445 storing it in *MEMORY_ACCESS_TYPE if so. If we decide to use gathers
2446 or scatters, fill in GS_INFO accordingly. In addition
2447 *ALIGNMENT_SUPPORT_SCHEME is filled out and false is returned if
2448 the target does not support the alignment scheme. *MISALIGNMENT
2449 is set according to the alignment of the access (including
2450 DR_MISALIGNMENT_UNKNOWN when it is unknown).
2452 SLP says whether we're performing SLP rather than loop vectorization.
2453 MASKED_P is true if the statement is conditional on a vectorized mask.
2454 VECTYPE is the vector type that the vectorized statements will use.
2455 NCOPIES is the number of vector statements that will be needed. */
2457 static bool
2458 get_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info,
2459 tree vectype, slp_tree slp_node,
2460 bool masked_p, vec_load_store_type vls_type,
2461 unsigned int ncopies,
2462 vect_memory_access_type *memory_access_type,
2463 poly_int64 *poffset,
2464 dr_alignment_support *alignment_support_scheme,
2465 int *misalignment,
2466 gather_scatter_info *gs_info)
2468 loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
2469 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2470 *misalignment = DR_MISALIGNMENT_UNKNOWN;
2471 *poffset = 0;
2472 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
2474 *memory_access_type = VMAT_GATHER_SCATTER;
2475 if (!vect_check_gather_scatter (stmt_info, loop_vinfo, gs_info))
2476 gcc_unreachable ();
2477 /* When using internal functions, we rely on pattern recognition
2478 to convert the type of the offset to the type that the target
2479 requires, with the result being a call to an internal function.
2480 If that failed for some reason (e.g. because another pattern
2481 took priority), just handle cases in which the offset already
2482 has the right type. */
2483 else if (gs_info->ifn != IFN_LAST
2484 && !is_gimple_call (stmt_info->stmt)
2485 && !tree_nop_conversion_p (TREE_TYPE (gs_info->offset),
2486 TREE_TYPE (gs_info->offset_vectype)))
2488 if (dump_enabled_p ())
2489 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2490 "%s offset requires a conversion\n",
2491 vls_type == VLS_LOAD ? "gather" : "scatter");
2492 return false;
2494 else if (!vect_is_simple_use (gs_info->offset, vinfo,
2495 &gs_info->offset_dt,
2496 &gs_info->offset_vectype))
2498 if (dump_enabled_p ())
2499 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2500 "%s index use not simple.\n",
2501 vls_type == VLS_LOAD ? "gather" : "scatter");
2502 return false;
2504 else if (gs_info->ifn == IFN_LAST && !gs_info->decl)
2506 if (vls_type != VLS_LOAD)
2508 if (dump_enabled_p ())
2509 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2510 "unsupported emulated scatter.\n");
2511 return false;
2513 else if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant ()
2514 || !TYPE_VECTOR_SUBPARTS
2515 (gs_info->offset_vectype).is_constant ()
2516 || !constant_multiple_p (TYPE_VECTOR_SUBPARTS
2517 (gs_info->offset_vectype),
2518 TYPE_VECTOR_SUBPARTS (vectype)))
2520 if (dump_enabled_p ())
2521 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2522 "unsupported vector types for emulated "
2523 "gather.\n");
2524 return false;
2527 /* Gather-scatter accesses perform only component accesses; alignment
2528 is irrelevant for them. */
2529 *alignment_support_scheme = dr_unaligned_supported;
2531 else if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
2533 if (!get_group_load_store_type (vinfo, stmt_info, vectype, slp_node,
2534 masked_p,
2535 vls_type, memory_access_type, poffset,
2536 alignment_support_scheme,
2537 misalignment, gs_info))
2538 return false;
2540 else if (STMT_VINFO_STRIDED_P (stmt_info))
2542 gcc_assert (!slp_node);
2543 if (loop_vinfo
2544 && vect_use_strided_gather_scatters_p (stmt_info, loop_vinfo,
2545 masked_p, gs_info))
2546 *memory_access_type = VMAT_GATHER_SCATTER;
2547 else
2548 *memory_access_type = VMAT_ELEMENTWISE;
2549 /* Alignment is irrelevant here. */
2550 *alignment_support_scheme = dr_unaligned_supported;
2552 else
2554 int cmp = compare_step_with_zero (vinfo, stmt_info);
2555 if (cmp == 0)
2557 gcc_assert (vls_type == VLS_LOAD);
2558 *memory_access_type = VMAT_INVARIANT;
2559 /* Invariant accesses perform only component accesses; alignment
2560 is irrelevant for them. */
2561 *alignment_support_scheme = dr_unaligned_supported;
2563 else
2565 if (cmp < 0)
2566 *memory_access_type = get_negative_load_store_type
2567 (vinfo, stmt_info, vectype, vls_type, ncopies, poffset);
2568 else
2569 *memory_access_type = VMAT_CONTIGUOUS;
2570 *misalignment = dr_misalignment (STMT_VINFO_DR_INFO (stmt_info),
2571 vectype, *poffset);
2572 *alignment_support_scheme
2573 = vect_supportable_dr_alignment (vinfo,
2574 STMT_VINFO_DR_INFO (stmt_info),
2575 vectype, *misalignment);
2579 if ((*memory_access_type == VMAT_ELEMENTWISE
2580 || *memory_access_type == VMAT_STRIDED_SLP)
2581 && !nunits.is_constant ())
2583 if (dump_enabled_p ())
2584 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2585 "Not using elementwise accesses due to variable "
2586 "vectorization factor.\n");
2587 return false;
2590 if (*alignment_support_scheme == dr_unaligned_unsupported)
2592 if (dump_enabled_p ())
2593 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2594 "unsupported unaligned access\n");
2595 return false;
2598 /* FIXME: At the moment the cost model seems to underestimate the
2599 cost of using elementwise accesses. This check preserves the
2600 traditional behavior until that can be fixed. */
2601 stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
2602 if (!first_stmt_info)
2603 first_stmt_info = stmt_info;
2604 if (*memory_access_type == VMAT_ELEMENTWISE
2605 && !STMT_VINFO_STRIDED_P (first_stmt_info)
2606 && !(stmt_info == DR_GROUP_FIRST_ELEMENT (stmt_info)
2607 && !DR_GROUP_NEXT_ELEMENT (stmt_info)
2608 && !pow2p_hwi (DR_GROUP_SIZE (stmt_info))))
2610 if (dump_enabled_p ())
2611 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2612 "not falling back to elementwise accesses\n");
2613 return false;
2615 return true;
2618 /* Return true if boolean argument at MASK_INDEX is suitable for vectorizing
2619 conditional operation STMT_INFO. When returning true, store the mask
2620 in *MASK, the type of its definition in *MASK_DT_OUT, the type of the
2621 vectorized mask in *MASK_VECTYPE_OUT and the SLP node corresponding
2622 to the mask in *MASK_NODE if MASK_NODE is not NULL. */
2624 static bool
2625 vect_check_scalar_mask (vec_info *vinfo, stmt_vec_info stmt_info,
2626 slp_tree slp_node, unsigned mask_index,
2627 tree *mask, slp_tree *mask_node,
2628 vect_def_type *mask_dt_out, tree *mask_vectype_out)
2630 enum vect_def_type mask_dt;
2631 tree mask_vectype;
2632 slp_tree mask_node_1;
2633 if (!vect_is_simple_use (vinfo, stmt_info, slp_node, mask_index,
2634 mask, &mask_node_1, &mask_dt, &mask_vectype))
2636 if (dump_enabled_p ())
2637 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2638 "mask use not simple.\n");
2639 return false;
2642 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (*mask)))
2644 if (dump_enabled_p ())
2645 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2646 "mask argument is not a boolean.\n");
2647 return false;
2650 /* If the caller is not prepared for adjusting an external/constant
2651 SLP mask vector type fail. */
2652 if (slp_node
2653 && !mask_node
2654 && SLP_TREE_DEF_TYPE (mask_node_1) != vect_internal_def)
2656 if (dump_enabled_p ())
2657 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2658 "SLP mask argument is not vectorized.\n");
2659 return false;
2662 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2663 if (!mask_vectype)
2664 mask_vectype = get_mask_type_for_scalar_type (vinfo, TREE_TYPE (vectype));
2666 if (!mask_vectype || !VECTOR_BOOLEAN_TYPE_P (mask_vectype))
2668 if (dump_enabled_p ())
2669 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2670 "could not find an appropriate vector mask type.\n");
2671 return false;
2674 if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_vectype),
2675 TYPE_VECTOR_SUBPARTS (vectype)))
2677 if (dump_enabled_p ())
2678 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2679 "vector mask type %T"
2680 " does not match vector data type %T.\n",
2681 mask_vectype, vectype);
2683 return false;
2686 *mask_dt_out = mask_dt;
2687 *mask_vectype_out = mask_vectype;
2688 if (mask_node)
2689 *mask_node = mask_node_1;
2690 return true;
2693 /* Return true if stored value RHS is suitable for vectorizing store
2694 statement STMT_INFO. When returning true, store the type of the
2695 definition in *RHS_DT_OUT, the type of the vectorized store value in
2696 *RHS_VECTYPE_OUT and the type of the store in *VLS_TYPE_OUT. */
2698 static bool
2699 vect_check_store_rhs (vec_info *vinfo, stmt_vec_info stmt_info,
2700 slp_tree slp_node, tree rhs,
2701 vect_def_type *rhs_dt_out, tree *rhs_vectype_out,
2702 vec_load_store_type *vls_type_out)
2704 /* If this is a store from a constant, make sure
2705 native_encode_expr can handle it. */
2706 if (CONSTANT_CLASS_P (rhs) && native_encode_expr (rhs, NULL, 64) == 0)
2708 if (dump_enabled_p ())
2709 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2710 "cannot encode constant as a byte sequence.\n");
2711 return false;
2714 unsigned op_no = 0;
2715 if (gcall *call = dyn_cast <gcall *> (stmt_info->stmt))
2717 if (gimple_call_internal_p (call)
2718 && internal_store_fn_p (gimple_call_internal_fn (call)))
2719 op_no = internal_fn_stored_value_index (gimple_call_internal_fn (call));
2722 enum vect_def_type rhs_dt;
2723 tree rhs_vectype;
2724 slp_tree slp_op;
2725 if (!vect_is_simple_use (vinfo, stmt_info, slp_node, op_no,
2726 &rhs, &slp_op, &rhs_dt, &rhs_vectype))
2728 if (dump_enabled_p ())
2729 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2730 "use not simple.\n");
2731 return false;
2734 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2735 if (rhs_vectype && !useless_type_conversion_p (vectype, rhs_vectype))
2737 if (dump_enabled_p ())
2738 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2739 "incompatible vector types.\n");
2740 return false;
2743 *rhs_dt_out = rhs_dt;
2744 *rhs_vectype_out = rhs_vectype;
2745 if (rhs_dt == vect_constant_def || rhs_dt == vect_external_def)
2746 *vls_type_out = VLS_STORE_INVARIANT;
2747 else
2748 *vls_type_out = VLS_STORE;
2749 return true;
2752 /* Build an all-ones vector mask of type MASKTYPE while vectorizing STMT_INFO.
2753 Note that we support masks with floating-point type, in which case the
2754 floats are interpreted as a bitmask. */
2756 static tree
2757 vect_build_all_ones_mask (vec_info *vinfo,
2758 stmt_vec_info stmt_info, tree masktype)
2760 if (TREE_CODE (masktype) == INTEGER_TYPE)
2761 return build_int_cst (masktype, -1);
2762 else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
2764 tree mask = build_int_cst (TREE_TYPE (masktype), -1);
2765 mask = build_vector_from_val (masktype, mask);
2766 return vect_init_vector (vinfo, stmt_info, mask, masktype, NULL);
2768 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
2770 REAL_VALUE_TYPE r;
2771 long tmp[6];
2772 for (int j = 0; j < 6; ++j)
2773 tmp[j] = -1;
2774 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
2775 tree mask = build_real (TREE_TYPE (masktype), r);
2776 mask = build_vector_from_val (masktype, mask);
2777 return vect_init_vector (vinfo, stmt_info, mask, masktype, NULL);
2779 gcc_unreachable ();
2782 /* Build an all-zero merge value of type VECTYPE while vectorizing
2783 STMT_INFO as a gather load. */
2785 static tree
2786 vect_build_zero_merge_argument (vec_info *vinfo,
2787 stmt_vec_info stmt_info, tree vectype)
2789 tree merge;
2790 if (TREE_CODE (TREE_TYPE (vectype)) == INTEGER_TYPE)
2791 merge = build_int_cst (TREE_TYPE (vectype), 0);
2792 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (vectype)))
2794 REAL_VALUE_TYPE r;
2795 long tmp[6];
2796 for (int j = 0; j < 6; ++j)
2797 tmp[j] = 0;
2798 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (vectype)));
2799 merge = build_real (TREE_TYPE (vectype), r);
2801 else
2802 gcc_unreachable ();
2803 merge = build_vector_from_val (vectype, merge);
2804 return vect_init_vector (vinfo, stmt_info, merge, vectype, NULL);
2807 /* Build a gather load call while vectorizing STMT_INFO. Insert new
2808 instructions before GSI and add them to VEC_STMT. GS_INFO describes
2809 the gather load operation. If the load is conditional, MASK is the
2810 unvectorized condition and MASK_DT is its definition type, otherwise
2811 MASK is null. */
2813 static void
2814 vect_build_gather_load_calls (vec_info *vinfo, stmt_vec_info stmt_info,
2815 gimple_stmt_iterator *gsi,
2816 gimple **vec_stmt,
2817 gather_scatter_info *gs_info,
2818 tree mask)
2820 loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
2821 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2822 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2823 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2824 int ncopies = vect_get_num_copies (loop_vinfo, vectype);
2825 edge pe = loop_preheader_edge (loop);
2826 enum { NARROW, NONE, WIDEN } modifier;
2827 poly_uint64 gather_off_nunits
2828 = TYPE_VECTOR_SUBPARTS (gs_info->offset_vectype);
2830 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info->decl));
2831 tree rettype = TREE_TYPE (TREE_TYPE (gs_info->decl));
2832 tree srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2833 tree ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2834 tree idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2835 tree masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2836 tree scaletype = TREE_VALUE (arglist);
2837 tree real_masktype = masktype;
2838 gcc_checking_assert (types_compatible_p (srctype, rettype)
2839 && (!mask
2840 || TREE_CODE (masktype) == INTEGER_TYPE
2841 || types_compatible_p (srctype, masktype)));
2842 if (mask)
2843 masktype = truth_type_for (srctype);
2845 tree mask_halftype = masktype;
2846 tree perm_mask = NULL_TREE;
2847 tree mask_perm_mask = NULL_TREE;
2848 if (known_eq (nunits, gather_off_nunits))
2849 modifier = NONE;
2850 else if (known_eq (nunits * 2, gather_off_nunits))
2852 modifier = WIDEN;
2854 /* Currently widening gathers and scatters are only supported for
2855 fixed-length vectors. */
2856 int count = gather_off_nunits.to_constant ();
2857 vec_perm_builder sel (count, count, 1);
2858 for (int i = 0; i < count; ++i)
2859 sel.quick_push (i | (count / 2));
2861 vec_perm_indices indices (sel, 1, count);
2862 perm_mask = vect_gen_perm_mask_checked (gs_info->offset_vectype,
2863 indices);
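      /* For instance, with NUNITS = 4 and GATHER_OFF_NUNITS = 8 the
	 selector just built is { 4, 5, 6, 7, 4, 5, 6, 7 }, i.e. it exposes
	 the upper half of the offset vector, which is used for the
	 odd-numbered copies below.  */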
2865 else if (known_eq (nunits, gather_off_nunits * 2))
2867 modifier = NARROW;
2869 /* Currently narrowing gathers and scatters are only supported for
2870 fixed-length vectors. */
2871 int count = nunits.to_constant ();
2872 vec_perm_builder sel (count, count, 1);
2873 sel.quick_grow (count);
2874 for (int i = 0; i < count; ++i)
2875 sel[i] = i < count / 2 ? i : i + count / 2;
2876 vec_perm_indices indices (sel, 2, count);
2877 perm_mask = vect_gen_perm_mask_checked (vectype, indices);
2879 ncopies *= 2;
2881 if (mask && VECTOR_TYPE_P (real_masktype))
2883 for (int i = 0; i < count; ++i)
2884 sel[i] = i | (count / 2);
2885 indices.new_vector (sel, 2, count);
2886 mask_perm_mask = vect_gen_perm_mask_checked (masktype, indices);
2888 else if (mask)
2889 mask_halftype = truth_type_for (gs_info->offset_vectype);
2891 else
2892 gcc_unreachable ();
2894 tree scalar_dest = gimple_get_lhs (stmt_info->stmt);
2895 tree vec_dest = vect_create_destination_var (scalar_dest, vectype);
2897 tree ptr = fold_convert (ptrtype, gs_info->base);
2898 if (!is_gimple_min_invariant (ptr))
2900 gimple_seq seq;
2901 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
2902 basic_block new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
2903 gcc_assert (!new_bb);
2906 tree scale = build_int_cst (scaletype, gs_info->scale);
2908 tree vec_oprnd0 = NULL_TREE;
2909 tree vec_mask = NULL_TREE;
2910 tree src_op = NULL_TREE;
2911 tree mask_op = NULL_TREE;
2912 tree prev_res = NULL_TREE;
2914 if (!mask)
2916 src_op = vect_build_zero_merge_argument (vinfo, stmt_info, rettype);
2917 mask_op = vect_build_all_ones_mask (vinfo, stmt_info, masktype);
2920 auto_vec<tree> vec_oprnds0;
2921 auto_vec<tree> vec_masks;
2922 vect_get_vec_defs_for_operand (vinfo, stmt_info,
2923 modifier == WIDEN ? ncopies / 2 : ncopies,
2924 gs_info->offset, &vec_oprnds0);
2925 if (mask)
2926 vect_get_vec_defs_for_operand (vinfo, stmt_info,
2927 modifier == NARROW ? ncopies / 2 : ncopies,
2928 mask, &vec_masks, masktype);
2929 for (int j = 0; j < ncopies; ++j)
2931 tree op, var;
2932 if (modifier == WIDEN && (j & 1))
2933 op = permute_vec_elements (vinfo, vec_oprnd0, vec_oprnd0,
2934 perm_mask, stmt_info, gsi);
2935 else
2936 op = vec_oprnd0 = vec_oprnds0[modifier == WIDEN ? j / 2 : j];
2938 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
2940 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
2941 TYPE_VECTOR_SUBPARTS (idxtype)));
2942 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
2943 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
2944 gassign *new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
2945 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
2946 op = var;
2949 if (mask)
2951 if (mask_perm_mask && (j & 1))
2952 mask_op = permute_vec_elements (vinfo, mask_op, mask_op,
2953 mask_perm_mask, stmt_info, gsi);
2954 else
2956 if (modifier == NARROW)
2958 if ((j & 1) == 0)
2959 vec_mask = vec_masks[j / 2];
2961 else
2962 vec_mask = vec_masks[j];
2964 mask_op = vec_mask;
2965 if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
2967 poly_uint64 sub1 = TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op));
2968 poly_uint64 sub2 = TYPE_VECTOR_SUBPARTS (masktype);
2969 gcc_assert (known_eq (sub1, sub2));
2970 var = vect_get_new_ssa_name (masktype, vect_simple_var);
2971 mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
2972 gassign *new_stmt
2973 = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op);
2974 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
2975 mask_op = var;
2978 if (modifier == NARROW && !VECTOR_TYPE_P (real_masktype))
2980 var = vect_get_new_ssa_name (mask_halftype, vect_simple_var);
2981 gassign *new_stmt
2982 = gimple_build_assign (var, (j & 1) ? VEC_UNPACK_HI_EXPR
2983 : VEC_UNPACK_LO_EXPR,
2984 mask_op);
2985 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
2986 mask_op = var;
2988 src_op = mask_op;
2991 tree mask_arg = mask_op;
2992 if (masktype != real_masktype)
2994 tree utype, optype = TREE_TYPE (mask_op);
2995 if (VECTOR_TYPE_P (real_masktype)
2996 || TYPE_MODE (real_masktype) == TYPE_MODE (optype))
2997 utype = real_masktype;
2998 else
2999 utype = lang_hooks.types.type_for_mode (TYPE_MODE (optype), 1);
3000 var = vect_get_new_ssa_name (utype, vect_scalar_var);
3001 mask_arg = build1 (VIEW_CONVERT_EXPR, utype, mask_op);
3002 gassign *new_stmt
3003 = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_arg);
3004 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
3005 mask_arg = var;
3006 if (!useless_type_conversion_p (real_masktype, utype))
3008 gcc_assert (TYPE_PRECISION (utype)
3009 <= TYPE_PRECISION (real_masktype));
3010 var = vect_get_new_ssa_name (real_masktype, vect_scalar_var);
3011 new_stmt = gimple_build_assign (var, NOP_EXPR, mask_arg);
3012 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
3013 mask_arg = var;
3015 src_op = build_zero_cst (srctype);
3017 gimple *new_stmt = gimple_build_call (gs_info->decl, 5, src_op, ptr, op,
3018 mask_arg, scale);
3020 if (!useless_type_conversion_p (vectype, rettype))
3022 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype),
3023 TYPE_VECTOR_SUBPARTS (rettype)));
3024 op = vect_get_new_ssa_name (rettype, vect_simple_var);
3025 gimple_call_set_lhs (new_stmt, op);
3026 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
3027 var = make_ssa_name (vec_dest);
3028 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
3029 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
3030 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
3032 else
3034 var = make_ssa_name (vec_dest, new_stmt);
3035 gimple_call_set_lhs (new_stmt, var);
3036 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
3039 if (modifier == NARROW)
3041 if ((j & 1) == 0)
3043 prev_res = var;
3044 continue;
3046 var = permute_vec_elements (vinfo, prev_res, var, perm_mask,
3047 stmt_info, gsi);
3048 new_stmt = SSA_NAME_DEF_STMT (var);
3051 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
3053 *vec_stmt = STMT_VINFO_VEC_STMTS (stmt_info)[0];
3056 /* Prepare the base and offset in GS_INFO for vectorization.
3057 Set *DATAREF_PTR to the loop-invariant base address and *VEC_OFFSET
3058 to the vectorized offset argument for the first copy of STMT_INFO.
3059 STMT_INFO is the statement described by GS_INFO and LOOP is the
3060 containing loop. */
3062 static void
3063 vect_get_gather_scatter_ops (loop_vec_info loop_vinfo,
3064 class loop *loop, stmt_vec_info stmt_info,
3065 slp_tree slp_node, gather_scatter_info *gs_info,
3066 tree *dataref_ptr, vec<tree> *vec_offset)
3068 gimple_seq stmts = NULL;
3069 *dataref_ptr = force_gimple_operand (gs_info->base, &stmts, true, NULL_TREE);
3070 if (stmts != NULL)
3072 basic_block new_bb;
3073 edge pe = loop_preheader_edge (loop);
3074 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
3075 gcc_assert (!new_bb);
3077 if (slp_node)
3078 vect_get_slp_defs (SLP_TREE_CHILDREN (slp_node)[0], vec_offset);
3079 else
3081 unsigned ncopies
3082 = vect_get_num_copies (loop_vinfo, gs_info->offset_vectype);
3083 vect_get_vec_defs_for_operand (loop_vinfo, stmt_info, ncopies,
3084 gs_info->offset, vec_offset,
3085 gs_info->offset_vectype);
3089 /* Prepare to implement a grouped or strided load or store using
3090 the gather load or scatter store operation described by GS_INFO.
3091 STMT_INFO is the load or store statement.
3093 Set *DATAREF_BUMP to the amount that should be added to the base
3094 address after each copy of the vectorized statement. Set *VEC_OFFSET
3095 to an invariant offset vector in which element I has the value
3096 I * DR_STEP / SCALE. */
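   /* Worked example (values purely illustrative): for a V4SI access with
      DR_STEP = 16 and SCALE = 16, *DATAREF_BUMP becomes 16 * 4 = 64 bytes
      and *VEC_OFFSET becomes the invariant series { 0, 1, 2, 3 }, which
      the gather/scatter instruction then rescales by 16.  */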
3098 static void
3099 vect_get_strided_load_store_ops (stmt_vec_info stmt_info,
3100 loop_vec_info loop_vinfo,
3101 gather_scatter_info *gs_info,
3102 tree *dataref_bump, tree *vec_offset)
3104 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
3105 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3107 tree bump = size_binop (MULT_EXPR,
3108 fold_convert (sizetype, unshare_expr (DR_STEP (dr))),
3109 size_int (TYPE_VECTOR_SUBPARTS (vectype)));
3110 *dataref_bump = cse_and_gimplify_to_preheader (loop_vinfo, bump);
3112 /* The offset given in GS_INFO can have pointer type, so use the element
3113 type of the vector instead. */
3114 tree offset_type = TREE_TYPE (gs_info->offset_vectype);
3116 /* Calculate X = DR_STEP / SCALE and convert it to the appropriate type. */
3117 tree step = size_binop (EXACT_DIV_EXPR, unshare_expr (DR_STEP (dr)),
3118 ssize_int (gs_info->scale));
3119 step = fold_convert (offset_type, step);
3121 /* Create {0, X, X*2, X*3, ...}. */
3122 tree offset = fold_build2 (VEC_SERIES_EXPR, gs_info->offset_vectype,
3123 build_zero_cst (offset_type), step);
3124 *vec_offset = cse_and_gimplify_to_preheader (loop_vinfo, offset);
3127 /* Return the amount that should be added to a vector pointer to move
3128 to the next or previous copy of AGGR_TYPE. DR_INFO is the data reference
3129 being vectorized and MEMORY_ACCESS_TYPE describes the type of
3130 vectorization. */
3132 static tree
3133 vect_get_data_ptr_increment (vec_info *vinfo,
3134 dr_vec_info *dr_info, tree aggr_type,
3135 vect_memory_access_type memory_access_type)
3137 if (memory_access_type == VMAT_INVARIANT)
3138 return size_zero_node;
3140 tree iv_step = TYPE_SIZE_UNIT (aggr_type);
3141 tree step = vect_dr_behavior (vinfo, dr_info)->step;
3142 if (tree_int_cst_sgn (step) == -1)
3143 iv_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv_step), iv_step);
3144 return iv_step;
3147 /* Check and perform vectorization of BUILT_IN_BSWAP{16,32,64,128}. */
3149 static bool
3150 vectorizable_bswap (vec_info *vinfo,
3151 stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
3152 gimple **vec_stmt, slp_tree slp_node,
3153 slp_tree *slp_op,
3154 tree vectype_in, stmt_vector_for_cost *cost_vec)
3156 tree op, vectype;
3157 gcall *stmt = as_a <gcall *> (stmt_info->stmt);
3158 loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
3159 unsigned ncopies;
3161 op = gimple_call_arg (stmt, 0);
3162 vectype = STMT_VINFO_VECTYPE (stmt_info);
3163 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
3165 /* Multiple types in SLP are handled by creating the appropriate number of
3166 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3167 case of SLP. */
3168 if (slp_node)
3169 ncopies = 1;
3170 else
3171 ncopies = vect_get_num_copies (loop_vinfo, vectype);
3173 gcc_assert (ncopies >= 1);
3175 tree char_vectype = get_same_sized_vectype (char_type_node, vectype_in);
3176 if (! char_vectype)
3177 return false;
3179 poly_uint64 num_bytes = TYPE_VECTOR_SUBPARTS (char_vectype);
3180 unsigned word_bytes;
3181 if (!constant_multiple_p (num_bytes, nunits, &word_bytes))
3182 return false;
3184 /* The encoding uses one stepped pattern for each byte in the word. */
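   /* E.g. for __builtin_bswap32 on V4SI viewed as V16QI (WORD_BYTES = 4)
      the full selector is { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8,
      15, 14, 13, 12 }; only the entries for the first three words are
      pushed below, the fourth word's { 15, 14, 13, 12 } being
      extrapolated from the stepped patterns.  */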
3185 vec_perm_builder elts (num_bytes, word_bytes, 3);
3186 for (unsigned i = 0; i < 3; ++i)
3187 for (unsigned j = 0; j < word_bytes; ++j)
3188 elts.quick_push ((i + 1) * word_bytes - j - 1);
3190 vec_perm_indices indices (elts, 1, num_bytes);
3191 machine_mode vmode = TYPE_MODE (char_vectype);
3192 if (!can_vec_perm_const_p (vmode, vmode, indices))
3193 return false;
3195 if (! vec_stmt)
3197 if (slp_node
3198 && !vect_maybe_update_slp_op_vectype (slp_op[0], vectype_in))
3200 if (dump_enabled_p ())
3201 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3202 "incompatible vector types for invariants\n");
3203 return false;
3206 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
3207 DUMP_VECT_SCOPE ("vectorizable_bswap");
3208 record_stmt_cost (cost_vec,
3209 1, vector_stmt, stmt_info, 0, vect_prologue);
3210 record_stmt_cost (cost_vec,
3211 slp_node
3212 ? SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) : ncopies,
3213 vec_perm, stmt_info, 0, vect_body);
3214 return true;
3217 tree bswap_vconst = vec_perm_indices_to_tree (char_vectype, indices);
3219 /* Transform. */
3220 vec<tree> vec_oprnds = vNULL;
3221 vect_get_vec_defs (vinfo, stmt_info, slp_node, ncopies,
3222 op, &vec_oprnds);
3223 /* Arguments are ready. Create the new vector stmt. */
3224 unsigned i;
3225 tree vop;
3226 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
3228 gimple *new_stmt;
3229 tree tem = make_ssa_name (char_vectype);
3230 new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
3231 char_vectype, vop));
3232 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
3233 tree tem2 = make_ssa_name (char_vectype);
3234 new_stmt = gimple_build_assign (tem2, VEC_PERM_EXPR,
3235 tem, tem, bswap_vconst);
3236 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
3237 tem = make_ssa_name (vectype);
3238 new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
3239 vectype, tem2));
3240 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
3241 if (slp_node)
3242 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3243 else
3244 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
3247 if (!slp_node)
3248 *vec_stmt = STMT_VINFO_VEC_STMTS (stmt_info)[0];
3250 vec_oprnds.release ();
3251 return true;
3254 /* Return true if vector types VECTYPE_IN and VECTYPE_OUT have
3255 integer elements and if we can narrow VECTYPE_IN to VECTYPE_OUT
3256 in a single step. On success, store the binary pack code in
3257 *CONVERT_CODE. */
3259 static bool
3260 simple_integer_narrowing (tree vectype_out, tree vectype_in,
3261 tree_code *convert_code)
3263 if (!INTEGRAL_TYPE_P (TREE_TYPE (vectype_out))
3264 || !INTEGRAL_TYPE_P (TREE_TYPE (vectype_in)))
3265 return false;
3267 tree_code code;
3268 int multi_step_cvt = 0;
3269 auto_vec <tree, 8> interm_types;
3270 if (!supportable_narrowing_operation (NOP_EXPR, vectype_out, vectype_in,
3271 &code, &multi_step_cvt, &interm_types)
3272 || multi_step_cvt)
3273 return false;
3275 *convert_code = code;
3276 return true;
3279 /* Function vectorizable_call.
3281 Check if STMT_INFO performs a function call that can be vectorized.
3282 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
3283 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3284 Return true if STMT_INFO is vectorizable in this way. */
3286 static bool
3287 vectorizable_call (vec_info *vinfo,
3288 stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
3289 gimple **vec_stmt, slp_tree slp_node,
3290 stmt_vector_for_cost *cost_vec)
3292 gcall *stmt;
3293 tree vec_dest;
3294 tree scalar_dest;
3295 tree op;
3296 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
3297 tree vectype_out, vectype_in;
3298 poly_uint64 nunits_in;
3299 poly_uint64 nunits_out;
3300 loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
3301 bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
3302 tree fndecl, new_temp, rhs_type;
3303 enum vect_def_type dt[4]
3304 = { vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type,
3305 vect_unknown_def_type };
3306 tree vectypes[ARRAY_SIZE (dt)] = {};
3307 slp_tree slp_op[ARRAY_SIZE (dt)] = {};
3308 int ndts = ARRAY_SIZE (dt);
3309 int ncopies, j;
3310 auto_vec<tree, 8> vargs;
3311 enum { NARROW, NONE, WIDEN } modifier;
3312 size_t i, nargs;
3313 tree lhs;
3315 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3316 return false;
3318 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
3319 && ! vec_stmt)
3320 return false;
3322 /* Is STMT_INFO a vectorizable call? */
3323 stmt = dyn_cast <gcall *> (stmt_info->stmt);
3324 if (!stmt)
3325 return false;
3327 if (gimple_call_internal_p (stmt)
3328 && (internal_load_fn_p (gimple_call_internal_fn (stmt))
3329 || internal_store_fn_p (gimple_call_internal_fn (stmt))))
3330 /* Handled by vectorizable_load and vectorizable_store. */
3331 return false;
3333 if (gimple_call_lhs (stmt) == NULL_TREE
3334 || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
3335 return false;
3337 gcc_checking_assert (!stmt_can_throw_internal (cfun, stmt));
3339 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3341 /* Process function arguments. */
3342 rhs_type = NULL_TREE;
3343 vectype_in = NULL_TREE;
3344 nargs = gimple_call_num_args (stmt);
3346 /* Bail out if the function has more than four arguments; we do not have
3347 interesting builtin functions to vectorize with more than two arguments
3348 except for fma. Having no arguments is also not good. */
3349 if (nargs == 0 || nargs > 4)
3350 return false;
3352 /* Ignore the arguments of IFN_GOMP_SIMD_LANE, they are magic. */
3353 combined_fn cfn = gimple_call_combined_fn (stmt);
3354 if (cfn == CFN_GOMP_SIMD_LANE)
3356 nargs = 0;
3357 rhs_type = unsigned_type_node;
3360 int mask_opno = -1;
3361 if (internal_fn_p (cfn))
3362 mask_opno = internal_fn_mask_index (as_internal_fn (cfn));
3364 for (i = 0; i < nargs; i++)
3366 if ((int) i == mask_opno)
3368 if (!vect_check_scalar_mask (vinfo, stmt_info, slp_node, mask_opno,
3369 &op, &slp_op[i], &dt[i], &vectypes[i]))
3370 return false;
3371 continue;
3374 if (!vect_is_simple_use (vinfo, stmt_info, slp_node,
3375 i, &op, &slp_op[i], &dt[i], &vectypes[i]))
3377 if (dump_enabled_p ())
3378 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3379 "use not simple.\n");
3380 return false;
3383 /* We can only handle calls with arguments of the same type. */
3384 if (rhs_type
3385 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
3387 if (dump_enabled_p ())
3388 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3389 "argument types differ.\n");
3390 return false;
3392 if (!rhs_type)
3393 rhs_type = TREE_TYPE (op);
3395 if (!vectype_in)
3396 vectype_in = vectypes[i];
3397 else if (vectypes[i]
3398 && !types_compatible_p (vectypes[i], vectype_in))
3400 if (dump_enabled_p ())
3401 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3402 "argument vector types differ.\n");
3403 return false;
3406 /* If all arguments are external or constant defs, infer the vector type
3407 from the scalar type. */
3408 if (!vectype_in)
3409 vectype_in = get_vectype_for_scalar_type (vinfo, rhs_type, slp_node);
3410 if (vec_stmt)
3411 gcc_assert (vectype_in);
3412 if (!vectype_in)
3414 if (dump_enabled_p ())
3415 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3416 "no vectype for scalar type %T\n", rhs_type);
3418 return false;
3420 /* FORNOW: we don't yet support mixtures of vector sizes for calls,
3421 just mixtures of nunits. E.g. DI->SI versions of __builtin_ctz*
3422 are traditionally vectorized as two VnDI->VnDI IFN_CTZs followed
3423 by a pack of the two vectors into an SI vector. We would need
3424 separate code to handle direct VnDI->VnSI IFN_CTZs. */
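/* For illustration (operand names below are made up), on a target with
   128-bit vectors the supported nunits-mixing form for a DImode -> SImode
   ctz is roughly:
     vect_a = .CTZ (vect_in_0);                        // V2DI -> V2DI
     vect_b = .CTZ (vect_in_1);                        // V2DI -> V2DI
     vect_res = VEC_PACK_TRUNC_EXPR <vect_a, vect_b>;  // two V2DI -> V4SI
   whereas a direct V2DI -> V2SI IFN_CTZ would need the separate code
   mentioned above. */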
3425 if (TYPE_SIZE (vectype_in) != TYPE_SIZE (vectype_out))
3427 if (dump_enabled_p ())
3428 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3429 "mismatched vector sizes %T and %T\n",
3430 vectype_in, vectype_out);
3431 return false;
3434 if (VECTOR_BOOLEAN_TYPE_P (vectype_out)
3435 != VECTOR_BOOLEAN_TYPE_P (vectype_in))
3437 if (dump_enabled_p ())
3438 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3439 "mixed mask and nonmask vector types\n");
3440 return false;
3443 if (vect_emulated_vector_p (vectype_in) || vect_emulated_vector_p (vectype_out))
3445 if (dump_enabled_p ())
3446 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3447 "use emulated vector type for call\n");
3448 return false;
3451 /* FORNOW */
3452 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
3453 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
3454 if (known_eq (nunits_in * 2, nunits_out))
3455 modifier = NARROW;
3456 else if (known_eq (nunits_out, nunits_in))
3457 modifier = NONE;
3458 else if (known_eq (nunits_out * 2, nunits_in))
3459 modifier = WIDEN;
3460 else
3461 return false;
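/* For example, a call with V4SI operands producing a V8HI result has
   nunits_in == 4 and nunits_out == 8 and is classified as NARROW; the
   reverse (V8HI operands, V4SI result) gives WIDEN, and matching unit
   counts give NONE. */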
3463 /* We only handle functions that do not read or clobber memory. */
3464 if (gimple_vuse (stmt))
3466 if (dump_enabled_p ())
3467 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3468 "function reads from or writes to memory.\n");
3469 return false;
3472 /* For now, we only vectorize functions if a target-specific builtin
3473 is available. TODO -- in some cases, it might be profitable to
3474 insert the calls for pieces of the vector, in order to be able
3475 to vectorize other operations in the loop. */
3476 fndecl = NULL_TREE;
3477 internal_fn ifn = IFN_LAST;
3478 tree callee = gimple_call_fndecl (stmt);
3480 /* First try using an internal function. */
3481 tree_code convert_code = ERROR_MARK;
3482 if (cfn != CFN_LAST
3483 && (modifier == NONE
3484 || (modifier == NARROW
3485 && simple_integer_narrowing (vectype_out, vectype_in,
3486 &convert_code))))
3487 ifn = vectorizable_internal_function (cfn, callee, vectype_out,
3488 vectype_in);
3490 /* If that fails, try asking for a target-specific built-in function. */
3491 if (ifn == IFN_LAST)
3493 if (cfn != CFN_LAST)
3494 fndecl = targetm.vectorize.builtin_vectorized_function
3495 (cfn, vectype_out, vectype_in);
3496 else if (callee && fndecl_built_in_p (callee, BUILT_IN_MD))
3497 fndecl = targetm.vectorize.builtin_md_vectorized_function
3498 (callee, vectype_out, vectype_in);
3501 if (ifn == IFN_LAST && !fndecl)
3503 if (cfn == CFN_GOMP_SIMD_LANE
3504 && !slp_node
3505 && loop_vinfo
3506 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
3507 && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
3508 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
3509 == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
3511 /* We can handle IFN_GOMP_SIMD_LANE by returning a
3512 { 0, 1, 2, ... vf - 1 } vector. */
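/* For instance, with a vectorization factor of 8 and a V4SI vectype, the
   two copies J == 0 and J == 1 are materialized in the transform phase
   below via build_index_vector as the constant vectors { 0, 1, 2, 3 }
   and { 4, 5, 6, 7 }. */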
3513 gcc_assert (nargs == 0);
3515 else if (modifier == NONE
3516 && (gimple_call_builtin_p (stmt, BUILT_IN_BSWAP16)
3517 || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP32)
3518 || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP64)
3519 || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP128)))
3520 return vectorizable_bswap (vinfo, stmt_info, gsi, vec_stmt, slp_node,
3521 slp_op, vectype_in, cost_vec);
3522 else
3524 if (dump_enabled_p ())
3525 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3526 "function is not vectorizable.\n");
3527 return false;
3531 if (slp_node)
3532 ncopies = 1;
3533 else if (modifier == NARROW && ifn == IFN_LAST)
3534 ncopies = vect_get_num_copies (loop_vinfo, vectype_out);
3535 else
3536 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
3538 /* Sanity check: make sure that at least one copy of the vectorized stmt
3539 needs to be generated. */
3540 gcc_assert (ncopies >= 1);
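/* E.g. a loop with vectorization factor 8 and a V4SI vectype_in needs
   ncopies == 2, i.e. two vector calls replace each scalar call in the
   loop body, one per group of four lanes. */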
3542 int reduc_idx = STMT_VINFO_REDUC_IDX (stmt_info);
3543 internal_fn cond_fn = get_conditional_internal_fn (ifn);
3544 vec_loop_masks *masks = (loop_vinfo ? &LOOP_VINFO_MASKS (loop_vinfo) : NULL);
3545 if (!vec_stmt) /* transformation not required. */
3547 if (slp_node)
3548 for (i = 0; i < nargs; ++i)
3549 if (!vect_maybe_update_slp_op_vectype (slp_op[i],
3550 vectypes[i]
3551 ? vectypes[i] : vectype_in))
3553 if (dump_enabled_p ())
3554 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3555 "incompatible vector types for invariants\n");
3556 return false;
3558 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
3559 DUMP_VECT_SCOPE ("vectorizable_call");
3560 vect_model_simple_cost (vinfo, stmt_info,
3561 ncopies, dt, ndts, slp_node, cost_vec);
3562 if (ifn != IFN_LAST && modifier == NARROW && !slp_node)
3563 record_stmt_cost (cost_vec, ncopies / 2,
3564 vec_promote_demote, stmt_info, 0, vect_body);
3566 if (loop_vinfo
3567 && LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo)
3568 && (reduc_idx >= 0 || mask_opno >= 0))
3570 if (reduc_idx >= 0
3571 && (cond_fn == IFN_LAST
3572 || !direct_internal_fn_supported_p (cond_fn, vectype_out,
3573 OPTIMIZE_FOR_SPEED)))
3575 if (dump_enabled_p ())
3576 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3577 "can't use a fully-masked loop because no"
3578 " conditional operation is available.\n");
3579 LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) = false;
3581 else
3583 unsigned int nvectors
3584 = (slp_node
3585 ? SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node)
3586 : ncopies);
3587 tree scalar_mask = NULL_TREE;
3588 if (mask_opno >= 0)
3589 scalar_mask = gimple_call_arg (stmt_info->stmt, mask_opno);
3590 vect_record_loop_mask (loop_vinfo, masks, nvectors,
3591 vectype_out, scalar_mask);
3594 return true;
3597 /* Transform. */
3599 if (dump_enabled_p ())
3600 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
3602 /* Handle def. */
3603 scalar_dest = gimple_call_lhs (stmt);
3604 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
3606 bool masked_loop_p = loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
3607 unsigned int vect_nargs = nargs;
3608 if (masked_loop_p && reduc_idx >= 0)
3610 ifn = cond_fn;
3611 vect_nargs += 2;
3614 if (modifier == NONE || ifn != IFN_LAST)
3616 tree prev_res = NULL_TREE;
3617 vargs.safe_grow (vect_nargs, true);
3618 auto_vec<vec<tree> > vec_defs (nargs);
3619 for (j = 0; j < ncopies; ++j)
3621 /* Build argument list for the vectorized call. */
3622 if (slp_node)
3624 vec<tree> vec_oprnds0;
3626 vect_get_slp_defs (vinfo, slp_node, &vec_defs);
3627 vec_oprnds0 = vec_defs[0];
3629 /* Arguments are ready. Create the new vector stmt. */
3630 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
3632 int varg = 0;
3633 if (masked_loop_p && reduc_idx >= 0)
3635 unsigned int vec_num = vec_oprnds0.length ();
3636 /* Always true for SLP. */
3637 gcc_assert (ncopies == 1);
3638 vargs[varg++] = vect_get_loop_mask (gsi, masks, vec_num,
3639 vectype_out, i);
3641 size_t k;
3642 for (k = 0; k < nargs; k++)
3644 vec<tree> vec_oprndsk = vec_defs[k];
3645 vargs[varg++] = vec_oprndsk[i];
3647 if (masked_loop_p && reduc_idx >= 0)
3648 vargs[varg++] = vargs[reduc_idx + 1];
3649 gimple *new_stmt;
3650 if (modifier == NARROW)
3652 /* We don't define any narrowing conditional functions
3653 at present. */
3654 gcc_assert (mask_opno < 0);
3655 tree half_res = make_ssa_name (vectype_in);
3656 gcall *call
3657 = gimple_build_call_internal_vec (ifn, vargs);
3658 gimple_call_set_lhs (call, half_res);
3659 gimple_call_set_nothrow (call, true);
3660 vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
3661 if ((i & 1) == 0)
3663 prev_res = half_res;
3664 continue;
3666 new_temp = make_ssa_name (vec_dest);
3667 new_stmt = gimple_build_assign (new_temp, convert_code,
3668 prev_res, half_res);
3669 vect_finish_stmt_generation (vinfo, stmt_info,
3670 new_stmt, gsi);
3672 else
3674 if (mask_opno >= 0 && masked_loop_p)
3676 unsigned int vec_num = vec_oprnds0.length ();
3677 /* Always true for SLP. */
3678 gcc_assert (ncopies == 1);
3679 tree mask = vect_get_loop_mask (gsi, masks, vec_num,
3680 vectype_out, i);
3681 vargs[mask_opno] = prepare_vec_mask
3682 (loop_vinfo, TREE_TYPE (mask), mask,
3683 vargs[mask_opno], gsi);
3686 gcall *call;
3687 if (ifn != IFN_LAST)
3688 call = gimple_build_call_internal_vec (ifn, vargs);
3689 else
3690 call = gimple_build_call_vec (fndecl, vargs);
3691 new_temp = make_ssa_name (vec_dest, call);
3692 gimple_call_set_lhs (call, new_temp);
3693 gimple_call_set_nothrow (call, true);
3694 vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
3695 new_stmt = call;
3697 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3699 continue;
3702 int varg = 0;
3703 if (masked_loop_p && reduc_idx >= 0)
3704 vargs[varg++] = vect_get_loop_mask (gsi, masks, ncopies,
3705 vectype_out, j);
3706 for (i = 0; i < nargs; i++)
3708 op = gimple_call_arg (stmt, i);
3709 if (j == 0)
3711 vec_defs.quick_push (vNULL);
3712 vect_get_vec_defs_for_operand (vinfo, stmt_info, ncopies,
3713 op, &vec_defs[i],
3714 vectypes[i]);
3716 vargs[varg++] = vec_defs[i][j];
3718 if (masked_loop_p && reduc_idx >= 0)
3719 vargs[varg++] = vargs[reduc_idx + 1];
3721 if (mask_opno >= 0 && masked_loop_p)
3723 tree mask = vect_get_loop_mask (gsi, masks, ncopies,
3724 vectype_out, j);
3725 vargs[mask_opno]
3726 = prepare_vec_mask (loop_vinfo, TREE_TYPE (mask), mask,
3727 vargs[mask_opno], gsi);
3730 gimple *new_stmt;
3731 if (cfn == CFN_GOMP_SIMD_LANE)
3733 tree cst = build_index_vector (vectype_out, j * nunits_out, 1);
3734 tree new_var
3735 = vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_");
3736 gimple *init_stmt = gimple_build_assign (new_var, cst);
3737 vect_init_vector_1 (vinfo, stmt_info, init_stmt, NULL);
3738 new_temp = make_ssa_name (vec_dest);
3739 new_stmt = gimple_build_assign (new_temp, new_var);
3740 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
3742 else if (modifier == NARROW)
3744 /* We don't define any narrowing conditional functions at
3745 present. */
3746 gcc_assert (mask_opno < 0);
3747 tree half_res = make_ssa_name (vectype_in);
3748 gcall *call = gimple_build_call_internal_vec (ifn, vargs);
3749 gimple_call_set_lhs (call, half_res);
3750 gimple_call_set_nothrow (call, true);
3751 vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
3752 if ((j & 1) == 0)
3754 prev_res = half_res;
3755 continue;
3757 new_temp = make_ssa_name (vec_dest);
3758 new_stmt = gimple_build_assign (new_temp, convert_code,
3759 prev_res, half_res);
3760 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
3762 else
3764 gcall *call;
3765 if (ifn != IFN_LAST)
3766 call = gimple_build_call_internal_vec (ifn, vargs);
3767 else
3768 call = gimple_build_call_vec (fndecl, vargs);
3769 new_temp = make_ssa_name (vec_dest, call);
3770 gimple_call_set_lhs (call, new_temp);
3771 gimple_call_set_nothrow (call, true);
3772 vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
3773 new_stmt = call;
3776 if (j == (modifier == NARROW ? 1 : 0))
3777 *vec_stmt = new_stmt;
3778 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
3780 for (i = 0; i < nargs; i++)
3782 vec<tree> vec_oprndsi = vec_defs[i];
3783 vec_oprndsi.release ();
3786 else if (modifier == NARROW)
3788 auto_vec<vec<tree> > vec_defs (nargs);
3789 /* We don't define any narrowing conditional functions at present. */
3790 gcc_assert (mask_opno < 0);
3791 for (j = 0; j < ncopies; ++j)
3793 /* Build argument list for the vectorized call. */
3794 if (j == 0)
3795 vargs.create (nargs * 2);
3796 else
3797 vargs.truncate (0);
3799 if (slp_node)
3801 vec<tree> vec_oprnds0;
3803 vect_get_slp_defs (vinfo, slp_node, &vec_defs);
3804 vec_oprnds0 = vec_defs[0];
3806 /* Arguments are ready. Create the new vector stmt. */
3807 for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
3809 size_t k;
3810 vargs.truncate (0);
3811 for (k = 0; k < nargs; k++)
3813 vec<tree> vec_oprndsk = vec_defs[k];
3814 vargs.quick_push (vec_oprndsk[i]);
3815 vargs.quick_push (vec_oprndsk[i + 1]);
3817 gcall *call;
3818 if (ifn != IFN_LAST)
3819 call = gimple_build_call_internal_vec (ifn, vargs);
3820 else
3821 call = gimple_build_call_vec (fndecl, vargs);
3822 new_temp = make_ssa_name (vec_dest, call);
3823 gimple_call_set_lhs (call, new_temp);
3824 gimple_call_set_nothrow (call, true);
3825 vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
3826 SLP_TREE_VEC_STMTS (slp_node).quick_push (call);
3828 continue;
3831 for (i = 0; i < nargs; i++)
3833 op = gimple_call_arg (stmt, i);
3834 if (j == 0)
3836 vec_defs.quick_push (vNULL);
3837 vect_get_vec_defs_for_operand (vinfo, stmt_info, 2 * ncopies,
3838 op, &vec_defs[i], vectypes[i]);
3840 vec_oprnd0 = vec_defs[i][2*j];
3841 vec_oprnd1 = vec_defs[i][2*j+1];
3843 vargs.quick_push (vec_oprnd0);
3844 vargs.quick_push (vec_oprnd1);
3847 gcall *new_stmt = gimple_build_call_vec (fndecl, vargs);
3848 new_temp = make_ssa_name (vec_dest, new_stmt);
3849 gimple_call_set_lhs (new_stmt, new_temp);
3850 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
3852 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
3855 if (!slp_node)
3856 *vec_stmt = STMT_VINFO_VEC_STMTS (stmt_info)[0];
3858 for (i = 0; i < nargs; i++)
3860 vec<tree> vec_oprndsi = vec_defs[i];
3861 vec_oprndsi.release ();
3864 else
3865 /* No current target implements this case. */
3866 return false;
3868 vargs.release ();
3870 /* The call in STMT might prevent it from being removed in DCE.
3871 We cannot, however, remove it here, because of the way the SSA name
3872 it defines is mapped to the new definition. So just replace the
3873 rhs of the statement with something harmless. */
3875 if (slp_node)
3876 return true;
3878 stmt_info = vect_orig_stmt (stmt_info);
3879 lhs = gimple_get_lhs (stmt_info->stmt);
3881 gassign *new_stmt
3882 = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
3883 vinfo->replace_stmt (gsi, stmt_info, new_stmt);
3885 return true;
3889 struct simd_call_arg_info
3891 tree vectype;
3892 tree op;
3893 HOST_WIDE_INT linear_step;
3894 enum vect_def_type dt;
3895 unsigned int align;
3896 bool simd_lane_linear;
3899 /* Helper function of vectorizable_simd_clone_call. If OP, an SSA_NAME,
3900 is linear within a simd lane (but not within the whole loop), note it in
3901 *ARGINFO. */
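/* A typical match (the SSA names below are illustrative) is the address
   computation generated for an OpenMP simd-private array element:
     _1 = .GOMP_SIMD_LANE (simduid.0_7);
     _2 = _1 * 4;
     op_3 = &D.1234 + _2;
   which is recorded as base &D.1234 with linear_step 4. */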
3903 static void
3904 vect_simd_lane_linear (tree op, class loop *loop,
3905 struct simd_call_arg_info *arginfo)
3907 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
3909 if (!is_gimple_assign (def_stmt)
3910 || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
3911 || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
3912 return;
3914 tree base = gimple_assign_rhs1 (def_stmt);
3915 HOST_WIDE_INT linear_step = 0;
3916 tree v = gimple_assign_rhs2 (def_stmt);
3917 while (TREE_CODE (v) == SSA_NAME)
3919 tree t;
3920 def_stmt = SSA_NAME_DEF_STMT (v);
3921 if (is_gimple_assign (def_stmt))
3922 switch (gimple_assign_rhs_code (def_stmt))
3924 case PLUS_EXPR:
3925 t = gimple_assign_rhs2 (def_stmt);
3926 if (linear_step || TREE_CODE (t) != INTEGER_CST)
3927 return;
3928 base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
3929 v = gimple_assign_rhs1 (def_stmt);
3930 continue;
3931 case MULT_EXPR:
3932 t = gimple_assign_rhs2 (def_stmt);
3933 if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
3934 return;
3935 linear_step = tree_to_shwi (t);
3936 v = gimple_assign_rhs1 (def_stmt);
3937 continue;
3938 CASE_CONVERT:
3939 t = gimple_assign_rhs1 (def_stmt);
3940 if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
3941 || (TYPE_PRECISION (TREE_TYPE (v))
3942 < TYPE_PRECISION (TREE_TYPE (t))))
3943 return;
3944 if (!linear_step)
3945 linear_step = 1;
3946 v = t;
3947 continue;
3948 default:
3949 return;
3951 else if (gimple_call_internal_p (def_stmt, IFN_GOMP_SIMD_LANE)
3952 && loop->simduid
3953 && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
3954 && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
3955 == loop->simduid))
3957 if (!linear_step)
3958 linear_step = 1;
3959 arginfo->linear_step = linear_step;
3960 arginfo->op = base;
3961 arginfo->simd_lane_linear = true;
3962 return;
3967 /* Return the number of elements in vector type VECTYPE, which is associated
3968 with a SIMD clone. At present these vectors always have a constant
3969 length. */
3971 static unsigned HOST_WIDE_INT
3972 simd_clone_subparts (tree vectype)
3974 return TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
3977 /* Function vectorizable_simd_clone_call.
3979 Check if STMT_INFO performs a function call that can be vectorized
3980 by calling a simd clone of the function.
3981 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
3982 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3983 Return true if STMT_INFO is vectorizable in this way. */
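/* For instance, a call to a function declared with
   "#pragma omp declare simd" inside a vectorizable loop can be replaced
   by calls to one of its SIMD clones (say, a clone with simdlen 4 that
   takes a V4SI argument instead of an int); the clone is picked from
   node->simd_clones by the badness heuristic further down. */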
3985 static bool
3986 vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info,
3987 gimple_stmt_iterator *gsi,
3988 gimple **vec_stmt, slp_tree slp_node,
3989 stmt_vector_for_cost *)
3991 tree vec_dest;
3992 tree scalar_dest;
3993 tree op, type;
3994 tree vec_oprnd0 = NULL_TREE;
3995 tree vectype;
3996 poly_uint64 nunits;
3997 loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
3998 bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
3999 class loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
4000 tree fndecl, new_temp;
4001 int ncopies, j;
4002 auto_vec<simd_call_arg_info> arginfo;
4003 vec<tree> vargs = vNULL;
4004 size_t i, nargs;
4005 tree lhs, rtype, ratype;
4006 vec<constructor_elt, va_gc> *ret_ctor_elts = NULL;
4007 int arg_offset = 0;
4009 /* Is STMT a vectorizable call? */
4010 gcall *stmt = dyn_cast <gcall *> (stmt_info->stmt);
4011 if (!stmt)
4012 return false;
4014 fndecl = gimple_call_fndecl (stmt);
4015 if (fndecl == NULL_TREE
4016 && gimple_call_internal_p (stmt, IFN_MASK_CALL))
4018 fndecl = gimple_call_arg (stmt, 0);
4019 gcc_checking_assert (TREE_CODE (fndecl) == ADDR_EXPR);
4020 fndecl = TREE_OPERAND (fndecl, 0);
4021 gcc_checking_assert (TREE_CODE (fndecl) == FUNCTION_DECL);
4022 arg_offset = 1;
4024 if (fndecl == NULL_TREE)
4025 return false;
4027 struct cgraph_node *node = cgraph_node::get (fndecl);
4028 if (node == NULL || node->simd_clones == NULL)
4029 return false;
4031 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4032 return false;
4034 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4035 && ! vec_stmt)
4036 return false;
4038 if (gimple_call_lhs (stmt)
4039 && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
4040 return false;
4042 gcc_checking_assert (!stmt_can_throw_internal (cfun, stmt));
4044 vectype = STMT_VINFO_VECTYPE (stmt_info);
4046 if (loop_vinfo && nested_in_vect_loop_p (loop, stmt_info))
4047 return false;
4049 /* FORNOW */
4050 if (slp_node)
4051 return false;
4053 /* Process function arguments. */
4054 nargs = gimple_call_num_args (stmt) - arg_offset;
4056 /* Bail out if the function has zero arguments. */
4057 if (nargs == 0)
4058 return false;
4060 arginfo.reserve (nargs, true);
4062 for (i = 0; i < nargs; i++)
4064 simd_call_arg_info thisarginfo;
4065 affine_iv iv;
4067 thisarginfo.linear_step = 0;
4068 thisarginfo.align = 0;
4069 thisarginfo.op = NULL_TREE;
4070 thisarginfo.simd_lane_linear = false;
4072 op = gimple_call_arg (stmt, i + arg_offset);
4073 if (!vect_is_simple_use (op, vinfo, &thisarginfo.dt,
4074 &thisarginfo.vectype)
4075 || thisarginfo.dt == vect_uninitialized_def)
4077 if (dump_enabled_p ())
4078 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4079 "use not simple.\n");
4080 return false;
4083 if (thisarginfo.dt == vect_constant_def
4084 || thisarginfo.dt == vect_external_def)
4085 gcc_assert (thisarginfo.vectype == NULL_TREE);
4086 else
4087 gcc_assert (thisarginfo.vectype != NULL_TREE);
4089 /* For linear arguments, the analysis phase should have saved
4090 the base and step in STMT_VINFO_SIMD_CLONE_INFO. */
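/* As laid out by the code below, that vector holds the chosen clone's
   decl in slot 0 and, for each such argument I, a (base, step,
   simd-lane-linear flag) triple in slots I*3+1 .. I*3+3. */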
4091 if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
4092 && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
4094 gcc_assert (vec_stmt);
4095 thisarginfo.linear_step
4096 = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
4097 thisarginfo.op
4098 = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
4099 thisarginfo.simd_lane_linear
4100 = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
4101 == boolean_true_node);
4102 /* If the loop has been peeled for alignment, we need to adjust it. */
4103 tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
4104 tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
4105 if (n1 != n2 && !thisarginfo.simd_lane_linear)
4107 tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
4108 tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
4109 tree opt = TREE_TYPE (thisarginfo.op);
4110 bias = fold_convert (TREE_TYPE (step), bias);
4111 bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
4112 thisarginfo.op
4113 = fold_build2 (POINTER_TYPE_P (opt)
4114 ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
4115 thisarginfo.op, bias);
4118 else if (!vec_stmt
4119 && thisarginfo.dt != vect_constant_def
4120 && thisarginfo.dt != vect_external_def
4121 && loop_vinfo
4122 && TREE_CODE (op) == SSA_NAME
4123 && simple_iv (loop, loop_containing_stmt (stmt), op,
4124 &iv, false)
4125 && tree_fits_shwi_p (iv.step))
4127 thisarginfo.linear_step = tree_to_shwi (iv.step);
4128 thisarginfo.op = iv.base;
4130 else if ((thisarginfo.dt == vect_constant_def
4131 || thisarginfo.dt == vect_external_def)
4132 && POINTER_TYPE_P (TREE_TYPE (op)))
4133 thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
4134 /* Addresses of array elements indexed by GOMP_SIMD_LANE are
4135 linear too. */
4136 if (POINTER_TYPE_P (TREE_TYPE (op))
4137 && !thisarginfo.linear_step
4138 && !vec_stmt
4139 && thisarginfo.dt != vect_constant_def
4140 && thisarginfo.dt != vect_external_def
4141 && loop_vinfo
4142 && !slp_node
4143 && TREE_CODE (op) == SSA_NAME)
4144 vect_simd_lane_linear (op, loop, &thisarginfo);
4146 arginfo.quick_push (thisarginfo);
4149 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4150 if (!vf.is_constant ())
4152 if (dump_enabled_p ())
4153 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4154 "not considering SIMD clones; not yet supported"
4155 " for variable-width vectors.\n");
4156 return false;
4159 unsigned int badness = 0;
4160 struct cgraph_node *bestn = NULL;
4161 if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
4162 bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
4163 else
4164 for (struct cgraph_node *n = node->simd_clones; n != NULL;
4165 n = n->simdclone->next_clone)
4167 unsigned int this_badness = 0;
4168 unsigned int num_calls;
4169 if (!constant_multiple_p (vf, n->simdclone->simdlen, &num_calls)
4170 || n->simdclone->nargs != nargs)
4171 continue;
4172 if (num_calls != 1)
4173 this_badness += exact_log2 (num_calls) * 4096;
4174 if (n->simdclone->inbranch)
4175 this_badness += 8192;
4176 int target_badness = targetm.simd_clone.usable (n);
4177 if (target_badness < 0)
4178 continue;
4179 this_badness += target_badness * 512;
4180 for (i = 0; i < nargs; i++)
4182 switch (n->simdclone->args[i].arg_type)
4184 case SIMD_CLONE_ARG_TYPE_VECTOR:
4185 if (!useless_type_conversion_p
4186 (n->simdclone->args[i].orig_type,
4187 TREE_TYPE (gimple_call_arg (stmt, i + arg_offset))))
4188 i = -1;
4189 else if (arginfo[i].dt == vect_constant_def
4190 || arginfo[i].dt == vect_external_def
4191 || arginfo[i].linear_step)
4192 this_badness += 64;
4193 break;
4194 case SIMD_CLONE_ARG_TYPE_UNIFORM:
4195 if (arginfo[i].dt != vect_constant_def
4196 && arginfo[i].dt != vect_external_def)
4197 i = -1;
4198 break;
4199 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
4200 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
4201 if (arginfo[i].dt == vect_constant_def
4202 || arginfo[i].dt == vect_external_def
4203 || (arginfo[i].linear_step
4204 != n->simdclone->args[i].linear_step))
4205 i = -1;
4206 break;
4207 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
4208 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
4209 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
4210 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
4211 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
4212 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
4213 /* FORNOW */
4214 i = -1;
4215 break;
4216 case SIMD_CLONE_ARG_TYPE_MASK:
4217 break;
4219 if (i == (size_t) -1)
4220 break;
4221 if (n->simdclone->args[i].alignment > arginfo[i].align)
4223 i = -1;
4224 break;
4226 if (arginfo[i].align)
4227 this_badness += (exact_log2 (arginfo[i].align)
4228 - exact_log2 (n->simdclone->args[i].alignment));
4230 if (i == (size_t) -1)
4231 continue;
4232 if (bestn == NULL || this_badness < badness)
4234 bestn = n;
4235 badness = this_badness;
4239 if (bestn == NULL)
4240 return false;
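/* As a rough example of the scoring above: with a vectorization factor
   of 8, a clone with simdlen 4 needs num_calls == 2 and is charged
   exact_log2 (2) * 4096 == 4096, an inbranch clone a further 8192 and
   each vector argument that is invariant or linear another 64, so an
   otherwise equivalent simdlen 8 clone is normally preferred. */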
4242 for (i = 0; i < nargs; i++)
4244 if ((arginfo[i].dt == vect_constant_def
4245 || arginfo[i].dt == vect_external_def)
4246 && bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
4248 tree arg_type = TREE_TYPE (gimple_call_arg (stmt, i + arg_offset));
4249 arginfo[i].vectype = get_vectype_for_scalar_type (vinfo, arg_type,
4250 slp_node);
4251 if (arginfo[i].vectype == NULL
4252 || !constant_multiple_p (bestn->simdclone->simdlen,
4253 simd_clone_subparts (arginfo[i].vectype)))
4254 return false;
4257 if (bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR
4258 && VECTOR_BOOLEAN_TYPE_P (bestn->simdclone->args[i].vector_type))
4260 if (dump_enabled_p ())
4261 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4262 "vector mask arguments are not supported.\n");
4263 return false;
4266 if (bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_MASK
4267 && bestn->simdclone->mask_mode == VOIDmode
4268 && (simd_clone_subparts (bestn->simdclone->args[i].vector_type)
4269 != simd_clone_subparts (arginfo[i].vectype)))
4271 /* FORNOW we only have partial support for vector-type masks that
4272 can't hold all of simdlen. */
4273 if (dump_enabled_p ())
4274 dump_printf_loc (MSG_MISSED_OPTIMIZATION,
4275 vect_location,
4276 "in-branch vector clones are not yet"
4277 " supported for mismatched vector sizes.\n");
4278 return false;
4280 if (bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_MASK
4281 && bestn->simdclone->mask_mode != VOIDmode)
4283 /* FORNOW don't support integer-type masks. */
4284 if (dump_enabled_p ())
4285 dump_printf_loc (MSG_MISSED_OPTIMIZATION,
4286 vect_location,
4287 "in-branch vector clones are not yet"
4288 " supported for integer mask modes.\n");
4289 return false;
4293 fndecl = bestn->decl;
4294 nunits = bestn->simdclone->simdlen;
4295 ncopies = vector_unroll_factor (vf, nunits);
4297 /* If the function isn't const, only allow it in simd loops where the user
4298 has asserted that at least nunits consecutive iterations can be
4299 performed using SIMD instructions. */
4300 if ((loop == NULL || maybe_lt ((unsigned) loop->safelen, nunits))
4301 && gimple_vuse (stmt))
4302 return false;
4304 /* Sanity check: make sure that at least one copy of the vectorized stmt
4305 needs to be generated. */
4306 gcc_assert (ncopies >= 1);
4308 if (!vec_stmt) /* transformation not required. */
4310 /* When the original call is pure or const but the SIMD ABI dictates
4311 an aggregate return we will have to use a virtual definition and
4312 in a loop eventually even need to add a virtual PHI. That is
4313 not straightforward, so allow this to be fixed up via renaming. */
4314 if (gimple_call_lhs (stmt)
4315 && !gimple_vdef (stmt)
4316 && TREE_CODE (TREE_TYPE (TREE_TYPE (bestn->decl))) == ARRAY_TYPE)
4317 vinfo->any_known_not_updated_vssa = true;
4318 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
4319 for (i = 0; i < nargs; i++)
4320 if ((bestn->simdclone->args[i].arg_type
4321 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
4322 || (bestn->simdclone->args[i].arg_type
4323 == SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP))
4325 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
4326 + 1,
4327 true);
4328 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
4329 tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
4330 ? size_type_node : TREE_TYPE (arginfo[i].op);
4331 tree ls = build_int_cst (lst, arginfo[i].linear_step);
4332 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
4333 tree sll = arginfo[i].simd_lane_linear
4334 ? boolean_true_node : boolean_false_node;
4335 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
4337 STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
4338 DUMP_VECT_SCOPE ("vectorizable_simd_clone_call");
4339 /* vect_model_simple_cost (vinfo, stmt_info, ncopies,
4340 dt, slp_node, cost_vec); */
4341 return true;
4344 /* Transform. */
4346 if (dump_enabled_p ())
4347 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
4349 /* Handle def. */
4350 scalar_dest = gimple_call_lhs (stmt);
4351 vec_dest = NULL_TREE;
4352 rtype = NULL_TREE;
4353 ratype = NULL_TREE;
4354 if (scalar_dest)
4356 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4357 rtype = TREE_TYPE (TREE_TYPE (fndecl));
4358 if (TREE_CODE (rtype) == ARRAY_TYPE)
4360 ratype = rtype;
4361 rtype = TREE_TYPE (ratype);
4365 auto_vec<vec<tree> > vec_oprnds;
4366 auto_vec<unsigned> vec_oprnds_i;
4367 vec_oprnds.safe_grow_cleared (nargs, true);
4368 vec_oprnds_i.safe_grow_cleared (nargs, true);
4369 for (j = 0; j < ncopies; ++j)
4371 /* Build argument list for the vectorized call. */
4372 if (j == 0)
4373 vargs.create (nargs);
4374 else
4375 vargs.truncate (0);
4377 for (i = 0; i < nargs; i++)
4379 unsigned int k, l, m, o;
4380 tree atype;
4381 op = gimple_call_arg (stmt, i + arg_offset);
4382 switch (bestn->simdclone->args[i].arg_type)
4384 case SIMD_CLONE_ARG_TYPE_VECTOR:
4385 atype = bestn->simdclone->args[i].vector_type;
4386 o = vector_unroll_factor (nunits,
4387 simd_clone_subparts (atype));
4388 for (m = j * o; m < (j + 1) * o; m++)
4390 if (simd_clone_subparts (atype)
4391 < simd_clone_subparts (arginfo[i].vectype))
4393 poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
4394 k = (simd_clone_subparts (arginfo[i].vectype)
4395 / simd_clone_subparts (atype));
4396 gcc_assert ((k & (k - 1)) == 0);
4397 if (m == 0)
4399 vect_get_vec_defs_for_operand (vinfo, stmt_info,
4400 ncopies * o / k, op,
4401 &vec_oprnds[i]);
4402 vec_oprnds_i[i] = 0;
4403 vec_oprnd0 = vec_oprnds[i][vec_oprnds_i[i]++];
4405 else
4407 vec_oprnd0 = arginfo[i].op;
4408 if ((m & (k - 1)) == 0)
4409 vec_oprnd0 = vec_oprnds[i][vec_oprnds_i[i]++];
4411 arginfo[i].op = vec_oprnd0;
4412 vec_oprnd0
4413 = build3 (BIT_FIELD_REF, atype, vec_oprnd0,
4414 bitsize_int (prec),
4415 bitsize_int ((m & (k - 1)) * prec));
4416 gassign *new_stmt
4417 = gimple_build_assign (make_ssa_name (atype),
4418 vec_oprnd0);
4419 vect_finish_stmt_generation (vinfo, stmt_info,
4420 new_stmt, gsi);
4421 vargs.safe_push (gimple_assign_lhs (new_stmt));
4423 else
4425 k = (simd_clone_subparts (atype)
4426 / simd_clone_subparts (arginfo[i].vectype));
4427 gcc_assert ((k & (k - 1)) == 0);
4428 vec<constructor_elt, va_gc> *ctor_elts;
4429 if (k != 1)
4430 vec_alloc (ctor_elts, k);
4431 else
4432 ctor_elts = NULL;
4433 for (l = 0; l < k; l++)
4435 if (m == 0 && l == 0)
4437 vect_get_vec_defs_for_operand (vinfo, stmt_info,
4438 k * o * ncopies,
4440 &vec_oprnds[i]);
4441 vec_oprnds_i[i] = 0;
4442 vec_oprnd0 = vec_oprnds[i][vec_oprnds_i[i]++];
4444 else
4445 vec_oprnd0 = vec_oprnds[i][vec_oprnds_i[i]++];
4446 arginfo[i].op = vec_oprnd0;
4447 if (k == 1)
4448 break;
4449 CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
4450 vec_oprnd0);
4452 if (k == 1)
4453 if (!useless_type_conversion_p (TREE_TYPE (vec_oprnd0),
4454 atype))
4456 vec_oprnd0
4457 = build1 (VIEW_CONVERT_EXPR, atype, vec_oprnd0);
4458 gassign *new_stmt
4459 = gimple_build_assign (make_ssa_name (atype),
4460 vec_oprnd0);
4461 vect_finish_stmt_generation (vinfo, stmt_info,
4462 new_stmt, gsi);
4463 vargs.safe_push (gimple_assign_lhs (new_stmt));
4465 else
4466 vargs.safe_push (vec_oprnd0);
4467 else
4469 vec_oprnd0 = build_constructor (atype, ctor_elts);
4470 gassign *new_stmt
4471 = gimple_build_assign (make_ssa_name (atype),
4472 vec_oprnd0);
4473 vect_finish_stmt_generation (vinfo, stmt_info,
4474 new_stmt, gsi);
4475 vargs.safe_push (gimple_assign_lhs (new_stmt));
4479 break;
4480 case SIMD_CLONE_ARG_TYPE_MASK:
4481 atype = bestn->simdclone->args[i].vector_type;
4482 if (bestn->simdclone->mask_mode != VOIDmode)
4484 /* FORNOW: this is disabled above. */
4485 gcc_unreachable ();
4487 else
4489 tree elt_type = TREE_TYPE (atype);
4490 tree one = fold_convert (elt_type, integer_one_node);
4491 tree zero = fold_convert (elt_type, integer_zero_node);
4492 o = vector_unroll_factor (nunits,
4493 simd_clone_subparts (atype));
4494 for (m = j * o; m < (j + 1) * o; m++)
4496 if (simd_clone_subparts (atype)
4497 < simd_clone_subparts (arginfo[i].vectype))
4499 /* The mask type has fewer elements than simdlen. */
4501 /* FORNOW */
4502 gcc_unreachable ();
4504 else if (simd_clone_subparts (atype)
4505 == simd_clone_subparts (arginfo[i].vectype))
4507 /* The SIMD clone function has the same number of
4508 elements as the current function. */
4509 if (m == 0)
4511 vect_get_vec_defs_for_operand (vinfo, stmt_info,
4512 o * ncopies,
4514 &vec_oprnds[i]);
4515 vec_oprnds_i[i] = 0;
4517 vec_oprnd0 = vec_oprnds[i][vec_oprnds_i[i]++];
4518 vec_oprnd0
4519 = build3 (VEC_COND_EXPR, atype, vec_oprnd0,
4520 build_vector_from_val (atype, one),
4521 build_vector_from_val (atype, zero));
4522 gassign *new_stmt
4523 = gimple_build_assign (make_ssa_name (atype),
4524 vec_oprnd0);
4525 vect_finish_stmt_generation (vinfo, stmt_info,
4526 new_stmt, gsi);
4527 vargs.safe_push (gimple_assign_lhs (new_stmt));
4529 else
4531 /* The mask type has more elements than simdlen. */
4533 /* FORNOW */
4534 gcc_unreachable ();
4538 break;
4539 case SIMD_CLONE_ARG_TYPE_UNIFORM:
4540 vargs.safe_push (op);
4541 break;
4542 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
4543 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
4544 if (j == 0)
4546 gimple_seq stmts;
4547 arginfo[i].op
4548 = force_gimple_operand (unshare_expr (arginfo[i].op),
4549 &stmts, true, NULL_TREE);
4550 if (stmts != NULL)
4552 basic_block new_bb;
4553 edge pe = loop_preheader_edge (loop);
4554 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
4555 gcc_assert (!new_bb);
4557 if (arginfo[i].simd_lane_linear)
4559 vargs.safe_push (arginfo[i].op);
4560 break;
4562 tree phi_res = copy_ssa_name (op);
4563 gphi *new_phi = create_phi_node (phi_res, loop->header);
4564 add_phi_arg (new_phi, arginfo[i].op,
4565 loop_preheader_edge (loop), UNKNOWN_LOCATION);
4566 enum tree_code code
4567 = POINTER_TYPE_P (TREE_TYPE (op))
4568 ? POINTER_PLUS_EXPR : PLUS_EXPR;
4569 tree type = POINTER_TYPE_P (TREE_TYPE (op))
4570 ? sizetype : TREE_TYPE (op);
4571 poly_widest_int cst
4572 = wi::mul (bestn->simdclone->args[i].linear_step,
4573 ncopies * nunits);
4574 tree tcst = wide_int_to_tree (type, cst);
4575 tree phi_arg = copy_ssa_name (op);
4576 gassign *new_stmt
4577 = gimple_build_assign (phi_arg, code, phi_res, tcst);
4578 gimple_stmt_iterator si = gsi_after_labels (loop->header);
4579 gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
4580 add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
4581 UNKNOWN_LOCATION);
4582 arginfo[i].op = phi_res;
4583 vargs.safe_push (phi_res);
4585 else
4587 enum tree_code code
4588 = POINTER_TYPE_P (TREE_TYPE (op))
4589 ? POINTER_PLUS_EXPR : PLUS_EXPR;
4590 tree type = POINTER_TYPE_P (TREE_TYPE (op))
4591 ? sizetype : TREE_TYPE (op);
4592 poly_widest_int cst
4593 = wi::mul (bestn->simdclone->args[i].linear_step,
4594 j * nunits);
4595 tree tcst = wide_int_to_tree (type, cst);
4596 new_temp = make_ssa_name (TREE_TYPE (op));
4597 gassign *new_stmt
4598 = gimple_build_assign (new_temp, code,
4599 arginfo[i].op, tcst);
4600 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
4601 vargs.safe_push (new_temp);
4603 break;
4604 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
4605 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
4606 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
4607 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
4608 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
4609 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
4610 default:
4611 gcc_unreachable ();
4615 gcall *new_call = gimple_build_call_vec (fndecl, vargs);
4616 if (vec_dest)
4618 gcc_assert (ratype
4619 || known_eq (simd_clone_subparts (rtype), nunits));
4620 if (ratype)
4621 new_temp = create_tmp_var (ratype);
4622 else if (useless_type_conversion_p (vectype, rtype))
4623 new_temp = make_ssa_name (vec_dest, new_call);
4624 else
4625 new_temp = make_ssa_name (rtype, new_call);
4626 gimple_call_set_lhs (new_call, new_temp);
4628 vect_finish_stmt_generation (vinfo, stmt_info, new_call, gsi);
4629 gimple *new_stmt = new_call;
4631 if (vec_dest)
4633 if (!multiple_p (simd_clone_subparts (vectype), nunits))
4635 unsigned int k, l;
4636 poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
4637 poly_uint64 bytes = GET_MODE_SIZE (TYPE_MODE (vectype));
4638 k = vector_unroll_factor (nunits,
4639 simd_clone_subparts (vectype));
4640 gcc_assert ((k & (k - 1)) == 0);
4641 for (l = 0; l < k; l++)
4643 tree t;
4644 if (ratype)
4646 t = build_fold_addr_expr (new_temp);
4647 t = build2 (MEM_REF, vectype, t,
4648 build_int_cst (TREE_TYPE (t), l * bytes));
4650 else
4651 t = build3 (BIT_FIELD_REF, vectype, new_temp,
4652 bitsize_int (prec), bitsize_int (l * prec));
4653 new_stmt = gimple_build_assign (make_ssa_name (vectype), t);
4654 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
4656 if (j == 0 && l == 0)
4657 *vec_stmt = new_stmt;
4658 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
4661 if (ratype)
4662 vect_clobber_variable (vinfo, stmt_info, gsi, new_temp);
4663 continue;
4665 else if (!multiple_p (nunits, simd_clone_subparts (vectype)))
4667 unsigned int k = (simd_clone_subparts (vectype)
4668 / simd_clone_subparts (rtype));
4669 gcc_assert ((k & (k - 1)) == 0);
4670 if ((j & (k - 1)) == 0)
4671 vec_alloc (ret_ctor_elts, k);
4672 if (ratype)
4674 unsigned int m, o;
4675 o = vector_unroll_factor (nunits,
4676 simd_clone_subparts (rtype));
4677 for (m = 0; m < o; m++)
4679 tree tem = build4 (ARRAY_REF, rtype, new_temp,
4680 size_int (m), NULL_TREE, NULL_TREE);
4681 new_stmt = gimple_build_assign (make_ssa_name (rtype),
4682 tem);
4683 vect_finish_stmt_generation (vinfo, stmt_info,
4684 new_stmt, gsi);
4685 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
4686 gimple_assign_lhs (new_stmt));
4688 vect_clobber_variable (vinfo, stmt_info, gsi, new_temp);
4690 else
4691 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
4692 if ((j & (k - 1)) != k - 1)
4693 continue;
4694 vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
4695 new_stmt
4696 = gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
4697 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
4699 if ((unsigned) j == k - 1)
4700 *vec_stmt = new_stmt;
4701 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
4702 continue;
4704 else if (ratype)
4706 tree t = build_fold_addr_expr (new_temp);
4707 t = build2 (MEM_REF, vectype, t,
4708 build_int_cst (TREE_TYPE (t), 0));
4709 new_stmt = gimple_build_assign (make_ssa_name (vec_dest), t);
4710 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
4711 vect_clobber_variable (vinfo, stmt_info, gsi, new_temp);
4713 else if (!useless_type_conversion_p (vectype, rtype))
4715 vec_oprnd0 = build1 (VIEW_CONVERT_EXPR, vectype, new_temp);
4716 new_stmt
4717 = gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
4718 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
4722 if (j == 0)
4723 *vec_stmt = new_stmt;
4724 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
4727 for (i = 0; i < nargs; ++i)
4729 vec<tree> oprndsi = vec_oprnds[i];
4730 oprndsi.release ();
4732 vargs.release ();
4734 /* Mark the clone as no longer being a candidate for GC. */
4735 bestn->gc_candidate = false;
4737 /* The call in STMT might prevent it from being removed in DCE.
4738 We cannot, however, remove it here, because of the way the SSA name
4739 it defines is mapped to the new definition. So just replace the
4740 rhs of the statement with something harmless. */
4742 if (slp_node)
4743 return true;
4745 gimple *new_stmt;
4746 if (scalar_dest)
4748 type = TREE_TYPE (scalar_dest);
4749 lhs = gimple_call_lhs (vect_orig_stmt (stmt_info)->stmt);
4750 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
4752 else
4753 new_stmt = gimple_build_nop ();
4754 vinfo->replace_stmt (gsi, vect_orig_stmt (stmt_info), new_stmt);
4755 unlink_stmt_vdef (stmt);
4757 return true;
4761 /* Function vect_gen_widened_results_half
4763 Create a vector stmt whose code, number of operands, and result
4764 variable are CODE, OP_TYPE, and VEC_DEST, and whose operands are
4765 VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at GSI.
4766 STMT_INFO is the original scalar stmt that we are vectorizing. */
4770 static gimple *
4771 vect_gen_widened_results_half (vec_info *vinfo, enum tree_code code,
4772 tree vec_oprnd0, tree vec_oprnd1, int op_type,
4773 tree vec_dest, gimple_stmt_iterator *gsi,
4774 stmt_vec_info stmt_info)
4776 gimple *new_stmt;
4777 tree new_temp;
4779 /* Generate half of the widened result: */
4780 gcc_assert (op_type == TREE_CODE_LENGTH (code));
4781 if (op_type != binary_op)
4782 vec_oprnd1 = NULL;
4783 new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
4784 new_temp = make_ssa_name (vec_dest, new_stmt);
4785 gimple_assign_set_lhs (new_stmt, new_temp);
4786 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
4788 return new_stmt;
4792 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
4793 For multi-step conversions store the resulting vectors and call the function
4794 recursively. */
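/* For instance, demoting four V4SI operand vectors to chars with
   MULTI_STEP_CVT == 1 first packs them pairwise into two V8HI vectors
   and then recurses once with VEC_PACK_TRUNC_EXPR to combine those into
   a single V16QI result; the intermediate types depend on the target. */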
4796 static void
4797 vect_create_vectorized_demotion_stmts (vec_info *vinfo, vec<tree> *vec_oprnds,
4798 int multi_step_cvt,
4799 stmt_vec_info stmt_info,
4800 vec<tree> &vec_dsts,
4801 gimple_stmt_iterator *gsi,
4802 slp_tree slp_node, enum tree_code code)
4804 unsigned int i;
4805 tree vop0, vop1, new_tmp, vec_dest;
4807 vec_dest = vec_dsts.pop ();
4809 for (i = 0; i < vec_oprnds->length (); i += 2)
4811 /* Create demotion operation. */
4812 vop0 = (*vec_oprnds)[i];
4813 vop1 = (*vec_oprnds)[i + 1];
4814 gassign *new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
4815 new_tmp = make_ssa_name (vec_dest, new_stmt);
4816 gimple_assign_set_lhs (new_stmt, new_tmp);
4817 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
4819 if (multi_step_cvt)
4820 /* Store the resulting vector for next recursive call. */
4821 (*vec_oprnds)[i/2] = new_tmp;
4822 else
4824 /* This is the last step of the conversion sequence. Store the
4825 vectors in SLP_NODE or in the vector info of the scalar statement
4826 (or in the STMT_VINFO_RELATED_STMT chain). */
4827 if (slp_node)
4828 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4829 else
4830 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
4834 /* For multi-step demotion operations we first generate demotion operations
4835 from the source type to the intermediate types, and then combine the
4836 results (stored in VEC_OPRNDS) in a demotion operation to the destination
4837 type. */
4838 if (multi_step_cvt)
4840 /* At each level of recursion we have half of the operands we had at the
4841 previous level. */
4842 vec_oprnds->truncate ((i+1)/2);
4843 vect_create_vectorized_demotion_stmts (vinfo, vec_oprnds,
4844 multi_step_cvt - 1,
4845 stmt_info, vec_dsts, gsi,
4846 slp_node, VEC_PACK_TRUNC_EXPR);
4849 vec_dsts.quick_push (vec_dest);
4853 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
4854 and VEC_OPRNDS1, for a binary operation associated with scalar statement
4855 STMT_INFO. For multi-step conversions store the resulting vectors and
4856 call the function recursively. */
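/* For instance, for a widening conversion CODE1 and CODE2 are typically
   VEC_UNPACK_LO_EXPR and VEC_UNPACK_HI_EXPR (or the corresponding
   widening-multiply codes for WIDEN_MULT_EXPR), so each input vector in
   VEC_OPRNDS0 is replaced by two vectors of the wider type. */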
4858 static void
4859 vect_create_vectorized_promotion_stmts (vec_info *vinfo,
4860 vec<tree> *vec_oprnds0,
4861 vec<tree> *vec_oprnds1,
4862 stmt_vec_info stmt_info, tree vec_dest,
4863 gimple_stmt_iterator *gsi,
4864 enum tree_code code1,
4865 enum tree_code code2, int op_type)
4867 int i;
4868 tree vop0, vop1, new_tmp1, new_tmp2;
4869 gimple *new_stmt1, *new_stmt2;
4870 vec<tree> vec_tmp = vNULL;
4872 vec_tmp.create (vec_oprnds0->length () * 2);
4873 FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
4875 if (op_type == binary_op)
4876 vop1 = (*vec_oprnds1)[i];
4877 else
4878 vop1 = NULL_TREE;
4880 /* Generate the two halves of the promotion operation. */
4881 new_stmt1 = vect_gen_widened_results_half (vinfo, code1, vop0, vop1,
4882 op_type, vec_dest, gsi,
4883 stmt_info);
4884 new_stmt2 = vect_gen_widened_results_half (vinfo, code2, vop0, vop1,
4885 op_type, vec_dest, gsi,
4886 stmt_info);
4887 if (is_gimple_call (new_stmt1))
4889 new_tmp1 = gimple_call_lhs (new_stmt1);
4890 new_tmp2 = gimple_call_lhs (new_stmt2);
4892 else
4894 new_tmp1 = gimple_assign_lhs (new_stmt1);
4895 new_tmp2 = gimple_assign_lhs (new_stmt2);
4898 /* Store the results for the next step. */
4899 vec_tmp.quick_push (new_tmp1);
4900 vec_tmp.quick_push (new_tmp2);
4903 vec_oprnds0->release ();
4904 *vec_oprnds0 = vec_tmp;
4907 /* Create vectorized promotion stmts for widening stmts using only half the
4908 potential vector size for input. */
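/* For instance, a V4HI + V4HI -> V4SI widening add, where input and
   output have the same number of lanes, can be emitted as two NOP_EXPR
   conversions of the inputs to V4SI followed by an ordinary V4SI
   PLUS_EXPR instead of an unpack-based sequence. */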
4909 static void
4910 vect_create_half_widening_stmts (vec_info *vinfo,
4911 vec<tree> *vec_oprnds0,
4912 vec<tree> *vec_oprnds1,
4913 stmt_vec_info stmt_info, tree vec_dest,
4914 gimple_stmt_iterator *gsi,
4915 enum tree_code code1,
4916 int op_type)
4918 int i;
4919 tree vop0, vop1;
4920 gimple *new_stmt1;
4921 gimple *new_stmt2;
4922 gimple *new_stmt3;
4923 vec<tree> vec_tmp = vNULL;
4925 vec_tmp.create (vec_oprnds0->length ());
4926 FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
4928 tree new_tmp1, new_tmp2, new_tmp3, out_type;
4930 gcc_assert (op_type == binary_op);
4931 vop1 = (*vec_oprnds1)[i];
4933 /* Widen the first vector input. */
4934 out_type = TREE_TYPE (vec_dest);
4935 new_tmp1 = make_ssa_name (out_type);
4936 new_stmt1 = gimple_build_assign (new_tmp1, NOP_EXPR, vop0);
4937 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt1, gsi);
4938 if (VECTOR_TYPE_P (TREE_TYPE (vop1)))
4940 /* Widen the second vector input. */
4941 new_tmp2 = make_ssa_name (out_type);
4942 new_stmt2 = gimple_build_assign (new_tmp2, NOP_EXPR, vop1);
4943 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt2, gsi);
4944 /* Perform the operation, with both vector inputs widened. */
4945 new_stmt3 = gimple_build_assign (vec_dest, code1, new_tmp1, new_tmp2);
4947 else
4949 /* Perform the operation, with the single vector input widened. */
4950 new_stmt3 = gimple_build_assign (vec_dest, code1, new_tmp1, vop1);
4953 new_tmp3 = make_ssa_name (vec_dest, new_stmt3);
4954 gimple_assign_set_lhs (new_stmt3, new_tmp3);
4955 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt3, gsi);
4957 /* Store the results for the next step. */
4958 vec_tmp.quick_push (new_tmp3);
4961 vec_oprnds0->release ();
4962 *vec_oprnds0 = vec_tmp;
4966 /* Check if STMT_INFO performs a conversion operation that can be vectorized.
4967 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
4968 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4969 Return true if STMT_INFO is vectorizable in this way. */
4971 static bool
4972 vectorizable_conversion (vec_info *vinfo,
4973 stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
4974 gimple **vec_stmt, slp_tree slp_node,
4975 stmt_vector_for_cost *cost_vec)
4977 tree vec_dest;
4978 tree scalar_dest;
4979 tree op0, op1 = NULL_TREE;
4980 loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
4981 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
4982 enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
4983 tree new_temp;
4984 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4985 int ndts = 2;
4986 poly_uint64 nunits_in;
4987 poly_uint64 nunits_out;
4988 tree vectype_out, vectype_in;
4989 int ncopies, i;
4990 tree lhs_type, rhs_type;
4991 enum { NARROW, NONE, WIDEN } modifier;
4992 vec<tree> vec_oprnds0 = vNULL;
4993 vec<tree> vec_oprnds1 = vNULL;
4994 tree vop0;
4995 bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
4996 int multi_step_cvt = 0;
4997 vec<tree> interm_types = vNULL;
4998 tree intermediate_type, cvt_type = NULL_TREE;
4999 int op_type;
5000 unsigned short fltsz;
5002 /* Is STMT a vectorizable conversion? */
5004 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5005 return false;
5007 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5008 && ! vec_stmt)
5009 return false;
5011 gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
5012 if (!stmt)
5013 return false;
5015 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
5016 return false;
5018 code = gimple_assign_rhs_code (stmt);
5019 if (!CONVERT_EXPR_CODE_P (code)
5020 && code != FIX_TRUNC_EXPR
5021 && code != FLOAT_EXPR
5022 && code != WIDEN_PLUS_EXPR
5023 && code != WIDEN_MINUS_EXPR
5024 && code != WIDEN_MULT_EXPR
5025 && code != WIDEN_LSHIFT_EXPR)
5026 return false;
5028 bool widen_arith = (code == WIDEN_PLUS_EXPR
5029 || code == WIDEN_MINUS_EXPR
5030 || code == WIDEN_MULT_EXPR
5031 || code == WIDEN_LSHIFT_EXPR);
5032 op_type = TREE_CODE_LENGTH (code);
5034 /* Check types of lhs and rhs. */
5035 scalar_dest = gimple_assign_lhs (stmt);
5036 lhs_type = TREE_TYPE (scalar_dest);
5037 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
5039 /* Check the operands of the operation. */
5040 slp_tree slp_op0, slp_op1 = NULL;
5041 if (!vect_is_simple_use (vinfo, stmt_info, slp_node,
5042 0, &op0, &slp_op0, &dt[0], &vectype_in))
5044 if (dump_enabled_p ())
5045 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5046 "use not simple.\n");
5047 return false;
5050 rhs_type = TREE_TYPE (op0);
5051 if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
5052 && !((INTEGRAL_TYPE_P (lhs_type)
5053 && INTEGRAL_TYPE_P (rhs_type))
5054 || (SCALAR_FLOAT_TYPE_P (lhs_type)
5055 && SCALAR_FLOAT_TYPE_P (rhs_type))))
5056 return false;
5058 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
5059 && ((INTEGRAL_TYPE_P (lhs_type)
5060 && !type_has_mode_precision_p (lhs_type))
5061 || (INTEGRAL_TYPE_P (rhs_type)
5062 && !type_has_mode_precision_p (rhs_type))))
5064 if (dump_enabled_p ())
5065 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5066 "type conversion to/from bit-precision unsupported."
5067 "\n");
5068 return false;
5071 if (op_type == binary_op)
5073 gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR
5074 || code == WIDEN_PLUS_EXPR || code == WIDEN_MINUS_EXPR);
5076 op1 = gimple_assign_rhs2 (stmt);
5077 tree vectype1_in;
5078 if (!vect_is_simple_use (vinfo, stmt_info, slp_node, 1,
5079 &op1, &slp_op1, &dt[1], &vectype1_in))
5081 if (dump_enabled_p ())
5082 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5083 "use not simple.\n");
5084 return false;
5086 /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
5087 OP1. */
5088 if (!vectype_in)
5089 vectype_in = vectype1_in;
5092 /* If op0 is an external or constant def, infer the vector type
5093 from the scalar type. */
5094 if (!vectype_in)
5095 vectype_in = get_vectype_for_scalar_type (vinfo, rhs_type, slp_node);
5096 if (vec_stmt)
5097 gcc_assert (vectype_in);
5098 if (!vectype_in)
5100 if (dump_enabled_p ())
5101 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5102 "no vectype for scalar type %T\n", rhs_type);
5104 return false;
5107 if (VECTOR_BOOLEAN_TYPE_P (vectype_out)
5108 && !VECTOR_BOOLEAN_TYPE_P (vectype_in))
5110 if (dump_enabled_p ())
5111 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5112 "can't convert between boolean and non "
5113 "boolean vectors %T\n", rhs_type);
5115 return false;
5118 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
5119 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
5120 if (known_eq (nunits_out, nunits_in))
5121 if (widen_arith)
5122 modifier = WIDEN;
5123 else
5124 modifier = NONE;
5125 else if (multiple_p (nunits_out, nunits_in))
5126 modifier = NARROW;
5127 else
5129 gcc_checking_assert (multiple_p (nunits_in, nunits_out));
5130 modifier = WIDEN;
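/* For example, on a target with 128-bit vectors a double -> float
   conversion has vectype_in V2DF (two lanes) and vectype_out V4SF
   (four lanes), giving NARROW; float -> double is the reverse and gives
   WIDEN, while int -> float keeps the lane count and gives NONE. */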
5133 /* Multiple types in SLP are handled by creating the appropriate number of
5134 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5135 case of SLP. */
5136 if (slp_node)
5137 ncopies = 1;
5138 else if (modifier == NARROW)
5139 ncopies = vect_get_num_copies (loop_vinfo, vectype_out);
5140 else
5141 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
5143 /* Sanity check: make sure that at least one copy of the vectorized stmt
5144 needs to be generated. */
5145 gcc_assert (ncopies >= 1);
5147 bool found_mode = false;
5148 scalar_mode lhs_mode = SCALAR_TYPE_MODE (lhs_type);
5149 scalar_mode rhs_mode = SCALAR_TYPE_MODE (rhs_type);
5150 opt_scalar_mode rhs_mode_iter;
5152 /* Supportable by target? */
5153 switch (modifier)
5155 case NONE:
5156 if (code != FIX_TRUNC_EXPR
5157 && code != FLOAT_EXPR
5158 && !CONVERT_EXPR_CODE_P (code))
5159 return false;
5160 if (supportable_convert_operation (code, vectype_out, vectype_in, &code1))
5161 break;
5162 /* FALLTHRU */
5163 unsupported:
5164 if (dump_enabled_p ())
5165 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5166 "conversion not supported by target.\n");
5167 return false;
5169 case WIDEN:
5170 if (known_eq (nunits_in, nunits_out))
5172 if (!supportable_half_widening_operation (code, vectype_out,
5173 vectype_in, &code1))
5174 goto unsupported;
5175 gcc_assert (!(multi_step_cvt && op_type == binary_op));
5176 break;
5178 if (supportable_widening_operation (vinfo, code, stmt_info,
5179 vectype_out, vectype_in, &code1,
5180 &code2, &multi_step_cvt,
5181 &interm_types))
5183 /* Binary widening operation can only be supported directly by the
5184 architecture. */
5185 gcc_assert (!(multi_step_cvt && op_type == binary_op));
5186 break;
5189 if (code != FLOAT_EXPR
5190 || GET_MODE_SIZE (lhs_mode) <= GET_MODE_SIZE (rhs_mode))
5191 goto unsupported;
5193 fltsz = GET_MODE_SIZE (lhs_mode);
5194 FOR_EACH_2XWIDER_MODE (rhs_mode_iter, rhs_mode)
5196 rhs_mode = rhs_mode_iter.require ();
5197 if (GET_MODE_SIZE (rhs_mode) > fltsz)
5198 break;
5200 cvt_type
5201 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
5202 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
5203 if (cvt_type == NULL_TREE)
5204 goto unsupported;
5206 if (GET_MODE_SIZE (rhs_mode) == fltsz)
5208 if (!supportable_convert_operation (code, vectype_out,
5209 cvt_type, &codecvt1))
5210 goto unsupported;
5212 else if (!supportable_widening_operation (vinfo, code, stmt_info,
5213 vectype_out, cvt_type,
5214 &codecvt1, &codecvt2,
5215 &multi_step_cvt,
5216 &interm_types))
5217 continue;
5218 else
5219 gcc_assert (multi_step_cvt == 0);
5221 if (supportable_widening_operation (vinfo, NOP_EXPR, stmt_info,
5222 cvt_type,
5223 vectype_in, &code1, &code2,
5224 &multi_step_cvt, &interm_types))
5226 found_mode = true;
5227 break;
5231 if (!found_mode)
5232 goto unsupported;
5234 if (GET_MODE_SIZE (rhs_mode) == fltsz)
5235 codecvt2 = ERROR_MARK;
5236 else
5238 multi_step_cvt++;
5239 interm_types.safe_push (cvt_type);
5240 cvt_type = NULL_TREE;
5242 break;
5244 case NARROW:
5245 gcc_assert (op_type == unary_op);
5246 if (supportable_narrowing_operation (code, vectype_out, vectype_in,
5247 &code1, &multi_step_cvt,
5248 &interm_types))
5249 break;
5251 if (code != FIX_TRUNC_EXPR
5252 || GET_MODE_SIZE (lhs_mode) >= GET_MODE_SIZE (rhs_mode))
5253 goto unsupported;
5255 cvt_type
5256 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
5257 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
5258 if (cvt_type == NULL_TREE)
5259 goto unsupported;
5260 if (!supportable_convert_operation (code, cvt_type, vectype_in,
5261 &codecvt1))
5262 goto unsupported;
5263 if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
5264 &code1, &multi_step_cvt,
5265 &interm_types))
5266 break;
5267 goto unsupported;
5269 default:
5270 gcc_unreachable ();
5273 if (!vec_stmt) /* transformation not required. */
5275 if (slp_node
5276 && (!vect_maybe_update_slp_op_vectype (slp_op0, vectype_in)
5277 || !vect_maybe_update_slp_op_vectype (slp_op1, vectype_in)))
5279 if (dump_enabled_p ())
5280 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5281 "incompatible vector types for invariants\n");
5282 return false;
5284 DUMP_VECT_SCOPE ("vectorizable_conversion");
5285 if (modifier == NONE)
5287 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
5288 vect_model_simple_cost (vinfo, stmt_info, ncopies, dt, ndts, slp_node,
5289 cost_vec);
5291 else if (modifier == NARROW)
5293 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
5294 /* The final packing step produces one vector result per copy. */
5295 unsigned int nvectors
5296 = (slp_node ? SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) : ncopies);
5297 vect_model_promotion_demotion_cost (stmt_info, dt, nvectors,
5298 multi_step_cvt, cost_vec,
5299 widen_arith);
5301 else
5303 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
5304 /* The initial unpacking step produces two vector results
5305 per copy. MULTI_STEP_CVT is 0 for a single conversion,
5306 so >> MULTI_STEP_CVT divides by 2^(number of steps - 1). */
5307 unsigned int nvectors
5308 = (slp_node
5309 ? SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) >> multi_step_cvt
5310 : ncopies * 2);
5311 vect_model_promotion_demotion_cost (stmt_info, dt, nvectors,
5312 multi_step_cvt, cost_vec,
5313 widen_arith);
5315 interm_types.release ();
5316 return true;
5319 /* Transform. */
5320 if (dump_enabled_p ())
5321 dump_printf_loc (MSG_NOTE, vect_location,
5322 "transform conversion. ncopies = %d.\n", ncopies);
5324 if (op_type == binary_op)
5326 if (CONSTANT_CLASS_P (op0))
5327 op0 = fold_convert (TREE_TYPE (op1), op0);
5328 else if (CONSTANT_CLASS_P (op1))
5329 op1 = fold_convert (TREE_TYPE (op0), op1);
5332 /* In case of multi-step conversion, we first generate conversion operations
5333 to the intermediate types, and then from those to the final one.
5334 We create vector destinations for the intermediate type (TYPES) received
5335 from supportable_*_operation, and store them in the correct order
5336 for future use in vect_create_vectorized_*_stmts (). */
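   /* A hedged illustration only (actual modes depend on the target and the
      chosen vector sizes): widening char elements to int may go through a
      short-element intermediate step, e.g. QI -> HI -> SI vectors.  In that
      case INTERM_TYPES holds the HI-element vector type and VEC_DSTS below
      ends up ordered { final SI destination, intermediate HI destination },
      so the promotion loop further down works through the intermediate
      destinations before the final one.  */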
5337 auto_vec<tree> vec_dsts (multi_step_cvt + 1);
5338 vec_dest = vect_create_destination_var (scalar_dest,
5339 (cvt_type && modifier == WIDEN)
5340 ? cvt_type : vectype_out);
5341 vec_dsts.quick_push (vec_dest);
5343 if (multi_step_cvt)
5345 for (i = interm_types.length () - 1;
5346 interm_types.iterate (i, &intermediate_type); i--)
5348 vec_dest = vect_create_destination_var (scalar_dest,
5349 intermediate_type);
5350 vec_dsts.quick_push (vec_dest);
5354 if (cvt_type)
5355 vec_dest = vect_create_destination_var (scalar_dest,
5356 modifier == WIDEN
5357 ? vectype_out : cvt_type);
5359 int ninputs = 1;
5360 if (!slp_node)
5362 if (modifier == WIDEN)
5364 else if (modifier == NARROW)
5366 if (multi_step_cvt)
5367 ninputs = vect_pow2 (multi_step_cvt);
5368 ninputs *= 2;
5372 switch (modifier)
5374 case NONE:
5375 vect_get_vec_defs (vinfo, stmt_info, slp_node, ncopies,
5376 op0, &vec_oprnds0);
5377 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
5379 /* Arguments are ready, create the new vector stmt. */
5380 gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
5381 gassign *new_stmt = gimple_build_assign (vec_dest, code1, vop0);
5382 new_temp = make_ssa_name (vec_dest, new_stmt);
5383 gimple_assign_set_lhs (new_stmt, new_temp);
5384 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
5386 if (slp_node)
5387 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
5388 else
5389 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
5391 break;
5393 case WIDEN:
5394 /* In case the vectorization factor (VF) is bigger than the number
5395 of elements that we can fit in a vectype (nunits), we have to
5396 generate more than one vector stmt, i.e., we need to "unroll"
5397 the vector stmt by a factor VF/nunits. */
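   /* Purely illustrative (modes depend on the target): with 128-bit vectors,
      converting 8 shorts to 8 ints per scalar iteration unpacks one V8HI
      operand into two V4SI results, which is why a single scalar conversion
      can require several vector stmts here.  */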
5398 vect_get_vec_defs (vinfo, stmt_info, slp_node, ncopies * ninputs,
5399 op0, &vec_oprnds0,
5400 code == WIDEN_LSHIFT_EXPR ? NULL_TREE : op1,
5401 &vec_oprnds1);
5402 if (code == WIDEN_LSHIFT_EXPR)
5404 int oprnds_size = vec_oprnds0.length ();
5405 vec_oprnds1.create (oprnds_size);
5406 for (i = 0; i < oprnds_size; ++i)
5407 vec_oprnds1.quick_push (op1);
5409 /* Arguments are ready. Create the new vector stmts. */
5410 for (i = multi_step_cvt; i >= 0; i--)
5412 tree this_dest = vec_dsts[i];
5413 enum tree_code c1 = code1, c2 = code2;
5414 if (i == 0 && codecvt2 != ERROR_MARK)
5416 c1 = codecvt1;
5417 c2 = codecvt2;
5419 if (known_eq (nunits_out, nunits_in))
5420 vect_create_half_widening_stmts (vinfo, &vec_oprnds0,
5421 &vec_oprnds1, stmt_info,
5422 this_dest, gsi,
5423 c1, op_type);
5424 else
5425 vect_create_vectorized_promotion_stmts (vinfo, &vec_oprnds0,
5426 &vec_oprnds1, stmt_info,
5427 this_dest, gsi,
5428 c1, c2, op_type);
5431 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
5433 gimple *new_stmt;
5434 if (cvt_type)
5436 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
5437 new_temp = make_ssa_name (vec_dest);
5438 new_stmt = gimple_build_assign (new_temp, codecvt1, vop0);
5439 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
5441 else
5442 new_stmt = SSA_NAME_DEF_STMT (vop0);
5444 if (slp_node)
5445 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
5446 else
5447 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
5449 break;
5451 case NARROW:
5452 /* In case the vectorization factor (VF) is bigger than the number
5453 of elements that we can fit in a vectype (nunits), we have to
5454 generate more than one vector stmt, i.e., we need to "unroll"
5455 the vector stmt by a factor VF/nunits. */
5456 vect_get_vec_defs (vinfo, stmt_info, slp_node, ncopies * ninputs,
5457 op0, &vec_oprnds0);
5458 /* Arguments are ready. Create the new vector stmts. */
5459 if (cvt_type)
5460 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
5462 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
5463 new_temp = make_ssa_name (vec_dest);
5464 gassign *new_stmt
5465 = gimple_build_assign (new_temp, codecvt1, vop0);
5466 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
5467 vec_oprnds0[i] = new_temp;
5470 vect_create_vectorized_demotion_stmts (vinfo, &vec_oprnds0,
5471 multi_step_cvt,
5472 stmt_info, vec_dsts, gsi,
5473 slp_node, code1);
5474 break;
5476 if (!slp_node)
5477 *vec_stmt = STMT_VINFO_VEC_STMTS (stmt_info)[0];
5479 vec_oprnds0.release ();
5480 vec_oprnds1.release ();
5481 interm_types.release ();
5483 return true;
5486 /* Return true if we can assume from the scalar form of STMT_INFO that
5487 neither the scalar nor the vector forms will generate code. STMT_INFO
5488 is known not to involve a data reference. */
5490 bool
5491 vect_nop_conversion_p (stmt_vec_info stmt_info)
5493 gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
5494 if (!stmt)
5495 return false;
5497 tree lhs = gimple_assign_lhs (stmt);
5498 tree_code code = gimple_assign_rhs_code (stmt);
5499 tree rhs = gimple_assign_rhs1 (stmt);
5501 if (code == SSA_NAME || code == VIEW_CONVERT_EXPR)
5502 return true;
5504 if (CONVERT_EXPR_CODE_P (code))
5505 return tree_nop_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs));
5507 return false;
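/* A minimal sketch of what the predicate above accepts, assuming the usual
   32-bit int:

     unsigned int y = (unsigned int) x;   // sign change only: a nop
     unsigned int z = y;                  // plain SSA copy: a nop

   whereas e.g. (int) some_float is a real FIX_TRUNC conversion and is
   costed and code-generated normally.  */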
5510 /* Function vectorizable_assignment.
5512 Check if STMT_INFO performs an assignment (copy) that can be vectorized.
5513 If VEC_STMT is also passed, vectorize the STMT_INFO: create a vectorized
5514 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
5515 Return true if STMT_INFO is vectorizable in this way. */
5517 static bool
5518 vectorizable_assignment (vec_info *vinfo,
5519 stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
5520 gimple **vec_stmt, slp_tree slp_node,
5521 stmt_vector_for_cost *cost_vec)
5523 tree vec_dest;
5524 tree scalar_dest;
5525 tree op;
5526 loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
5527 tree new_temp;
5528 enum vect_def_type dt[1] = {vect_unknown_def_type};
5529 int ndts = 1;
5530 int ncopies;
5531 int i;
5532 vec<tree> vec_oprnds = vNULL;
5533 tree vop;
5534 bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
5535 enum tree_code code;
5536 tree vectype_in;
5538 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5539 return false;
5541 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5542 && ! vec_stmt)
5543 return false;
5545 /* Is vectorizable assignment? */
5546 gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
5547 if (!stmt)
5548 return false;
5550 scalar_dest = gimple_assign_lhs (stmt);
5551 if (TREE_CODE (scalar_dest) != SSA_NAME)
5552 return false;
5554 if (STMT_VINFO_DATA_REF (stmt_info))
5555 return false;
5557 code = gimple_assign_rhs_code (stmt);
5558 if (!(gimple_assign_single_p (stmt)
5559 || code == PAREN_EXPR
5560 || CONVERT_EXPR_CODE_P (code)))
5561 return false;
5563 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5564 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
5566 /* Multiple types in SLP are handled by creating the appropriate number of
5567 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5568 case of SLP. */
5569 if (slp_node)
5570 ncopies = 1;
5571 else
5572 ncopies = vect_get_num_copies (loop_vinfo, vectype);
5574 gcc_assert (ncopies >= 1);
5576 slp_tree slp_op;
5577 if (!vect_is_simple_use (vinfo, stmt_info, slp_node, 0, &op, &slp_op,
5578 &dt[0], &vectype_in))
5580 if (dump_enabled_p ())
5581 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5582 "use not simple.\n");
5583 return false;
5585 if (!vectype_in)
5586 vectype_in = get_vectype_for_scalar_type (vinfo, TREE_TYPE (op), slp_node);
5588 /* We can handle NOP_EXPR conversions that do not change the number
5589 of elements or the vector size. */
5590 if ((CONVERT_EXPR_CODE_P (code)
5591 || code == VIEW_CONVERT_EXPR)
5592 && (!vectype_in
5593 || maybe_ne (TYPE_VECTOR_SUBPARTS (vectype_in), nunits)
5594 || maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype)),
5595 GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
5596 return false;
5598 if (VECTOR_BOOLEAN_TYPE_P (vectype)
5599 && !VECTOR_BOOLEAN_TYPE_P (vectype_in))
5601 if (dump_enabled_p ())
5602 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5603 "can't convert between boolean and non "
5604 "boolean vectors %T\n", TREE_TYPE (op));
5606 return false;
5609 /* We do not handle bit-precision changes. */
5610 if ((CONVERT_EXPR_CODE_P (code)
5611 || code == VIEW_CONVERT_EXPR)
5612 && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
5613 && (!type_has_mode_precision_p (TREE_TYPE (scalar_dest))
5614 || !type_has_mode_precision_p (TREE_TYPE (op)))
5615 /* But a conversion that does not change the bit-pattern is ok. */
5616 && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
5617 > TYPE_PRECISION (TREE_TYPE (op)))
5618 && TYPE_UNSIGNED (TREE_TYPE (op))))
5620 if (dump_enabled_p ())
5621 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5622 "type conversion to/from bit-precision "
5623 "unsupported.\n");
5624 return false;
5627 if (!vec_stmt) /* transformation not required. */
5629 if (slp_node
5630 && !vect_maybe_update_slp_op_vectype (slp_op, vectype_in))
5632 if (dump_enabled_p ())
5633 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5634 "incompatible vector types for invariants\n");
5635 return false;
5637 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
5638 DUMP_VECT_SCOPE ("vectorizable_assignment");
5639 if (!vect_nop_conversion_p (stmt_info))
5640 vect_model_simple_cost (vinfo, stmt_info, ncopies, dt, ndts, slp_node,
5641 cost_vec);
5642 return true;
5645 /* Transform. */
5646 if (dump_enabled_p ())
5647 dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");
5649 /* Handle def. */
5650 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5652 /* Handle use. */
5653 vect_get_vec_defs (vinfo, stmt_info, slp_node, ncopies, op, &vec_oprnds);
5655 /* Arguments are ready. Create the new vector stmt. */
5656 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
5658 if (CONVERT_EXPR_CODE_P (code)
5659 || code == VIEW_CONVERT_EXPR)
5660 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
5661 gassign *new_stmt = gimple_build_assign (vec_dest, vop);
5662 new_temp = make_ssa_name (vec_dest, new_stmt);
5663 gimple_assign_set_lhs (new_stmt, new_temp);
5664 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
5665 if (slp_node)
5666 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
5667 else
5668 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
5670 if (!slp_node)
5671 *vec_stmt = STMT_VINFO_VEC_STMTS (stmt_info)[0];
5673 vec_oprnds.release ();
5674 return true;
5678 /* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
5679 either as shift by a scalar or by a vector. */
5681 bool
5682 vect_supportable_shift (vec_info *vinfo, enum tree_code code, tree scalar_type)
5685 machine_mode vec_mode;
5686 optab optab;
5687 int icode;
5688 tree vectype;
5690 vectype = get_vectype_for_scalar_type (vinfo, scalar_type);
5691 if (!vectype)
5692 return false;
5694 optab = optab_for_tree_code (code, vectype, optab_scalar);
5695 if (!optab
5696 || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
5698 optab = optab_for_tree_code (code, vectype, optab_vector);
5699 if (!optab
5700 || (optab_handler (optab, TYPE_MODE (vectype))
5701 == CODE_FOR_nothing))
5702 return false;
5705 vec_mode = TYPE_MODE (vectype);
5706 icode = (int) optab_handler (optab, vec_mode);
5707 if (icode == CODE_FOR_nothing)
5708 return false;
5710 return true;
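/* Illustrative only, using the GNU vector extension:

     typedef int v4si __attribute__ ((vector_size (16)));
     v4si a, b, amounts;
     int n;
     b = a << n;        // one scalar count for all lanes (optab_scalar)
     b = a << amounts;  // per-lane counts (optab_vector)

   The helper above reports whether the target can handle the shift in at
   least one of these two forms.  */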
5714 /* Function vectorizable_shift.
5716 Check if STMT_INFO performs a shift operation that can be vectorized.
5717 If VEC_STMT is also passed, vectorize the STMT_INFO: create a vectorized
5718 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
5719 Return true if STMT_INFO is vectorizable in this way. */
5721 static bool
5722 vectorizable_shift (vec_info *vinfo,
5723 stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
5724 gimple **vec_stmt, slp_tree slp_node,
5725 stmt_vector_for_cost *cost_vec)
5727 tree vec_dest;
5728 tree scalar_dest;
5729 tree op0, op1 = NULL;
5730 tree vec_oprnd1 = NULL_TREE;
5731 tree vectype;
5732 loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
5733 enum tree_code code;
5734 machine_mode vec_mode;
5735 tree new_temp;
5736 optab optab;
5737 int icode;
5738 machine_mode optab_op2_mode;
5739 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
5740 int ndts = 2;
5741 poly_uint64 nunits_in;
5742 poly_uint64 nunits_out;
5743 tree vectype_out;
5744 tree op1_vectype;
5745 int ncopies;
5746 int i;
5747 vec<tree> vec_oprnds0 = vNULL;
5748 vec<tree> vec_oprnds1 = vNULL;
5749 tree vop0, vop1;
5750 unsigned int k;
5751 bool scalar_shift_arg = true;
5752 bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
5753 bool incompatible_op1_vectype_p = false;
5755 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5756 return false;
5758 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5759 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_nested_cycle
5760 && ! vec_stmt)
5761 return false;
5763 /* Is STMT a vectorizable binary/unary operation? */
5764 gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
5765 if (!stmt)
5766 return false;
5768 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
5769 return false;
5771 code = gimple_assign_rhs_code (stmt);
5773 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
5774 || code == RROTATE_EXPR))
5775 return false;
5777 scalar_dest = gimple_assign_lhs (stmt);
5778 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
5779 if (!type_has_mode_precision_p (TREE_TYPE (scalar_dest)))
5781 if (dump_enabled_p ())
5782 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5783 "bit-precision shifts not supported.\n");
5784 return false;
5787 slp_tree slp_op0;
5788 if (!vect_is_simple_use (vinfo, stmt_info, slp_node,
5789 0, &op0, &slp_op0, &dt[0], &vectype))
5791 if (dump_enabled_p ())
5792 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5793 "use not simple.\n");
5794 return false;
5796 /* If op0 is an external or constant def, infer the vector type
5797 from the scalar type. */
5798 if (!vectype)
5799 vectype = get_vectype_for_scalar_type (vinfo, TREE_TYPE (op0), slp_node);
5800 if (vec_stmt)
5801 gcc_assert (vectype);
5802 if (!vectype)
5804 if (dump_enabled_p ())
5805 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5806 "no vectype for scalar type\n");
5807 return false;
5810 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
5811 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
5812 if (maybe_ne (nunits_out, nunits_in))
5813 return false;
5815 stmt_vec_info op1_def_stmt_info;
5816 slp_tree slp_op1;
5817 if (!vect_is_simple_use (vinfo, stmt_info, slp_node, 1, &op1, &slp_op1,
5818 &dt[1], &op1_vectype, &op1_def_stmt_info))
5820 if (dump_enabled_p ())
5821 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5822 "use not simple.\n");
5823 return false;
5826 /* Multiple types in SLP are handled by creating the appropriate number of
5827 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5828 case of SLP. */
5829 if (slp_node)
5830 ncopies = 1;
5831 else
5832 ncopies = vect_get_num_copies (loop_vinfo, vectype);
5834 gcc_assert (ncopies >= 1);
5836 /* Determine whether the shift amount is a vector, or scalar. If the
5837 shift/rotate amount is a vector, use the vector/vector shift optabs. */
5839 if ((dt[1] == vect_internal_def
5840 || dt[1] == vect_induction_def
5841 || dt[1] == vect_nested_cycle)
5842 && !slp_node)
5843 scalar_shift_arg = false;
5844 else if (dt[1] == vect_constant_def
5845 || dt[1] == vect_external_def
5846 || dt[1] == vect_internal_def)
5848 /* In SLP we need to check whether the shift count is the same
5849 in all statements; in loops, a constant or invariant count is
5850 always a scalar shift. */
5851 if (slp_node)
5853 vec<stmt_vec_info> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
5854 stmt_vec_info slpstmt_info;
5856 FOR_EACH_VEC_ELT (stmts, k, slpstmt_info)
5858 gassign *slpstmt = as_a <gassign *> (slpstmt_info->stmt);
5859 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
5860 scalar_shift_arg = false;
5863 /* For internal SLP defs we have to make sure we see scalar stmts
5864 for all vector elements.
5865 ??? For different vectors we could resort to a different
5866 scalar shift operand but code-generation below simply always
5867 takes the first. */
5868 if (dt[1] == vect_internal_def
5869 && maybe_ne (nunits_out * SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node),
5870 stmts.length ()))
5871 scalar_shift_arg = false;
5874 /* If the shift amount is computed by a pattern stmt we cannot
5875 use the scalar amount directly thus give up and use a vector
5876 shift. */
5877 if (op1_def_stmt_info && is_pattern_stmt_p (op1_def_stmt_info))
5878 scalar_shift_arg = false;
5880 else
5882 if (dump_enabled_p ())
5883 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5884 "operand mode requires invariant argument.\n");
5885 return false;
5888 /* Vector shifted by vector. */
5889 bool was_scalar_shift_arg = scalar_shift_arg;
5890 if (!scalar_shift_arg)
5892 optab = optab_for_tree_code (code, vectype, optab_vector);
5893 if (dump_enabled_p ())
5894 dump_printf_loc (MSG_NOTE, vect_location,
5895 "vector/vector shift/rotate found.\n");
5897 if (!op1_vectype)
5898 op1_vectype = get_vectype_for_scalar_type (vinfo, TREE_TYPE (op1),
5899 slp_op1);
5900 incompatible_op1_vectype_p
5901 = (op1_vectype == NULL_TREE
5902 || maybe_ne (TYPE_VECTOR_SUBPARTS (op1_vectype),
5903 TYPE_VECTOR_SUBPARTS (vectype))
5904 || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype));
5905 if (incompatible_op1_vectype_p
5906 && (!slp_node
5907 || SLP_TREE_DEF_TYPE (slp_op1) != vect_constant_def
5908 || slp_op1->refcnt != 1))
5910 if (dump_enabled_p ())
5911 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5912 "unusable type for last operand in"
5913 " vector/vector shift/rotate.\n");
5914 return false;
5917 /* See if the machine has a vector shifted by scalar insn and if not
5918 then see if it has a vector shifted by vector insn. */
5919 else
5921 optab = optab_for_tree_code (code, vectype, optab_scalar);
5922 if (optab
5923 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
5925 if (dump_enabled_p ())
5926 dump_printf_loc (MSG_NOTE, vect_location,
5927 "vector/scalar shift/rotate found.\n");
5929 else
5931 optab = optab_for_tree_code (code, vectype, optab_vector);
5932 if (optab
5933 && (optab_handler (optab, TYPE_MODE (vectype))
5934 != CODE_FOR_nothing))
5936 scalar_shift_arg = false;
5938 if (dump_enabled_p ())
5939 dump_printf_loc (MSG_NOTE, vect_location,
5940 "vector/vector shift/rotate found.\n");
5942 if (!op1_vectype)
5943 op1_vectype = get_vectype_for_scalar_type (vinfo,
5944 TREE_TYPE (op1),
5945 slp_op1);
5947 /* Unlike the other binary operators, shifts/rotates take an
5948 int rhs instead of one with the same type as the lhs,
5949 so make sure the scalar is the right type if we are
5950 dealing with vectors of long long/long/short/char. */
5951 incompatible_op1_vectype_p
5952 = (!op1_vectype
5953 || !tree_nop_conversion_p (TREE_TYPE (vectype),
5954 TREE_TYPE (op1)));
5955 if (incompatible_op1_vectype_p
5956 && dt[1] == vect_internal_def)
5958 if (dump_enabled_p ())
5959 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5960 "unusable type for last operand in"
5961 " vector/vector shift/rotate.\n");
5962 return false;
5968 /* Supportable by target? */
5969 if (!optab)
5971 if (dump_enabled_p ())
5972 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5973 "no optab.\n");
5974 return false;
5976 vec_mode = TYPE_MODE (vectype);
5977 icode = (int) optab_handler (optab, vec_mode);
5978 if (icode == CODE_FOR_nothing)
5980 if (dump_enabled_p ())
5981 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5982 "op not supported by target.\n");
5983 return false;
5985 /* Vector lowering cannot optimize vector shifts using word arithmetic. */
5986 if (vect_emulated_vector_p (vectype))
5987 return false;
5989 if (!vec_stmt) /* transformation not required. */
5991 if (slp_node
5992 && (!vect_maybe_update_slp_op_vectype (slp_op0, vectype)
5993 || ((!scalar_shift_arg || dt[1] == vect_internal_def)
5994 && (!incompatible_op1_vectype_p
5995 || dt[1] == vect_constant_def)
5996 && !vect_maybe_update_slp_op_vectype
5997 (slp_op1,
5998 incompatible_op1_vectype_p ? vectype : op1_vectype))))
6000 if (dump_enabled_p ())
6001 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6002 "incompatible vector types for invariants\n");
6003 return false;
6005 /* Now adjust the constant shift amount in place. */
6006 if (slp_node
6007 && incompatible_op1_vectype_p
6008 && dt[1] == vect_constant_def)
6010 for (unsigned i = 0;
6011 i < SLP_TREE_SCALAR_OPS (slp_op1).length (); ++i)
6013 SLP_TREE_SCALAR_OPS (slp_op1)[i]
6014 = fold_convert (TREE_TYPE (vectype),
6015 SLP_TREE_SCALAR_OPS (slp_op1)[i]);
6016 gcc_assert ((TREE_CODE (SLP_TREE_SCALAR_OPS (slp_op1)[i])
6017 == INTEGER_CST));
6020 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
6021 DUMP_VECT_SCOPE ("vectorizable_shift");
6022 vect_model_simple_cost (vinfo, stmt_info, ncopies, dt,
6023 scalar_shift_arg ? 1 : ndts, slp_node, cost_vec);
6024 return true;
6027 /* Transform. */
6029 if (dump_enabled_p ())
6030 dump_printf_loc (MSG_NOTE, vect_location,
6031 "transform binary/unary operation.\n");
6033 if (incompatible_op1_vectype_p && !slp_node)
6035 gcc_assert (!scalar_shift_arg && was_scalar_shift_arg);
6036 op1 = fold_convert (TREE_TYPE (vectype), op1);
6037 if (dt[1] != vect_constant_def)
6038 op1 = vect_init_vector (vinfo, stmt_info, op1,
6039 TREE_TYPE (vectype), NULL);
6042 /* Handle def. */
6043 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6045 if (scalar_shift_arg && dt[1] != vect_internal_def)
6047 /* Vector shl and shr insn patterns can be defined with scalar
6048 operand 2 (shift operand). In this case, use constant or loop
6049 invariant op1 directly, without extending it to vector mode
6050 first. */
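     /* E.g. many targets' vector shift patterns take the count as a scalar
        (or immediate) operand, so a loop-invariant count in x[i] << n can
        feed operand 2 directly instead of being splatted into a vector
        first (a general illustration, not a claim about any specific
        port).  */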
6051 optab_op2_mode = insn_data[icode].operand[2].mode;
6052 if (!VECTOR_MODE_P (optab_op2_mode))
6054 if (dump_enabled_p ())
6055 dump_printf_loc (MSG_NOTE, vect_location,
6056 "operand 1 using scalar mode.\n");
6057 vec_oprnd1 = op1;
6058 vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : ncopies);
6059 vec_oprnds1.quick_push (vec_oprnd1);
6060 /* Store vec_oprnd1 for every vector stmt to be created.
6061 We check during the analysis that all the shift arguments
6062 are the same.
6063 TODO: Allow different constants for different vector
6064 stmts generated for an SLP instance. */
6065 for (k = 0;
6066 k < (slp_node ? slp_node->vec_stmts_size - 1 : ncopies - 1); k++)
6067 vec_oprnds1.quick_push (vec_oprnd1);
6070 else if (!scalar_shift_arg && slp_node && incompatible_op1_vectype_p)
6072 if (was_scalar_shift_arg)
6074 /* If the argument was the same in all lanes, create
6075 the correctly typed vector shift amount directly. */
6076 op1 = fold_convert (TREE_TYPE (vectype), op1);
6077 op1 = vect_init_vector (vinfo, stmt_info, op1, TREE_TYPE (vectype),
6078 !loop_vinfo ? gsi : NULL);
6079 vec_oprnd1 = vect_init_vector (vinfo, stmt_info, op1, vectype,
6080 !loop_vinfo ? gsi : NULL);
6081 vec_oprnds1.create (slp_node->vec_stmts_size);
6082 for (k = 0; k < slp_node->vec_stmts_size; k++)
6083 vec_oprnds1.quick_push (vec_oprnd1);
6085 else if (dt[1] == vect_constant_def)
6086 /* The constant shift amount has been adjusted in place. */
6088 else
6089 gcc_assert (TYPE_MODE (op1_vectype) == TYPE_MODE (vectype));
6092 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
6093 (a special case for certain kind of vector shifts); otherwise,
6094 operand 1 should be of a vector type (the usual case). */
6095 vect_get_vec_defs (vinfo, stmt_info, slp_node, ncopies,
6096 op0, &vec_oprnds0,
6097 vec_oprnd1 ? NULL_TREE : op1, &vec_oprnds1);
6099 /* Arguments are ready. Create the new vector stmt. */
6100 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
6102 /* For internal defs where we need to use a scalar shift arg,
6103 extract the first lane. */
6104 if (scalar_shift_arg && dt[1] == vect_internal_def)
6106 vop1 = vec_oprnds1[0];
6107 new_temp = make_ssa_name (TREE_TYPE (TREE_TYPE (vop1)));
6108 gassign *new_stmt
6109 = gimple_build_assign (new_temp,
6110 build3 (BIT_FIELD_REF, TREE_TYPE (new_temp),
6111 vop1,
6112 TYPE_SIZE (TREE_TYPE (new_temp)),
6113 bitsize_zero_node));
6114 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
6115 vop1 = new_temp;
6117 else
6118 vop1 = vec_oprnds1[i];
6119 gassign *new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
6120 new_temp = make_ssa_name (vec_dest, new_stmt);
6121 gimple_assign_set_lhs (new_stmt, new_temp);
6122 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
6123 if (slp_node)
6124 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
6125 else
6126 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
6129 if (!slp_node)
6130 *vec_stmt = STMT_VINFO_VEC_STMTS (stmt_info)[0];
6132 vec_oprnds0.release ();
6133 vec_oprnds1.release ();
6135 return true;
6139 /* Function vectorizable_operation.
6141 Check if STMT_INFO performs a binary, unary or ternary operation that can
6142 be vectorized.
6143 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
6144 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
6145 Return true if STMT_INFO is vectorizable in this way. */
6147 static bool
6148 vectorizable_operation (vec_info *vinfo,
6149 stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
6150 gimple **vec_stmt, slp_tree slp_node,
6151 stmt_vector_for_cost *cost_vec)
6153 tree vec_dest;
6154 tree scalar_dest;
6155 tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
6156 tree vectype;
6157 loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
6158 enum tree_code code, orig_code;
6159 machine_mode vec_mode;
6160 tree new_temp;
6161 int op_type;
6162 optab optab;
6163 bool target_support_p;
6164 enum vect_def_type dt[3]
6165 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
6166 int ndts = 3;
6167 poly_uint64 nunits_in;
6168 poly_uint64 nunits_out;
6169 tree vectype_out;
6170 int ncopies, vec_num;
6171 int i;
6172 vec<tree> vec_oprnds0 = vNULL;
6173 vec<tree> vec_oprnds1 = vNULL;
6174 vec<tree> vec_oprnds2 = vNULL;
6175 tree vop0, vop1, vop2;
6176 bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
6178 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
6179 return false;
6181 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
6182 && ! vec_stmt)
6183 return false;
6185 /* Is STMT a vectorizable binary/unary operation? */
6186 gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
6187 if (!stmt)
6188 return false;
6190 /* Loads and stores are handled in vectorizable_{load,store}. */
6191 if (STMT_VINFO_DATA_REF (stmt_info))
6192 return false;
6194 orig_code = code = gimple_assign_rhs_code (stmt);
6196 /* Shifts are handled in vectorizable_shift. */
6197 if (code == LSHIFT_EXPR
6198 || code == RSHIFT_EXPR
6199 || code == LROTATE_EXPR
6200 || code == RROTATE_EXPR)
6201 return false;
6203 /* Comparisons are handled in vectorizable_comparison. */
6204 if (TREE_CODE_CLASS (code) == tcc_comparison)
6205 return false;
6207 /* Conditions are handled in vectorizable_condition. */
6208 if (code == COND_EXPR)
6209 return false;
6211 /* For pointer addition and subtraction, we should use the normal
6212 plus and minus for the vector operation. */
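  /* E.g. q = p + i on pointers arrives as POINTER_PLUS_EXPR but is
     vectorized as an ordinary element-wise PLUS_EXPR on pointer-sized
     elements; likewise p - q (POINTER_DIFF_EXPR) becomes MINUS_EXPR.  */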
6213 if (code == POINTER_PLUS_EXPR)
6214 code = PLUS_EXPR;
6215 if (code == POINTER_DIFF_EXPR)
6216 code = MINUS_EXPR;
6218 /* Support only unary or binary operations. */
6219 op_type = TREE_CODE_LENGTH (code);
6220 if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
6222 if (dump_enabled_p ())
6223 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6224 "num. args = %d (not unary/binary/ternary op).\n",
6225 op_type);
6226 return false;
6229 scalar_dest = gimple_assign_lhs (stmt);
6230 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
6232 /* Most operations cannot handle bit-precision types without extra
6233 truncations. */
6234 bool mask_op_p = VECTOR_BOOLEAN_TYPE_P (vectype_out);
6235 if (!mask_op_p
6236 && !type_has_mode_precision_p (TREE_TYPE (scalar_dest))
6237 /* Exception are bitwise binary operations. */
6238 && code != BIT_IOR_EXPR
6239 && code != BIT_XOR_EXPR
6240 && code != BIT_AND_EXPR)
6242 if (dump_enabled_p ())
6243 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6244 "bit-precision arithmetic not supported.\n");
6245 return false;
6248 slp_tree slp_op0;
6249 if (!vect_is_simple_use (vinfo, stmt_info, slp_node,
6250 0, &op0, &slp_op0, &dt[0], &vectype))
6252 if (dump_enabled_p ())
6253 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6254 "use not simple.\n");
6255 return false;
6257 bool is_invariant = (dt[0] == vect_external_def
6258 || dt[0] == vect_constant_def);
6259 /* If op0 is an external or constant def, infer the vector type
6260 from the scalar type. */
6261 if (!vectype)
6263 /* For boolean type we cannot determine vectype by
6264 invariant value (don't know whether it is a vector
6265 of booleans or vector of integers). We use output
6266 vectype because operations on booleans don't change the
6267 type. */
6268 if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op0)))
6270 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (scalar_dest)))
6272 if (dump_enabled_p ())
6273 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6274 "not supported operation on bool value.\n");
6275 return false;
6277 vectype = vectype_out;
6279 else
6280 vectype = get_vectype_for_scalar_type (vinfo, TREE_TYPE (op0),
6281 slp_node);
6283 if (vec_stmt)
6284 gcc_assert (vectype);
6285 if (!vectype)
6287 if (dump_enabled_p ())
6288 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6289 "no vectype for scalar type %T\n",
6290 TREE_TYPE (op0));
6292 return false;
6295 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
6296 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
6297 if (maybe_ne (nunits_out, nunits_in))
6298 return false;
6300 tree vectype2 = NULL_TREE, vectype3 = NULL_TREE;
6301 slp_tree slp_op1 = NULL, slp_op2 = NULL;
6302 if (op_type == binary_op || op_type == ternary_op)
6304 if (!vect_is_simple_use (vinfo, stmt_info, slp_node,
6305 1, &op1, &slp_op1, &dt[1], &vectype2))
6307 if (dump_enabled_p ())
6308 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6309 "use not simple.\n");
6310 return false;
6312 is_invariant &= (dt[1] == vect_external_def
6313 || dt[1] == vect_constant_def);
6314 if (vectype2
6315 && maybe_ne (nunits_out, TYPE_VECTOR_SUBPARTS (vectype2)))
6316 return false;
6318 if (op_type == ternary_op)
6320 if (!vect_is_simple_use (vinfo, stmt_info, slp_node,
6321 2, &op2, &slp_op2, &dt[2], &vectype3))
6323 if (dump_enabled_p ())
6324 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6325 "use not simple.\n");
6326 return false;
6328 is_invariant &= (dt[2] == vect_external_def
6329 || dt[2] == vect_constant_def);
6330 if (vectype3
6331 && maybe_ne (nunits_out, TYPE_VECTOR_SUBPARTS (vectype3)))
6332 return false;
6335 /* Multiple types in SLP are handled by creating the appropriate number of
6336 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
6337 case of SLP. */
6338 if (slp_node)
6340 ncopies = 1;
6341 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6343 else
6345 ncopies = vect_get_num_copies (loop_vinfo, vectype);
6346 vec_num = 1;
6349 gcc_assert (ncopies >= 1);
6351 /* Reject attempts to combine mask types with nonmask types, e.g. if
6352 we have an AND between a (nonmask) boolean loaded from memory and
6353 a (mask) boolean result of a comparison.
6355 TODO: We could easily fix these cases up using pattern statements. */
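  /* A sketch of the rejected mix (hypothetical user code):

       _Bool *seen;
       ...
       if (seen[i] && a[i] < b[i])
         ...

     After if-conversion this is a BIT_AND of seen[i], a data (nonmask)
     boolean loaded from memory, with the mask boolean produced by the
     comparison, i.e. two different vector representations.  */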
6356 if (VECTOR_BOOLEAN_TYPE_P (vectype) != mask_op_p
6357 || (vectype2 && VECTOR_BOOLEAN_TYPE_P (vectype2) != mask_op_p)
6358 || (vectype3 && VECTOR_BOOLEAN_TYPE_P (vectype3) != mask_op_p))
6360 if (dump_enabled_p ())
6361 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6362 "mixed mask and nonmask vector types\n");
6363 return false;
6366 /* Supportable by target? */
6368 vec_mode = TYPE_MODE (vectype);
6369 if (code == MULT_HIGHPART_EXPR)
6370 target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
6371 else
6373 optab = optab_for_tree_code (code, vectype, optab_default);
6374 if (!optab)
6376 if (dump_enabled_p ())
6377 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6378 "no optab.\n");
6379 return false;
6381 target_support_p = (optab_handler (optab, vec_mode)
6382 != CODE_FOR_nothing);
6383 tree cst;
6384 if (!target_support_p
6385 && op1
6386 && (cst = uniform_integer_cst_p (op1)))
6387 target_support_p
6388 = targetm.vectorize.can_special_div_by_const (code, vectype,
6389 wi::to_wide (cst),
6390 NULL, NULL_RTX,
6391 NULL_RTX);
6394 bool using_emulated_vectors_p = vect_emulated_vector_p (vectype);
6395 if (!target_support_p)
6397 if (dump_enabled_p ())
6398 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6399 "op not supported by target.\n");
6400 /* Check only during analysis. */
6401 if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
6402 || (!vec_stmt && !vect_can_vectorize_without_simd_p (code)))
6403 return false;
6404 if (dump_enabled_p ())
6405 dump_printf_loc (MSG_NOTE, vect_location,
6406 "proceeding using word mode.\n");
6407 using_emulated_vectors_p = true;
6410 if (using_emulated_vectors_p
6411 && !vect_can_vectorize_without_simd_p (code))
6413 if (dump_enabled_p ())
6414 dump_printf (MSG_NOTE, "using word mode not possible.\n");
6415 return false;
6418 /* ??? We should instead expand the operations here, rather than
6419 relying on vector lowering, which has a hard cap on the number
6420 of vector elements below which it performs elementwise operations. */
6421 if (using_emulated_vectors_p
6422 && (code == PLUS_EXPR || code == MINUS_EXPR || code == NEGATE_EXPR)
6423 && ((BITS_PER_WORD / vector_element_bits (vectype)) < 4
6424 || maybe_lt (nunits_out, 4U)))
6426 if (dump_enabled_p ())
6427 dump_printf (MSG_NOTE, "not using word mode for +- and less than "
6428 "four vector elements\n");
6429 return false;
6432 int reduc_idx = STMT_VINFO_REDUC_IDX (stmt_info);
6433 vec_loop_masks *masks = (loop_vinfo ? &LOOP_VINFO_MASKS (loop_vinfo) : NULL);
6434 internal_fn cond_fn = get_conditional_internal_fn (code);
6436 /* If operating on inactive elements could generate spurious traps,
6437 we need to restrict the operation to active lanes. Note that this
6438 specifically doesn't apply to unhoisted invariants, since they
6439 operate on the same value for every lane.
6441 Similarly, if this operation is part of a reduction, a fully-masked
6442 loop should only change the active lanes of the reduction chain,
6443 keeping the inactive lanes as-is. */
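  /* For instance, a division whose divisor varies per lane could trap on
     whatever an inactive lane happens to contain, so it must be restricted
     to the active lanes; an unhoisted invariant divisor is the same for
     every lane and cannot introduce a trap the scalar loop would not also
     have.  */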
6444 bool mask_out_inactive = ((!is_invariant && gimple_could_trap_p (stmt))
6445 || reduc_idx >= 0);
6447 if (!vec_stmt) /* transformation not required. */
6449 if (loop_vinfo
6450 && LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo)
6451 && mask_out_inactive)
6453 if (cond_fn == IFN_LAST
6454 || !direct_internal_fn_supported_p (cond_fn, vectype,
6455 OPTIMIZE_FOR_SPEED))
6457 if (dump_enabled_p ())
6458 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6459 "can't use a fully-masked loop because no"
6460 " conditional operation is available.\n");
6461 LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) = false;
6463 else
6464 vect_record_loop_mask (loop_vinfo, masks, ncopies * vec_num,
6465 vectype, NULL);
6468 /* Put types on constant and invariant SLP children. */
6469 if (slp_node
6470 && (!vect_maybe_update_slp_op_vectype (slp_op0, vectype)
6471 || !vect_maybe_update_slp_op_vectype (slp_op1, vectype)
6472 || !vect_maybe_update_slp_op_vectype (slp_op2, vectype)))
6474 if (dump_enabled_p ())
6475 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6476 "incompatible vector types for invariants\n");
6477 return false;
6480 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
6481 DUMP_VECT_SCOPE ("vectorizable_operation");
6482 vect_model_simple_cost (vinfo, stmt_info,
6483 ncopies, dt, ndts, slp_node, cost_vec);
6484 if (using_emulated_vectors_p)
6486 /* The above vect_model_simple_cost call handles constants
6487 in the prologue and (mis-)costs one of the stmts as
6488 a vector stmt. See tree-vect-generic.cc:do_plus_minus/do_negate
6489 for the actual lowering that will be applied. */
6490 unsigned n
6491 = slp_node ? SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) : ncopies;
6492 switch (code)
6494 case PLUS_EXPR:
6495 n *= 5;
6496 break;
6497 case MINUS_EXPR:
6498 n *= 6;
6499 break;
6500 case NEGATE_EXPR:
6501 n *= 4;
6502 break;
6503 default:;
6505 record_stmt_cost (cost_vec, n, scalar_stmt, stmt_info, 0, vect_body);
6507 return true;
6510 /* Transform. */
6512 if (dump_enabled_p ())
6513 dump_printf_loc (MSG_NOTE, vect_location,
6514 "transform binary/unary operation.\n");
6516 bool masked_loop_p = loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
6518 /* POINTER_DIFF_EXPR has pointer arguments which are vectorized as
6519 vectors with unsigned elements, but the result is signed. So, we
6520 need to compute the MINUS_EXPR into vectype temporary and
6521 VIEW_CONVERT_EXPR it into the final vectype_out result. */
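  /* Illustrative example: for ptrdiff_t d = p - q the subtraction is
     carried out in the unsigned-element VECTYPE and the signed VECTYPE_OUT
     result is then produced by the VIEW_CONVERT_EXPR emitted below.  */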
6522 tree vec_cvt_dest = NULL_TREE;
6523 if (orig_code == POINTER_DIFF_EXPR)
6525 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6526 vec_cvt_dest = vect_create_destination_var (scalar_dest, vectype_out);
6528 /* Handle def. */
6529 else
6530 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
6532 /* In case the vectorization factor (VF) is bigger than the number
6533 of elements that we can fit in a vectype (nunits), we have to generate
6534 more than one vector stmt, i.e., we need to "unroll" the
6535 vector stmt by a factor VF/nunits. In doing so, we record a pointer
6536 from one copy of the vector stmt to the next, in the field
6537 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
6538 stages to find the correct vector defs to be used when vectorizing
6539 stmts that use the defs of the current stmt. The example below
6540 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
6541 we need to create 4 vectorized stmts):
6543 before vectorization:
6544 RELATED_STMT VEC_STMT
6545 S1: x = memref - -
6546 S2: z = x + 1 - -
6548 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
6549 there):
6550 RELATED_STMT VEC_STMT
6551 VS1_0: vx0 = memref0 VS1_1 -
6552 VS1_1: vx1 = memref1 VS1_2 -
6553 VS1_2: vx2 = memref2 VS1_3 -
6554 VS1_3: vx3 = memref3 - -
6555 S1: x = load - VS1_0
6556 S2: z = x + 1 - -
6558 step2: vectorize stmt S2 (done here):
6559 To vectorize stmt S2 we first need to find the relevant vector
6560 def for the first operand 'x'. This is, as usual, obtained from
6561 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
6562 that defines 'x' (S1). This way we find the stmt VS1_0, and the
6563 relevant vector def 'vx0'. Having found 'vx0' we can generate
6564 the vector stmt VS2_0, and as usual, record it in the
6565 STMT_VINFO_VEC_STMT of stmt S2.
6566 When creating the second copy (VS2_1), we obtain the relevant vector
6567 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
6568 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
6569 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
6570 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
6571 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
6572 chain of stmts and pointers:
6573 RELATED_STMT VEC_STMT
6574 VS1_0: vx0 = memref0 VS1_1 -
6575 VS1_1: vx1 = memref1 VS1_2 -
6576 VS1_2: vx2 = memref2 VS1_3 -
6577 VS1_3: vx3 = memref3 - -
6578 S1: x = load - VS1_0
6579 VS2_0: vz0 = vx0 + v1 VS2_1 -
6580 VS2_1: vz1 = vx1 + v1 VS2_2 -
6581 VS2_2: vz2 = vx2 + v1 VS2_3 -
6582 VS2_3: vz3 = vx3 + v1 - -
6583 S2: z = x + 1 - VS2_0 */
6585 vect_get_vec_defs (vinfo, stmt_info, slp_node, ncopies,
6586 op0, &vec_oprnds0, op1, &vec_oprnds1, op2, &vec_oprnds2);
6587 /* Arguments are ready. Create the new vector stmt. */
6588 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
6590 gimple *new_stmt = NULL;
6591 vop1 = ((op_type == binary_op || op_type == ternary_op)
6592 ? vec_oprnds1[i] : NULL_TREE);
6593 vop2 = ((op_type == ternary_op) ? vec_oprnds2[i] : NULL_TREE);
6594 if (masked_loop_p && mask_out_inactive)
6596 tree mask = vect_get_loop_mask (gsi, masks, vec_num * ncopies,
6597 vectype, i);
6598 auto_vec<tree> vops (5);
6599 vops.quick_push (mask);
6600 vops.quick_push (vop0);
6601 if (vop1)
6602 vops.quick_push (vop1);
6603 if (vop2)
6604 vops.quick_push (vop2);
6605 if (reduc_idx >= 0)
6607 /* Perform the operation on active elements only and take
6608 inactive elements from the reduction chain input. */
6609 gcc_assert (!vop2);
6610 vops.quick_push (reduc_idx == 1 ? vop1 : vop0);
6612 else
6614 auto else_value = targetm.preferred_else_value
6615 (cond_fn, vectype, vops.length () - 1, &vops[1]);
6616 vops.quick_push (else_value);
6618 gcall *call = gimple_build_call_internal_vec (cond_fn, vops);
6619 new_temp = make_ssa_name (vec_dest, call);
6620 gimple_call_set_lhs (call, new_temp);
6621 gimple_call_set_nothrow (call, true);
6622 vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
6623 new_stmt = call;
6625 else
6627 tree mask = NULL_TREE;
6628 /* When combining two masks check if either of them is elsewhere
6629 combined with a loop mask, if that's the case we can mark that the
6630 new combined mask doesn't need to be combined with a loop mask. */
6631 if (masked_loop_p
6632 && code == BIT_AND_EXPR
6633 && VECTOR_BOOLEAN_TYPE_P (vectype))
6635 if (loop_vinfo->scalar_cond_masked_set.contains ({ op0,
6636 ncopies}))
6638 mask = vect_get_loop_mask (gsi, masks, vec_num * ncopies,
6639 vectype, i);
6641 vop0 = prepare_vec_mask (loop_vinfo, TREE_TYPE (mask), mask,
6642 vop0, gsi);
6645 if (loop_vinfo->scalar_cond_masked_set.contains ({ op1,
6646 ncopies }))
6648 mask = vect_get_loop_mask (gsi, masks, vec_num * ncopies,
6649 vectype, i);
6651 vop1 = prepare_vec_mask (loop_vinfo, TREE_TYPE (mask), mask,
6652 vop1, gsi);
6656 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2);
6657 new_temp = make_ssa_name (vec_dest, new_stmt);
6658 gimple_assign_set_lhs (new_stmt, new_temp);
6659 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
6660 if (using_emulated_vectors_p)
6661 suppress_warning (new_stmt, OPT_Wvector_operation_performance);
6663 /* Enter the combined value into the vector cond hash so we don't
6664 AND it with a loop mask again. */
6665 if (mask)
6666 loop_vinfo->vec_cond_masked_set.add ({ new_temp, mask });
6668 if (vec_cvt_dest)
6670 new_temp = build1 (VIEW_CONVERT_EXPR, vectype_out, new_temp);
6671 new_stmt = gimple_build_assign (vec_cvt_dest, VIEW_CONVERT_EXPR,
6672 new_temp);
6673 new_temp = make_ssa_name (vec_cvt_dest, new_stmt);
6674 gimple_assign_set_lhs (new_stmt, new_temp);
6675 vect_finish_stmt_generation (vinfo, stmt_info,
6676 new_stmt, gsi);
6679 if (slp_node)
6680 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
6681 else
6682 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
6685 if (!slp_node)
6686 *vec_stmt = STMT_VINFO_VEC_STMTS (stmt_info)[0];
6688 vec_oprnds0.release ();
6689 vec_oprnds1.release ();
6690 vec_oprnds2.release ();
6692 return true;
6695 /* A helper function to ensure data reference DR_INFO's base alignment. */
6697 static void
6698 ensure_base_align (dr_vec_info *dr_info)
6700 /* Alignment is only analyzed for the first element of a DR group;
6701 use that to determine the base alignment we need to enforce. */
6702 if (STMT_VINFO_GROUPED_ACCESS (dr_info->stmt))
6703 dr_info = STMT_VINFO_DR_INFO (DR_GROUP_FIRST_ELEMENT (dr_info->stmt));
6705 gcc_assert (dr_info->misalignment != DR_MISALIGNMENT_UNINITIALIZED);
6707 if (dr_info->base_misaligned)
6709 tree base_decl = dr_info->base_decl;
6711 // We should only be able to increase the alignment of a base object if
6712 // we know what its new alignment should be at compile time.
6713 unsigned HOST_WIDE_INT align_base_to =
6714 DR_TARGET_ALIGNMENT (dr_info).to_constant () * BITS_PER_UNIT;
6716 if (decl_in_symtab_p (base_decl))
6717 symtab_node::get (base_decl)->increase_alignment (align_base_to);
6718 else if (DECL_ALIGN (base_decl) < align_base_to)
6720 SET_DECL_ALIGN (base_decl, align_base_to);
6721 DECL_USER_ALIGN (base_decl) = 1;
6723 dr_info->base_misaligned = false;
6728 /* Function get_group_alias_ptr_type.
6730 Return the alias type for the group starting at FIRST_STMT_INFO. */
6732 static tree
6733 get_group_alias_ptr_type (stmt_vec_info first_stmt_info)
6735 struct data_reference *first_dr, *next_dr;
6737 first_dr = STMT_VINFO_DATA_REF (first_stmt_info);
6738 stmt_vec_info next_stmt_info = DR_GROUP_NEXT_ELEMENT (first_stmt_info);
6739 while (next_stmt_info)
6741 next_dr = STMT_VINFO_DATA_REF (next_stmt_info);
6742 if (get_alias_set (DR_REF (first_dr))
6743 != get_alias_set (DR_REF (next_dr)))
6745 if (dump_enabled_p ())
6746 dump_printf_loc (MSG_NOTE, vect_location,
6747 "conflicting alias set types.\n");
6748 return ptr_type_node;
6750 next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
6752 return reference_alias_ptr_type (DR_REF (first_dr));
6756 /* Function scan_operand_equal_p.
6758 Helper function for check_scan_store. Compare two references
6759 with .GOMP_SIMD_LANE bases. */
6761 static bool
6762 scan_operand_equal_p (tree ref1, tree ref2)
6764 tree ref[2] = { ref1, ref2 };
6765 poly_int64 bitsize[2], bitpos[2];
6766 tree offset[2], base[2];
6767 for (int i = 0; i < 2; ++i)
6769 machine_mode mode;
6770 int unsignedp, reversep, volatilep = 0;
6771 base[i] = get_inner_reference (ref[i], &bitsize[i], &bitpos[i],
6772 &offset[i], &mode, &unsignedp,
6773 &reversep, &volatilep);
6774 if (reversep || volatilep || maybe_ne (bitpos[i], 0))
6775 return false;
6776 if (TREE_CODE (base[i]) == MEM_REF
6777 && offset[i] == NULL_TREE
6778 && TREE_CODE (TREE_OPERAND (base[i], 0)) == SSA_NAME)
6780 gimple *def_stmt = SSA_NAME_DEF_STMT (TREE_OPERAND (base[i], 0));
6781 if (is_gimple_assign (def_stmt)
6782 && gimple_assign_rhs_code (def_stmt) == POINTER_PLUS_EXPR
6783 && TREE_CODE (gimple_assign_rhs1 (def_stmt)) == ADDR_EXPR
6784 && TREE_CODE (gimple_assign_rhs2 (def_stmt)) == SSA_NAME)
6786 if (maybe_ne (mem_ref_offset (base[i]), 0))
6787 return false;
6788 base[i] = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 0);
6789 offset[i] = gimple_assign_rhs2 (def_stmt);
6794 if (!operand_equal_p (base[0], base[1], 0))
6795 return false;
6796 if (maybe_ne (bitsize[0], bitsize[1]))
6797 return false;
6798 if (offset[0] != offset[1])
6800 if (!offset[0] || !offset[1])
6801 return false;
6802 if (!operand_equal_p (offset[0], offset[1], 0))
6804 tree step[2];
6805 for (int i = 0; i < 2; ++i)
6807 step[i] = integer_one_node;
6808 if (TREE_CODE (offset[i]) == SSA_NAME)
6810 gimple *def_stmt = SSA_NAME_DEF_STMT (offset[i]);
6811 if (is_gimple_assign (def_stmt)
6812 && gimple_assign_rhs_code (def_stmt) == MULT_EXPR
6813 && (TREE_CODE (gimple_assign_rhs2 (def_stmt))
6814 == INTEGER_CST))
6816 step[i] = gimple_assign_rhs2 (def_stmt);
6817 offset[i] = gimple_assign_rhs1 (def_stmt);
6820 else if (TREE_CODE (offset[i]) == MULT_EXPR)
6822 step[i] = TREE_OPERAND (offset[i], 1);
6823 offset[i] = TREE_OPERAND (offset[i], 0);
6825 tree rhs1 = NULL_TREE;
6826 if (TREE_CODE (offset[i]) == SSA_NAME)
6828 gimple *def_stmt = SSA_NAME_DEF_STMT (offset[i]);
6829 if (gimple_assign_cast_p (def_stmt))
6830 rhs1 = gimple_assign_rhs1 (def_stmt);
6832 else if (CONVERT_EXPR_P (offset[i]))
6833 rhs1 = TREE_OPERAND (offset[i], 0);
6834 if (rhs1
6835 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
6836 && INTEGRAL_TYPE_P (TREE_TYPE (offset[i]))
6837 && (TYPE_PRECISION (TREE_TYPE (offset[i]))
6838 >= TYPE_PRECISION (TREE_TYPE (rhs1))))
6839 offset[i] = rhs1;
6841 if (!operand_equal_p (offset[0], offset[1], 0)
6842 || !operand_equal_p (step[0], step[1], 0))
6843 return false;
6846 return true;
6850 enum scan_store_kind {
6851 /* Normal permutation. */
6852 scan_store_kind_perm,
6854 /* Whole vector left shift permutation with zero init. */
6855 scan_store_kind_lshift_zero,
6857 /* Whole vector left shift permutation and VEC_COND_EXPR. */
6858 scan_store_kind_lshift_cond
6861 /* Function scan_store_can_perm_p.
6863 Verify if we can perform the needed permutations or whole vector shifts.
6864 Return -1 on failure, otherwise exact log2 of vectype's nunits.
6865 USE_WHOLE_VECTOR, if non-NULL, is filled with the scan_store_kind
6866 operation to use at each step. */
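/* A rough illustration for nunits == 8 (a Hillis-Steele style inclusive
   scan): the loop below probes one permutation or whole-vector shift per
   step,

     step 0: combine with the vector shifted up by 1 lane
     step 1: combine with the vector shifted up by 2 lanes
     step 2: combine with the vector shifted up by 4 lanes
     final : broadcast the last lane (the i == units_log2 permutation)

   and 3 (== units_log2) is returned if every step is supported.  */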
6868 static int
6869 scan_store_can_perm_p (tree vectype, tree init,
6870 vec<enum scan_store_kind> *use_whole_vector = NULL)
6872 enum machine_mode vec_mode = TYPE_MODE (vectype);
6873 unsigned HOST_WIDE_INT nunits;
6874 if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nunits))
6875 return -1;
6876 int units_log2 = exact_log2 (nunits);
6877 if (units_log2 <= 0)
6878 return -1;
6880 int i;
6881 enum scan_store_kind whole_vector_shift_kind = scan_store_kind_perm;
6882 for (i = 0; i <= units_log2; ++i)
6884 unsigned HOST_WIDE_INT j, k;
6885 enum scan_store_kind kind = scan_store_kind_perm;
6886 vec_perm_builder sel (nunits, nunits, 1);
6887 sel.quick_grow (nunits);
6888 if (i == units_log2)
6890 for (j = 0; j < nunits; ++j)
6891 sel[j] = nunits - 1;
6893 else
6895 for (j = 0; j < (HOST_WIDE_INT_1U << i); ++j)
6896 sel[j] = j;
6897 for (k = 0; j < nunits; ++j, ++k)
6898 sel[j] = nunits + k;
6900 vec_perm_indices indices (sel, i == units_log2 ? 1 : 2, nunits);
6901 if (!can_vec_perm_const_p (vec_mode, vec_mode, indices))
6903 if (i == units_log2)
6904 return -1;
6906 if (whole_vector_shift_kind == scan_store_kind_perm)
6908 if (optab_handler (vec_shl_optab, vec_mode) == CODE_FOR_nothing)
6909 return -1;
6910 whole_vector_shift_kind = scan_store_kind_lshift_zero;
6911 /* Whole vector shifts shift in zeros, so if init is an all-zero
6912 constant, there is no need to do anything further. */
6913 if ((TREE_CODE (init) != INTEGER_CST
6914 && TREE_CODE (init) != REAL_CST)
6915 || !initializer_zerop (init))
6917 tree masktype = truth_type_for (vectype);
6918 if (!expand_vec_cond_expr_p (vectype, masktype, VECTOR_CST))
6919 return -1;
6920 whole_vector_shift_kind = scan_store_kind_lshift_cond;
6923 kind = whole_vector_shift_kind;
6925 if (use_whole_vector)
6927 if (kind != scan_store_kind_perm && use_whole_vector->is_empty ())
6928 use_whole_vector->safe_grow_cleared (i, true);
6929 if (kind != scan_store_kind_perm || !use_whole_vector->is_empty ())
6930 use_whole_vector->safe_push (kind);
6934 return units_log2;
6938 /* Function check_scan_store.
6940 Check magic stores for #pragma omp scan {in,ex}clusive reductions. */
6942 static bool
6943 check_scan_store (vec_info *vinfo, stmt_vec_info stmt_info, tree vectype,
6944 enum vect_def_type rhs_dt, bool slp, tree mask,
6945 vect_memory_access_type memory_access_type)
6947 loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
6948 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
6949 tree ref_type;
6951 gcc_assert (STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) > 1);
6952 if (slp
6953 || mask
6954 || memory_access_type != VMAT_CONTIGUOUS
6955 || TREE_CODE (DR_BASE_ADDRESS (dr_info->dr)) != ADDR_EXPR
6956 || !VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (dr_info->dr), 0))
6957 || loop_vinfo == NULL
6958 || LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
6959 || STMT_VINFO_GROUPED_ACCESS (stmt_info)
6960 || !integer_zerop (get_dr_vinfo_offset (vinfo, dr_info))
6961 || !integer_zerop (DR_INIT (dr_info->dr))
6962 || !(ref_type = reference_alias_ptr_type (DR_REF (dr_info->dr)))
6963 || !alias_sets_conflict_p (get_alias_set (vectype),
6964 get_alias_set (TREE_TYPE (ref_type))))
6966 if (dump_enabled_p ())
6967 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6968 "unsupported OpenMP scan store.\n");
6969 return false;
6972 /* We need to pattern match code built by OpenMP lowering and simplified
6973 by following optimizations into something we can handle.
6974 #pragma omp simd reduction(inscan,+:r)
6975 for (...)
6977 r += something ();
6978 #pragma omp scan inclusive (r)
6979 use (r);
6981 shall have body with:
6982 // Initialization for input phase, store the reduction initializer:
6983 _20 = .GOMP_SIMD_LANE (simduid.3_14(D), 0);
6984 _21 = .GOMP_SIMD_LANE (simduid.3_14(D), 1);
6985 D.2042[_21] = 0;
6986 // Actual input phase:
6988 r.0_5 = D.2042[_20];
6989 _6 = _4 + r.0_5;
6990 D.2042[_20] = _6;
6991 // Initialization for scan phase:
6992 _25 = .GOMP_SIMD_LANE (simduid.3_14(D), 2);
6993 _26 = D.2043[_25];
6994 _27 = D.2042[_25];
6995 _28 = _26 + _27;
6996 D.2043[_25] = _28;
6997 D.2042[_25] = _28;
6998 // Actual scan phase:
7000 r.1_8 = D.2042[_20];
7002 The "omp simd array" variable D.2042 holds the privatized copy used
7003 inside the loop and D.2043 is another one that holds copies of
7004 the current original list item. The separate GOMP_SIMD_LANE ifn
7005 kinds are there in order to allow optimizing the initializer store
7006 and combiner sequence, e.g. if it is originally some C++ish user
7007 defined reduction, but allow the vectorizer to pattern recognize it
7008 and turn it into the appropriate vectorized scan.
7010 For exclusive scan, this is slightly different:
7011 #pragma omp simd reduction(inscan,+:r)
7012 for (...)
7014 use (r);
7015 #pragma omp scan exclusive (r)
7016 r += something ();
7018 shall have body with:
7019 // Initialization for input phase, store the reduction initializer:
7020 _20 = .GOMP_SIMD_LANE (simduid.3_14(D), 0);
7021 _21 = .GOMP_SIMD_LANE (simduid.3_14(D), 1);
7022 D.2042[_21] = 0;
7023 // Actual input phase:
7025 r.0_5 = D.2042[_20];
7026 _6 = _4 + r.0_5;
7027 D.2042[_20] = _6;
7028 // Initialization for scan phase:
7029 _25 = .GOMP_SIMD_LANE (simduid.3_14(D), 3);
7030 _26 = D.2043[_25];
7031 D.2044[_25] = _26;
7032 _27 = D.2042[_25];
7033 _28 = _26 + _27;
7034 D.2043[_25] = _28;
7035 // Actual scan phase:
7037 r.1_8 = D.2044[_20];
7038 ... */
7040 if (STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) == 2)
7042 /* Match the D.2042[_21] = 0; store above. Just require that
7043 it is a constant or external definition store. */
7044 if (rhs_dt != vect_constant_def && rhs_dt != vect_external_def)
7046 fail_init:
7047 if (dump_enabled_p ())
7048 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7049 "unsupported OpenMP scan initializer store.\n");
7050 return false;
7053 if (! loop_vinfo->scan_map)
7054 loop_vinfo->scan_map = new hash_map<tree, tree>;
7055 tree var = TREE_OPERAND (DR_BASE_ADDRESS (dr_info->dr), 0);
7056 tree &cached = loop_vinfo->scan_map->get_or_insert (var);
7057 if (cached)
7058 goto fail_init;
7059 cached = gimple_assign_rhs1 (STMT_VINFO_STMT (stmt_info));
7061 /* These stores can be vectorized normally. */
7062 return true;
7065 if (rhs_dt != vect_internal_def)
7067 fail:
7068 if (dump_enabled_p ())
7069 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7070 "unsupported OpenMP scan combiner pattern.\n");
7071 return false;
7074 gimple *stmt = STMT_VINFO_STMT (stmt_info);
7075 tree rhs = gimple_assign_rhs1 (stmt);
7076 if (TREE_CODE (rhs) != SSA_NAME)
7077 goto fail;
7079 gimple *other_store_stmt = NULL;
7080 tree var = TREE_OPERAND (DR_BASE_ADDRESS (dr_info->dr), 0);
7081 bool inscan_var_store
7082 = lookup_attribute ("omp simd inscan", DECL_ATTRIBUTES (var)) != NULL;
7084 if (STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) == 4)
7086 if (!inscan_var_store)
7088 use_operand_p use_p;
7089 imm_use_iterator iter;
7090 FOR_EACH_IMM_USE_FAST (use_p, iter, rhs)
7092 gimple *use_stmt = USE_STMT (use_p);
7093 if (use_stmt == stmt || is_gimple_debug (use_stmt))
7094 continue;
7095 if (gimple_bb (use_stmt) != gimple_bb (stmt)
7096 || !is_gimple_assign (use_stmt)
7097 || gimple_assign_rhs_class (use_stmt) != GIMPLE_BINARY_RHS
7098 || other_store_stmt
7099 || TREE_CODE (gimple_assign_lhs (use_stmt)) != SSA_NAME)
7100 goto fail;
7101 other_store_stmt = use_stmt;
7103 if (other_store_stmt == NULL)
7104 goto fail;
7105 rhs = gimple_assign_lhs (other_store_stmt);
7106 if (!single_imm_use (rhs, &use_p, &other_store_stmt))
7107 goto fail;
7110 else if (STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) == 3)
7112 use_operand_p use_p;
7113 imm_use_iterator iter;
7114 FOR_EACH_IMM_USE_FAST (use_p, iter, rhs)
7116 gimple *use_stmt = USE_STMT (use_p);
7117 if (use_stmt == stmt || is_gimple_debug (use_stmt))
7118 continue;
7119 if (other_store_stmt)
7120 goto fail;
7121 other_store_stmt = use_stmt;
7124 else
7125 goto fail;
7127 gimple *def_stmt = SSA_NAME_DEF_STMT (rhs);
7128 if (gimple_bb (def_stmt) != gimple_bb (stmt)
7129 || !is_gimple_assign (def_stmt)
7130 || gimple_assign_rhs_class (def_stmt) != GIMPLE_BINARY_RHS)
7131 goto fail;
7133 enum tree_code code = gimple_assign_rhs_code (def_stmt);
7134 /* For pointer addition, we should use the normal plus for the vector
7135 operation. */
7136 switch (code)
7138 case POINTER_PLUS_EXPR:
7139 code = PLUS_EXPR;
7140 break;
7141 case MULT_HIGHPART_EXPR:
7142 goto fail;
7143 default:
7144 break;
7146 if (TREE_CODE_LENGTH (code) != binary_op || !commutative_tree_code (code))
7147 goto fail;
7149 tree rhs1 = gimple_assign_rhs1 (def_stmt);
7150 tree rhs2 = gimple_assign_rhs2 (def_stmt);
7151 if (TREE_CODE (rhs1) != SSA_NAME || TREE_CODE (rhs2) != SSA_NAME)
7152 goto fail;
7154 gimple *load1_stmt = SSA_NAME_DEF_STMT (rhs1);
7155 gimple *load2_stmt = SSA_NAME_DEF_STMT (rhs2);
7156 if (gimple_bb (load1_stmt) != gimple_bb (stmt)
7157 || !gimple_assign_load_p (load1_stmt)
7158 || gimple_bb (load2_stmt) != gimple_bb (stmt)
7159 || !gimple_assign_load_p (load2_stmt))
7160 goto fail;
7162 stmt_vec_info load1_stmt_info = loop_vinfo->lookup_stmt (load1_stmt);
7163 stmt_vec_info load2_stmt_info = loop_vinfo->lookup_stmt (load2_stmt);
7164 if (load1_stmt_info == NULL
7165 || load2_stmt_info == NULL
7166 || (STMT_VINFO_SIMD_LANE_ACCESS_P (load1_stmt_info)
7167 != STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info))
7168 || (STMT_VINFO_SIMD_LANE_ACCESS_P (load2_stmt_info)
7169 != STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info)))
7170 goto fail;
7172 if (STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) == 4 && inscan_var_store)
7174 dr_vec_info *load1_dr_info = STMT_VINFO_DR_INFO (load1_stmt_info);
7175 if (TREE_CODE (DR_BASE_ADDRESS (load1_dr_info->dr)) != ADDR_EXPR
7176 || !VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (load1_dr_info->dr), 0)))
7177 goto fail;
7178 tree var1 = TREE_OPERAND (DR_BASE_ADDRESS (load1_dr_info->dr), 0);
7179 tree lrhs;
7180 if (lookup_attribute ("omp simd inscan", DECL_ATTRIBUTES (var1)))
7181 lrhs = rhs1;
7182 else
7183 lrhs = rhs2;
7184 use_operand_p use_p;
7185 imm_use_iterator iter;
7186 FOR_EACH_IMM_USE_FAST (use_p, iter, lrhs)
7188 gimple *use_stmt = USE_STMT (use_p);
7189 if (use_stmt == def_stmt || is_gimple_debug (use_stmt))
7190 continue;
7191 if (other_store_stmt)
7192 goto fail;
7193 other_store_stmt = use_stmt;
7197 if (other_store_stmt == NULL)
7198 goto fail;
7199 if (gimple_bb (other_store_stmt) != gimple_bb (stmt)
7200 || !gimple_store_p (other_store_stmt))
7201 goto fail;
7203 stmt_vec_info other_store_stmt_info
7204 = loop_vinfo->lookup_stmt (other_store_stmt);
7205 if (other_store_stmt_info == NULL
7206 || (STMT_VINFO_SIMD_LANE_ACCESS_P (other_store_stmt_info)
7207 != STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info)))
7208 goto fail;
7210 gimple *stmt1 = stmt;
7211 gimple *stmt2 = other_store_stmt;
7212 if (STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) == 4 && !inscan_var_store)
7213 std::swap (stmt1, stmt2);
7214 if (scan_operand_equal_p (gimple_assign_lhs (stmt1),
7215 gimple_assign_rhs1 (load2_stmt)))
7217 std::swap (rhs1, rhs2);
7218 std::swap (load1_stmt, load2_stmt);
7219 std::swap (load1_stmt_info, load2_stmt_info);
7221 if (!scan_operand_equal_p (gimple_assign_lhs (stmt1),
7222 gimple_assign_rhs1 (load1_stmt)))
7223 goto fail;
7225 tree var3 = NULL_TREE;
7226 if (STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) == 3
7227 && !scan_operand_equal_p (gimple_assign_lhs (stmt2),
7228 gimple_assign_rhs1 (load2_stmt)))
7229 goto fail;
7230 else if (STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) == 4)
7232 dr_vec_info *load2_dr_info = STMT_VINFO_DR_INFO (load2_stmt_info);
7233 if (TREE_CODE (DR_BASE_ADDRESS (load2_dr_info->dr)) != ADDR_EXPR
7234 || !VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (load2_dr_info->dr), 0)))
7235 goto fail;
7236 var3 = TREE_OPERAND (DR_BASE_ADDRESS (load2_dr_info->dr), 0);
7237 if (!lookup_attribute ("omp simd array", DECL_ATTRIBUTES (var3))
7238 || lookup_attribute ("omp simd inscan", DECL_ATTRIBUTES (var3))
7239 || lookup_attribute ("omp simd inscan exclusive",
7240 DECL_ATTRIBUTES (var3)))
7241 goto fail;
7244 dr_vec_info *other_dr_info = STMT_VINFO_DR_INFO (other_store_stmt_info);
7245 if (TREE_CODE (DR_BASE_ADDRESS (other_dr_info->dr)) != ADDR_EXPR
7246 || !VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (other_dr_info->dr), 0)))
7247 goto fail;
7249 tree var1 = TREE_OPERAND (DR_BASE_ADDRESS (dr_info->dr), 0);
7250 tree var2 = TREE_OPERAND (DR_BASE_ADDRESS (other_dr_info->dr), 0);
7251 if (!lookup_attribute ("omp simd array", DECL_ATTRIBUTES (var1))
7252 || !lookup_attribute ("omp simd array", DECL_ATTRIBUTES (var2))
7253 || (!lookup_attribute ("omp simd inscan", DECL_ATTRIBUTES (var1)))
7254 == (!lookup_attribute ("omp simd inscan", DECL_ATTRIBUTES (var2))))
7255 goto fail;
7257 if (lookup_attribute ("omp simd inscan", DECL_ATTRIBUTES (var1)))
7258 std::swap (var1, var2);
7260 if (STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) == 4)
7262 if (!lookup_attribute ("omp simd inscan exclusive",
7263 DECL_ATTRIBUTES (var1)))
7264 goto fail;
7265 var1 = var3;
7268 if (loop_vinfo->scan_map == NULL)
7269 goto fail;
7270 tree *init = loop_vinfo->scan_map->get (var1);
7271 if (init == NULL)
7272 goto fail;
7274 /* The IL is as expected, now check if we can actually vectorize it.
7275 Inclusive scan:
7276 _26 = D.2043[_25];
7277 _27 = D.2042[_25];
7278 _28 = _26 + _27;
7279 D.2043[_25] = _28;
7280 D.2042[_25] = _28;
7281 should be vectorized as (where _40 is the vectorized rhs
7282 from the D.2042[_21] = 0; store):
7283 _30 = MEM <vector(8) int> [(int *)&D.2043];
7284 _31 = MEM <vector(8) int> [(int *)&D.2042];
7285 _32 = VEC_PERM_EXPR <_40, _31, { 0, 8, 9, 10, 11, 12, 13, 14 }>;
7286 _33 = _31 + _32;
7287 // _33 = { _31[0], _31[0]+_31[1], _31[1]+_31[2], ..., _31[6]+_31[7] };
7288 _34 = VEC_PERM_EXPR <_40, _33, { 0, 1, 8, 9, 10, 11, 12, 13 }>;
7289 _35 = _33 + _34;
7290 // _35 = { _31[0], _31[0]+_31[1], _31[0]+.._31[2], _31[0]+.._31[3],
7291 // _31[1]+.._31[4], ... _31[4]+.._31[7] };
7292 _36 = VEC_PERM_EXPR <_40, _35, { 0, 1, 2, 3, 8, 9, 10, 11 }>;
7293 _37 = _35 + _36;
7294 // _37 = { _31[0], _31[0]+_31[1], _31[0]+.._31[2], _31[0]+.._31[3],
7295 // _31[0]+.._31[4], ... _31[0]+.._31[7] };
7296 _38 = _30 + _37;
7297 _39 = VEC_PERM_EXPR <_38, _38, { 7, 7, 7, 7, 7, 7, 7, 7 }>;
7298 MEM <vector(8) int> [(int *)&D.2043] = _39;
7299 MEM <vector(8) int> [(int *)&D.2042] = _38;
7300 Exclusive scan:
7301 _26 = D.2043[_25];
7302 D.2044[_25] = _26;
7303 _27 = D.2042[_25];
7304 _28 = _26 + _27;
7305 D.2043[_25] = _28;
7306 should be vectorized as (where _40 is the vectorized rhs
7307 from the D.2042[_21] = 0; store):
7308 _30 = MEM <vector(8) int> [(int *)&D.2043];
7309 _31 = MEM <vector(8) int> [(int *)&D.2042];
7310 _32 = VEC_PERM_EXPR <_40, _31, { 0, 8, 9, 10, 11, 12, 13, 14 }>;
7311 _33 = VEC_PERM_EXPR <_40, _32, { 0, 8, 9, 10, 11, 12, 13, 14 }>;
7312 _34 = _32 + _33;
7313 // _34 = { 0, _31[0], _31[0]+_31[1], _31[1]+_31[2], _31[2]+_31[3],
7314 // _31[3]+_31[4], ... _31[5]+.._31[6] };
7315 _35 = VEC_PERM_EXPR <_40, _34, { 0, 1, 8, 9, 10, 11, 12, 13 }>;
7316 _36 = _34 + _35;
7317 // _36 = { 0, _31[0], _31[0]+_31[1], _31[0]+.._31[2], _31[0]+.._31[3],
7318 // _31[1]+.._31[4], ... _31[3]+.._31[6] };
7319 _37 = VEC_PERM_EXPR <_40, _36, { 0, 1, 2, 3, 8, 9, 10, 11 }>;
7320 _38 = _36 + _37;
7321 // _38 = { 0, _31[0], _31[0]+_31[1], _31[0]+.._31[2], _31[0]+.._31[3],
7322 // _31[0]+.._31[4], ... _31[0]+.._31[6] };
7323 _39 = _30 + _38;
7324 _50 = _31 + _39;
7325 _51 = VEC_PERM_EXPR <_50, _50, { 7, 7, 7, 7, 7, 7, 7, 7 }>;
7326 MEM <vector(8) int> [(int *)&D.2044] = _39;
7327 MEM <vector(8) int> [(int *)&D.2042] = _51; */
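   /* As a concrete instance of the sequences above, assume an 8-lane int
      vector, _40 = { 0, ... } (the vectorized initializer), _30 = { 0, ... }
      (no carry-in yet) and _31 = { 1, 2, 3, 4, 5, 6, 7, 8 }.  For the
      inclusive scan the permute-and-add steps compute
	_32 = { 0, 1, 2, 3, 4, 5, 6, 7 }
	_33 = { 1, 3, 5, 7, 9, 11, 13, 15 }
	_34 = { 0, 0, 1, 3, 5, 7, 9, 11 }
	_35 = { 1, 3, 6, 10, 14, 18, 22, 26 }
	_36 = { 0, 0, 0, 0, 1, 3, 6, 10 }
	_37 = { 1, 3, 6, 10, 15, 21, 28, 36 }
      so after log2 (8) = 3 steps every lane holds the inclusive prefix sum;
      _38 adds the carry-in from D.2043 and _39 broadcasts the last lane as
      the carry-out for the next vector iteration.  For the exclusive scan
      the doubled initial permutation shifts everything one more lane, so
      _38 = { 0, 1, 3, 6, 10, 15, 21, 28 } and _50 recovers the inclusive
      sums used for the carry-out.  */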
7328 enum machine_mode vec_mode = TYPE_MODE (vectype);
7329 optab optab = optab_for_tree_code (code, vectype, optab_default);
7330 if (!optab || optab_handler (optab, vec_mode) == CODE_FOR_nothing)
7331 goto fail;
7333 int units_log2 = scan_store_can_perm_p (vectype, *init);
7334 if (units_log2 == -1)
7335 goto fail;
7337 return true;
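/* A minimal standalone sketch of the log2-step schedule that the checks
   above admit and that vectorizable_scan_store below emits; it is not
   used by the vectorizer and the name scan_sketch is purely illustrative.
   Each step shifts the lanes up by STEP positions, fills the vacated
   lanes with the reduction's initializer, and adds the shifted copy back
   in; after log2 (8) = 3 steps every lane holds the inclusive prefix sum
   of the original lanes.  */

static void ATTRIBUTE_UNUSED
scan_sketch (int lane[8], int init)
{
  for (int step = 1; step < 8; step *= 2)
    {
      int shifted[8];
      /* Shift lanes up by STEP, filling the low lanes with INIT.  */
      for (int j = 0; j < 8; j++)
	shifted[j] = j < step ? init : lane[j - step];
      /* Accumulate the shifted copy, doubling the scanned span.  */
      for (int j = 0; j < 8; j++)
	lane[j] += shifted[j];
    }
}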
7341 /* Function vectorizable_scan_store.
7343 Helper of vectorizable_store, arguments like on vectorizable_store.
7344 Handle only the transformation; checking is done in check_scan_store. */
7346 static bool
7347 vectorizable_scan_store (vec_info *vinfo,
7348 stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
7349 gimple **vec_stmt, int ncopies)
7351 loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
7352 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
7353 tree ref_type = reference_alias_ptr_type (DR_REF (dr_info->dr));
7354 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7356 if (dump_enabled_p ())
7357 dump_printf_loc (MSG_NOTE, vect_location,
7358 "transform scan store. ncopies = %d\n", ncopies);
7360 gimple *stmt = STMT_VINFO_STMT (stmt_info);
7361 tree rhs = gimple_assign_rhs1 (stmt);
7362 gcc_assert (TREE_CODE (rhs) == SSA_NAME);
7364 tree var = TREE_OPERAND (DR_BASE_ADDRESS (dr_info->dr), 0);
7365 bool inscan_var_store
7366 = lookup_attribute ("omp simd inscan", DECL_ATTRIBUTES (var)) != NULL;
7368 if (STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) == 4 && !inscan_var_store)
7370 use_operand_p use_p;
7371 imm_use_iterator iter;
7372 FOR_EACH_IMM_USE_FAST (use_p, iter, rhs)
7374 gimple *use_stmt = USE_STMT (use_p);
7375 if (use_stmt == stmt || is_gimple_debug (use_stmt))
7376 continue;
7377 rhs = gimple_assign_lhs (use_stmt);
7378 break;
7382 gimple *def_stmt = SSA_NAME_DEF_STMT (rhs);
7383 enum tree_code code = gimple_assign_rhs_code (def_stmt);
7384 if (code == POINTER_PLUS_EXPR)
7385 code = PLUS_EXPR;
7386 gcc_assert (TREE_CODE_LENGTH (code) == binary_op
7387 && commutative_tree_code (code));
7388 tree rhs1 = gimple_assign_rhs1 (def_stmt);
7389 tree rhs2 = gimple_assign_rhs2 (def_stmt);
7390 gcc_assert (TREE_CODE (rhs1) == SSA_NAME && TREE_CODE (rhs2) == SSA_NAME);
7391 gimple *load1_stmt = SSA_NAME_DEF_STMT (rhs1);
7392 gimple *load2_stmt = SSA_NAME_DEF_STMT (rhs2);
7393 stmt_vec_info load1_stmt_info = loop_vinfo->lookup_stmt (load1_stmt);
7394 stmt_vec_info load2_stmt_info = loop_vinfo->lookup_stmt (load2_stmt);
7395 dr_vec_info *load1_dr_info = STMT_VINFO_DR_INFO (load1_stmt_info);
7396 dr_vec_info *load2_dr_info = STMT_VINFO_DR_INFO (load2_stmt_info);
7397 tree var1 = TREE_OPERAND (DR_BASE_ADDRESS (load1_dr_info->dr), 0);
7398 tree var2 = TREE_OPERAND (DR_BASE_ADDRESS (load2_dr_info->dr), 0);
7400 if (lookup_attribute ("omp simd inscan", DECL_ATTRIBUTES (var1)))
7402 std::swap (rhs1, rhs2);
7403 std::swap (var1, var2);
7404 std::swap (load1_dr_info, load2_dr_info);
7407 tree *init = loop_vinfo->scan_map->get (var1);
7408 gcc_assert (init);
7410 unsigned HOST_WIDE_INT nunits;
7411 if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nunits))
7412 gcc_unreachable ();
7413 auto_vec<enum scan_store_kind, 16> use_whole_vector;
7414 int units_log2 = scan_store_can_perm_p (vectype, *init, &use_whole_vector);
7415 gcc_assert (units_log2 > 0);
7416 auto_vec<tree, 16> perms;
7417 perms.quick_grow (units_log2 + 1);
7418 tree zero_vec = NULL_TREE, masktype = NULL_TREE;
7419 for (int i = 0; i <= units_log2; ++i)
7421 unsigned HOST_WIDE_INT j, k;
7422 vec_perm_builder sel (nunits, nunits, 1);
7423 sel.quick_grow (nunits);
7424 if (i == units_log2)
7425 for (j = 0; j < nunits; ++j)
7426 sel[j] = nunits - 1;
7427 else
7429 for (j = 0; j < (HOST_WIDE_INT_1U << i); ++j)
7430 sel[j] = j;
7431 for (k = 0; j < nunits; ++j, ++k)
7432 sel[j] = nunits + k;
7434 vec_perm_indices indices (sel, i == units_log2 ? 1 : 2, nunits);
7435 if (!use_whole_vector.is_empty ()
7436 && use_whole_vector[i] != scan_store_kind_perm)
7438 if (zero_vec == NULL_TREE)
7439 zero_vec = build_zero_cst (vectype);
7440 if (masktype == NULL_TREE
7441 && use_whole_vector[i] == scan_store_kind_lshift_cond)
7442 masktype = truth_type_for (vectype);
7443 perms[i] = vect_gen_perm_mask_any (vectype, indices);
7445 else
7446 perms[i] = vect_gen_perm_mask_checked (vectype, indices);
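   /* With nunits == 8 the loop above builds exactly the masks shown in the
      example in check_scan_store: { 0, 8, 9, 10, 11, 12, 13, 14 },
      { 0, 1, 8, 9, 10, 11, 12, 13 }, { 0, 1, 2, 3, 8, 9, 10, 11 } and,
      for i == units_log2, { 7, 7, 7, 7, 7, 7, 7, 7 } used to broadcast
      the last lane.  */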
7449 tree vec_oprnd1 = NULL_TREE;
7450 tree vec_oprnd2 = NULL_TREE;
7451 tree vec_oprnd3 = NULL_TREE;
7452 tree dataref_ptr = DR_BASE_ADDRESS (dr_info->dr);
7453 tree dataref_offset = build_int_cst (ref_type, 0);
7454 tree bump = vect_get_data_ptr_increment (vinfo, dr_info,
7455 vectype, VMAT_CONTIGUOUS);
7456 tree ldataref_ptr = NULL_TREE;
7457 tree orig = NULL_TREE;
7458 if (STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) == 4 && !inscan_var_store)
7459 ldataref_ptr = DR_BASE_ADDRESS (load1_dr_info->dr);
7460 auto_vec<tree> vec_oprnds1;
7461 auto_vec<tree> vec_oprnds2;
7462 auto_vec<tree> vec_oprnds3;
7463 vect_get_vec_defs (vinfo, stmt_info, NULL, ncopies,
7464 *init, &vec_oprnds1,
7465 ldataref_ptr == NULL ? rhs1 : NULL, &vec_oprnds2,
7466 rhs2, &vec_oprnds3);
7467 for (int j = 0; j < ncopies; j++)
7469 vec_oprnd1 = vec_oprnds1[j];
7470 if (ldataref_ptr == NULL)
7471 vec_oprnd2 = vec_oprnds2[j];
7472 vec_oprnd3 = vec_oprnds3[j];
7473 if (j == 0)
7474 orig = vec_oprnd3;
7475 else if (!inscan_var_store)
7476 dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset, bump);
7478 if (ldataref_ptr)
7480 vec_oprnd2 = make_ssa_name (vectype);
7481 tree data_ref = fold_build2 (MEM_REF, vectype,
7482 unshare_expr (ldataref_ptr),
7483 dataref_offset);
7484 vect_copy_ref_info (data_ref, DR_REF (load1_dr_info->dr));
7485 gimple *g = gimple_build_assign (vec_oprnd2, data_ref);
7486 vect_finish_stmt_generation (vinfo, stmt_info, g, gsi);
7487 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (g);
7488 *vec_stmt = STMT_VINFO_VEC_STMTS (stmt_info)[0];
7491 tree v = vec_oprnd2;
7492 for (int i = 0; i < units_log2; ++i)
7494 tree new_temp = make_ssa_name (vectype);
7495 gimple *g = gimple_build_assign (new_temp, VEC_PERM_EXPR,
7496 (zero_vec
7497 && (use_whole_vector[i]
7498 != scan_store_kind_perm))
7499 ? zero_vec : vec_oprnd1, v,
7500 perms[i]);
7501 vect_finish_stmt_generation (vinfo, stmt_info, g, gsi);
7502 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (g);
7503 *vec_stmt = STMT_VINFO_VEC_STMTS (stmt_info)[0];
7505 if (zero_vec && use_whole_vector[i] == scan_store_kind_lshift_cond)
7507 /* The whole-vector shift shifted in zero bits, but if *init
7508 is not initializer_zerop, we need to replace those elements
7509 with elements from vec_oprnd1. */
7510 tree_vector_builder vb (masktype, nunits, 1);
7511 for (unsigned HOST_WIDE_INT k = 0; k < nunits; ++k)
7512 vb.quick_push (k < (HOST_WIDE_INT_1U << i)
7513 ? boolean_false_node : boolean_true_node);
7515 tree new_temp2 = make_ssa_name (vectype);
7516 g = gimple_build_assign (new_temp2, VEC_COND_EXPR, vb.build (),
7517 new_temp, vec_oprnd1);
7518 vect_finish_stmt_generation (vinfo, stmt_info,
7519 g, gsi);
7520 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (g);
7521 new_temp = new_temp2;
7524 /* For exclusive scan, perform the perms[i] permutation once
7525 more. */
7526 if (i == 0
7527 && STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) == 4
7528 && v == vec_oprnd2)
7530 v = new_temp;
7531 --i;
7532 continue;
7535 tree new_temp2 = make_ssa_name (vectype);
7536 g = gimple_build_assign (new_temp2, code, v, new_temp);
7537 vect_finish_stmt_generation (vinfo, stmt_info, g, gsi);
7538 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (g);
7540 v = new_temp2;
7543 tree new_temp = make_ssa_name (vectype);
7544 gimple *g = gimple_build_assign (new_temp, code, orig, v);
7545 vect_finish_stmt_generation (vinfo, stmt_info, g, gsi);
7546 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (g);
7548 tree last_perm_arg = new_temp;
7549 /* For exclusive scan, new_temp computed above is the exclusive scan
7550 prefix sum. Turn it into an inclusive prefix sum for the broadcast
7551 of the last element into orig. */
7552 if (STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) == 4)
7554 last_perm_arg = make_ssa_name (vectype);
7555 g = gimple_build_assign (last_perm_arg, code, new_temp, vec_oprnd2);
7556 vect_finish_stmt_generation (vinfo, stmt_info, g, gsi);
7557 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (g);
7560 orig = make_ssa_name (vectype);
7561 g = gimple_build_assign (orig, VEC_PERM_EXPR, last_perm_arg,
7562 last_perm_arg, perms[units_log2]);
7563 vect_finish_stmt_generation (vinfo, stmt_info, g, gsi);
7564 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (g);
7566 if (!inscan_var_store)
7568 tree data_ref = fold_build2 (MEM_REF, vectype,
7569 unshare_expr (dataref_ptr),
7570 dataref_offset);
7571 vect_copy_ref_info (data_ref, DR_REF (dr_info->dr));
7572 g = gimple_build_assign (data_ref, new_temp);
7573 vect_finish_stmt_generation (vinfo, stmt_info, g, gsi);
7574 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (g);
7578 if (inscan_var_store)
7579 for (int j = 0; j < ncopies; j++)
7581 if (j != 0)
7582 dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset, bump);
7584 tree data_ref = fold_build2 (MEM_REF, vectype,
7585 unshare_expr (dataref_ptr),
7586 dataref_offset);
7587 vect_copy_ref_info (data_ref, DR_REF (dr_info->dr));
7588 gimple *g = gimple_build_assign (data_ref, orig);
7589 vect_finish_stmt_generation (vinfo, stmt_info, g, gsi);
7590 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (g);
7592 return true;
7596 /* Function vectorizable_store.
7598 Check if STMT_INFO defines a non scalar data-ref (array/pointer/structure)
7599 that can be vectorized.
7600 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
7601 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
7602 Return true if STMT_INFO is vectorizable in this way. */
7604 static bool
7605 vectorizable_store (vec_info *vinfo,
7606 stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
7607 gimple **vec_stmt, slp_tree slp_node,
7608 stmt_vector_for_cost *cost_vec)
7610 tree data_ref;
7611 tree op;
7612 tree vec_oprnd = NULL_TREE;
7613 tree elem_type;
7614 loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
7615 class loop *loop = NULL;
7616 machine_mode vec_mode;
7617 tree dummy;
7618 enum vect_def_type rhs_dt = vect_unknown_def_type;
7619 enum vect_def_type mask_dt = vect_unknown_def_type;
7620 tree dataref_ptr = NULL_TREE;
7621 tree dataref_offset = NULL_TREE;
7622 gimple *ptr_incr = NULL;
7623 int ncopies;
7624 int j;
7625 stmt_vec_info first_stmt_info;
7626 bool grouped_store;
7627 unsigned int group_size, i;
7628 vec<tree> oprnds = vNULL;
7629 vec<tree> result_chain = vNULL;
7630 vec<tree> vec_oprnds = vNULL;
7631 bool slp = (slp_node != NULL);
7632 unsigned int vec_num;
7633 bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
7634 tree aggr_type;
7635 gather_scatter_info gs_info;
7636 poly_uint64 vf;
7637 vec_load_store_type vls_type;
7638 tree ref_type;
7640 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
7641 return false;
7643 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
7644 && ! vec_stmt)
7645 return false;
7647 /* Is vectorizable store? */
7649 tree mask = NULL_TREE, mask_vectype = NULL_TREE;
7650 if (gassign *assign = dyn_cast <gassign *> (stmt_info->stmt))
7652 tree scalar_dest = gimple_assign_lhs (assign);
7653 if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
7654 && is_pattern_stmt_p (stmt_info))
7655 scalar_dest = TREE_OPERAND (scalar_dest, 0);
7656 if (TREE_CODE (scalar_dest) != ARRAY_REF
7657 && TREE_CODE (scalar_dest) != BIT_FIELD_REF
7658 && TREE_CODE (scalar_dest) != INDIRECT_REF
7659 && TREE_CODE (scalar_dest) != COMPONENT_REF
7660 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
7661 && TREE_CODE (scalar_dest) != REALPART_EXPR
7662 && TREE_CODE (scalar_dest) != MEM_REF)
7663 return false;
7665 else
7667 gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
7668 if (!call || !gimple_call_internal_p (call))
7669 return false;
7671 internal_fn ifn = gimple_call_internal_fn (call);
7672 if (!internal_store_fn_p (ifn))
7673 return false;
7675 if (slp_node != NULL)
7677 if (dump_enabled_p ())
7678 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7679 "SLP of masked stores not supported.\n");
7680 return false;
7683 int mask_index = internal_fn_mask_index (ifn);
7684 if (mask_index >= 0
7685 && !vect_check_scalar_mask (vinfo, stmt_info, slp_node, mask_index,
7686 &mask, NULL, &mask_dt, &mask_vectype))
7687 return false;
7690 op = vect_get_store_rhs (stmt_info);
7692 /* Cannot have hybrid store SLP -- that would mean storing to the
7693 same location twice. */
7694 gcc_assert (slp == PURE_SLP_STMT (stmt_info));
7696 tree vectype = STMT_VINFO_VECTYPE (stmt_info), rhs_vectype = NULL_TREE;
7697 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
7699 if (loop_vinfo)
7701 loop = LOOP_VINFO_LOOP (loop_vinfo);
7702 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
7704 else
7705 vf = 1;
7707 /* Multiple types in SLP are handled by creating the appropriate number of
7708 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
7709 case of SLP. */
7710 if (slp)
7711 ncopies = 1;
7712 else
7713 ncopies = vect_get_num_copies (loop_vinfo, vectype);
7715 gcc_assert (ncopies >= 1);
7717 /* FORNOW. This restriction should be relaxed. */
7718 if (loop && nested_in_vect_loop_p (loop, stmt_info) && ncopies > 1)
7720 if (dump_enabled_p ())
7721 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7722 "multiple types in nested loop.\n");
7723 return false;
7726 if (!vect_check_store_rhs (vinfo, stmt_info, slp_node,
7727 op, &rhs_dt, &rhs_vectype, &vls_type))
7728 return false;
7730 elem_type = TREE_TYPE (vectype);
7731 vec_mode = TYPE_MODE (vectype);
7733 if (!STMT_VINFO_DATA_REF (stmt_info))
7734 return false;
7736 vect_memory_access_type memory_access_type;
7737 enum dr_alignment_support alignment_support_scheme;
7738 int misalignment;
7739 poly_int64 poffset;
7740 if (!get_load_store_type (vinfo, stmt_info, vectype, slp_node, mask, vls_type,
7741 ncopies, &memory_access_type, &poffset,
7742 &alignment_support_scheme, &misalignment, &gs_info))
7743 return false;
7745 if (mask)
7747 if (memory_access_type == VMAT_CONTIGUOUS)
7749 if (!VECTOR_MODE_P (vec_mode)
7750 || !can_vec_mask_load_store_p (vec_mode,
7751 TYPE_MODE (mask_vectype), false))
7752 return false;
7754 else if (memory_access_type != VMAT_LOAD_STORE_LANES
7755 && (memory_access_type != VMAT_GATHER_SCATTER
7756 || (gs_info.decl && !VECTOR_BOOLEAN_TYPE_P (mask_vectype))))
7758 if (dump_enabled_p ())
7759 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7760 "unsupported access type for masked store.\n");
7761 return false;
7764 else
7766 /* FORNOW. In some cases we can vectorize even if the data type is not
7767 supported (e.g. array initialization with 0). */
7768 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
7769 return false;
7772 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info), *first_dr_info = NULL;
7773 grouped_store = (STMT_VINFO_GROUPED_ACCESS (stmt_info)
7774 && memory_access_type != VMAT_GATHER_SCATTER
7775 && (slp || memory_access_type != VMAT_CONTIGUOUS));
7776 if (grouped_store)
7778 first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
7779 first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
7780 group_size = DR_GROUP_SIZE (first_stmt_info);
7782 else
7784 first_stmt_info = stmt_info;
7785 first_dr_info = dr_info;
7786 group_size = vec_num = 1;
7789 if (STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) > 1 && !vec_stmt)
7791 if (!check_scan_store (vinfo, stmt_info, vectype, rhs_dt, slp, mask,
7792 memory_access_type))
7793 return false;
7796 if (!vec_stmt) /* transformation not required. */
7798 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
7800 if (loop_vinfo
7801 && LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo))
7802 check_load_store_for_partial_vectors (loop_vinfo, vectype, slp_node,
7803 vls_type, group_size,
7804 memory_access_type, &gs_info,
7805 mask);
7807 if (slp_node
7808 && !vect_maybe_update_slp_op_vectype (SLP_TREE_CHILDREN (slp_node)[0],
7809 vectype))
7811 if (dump_enabled_p ())
7812 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7813 "incompatible vector types for invariants\n");
7814 return false;
7817 if (dump_enabled_p ()
7818 && memory_access_type != VMAT_ELEMENTWISE
7819 && memory_access_type != VMAT_GATHER_SCATTER
7820 && alignment_support_scheme != dr_aligned)
7821 dump_printf_loc (MSG_NOTE, vect_location,
7822 "Vectorizing an unaligned access.\n");
7824 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
7825 vect_model_store_cost (vinfo, stmt_info, ncopies,
7826 memory_access_type, alignment_support_scheme,
7827 misalignment, vls_type, slp_node, cost_vec);
7828 return true;
7830 gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
7832 /* Transform. */
7834 ensure_base_align (dr_info);
7836 if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl)
7838 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, src;
7839 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
7840 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
7841 tree ptr, var, scale, vec_mask;
7842 tree mask_arg = NULL_TREE, mask_op = NULL_TREE, perm_mask = NULL_TREE;
7843 tree mask_halfvectype = mask_vectype;
7844 edge pe = loop_preheader_edge (loop);
7845 gimple_seq seq;
7846 basic_block new_bb;
7847 enum { NARROW, NONE, WIDEN } modifier;
7848 poly_uint64 scatter_off_nunits
7849 = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);
7851 if (known_eq (nunits, scatter_off_nunits))
7852 modifier = NONE;
7853 else if (known_eq (nunits * 2, scatter_off_nunits))
7855 modifier = WIDEN;
7857 /* Currently gathers and scatters are only supported for
7858 fixed-length vectors. */
7859 unsigned int count = scatter_off_nunits.to_constant ();
7860 vec_perm_builder sel (count, count, 1);
7861 for (i = 0; i < (unsigned int) count; ++i)
7862 sel.quick_push (i | (count / 2));
7864 vec_perm_indices indices (sel, 1, count);
7865 perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype,
7866 indices);
7867 gcc_assert (perm_mask != NULL_TREE);
7869 else if (known_eq (nunits, scatter_off_nunits * 2))
7871 modifier = NARROW;
7873 /* Currently gathers and scatters are only supported for
7874 fixed-length vectors. */
7875 unsigned int count = nunits.to_constant ();
7876 vec_perm_builder sel (count, count, 1);
7877 for (i = 0; i < (unsigned int) count; ++i)
7878 sel.quick_push (i | (count / 2));
7880 vec_perm_indices indices (sel, 2, count);
7881 perm_mask = vect_gen_perm_mask_checked (vectype, indices);
7882 gcc_assert (perm_mask != NULL_TREE);
7883 ncopies *= 2;
7885 if (mask)
7886 mask_halfvectype = truth_type_for (gs_info.offset_vectype);
7888 else
7889 gcc_unreachable ();
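      /* For instance, with a selector count of 8 the permutation built above
	 is { 4, 5, 6, 7, 4, 5, 6, 7 }: it copies the high half of the
	 (identical) inputs into both halves of the result, so the
	 odd-numbered copies below operate on the second half of the wider
	 offset vector (WIDEN) or of the wider rhs vector (NARROW).  */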
7891 rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
7892 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
7893 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
7894 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
7895 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
7896 scaletype = TREE_VALUE (arglist);
7898 gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
7899 && TREE_CODE (rettype) == VOID_TYPE);
7901 ptr = fold_convert (ptrtype, gs_info.base);
7902 if (!is_gimple_min_invariant (ptr))
7904 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
7905 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
7906 gcc_assert (!new_bb);
7909 if (mask == NULL_TREE)
7911 mask_arg = build_int_cst (masktype, -1);
7912 mask_arg = vect_init_vector (vinfo, stmt_info,
7913 mask_arg, masktype, NULL);
7916 scale = build_int_cst (scaletype, gs_info.scale);
7918 auto_vec<tree> vec_oprnds0;
7919 auto_vec<tree> vec_oprnds1;
7920 auto_vec<tree> vec_masks;
7921 if (mask)
7923 tree mask_vectype = truth_type_for (vectype);
7924 vect_get_vec_defs_for_operand (vinfo, stmt_info,
7925 modifier == NARROW
7926 ? ncopies / 2 : ncopies,
7927 mask, &vec_masks, mask_vectype);
7929 vect_get_vec_defs_for_operand (vinfo, stmt_info,
7930 modifier == WIDEN
7931 ? ncopies / 2 : ncopies,
7932 gs_info.offset, &vec_oprnds0);
7933 vect_get_vec_defs_for_operand (vinfo, stmt_info,
7934 modifier == NARROW
7935 ? ncopies / 2 : ncopies,
7936 op, &vec_oprnds1);
7937 for (j = 0; j < ncopies; ++j)
7939 if (modifier == WIDEN)
7941 if (j & 1)
7942 op = permute_vec_elements (vinfo, vec_oprnd0, vec_oprnd0,
7943 perm_mask, stmt_info, gsi);
7944 else
7945 op = vec_oprnd0 = vec_oprnds0[j / 2];
7946 src = vec_oprnd1 = vec_oprnds1[j];
7947 if (mask)
7948 mask_op = vec_mask = vec_masks[j];
7950 else if (modifier == NARROW)
7952 if (j & 1)
7953 src = permute_vec_elements (vinfo, vec_oprnd1, vec_oprnd1,
7954 perm_mask, stmt_info, gsi);
7955 else
7956 src = vec_oprnd1 = vec_oprnds1[j / 2];
7957 op = vec_oprnd0 = vec_oprnds0[j];
7958 if (mask)
7959 mask_op = vec_mask = vec_masks[j / 2];
7961 else
7963 op = vec_oprnd0 = vec_oprnds0[j];
7964 src = vec_oprnd1 = vec_oprnds1[j];
7965 if (mask)
7966 mask_op = vec_mask = vec_masks[j];
7969 if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
7971 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src)),
7972 TYPE_VECTOR_SUBPARTS (srctype)));
7973 var = vect_get_new_ssa_name (srctype, vect_simple_var);
7974 src = build1 (VIEW_CONVERT_EXPR, srctype, src);
7975 gassign *new_stmt
7976 = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
7977 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
7978 src = var;
7981 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
7983 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
7984 TYPE_VECTOR_SUBPARTS (idxtype)));
7985 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
7986 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
7987 gassign *new_stmt
7988 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
7989 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
7990 op = var;
7993 if (mask)
7995 tree utype;
7996 mask_arg = mask_op;
7997 if (modifier == NARROW)
7999 var = vect_get_new_ssa_name (mask_halfvectype,
8000 vect_simple_var);
8001 gassign *new_stmt
8002 = gimple_build_assign (var, (j & 1) ? VEC_UNPACK_HI_EXPR
8003 : VEC_UNPACK_LO_EXPR,
8004 mask_op);
8005 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
8006 mask_arg = var;
8008 tree optype = TREE_TYPE (mask_arg);
8009 if (TYPE_MODE (masktype) == TYPE_MODE (optype))
8010 utype = masktype;
8011 else
8012 utype = lang_hooks.types.type_for_mode (TYPE_MODE (optype), 1);
8013 var = vect_get_new_ssa_name (utype, vect_scalar_var);
8014 mask_arg = build1 (VIEW_CONVERT_EXPR, utype, mask_arg);
8015 gassign *new_stmt
8016 = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_arg);
8017 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
8018 mask_arg = var;
8019 if (!useless_type_conversion_p (masktype, utype))
8021 gcc_assert (TYPE_PRECISION (utype)
8022 <= TYPE_PRECISION (masktype));
8023 var = vect_get_new_ssa_name (masktype, vect_scalar_var);
8024 new_stmt = gimple_build_assign (var, NOP_EXPR, mask_arg);
8025 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
8026 mask_arg = var;
8030 gcall *new_stmt
8031 = gimple_build_call (gs_info.decl, 5, ptr, mask_arg, op, src, scale);
8032 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
8034 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
8036 *vec_stmt = STMT_VINFO_VEC_STMTS (stmt_info)[0];
8037 return true;
8039 else if (STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) >= 3)
8040 return vectorizable_scan_store (vinfo, stmt_info, gsi, vec_stmt, ncopies);
8042 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
8043 DR_GROUP_STORE_COUNT (DR_GROUP_FIRST_ELEMENT (stmt_info))++;
8045 if (grouped_store)
8047 /* FORNOW */
8048 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt_info));
8050 /* We vectorize all the stmts of the interleaving group when we
8051 reach the last stmt in the group. */
8052 if (DR_GROUP_STORE_COUNT (first_stmt_info)
8053 < DR_GROUP_SIZE (first_stmt_info)
8054 && !slp)
8056 *vec_stmt = NULL;
8057 return true;
8060 if (slp)
8062 grouped_store = false;
8063 /* VEC_NUM is the number of vect stmts to be created for this
8064 group. */
8065 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
8066 first_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[0];
8067 gcc_assert (DR_GROUP_FIRST_ELEMENT (first_stmt_info)
8068 == first_stmt_info);
8069 first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
8070 op = vect_get_store_rhs (first_stmt_info);
8072 else
8073 /* VEC_NUM is the number of vect stmts to be created for this
8074 group. */
8075 vec_num = group_size;
8077 ref_type = get_group_alias_ptr_type (first_stmt_info);
8079 else
8080 ref_type = reference_alias_ptr_type (DR_REF (first_dr_info->dr));
8082 if (dump_enabled_p ())
8083 dump_printf_loc (MSG_NOTE, vect_location,
8084 "transform store. ncopies = %d\n", ncopies);
8086 if (memory_access_type == VMAT_ELEMENTWISE
8087 || memory_access_type == VMAT_STRIDED_SLP)
8089 gimple_stmt_iterator incr_gsi;
8090 bool insert_after;
8091 gimple *incr;
8092 tree offvar;
8093 tree ivstep;
8094 tree running_off;
8095 tree stride_base, stride_step, alias_off;
8096 tree vec_oprnd;
8097 tree dr_offset;
8098 unsigned int g;
8099 /* Checked by get_load_store_type. */
8100 unsigned int const_nunits = nunits.to_constant ();
8102 gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
8103 gcc_assert (!nested_in_vect_loop_p (loop, stmt_info));
8105 dr_offset = get_dr_vinfo_offset (vinfo, first_dr_info);
8106 stride_base
8107 = fold_build_pointer_plus
8108 (DR_BASE_ADDRESS (first_dr_info->dr),
8109 size_binop (PLUS_EXPR,
8110 convert_to_ptrofftype (dr_offset),
8111 convert_to_ptrofftype (DR_INIT (first_dr_info->dr))));
8112 stride_step = fold_convert (sizetype, DR_STEP (first_dr_info->dr));
8114 /* For a store with loop-invariant (but other than power-of-2)
8115 stride (i.e. not a grouped access) like so:
8117 for (i = 0; i < n; i += stride)
8118 array[i] = ...;
8120 we generate a new induction variable and new stores from
8121 the components of the (vectorized) rhs:
8123 for (j = 0; ; j += VF*stride)
8124 vectemp = ...;
8125 tmp1 = vectemp[0];
8126 array[j] = tmp1;
8127 tmp2 = vectemp[1];
8128 array[j + stride] = tmp2;
8132 unsigned nstores = const_nunits;
8133 unsigned lnel = 1;
8134 tree ltype = elem_type;
8135 tree lvectype = vectype;
8136 if (slp)
8138 if (group_size < const_nunits
8139 && const_nunits % group_size == 0)
8141 nstores = const_nunits / group_size;
8142 lnel = group_size;
8143 ltype = build_vector_type (elem_type, group_size);
8144 lvectype = vectype;
8146 /* First check if vec_extract optab doesn't support extraction
8147 of vector elts directly. */
8148 scalar_mode elmode = SCALAR_TYPE_MODE (elem_type);
8149 machine_mode vmode;
8150 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
8151 || !related_vector_mode (TYPE_MODE (vectype), elmode,
8152 group_size).exists (&vmode)
8153 || (convert_optab_handler (vec_extract_optab,
8154 TYPE_MODE (vectype), vmode)
8155 == CODE_FOR_nothing))
8157 /* Try to avoid emitting an extract of vector elements
8158 by performing the extracts using an integer type of the
8159 same size, extracting from a vector of those and then
8160 re-interpreting it as the original vector type if
8161 supported. */
8162 unsigned lsize
8163 = group_size * GET_MODE_BITSIZE (elmode);
8164 unsigned int lnunits = const_nunits / group_size;
8165 /* If we can't construct such a vector fall back to
8166 element extracts from the original vector type and
8167 element size stores. */
8168 if (int_mode_for_size (lsize, 0).exists (&elmode)
8169 && VECTOR_MODE_P (TYPE_MODE (vectype))
8170 && related_vector_mode (TYPE_MODE (vectype), elmode,
8171 lnunits).exists (&vmode)
8172 && (convert_optab_handler (vec_extract_optab,
8173 vmode, elmode)
8174 != CODE_FOR_nothing))
8176 nstores = lnunits;
8177 lnel = group_size;
8178 ltype = build_nonstandard_integer_type (lsize, 1);
8179 lvectype = build_vector_type (ltype, nstores);
8181 /* Else fall back to vector extraction anyway.
8182 Fewer stores are more important than avoiding spilling
8183 of the vector we extract from. Compared to the
8184 construction case in vectorizable_load no store-forwarding
8185 issue exists here for reasonable archs. */
8188 else if (group_size >= const_nunits
8189 && group_size % const_nunits == 0)
8191 nstores = 1;
8192 lnel = const_nunits;
8193 ltype = vectype;
8194 lvectype = vectype;
8196 ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
8197 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
8200 ivstep = stride_step;
8201 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
8202 build_int_cst (TREE_TYPE (ivstep), vf));
8204 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
8206 stride_base = cse_and_gimplify_to_preheader (loop_vinfo, stride_base);
8207 ivstep = cse_and_gimplify_to_preheader (loop_vinfo, ivstep);
8208 create_iv (stride_base, ivstep, NULL,
8209 loop, &incr_gsi, insert_after,
8210 &offvar, NULL);
8211 incr = gsi_stmt (incr_gsi);
8213 stride_step = cse_and_gimplify_to_preheader (loop_vinfo, stride_step);
8215 alias_off = build_int_cst (ref_type, 0);
8216 stmt_vec_info next_stmt_info = first_stmt_info;
8217 for (g = 0; g < group_size; g++)
8219 running_off = offvar;
8220 if (g)
8222 tree size = TYPE_SIZE_UNIT (ltype);
8223 tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
8224 size);
8225 tree newoff = copy_ssa_name (running_off, NULL);
8226 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
8227 running_off, pos);
8228 vect_finish_stmt_generation (vinfo, stmt_info, incr, gsi);
8229 running_off = newoff;
8231 if (!slp)
8232 op = vect_get_store_rhs (next_stmt_info);
8233 vect_get_vec_defs (vinfo, next_stmt_info, slp_node, ncopies,
8234 op, &vec_oprnds);
8235 unsigned int group_el = 0;
8236 unsigned HOST_WIDE_INT
8237 elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
8238 for (j = 0; j < ncopies; j++)
8240 vec_oprnd = vec_oprnds[j];
8241 /* Pun the vector to extract from if necessary. */
8242 if (lvectype != vectype)
8244 tree tem = make_ssa_name (lvectype);
8245 gimple *pun
8246 = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
8247 lvectype, vec_oprnd));
8248 vect_finish_stmt_generation (vinfo, stmt_info, pun, gsi);
8249 vec_oprnd = tem;
8251 for (i = 0; i < nstores; i++)
8253 tree newref, newoff;
8254 gimple *incr, *assign;
8255 tree size = TYPE_SIZE (ltype);
8256 /* Extract the i'th component. */
8257 tree pos = fold_build2 (MULT_EXPR, bitsizetype,
8258 bitsize_int (i), size);
8259 tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
8260 size, pos);
8262 elem = force_gimple_operand_gsi (gsi, elem, true,
8263 NULL_TREE, true,
8264 GSI_SAME_STMT);
8266 tree this_off = build_int_cst (TREE_TYPE (alias_off),
8267 group_el * elsz);
8268 newref = build2 (MEM_REF, ltype,
8269 running_off, this_off);
8270 vect_copy_ref_info (newref, DR_REF (first_dr_info->dr));
8272 /* And store it to *running_off. */
8273 assign = gimple_build_assign (newref, elem);
8274 vect_finish_stmt_generation (vinfo, stmt_info, assign, gsi);
8276 group_el += lnel;
8277 if (! slp
8278 || group_el == group_size)
8280 newoff = copy_ssa_name (running_off, NULL);
8281 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
8282 running_off, stride_step);
8283 vect_finish_stmt_generation (vinfo, stmt_info, incr, gsi);
8285 running_off = newoff;
8286 group_el = 0;
8288 if (g == group_size - 1
8289 && !slp)
8291 if (j == 0 && i == 0)
8292 *vec_stmt = assign;
8293 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (assign);
8297 next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
8298 vec_oprnds.release ();
8299 if (slp)
8300 break;
8303 return true;
8306 auto_vec<tree> dr_chain (group_size);
8307 oprnds.create (group_size);
8309 gcc_assert (alignment_support_scheme);
8310 vec_loop_masks *loop_masks
8311 = (loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
8312 ? &LOOP_VINFO_MASKS (loop_vinfo)
8313 : NULL);
8314 vec_loop_lens *loop_lens
8315 = (loop_vinfo && LOOP_VINFO_FULLY_WITH_LENGTH_P (loop_vinfo)
8316 ? &LOOP_VINFO_LENS (loop_vinfo)
8317 : NULL);
8319 /* We shouldn't use the length-based approach if the loop is fully masked. */
8320 gcc_assert (!loop_lens || !loop_masks);
8322 /* Targets with store-lane instructions must not require explicit
8323 realignment. vect_supportable_dr_alignment always returns either
8324 dr_aligned or dr_unaligned_supported for masked operations. */
8325 gcc_assert ((memory_access_type != VMAT_LOAD_STORE_LANES
8326 && !mask
8327 && !loop_masks)
8328 || alignment_support_scheme == dr_aligned
8329 || alignment_support_scheme == dr_unaligned_supported);
8331 tree offset = NULL_TREE;
8332 if (!known_eq (poffset, 0))
8333 offset = size_int (poffset);
8335 tree bump;
8336 tree vec_offset = NULL_TREE;
8337 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
8339 aggr_type = NULL_TREE;
8340 bump = NULL_TREE;
8342 else if (memory_access_type == VMAT_GATHER_SCATTER)
8344 aggr_type = elem_type;
8345 vect_get_strided_load_store_ops (stmt_info, loop_vinfo, &gs_info,
8346 &bump, &vec_offset);
8348 else
8350 if (memory_access_type == VMAT_LOAD_STORE_LANES)
8351 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
8352 else
8353 aggr_type = vectype;
8354 bump = vect_get_data_ptr_increment (vinfo, dr_info, aggr_type,
8355 memory_access_type);
8358 if (mask)
8359 LOOP_VINFO_HAS_MASK_STORE (loop_vinfo) = true;
8361 /* In case the vectorization factor (VF) is bigger than the number
8362 of elements that we can fit in a vectype (nunits), we have to generate
8363 more than one vector stmt, i.e. we need to "unroll" the
8364 vector stmt by a factor VF/nunits. */
8366 /* In case of interleaving (non-unit grouped access):
8368 S1: &base + 2 = x2
8369 S2: &base = x0
8370 S3: &base + 1 = x1
8371 S4: &base + 3 = x3
8373 We create vectorized stores starting from the base address (the access of the
8374 first stmt in the chain, S2 in the above example) when the last store stmt
8375 of the chain (S4) is reached:
8377 VS1: &base = vx2
8378 VS2: &base + vec_size*1 = vx0
8379 VS3: &base + vec_size*2 = vx1
8380 VS4: &base + vec_size*3 = vx3
8382 Then permutation statements are generated:
8384 VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
8385 VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
8388 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
8389 (the order of the data-refs in the output of vect_permute_store_chain
8390 corresponds to the order of scalar stmts in the interleaving chain - see
8391 the documentation of vect_permute_store_chain()).
8393 In case of both multiple types and interleaving, above vector stores and
8394 permutation stmts are created for every copy. The result vector stmts are
8395 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
8396 STMT_VINFO_RELATED_STMT for the next copies.
8399 auto_vec<tree> vec_masks;
8400 tree vec_mask = NULL;
8401 auto_vec<tree> vec_offsets;
8402 auto_vec<vec<tree> > gvec_oprnds;
8403 gvec_oprnds.safe_grow_cleared (group_size, true);
8404 for (j = 0; j < ncopies; j++)
8406 gimple *new_stmt;
8407 if (j == 0)
8409 if (slp)
8411 /* Get vectorized arguments for SLP_NODE. */
8412 vect_get_vec_defs (vinfo, stmt_info, slp_node, 1,
8413 op, &vec_oprnds);
8414 vec_oprnd = vec_oprnds[0];
8416 else
8418 /* For interleaved stores we collect vectorized defs for all the
8419 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
8420 used as an input to vect_permute_store_chain().
8422 If the store is not grouped, DR_GROUP_SIZE is 1, and DR_CHAIN
8423 and OPRNDS are of size 1. */
8424 stmt_vec_info next_stmt_info = first_stmt_info;
8425 for (i = 0; i < group_size; i++)
8427 /* Since gaps are not supported for interleaved stores,
8428 DR_GROUP_SIZE is the exact number of stmts in the chain.
8429 Therefore, NEXT_STMT_INFO can't be NULL. In case
8430 that there is no interleaving, DR_GROUP_SIZE is 1,
8431 and only one iteration of the loop will be executed. */
8432 op = vect_get_store_rhs (next_stmt_info);
8433 vect_get_vec_defs_for_operand (vinfo, next_stmt_info,
8434 ncopies, op, &gvec_oprnds[i]);
8435 vec_oprnd = gvec_oprnds[i][0];
8436 dr_chain.quick_push (gvec_oprnds[i][0]);
8437 oprnds.quick_push (gvec_oprnds[i][0]);
8438 next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
8440 if (mask)
8442 vect_get_vec_defs_for_operand (vinfo, stmt_info, ncopies,
8443 mask, &vec_masks, mask_vectype);
8444 vec_mask = vec_masks[0];
8448 /* We should have caught mismatched types earlier. */
8449 gcc_assert (useless_type_conversion_p (vectype,
8450 TREE_TYPE (vec_oprnd)));
8451 bool simd_lane_access_p
8452 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) != 0;
8453 if (simd_lane_access_p
8454 && !loop_masks
8455 && TREE_CODE (DR_BASE_ADDRESS (first_dr_info->dr)) == ADDR_EXPR
8456 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr_info->dr), 0))
8457 && integer_zerop (get_dr_vinfo_offset (vinfo, first_dr_info))
8458 && integer_zerop (DR_INIT (first_dr_info->dr))
8459 && alias_sets_conflict_p (get_alias_set (aggr_type),
8460 get_alias_set (TREE_TYPE (ref_type))))
8462 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr_info->dr));
8463 dataref_offset = build_int_cst (ref_type, 0);
8465 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
8467 vect_get_gather_scatter_ops (loop_vinfo, loop, stmt_info,
8468 slp_node, &gs_info, &dataref_ptr,
8469 &vec_offsets);
8470 vec_offset = vec_offsets[0];
8472 else
8473 dataref_ptr
8474 = vect_create_data_ref_ptr (vinfo, first_stmt_info, aggr_type,
8475 simd_lane_access_p ? loop : NULL,
8476 offset, &dummy, gsi, &ptr_incr,
8477 simd_lane_access_p, bump);
8479 else
8481 /* For interleaved stores we created vectorized defs for all the
8482 defs stored in OPRNDS in the previous iteration (previous copy).
8483 DR_CHAIN is then used as an input to vect_permute_store_chain().
8484 If the store is not grouped, DR_GROUP_SIZE is 1, and DR_CHAIN and
8485 OPRNDS are of size 1. */
8486 for (i = 0; i < group_size; i++)
8488 vec_oprnd = gvec_oprnds[i][j];
8489 dr_chain[i] = gvec_oprnds[i][j];
8490 oprnds[i] = gvec_oprnds[i][j];
8492 if (mask)
8493 vec_mask = vec_masks[j];
8494 if (dataref_offset)
8495 dataref_offset
8496 = int_const_binop (PLUS_EXPR, dataref_offset, bump);
8497 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
8498 vec_offset = vec_offsets[j];
8499 else
8500 dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, gsi,
8501 stmt_info, bump);
8504 if (memory_access_type == VMAT_LOAD_STORE_LANES)
8506 tree vec_array;
8508 /* Get an array into which we can store the individual vectors. */
8509 vec_array = create_vector_array (vectype, vec_num);
8511 /* Invalidate the current contents of VEC_ARRAY. This should
8512 become an RTL clobber too, which prevents the vector registers
8513 from being upward-exposed. */
8514 vect_clobber_variable (vinfo, stmt_info, gsi, vec_array);
8516 /* Store the individual vectors into the array. */
8517 for (i = 0; i < vec_num; i++)
8519 vec_oprnd = dr_chain[i];
8520 write_vector_array (vinfo, stmt_info,
8521 gsi, vec_oprnd, vec_array, i);
8524 tree final_mask = NULL;
8525 if (loop_masks)
8526 final_mask = vect_get_loop_mask (gsi, loop_masks, ncopies,
8527 vectype, j);
8528 if (vec_mask)
8529 final_mask = prepare_vec_mask (loop_vinfo, mask_vectype,
8530 final_mask, vec_mask, gsi);
8532 gcall *call;
8533 if (final_mask)
8535 /* Emit:
8536 MASK_STORE_LANES (DATAREF_PTR, ALIAS_PTR, VEC_MASK,
8537 VEC_ARRAY). */
8538 unsigned int align = TYPE_ALIGN (TREE_TYPE (vectype));
8539 tree alias_ptr = build_int_cst (ref_type, align);
8540 call = gimple_build_call_internal (IFN_MASK_STORE_LANES, 4,
8541 dataref_ptr, alias_ptr,
8542 final_mask, vec_array);
8544 else
8546 /* Emit:
8547 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
8548 data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
8549 call = gimple_build_call_internal (IFN_STORE_LANES, 1,
8550 vec_array);
8551 gimple_call_set_lhs (call, data_ref);
8553 gimple_call_set_nothrow (call, true);
8554 vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
8555 new_stmt = call;
8557 /* Record that VEC_ARRAY is now dead. */
8558 vect_clobber_variable (vinfo, stmt_info, gsi, vec_array);
8560 else
8562 new_stmt = NULL;
8563 if (grouped_store)
8565 if (j == 0)
8566 result_chain.create (group_size);
8567 /* Permute. */
8568 vect_permute_store_chain (vinfo, dr_chain, group_size, stmt_info,
8569 gsi, &result_chain);
8572 stmt_vec_info next_stmt_info = first_stmt_info;
8573 for (i = 0; i < vec_num; i++)
8575 unsigned misalign;
8576 unsigned HOST_WIDE_INT align;
8578 tree final_mask = NULL_TREE;
8579 if (loop_masks)
8580 final_mask = vect_get_loop_mask (gsi, loop_masks,
8581 vec_num * ncopies,
8582 vectype, vec_num * j + i);
8583 if (vec_mask)
8584 final_mask = prepare_vec_mask (loop_vinfo, mask_vectype,
8585 final_mask, vec_mask, gsi);
8587 if (memory_access_type == VMAT_GATHER_SCATTER)
8589 tree scale = size_int (gs_info.scale);
8590 gcall *call;
8591 if (final_mask)
8592 call = gimple_build_call_internal
8593 (IFN_MASK_SCATTER_STORE, 5, dataref_ptr, vec_offset,
8594 scale, vec_oprnd, final_mask);
8595 else
8596 call = gimple_build_call_internal
8597 (IFN_SCATTER_STORE, 4, dataref_ptr, vec_offset,
8598 scale, vec_oprnd);
8599 gimple_call_set_nothrow (call, true);
8600 vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
8601 new_stmt = call;
8602 break;
8605 if (i > 0)
8606 /* Bump the vector pointer. */
8607 dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr,
8608 gsi, stmt_info, bump);
8610 if (slp)
8611 vec_oprnd = vec_oprnds[i];
8612 else if (grouped_store)
8613 /* For grouped stores vectorized defs are interleaved in
8614 vect_permute_store_chain(). */
8615 vec_oprnd = result_chain[i];
8617 align = known_alignment (DR_TARGET_ALIGNMENT (first_dr_info));
8618 if (alignment_support_scheme == dr_aligned)
8619 misalign = 0;
8620 else if (misalignment == DR_MISALIGNMENT_UNKNOWN)
8622 align = dr_alignment (vect_dr_behavior (vinfo, first_dr_info));
8623 misalign = 0;
8625 else
8626 misalign = misalignment;
8627 if (dataref_offset == NULL_TREE
8628 && TREE_CODE (dataref_ptr) == SSA_NAME)
8629 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
8630 misalign);
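	  /* Derive the alignment in bytes the access is actually known to
	     have: e.g. a target alignment of 16 with a known misalignment
	     of 4 gives least_bit_hwi (4 | 16) == 4.  */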
8631 align = least_bit_hwi (misalign | align);
8633 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
8635 tree perm_mask = perm_mask_for_reverse (vectype);
8636 tree perm_dest = vect_create_destination_var
8637 (vect_get_store_rhs (stmt_info), vectype);
8638 tree new_temp = make_ssa_name (perm_dest);
8640 /* Generate the permute statement. */
8641 gimple *perm_stmt
8642 = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
8643 vec_oprnd, perm_mask);
8644 vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi);
8646 perm_stmt = SSA_NAME_DEF_STMT (new_temp);
8647 vec_oprnd = new_temp;
8650 /* Arguments are ready. Create the new vector stmt. */
8651 if (final_mask)
8653 tree ptr = build_int_cst (ref_type, align * BITS_PER_UNIT);
8654 gcall *call
8655 = gimple_build_call_internal (IFN_MASK_STORE, 4,
8656 dataref_ptr, ptr,
8657 final_mask, vec_oprnd);
8658 gimple_call_set_nothrow (call, true);
8659 vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
8660 new_stmt = call;
8662 else if (loop_lens)
8664 tree final_len
8665 = vect_get_loop_len (loop_vinfo, loop_lens,
8666 vec_num * ncopies, vec_num * j + i);
8667 tree ptr = build_int_cst (ref_type, align * BITS_PER_UNIT);
8668 machine_mode vmode = TYPE_MODE (vectype);
8669 opt_machine_mode new_ovmode
8670 = get_len_load_store_mode (vmode, false);
8671 machine_mode new_vmode = new_ovmode.require ();
8672 /* Need conversion if it's wrapped with VnQI. */
8673 if (vmode != new_vmode)
8675 tree new_vtype
8676 = build_vector_type_for_mode (unsigned_intQI_type_node,
8677 new_vmode);
8678 tree var
8679 = vect_get_new_ssa_name (new_vtype, vect_simple_var);
8680 vec_oprnd
8681 = build1 (VIEW_CONVERT_EXPR, new_vtype, vec_oprnd);
8682 gassign *new_stmt
8683 = gimple_build_assign (var, VIEW_CONVERT_EXPR,
8684 vec_oprnd);
8685 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt,
8686 gsi);
8687 vec_oprnd = var;
8690 signed char biasval =
8691 LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS (loop_vinfo);
8693 tree bias = build_int_cst (intQI_type_node, biasval);
8694 gcall *call
8695 = gimple_build_call_internal (IFN_LEN_STORE, 5, dataref_ptr,
8696 ptr, final_len, vec_oprnd,
8697 bias);
8698 gimple_call_set_nothrow (call, true);
8699 vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
8700 new_stmt = call;
8702 else
8704 data_ref = fold_build2 (MEM_REF, vectype,
8705 dataref_ptr,
8706 dataref_offset
8707 ? dataref_offset
8708 : build_int_cst (ref_type, 0));
8709 if (alignment_support_scheme == dr_aligned)
8711 else
8712 TREE_TYPE (data_ref)
8713 = build_aligned_type (TREE_TYPE (data_ref),
8714 align * BITS_PER_UNIT);
8715 vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr));
8716 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
8717 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
8720 if (slp)
8721 continue;
8723 next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
8724 if (!next_stmt_info)
8725 break;
8728 if (!slp)
8730 if (j == 0)
8731 *vec_stmt = new_stmt;
8732 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
8736 for (i = 0; i < group_size; ++i)
8738 vec<tree> oprndsi = gvec_oprnds[i];
8739 oprndsi.release ();
8741 oprnds.release ();
8742 result_chain.release ();
8743 vec_oprnds.release ();
8745 return true;
8748 /* Given a vector type VECTYPE, turns permutation SEL into the equivalent
8749 VECTOR_CST mask. No checks are made that the target platform supports the
8750 mask, so callers may wish to test can_vec_perm_const_p separately, or use
8751 vect_gen_perm_mask_checked. */
8753 tree
8754 vect_gen_perm_mask_any (tree vectype, const vec_perm_indices &sel)
8756 tree mask_type;
8758 poly_uint64 nunits = sel.length ();
8759 gcc_assert (known_eq (nunits, TYPE_VECTOR_SUBPARTS (vectype)));
8761 mask_type = build_vector_type (ssizetype, nunits);
8762 return vec_perm_indices_to_tree (mask_type, sel);
8765 /* Checked version of vect_gen_perm_mask_any. Asserts can_vec_perm_const_p,
8766 i.e. that the target supports the pattern _for arbitrary input vectors_. */
8768 tree
8769 vect_gen_perm_mask_checked (tree vectype, const vec_perm_indices &sel)
8771 machine_mode vmode = TYPE_MODE (vectype);
8772 gcc_assert (can_vec_perm_const_p (vmode, vmode, sel));
8773 return vect_gen_perm_mask_any (vectype, sel);
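/* As a usage sketch (purely illustrative; the vectorizer's own
   perm_mask_for_reverse used by vectorizable_store above plays this role
   and also copes with variable-length vectors): reversing a fixed-length
   vector amounts to pushing the indices nunits-1, ..., 1, 0 and asking for
   the checked variant, which asserts that the target supports the
   permutation.  The function name below is made up for the example.  */

static tree ATTRIBUTE_UNUSED
example_reverse_perm_mask (tree vectype)
{
  /* Assumes a fixed-length vector type.  */
  unsigned HOST_WIDE_INT nunits
    = TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
  vec_perm_builder sel (nunits, nunits, 1);
  for (unsigned HOST_WIDE_INT i = 0; i < nunits; ++i)
    sel.quick_push (nunits - 1 - i);
  vec_perm_indices indices (sel, 1, nunits);
  return vect_gen_perm_mask_checked (vectype, indices);
}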
8776 /* Given vector variables X and Y that were generated for the scalar
8777 STMT_INFO, generate instructions to permute the vector elements of X and Y
8778 using permutation mask MASK_VEC, insert them at *GSI and return the
8779 permuted vector variable. */
8781 static tree
8782 permute_vec_elements (vec_info *vinfo,
8783 tree x, tree y, tree mask_vec, stmt_vec_info stmt_info,
8784 gimple_stmt_iterator *gsi)
8786 tree vectype = TREE_TYPE (x);
8787 tree perm_dest, data_ref;
8788 gimple *perm_stmt;
8790 tree scalar_dest = gimple_get_lhs (stmt_info->stmt);
8791 if (scalar_dest && TREE_CODE (scalar_dest) == SSA_NAME)
8792 perm_dest = vect_create_destination_var (scalar_dest, vectype);
8793 else
8794 perm_dest = vect_get_new_vect_var (vectype, vect_simple_var, NULL);
8795 data_ref = make_ssa_name (perm_dest);
8797 /* Generate the permute statement. */
8798 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
8799 vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi);
8801 return data_ref;
8804 /* Hoist the definitions of all SSA uses on STMT_INFO out of the loop LOOP,
8805 inserting them on the loop's preheader edge. Returns true if we
8806 were successful in doing so (and thus STMT_INFO can then be moved),
8807 otherwise returns false. */
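/* For instance (the SSA names here are made up), if STMT_INFO is the
   invariant load _3 = *q_7; and q_7 = p_2(D) + 16; is computed inside the
   loop from loop-invariant operands only, q_7's definition can be moved to
   the preheader so that STMT_INFO itself can then be hoisted; if q_7 were
   instead defined by a PHI or depended on another in-loop statement, we
   give up rather than recurse.  */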
8809 static bool
8810 hoist_defs_of_uses (stmt_vec_info stmt_info, class loop *loop)
8812 ssa_op_iter i;
8813 tree op;
8814 bool any = false;
8816 FOR_EACH_SSA_TREE_OPERAND (op, stmt_info->stmt, i, SSA_OP_USE)
8818 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
8819 if (!gimple_nop_p (def_stmt)
8820 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
8822 /* Make sure we don't need to recurse. While we could do
8823 so in simple cases, when there are more complex use webs
8824 we don't have an easy way to preserve stmt order to fulfil
8825 dependencies within them. */
8826 tree op2;
8827 ssa_op_iter i2;
8828 if (gimple_code (def_stmt) == GIMPLE_PHI)
8829 return false;
8830 FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
8832 gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
8833 if (!gimple_nop_p (def_stmt2)
8834 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
8835 return false;
8837 any = true;
8841 if (!any)
8842 return true;
8844 FOR_EACH_SSA_TREE_OPERAND (op, stmt_info->stmt, i, SSA_OP_USE)
8846 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
8847 if (!gimple_nop_p (def_stmt)
8848 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
8850 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
8851 gsi_remove (&gsi, false);
8852 gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
8856 return true;
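/* For illustration only (hypothetical GIMPLE, not generated by this
   function): for an invariant load
     a_1 = b_2 + 1;
     x_3 = arr[a_1];   <-- STMT_INFO
   where b_2 is defined outside LOOP, the def a_1 = b_2 + 1 is moved to
   the preheader edge, after which the caller is free to hoist the load
   itself as well.  */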
8859 /* vectorizable_load.
8861 Check if STMT_INFO reads a non-scalar data-ref (array/pointer/structure)
8862 that can be vectorized.
8863 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
8864 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
8865 Return true if STMT_INFO is vectorizable in this way. */
8867 static bool
8868 vectorizable_load (vec_info *vinfo,
8869 stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
8870 gimple **vec_stmt, slp_tree slp_node,
8871 stmt_vector_for_cost *cost_vec)
8873 tree scalar_dest;
8874 tree vec_dest = NULL;
8875 tree data_ref = NULL;
8876 loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
8877 class loop *loop = NULL;
8878 class loop *containing_loop = gimple_bb (stmt_info->stmt)->loop_father;
8879 bool nested_in_vect_loop = false;
8880 tree elem_type;
8881 tree new_temp;
8882 machine_mode mode;
8883 tree dummy;
8884 tree dataref_ptr = NULL_TREE;
8885 tree dataref_offset = NULL_TREE;
8886 gimple *ptr_incr = NULL;
8887 int ncopies;
8888 int i, j;
8889 unsigned int group_size;
8890 poly_uint64 group_gap_adj;
8891 tree msq = NULL_TREE, lsq;
8892 tree realignment_token = NULL_TREE;
8893 gphi *phi = NULL;
8894 vec<tree> dr_chain = vNULL;
8895 bool grouped_load = false;
8896 stmt_vec_info first_stmt_info;
8897 stmt_vec_info first_stmt_info_for_drptr = NULL;
8898 bool compute_in_loop = false;
8899 class loop *at_loop;
8900 int vec_num;
8901 bool slp = (slp_node != NULL);
8902 bool slp_perm = false;
8903 bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
8904 poly_uint64 vf;
8905 tree aggr_type;
8906 gather_scatter_info gs_info;
8907 tree ref_type;
8908 enum vect_def_type mask_dt = vect_unknown_def_type;
8910 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
8911 return false;
8913 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
8914 && ! vec_stmt)
8915 return false;
8917 if (!STMT_VINFO_DATA_REF (stmt_info))
8918 return false;
8920 tree mask = NULL_TREE, mask_vectype = NULL_TREE;
8921 int mask_index = -1;
8922 if (gassign *assign = dyn_cast <gassign *> (stmt_info->stmt))
8924 scalar_dest = gimple_assign_lhs (assign);
8925 if (TREE_CODE (scalar_dest) != SSA_NAME)
8926 return false;
8928 tree_code code = gimple_assign_rhs_code (assign);
8929 if (code != ARRAY_REF
8930 && code != BIT_FIELD_REF
8931 && code != INDIRECT_REF
8932 && code != COMPONENT_REF
8933 && code != IMAGPART_EXPR
8934 && code != REALPART_EXPR
8935 && code != MEM_REF
8936 && TREE_CODE_CLASS (code) != tcc_declaration)
8937 return false;
8939 else
8941 gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
8942 if (!call || !gimple_call_internal_p (call))
8943 return false;
8945 internal_fn ifn = gimple_call_internal_fn (call);
8946 if (!internal_load_fn_p (ifn))
8947 return false;
8949 scalar_dest = gimple_call_lhs (call);
8950 if (!scalar_dest)
8951 return false;
8953 mask_index = internal_fn_mask_index (ifn);
8954 /* ??? For SLP the mask operand is always last. */
8955 if (mask_index >= 0 && slp_node)
8956 mask_index = SLP_TREE_CHILDREN (slp_node).length () - 1;
8957 if (mask_index >= 0
8958 && !vect_check_scalar_mask (vinfo, stmt_info, slp_node, mask_index,
8959 &mask, NULL, &mask_dt, &mask_vectype))
8960 return false;
8963 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
8964 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
8966 if (loop_vinfo)
8968 loop = LOOP_VINFO_LOOP (loop_vinfo);
8969 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt_info);
8970 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
8972 else
8973 vf = 1;
8975 /* Multiple types in SLP are handled by creating the appropriate number of
8976 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
8977 case of SLP. */
8978 if (slp)
8979 ncopies = 1;
8980 else
8981 ncopies = vect_get_num_copies (loop_vinfo, vectype);
8983 gcc_assert (ncopies >= 1);
8985 /* FORNOW. This restriction should be relaxed. */
8986 if (nested_in_vect_loop && ncopies > 1)
8988 if (dump_enabled_p ())
8989 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8990 "multiple types in nested loop.\n");
8991 return false;
8994 /* Invalidate assumptions made by dependence analysis when vectorization
8995 on the unrolled body effectively re-orders stmts. */
8996 if (ncopies > 1
8997 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
8998 && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
8999 STMT_VINFO_MIN_NEG_DIST (stmt_info)))
9001 if (dump_enabled_p ())
9002 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9003 "cannot perform implicit CSE when unrolling "
9004 "with negative dependence distance\n");
9005 return false;
9008 elem_type = TREE_TYPE (vectype);
9009 mode = TYPE_MODE (vectype);
9011 /* FORNOW. In some cases can vectorize even if data-type not supported
9012 (e.g. - data copies). */
9013 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
9015 if (dump_enabled_p ())
9016 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9017 "Aligned load, but unsupported type.\n");
9018 return false;
9021 /* Check if the load is a part of an interleaving chain. */
9022 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
9024 grouped_load = true;
9025 /* FORNOW */
9026 gcc_assert (!nested_in_vect_loop);
9027 gcc_assert (!STMT_VINFO_GATHER_SCATTER_P (stmt_info));
9029 first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
9030 group_size = DR_GROUP_SIZE (first_stmt_info);
9032 /* Refuse non-SLP vectorization of SLP-only groups. */
9033 if (!slp && STMT_VINFO_SLP_VECT_ONLY (first_stmt_info))
9035 if (dump_enabled_p ())
9036 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9037 "cannot vectorize load in non-SLP mode.\n");
9038 return false;
9041 if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
9043 slp_perm = true;
9045 if (!loop_vinfo)
9047 /* In BB vectorization we may not actually use a loaded vector
9048 accessing elements in excess of DR_GROUP_SIZE. */
9049 stmt_vec_info group_info = SLP_TREE_SCALAR_STMTS (slp_node)[0];
9050 group_info = DR_GROUP_FIRST_ELEMENT (group_info);
9051 unsigned HOST_WIDE_INT nunits;
9052 unsigned j, k, maxk = 0;
9053 FOR_EACH_VEC_ELT (SLP_TREE_LOAD_PERMUTATION (slp_node), j, k)
9054 if (k > maxk)
9055 maxk = k;
9056 tree vectype = SLP_TREE_VECTYPE (slp_node);
9057 if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nunits)
9058 || maxk >= (DR_GROUP_SIZE (group_info) & ~(nunits - 1)))
9060 if (dump_enabled_p ())
9061 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9062 "BB vectorization with gaps at the end of "
9063 "a load is not supported\n");
9064 return false;
9068 auto_vec<tree> tem;
9069 unsigned n_perms;
9070 if (!vect_transform_slp_perm_load (vinfo, slp_node, tem, NULL, vf,
9071 true, &n_perms))
9073 if (dump_enabled_p ())
9074 dump_printf_loc (MSG_MISSED_OPTIMIZATION,
9075 vect_location,
9076 "unsupported load permutation\n");
9077 return false;
9081 /* Invalidate assumptions made by dependence analysis when vectorization
9082 on the unrolled body effectively re-orders stmts. */
9083 if (!PURE_SLP_STMT (stmt_info)
9084 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
9085 && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
9086 STMT_VINFO_MIN_NEG_DIST (stmt_info)))
9088 if (dump_enabled_p ())
9089 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9090 "cannot perform implicit CSE when performing "
9091 "group loads with negative dependence distance\n");
9092 return false;
9095 else
9096 group_size = 1;
9098 vect_memory_access_type memory_access_type;
9099 enum dr_alignment_support alignment_support_scheme;
9100 int misalignment;
9101 poly_int64 poffset;
9102 if (!get_load_store_type (vinfo, stmt_info, vectype, slp_node, mask, VLS_LOAD,
9103 ncopies, &memory_access_type, &poffset,
9104 &alignment_support_scheme, &misalignment, &gs_info))
9105 return false;
9107 if (mask)
9109 if (memory_access_type == VMAT_CONTIGUOUS)
9111 machine_mode vec_mode = TYPE_MODE (vectype);
9112 if (!VECTOR_MODE_P (vec_mode)
9113 || !can_vec_mask_load_store_p (vec_mode,
9114 TYPE_MODE (mask_vectype), true))
9115 return false;
9117 else if (memory_access_type != VMAT_LOAD_STORE_LANES
9118 && memory_access_type != VMAT_GATHER_SCATTER)
9120 if (dump_enabled_p ())
9121 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9122 "unsupported access type for masked load.\n");
9123 return false;
9125 else if (memory_access_type == VMAT_GATHER_SCATTER
9126 && gs_info.ifn == IFN_LAST
9127 && !gs_info.decl)
9129 if (dump_enabled_p ())
9130 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9131 "unsupported masked emulated gather.\n");
9132 return false;
9136 if (!vec_stmt) /* transformation not required. */
9138 if (slp_node
9139 && mask
9140 && !vect_maybe_update_slp_op_vectype (SLP_TREE_CHILDREN (slp_node)[0],
9141 mask_vectype))
9143 if (dump_enabled_p ())
9144 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9145 "incompatible vector types for invariants\n");
9146 return false;
9149 if (!slp)
9150 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
9152 if (loop_vinfo
9153 && LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo))
9154 check_load_store_for_partial_vectors (loop_vinfo, vectype, slp_node,
9155 VLS_LOAD, group_size,
9156 memory_access_type, &gs_info,
9157 mask);
9159 if (dump_enabled_p ()
9160 && memory_access_type != VMAT_ELEMENTWISE
9161 && memory_access_type != VMAT_GATHER_SCATTER
9162 && alignment_support_scheme != dr_aligned)
9163 dump_printf_loc (MSG_NOTE, vect_location,
9164 "Vectorizing an unaligned access.\n");
9166 if (memory_access_type == VMAT_LOAD_STORE_LANES)
9167 vinfo->any_known_not_updated_vssa = true;
9169 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
9170 vect_model_load_cost (vinfo, stmt_info, ncopies, vf, memory_access_type,
9171 alignment_support_scheme, misalignment,
9172 &gs_info, slp_node, cost_vec);
9173 return true;
9176 if (!slp)
9177 gcc_assert (memory_access_type
9178 == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
9180 if (dump_enabled_p ())
9181 dump_printf_loc (MSG_NOTE, vect_location,
9182 "transform load. ncopies = %d\n", ncopies);
9184 /* Transform. */
9186 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info), *first_dr_info = NULL;
9187 ensure_base_align (dr_info);
9189 if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl)
9191 vect_build_gather_load_calls (vinfo,
9192 stmt_info, gsi, vec_stmt, &gs_info, mask);
9193 return true;
9196 if (memory_access_type == VMAT_INVARIANT)
9198 gcc_assert (!grouped_load && !mask && !bb_vinfo);
9199 /* If we have versioned for aliasing or the loop doesn't
9200 have any data dependencies that would preclude this,
9201 then we are sure this is a loop invariant load and
9202 thus we can insert it on the preheader edge. */
9203 bool hoist_p = (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
9204 && !nested_in_vect_loop
9205 && hoist_defs_of_uses (stmt_info, loop));
9206 if (hoist_p)
9208 gassign *stmt = as_a <gassign *> (stmt_info->stmt);
9209 if (dump_enabled_p ())
9210 dump_printf_loc (MSG_NOTE, vect_location,
9211 "hoisting out of the vectorized loop: %G",
9212 (gimple *) stmt);
9213 scalar_dest = copy_ssa_name (scalar_dest);
9214 tree rhs = unshare_expr (gimple_assign_rhs1 (stmt));
9215 edge pe = loop_preheader_edge (loop);
9216 gphi *vphi = get_virtual_phi (loop->header);
9217 tree vuse;
9218 if (vphi)
9219 vuse = PHI_ARG_DEF_FROM_EDGE (vphi, pe);
9220 else
9221 vuse = gimple_vuse (gsi_stmt (*gsi));
9222 gimple *new_stmt = gimple_build_assign (scalar_dest, rhs);
9223 gimple_set_vuse (new_stmt, vuse);
9224 gsi_insert_on_edge_immediate (pe, new_stmt);
9226 /* These copies are all equivalent, but currently the representation
9227 requires a separate STMT_VINFO_VEC_STMT for each one. */
9228 gimple_stmt_iterator gsi2 = *gsi;
9229 gsi_next (&gsi2);
9230 for (j = 0; j < ncopies; j++)
9232 if (hoist_p)
9233 new_temp = vect_init_vector (vinfo, stmt_info, scalar_dest,
9234 vectype, NULL);
9235 else
9236 new_temp = vect_init_vector (vinfo, stmt_info, scalar_dest,
9237 vectype, &gsi2);
9238 gimple *new_stmt = SSA_NAME_DEF_STMT (new_temp);
9239 if (slp)
9240 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
9241 else
9243 if (j == 0)
9244 *vec_stmt = new_stmt;
9245 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
9248 return true;
9251 if (memory_access_type == VMAT_ELEMENTWISE
9252 || memory_access_type == VMAT_STRIDED_SLP)
9254 gimple_stmt_iterator incr_gsi;
9255 bool insert_after;
9256 tree offvar;
9257 tree ivstep;
9258 tree running_off;
9259 vec<constructor_elt, va_gc> *v = NULL;
9260 tree stride_base, stride_step, alias_off;
9261 /* Checked by get_load_store_type. */
9262 unsigned int const_nunits = nunits.to_constant ();
9263 unsigned HOST_WIDE_INT cst_offset = 0;
9264 tree dr_offset;
9266 gcc_assert (!LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo));
9267 gcc_assert (!nested_in_vect_loop);
9269 if (grouped_load)
9271 first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
9272 first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
9274 else
9276 first_stmt_info = stmt_info;
9277 first_dr_info = dr_info;
9279 if (slp && grouped_load)
9281 group_size = DR_GROUP_SIZE (first_stmt_info);
9282 ref_type = get_group_alias_ptr_type (first_stmt_info);
9284 else
9286 if (grouped_load)
9287 cst_offset
9288 = (tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)))
9289 * vect_get_place_in_interleaving_chain (stmt_info,
9290 first_stmt_info));
9291 group_size = 1;
9292 ref_type = reference_alias_ptr_type (DR_REF (dr_info->dr));
9295 dr_offset = get_dr_vinfo_offset (vinfo, first_dr_info);
9296 stride_base
9297 = fold_build_pointer_plus
9298 (DR_BASE_ADDRESS (first_dr_info->dr),
9299 size_binop (PLUS_EXPR,
9300 convert_to_ptrofftype (dr_offset),
9301 convert_to_ptrofftype (DR_INIT (first_dr_info->dr))));
9302 stride_step = fold_convert (sizetype, DR_STEP (first_dr_info->dr));
9304 /* For a load with loop-invariant (but other than power-of-2)
9305 stride (i.e. not a grouped access) like so:
9307 for (i = 0; i < n; i += stride)
9308 ... = array[i];
9310 we generate a new induction variable and new accesses to
9311 form a new vector (or vectors, depending on ncopies):
9313 for (j = 0; ; j += VF*stride)
9314 tmp1 = array[j];
9315 tmp2 = array[j + stride];
9317 vectemp = {tmp1, tmp2, ...}
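     As a concrete (purely illustrative) instance, with stride == 3,
     VF == 4 and a four-element vectype this amounts to

       tmp1 = array[j];      tmp2 = array[j + 3];
       tmp3 = array[j + 6];  tmp4 = array[j + 9];
       vectemp = {tmp1, tmp2, tmp3, tmp4}

     with j advancing by VF * stride == 12 scalar elements per vector
     iteration.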
9320 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
9321 build_int_cst (TREE_TYPE (stride_step), vf));
9323 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
9325 stride_base = cse_and_gimplify_to_preheader (loop_vinfo, stride_base);
9326 ivstep = cse_and_gimplify_to_preheader (loop_vinfo, ivstep);
9327 create_iv (stride_base, ivstep, NULL,
9328 loop, &incr_gsi, insert_after,
9329 &offvar, NULL);
9331 stride_step = cse_and_gimplify_to_preheader (loop_vinfo, stride_step);
9333 running_off = offvar;
9334 alias_off = build_int_cst (ref_type, 0);
9335 int nloads = const_nunits;
9336 int lnel = 1;
9337 tree ltype = TREE_TYPE (vectype);
9338 tree lvectype = vectype;
9339 auto_vec<tree> dr_chain;
9340 if (memory_access_type == VMAT_STRIDED_SLP)
9342 if (group_size < const_nunits)
9344 /* First check if vec_init optab supports construction from vector
9345 elts directly. Otherwise avoid emitting a constructor of
9346 vector elements by performing the loads using an integer type
9347 of the same size, constructing a vector of those and then
9348 re-interpreting it as the original vector type. This avoids a
9349 huge runtime penalty due to the general inability to perform
9350 store forwarding from smaller stores to a larger load. */
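/* A hypothetical instance of this fallback: for a V16QI vectype with
   group_size == 4, vector_vector_composition_type can return V4SI with
   PTYPE a 32-bit integer type, so the code below performs four 32-bit
   loads, builds a V4SI constructor from them and VIEW_CONVERTs the
   result back to V16QI instead of assembling 16 QImode loads.  */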
9351 tree ptype;
9352 tree vtype
9353 = vector_vector_composition_type (vectype,
9354 const_nunits / group_size,
9355 &ptype);
9356 if (vtype != NULL_TREE)
9358 nloads = const_nunits / group_size;
9359 lnel = group_size;
9360 lvectype = vtype;
9361 ltype = ptype;
9364 else
9366 nloads = 1;
9367 lnel = const_nunits;
9368 ltype = vectype;
9370 ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
9372 /* Load vector(1) scalar_type if the vectype is a one-element vector. */
9373 else if (nloads == 1)
9374 ltype = vectype;
9376 if (slp)
9378 /* For SLP permutation support we need to load the whole group,
9379 not only the number of vector stmts the permutation result
9380 fits in. */
9381 if (slp_perm)
9383 /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for
9384 variable VF. */
9385 unsigned int const_vf = vf.to_constant ();
9386 ncopies = CEIL (group_size * const_vf, const_nunits);
9387 dr_chain.create (ncopies);
9389 else
9390 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
9392 unsigned int group_el = 0;
9393 unsigned HOST_WIDE_INT
9394 elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
9395 unsigned int n_groups = 0;
9396 for (j = 0; j < ncopies; j++)
9398 if (nloads > 1)
9399 vec_alloc (v, nloads);
9400 gimple *new_stmt = NULL;
9401 for (i = 0; i < nloads; i++)
9403 tree this_off = build_int_cst (TREE_TYPE (alias_off),
9404 group_el * elsz + cst_offset);
9405 tree data_ref = build2 (MEM_REF, ltype, running_off, this_off);
9406 vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr));
9407 new_stmt = gimple_build_assign (make_ssa_name (ltype), data_ref);
9408 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
9409 if (nloads > 1)
9410 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE,
9411 gimple_assign_lhs (new_stmt));
9413 group_el += lnel;
9414 if (! slp
9415 || group_el == group_size)
9417 n_groups++;
9418 /* When doing SLP, make sure not to load elements from
9419 the next vector iteration; those will not be accessed,
9420 so just use the last element again. See PR107451. */
9421 if (!slp || known_lt (n_groups, vf))
9423 tree newoff = copy_ssa_name (running_off);
9424 gimple *incr
9425 = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
9426 running_off, stride_step);
9427 vect_finish_stmt_generation (vinfo, stmt_info, incr, gsi);
9428 running_off = newoff;
9430 group_el = 0;
9433 if (nloads > 1)
9435 tree vec_inv = build_constructor (lvectype, v);
9436 new_temp = vect_init_vector (vinfo, stmt_info,
9437 vec_inv, lvectype, gsi);
9438 new_stmt = SSA_NAME_DEF_STMT (new_temp);
9439 if (lvectype != vectype)
9441 new_stmt = gimple_build_assign (make_ssa_name (vectype),
9442 VIEW_CONVERT_EXPR,
9443 build1 (VIEW_CONVERT_EXPR,
9444 vectype, new_temp));
9445 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
9449 if (slp)
9451 if (slp_perm)
9452 dr_chain.quick_push (gimple_assign_lhs (new_stmt));
9453 else
9454 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
9456 else
9458 if (j == 0)
9459 *vec_stmt = new_stmt;
9460 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
9463 if (slp_perm)
9465 unsigned n_perms;
9466 vect_transform_slp_perm_load (vinfo, slp_node, dr_chain, gsi, vf,
9467 false, &n_perms);
9469 return true;
9472 if (memory_access_type == VMAT_GATHER_SCATTER
9473 || (!slp && memory_access_type == VMAT_CONTIGUOUS))
9474 grouped_load = false;
9476 if (grouped_load)
9478 first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
9479 group_size = DR_GROUP_SIZE (first_stmt_info);
9480 /* For SLP vectorization we directly vectorize a subchain
9481 without permutation. */
9482 if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
9483 first_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[0];
9484 /* For BB vectorization always use the first stmt to base
9485 the data ref pointer on. */
9486 if (bb_vinfo)
9487 first_stmt_info_for_drptr
9488 = vect_find_first_scalar_stmt_in_slp (slp_node);
9490 /* Check if the chain of loads is already vectorized. */
9491 if (STMT_VINFO_VEC_STMTS (first_stmt_info).exists ()
9492 /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
9493 ??? But we can only do so if there is exactly one
9494 as we have no way to get at the rest. Leave the CSE
9495 opportunity alone.
9496 ??? With the group load eventually participating
9497 in multiple different permutations (having multiple
9498 slp nodes which refer to the same group) the CSE
9499 is even wrong code. See PR56270. */
9500 && !slp)
9502 *vec_stmt = STMT_VINFO_VEC_STMTS (stmt_info)[0];
9503 return true;
9505 first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
9506 group_gap_adj = 0;
9508 /* VEC_NUM is the number of vect stmts to be created for this group. */
9509 if (slp)
9511 grouped_load = false;
9512 /* If an SLP permutation is from N elements to N elements,
9513 and if one vector holds a whole number of N, we can load
9514 the inputs to the permutation in the same way as an
9515 unpermuted sequence. In other cases we need to load the
9516 whole group, not only the number of vector stmts the
9517 permutation result fits in. */
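/* Illustrative numbers only: with GROUP_SIZE == 2, SLP_TREE_LANES == 2
   and a V4SI vectype, nunits (4) is a multiple of the group size, so
   the permutation inputs are loaded exactly like an unpermuted
   sequence.  With GROUP_SIZE == 3 a V4SI vector does not hold a whole
   number of groups, so the whole group has to be loaded instead.  */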
9518 unsigned scalar_lanes = SLP_TREE_LANES (slp_node);
9519 if (slp_perm
9520 && (group_size != scalar_lanes
9521 || !multiple_p (nunits, group_size)))
9523 /* We don't yet generate such SLP_TREE_LOAD_PERMUTATIONs for
9524 variable VF; see vect_transform_slp_perm_load. */
9525 unsigned int const_vf = vf.to_constant ();
9526 unsigned int const_nunits = nunits.to_constant ();
9527 vec_num = CEIL (group_size * const_vf, const_nunits);
9528 group_gap_adj = vf * group_size - nunits * vec_num;
9530 else
9532 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
9533 group_gap_adj
9534 = group_size - scalar_lanes;
9537 else
9538 vec_num = group_size;
9540 ref_type = get_group_alias_ptr_type (first_stmt_info);
9542 else
9544 first_stmt_info = stmt_info;
9545 first_dr_info = dr_info;
9546 group_size = vec_num = 1;
9547 group_gap_adj = 0;
9548 ref_type = reference_alias_ptr_type (DR_REF (first_dr_info->dr));
9549 if (slp)
9550 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
9553 gcc_assert (alignment_support_scheme);
9554 vec_loop_masks *loop_masks
9555 = (loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
9556 ? &LOOP_VINFO_MASKS (loop_vinfo)
9557 : NULL);
9558 vec_loop_lens *loop_lens
9559 = (loop_vinfo && LOOP_VINFO_FULLY_WITH_LENGTH_P (loop_vinfo)
9560 ? &LOOP_VINFO_LENS (loop_vinfo)
9561 : NULL);
9563 /* Shouldn't go with length-based approach if fully masked. */
9564 gcc_assert (!loop_lens || !loop_masks);
9566 /* Targets with load-lane instructions must not require explicit
9567 realignment. vect_supportable_dr_alignment always returns either
9568 dr_aligned or dr_unaligned_supported for masked operations. */
9569 gcc_assert ((memory_access_type != VMAT_LOAD_STORE_LANES
9570 && !mask
9571 && !loop_masks)
9572 || alignment_support_scheme == dr_aligned
9573 || alignment_support_scheme == dr_unaligned_supported);
9575 /* In case the vectorization factor (VF) is bigger than the number
9576 of elements that we can fit in a vectype (nunits), we have to generate
9577 more than one vector stmt - i.e - we need to "unroll" the
9578 vector stmt by a factor VF/nunits. In doing so, we record a pointer
9579 from one copy of the vector stmt to the next, in the field
9580 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
9581 stages to find the correct vector defs to be used when vectorizing
9582 stmts that use the defs of the current stmt. The example below
9583 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
9584 need to create 4 vectorized stmts):
9586 before vectorization:
9587 RELATED_STMT VEC_STMT
9588 S1: x = memref - -
9589 S2: z = x + 1 - -
9591 step 1: vectorize stmt S1:
9592 We first create the vector stmt VS1_0, and, as usual, record a
9593 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
9594 Next, we create the vector stmt VS1_1, and record a pointer to
9595 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
9596 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
9597 stmts and pointers:
9598 RELATED_STMT VEC_STMT
9599 VS1_0: vx0 = memref0 VS1_1 -
9600 VS1_1: vx1 = memref1 VS1_2 -
9601 VS1_2: vx2 = memref2 VS1_3 -
9602 VS1_3: vx3 = memref3 - -
9603 S1: x = load - VS1_0
9604 S2: z = x + 1 - -
9607 /* In case of interleaving (non-unit grouped access):
9609 S1: x2 = &base + 2
9610 S2: x0 = &base
9611 S3: x1 = &base + 1
9612 S4: x3 = &base + 3
9614 Vectorized loads are created in the order of memory accesses
9615 starting from the access of the first stmt of the chain:
9617 VS1: vx0 = &base
9618 VS2: vx1 = &base + vec_size*1
9619 VS3: vx2 = &base + vec_size*2
9620 VS4: vx3 = &base + vec_size*3
9622 Then permutation statements are generated:
9624 VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
9625 VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
9628 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
9629 (the order of the data-refs in the output of vect_permute_load_chain
9630 corresponds to the order of scalar stmts in the interleaving chain - see
9631 the documentation of vect_permute_load_chain()).
9632 The generation of permutation stmts and recording them in
9633 STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().
9635 In case of both multiple types and interleaving, the vector loads and
9636 permutation stmts above are created for every copy. The result vector
9637 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
9638 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
9640 /* If the data reference is aligned (dr_aligned) or potentially unaligned
9641 on a target that supports unaligned accesses (dr_unaligned_supported)
9642 we generate the following code:
9643 p = initial_addr;
9644 indx = 0;
9645 loop {
9646 p = p + indx * vectype_size;
9647 vec_dest = *(p);
9648 indx = indx + 1;
9651 Otherwise, the data reference is potentially unaligned on a target that
9652 does not support unaligned accesses (dr_explicit_realign_optimized) -
9653 then generate the following code, in which the data in each iteration is
9654 obtained by two vector loads, one from the previous iteration, and one
9655 from the current iteration:
9656 p1 = initial_addr;
9657 msq_init = *(floor(p1))
9658 p2 = initial_addr + VS - 1;
9659 realignment_token = call target_builtin;
9660 indx = 0;
9661 loop {
9662 p2 = p2 + indx * vectype_size
9663 lsq = *(floor(p2))
9664 vec_dest = realign_load (msq, lsq, realignment_token)
9665 indx = indx + 1;
9666 msq = lsq;
9667 } */
9669 /* If the misalignment remains the same throughout the execution of the
9670 loop, we can create the init_addr and permutation mask at the loop
9671 preheader. Otherwise, it needs to be created inside the loop.
9672 This can only occur when vectorizing memory accesses in the inner-loop
9673 nested within an outer-loop that is being vectorized. */
9675 if (nested_in_vect_loop
9676 && !multiple_p (DR_STEP_ALIGNMENT (dr_info->dr),
9677 GET_MODE_SIZE (TYPE_MODE (vectype))))
9679 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
9680 compute_in_loop = true;
9683 bool diff_first_stmt_info
9684 = first_stmt_info_for_drptr && first_stmt_info != first_stmt_info_for_drptr;
9686 tree offset = NULL_TREE;
9687 if ((alignment_support_scheme == dr_explicit_realign_optimized
9688 || alignment_support_scheme == dr_explicit_realign)
9689 && !compute_in_loop)
9691 /* If we have different first_stmt_info, we can't set up realignment
9692 here, since we can't guarantee that first_stmt_info's DR has been
9693 initialized yet; instead use first_stmt_info_for_drptr's DR, bumping
9694 the pointer by its distance from first_stmt_info's DR as below. */
9695 if (!diff_first_stmt_info)
9696 msq = vect_setup_realignment (vinfo,
9697 first_stmt_info, gsi, &realignment_token,
9698 alignment_support_scheme, NULL_TREE,
9699 &at_loop);
9700 if (alignment_support_scheme == dr_explicit_realign_optimized)
9702 phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
9703 offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
9704 size_one_node);
9705 gcc_assert (!first_stmt_info_for_drptr);
9708 else
9709 at_loop = loop;
9711 if (!known_eq (poffset, 0))
9712 offset = (offset
9713 ? size_binop (PLUS_EXPR, offset, size_int (poffset))
9714 : size_int (poffset));
9716 tree bump;
9717 tree vec_offset = NULL_TREE;
9718 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
9720 aggr_type = NULL_TREE;
9721 bump = NULL_TREE;
9723 else if (memory_access_type == VMAT_GATHER_SCATTER)
9725 aggr_type = elem_type;
9726 vect_get_strided_load_store_ops (stmt_info, loop_vinfo, &gs_info,
9727 &bump, &vec_offset);
9729 else
9731 if (memory_access_type == VMAT_LOAD_STORE_LANES)
9732 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
9733 else
9734 aggr_type = vectype;
9735 bump = vect_get_data_ptr_increment (vinfo, dr_info, aggr_type,
9736 memory_access_type);
9739 auto_vec<tree> vec_offsets;
9740 auto_vec<tree> vec_masks;
9741 if (mask)
9743 if (slp_node)
9744 vect_get_slp_defs (SLP_TREE_CHILDREN (slp_node)[mask_index],
9745 &vec_masks);
9746 else
9747 vect_get_vec_defs_for_operand (vinfo, stmt_info, ncopies, mask,
9748 &vec_masks, mask_vectype);
9750 tree vec_mask = NULL_TREE;
9751 poly_uint64 group_elt = 0;
9752 for (j = 0; j < ncopies; j++)
9754 /* 1. Create the vector or array pointer update chain. */
9755 if (j == 0)
9757 bool simd_lane_access_p
9758 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) != 0;
9759 if (simd_lane_access_p
9760 && TREE_CODE (DR_BASE_ADDRESS (first_dr_info->dr)) == ADDR_EXPR
9761 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr_info->dr), 0))
9762 && integer_zerop (get_dr_vinfo_offset (vinfo, first_dr_info))
9763 && integer_zerop (DR_INIT (first_dr_info->dr))
9764 && alias_sets_conflict_p (get_alias_set (aggr_type),
9765 get_alias_set (TREE_TYPE (ref_type)))
9766 && (alignment_support_scheme == dr_aligned
9767 || alignment_support_scheme == dr_unaligned_supported))
9769 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr_info->dr));
9770 dataref_offset = build_int_cst (ref_type, 0);
9772 else if (diff_first_stmt_info)
9774 dataref_ptr
9775 = vect_create_data_ref_ptr (vinfo, first_stmt_info_for_drptr,
9776 aggr_type, at_loop, offset, &dummy,
9777 gsi, &ptr_incr, simd_lane_access_p,
9778 bump);
9779 /* Adjust the pointer by the difference to first_stmt. */
9780 data_reference_p ptrdr
9781 = STMT_VINFO_DATA_REF (first_stmt_info_for_drptr);
9782 tree diff
9783 = fold_convert (sizetype,
9784 size_binop (MINUS_EXPR,
9785 DR_INIT (first_dr_info->dr),
9786 DR_INIT (ptrdr)));
9787 dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, gsi,
9788 stmt_info, diff);
9789 if (alignment_support_scheme == dr_explicit_realign)
9791 msq = vect_setup_realignment (vinfo,
9792 first_stmt_info_for_drptr, gsi,
9793 &realignment_token,
9794 alignment_support_scheme,
9795 dataref_ptr, &at_loop);
9796 gcc_assert (!compute_in_loop);
9799 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
9801 vect_get_gather_scatter_ops (loop_vinfo, loop, stmt_info,
9802 slp_node, &gs_info, &dataref_ptr,
9803 &vec_offsets);
9805 else
9806 dataref_ptr
9807 = vect_create_data_ref_ptr (vinfo, first_stmt_info, aggr_type,
9808 at_loop,
9809 offset, &dummy, gsi, &ptr_incr,
9810 simd_lane_access_p, bump);
9811 if (mask)
9812 vec_mask = vec_masks[0];
9814 else
9816 if (dataref_offset)
9817 dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
9818 bump);
9819 else if (!STMT_VINFO_GATHER_SCATTER_P (stmt_info))
9820 dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, gsi,
9821 stmt_info, bump);
9822 if (mask)
9823 vec_mask = vec_masks[j];
9826 if (grouped_load || slp_perm)
9827 dr_chain.create (vec_num);
9829 gimple *new_stmt = NULL;
9830 if (memory_access_type == VMAT_LOAD_STORE_LANES)
9832 tree vec_array;
9834 vec_array = create_vector_array (vectype, vec_num);
9836 tree final_mask = NULL_TREE;
9837 if (loop_masks)
9838 final_mask = vect_get_loop_mask (gsi, loop_masks, ncopies,
9839 vectype, j);
9840 if (vec_mask)
9841 final_mask = prepare_vec_mask (loop_vinfo, mask_vectype,
9842 final_mask, vec_mask, gsi);
9844 gcall *call;
9845 if (final_mask)
9847 /* Emit:
9848 VEC_ARRAY = MASK_LOAD_LANES (DATAREF_PTR, ALIAS_PTR,
9849 VEC_MASK). */
9850 unsigned int align = TYPE_ALIGN (TREE_TYPE (vectype));
9851 tree alias_ptr = build_int_cst (ref_type, align);
9852 call = gimple_build_call_internal (IFN_MASK_LOAD_LANES, 3,
9853 dataref_ptr, alias_ptr,
9854 final_mask);
9856 else
9858 /* Emit:
9859 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
9860 data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
9861 call = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
9863 gimple_call_set_lhs (call, vec_array);
9864 gimple_call_set_nothrow (call, true);
9865 vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
9866 new_stmt = call;
9868 /* Extract each vector into an SSA_NAME. */
9869 for (i = 0; i < vec_num; i++)
9871 new_temp = read_vector_array (vinfo, stmt_info, gsi, scalar_dest,
9872 vec_array, i);
9873 dr_chain.quick_push (new_temp);
9876 /* Record the mapping between SSA_NAMEs and statements. */
9877 vect_record_grouped_load_vectors (vinfo, stmt_info, dr_chain);
9879 /* Record that VEC_ARRAY is now dead. */
9880 vect_clobber_variable (vinfo, stmt_info, gsi, vec_array);
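/* Sketch of the sequence emitted above (hypothetical SSA names), for a
   two-vector unmasked group of ints:
     vect_array = .LOAD_LANES (MEM <int[8]> [(int *) dataref_ptr]);
     vect__1 = vect_array[0];
     vect__2 = vect_array[1];
     vect_array ={v} {CLOBBER};
   i.e. one lane load fills the array, read_vector_array extracts each
   lane into an SSA vector, and the array is then clobbered.  */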
9882 else
9884 for (i = 0; i < vec_num; i++)
9886 tree final_mask = NULL_TREE;
9887 if (loop_masks
9888 && memory_access_type != VMAT_INVARIANT)
9889 final_mask = vect_get_loop_mask (gsi, loop_masks,
9890 vec_num * ncopies,
9891 vectype, vec_num * j + i);
9892 if (vec_mask)
9893 final_mask = prepare_vec_mask (loop_vinfo, mask_vectype,
9894 final_mask, vec_mask, gsi);
9896 if (i > 0 && !STMT_VINFO_GATHER_SCATTER_P (stmt_info))
9897 dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr,
9898 gsi, stmt_info, bump);
9900 /* 2. Create the vector-load in the loop. */
9901 switch (alignment_support_scheme)
9903 case dr_aligned:
9904 case dr_unaligned_supported:
9906 unsigned int misalign;
9907 unsigned HOST_WIDE_INT align;
9909 if (memory_access_type == VMAT_GATHER_SCATTER
9910 && gs_info.ifn != IFN_LAST)
9912 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
9913 vec_offset = vec_offsets[vec_num * j + i];
9914 tree zero = build_zero_cst (vectype);
9915 tree scale = size_int (gs_info.scale);
9916 gcall *call;
9917 if (final_mask)
9918 call = gimple_build_call_internal
9919 (IFN_MASK_GATHER_LOAD, 5, dataref_ptr,
9920 vec_offset, scale, zero, final_mask);
9921 else
9922 call = gimple_build_call_internal
9923 (IFN_GATHER_LOAD, 4, dataref_ptr,
9924 vec_offset, scale, zero);
9925 gimple_call_set_nothrow (call, true);
9926 new_stmt = call;
9927 data_ref = NULL_TREE;
9928 break;
9930 else if (memory_access_type == VMAT_GATHER_SCATTER)
9932 /* Emulated gather-scatter. */
9933 gcc_assert (!final_mask);
9934 unsigned HOST_WIDE_INT const_nunits
9935 = nunits.to_constant ();
9936 unsigned HOST_WIDE_INT const_offset_nunits
9937 = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype)
9938 .to_constant ();
9939 vec<constructor_elt, va_gc> *ctor_elts;
9940 vec_alloc (ctor_elts, const_nunits);
9941 gimple_seq stmts = NULL;
9942 /* We support offset vectors with more elements
9943 than the data vector for now. */
9944 unsigned HOST_WIDE_INT factor
9945 = const_offset_nunits / const_nunits;
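/* E.g. (illustration only) V2DI data with a V4SI offset vectype gives
   FACTOR == 2, so data vectors j and j + 1 take their offsets from the
   two halves of the same offset vector.  */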
9946 vec_offset = vec_offsets[j / factor];
9947 unsigned elt_offset = (j % factor) * const_nunits;
9948 tree idx_type = TREE_TYPE (TREE_TYPE (vec_offset));
9949 tree scale = size_int (gs_info.scale);
9950 align
9951 = get_object_alignment (DR_REF (first_dr_info->dr));
9952 tree ltype = build_aligned_type (TREE_TYPE (vectype),
9953 align);
9954 for (unsigned k = 0; k < const_nunits; ++k)
9956 tree boff = size_binop (MULT_EXPR,
9957 TYPE_SIZE (idx_type),
9958 bitsize_int
9959 (k + elt_offset));
9960 tree idx = gimple_build (&stmts, BIT_FIELD_REF,
9961 idx_type, vec_offset,
9962 TYPE_SIZE (idx_type),
9963 boff);
9964 idx = gimple_convert (&stmts, sizetype, idx);
9965 idx = gimple_build (&stmts, MULT_EXPR,
9966 sizetype, idx, scale);
9967 tree ptr = gimple_build (&stmts, PLUS_EXPR,
9968 TREE_TYPE (dataref_ptr),
9969 dataref_ptr, idx);
9970 ptr = gimple_convert (&stmts, ptr_type_node, ptr);
9971 tree elt = make_ssa_name (TREE_TYPE (vectype));
9972 tree ref = build2 (MEM_REF, ltype, ptr,
9973 build_int_cst (ref_type, 0));
9974 new_stmt = gimple_build_assign (elt, ref);
9975 gimple_set_vuse (new_stmt,
9976 gimple_vuse (gsi_stmt (*gsi)));
9977 gimple_seq_add_stmt (&stmts, new_stmt);
9978 CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE, elt);
9980 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
9981 new_stmt = gimple_build_assign (NULL_TREE,
9982 build_constructor
9983 (vectype, ctor_elts));
9984 data_ref = NULL_TREE;
9985 break;
9988 align =
9989 known_alignment (DR_TARGET_ALIGNMENT (first_dr_info));
9990 if (alignment_support_scheme == dr_aligned)
9991 misalign = 0;
9992 else if (misalignment == DR_MISALIGNMENT_UNKNOWN)
9994 align = dr_alignment
9995 (vect_dr_behavior (vinfo, first_dr_info));
9996 misalign = 0;
9998 else
9999 misalign = misalignment;
10000 if (dataref_offset == NULL_TREE
10001 && TREE_CODE (dataref_ptr) == SSA_NAME)
10002 set_ptr_info_alignment (get_ptr_info (dataref_ptr),
10003 align, misalign);
10004 align = least_bit_hwi (misalign | align);
10006 if (final_mask)
10008 tree ptr = build_int_cst (ref_type,
10009 align * BITS_PER_UNIT);
10010 gcall *call
10011 = gimple_build_call_internal (IFN_MASK_LOAD, 3,
10012 dataref_ptr, ptr,
10013 final_mask);
10014 gimple_call_set_nothrow (call, true);
10015 new_stmt = call;
10016 data_ref = NULL_TREE;
10018 else if (loop_lens && memory_access_type != VMAT_INVARIANT)
10020 tree final_len
10021 = vect_get_loop_len (loop_vinfo, loop_lens,
10022 vec_num * ncopies,
10023 vec_num * j + i);
10024 tree ptr = build_int_cst (ref_type,
10025 align * BITS_PER_UNIT);
10027 machine_mode vmode = TYPE_MODE (vectype);
10028 opt_machine_mode new_ovmode
10029 = get_len_load_store_mode (vmode, true);
10030 machine_mode new_vmode = new_ovmode.require ();
10031 tree qi_type = unsigned_intQI_type_node;
10033 signed char biasval =
10034 LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS (loop_vinfo);
10036 tree bias = build_int_cst (intQI_type_node, biasval);
10038 gcall *call
10039 = gimple_build_call_internal (IFN_LEN_LOAD, 4,
10040 dataref_ptr, ptr,
10041 final_len, bias);
10042 gimple_call_set_nothrow (call, true);
10043 new_stmt = call;
10044 data_ref = NULL_TREE;
10046 /* Need conversion if it's wrapped with VnQI. */
10047 if (vmode != new_vmode)
10049 tree new_vtype
10050 = build_vector_type_for_mode (qi_type, new_vmode);
10051 tree var = vect_get_new_ssa_name (new_vtype,
10052 vect_simple_var);
10053 gimple_set_lhs (call, var);
10054 vect_finish_stmt_generation (vinfo, stmt_info, call,
10055 gsi);
10056 tree op = build1 (VIEW_CONVERT_EXPR, vectype, var);
10057 new_stmt
10058 = gimple_build_assign (vec_dest,
10059 VIEW_CONVERT_EXPR, op);
10062 else
10064 tree ltype = vectype;
10065 tree new_vtype = NULL_TREE;
10066 unsigned HOST_WIDE_INT gap
10067 = DR_GROUP_GAP (first_stmt_info);
10068 unsigned int vect_align
10069 = vect_known_alignment_in_bytes (first_dr_info,
10070 vectype);
10071 unsigned int scalar_dr_size
10072 = vect_get_scalar_dr_size (first_dr_info);
10073 /* If there's no peeling for gaps but we have a gap
10074 with SLP loads, then load the lower half of the
10075 vector only. See get_group_load_store_type for
10076 when we apply this optimization. */
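/* A hypothetical case this covers: GROUP_SIZE == 4, GAP == 2 and a
   V4SI vectype, where only the first two elements of each group are
   used.  A two-element half vector (or same-size integer) is loaded
   instead of a full V4SI that would touch the gap, and the constructor
   below pads the unused half with zeros.  */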
10077 if (slp
10078 && loop_vinfo
10079 && !LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
10080 && gap != 0
10081 && known_eq (nunits, (group_size - gap) * 2)
10082 && known_eq (nunits, group_size)
10083 && gap >= (vect_align / scalar_dr_size))
10085 tree half_vtype;
10086 new_vtype
10087 = vector_vector_composition_type (vectype, 2,
10088 &half_vtype);
10089 if (new_vtype != NULL_TREE)
10090 ltype = half_vtype;
10092 tree offset
10093 = (dataref_offset ? dataref_offset
10094 : build_int_cst (ref_type, 0));
10095 if (ltype != vectype
10096 && memory_access_type == VMAT_CONTIGUOUS_REVERSE)
10098 unsigned HOST_WIDE_INT gap_offset
10099 = gap * tree_to_uhwi (TYPE_SIZE_UNIT (elem_type));
10100 tree gapcst = build_int_cst (ref_type, gap_offset);
10101 offset = size_binop (PLUS_EXPR, offset, gapcst);
10103 data_ref
10104 = fold_build2 (MEM_REF, ltype, dataref_ptr, offset);
10105 if (alignment_support_scheme == dr_aligned)
10107 else
10108 TREE_TYPE (data_ref)
10109 = build_aligned_type (TREE_TYPE (data_ref),
10110 align * BITS_PER_UNIT);
10111 if (ltype != vectype)
10113 vect_copy_ref_info (data_ref,
10114 DR_REF (first_dr_info->dr));
10115 tree tem = make_ssa_name (ltype);
10116 new_stmt = gimple_build_assign (tem, data_ref);
10117 vect_finish_stmt_generation (vinfo, stmt_info,
10118 new_stmt, gsi);
10119 data_ref = NULL;
10120 vec<constructor_elt, va_gc> *v;
10121 vec_alloc (v, 2);
10122 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
10124 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE,
10125 build_zero_cst (ltype));
10126 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, tem);
10128 else
10130 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, tem);
10131 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE,
10132 build_zero_cst (ltype));
10134 gcc_assert (new_vtype != NULL_TREE);
10135 if (new_vtype == vectype)
10136 new_stmt = gimple_build_assign (
10137 vec_dest, build_constructor (vectype, v));
10138 else
10140 tree new_vname = make_ssa_name (new_vtype);
10141 new_stmt = gimple_build_assign (
10142 new_vname, build_constructor (new_vtype, v));
10143 vect_finish_stmt_generation (vinfo, stmt_info,
10144 new_stmt, gsi);
10145 new_stmt = gimple_build_assign (
10146 vec_dest, build1 (VIEW_CONVERT_EXPR, vectype,
10147 new_vname));
10151 break;
10153 case dr_explicit_realign:
10155 tree ptr, bump;
10157 tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));
10159 if (compute_in_loop)
10160 msq = vect_setup_realignment (vinfo, first_stmt_info, gsi,
10161 &realignment_token,
10162 dr_explicit_realign,
10163 dataref_ptr, NULL);
10165 if (TREE_CODE (dataref_ptr) == SSA_NAME)
10166 ptr = copy_ssa_name (dataref_ptr);
10167 else
10168 ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
10169 // For explicit realign the target alignment should be
10170 // known at compile time.
10171 unsigned HOST_WIDE_INT align =
10172 DR_TARGET_ALIGNMENT (first_dr_info).to_constant ();
10173 new_stmt = gimple_build_assign
10174 (ptr, BIT_AND_EXPR, dataref_ptr,
10175 build_int_cst
10176 (TREE_TYPE (dataref_ptr),
10177 -(HOST_WIDE_INT) align));
10178 vect_finish_stmt_generation (vinfo, stmt_info,
10179 new_stmt, gsi);
10180 data_ref
10181 = build2 (MEM_REF, vectype, ptr,
10182 build_int_cst (ref_type, 0));
10183 vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr));
10184 vec_dest = vect_create_destination_var (scalar_dest,
10185 vectype);
10186 new_stmt = gimple_build_assign (vec_dest, data_ref);
10187 new_temp = make_ssa_name (vec_dest, new_stmt);
10188 gimple_assign_set_lhs (new_stmt, new_temp);
10189 gimple_move_vops (new_stmt, stmt_info->stmt);
10190 vect_finish_stmt_generation (vinfo, stmt_info,
10191 new_stmt, gsi);
10192 msq = new_temp;
10194 bump = size_binop (MULT_EXPR, vs,
10195 TYPE_SIZE_UNIT (elem_type));
10196 bump = size_binop (MINUS_EXPR, bump, size_one_node);
10197 ptr = bump_vector_ptr (vinfo, dataref_ptr, NULL, gsi,
10198 stmt_info, bump);
10199 new_stmt = gimple_build_assign
10200 (NULL_TREE, BIT_AND_EXPR, ptr,
10201 build_int_cst
10202 (TREE_TYPE (ptr), -(HOST_WIDE_INT) align));
10203 if (TREE_CODE (ptr) == SSA_NAME)
10204 ptr = copy_ssa_name (ptr, new_stmt);
10205 else
10206 ptr = make_ssa_name (TREE_TYPE (ptr), new_stmt);
10207 gimple_assign_set_lhs (new_stmt, ptr);
10208 vect_finish_stmt_generation (vinfo, stmt_info,
10209 new_stmt, gsi);
10210 data_ref
10211 = build2 (MEM_REF, vectype, ptr,
10212 build_int_cst (ref_type, 0));
10213 break;
10215 case dr_explicit_realign_optimized:
10217 if (TREE_CODE (dataref_ptr) == SSA_NAME)
10218 new_temp = copy_ssa_name (dataref_ptr);
10219 else
10220 new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
10221 // We should only be doing this if we know the target
10222 // alignment at compile time.
10223 unsigned HOST_WIDE_INT align =
10224 DR_TARGET_ALIGNMENT (first_dr_info).to_constant ();
10225 new_stmt = gimple_build_assign
10226 (new_temp, BIT_AND_EXPR, dataref_ptr,
10227 build_int_cst (TREE_TYPE (dataref_ptr),
10228 -(HOST_WIDE_INT) align));
10229 vect_finish_stmt_generation (vinfo, stmt_info,
10230 new_stmt, gsi);
10231 data_ref
10232 = build2 (MEM_REF, vectype, new_temp,
10233 build_int_cst (ref_type, 0));
10234 break;
10236 default:
10237 gcc_unreachable ();
10239 vec_dest = vect_create_destination_var (scalar_dest, vectype);
10240 /* DATA_REF is null if we've already built the statement. */
10241 if (data_ref)
10243 vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr));
10244 new_stmt = gimple_build_assign (vec_dest, data_ref);
10246 new_temp = make_ssa_name (vec_dest, new_stmt);
10247 gimple_set_lhs (new_stmt, new_temp);
10248 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
10250 /* 3. Handle explicit realignment if necessary/supported.
10251 Create in loop:
10252 vec_dest = realign_load (msq, lsq, realignment_token) */
10253 if (alignment_support_scheme == dr_explicit_realign_optimized
10254 || alignment_support_scheme == dr_explicit_realign)
10256 lsq = gimple_assign_lhs (new_stmt);
10257 if (!realignment_token)
10258 realignment_token = dataref_ptr;
10259 vec_dest = vect_create_destination_var (scalar_dest, vectype);
10260 new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
10261 msq, lsq, realignment_token);
10262 new_temp = make_ssa_name (vec_dest, new_stmt);
10263 gimple_assign_set_lhs (new_stmt, new_temp);
10264 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
10266 if (alignment_support_scheme == dr_explicit_realign_optimized)
10268 gcc_assert (phi);
10269 if (i == vec_num - 1 && j == ncopies - 1)
10270 add_phi_arg (phi, lsq,
10271 loop_latch_edge (containing_loop),
10272 UNKNOWN_LOCATION);
10273 msq = lsq;
10277 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
10279 tree perm_mask = perm_mask_for_reverse (vectype);
10280 new_temp = permute_vec_elements (vinfo, new_temp, new_temp,
10281 perm_mask, stmt_info, gsi);
10282 new_stmt = SSA_NAME_DEF_STMT (new_temp);
10285 /* Collect vector loads and later create their permutation in
10286 vect_transform_grouped_load (). */
10287 if (grouped_load || slp_perm)
10288 dr_chain.quick_push (new_temp);
10290 /* Store vector loads in the corresponding SLP_NODE. */
10291 if (slp && !slp_perm)
10292 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
10294 /* With SLP permutation we load the gaps as well; without it
10295 we need to skip the gaps after we manage to fully load
10296 all elements. group_gap_adj is DR_GROUP_SIZE here. */
10297 group_elt += nunits;
10298 if (maybe_ne (group_gap_adj, 0U)
10299 && !slp_perm
10300 && known_eq (group_elt, group_size - group_gap_adj))
10302 poly_wide_int bump_val
10303 = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
10304 * group_gap_adj);
10305 if (tree_int_cst_sgn
10306 (vect_dr_behavior (vinfo, dr_info)->step) == -1)
10307 bump_val = -bump_val;
10308 tree bump = wide_int_to_tree (sizetype, bump_val);
10309 dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr,
10310 gsi, stmt_info, bump);
10311 group_elt = 0;
10314 /* Bump the vector pointer to account for a gap or for excess
10315 elements loaded for a permuted SLP load. */
10316 if (maybe_ne (group_gap_adj, 0U) && slp_perm)
10318 poly_wide_int bump_val
10319 = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
10320 * group_gap_adj);
10321 if (tree_int_cst_sgn
10322 (vect_dr_behavior (vinfo, dr_info)->step) == -1)
10323 bump_val = -bump_val;
10324 tree bump = wide_int_to_tree (sizetype, bump_val);
10325 dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, gsi,
10326 stmt_info, bump);
10330 if (slp && !slp_perm)
10331 continue;
10333 if (slp_perm)
10335 unsigned n_perms;
10336 /* For SLP we know we've seen all possible uses of dr_chain so
10337 direct vect_transform_slp_perm_load to DCE the unused parts.
10338 ??? This is a hack to prevent compile-time issues as seen
10339 in PR101120 and friends. */
10340 bool ok = vect_transform_slp_perm_load (vinfo, slp_node, dr_chain,
10341 gsi, vf, false, &n_perms,
10342 nullptr, true);
10343 gcc_assert (ok);
10345 else
10347 if (grouped_load)
10349 if (memory_access_type != VMAT_LOAD_STORE_LANES)
10350 vect_transform_grouped_load (vinfo, stmt_info, dr_chain,
10351 group_size, gsi);
10352 *vec_stmt = STMT_VINFO_VEC_STMTS (stmt_info)[0];
10354 else
10356 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
10359 dr_chain.release ();
10361 if (!slp)
10362 *vec_stmt = STMT_VINFO_VEC_STMTS (stmt_info)[0];
10364 return true;
10367 /* Function vect_is_simple_cond.
10369 Input:
10370 VINFO - the vec_info of the loop or basic block that is being vectorized.
10371 COND - Condition that is checked for simple use.
10373 Output:
10374 *COMP_VECTYPE - the vector type for the comparison.
10375 *DTS - The def types for the arguments of the comparison.
10377 Returns whether a COND can be vectorized. Checks whether
10378 condition operands are supportable using vect_is_simple_use. */
10380 static bool
10381 vect_is_simple_cond (tree cond, vec_info *vinfo, stmt_vec_info stmt_info,
10382 slp_tree slp_node, tree *comp_vectype,
10383 enum vect_def_type *dts, tree vectype)
10385 tree lhs, rhs;
10386 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
10387 slp_tree slp_op;
10389 /* Mask case. */
10390 if (TREE_CODE (cond) == SSA_NAME
10391 && VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (cond)))
10393 if (!vect_is_simple_use (vinfo, stmt_info, slp_node, 0, &cond,
10394 &slp_op, &dts[0], comp_vectype)
10395 || !*comp_vectype
10396 || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype))
10397 return false;
10398 return true;
10401 if (!COMPARISON_CLASS_P (cond))
10402 return false;
10404 lhs = TREE_OPERAND (cond, 0);
10405 rhs = TREE_OPERAND (cond, 1);
10407 if (TREE_CODE (lhs) == SSA_NAME)
10409 if (!vect_is_simple_use (vinfo, stmt_info, slp_node, 0,
10410 &lhs, &slp_op, &dts[0], &vectype1))
10411 return false;
10413 else if (TREE_CODE (lhs) == INTEGER_CST || TREE_CODE (lhs) == REAL_CST
10414 || TREE_CODE (lhs) == FIXED_CST)
10415 dts[0] = vect_constant_def;
10416 else
10417 return false;
10419 if (TREE_CODE (rhs) == SSA_NAME)
10421 if (!vect_is_simple_use (vinfo, stmt_info, slp_node, 1,
10422 &rhs, &slp_op, &dts[1], &vectype2))
10423 return false;
10425 else if (TREE_CODE (rhs) == INTEGER_CST || TREE_CODE (rhs) == REAL_CST
10426 || TREE_CODE (rhs) == FIXED_CST)
10427 dts[1] = vect_constant_def;
10428 else
10429 return false;
10431 if (vectype1 && vectype2
10432 && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1),
10433 TYPE_VECTOR_SUBPARTS (vectype2)))
10434 return false;
10436 *comp_vectype = vectype1 ? vectype1 : vectype2;
10437 /* Invariant comparison. */
10438 if (! *comp_vectype)
10440 tree scalar_type = TREE_TYPE (lhs);
10441 if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type))
10442 *comp_vectype = truth_type_for (vectype);
10443 else
10445 /* If we can widen the comparison to match vectype do so. */
10446 if (INTEGRAL_TYPE_P (scalar_type)
10447 && !slp_node
10448 && tree_int_cst_lt (TYPE_SIZE (scalar_type),
10449 TYPE_SIZE (TREE_TYPE (vectype))))
10450 scalar_type = build_nonstandard_integer_type
10451 (vector_element_bits (vectype), TYPE_UNSIGNED (scalar_type));
10452 *comp_vectype = get_vectype_for_scalar_type (vinfo, scalar_type,
10453 slp_node);
10457 return true;
10460 /* vectorizable_condition.
10462 Check if STMT_INFO is a conditional modify expression that can be vectorized.
10463 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
10464 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
10465 at GSI.
10467 When STMT_INFO is vectorized as a nested cycle, for_reduction is true.
10469 Return true if STMT_INFO is vectorizable in this way. */
10471 static bool
10472 vectorizable_condition (vec_info *vinfo,
10473 stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
10474 gimple **vec_stmt,
10475 slp_tree slp_node, stmt_vector_for_cost *cost_vec)
10477 tree scalar_dest = NULL_TREE;
10478 tree vec_dest = NULL_TREE;
10479 tree cond_expr, cond_expr0 = NULL_TREE, cond_expr1 = NULL_TREE;
10480 tree then_clause, else_clause;
10481 tree comp_vectype = NULL_TREE;
10482 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
10483 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
10484 tree vec_compare;
10485 tree new_temp;
10486 loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
10487 enum vect_def_type dts[4]
10488 = {vect_unknown_def_type, vect_unknown_def_type,
10489 vect_unknown_def_type, vect_unknown_def_type};
10490 int ndts = 4;
10491 int ncopies;
10492 int vec_num;
10493 enum tree_code code, cond_code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
10494 int i;
10495 bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
10496 vec<tree> vec_oprnds0 = vNULL;
10497 vec<tree> vec_oprnds1 = vNULL;
10498 vec<tree> vec_oprnds2 = vNULL;
10499 vec<tree> vec_oprnds3 = vNULL;
10500 tree vec_cmp_type;
10501 bool masked = false;
10503 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
10504 return false;
10506 /* Is vectorizable conditional operation? */
10507 gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
10508 if (!stmt)
10509 return false;
10511 code = gimple_assign_rhs_code (stmt);
10512 if (code != COND_EXPR)
10513 return false;
10515 stmt_vec_info reduc_info = NULL;
10516 int reduc_index = -1;
10517 vect_reduction_type reduction_type = TREE_CODE_REDUCTION;
10518 bool for_reduction
10519 = STMT_VINFO_REDUC_DEF (vect_orig_stmt (stmt_info)) != NULL;
10520 if (for_reduction)
10522 if (STMT_SLP_TYPE (stmt_info))
10523 return false;
10524 reduc_info = info_for_reduction (vinfo, stmt_info);
10525 reduction_type = STMT_VINFO_REDUC_TYPE (reduc_info);
10526 reduc_index = STMT_VINFO_REDUC_IDX (stmt_info);
10527 gcc_assert (reduction_type != EXTRACT_LAST_REDUCTION
10528 || reduc_index != -1);
10530 else
10532 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
10533 return false;
10536 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
10537 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
10539 if (slp_node)
10541 ncopies = 1;
10542 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
10544 else
10546 ncopies = vect_get_num_copies (loop_vinfo, vectype);
10547 vec_num = 1;
10550 gcc_assert (ncopies >= 1);
10551 if (for_reduction && ncopies > 1)
10552 return false; /* FORNOW */
10554 cond_expr = gimple_assign_rhs1 (stmt);
10556 if (!vect_is_simple_cond (cond_expr, vinfo, stmt_info, slp_node,
10557 &comp_vectype, &dts[0], vectype)
10558 || !comp_vectype)
10559 return false;
10561 unsigned op_adjust = COMPARISON_CLASS_P (cond_expr) ? 1 : 0;
10562 slp_tree then_slp_node, else_slp_node;
10563 if (!vect_is_simple_use (vinfo, stmt_info, slp_node, 1 + op_adjust,
10564 &then_clause, &then_slp_node, &dts[2], &vectype1))
10565 return false;
10566 if (!vect_is_simple_use (vinfo, stmt_info, slp_node, 2 + op_adjust,
10567 &else_clause, &else_slp_node, &dts[3], &vectype2))
10568 return false;
10570 if (vectype1 && !useless_type_conversion_p (vectype, vectype1))
10571 return false;
10573 if (vectype2 && !useless_type_conversion_p (vectype, vectype2))
10574 return false;
10576 masked = !COMPARISON_CLASS_P (cond_expr);
10577 vec_cmp_type = truth_type_for (comp_vectype);
10579 if (vec_cmp_type == NULL_TREE)
10580 return false;
10582 cond_code = TREE_CODE (cond_expr);
10583 if (!masked)
10585 cond_expr0 = TREE_OPERAND (cond_expr, 0);
10586 cond_expr1 = TREE_OPERAND (cond_expr, 1);
10589 /* For conditional reductions, the "then" value needs to be the candidate
10590 value calculated by this iteration while the "else" value needs to be
10591 the result carried over from previous iterations. If the COND_EXPR
10592 is the other way around, we need to swap it. */
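/* Illustrative only: for a conditional reduction written as
     res_1 = _cond ? res_0 : val_2;
   the carried value res_0 sits in the "then" arm (reduc_index == 1), so
   the comparison is inverted (or its result negated in the masked case)
   and the arms are swapped, yielding the canonical
     res_1 = !_cond ? val_2 : res_0;  */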
10593 bool must_invert_cmp_result = false;
10594 if (reduction_type == EXTRACT_LAST_REDUCTION && reduc_index == 1)
10596 if (masked)
10597 must_invert_cmp_result = true;
10598 else
10600 bool honor_nans = HONOR_NANS (TREE_TYPE (cond_expr0));
10601 tree_code new_code = invert_tree_comparison (cond_code, honor_nans);
10602 if (new_code == ERROR_MARK)
10603 must_invert_cmp_result = true;
10604 else
10606 cond_code = new_code;
10607 /* Make sure we don't accidentally use the old condition. */
10608 cond_expr = NULL_TREE;
10611 std::swap (then_clause, else_clause);
10614 if (!masked && VECTOR_BOOLEAN_TYPE_P (comp_vectype))
10616 /* Boolean values may have another representation in vectors
10617 and therefore we prefer bit operations over comparison for
10618 them (which also works for scalar masks). We store opcodes
10619 to use in bitop1 and bitop2. Statement is vectorized as
10620 BITOP2 (rhs1 BITOP1 rhs2) or rhs1 BITOP2 (BITOP1 rhs2)
10621 depending on bitop1 and bitop2 arity. */
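   /* For example (illustrative only): for boolean operands a and b taking
      the values 0 and 1, a > b is equivalent to a & ~b, so GT_EXPR maps to
      bitop1 = BIT_NOT_EXPR (applied to the second operand) followed by
      bitop2 = BIT_AND_EXPR; likewise a >= b is a | ~b and a == b is
      ~(a ^ b), matching the table below.  */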
10622 switch (cond_code)
10624 case GT_EXPR:
10625 bitop1 = BIT_NOT_EXPR;
10626 bitop2 = BIT_AND_EXPR;
10627 break;
10628 case GE_EXPR:
10629 bitop1 = BIT_NOT_EXPR;
10630 bitop2 = BIT_IOR_EXPR;
10631 break;
10632 case LT_EXPR:
10633 bitop1 = BIT_NOT_EXPR;
10634 bitop2 = BIT_AND_EXPR;
10635 std::swap (cond_expr0, cond_expr1);
10636 break;
10637 case LE_EXPR:
10638 bitop1 = BIT_NOT_EXPR;
10639 bitop2 = BIT_IOR_EXPR;
10640 std::swap (cond_expr0, cond_expr1);
10641 break;
10642 case NE_EXPR:
10643 bitop1 = BIT_XOR_EXPR;
10644 break;
10645 case EQ_EXPR:
10646 bitop1 = BIT_XOR_EXPR;
10647 bitop2 = BIT_NOT_EXPR;
10648 break;
10649 default:
10650 return false;
10652 cond_code = SSA_NAME;
10655 if (TREE_CODE_CLASS (cond_code) == tcc_comparison
10656 && reduction_type == EXTRACT_LAST_REDUCTION
10657 && !expand_vec_cmp_expr_p (comp_vectype, vec_cmp_type, cond_code))
10659 if (dump_enabled_p ())
10660 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10661 "reduction comparison operation not supported.\n");
10662 return false;
10665 if (!vec_stmt)
10667 if (bitop1 != NOP_EXPR)
10669 machine_mode mode = TYPE_MODE (comp_vectype);
10670 optab optab;
10672 optab = optab_for_tree_code (bitop1, comp_vectype, optab_default);
10673 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
10674 return false;
10676 if (bitop2 != NOP_EXPR)
10678 optab = optab_for_tree_code (bitop2, comp_vectype,
10679 optab_default);
10680 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
10681 return false;
10685 vect_cost_for_stmt kind = vector_stmt;
10686 if (reduction_type == EXTRACT_LAST_REDUCTION)
10687 /* Count one reduction-like operation per vector. */
10688 kind = vec_to_scalar;
10689 else if (!expand_vec_cond_expr_p (vectype, comp_vectype, cond_code))
10690 return false;
10692 if (slp_node
10693 && (!vect_maybe_update_slp_op_vectype
10694 (SLP_TREE_CHILDREN (slp_node)[0], comp_vectype)
10695 || (op_adjust == 1
10696 && !vect_maybe_update_slp_op_vectype
10697 (SLP_TREE_CHILDREN (slp_node)[1], comp_vectype))
10698 || !vect_maybe_update_slp_op_vectype (then_slp_node, vectype)
10699 || !vect_maybe_update_slp_op_vectype (else_slp_node, vectype)))
10701 if (dump_enabled_p ())
10702 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10703 "incompatible vector types for invariants\n");
10704 return false;
10707 if (loop_vinfo && for_reduction
10708 && LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo))
10710 if (reduction_type == EXTRACT_LAST_REDUCTION)
10711 vect_record_loop_mask (loop_vinfo, &LOOP_VINFO_MASKS (loop_vinfo),
10712 ncopies * vec_num, vectype, NULL);
10713 /* Extra inactive lanes should be safe for vect_nested_cycle. */
10714 else if (STMT_VINFO_DEF_TYPE (reduc_info) != vect_nested_cycle)
10716 if (dump_enabled_p ())
10717 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
10718 "conditional reduction prevents the use"
10719 " of partial vectors.\n");
10720 LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) = false;
10724 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
10725 vect_model_simple_cost (vinfo, stmt_info, ncopies, dts, ndts, slp_node,
10726 cost_vec, kind);
10727 return true;
10730 /* Transform. */
10732 /* Handle def. */
10733 scalar_dest = gimple_assign_lhs (stmt);
10734 if (reduction_type != EXTRACT_LAST_REDUCTION)
10735 vec_dest = vect_create_destination_var (scalar_dest, vectype);
10737 bool swap_cond_operands = false;
10739 /* See whether another part of the vectorized code applies a loop
10740 mask to the condition, or to its inverse. */
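   /* In the worked example further below,

	 for (int i = 0; i < 100; ++i)
	   x[i] = y[i] ? z[i] : 10;

      the scalar condition y[i] != 0 guards both a masked load and this
      COND_EXPR, so the condition (or its inverse, possibly with swapped
      operands) may already be in scalar_cond_masked_set; in that case we
      pick up the loop mask here so that the ANDed comparison can be
      shared.  */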
10742 vec_loop_masks *masks = NULL;
10743 if (loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
10745 if (reduction_type == EXTRACT_LAST_REDUCTION)
10746 masks = &LOOP_VINFO_MASKS (loop_vinfo);
10747 else
10749 scalar_cond_masked_key cond (cond_expr, ncopies);
10750 if (loop_vinfo->scalar_cond_masked_set.contains (cond))
10751 masks = &LOOP_VINFO_MASKS (loop_vinfo);
10752 else
10754 bool honor_nans = HONOR_NANS (TREE_TYPE (cond.op0));
10755 tree_code orig_code = cond.code;
10756 cond.code = invert_tree_comparison (cond.code, honor_nans);
10757 if (!masked && loop_vinfo->scalar_cond_masked_set.contains (cond))
10759 masks = &LOOP_VINFO_MASKS (loop_vinfo);
10760 cond_code = cond.code;
10761 swap_cond_operands = true;
10763 else
10765		 /* Try the inverse of the current mask.  We check if the
10766		    inverse mask is live and if so we generate a negation of
10767		    the current mask such that we still honor NaNs.  */
10768 cond.inverted_p = true;
10769 cond.code = orig_code;
10770 if (loop_vinfo->scalar_cond_masked_set.contains (cond))
10772 masks = &LOOP_VINFO_MASKS (loop_vinfo);
10773 cond_code = cond.code;
10774 swap_cond_operands = true;
10775 must_invert_cmp_result = true;
10782 /* Handle cond expr. */
10783 if (masked)
10784 vect_get_vec_defs (vinfo, stmt_info, slp_node, ncopies,
10785 cond_expr, &vec_oprnds0, comp_vectype,
10786 then_clause, &vec_oprnds2, vectype,
10787 reduction_type != EXTRACT_LAST_REDUCTION
10788 ? else_clause : NULL, &vec_oprnds3, vectype);
10789 else
10790 vect_get_vec_defs (vinfo, stmt_info, slp_node, ncopies,
10791 cond_expr0, &vec_oprnds0, comp_vectype,
10792 cond_expr1, &vec_oprnds1, comp_vectype,
10793 then_clause, &vec_oprnds2, vectype,
10794 reduction_type != EXTRACT_LAST_REDUCTION
10795 ? else_clause : NULL, &vec_oprnds3, vectype);
10797 /* Arguments are ready. Create the new vector stmt. */
10798 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
10800 vec_then_clause = vec_oprnds2[i];
10801 if (reduction_type != EXTRACT_LAST_REDUCTION)
10802 vec_else_clause = vec_oprnds3[i];
10804 if (swap_cond_operands)
10805 std::swap (vec_then_clause, vec_else_clause);
10807 if (masked)
10808 vec_compare = vec_cond_lhs;
10809 else
10811 vec_cond_rhs = vec_oprnds1[i];
10812 if (bitop1 == NOP_EXPR)
10814 gimple_seq stmts = NULL;
10815 vec_compare = gimple_build (&stmts, cond_code, vec_cmp_type,
10816 vec_cond_lhs, vec_cond_rhs);
10817 gsi_insert_before (gsi, stmts, GSI_SAME_STMT);
10819 else
10821 new_temp = make_ssa_name (vec_cmp_type);
10822 gassign *new_stmt;
10823 if (bitop1 == BIT_NOT_EXPR)
10824 new_stmt = gimple_build_assign (new_temp, bitop1,
10825 vec_cond_rhs);
10826 else
10827 new_stmt
10828 = gimple_build_assign (new_temp, bitop1, vec_cond_lhs,
10829 vec_cond_rhs);
10830 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
10831 if (bitop2 == NOP_EXPR)
10832 vec_compare = new_temp;
10833 else if (bitop2 == BIT_NOT_EXPR
10834 && reduction_type != EXTRACT_LAST_REDUCTION)
10836 /* Instead of doing ~x ? y : z do x ? z : y. */
10837 vec_compare = new_temp;
10838 std::swap (vec_then_clause, vec_else_clause);
10840 else
10842 vec_compare = make_ssa_name (vec_cmp_type);
10843 if (bitop2 == BIT_NOT_EXPR)
10844 new_stmt
10845 = gimple_build_assign (vec_compare, bitop2, new_temp);
10846 else
10847 new_stmt
10848 = gimple_build_assign (vec_compare, bitop2,
10849 vec_cond_lhs, new_temp);
10850 vect_finish_stmt_generation (vinfo, stmt_info,
10851 new_stmt, gsi);
10856 /* If we decided to apply a loop mask to the result of the vector
10857 comparison, AND the comparison with the mask now. Later passes
10858      should then be able to reuse the AND results between multiple
10859 vector statements.
10861 For example:
10862 for (int i = 0; i < 100; ++i)
10863 x[i] = y[i] ? z[i] : 10;
10865      results in the following optimized GIMPLE:
10867 mask__35.8_43 = vect__4.7_41 != { 0, ... };
10868 vec_mask_and_46 = loop_mask_40 & mask__35.8_43;
10869 _19 = &MEM[base: z_12(D), index: ivtmp_56, step: 4, offset: 0B];
10870 vect_iftmp.11_47 = .MASK_LOAD (_19, 4B, vec_mask_and_46);
10871 vect_iftmp.12_52 = VEC_COND_EXPR <vec_mask_and_46,
10872 vect_iftmp.11_47, { 10, ... }>;
10874      instead of using masked and unmasked forms of
10875 vec != { 0, ... } (masked in the MASK_LOAD,
10876 unmasked in the VEC_COND_EXPR). */
10878 /* Force vec_compare to be an SSA_NAME rather than a comparison,
10879 in cases where that's necessary. */
10881 if (masks || reduction_type == EXTRACT_LAST_REDUCTION)
10883 if (!is_gimple_val (vec_compare))
10885 tree vec_compare_name = make_ssa_name (vec_cmp_type);
10886 gassign *new_stmt = gimple_build_assign (vec_compare_name,
10887 vec_compare);
10888 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
10889 vec_compare = vec_compare_name;
10892 if (must_invert_cmp_result)
10894 tree vec_compare_name = make_ssa_name (vec_cmp_type);
10895 gassign *new_stmt = gimple_build_assign (vec_compare_name,
10896 BIT_NOT_EXPR,
10897 vec_compare);
10898 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
10899 vec_compare = vec_compare_name;
10902 if (masks)
10904 tree loop_mask
10905 = vect_get_loop_mask (gsi, masks, vec_num * ncopies,
10906 vectype, i);
10907 tree tmp2 = make_ssa_name (vec_cmp_type);
10908 gassign *g
10909 = gimple_build_assign (tmp2, BIT_AND_EXPR, vec_compare,
10910 loop_mask);
10911 vect_finish_stmt_generation (vinfo, stmt_info, g, gsi);
10912 vec_compare = tmp2;
10916 gimple *new_stmt;
10917 if (reduction_type == EXTRACT_LAST_REDUCTION)
10919 gimple *old_stmt = vect_orig_stmt (stmt_info)->stmt;
10920 tree lhs = gimple_get_lhs (old_stmt);
10921 new_stmt = gimple_build_call_internal
10922 (IFN_FOLD_EXTRACT_LAST, 3, else_clause, vec_compare,
10923 vec_then_clause);
10924 gimple_call_set_lhs (new_stmt, lhs);
10925 SSA_NAME_DEF_STMT (lhs) = new_stmt;
10926 if (old_stmt == gsi_stmt (*gsi))
10927 vect_finish_replace_stmt (vinfo, stmt_info, new_stmt);
10928 else
10930 /* In this case we're moving the definition to later in the
10931 block. That doesn't matter because the only uses of the
10932 lhs are in phi statements. */
10933 gimple_stmt_iterator old_gsi = gsi_for_stmt (old_stmt);
10934 gsi_remove (&old_gsi, true);
10935 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
10938 else
10940 new_temp = make_ssa_name (vec_dest);
10941 new_stmt = gimple_build_assign (new_temp, VEC_COND_EXPR, vec_compare,
10942 vec_then_clause, vec_else_clause);
10943 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
10945 if (slp_node)
10946 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
10947 else
10948 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
10951 if (!slp_node)
10952 *vec_stmt = STMT_VINFO_VEC_STMTS (stmt_info)[0];
10954 vec_oprnds0.release ();
10955 vec_oprnds1.release ();
10956 vec_oprnds2.release ();
10957 vec_oprnds3.release ();
10959 return true;
10962 /* vectorizable_comparison.
10964    Check if STMT_INFO is a comparison expression that can be vectorized.
10965 If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
10966 comparison, put it in VEC_STMT, and insert it at GSI.
10968 Return true if STMT_INFO is vectorizable in this way. */
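/* A minimal scalar example (hypothetical, for illustration) of a statement
   this routine handles:

	_Bool m[N];
	for (int i = 0; i < N; i++)
	  m[i] = a[i] < b[i];

   The comparison produces a vector mask, which is why the statement's
   vectype must be a VECTOR_BOOLEAN_TYPE_P type below.  */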
10970 static bool
10971 vectorizable_comparison (vec_info *vinfo,
10972 stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
10973 gimple **vec_stmt,
10974 slp_tree slp_node, stmt_vector_for_cost *cost_vec)
10976 tree lhs, rhs1, rhs2;
10977 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
10978 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
10979 tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE;
10980 tree new_temp;
10981 loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
10982 enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type};
10983 int ndts = 2;
10984 poly_uint64 nunits;
10985 int ncopies;
10986 enum tree_code code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
10987 int i;
10988 bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
10989 vec<tree> vec_oprnds0 = vNULL;
10990 vec<tree> vec_oprnds1 = vNULL;
10991 tree mask_type;
10992 tree mask;
10994 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
10995 return false;
10997 if (!vectype || !VECTOR_BOOLEAN_TYPE_P (vectype))
10998 return false;
11000 mask_type = vectype;
11001 nunits = TYPE_VECTOR_SUBPARTS (vectype);
11003 if (slp_node)
11004 ncopies = 1;
11005 else
11006 ncopies = vect_get_num_copies (loop_vinfo, vectype);
11008 gcc_assert (ncopies >= 1);
11009 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
11010 return false;
11012 gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
11013 if (!stmt)
11014 return false;
11016 code = gimple_assign_rhs_code (stmt);
11018 if (TREE_CODE_CLASS (code) != tcc_comparison)
11019 return false;
11021 slp_tree slp_rhs1, slp_rhs2;
11022 if (!vect_is_simple_use (vinfo, stmt_info, slp_node,
11023 0, &rhs1, &slp_rhs1, &dts[0], &vectype1))
11024 return false;
11026 if (!vect_is_simple_use (vinfo, stmt_info, slp_node,
11027 1, &rhs2, &slp_rhs2, &dts[1], &vectype2))
11028 return false;
11030 if (vectype1 && vectype2
11031 && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1),
11032 TYPE_VECTOR_SUBPARTS (vectype2)))
11033 return false;
11035 vectype = vectype1 ? vectype1 : vectype2;
11037 /* Invariant comparison. */
11038 if (!vectype)
11040 if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (rhs1)))
11041 vectype = mask_type;
11042 else
11043 vectype = get_vectype_for_scalar_type (vinfo, TREE_TYPE (rhs1),
11044 slp_node);
11045 if (!vectype || maybe_ne (TYPE_VECTOR_SUBPARTS (vectype), nunits))
11046 return false;
11048 else if (maybe_ne (nunits, TYPE_VECTOR_SUBPARTS (vectype)))
11049 return false;
11051 /* Can't compare mask and non-mask types. */
11052 if (vectype1 && vectype2
11053 && (VECTOR_BOOLEAN_TYPE_P (vectype1) ^ VECTOR_BOOLEAN_TYPE_P (vectype2)))
11054 return false;
11056 /* Boolean values may have another representation in vectors
11057 and therefore we prefer bit operations over comparison for
11058 them (which also works for scalar masks). We store opcodes
11059 to use in bitop1 and bitop2. Statement is vectorized as
11060 BITOP2 (rhs1 BITOP1 rhs2) or
11061 rhs1 BITOP2 (BITOP1 rhs2)
11062 depending on bitop1 and bitop2 arity. */
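  /* E.g. (illustrative): for boolean rhs1 and rhs2, rhs1 == rhs2 becomes
     ~(rhs1 ^ rhs2), i.e. bitop1 = BIT_XOR_EXPR and bitop2 = BIT_NOT_EXPR,
     whereas rhs1 != rhs2 needs only the single BIT_XOR_EXPR.  */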
11063 bool swap_p = false;
11064 if (VECTOR_BOOLEAN_TYPE_P (vectype))
11066 if (code == GT_EXPR)
11068 bitop1 = BIT_NOT_EXPR;
11069 bitop2 = BIT_AND_EXPR;
11071 else if (code == GE_EXPR)
11073 bitop1 = BIT_NOT_EXPR;
11074 bitop2 = BIT_IOR_EXPR;
11076 else if (code == LT_EXPR)
11078 bitop1 = BIT_NOT_EXPR;
11079 bitop2 = BIT_AND_EXPR;
11080 swap_p = true;
11082 else if (code == LE_EXPR)
11084 bitop1 = BIT_NOT_EXPR;
11085 bitop2 = BIT_IOR_EXPR;
11086 swap_p = true;
11088 else
11090 bitop1 = BIT_XOR_EXPR;
11091 if (code == EQ_EXPR)
11092 bitop2 = BIT_NOT_EXPR;
11096 if (!vec_stmt)
11098 if (bitop1 == NOP_EXPR)
11100 if (!expand_vec_cmp_expr_p (vectype, mask_type, code))
11101 return false;
11103 else
11105 machine_mode mode = TYPE_MODE (vectype);
11106 optab optab;
11108 optab = optab_for_tree_code (bitop1, vectype, optab_default);
11109 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
11110 return false;
11112 if (bitop2 != NOP_EXPR)
11114 optab = optab_for_tree_code (bitop2, vectype, optab_default);
11115 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
11116 return false;
11120 /* Put types on constant and invariant SLP children. */
11121 if (slp_node
11122 && (!vect_maybe_update_slp_op_vectype (slp_rhs1, vectype)
11123 || !vect_maybe_update_slp_op_vectype (slp_rhs2, vectype)))
11125 if (dump_enabled_p ())
11126 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
11127 "incompatible vector types for invariants\n");
11128 return false;
11131 STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type;
11132 vect_model_simple_cost (vinfo, stmt_info,
11133 ncopies * (1 + (bitop2 != NOP_EXPR)),
11134 dts, ndts, slp_node, cost_vec);
11135 return true;
11138 /* Transform. */
11140 /* Handle def. */
11141 lhs = gimple_assign_lhs (stmt);
11142 mask = vect_create_destination_var (lhs, mask_type);
11144 vect_get_vec_defs (vinfo, stmt_info, slp_node, ncopies,
11145 rhs1, &vec_oprnds0, vectype,
11146 rhs2, &vec_oprnds1, vectype);
11147 if (swap_p)
11148 std::swap (vec_oprnds0, vec_oprnds1);
11150 /* Arguments are ready. Create the new vector stmt. */
11151 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_rhs1)
11153 gimple *new_stmt;
11154 vec_rhs2 = vec_oprnds1[i];
11156 new_temp = make_ssa_name (mask);
11157 if (bitop1 == NOP_EXPR)
11159 new_stmt = gimple_build_assign (new_temp, code,
11160 vec_rhs1, vec_rhs2);
11161 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
11163 else
11165 if (bitop1 == BIT_NOT_EXPR)
11166 new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs2);
11167 else
11168 new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs1,
11169 vec_rhs2);
11170 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
11171 if (bitop2 != NOP_EXPR)
11173 tree res = make_ssa_name (mask);
11174 if (bitop2 == BIT_NOT_EXPR)
11175 new_stmt = gimple_build_assign (res, bitop2, new_temp);
11176 else
11177 new_stmt = gimple_build_assign (res, bitop2, vec_rhs1,
11178 new_temp);
11179 vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
11182 if (slp_node)
11183 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
11184 else
11185 STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
11188 if (!slp_node)
11189 *vec_stmt = STMT_VINFO_VEC_STMTS (stmt_info)[0];
11191 vec_oprnds0.release ();
11192 vec_oprnds1.release ();
11194 return true;
11197 /* If SLP_NODE is nonnull, return true if vectorizable_live_operation
11198 can handle all live statements in the node. Otherwise return true
11199 if STMT_INFO is not live or if vectorizable_live_operation can handle it.
11200 GSI and VEC_STMT_P are as for vectorizable_live_operation. */
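/* A "live" statement is one whose scalar result is also used outside the
   vectorized region, e.g. (hypothetical):

	int last = 0;
	for (int i = 0; i < n; i++)
	  last = a[i] + 1;
	use (last);

   where the final value of `last' has to be extracted from the vectorized
   loop; vectorizable_live_operation checks for and emits that code.  */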
11202 static bool
11203 can_vectorize_live_stmts (vec_info *vinfo,
11204 stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
11205 slp_tree slp_node, slp_instance slp_node_instance,
11206 bool vec_stmt_p,
11207 stmt_vector_for_cost *cost_vec)
11209 if (slp_node)
11211 stmt_vec_info slp_stmt_info;
11212 unsigned int i;
11213 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node), i, slp_stmt_info)
11215 if (STMT_VINFO_LIVE_P (slp_stmt_info)
11216 && !vectorizable_live_operation (vinfo,
11217 slp_stmt_info, gsi, slp_node,
11218 slp_node_instance, i,
11219 vec_stmt_p, cost_vec))
11220 return false;
11223 else if (STMT_VINFO_LIVE_P (stmt_info)
11224 && !vectorizable_live_operation (vinfo, stmt_info, gsi,
11225 slp_node, slp_node_instance, -1,
11226 vec_stmt_p, cost_vec))
11227 return false;
11229 return true;
11232 /* Make sure the statement is vectorizable. */
11234 opt_result
11235 vect_analyze_stmt (vec_info *vinfo,
11236 stmt_vec_info stmt_info, bool *need_to_vectorize,
11237 slp_tree node, slp_instance node_instance,
11238 stmt_vector_for_cost *cost_vec)
11240 bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
11241 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
11242 bool ok;
11243 gimple_seq pattern_def_seq;
11245 if (dump_enabled_p ())
11246 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: %G",
11247 stmt_info->stmt);
11249 if (gimple_has_volatile_ops (stmt_info->stmt))
11250 return opt_result::failure_at (stmt_info->stmt,
11251 "not vectorized:"
11252 " stmt has volatile operands: %G\n",
11253 stmt_info->stmt);
11255 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
11256 && node == NULL
11257 && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
11259 gimple_stmt_iterator si;
11261 for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
11263 stmt_vec_info pattern_def_stmt_info
11264 = vinfo->lookup_stmt (gsi_stmt (si));
11265 if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
11266 || STMT_VINFO_LIVE_P (pattern_def_stmt_info))
11268 /* Analyze def stmt of STMT if it's a pattern stmt. */
11269 if (dump_enabled_p ())
11270 dump_printf_loc (MSG_NOTE, vect_location,
11271 "==> examining pattern def statement: %G",
11272 pattern_def_stmt_info->stmt);
11274 opt_result res
11275 = vect_analyze_stmt (vinfo, pattern_def_stmt_info,
11276 need_to_vectorize, node, node_instance,
11277 cost_vec);
11278 if (!res)
11279 return res;
11284 /* Skip stmts that do not need to be vectorized. In loops this is expected
11285 to include:
11286 - the COND_EXPR which is the loop exit condition
11287 - any LABEL_EXPRs in the loop
11288 - computations that are used only for array indexing or loop control.
11289 In basic blocks we only analyze statements that are a part of some SLP
11290 instance, therefore, all the statements are relevant.
11292      A pattern statement needs to be analyzed instead of the original statement
11293      if the original statement is not relevant.  Otherwise, we analyze both
11294      statements.  In basic blocks we are called from some SLP instance
11295      traversal, so don't analyze pattern stmts instead; the pattern stmts
11296      will already be part of an SLP instance.  */
11298 stmt_vec_info pattern_stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
11299 if (!STMT_VINFO_RELEVANT_P (stmt_info)
11300 && !STMT_VINFO_LIVE_P (stmt_info))
11302 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
11303 && pattern_stmt_info
11304 && (STMT_VINFO_RELEVANT_P (pattern_stmt_info)
11305 || STMT_VINFO_LIVE_P (pattern_stmt_info)))
11307 /* Analyze PATTERN_STMT instead of the original stmt. */
11308 stmt_info = pattern_stmt_info;
11309 if (dump_enabled_p ())
11310 dump_printf_loc (MSG_NOTE, vect_location,
11311 "==> examining pattern statement: %G",
11312 stmt_info->stmt);
11314 else
11316 if (dump_enabled_p ())
11317 dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");
11319 return opt_result::success ();
11322 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
11323 && node == NULL
11324 && pattern_stmt_info
11325 && (STMT_VINFO_RELEVANT_P (pattern_stmt_info)
11326 || STMT_VINFO_LIVE_P (pattern_stmt_info)))
11328 /* Analyze PATTERN_STMT too. */
11329 if (dump_enabled_p ())
11330 dump_printf_loc (MSG_NOTE, vect_location,
11331 "==> examining pattern statement: %G",
11332 pattern_stmt_info->stmt);
11334 opt_result res
11335 = vect_analyze_stmt (vinfo, pattern_stmt_info, need_to_vectorize, node,
11336 node_instance, cost_vec);
11337 if (!res)
11338 return res;
11341 switch (STMT_VINFO_DEF_TYPE (stmt_info))
11343 case vect_internal_def:
11344 break;
11346 case vect_reduction_def:
11347 case vect_nested_cycle:
11348 gcc_assert (!bb_vinfo
11349 && (relevance == vect_used_in_outer
11350 || relevance == vect_used_in_outer_by_reduction
11351 || relevance == vect_used_by_reduction
11352 || relevance == vect_unused_in_scope
11353 || relevance == vect_used_only_live));
11354 break;
11356 case vect_induction_def:
11357 case vect_first_order_recurrence:
11358 gcc_assert (!bb_vinfo);
11359 break;
11361 case vect_constant_def:
11362 case vect_external_def:
11363 case vect_unknown_def_type:
11364 default:
11365 gcc_unreachable ();
11368 tree saved_vectype = STMT_VINFO_VECTYPE (stmt_info);
11369 if (node)
11370 STMT_VINFO_VECTYPE (stmt_info) = SLP_TREE_VECTYPE (node);
11372 if (STMT_VINFO_RELEVANT_P (stmt_info))
11374 gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
11375 gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
11376 || (call && gimple_call_lhs (call) == NULL_TREE));
11377 *need_to_vectorize = true;
11380 if (PURE_SLP_STMT (stmt_info) && !node)
11382 if (dump_enabled_p ())
11383 dump_printf_loc (MSG_NOTE, vect_location,
11384 "handled only by SLP analysis\n");
11385 return opt_result::success ();
11388 ok = true;
11389 if (!bb_vinfo
11390 && (STMT_VINFO_RELEVANT_P (stmt_info)
11391 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
11392 /* Prefer vectorizable_call over vectorizable_simd_clone_call so
11393 -mveclibabi= takes preference over library functions with
11394 the simd attribute. */
11395 ok = (vectorizable_call (vinfo, stmt_info, NULL, NULL, node, cost_vec)
11396 || vectorizable_simd_clone_call (vinfo, stmt_info, NULL, NULL, node,
11397 cost_vec)
11398 || vectorizable_conversion (vinfo, stmt_info,
11399 NULL, NULL, node, cost_vec)
11400 || vectorizable_operation (vinfo, stmt_info,
11401 NULL, NULL, node, cost_vec)
11402 || vectorizable_assignment (vinfo, stmt_info,
11403 NULL, NULL, node, cost_vec)
11404 || vectorizable_load (vinfo, stmt_info, NULL, NULL, node, cost_vec)
11405 || vectorizable_store (vinfo, stmt_info, NULL, NULL, node, cost_vec)
11406 || vectorizable_reduction (as_a <loop_vec_info> (vinfo), stmt_info,
11407 node, node_instance, cost_vec)
11408 || vectorizable_induction (as_a <loop_vec_info> (vinfo), stmt_info,
11409 NULL, node, cost_vec)
11410 || vectorizable_shift (vinfo, stmt_info, NULL, NULL, node, cost_vec)
11411 || vectorizable_condition (vinfo, stmt_info,
11412 NULL, NULL, node, cost_vec)
11413 || vectorizable_comparison (vinfo, stmt_info, NULL, NULL, node,
11414 cost_vec)
11415 || vectorizable_lc_phi (as_a <loop_vec_info> (vinfo),
11416 stmt_info, NULL, node)
11417 || vectorizable_recurr (as_a <loop_vec_info> (vinfo),
11418 stmt_info, NULL, node, cost_vec));
11419 else
11421 if (bb_vinfo)
11422 ok = (vectorizable_call (vinfo, stmt_info, NULL, NULL, node, cost_vec)
11423 || vectorizable_simd_clone_call (vinfo, stmt_info,
11424 NULL, NULL, node, cost_vec)
11425 || vectorizable_conversion (vinfo, stmt_info, NULL, NULL, node,
11426 cost_vec)
11427 || vectorizable_shift (vinfo, stmt_info,
11428 NULL, NULL, node, cost_vec)
11429 || vectorizable_operation (vinfo, stmt_info,
11430 NULL, NULL, node, cost_vec)
11431 || vectorizable_assignment (vinfo, stmt_info, NULL, NULL, node,
11432 cost_vec)
11433 || vectorizable_load (vinfo, stmt_info,
11434 NULL, NULL, node, cost_vec)
11435 || vectorizable_store (vinfo, stmt_info,
11436 NULL, NULL, node, cost_vec)
11437 || vectorizable_condition (vinfo, stmt_info,
11438 NULL, NULL, node, cost_vec)
11439 || vectorizable_comparison (vinfo, stmt_info, NULL, NULL, node,
11440 cost_vec)
11441 || vectorizable_phi (vinfo, stmt_info, NULL, node, cost_vec));
11444 if (node)
11445 STMT_VINFO_VECTYPE (stmt_info) = saved_vectype;
11447 if (!ok)
11448 return opt_result::failure_at (stmt_info->stmt,
11449 "not vectorized:"
11450 " relevant stmt not supported: %G",
11451 stmt_info->stmt);
11453   /* Stmts that are (also) "live" (i.e. used outside of the loop)
11454 need extra handling, except for vectorizable reductions. */
11455 if (!bb_vinfo
11456 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
11457 && STMT_VINFO_TYPE (stmt_info) != lc_phi_info_type
11458 && !can_vectorize_live_stmts (as_a <loop_vec_info> (vinfo),
11459 stmt_info, NULL, node, node_instance,
11460 false, cost_vec))
11461 return opt_result::failure_at (stmt_info->stmt,
11462 "not vectorized:"
11463 " live stmt not supported: %G",
11464 stmt_info->stmt);
11466 return opt_result::success ();
11470 /* Function vect_transform_stmt.
11472 Create a vectorized stmt to replace STMT_INFO, and insert it at GSI. */
11474 bool
11475 vect_transform_stmt (vec_info *vinfo,
11476 stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
11477 slp_tree slp_node, slp_instance slp_node_instance)
11479 bool is_store = false;
11480 gimple *vec_stmt = NULL;
11481 bool done;
11483 gcc_assert (slp_node || !PURE_SLP_STMT (stmt_info));
11485 tree saved_vectype = STMT_VINFO_VECTYPE (stmt_info);
11486 if (slp_node)
11487 STMT_VINFO_VECTYPE (stmt_info) = SLP_TREE_VECTYPE (slp_node);
11489 switch (STMT_VINFO_TYPE (stmt_info))
11491 case type_demotion_vec_info_type:
11492 case type_promotion_vec_info_type:
11493 case type_conversion_vec_info_type:
11494 done = vectorizable_conversion (vinfo, stmt_info,
11495 gsi, &vec_stmt, slp_node, NULL);
11496 gcc_assert (done);
11497 break;
11499 case induc_vec_info_type:
11500 done = vectorizable_induction (as_a <loop_vec_info> (vinfo),
11501 stmt_info, &vec_stmt, slp_node,
11502 NULL);
11503 gcc_assert (done);
11504 break;
11506 case shift_vec_info_type:
11507 done = vectorizable_shift (vinfo, stmt_info,
11508 gsi, &vec_stmt, slp_node, NULL);
11509 gcc_assert (done);
11510 break;
11512 case op_vec_info_type:
11513 done = vectorizable_operation (vinfo, stmt_info, gsi, &vec_stmt, slp_node,
11514 NULL);
11515 gcc_assert (done);
11516 break;
11518 case assignment_vec_info_type:
11519 done = vectorizable_assignment (vinfo, stmt_info,
11520 gsi, &vec_stmt, slp_node, NULL);
11521 gcc_assert (done);
11522 break;
11524 case load_vec_info_type:
11525 done = vectorizable_load (vinfo, stmt_info, gsi, &vec_stmt, slp_node,
11526 NULL);
11527 gcc_assert (done);
11528 break;
11530 case store_vec_info_type:
11531 done = vectorizable_store (vinfo, stmt_info,
11532 gsi, &vec_stmt, slp_node, NULL);
11533 gcc_assert (done);
11534 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
11536 /* In case of interleaving, the whole chain is vectorized when the
11537 last store in the chain is reached. Store stmts before the last
11538        one are skipped, and their vec_stmt_info shouldn't be freed
11539 meanwhile. */
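	  /* For example (hypothetical), for an interleaved group such as

	       a[2*i] = x;
	       a[2*i+1] = y;

	     the check below only treats the group as a completed store once
	     DR_GROUP_STORE_COUNT has reached DR_GROUP_SIZE, i.e. when the
	     last store of the chain has been processed.  */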
11540 stmt_vec_info group_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
11541 if (DR_GROUP_STORE_COUNT (group_info) == DR_GROUP_SIZE (group_info))
11542 is_store = true;
11544 else
11545 is_store = true;
11546 break;
11548 case condition_vec_info_type:
11549 done = vectorizable_condition (vinfo, stmt_info,
11550 gsi, &vec_stmt, slp_node, NULL);
11551 gcc_assert (done);
11552 break;
11554 case comparison_vec_info_type:
11555 done = vectorizable_comparison (vinfo, stmt_info, gsi, &vec_stmt,
11556 slp_node, NULL);
11557 gcc_assert (done);
11558 break;
11560 case call_vec_info_type:
11561 done = vectorizable_call (vinfo, stmt_info,
11562 gsi, &vec_stmt, slp_node, NULL);
11563 break;
11565 case call_simd_clone_vec_info_type:
11566 done = vectorizable_simd_clone_call (vinfo, stmt_info, gsi, &vec_stmt,
11567 slp_node, NULL);
11568 break;
11570 case reduc_vec_info_type:
11571 done = vect_transform_reduction (as_a <loop_vec_info> (vinfo), stmt_info,
11572 gsi, &vec_stmt, slp_node);
11573 gcc_assert (done);
11574 break;
11576 case cycle_phi_info_type:
11577 done = vect_transform_cycle_phi (as_a <loop_vec_info> (vinfo), stmt_info,
11578 &vec_stmt, slp_node, slp_node_instance);
11579 gcc_assert (done);
11580 break;
11582 case lc_phi_info_type:
11583 done = vectorizable_lc_phi (as_a <loop_vec_info> (vinfo),
11584 stmt_info, &vec_stmt, slp_node);
11585 gcc_assert (done);
11586 break;
11588 case recurr_info_type:
11589 done = vectorizable_recurr (as_a <loop_vec_info> (vinfo),
11590 stmt_info, &vec_stmt, slp_node, NULL);
11591 gcc_assert (done);
11592 break;
11594 case phi_info_type:
11595 done = vectorizable_phi (vinfo, stmt_info, &vec_stmt, slp_node, NULL);
11596 gcc_assert (done);
11597 break;
11599 default:
11600 if (!STMT_VINFO_LIVE_P (stmt_info))
11602 if (dump_enabled_p ())
11603 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
11604 "stmt not supported.\n");
11605 gcc_unreachable ();
11607 done = true;
11610 if (!slp_node && vec_stmt)
11611 gcc_assert (STMT_VINFO_VEC_STMTS (stmt_info).exists ());
11613 if (STMT_VINFO_TYPE (stmt_info) != store_vec_info_type)
11615 /* Handle stmts whose DEF is used outside the loop-nest that is
11616 being vectorized. */
11617 done = can_vectorize_live_stmts (vinfo, stmt_info, gsi, slp_node,
11618 slp_node_instance, true, NULL);
11619 gcc_assert (done);
11622 if (slp_node)
11623 STMT_VINFO_VECTYPE (stmt_info) = saved_vectype;
11625 return is_store;
11629 /* Remove a group of stores (for SLP or interleaving), free their
11630 stmt_vec_info. */
11632 void
11633 vect_remove_stores (vec_info *vinfo, stmt_vec_info first_stmt_info)
11635 stmt_vec_info next_stmt_info = first_stmt_info;
11637 while (next_stmt_info)
11639 stmt_vec_info tmp = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
11640 next_stmt_info = vect_orig_stmt (next_stmt_info);
11641 /* Free the attached stmt_vec_info and remove the stmt. */
11642 vinfo->remove_stmt (next_stmt_info);
11643 next_stmt_info = tmp;
11647 /* If NUNITS is nonzero, return a vector type that contains NUNITS
11648 elements of type SCALAR_TYPE, or null if the target doesn't support
11649 such a type.
11651 If NUNITS is zero, return a vector type that contains elements of
11652 type SCALAR_TYPE, choosing whichever vector size the target prefers.
11654 If PREVAILING_MODE is VOIDmode, we have not yet chosen a vector mode
11655 for this vectorization region and want to "autodetect" the best choice.
11656 Otherwise, PREVAILING_MODE is a previously-chosen vector TYPE_MODE
11657 and we want the new type to be interoperable with it. PREVAILING_MODE
11658 in this case can be a scalar integer mode or a vector mode; when it
11659 is a vector mode, the function acts like a tree-level version of
11660 related_vector_mode. */
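/* For example (illustrative, target-dependent): with PREVAILING_MODE
   V8HImode (16 bytes) and SCALAR_TYPE `int', asking for NUNITS == 4 yields
   a 4 x int vector type (V4SImode where the target provides it), since
   4 * 4 bytes is a constant multiple of the 16-byte prevailing size; a
   combination the target cannot provide yields NULL_TREE instead.  */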
11662 tree
11663 get_related_vectype_for_scalar_type (machine_mode prevailing_mode,
11664 tree scalar_type, poly_uint64 nunits)
11666 tree orig_scalar_type = scalar_type;
11667 scalar_mode inner_mode;
11668 machine_mode simd_mode;
11669 tree vectype;
11671 if (!is_int_mode (TYPE_MODE (scalar_type), &inner_mode)
11672 && !is_float_mode (TYPE_MODE (scalar_type), &inner_mode))
11673 return NULL_TREE;
11675 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
11677 /* Interoperability between modes requires one to be a constant multiple
11678 of the other, so that the number of vectors required for each operation
11679 is a compile-time constant. */
11680 if (prevailing_mode != VOIDmode
11681 && !constant_multiple_p (nunits * nbytes,
11682 GET_MODE_SIZE (prevailing_mode))
11683 && !constant_multiple_p (GET_MODE_SIZE (prevailing_mode),
11684 nunits * nbytes))
11685 return NULL_TREE;
11687 /* For vector types of elements whose mode precision doesn't
11688      match their type's precision we use an element type of mode
11689 precision. The vectorization routines will have to make sure
11690 they support the proper result truncation/extension.
11691 We also make sure to build vector types with INTEGER_TYPE
11692 component type only. */
11693 if (INTEGRAL_TYPE_P (scalar_type)
11694 && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
11695 || TREE_CODE (scalar_type) != INTEGER_TYPE))
11696 scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
11697 TYPE_UNSIGNED (scalar_type));
11699 /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
11700 When the component mode passes the above test simply use a type
11701 corresponding to that mode. The theory is that any use that
11702 would cause problems with this will disable vectorization anyway. */
11703 else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
11704 && !INTEGRAL_TYPE_P (scalar_type))
11705 scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);
11707 /* We can't build a vector type of elements with alignment bigger than
11708 their size. */
11709 else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
11710 scalar_type = lang_hooks.types.type_for_mode (inner_mode,
11711 TYPE_UNSIGNED (scalar_type));
11713   /* If we fell back to using the mode, fail if there was
11714 no scalar type for it. */
11715 if (scalar_type == NULL_TREE)
11716 return NULL_TREE;
11718 /* If no prevailing mode was supplied, use the mode the target prefers.
11719 Otherwise lookup a vector mode based on the prevailing mode. */
11720 if (prevailing_mode == VOIDmode)
11722 gcc_assert (known_eq (nunits, 0U));
11723 simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
11724 if (SCALAR_INT_MODE_P (simd_mode))
11726 /* Traditional behavior is not to take the integer mode
11727 literally, but simply to use it as a way of determining
11728 the vector size. It is up to mode_for_vector to decide
11729 what the TYPE_MODE should be.
11731 Note that nunits == 1 is allowed in order to support single
11732 element vector types. */
11733 if (!multiple_p (GET_MODE_SIZE (simd_mode), nbytes, &nunits)
11734 || !mode_for_vector (inner_mode, nunits).exists (&simd_mode))
11735 return NULL_TREE;
11738 else if (SCALAR_INT_MODE_P (prevailing_mode)
11739 || !related_vector_mode (prevailing_mode,
11740 inner_mode, nunits).exists (&simd_mode))
11742 /* Fall back to using mode_for_vector, mostly in the hope of being
11743 able to use an integer mode. */
11744 if (known_eq (nunits, 0U)
11745 && !multiple_p (GET_MODE_SIZE (prevailing_mode), nbytes, &nunits))
11746 return NULL_TREE;
11748 if (!mode_for_vector (inner_mode, nunits).exists (&simd_mode))
11749 return NULL_TREE;
11752 vectype = build_vector_type_for_mode (scalar_type, simd_mode);
11754 /* In cases where the mode was chosen by mode_for_vector, check that
11755 the target actually supports the chosen mode, or that it at least
11756 allows the vector mode to be replaced by a like-sized integer. */
11757 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
11758 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
11759 return NULL_TREE;
11761 /* Re-attach the address-space qualifier if we canonicalized the scalar
11762 type. */
11763 if (TYPE_ADDR_SPACE (orig_scalar_type) != TYPE_ADDR_SPACE (vectype))
11764 return build_qualified_type
11765 (vectype, KEEP_QUAL_ADDR_SPACE (TYPE_QUALS (orig_scalar_type)));
11767 return vectype;
11770 /* Function get_vectype_for_scalar_type.
11772 Returns the vector type corresponding to SCALAR_TYPE as supported
11773 by the target. If GROUP_SIZE is nonzero and we're performing BB
11774 vectorization, make sure that the number of elements in the vector
11775 is no bigger than GROUP_SIZE. */
11777 tree
11778 get_vectype_for_scalar_type (vec_info *vinfo, tree scalar_type,
11779 unsigned int group_size)
11781 /* For BB vectorization, we should always have a group size once we've
11782 constructed the SLP tree; the only valid uses of zero GROUP_SIZEs
11783 are tentative requests during things like early data reference
11784 analysis and pattern recognition. */
11785 if (is_a <bb_vec_info> (vinfo))
11786 gcc_assert (vinfo->slp_instances.is_empty () || group_size != 0);
11787 else
11788 group_size = 0;
11790 tree vectype = get_related_vectype_for_scalar_type (vinfo->vector_mode,
11791 scalar_type);
11792 if (vectype && vinfo->vector_mode == VOIDmode)
11793 vinfo->vector_mode = TYPE_MODE (vectype);
11795 /* Register the natural choice of vector type, before the group size
11796 has been applied. */
11797 if (vectype)
11798 vinfo->used_vector_modes.add (TYPE_MODE (vectype));
11800 /* If the natural choice of vector type doesn't satisfy GROUP_SIZE,
11801 try again with an explicit number of elements. */
11802 if (vectype
11803 && group_size
11804 && maybe_ge (TYPE_VECTOR_SUBPARTS (vectype), group_size))
11806 /* Start with the biggest number of units that fits within
11807 GROUP_SIZE and halve it until we find a valid vector type.
11808 Usually either the first attempt will succeed or all will
11809 fail (in the latter case because GROUP_SIZE is too small
11810 for the target), but it's possible that a target could have
11811 a hole between supported vector types.
11813 If GROUP_SIZE is not a power of 2, this has the effect of
11814 trying the largest power of 2 that fits within the group,
11815 even though the group is not a multiple of that vector size.
11816 The BB vectorizer will then try to carve up the group into
11817 smaller pieces. */
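      /* Worked example (hypothetical): GROUP_SIZE == 6 starts the search at
	 1 << floor_log2 (6) == 4 units and, if no 4-unit vector type exists
	 for SCALAR_TYPE, tries 2 units before giving up.  */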
11818 unsigned int nunits = 1 << floor_log2 (group_size);
11821 vectype = get_related_vectype_for_scalar_type (vinfo->vector_mode,
11822 scalar_type, nunits);
11823 nunits /= 2;
11825 while (nunits > 1 && !vectype);
11828 return vectype;
11831 /* Return the vector type corresponding to SCALAR_TYPE as supported
11832 by the target. NODE, if nonnull, is the SLP tree node that will
11833 use the returned vector type. */
11835 tree
11836 get_vectype_for_scalar_type (vec_info *vinfo, tree scalar_type, slp_tree node)
11838 unsigned int group_size = 0;
11839 if (node)
11840 group_size = SLP_TREE_LANES (node);
11841 return get_vectype_for_scalar_type (vinfo, scalar_type, group_size);
11844 /* Function get_mask_type_for_scalar_type.
11846 Returns the mask type corresponding to a result of comparison
11847    of vectors of the specified SCALAR_TYPE as supported by the target.
11848 If GROUP_SIZE is nonzero and we're performing BB vectorization,
11849 make sure that the number of elements in the vector is no bigger
11850 than GROUP_SIZE. */
11852 tree
11853 get_mask_type_for_scalar_type (vec_info *vinfo, tree scalar_type,
11854 unsigned int group_size)
11856 tree vectype = get_vectype_for_scalar_type (vinfo, scalar_type, group_size);
11858 if (!vectype)
11859 return NULL;
11861 return truth_type_for (vectype);
11864 /* Function get_same_sized_vectype
11866    Returns a vector type corresponding to SCALAR_TYPE with the same
11867    size as VECTOR_TYPE, if supported by the target.  */
11869 tree
11870 get_same_sized_vectype (tree scalar_type, tree vector_type)
11872 if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type))
11873 return truth_type_for (vector_type);
11875 poly_uint64 nunits;
11876 if (!multiple_p (GET_MODE_SIZE (TYPE_MODE (vector_type)),
11877 GET_MODE_SIZE (TYPE_MODE (scalar_type)), &nunits))
11878 return NULL_TREE;
11880 return get_related_vectype_for_scalar_type (TYPE_MODE (vector_type),
11881 scalar_type, nunits);
11884 /* Return true if replacing LOOP_VINFO->vector_mode with VECTOR_MODE
11885 would not change the chosen vector modes. */
11887 bool
11888 vect_chooses_same_modes_p (vec_info *vinfo, machine_mode vector_mode)
11890 for (vec_info::mode_set::iterator i = vinfo->used_vector_modes.begin ();
11891 i != vinfo->used_vector_modes.end (); ++i)
11892 if (!VECTOR_MODE_P (*i)
11893 || related_vector_mode (vector_mode, GET_MODE_INNER (*i), 0) != *i)
11894 return false;
11895 return true;
11898 /* Function vect_is_simple_use.
11900 Input:
11901 VINFO - the vect info of the loop or basic block that is being vectorized.
11902 OPERAND - operand in the loop or bb.
11903 Output:
11904 DEF_STMT_INFO_OUT (optional) - information about the defining stmt in
11905 case OPERAND is an SSA_NAME that is defined in the vectorizable region
11906 DEF_STMT_OUT (optional) - the defining stmt in case OPERAND is an SSA_NAME;
11907 the definition could be anywhere in the function
11908 DT - the type of definition
11910 Returns whether a stmt with OPERAND can be vectorized.
11911 For loops, supportable operands are constants, loop invariants, and operands
11912 that are defined by the current iteration of the loop. Unsupportable
11913 operands are those that are defined by a previous iteration of the loop (as
11914 is the case in reduction/induction computations).
11915 For basic blocks, supportable operands are constants and bb invariants.
11916 For now, operands defined outside the basic block are not supported. */
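/* For instance, in the hypothetical loop

	for (i = 0; i < n; i++)
	  a[i] = b[i] * c + s;

   the use of b[i] has an internal def, the loop invariant `c' is an
   external def, and a literal in place of `s' would be a constant def;
   if `s' were instead updated by a previous iteration it would fall into
   the "defined by a previous iteration" category described above.  */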
11918 bool
11919 vect_is_simple_use (tree operand, vec_info *vinfo, enum vect_def_type *dt,
11920 stmt_vec_info *def_stmt_info_out, gimple **def_stmt_out)
11922 if (def_stmt_info_out)
11923 *def_stmt_info_out = NULL;
11924 if (def_stmt_out)
11925 *def_stmt_out = NULL;
11926 *dt = vect_unknown_def_type;
11928 if (dump_enabled_p ())
11930 dump_printf_loc (MSG_NOTE, vect_location,
11931 "vect_is_simple_use: operand ");
11932 if (TREE_CODE (operand) == SSA_NAME
11933 && !SSA_NAME_IS_DEFAULT_DEF (operand))
11934 dump_gimple_expr (MSG_NOTE, TDF_SLIM, SSA_NAME_DEF_STMT (operand), 0);
11935 else
11936 dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
11939 if (CONSTANT_CLASS_P (operand))
11940 *dt = vect_constant_def;
11941 else if (is_gimple_min_invariant (operand))
11942 *dt = vect_external_def;
11943 else if (TREE_CODE (operand) != SSA_NAME)
11944 *dt = vect_unknown_def_type;
11945 else if (SSA_NAME_IS_DEFAULT_DEF (operand))
11946 *dt = vect_external_def;
11947 else
11949 gimple *def_stmt = SSA_NAME_DEF_STMT (operand);
11950 stmt_vec_info stmt_vinfo = vinfo->lookup_def (operand);
11951 if (!stmt_vinfo)
11952 *dt = vect_external_def;
11953 else
11955 stmt_vinfo = vect_stmt_to_vectorize (stmt_vinfo);
11956 def_stmt = stmt_vinfo->stmt;
11957 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
11958 if (def_stmt_info_out)
11959 *def_stmt_info_out = stmt_vinfo;
11961 if (def_stmt_out)
11962 *def_stmt_out = def_stmt;
11965 if (dump_enabled_p ())
11967 dump_printf (MSG_NOTE, ", type of def: ");
11968 switch (*dt)
11970 case vect_uninitialized_def:
11971 dump_printf (MSG_NOTE, "uninitialized\n");
11972 break;
11973 case vect_constant_def:
11974 dump_printf (MSG_NOTE, "constant\n");
11975 break;
11976 case vect_external_def:
11977 dump_printf (MSG_NOTE, "external\n");
11978 break;
11979 case vect_internal_def:
11980 dump_printf (MSG_NOTE, "internal\n");
11981 break;
11982 case vect_induction_def:
11983 dump_printf (MSG_NOTE, "induction\n");
11984 break;
11985 case vect_reduction_def:
11986 dump_printf (MSG_NOTE, "reduction\n");
11987 break;
11988 case vect_double_reduction_def:
11989 dump_printf (MSG_NOTE, "double reduction\n");
11990 break;
11991 case vect_nested_cycle:
11992 dump_printf (MSG_NOTE, "nested cycle\n");
11993 break;
11994 case vect_first_order_recurrence:
11995 dump_printf (MSG_NOTE, "first order recurrence\n");
11996 break;
11997 case vect_unknown_def_type:
11998 dump_printf (MSG_NOTE, "unknown\n");
11999 break;
12003 if (*dt == vect_unknown_def_type)
12005 if (dump_enabled_p ())
12006 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
12007 "Unsupported pattern.\n");
12008 return false;
12011 return true;
12014 /* Function vect_is_simple_use.
12016 Same as vect_is_simple_use but also determines the vector operand
12017 type of OPERAND and stores it to *VECTYPE. If the definition of
12018 OPERAND is vect_uninitialized_def, vect_constant_def or
12019 vect_external_def *VECTYPE will be set to NULL_TREE and the caller
12020    is responsible for computing the best suited vector type for the
12021 scalar operand. */
12023 bool
12024 vect_is_simple_use (tree operand, vec_info *vinfo, enum vect_def_type *dt,
12025 tree *vectype, stmt_vec_info *def_stmt_info_out,
12026 gimple **def_stmt_out)
12028 stmt_vec_info def_stmt_info;
12029 gimple *def_stmt;
12030 if (!vect_is_simple_use (operand, vinfo, dt, &def_stmt_info, &def_stmt))
12031 return false;
12033 if (def_stmt_out)
12034 *def_stmt_out = def_stmt;
12035 if (def_stmt_info_out)
12036 *def_stmt_info_out = def_stmt_info;
12038 /* Now get a vector type if the def is internal, otherwise supply
12039 NULL_TREE and leave it up to the caller to figure out a proper
12040 type for the use stmt. */
12041 if (*dt == vect_internal_def
12042 || *dt == vect_induction_def
12043 || *dt == vect_reduction_def
12044 || *dt == vect_double_reduction_def
12045 || *dt == vect_nested_cycle
12046 || *dt == vect_first_order_recurrence)
12048 *vectype = STMT_VINFO_VECTYPE (def_stmt_info);
12049 gcc_assert (*vectype != NULL_TREE);
12050 if (dump_enabled_p ())
12051 dump_printf_loc (MSG_NOTE, vect_location,
12052 "vect_is_simple_use: vectype %T\n", *vectype);
12054 else if (*dt == vect_uninitialized_def
12055 || *dt == vect_constant_def
12056 || *dt == vect_external_def)
12057 *vectype = NULL_TREE;
12058 else
12059 gcc_unreachable ();
12061 return true;
12064 /* Function vect_is_simple_use.
12066 Same as vect_is_simple_use but determines the operand by operand
12067 position OPERAND from either STMT or SLP_NODE, filling in *OP
12068 and *SLP_DEF (when SLP_NODE is not NULL). */
12070 bool
12071 vect_is_simple_use (vec_info *vinfo, stmt_vec_info stmt, slp_tree slp_node,
12072 unsigned operand, tree *op, slp_tree *slp_def,
12073 enum vect_def_type *dt,
12074 tree *vectype, stmt_vec_info *def_stmt_info_out)
12076 if (slp_node)
12078 slp_tree child = SLP_TREE_CHILDREN (slp_node)[operand];
12079 *slp_def = child;
12080 *vectype = SLP_TREE_VECTYPE (child);
12081 if (SLP_TREE_DEF_TYPE (child) == vect_internal_def)
12083 *op = gimple_get_lhs (SLP_TREE_REPRESENTATIVE (child)->stmt);
12084 return vect_is_simple_use (*op, vinfo, dt, def_stmt_info_out);
12086 else
12088 if (def_stmt_info_out)
12089 *def_stmt_info_out = NULL;
12090 *op = SLP_TREE_SCALAR_OPS (child)[0];
12091 *dt = SLP_TREE_DEF_TYPE (child);
12092 return true;
12095 else
12097 *slp_def = NULL;
12098 if (gassign *ass = dyn_cast <gassign *> (stmt->stmt))
12100 if (gimple_assign_rhs_code (ass) == COND_EXPR
12101 && COMPARISON_CLASS_P (gimple_assign_rhs1 (ass)))
12103 if (operand < 2)
12104 *op = TREE_OPERAND (gimple_assign_rhs1 (ass), operand);
12105 else
12106 *op = gimple_op (ass, operand);
12108 else if (gimple_assign_rhs_code (ass) == VIEW_CONVERT_EXPR)
12109 *op = TREE_OPERAND (gimple_assign_rhs1 (ass), 0);
12110 else
12111 *op = gimple_op (ass, operand + 1);
12113 else if (gcall *call = dyn_cast <gcall *> (stmt->stmt))
12114 *op = gimple_call_arg (call, operand);
12115 else
12116 gcc_unreachable ();
12117 return vect_is_simple_use (*op, vinfo, dt, vectype, def_stmt_info_out);
12121 /* If OP is not NULL and is external or constant, update its vector
12122 type with VECTYPE. Returns true if successful or false if not,
12123 for example when conflicting vector types are present. */
12125 bool
12126 vect_maybe_update_slp_op_vectype (slp_tree op, tree vectype)
12128 if (!op || SLP_TREE_DEF_TYPE (op) == vect_internal_def)
12129 return true;
12130 if (SLP_TREE_VECTYPE (op))
12131 return types_compatible_p (SLP_TREE_VECTYPE (op), vectype);
12132 /* For external defs refuse to produce VECTOR_BOOLEAN_TYPE_P, those
12133      should be handled by patterns.  Allow vect_constant_def for now.  */
12134 if (VECTOR_BOOLEAN_TYPE_P (vectype)
12135 && SLP_TREE_DEF_TYPE (op) == vect_external_def)
12136 return false;
12137 SLP_TREE_VECTYPE (op) = vectype;
12138 return true;
12141 /* Function supportable_widening_operation
12143 Check whether an operation represented by the code CODE is a
12144 widening operation that is supported by the target platform in
12145 vector form (i.e., when operating on arguments of type VECTYPE_IN
12146 producing a result of type VECTYPE_OUT).
12148 Widening operations we currently support are NOP (CONVERT), FLOAT,
12149 FIX_TRUNC and WIDEN_MULT. This function checks if these operations
12150 are supported by the target platform either directly (via vector
12151 tree-codes), or via target builtins.
12153 Output:
12154 - CODE1 and CODE2 are codes of vector operations to be used when
12155 vectorizing the operation, if available.
12156 - MULTI_STEP_CVT determines the number of required intermediate steps in
12157 case of multi-step conversion (like char->short->int - in that case
12158 MULTI_STEP_CVT will be 1).
12159 - INTERM_TYPES contains the intermediate type required to perform the
12160 widening operation (short in the above example). */
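/* Illustrative scalar source (hypothetical) for such a widening:

	short *a; int *b;
	for (i = 0; i < n; i++)
	  b[i] = a[i];

   This NOP/CONVERT widening is typically implemented with a lo/hi unpack
   pair (returned in CODE1/CODE2); widening char to int instead is the
   multi-step char->short->int case above, with MULTI_STEP_CVT == 1 and
   `short' recorded in INTERM_TYPES.  */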
12162 bool
12163 supportable_widening_operation (vec_info *vinfo,
12164 enum tree_code code, stmt_vec_info stmt_info,
12165 tree vectype_out, tree vectype_in,
12166 enum tree_code *code1, enum tree_code *code2,
12167 int *multi_step_cvt,
12168 vec<tree> *interm_types)
12170 loop_vec_info loop_info = dyn_cast <loop_vec_info> (vinfo);
12171 class loop *vect_loop = NULL;
12172 machine_mode vec_mode;
12173 enum insn_code icode1, icode2;
12174 optab optab1, optab2;
12175 tree vectype = vectype_in;
12176 tree wide_vectype = vectype_out;
12177 enum tree_code c1, c2;
12178 int i;
12179 tree prev_type, intermediate_type;
12180 machine_mode intermediate_mode, prev_mode;
12181 optab optab3, optab4;
12183 *multi_step_cvt = 0;
12184 if (loop_info)
12185 vect_loop = LOOP_VINFO_LOOP (loop_info);
12187 switch (code)
12189 case WIDEN_MULT_EXPR:
12190 /* The result of a vectorized widening operation usually requires
12191 two vectors (because the widened results do not fit into one vector).
12192 The generated vector results would normally be expected to be
12193 generated in the same order as in the original scalar computation,
12194 i.e. if 8 results are generated in each vector iteration, they are
12195 to be organized as follows:
12196 vect1: [res1,res2,res3,res4],
12197 vect2: [res5,res6,res7,res8].
12199 However, in the special case that the result of the widening
12200 operation is used in a reduction computation only, the order doesn't
12201 matter (because when vectorizing a reduction we change the order of
12202 the computation). Some targets can take advantage of this and
12203 generate more efficient code. For example, targets like Altivec,
12204 that support widen_mult using a sequence of {mult_even,mult_odd}
12205 generate the following vectors:
12206 vect1: [res1,res3,res5,res7],
12207 vect2: [res2,res4,res6,res8].
12209 When vectorizing outer-loops, we execute the inner-loop sequentially
12210 (each vectorized inner-loop iteration contributes to VF outer-loop
12211      iterations in parallel).  We therefore don't allow changing the
12212 order of the computation in the inner-loop during outer-loop
12213 vectorization. */
12214 /* TODO: Another case in which order doesn't *really* matter is when we
12215 widen and then contract again, e.g. (short)((int)x * y >> 8).
12216 Normally, pack_trunc performs an even/odd permute, whereas the
12217 repack from an even/odd expansion would be an interleave, which
12218 would be significantly simpler for e.g. AVX2. */
12219 /* In any case, in order to avoid duplicating the code below, recurse
12220 on VEC_WIDEN_MULT_EVEN_EXPR. If it succeeds, all the return values
12221 are properly set up for the caller. If we fail, we'll continue with
12222 a VEC_WIDEN_MULT_LO/HI_EXPR check. */
12223 if (vect_loop
12224 && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
12225 && !nested_in_vect_loop_p (vect_loop, stmt_info)
12226 && supportable_widening_operation (vinfo, VEC_WIDEN_MULT_EVEN_EXPR,
12227 stmt_info, vectype_out,
12228 vectype_in, code1, code2,
12229 multi_step_cvt, interm_types))
12231 /* Elements in a vector with vect_used_by_reduction property cannot
12232 be reordered if the use chain with this property does not have the
12233      same operation.  One such example is s += a * b, where elements
12234 in a and b cannot be reordered. Here we check if the vector defined
12235 by STMT is only directly used in the reduction statement. */
12236 tree lhs = gimple_assign_lhs (stmt_info->stmt);
12237 stmt_vec_info use_stmt_info = loop_info->lookup_single_use (lhs);
12238 if (use_stmt_info
12239 && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
12240 return true;
12242 c1 = VEC_WIDEN_MULT_LO_EXPR;
12243 c2 = VEC_WIDEN_MULT_HI_EXPR;
12244 break;
12246 case DOT_PROD_EXPR:
12247 c1 = DOT_PROD_EXPR;
12248 c2 = DOT_PROD_EXPR;
12249 break;
12251 case SAD_EXPR:
12252 c1 = SAD_EXPR;
12253 c2 = SAD_EXPR;
12254 break;
12256 case VEC_WIDEN_MULT_EVEN_EXPR:
12257 /* Support the recursion induced just above. */
12258 c1 = VEC_WIDEN_MULT_EVEN_EXPR;
12259 c2 = VEC_WIDEN_MULT_ODD_EXPR;
12260 break;
12262 case WIDEN_LSHIFT_EXPR:
12263 c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
12264 c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
12265 break;
12267 case WIDEN_PLUS_EXPR:
12268 c1 = VEC_WIDEN_PLUS_LO_EXPR;
12269 c2 = VEC_WIDEN_PLUS_HI_EXPR;
12270 break;
12272 case WIDEN_MINUS_EXPR:
12273 c1 = VEC_WIDEN_MINUS_LO_EXPR;
12274 c2 = VEC_WIDEN_MINUS_HI_EXPR;
12275 break;
12277 CASE_CONVERT:
12278 c1 = VEC_UNPACK_LO_EXPR;
12279 c2 = VEC_UNPACK_HI_EXPR;
12280 break;
12282 case FLOAT_EXPR:
12283 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
12284 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
12285 break;
12287 case FIX_TRUNC_EXPR:
12288 c1 = VEC_UNPACK_FIX_TRUNC_LO_EXPR;
12289 c2 = VEC_UNPACK_FIX_TRUNC_HI_EXPR;
12290 break;
12292 default:
12293 gcc_unreachable ();
12296 if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
12297 std::swap (c1, c2);
12299 if (code == FIX_TRUNC_EXPR)
12301      /* The signedness is determined from the output operand.  */
12302 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
12303 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
12305 else if (CONVERT_EXPR_CODE_P (code)
12306 && VECTOR_BOOLEAN_TYPE_P (wide_vectype)
12307 && VECTOR_BOOLEAN_TYPE_P (vectype)
12308 && TYPE_MODE (wide_vectype) == TYPE_MODE (vectype)
12309 && SCALAR_INT_MODE_P (TYPE_MODE (vectype)))
12311 /* If the input and result modes are the same, a different optab
12312 is needed where we pass in the number of units in vectype. */
12313 optab1 = vec_unpacks_sbool_lo_optab;
12314 optab2 = vec_unpacks_sbool_hi_optab;
12316 else
12318 optab1 = optab_for_tree_code (c1, vectype, optab_default);
12319 optab2 = optab_for_tree_code (c2, vectype, optab_default);
12322 if (!optab1 || !optab2)
12323 return false;
12325 vec_mode = TYPE_MODE (vectype);
12326 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
12327 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
12328 return false;
12330 *code1 = c1;
12331 *code2 = c2;
12333 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
12334 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
12336 if (!VECTOR_BOOLEAN_TYPE_P (vectype))
12337 return true;
12338 /* For scalar masks we may have different boolean
12339 vector types sharing the same QImode. Thus we
12340 add an additional check on the number of elements. */
12341 if (known_eq (TYPE_VECTOR_SUBPARTS (vectype),
12342 TYPE_VECTOR_SUBPARTS (wide_vectype) * 2))
12343 return true;
12346 /* Check if it's a multi-step conversion that can be done using intermediate
12347 types. */
12349 prev_type = vectype;
12350 prev_mode = vec_mode;
12352 if (!CONVERT_EXPR_CODE_P (code))
12353 return false;
12355 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
12356 intermediate steps in the promotion sequence. We try up to
12357 MAX_INTERM_CVT_STEPS steps to get to WIDE_VECTYPE, and fail if we do
12358 not. */
12359 interm_types->create (MAX_INTERM_CVT_STEPS);
12360 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
12362 intermediate_mode = insn_data[icode1].operand[0].mode;
12363 if (VECTOR_BOOLEAN_TYPE_P (prev_type))
12364 intermediate_type
12365 = vect_halve_mask_nunits (prev_type, intermediate_mode);
12366 else if (VECTOR_MODE_P (intermediate_mode))
12368 tree intermediate_element_type
12369 = lang_hooks.types.type_for_mode (GET_MODE_INNER (intermediate_mode),
12370 TYPE_UNSIGNED (prev_type));
12371 intermediate_type
12372 = build_vector_type_for_mode (intermediate_element_type,
12373 intermediate_mode);
12375 else
12376 intermediate_type
12377 = lang_hooks.types.type_for_mode (intermediate_mode,
12378 TYPE_UNSIGNED (prev_type));
12380 if (VECTOR_BOOLEAN_TYPE_P (intermediate_type)
12381 && VECTOR_BOOLEAN_TYPE_P (prev_type)
12382 && intermediate_mode == prev_mode
12383 && SCALAR_INT_MODE_P (prev_mode))
12385 /* If the input and result modes are the same, a different optab
12386 is needed where we pass in the number of units in vectype. */
12387 optab3 = vec_unpacks_sbool_lo_optab;
12388 optab4 = vec_unpacks_sbool_hi_optab;
12390 else
12392 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
12393 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
12396 if (!optab3 || !optab4
12397 || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
12398 || insn_data[icode1].operand[0].mode != intermediate_mode
12399 || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
12400 || insn_data[icode2].operand[0].mode != intermediate_mode
12401 || ((icode1 = optab_handler (optab3, intermediate_mode))
12402 == CODE_FOR_nothing)
12403 || ((icode2 = optab_handler (optab4, intermediate_mode))
12404 == CODE_FOR_nothing))
12405 break;
12407 interm_types->quick_push (intermediate_type);
12408 (*multi_step_cvt)++;
12410 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
12411 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
12413 if (!VECTOR_BOOLEAN_TYPE_P (vectype))
12414 return true;
12415 if (known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type),
12416 TYPE_VECTOR_SUBPARTS (wide_vectype) * 2))
12417 return true;
12420 prev_type = intermediate_type;
12421 prev_mode = intermediate_mode;
12424 interm_types->release ();
12425 return false;
12429 /* Function supportable_narrowing_operation
12431 Check whether an operation represented by the code CODE is a
12432 narrowing operation that is supported by the target platform in
12433 vector form (i.e., when operating on arguments of type VECTYPE_IN
12434 and producing a result of type VECTYPE_OUT).
12436 The narrowing operations we currently support are NOP (CONVERT), FIX_TRUNC
12437 and FLOAT. This function checks whether these operations are supported by
12438 the target platform directly via vector tree-codes.
12440 Output:
12441 - CODE1 is the code of a vector operation to be used when
12442 vectorizing the operation, if available.
12443 - MULTI_STEP_CVT determines the number of required intermediate steps in
12444 case of a multi-step conversion (like int->short->char; in that case
12445 MULTI_STEP_CVT will be 1).
12446 - INTERM_TYPES contains the intermediate type(s) required to perform the
12447 narrowing operation (short in the above example). */
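/* Illustrative sketch only (the 128-bit vector modes below are hypothetical;
   the real modes and step count depend on the target). Narrowing int
   elements to char is done as int -> short -> char:

     V8HI tmp1 = VEC_PACK_TRUNC_EXPR <v4si_a, v4si_b>;
     V8HI tmp2 = VEC_PACK_TRUNC_EXPR <v4si_c, v4si_d>;
     V16QI res = VEC_PACK_TRUNC_EXPR <tmp1, tmp2>;

   Here CODE1 would be VEC_PACK_TRUNC_EXPR, *MULTI_STEP_CVT would be 1 and
   INTERM_TYPES would contain the single intermediate vector type V8HI.  */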
12449 bool
12450 supportable_narrowing_operation (enum tree_code code,
12451 tree vectype_out, tree vectype_in,
12452 enum tree_code *code1, int *multi_step_cvt,
12453 vec<tree> *interm_types)
12455 machine_mode vec_mode;
12456 enum insn_code icode1;
12457 optab optab1, interm_optab;
12458 tree vectype = vectype_in;
12459 tree narrow_vectype = vectype_out;
12460 enum tree_code c1;
12461 tree intermediate_type, prev_type;
12462 machine_mode intermediate_mode, prev_mode;
12463 int i;
12464 unsigned HOST_WIDE_INT n_elts;
12465 bool uns;
12467 *multi_step_cvt = 0;
12468 switch (code)
12470 CASE_CONVERT:
12471 c1 = VEC_PACK_TRUNC_EXPR;
12472 if (VECTOR_BOOLEAN_TYPE_P (narrow_vectype)
12473 && VECTOR_BOOLEAN_TYPE_P (vectype)
12474 && SCALAR_INT_MODE_P (TYPE_MODE (vectype))
12475 && TYPE_VECTOR_SUBPARTS (vectype).is_constant (&n_elts)
12476 && n_elts < BITS_PER_UNIT)
12477 optab1 = vec_pack_sbool_trunc_optab;
12478 else
12479 optab1 = optab_for_tree_code (c1, vectype, optab_default);
12480 break;
12482 case FIX_TRUNC_EXPR:
12483 c1 = VEC_PACK_FIX_TRUNC_EXPR;
12484 /* The signedness is determined from the output operand. */
12485 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
12486 break;
12488 case FLOAT_EXPR:
12489 c1 = VEC_PACK_FLOAT_EXPR;
12490 optab1 = optab_for_tree_code (c1, vectype, optab_default);
12491 break;
12493 default:
12494 gcc_unreachable ();
12497 if (!optab1)
12498 return false;
12500 vec_mode = TYPE_MODE (vectype);
12501 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
12502 return false;
12504 *code1 = c1;
12506 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
12508 if (!VECTOR_BOOLEAN_TYPE_P (vectype))
12509 return true;
12510 /* For scalar masks we may have different boolean
12511 vector types sharing the same QImode. Thus we
12512 add an additional check on the number of elements. */
12513 if (known_eq (TYPE_VECTOR_SUBPARTS (vectype) * 2,
12514 TYPE_VECTOR_SUBPARTS (narrow_vectype)))
12515 return true;
12518 if (code == FLOAT_EXPR)
12519 return false;
12521 /* Check if it's a multi-step conversion that can be done using intermediate
12522 types. */
12523 prev_mode = vec_mode;
12524 prev_type = vectype;
12525 if (code == FIX_TRUNC_EXPR)
12526 uns = TYPE_UNSIGNED (vectype_out);
12527 else
12528 uns = TYPE_UNSIGNED (vectype);
12530 /* For a multi-step FIX_TRUNC_EXPR prefer a signed floating-point to
12531 integer conversion over an unsigned one, as unsigned FIX_TRUNC_EXPR
12532 is often more costly than signed. */
12533 if (code == FIX_TRUNC_EXPR && uns)
12535 enum insn_code icode2;
12537 intermediate_type
12538 = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
12539 interm_optab
12540 = optab_for_tree_code (c1, intermediate_type, optab_default);
12541 if (interm_optab != unknown_optab
12542 && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
12543 && insn_data[icode1].operand[0].mode
12544 == insn_data[icode2].operand[0].mode)
12546 uns = false;
12547 optab1 = interm_optab;
12548 icode1 = icode2;
12552 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
12553 intermediate steps in the narrowing sequence. We try up to
12554 MAX_INTERM_CVT_STEPS steps to get to NARROW_VECTYPE, and fail if we do not. */
12555 interm_types->create (MAX_INTERM_CVT_STEPS);
12556 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
12558 intermediate_mode = insn_data[icode1].operand[0].mode;
12559 if (VECTOR_BOOLEAN_TYPE_P (prev_type))
12560 intermediate_type
12561 = vect_double_mask_nunits (prev_type, intermediate_mode);
12562 else
12563 intermediate_type
12564 = lang_hooks.types.type_for_mode (intermediate_mode, uns);
12565 if (VECTOR_BOOLEAN_TYPE_P (intermediate_type)
12566 && VECTOR_BOOLEAN_TYPE_P (prev_type)
12567 && SCALAR_INT_MODE_P (prev_mode)
12568 && TYPE_VECTOR_SUBPARTS (intermediate_type).is_constant (&n_elts)
12569 && n_elts < BITS_PER_UNIT)
12570 interm_optab = vec_pack_sbool_trunc_optab;
12571 else
12572 interm_optab
12573 = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
12574 optab_default);
12575 if (!interm_optab
12576 || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
12577 || insn_data[icode1].operand[0].mode != intermediate_mode
12578 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
12579 == CODE_FOR_nothing))
12580 break;
12582 interm_types->quick_push (intermediate_type);
12583 (*multi_step_cvt)++;
12585 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
12587 if (!VECTOR_BOOLEAN_TYPE_P (vectype))
12588 return true;
12589 if (known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type) * 2,
12590 TYPE_VECTOR_SUBPARTS (narrow_vectype)))
12591 return true;
12594 prev_mode = intermediate_mode;
12595 prev_type = intermediate_type;
12596 optab1 = interm_optab;
12599 interm_types->release ();
12600 return false;
12603 /* Generate and return a vector mask of MASK_TYPE such that
12604 mask[I] is true iff J + START_INDEX < END_INDEX for all J <= I.
12605 Add the statements to SEQ. */
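/* A minimal scalar model of that mask, for illustration only: the function
   itself emits a single IFN_WHILE_ULT call rather than a loop, and NUNITS
   here is just a hypothetical name for the number of elements of MASK_TYPE.

     for (unsigned int i = 0; i < nunits; ++i)
       mask[i] = (start_index + i < end_index);

   This assumes START_INDEX + I does not wrap around; the "for all J <= I"
   wording above is what keeps the definition precise without that
   assumption.  */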
12607 tree
12608 vect_gen_while (gimple_seq *seq, tree mask_type, tree start_index,
12609 tree end_index, const char *name)
12611 tree cmp_type = TREE_TYPE (start_index);
12612 gcc_checking_assert (direct_internal_fn_supported_p (IFN_WHILE_ULT,
12613 cmp_type, mask_type,
12614 OPTIMIZE_FOR_SPEED));
12615 gcall *call = gimple_build_call_internal (IFN_WHILE_ULT, 3,
12616 start_index, end_index,
12617 build_zero_cst (mask_type));
12618 tree tmp;
12619 if (name)
12620 tmp = make_temp_ssa_name (mask_type, NULL, name);
12621 else
12622 tmp = make_ssa_name (mask_type);
12623 gimple_call_set_lhs (call, tmp);
12624 gimple_seq_add_stmt (seq, call);
12625 return tmp;
12628 /* Generate a vector mask of type MASK_TYPE for which index I is false iff
12629 J + START_INDEX < END_INDEX for all J <= I. Add the statements to SEQ. */
12631 tree
12632 vect_gen_while_not (gimple_seq *seq, tree mask_type, tree start_index,
12633 tree end_index)
12635 tree tmp = vect_gen_while (seq, mask_type, start_index, end_index);
12636 return gimple_build (seq, BIT_NOT_EXPR, mask_type, tmp);
12639 /* Try to compute the vector types required to vectorize STMT_INFO,
12640 returning true on success and false if vectorization isn't possible.
12641 If GROUP_SIZE is nonzero and we're performing BB vectorization,
12642 make sure that the number of elements in the vectors is no bigger
12643 than GROUP_SIZE.
12645 On success:
12647 - Set *STMT_VECTYPE_OUT to:
12648 - NULL_TREE if the statement doesn't need to be vectorized;
12649 - the equivalent of STMT_VINFO_VECTYPE otherwise.
12651 - Set *NUNITS_VECTYPE_OUT to the vector type that contains the maximum
12652 number of units needed to vectorize STMT_INFO, or NULL_TREE if the
12653 statement does not help to determine the overall number of units. */
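/* Illustrative sketch only (the 128-bit modes are hypothetical): for a
   widening conversion such as

     int_x = (int) short_y;

   *STMT_VECTYPE_OUT would be the int vector type (say V4SI), while the
   smallest scalar type involved is short, so *NUNITS_VECTYPE_OUT would be
   the short vector type (say V8HI), which carries the larger number of
   units (8) needed to vectorize the statement.  */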
12655 opt_result
12656 vect_get_vector_types_for_stmt (vec_info *vinfo, stmt_vec_info stmt_info,
12657 tree *stmt_vectype_out,
12658 tree *nunits_vectype_out,
12659 unsigned int group_size)
12661 gimple *stmt = stmt_info->stmt;
12663 /* For BB vectorization, we should always have a group size once we've
12664 constructed the SLP tree; the only valid uses of zero GROUP_SIZEs
12665 are tentative requests during things like early data reference
12666 analysis and pattern recognition. */
12667 if (is_a <bb_vec_info> (vinfo))
12668 gcc_assert (vinfo->slp_instances.is_empty () || group_size != 0);
12669 else
12670 group_size = 0;
12672 *stmt_vectype_out = NULL_TREE;
12673 *nunits_vectype_out = NULL_TREE;
12675 if (gimple_get_lhs (stmt) == NULL_TREE
12676 /* MASK_STORE has no lhs, but is ok. */
12677 && !gimple_call_internal_p (stmt, IFN_MASK_STORE))
12679 if (is_a <gcall *> (stmt))
12681 /* Ignore calls with no lhs. These must be calls to
12682 #pragma omp simd functions, and the vectorization factor
12683 they really need can't be determined until
12684 vectorizable_simd_clone_call. */
12685 if (dump_enabled_p ())
12686 dump_printf_loc (MSG_NOTE, vect_location,
12687 "defer to SIMD clone analysis.\n");
12688 return opt_result::success ();
12691 return opt_result::failure_at (stmt,
12692 "not vectorized: irregular stmt.%G", stmt);
12695 tree vectype;
12696 tree scalar_type = NULL_TREE;
12697 if (group_size == 0 && STMT_VINFO_VECTYPE (stmt_info))
12699 vectype = STMT_VINFO_VECTYPE (stmt_info);
12700 if (dump_enabled_p ())
12701 dump_printf_loc (MSG_NOTE, vect_location,
12702 "precomputed vectype: %T\n", vectype);
12704 else if (vect_use_mask_type_p (stmt_info))
12706 unsigned int precision = stmt_info->mask_precision;
12707 scalar_type = build_nonstandard_integer_type (precision, 1);
12708 vectype = get_mask_type_for_scalar_type (vinfo, scalar_type, group_size);
12709 if (!vectype)
12710 return opt_result::failure_at (stmt, "not vectorized: unsupported"
12711 " data-type %T\n", scalar_type);
12712 if (dump_enabled_p ())
12713 dump_printf_loc (MSG_NOTE, vect_location, "vectype: %T\n", vectype);
12715 else
12717 if (data_reference *dr = STMT_VINFO_DATA_REF (stmt_info))
12718 scalar_type = TREE_TYPE (DR_REF (dr));
12719 else if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
12720 scalar_type = TREE_TYPE (gimple_call_arg (stmt, 3));
12721 else
12722 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
12724 if (dump_enabled_p ())
12726 if (group_size)
12727 dump_printf_loc (MSG_NOTE, vect_location,
12728 "get vectype for scalar type (group size %d):"
12729 " %T\n", group_size, scalar_type);
12730 else
12731 dump_printf_loc (MSG_NOTE, vect_location,
12732 "get vectype for scalar type: %T\n", scalar_type);
12734 vectype = get_vectype_for_scalar_type (vinfo, scalar_type, group_size);
12735 if (!vectype)
12736 return opt_result::failure_at (stmt,
12737 "not vectorized:"
12738 " unsupported data-type %T\n",
12739 scalar_type);
12741 if (dump_enabled_p ())
12742 dump_printf_loc (MSG_NOTE, vect_location, "vectype: %T\n", vectype);
12745 if (scalar_type && VECTOR_MODE_P (TYPE_MODE (scalar_type)))
12746 return opt_result::failure_at (stmt,
12747 "not vectorized: vector stmt in loop:%G",
12748 stmt);
12750 *stmt_vectype_out = vectype;
12752 /* Don't try to compute scalar types if the stmt produces a boolean
12753 vector; use the existing vector type instead. */
12754 tree nunits_vectype = vectype;
12755 if (!VECTOR_BOOLEAN_TYPE_P (vectype))
12757 /* The number of units is set according to the smallest scalar
12758 type (or the largest vector size, but we only support one
12759 vector size per vectorization). */
12760 scalar_type = vect_get_smallest_scalar_type (stmt_info,
12761 TREE_TYPE (vectype));
12762 if (scalar_type != TREE_TYPE (vectype))
12764 if (dump_enabled_p ())
12765 dump_printf_loc (MSG_NOTE, vect_location,
12766 "get vectype for smallest scalar type: %T\n",
12767 scalar_type);
12768 nunits_vectype = get_vectype_for_scalar_type (vinfo, scalar_type,
12769 group_size);
12770 if (!nunits_vectype)
12771 return opt_result::failure_at
12772 (stmt, "not vectorized: unsupported data-type %T\n",
12773 scalar_type);
12774 if (dump_enabled_p ())
12775 dump_printf_loc (MSG_NOTE, vect_location, "nunits vectype: %T\n",
12776 nunits_vectype);
12780 if (!multiple_p (TYPE_VECTOR_SUBPARTS (nunits_vectype),
12781 TYPE_VECTOR_SUBPARTS (*stmt_vectype_out)))
12782 return opt_result::failure_at (stmt,
12783 "Not vectorized: Incompatible number "
12784 "of vector subparts between %T and %T\n",
12785 nunits_vectype, *stmt_vectype_out);
12787 if (dump_enabled_p ())
12789 dump_printf_loc (MSG_NOTE, vect_location, "nunits = ");
12790 dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (nunits_vectype));
12791 dump_printf (MSG_NOTE, "\n");
12794 *nunits_vectype_out = nunits_vectype;
12795 return opt_result::success ();
12798 /* Generate and return a statement sequence that sets the vector length LEN, which is:
12800 min_of_start_and_end = min (START_INDEX, END_INDEX);
12801 left_len = END_INDEX - min_of_start_and_end;
12802 rhs = min (left_len, LEN_LIMIT);
12803 LEN = rhs;
12805 Note: the cost of the code generated by this function is modeled
12806 by vect_estimate_min_profitable_iters, so changes here may need
12807 corresponding changes there. */
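/* A minimal scalar model of that sequence, for illustration only (the
   variable names are hypothetical):

     len = MIN (end_index - MIN (start_index, end_index), len_limit);

   e.g. with start_index = 0, end_index = 10 and len_limit = 8 this gives
   MIN (10, 8) = 8 for a full vector, and with start_index = 8 it gives
   MIN (2, 8) = 2 for the final partial vector.  */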
12809 gimple_seq
12810 vect_gen_len (tree len, tree start_index, tree end_index, tree len_limit)
12812 gimple_seq stmts = NULL;
12813 tree len_type = TREE_TYPE (len);
12814 gcc_assert (TREE_TYPE (start_index) == len_type);
12816 tree min = gimple_build (&stmts, MIN_EXPR, len_type, start_index, end_index);
12817 tree left_len = gimple_build (&stmts, MINUS_EXPR, len_type, end_index, min);
12818 tree rhs = gimple_build (&stmts, MIN_EXPR, len_type, left_len, len_limit);
12819 gimple* stmt = gimple_build_assign (len, rhs);
12820 gimple_seq_add_stmt (&stmts, stmt);
12822 return stmts;