Make vect_model_store_cost take a vec_load_store_type
[official-gcc.git] / gcc / tree-vect-stmts.c
blobcdca95acb507e6f230c3cb070881e4837df2aec3
1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003-2018 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
4 and Ira Rosen <irar@il.ibm.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "ssa.h"
31 #include "optabs-tree.h"
32 #include "insn-config.h"
33 #include "recog.h" /* FIXME: for insn_data */
34 #include "cgraph.h"
35 #include "dumpfile.h"
36 #include "alias.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
39 #include "tree-eh.h"
40 #include "gimplify.h"
41 #include "gimple-iterator.h"
42 #include "gimplify-me.h"
43 #include "tree-cfg.h"
44 #include "tree-ssa-loop-manip.h"
45 #include "cfgloop.h"
46 #include "tree-ssa-loop.h"
47 #include "tree-scalar-evolution.h"
48 #include "tree-vectorizer.h"
49 #include "builtins.h"
50 #include "internal-fn.h"
51 #include "tree-vector-builder.h"
52 #include "vec-perm-indices.h"
54 /* For lang_hooks.types.type_for_mode. */
55 #include "langhooks.h"
57 /* Return the vectorized type for the given statement. */
59 tree
60 stmt_vectype (struct _stmt_vec_info *stmt_info)
62 return STMT_VINFO_VECTYPE (stmt_info);
65 /* Return TRUE iff the given statement is in an inner loop relative to
66 the loop being vectorized. */
67 bool
68 stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
70 gimple *stmt = STMT_VINFO_STMT (stmt_info);
71 basic_block bb = gimple_bb (stmt);
72 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
73 struct loop* loop;
75 if (!loop_vinfo)
76 return false;
78 loop = LOOP_VINFO_LOOP (loop_vinfo);
80 return (bb->loop_father == loop->inner);
83 /* Record the cost of a statement, either by directly informing the
84 target model or by saving it in a vector for later processing.
85 Return a preliminary estimate of the statement's cost. */
87 unsigned
88 record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
89 enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
90 int misalign, enum vect_cost_model_location where)
92 if ((kind == vector_load || kind == unaligned_load)
93 && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
94 kind = vector_gather_load;
95 if ((kind == vector_store || kind == unaligned_store)
96 && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
97 kind = vector_scatter_store;
98 if (body_cost_vec)
100 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
101 stmt_info_for_cost si = { count, kind,
102 stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
103 misalign };
104 body_cost_vec->safe_push (si);
105 return (unsigned)
106 (builtin_vectorization_cost (kind, vectype, misalign) * count);
108 else
109 return add_stmt_cost (stmt_info->vinfo->target_cost_data,
110 count, kind, stmt_info, misalign, where);
113 /* Return a variable of type ELEM_TYPE[NELEMS]. */
115 static tree
116 create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
118 return create_tmp_var (build_array_type_nelts (elem_type, nelems),
119 "vect_array");
122 /* ARRAY is an array of vectors created by create_vector_array.
123 Return an SSA_NAME for the vector in index N. The reference
124 is part of the vectorization of STMT and the vector is associated
125 with scalar destination SCALAR_DEST. */
127 static tree
128 read_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
129 tree array, unsigned HOST_WIDE_INT n)
131 tree vect_type, vect, vect_name, array_ref;
132 gimple *new_stmt;
134 gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
135 vect_type = TREE_TYPE (TREE_TYPE (array));
136 vect = vect_create_destination_var (scalar_dest, vect_type);
137 array_ref = build4 (ARRAY_REF, vect_type, array,
138 build_int_cst (size_type_node, n),
139 NULL_TREE, NULL_TREE);
141 new_stmt = gimple_build_assign (vect, array_ref);
142 vect_name = make_ssa_name (vect, new_stmt);
143 gimple_assign_set_lhs (new_stmt, vect_name);
144 vect_finish_stmt_generation (stmt, new_stmt, gsi);
146 return vect_name;
149 /* ARRAY is an array of vectors created by create_vector_array.
150 Emit code to store SSA_NAME VECT in index N of the array.
151 The store is part of the vectorization of STMT. */
153 static void
154 write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect,
155 tree array, unsigned HOST_WIDE_INT n)
157 tree array_ref;
158 gimple *new_stmt;
160 array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
161 build_int_cst (size_type_node, n),
162 NULL_TREE, NULL_TREE);
164 new_stmt = gimple_build_assign (array_ref, vect);
165 vect_finish_stmt_generation (stmt, new_stmt, gsi);
168 /* PTR is a pointer to an array of type TYPE. Return a representation
169 of *PTR. The memory reference replaces those in FIRST_DR
170 (and its group). */
172 static tree
173 create_array_ref (tree type, tree ptr, tree alias_ptr_type)
175 tree mem_ref;
177 mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
178 /* Arrays have the same alignment as their type. */
179 set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
180 return mem_ref;
183 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
185 /* Function vect_mark_relevant.
187 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
189 static void
190 vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
191 enum vect_relevant relevant, bool live_p)
193 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
194 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
195 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
196 gimple *pattern_stmt;
198 if (dump_enabled_p ())
200 dump_printf_loc (MSG_NOTE, vect_location,
201 "mark relevant %d, live %d: ", relevant, live_p);
202 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
205 /* If this stmt is an original stmt in a pattern, we might need to mark its
206 related pattern stmt instead of the original stmt. However, such stmts
207 may have their own uses that are not in any pattern, in such cases the
208 stmt itself should be marked. */
209 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
211 /* This is the last stmt in a sequence that was detected as a
212 pattern that can potentially be vectorized. Don't mark the stmt
213 as relevant/live because it's not going to be vectorized.
214 Instead mark the pattern-stmt that replaces it. */
216 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
218 if (dump_enabled_p ())
219 dump_printf_loc (MSG_NOTE, vect_location,
220 "last stmt in pattern. don't mark"
221 " relevant/live.\n");
222 stmt_info = vinfo_for_stmt (pattern_stmt);
223 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
224 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
225 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
226 stmt = pattern_stmt;
229 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
230 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
231 STMT_VINFO_RELEVANT (stmt_info) = relevant;
233 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
234 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
236 if (dump_enabled_p ())
237 dump_printf_loc (MSG_NOTE, vect_location,
238 "already marked relevant/live.\n");
239 return;
242 worklist->safe_push (stmt);
246 /* Function is_simple_and_all_uses_invariant
248 Return true if STMT is simple and all uses of it are invariant. */
250 bool
251 is_simple_and_all_uses_invariant (gimple *stmt, loop_vec_info loop_vinfo)
253 tree op;
254 gimple *def_stmt;
255 ssa_op_iter iter;
257 if (!is_gimple_assign (stmt))
258 return false;
260 FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
262 enum vect_def_type dt = vect_uninitialized_def;
264 if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt))
266 if (dump_enabled_p ())
267 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
268 "use not simple.\n");
269 return false;
272 if (dt != vect_external_def && dt != vect_constant_def)
273 return false;
275 return true;
278 /* Function vect_stmt_relevant_p.
280 Return true if STMT in loop that is represented by LOOP_VINFO is
281 "relevant for vectorization".
283 A stmt is considered "relevant for vectorization" if:
284 - it has uses outside the loop.
285 - it has vdefs (it alters memory).
286 - control stmts in the loop (except for the exit condition).
288 CHECKME: what other side effects would the vectorizer allow? */
290 static bool
291 vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
292 enum vect_relevant *relevant, bool *live_p)
294 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
295 ssa_op_iter op_iter;
296 imm_use_iterator imm_iter;
297 use_operand_p use_p;
298 def_operand_p def_p;
300 *relevant = vect_unused_in_scope;
301 *live_p = false;
303 /* cond stmt other than loop exit cond. */
304 if (is_ctrl_stmt (stmt)
305 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
306 != loop_exit_ctrl_vec_info_type)
307 *relevant = vect_used_in_scope;
309 /* changing memory. */
310 if (gimple_code (stmt) != GIMPLE_PHI)
311 if (gimple_vdef (stmt)
312 && !gimple_clobber_p (stmt))
314 if (dump_enabled_p ())
315 dump_printf_loc (MSG_NOTE, vect_location,
316 "vec_stmt_relevant_p: stmt has vdefs.\n");
317 *relevant = vect_used_in_scope;
320 /* uses outside the loop. */
321 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
323 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
325 basic_block bb = gimple_bb (USE_STMT (use_p));
326 if (!flow_bb_inside_loop_p (loop, bb))
328 if (dump_enabled_p ())
329 dump_printf_loc (MSG_NOTE, vect_location,
330 "vec_stmt_relevant_p: used out of loop.\n");
332 if (is_gimple_debug (USE_STMT (use_p)))
333 continue;
335 /* We expect all such uses to be in the loop exit phis
336 (because of loop closed form) */
337 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
338 gcc_assert (bb == single_exit (loop)->dest);
340 *live_p = true;
345 if (*live_p && *relevant == vect_unused_in_scope
346 && !is_simple_and_all_uses_invariant (stmt, loop_vinfo))
348 if (dump_enabled_p ())
349 dump_printf_loc (MSG_NOTE, vect_location,
350 "vec_stmt_relevant_p: stmt live but not relevant.\n");
351 *relevant = vect_used_only_live;
354 return (*live_p || *relevant);
358 /* Function exist_non_indexing_operands_for_use_p
360 USE is one of the uses attached to STMT. Check if USE is
361 used in STMT for anything other than indexing an array. */
363 static bool
364 exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)
366 tree operand;
367 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
369 /* USE corresponds to some operand in STMT. If there is no data
370 reference in STMT, then any operand that corresponds to USE
371 is not indexing an array. */
372 if (!STMT_VINFO_DATA_REF (stmt_info))
373 return true;
375 /* STMT has a data_ref. FORNOW this means that its of one of
376 the following forms:
377 -1- ARRAY_REF = var
378 -2- var = ARRAY_REF
379 (This should have been verified in analyze_data_refs).
381 'var' in the second case corresponds to a def, not a use,
382 so USE cannot correspond to any operands that are not used
383 for array indexing.
385 Therefore, all we need to check is if STMT falls into the
386 first case, and whether var corresponds to USE. */
388 if (!gimple_assign_copy_p (stmt))
390 if (is_gimple_call (stmt)
391 && gimple_call_internal_p (stmt))
392 switch (gimple_call_internal_fn (stmt))
394 case IFN_MASK_STORE:
395 operand = gimple_call_arg (stmt, 3);
396 if (operand == use)
397 return true;
398 /* FALLTHRU */
399 case IFN_MASK_LOAD:
400 operand = gimple_call_arg (stmt, 2);
401 if (operand == use)
402 return true;
403 break;
404 default:
405 break;
407 return false;
410 if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
411 return false;
412 operand = gimple_assign_rhs1 (stmt);
413 if (TREE_CODE (operand) != SSA_NAME)
414 return false;
416 if (operand == use)
417 return true;
419 return false;
424 Function process_use.
426 Inputs:
427 - a USE in STMT in a loop represented by LOOP_VINFO
428 - RELEVANT - enum value to be set in the STMT_VINFO of the stmt
429 that defined USE. This is done by calling mark_relevant and passing it
430 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
431 - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
432 be performed.
434 Outputs:
435 Generally, LIVE_P and RELEVANT are used to define the liveness and
436 relevance info of the DEF_STMT of this USE:
437 STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
438 STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
439 Exceptions:
440 - case 1: If USE is used only for address computations (e.g. array indexing),
441 which does not need to be directly vectorized, then the liveness/relevance
442 of the respective DEF_STMT is left unchanged.
443 - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
444 skip DEF_STMT cause it had already been processed.
445 - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
446 be modified accordingly.
448 Return true if everything is as expected. Return false otherwise. */
450 static bool
451 process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo,
452 enum vect_relevant relevant, vec<gimple *> *worklist,
453 bool force)
455 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
456 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
457 stmt_vec_info dstmt_vinfo;
458 basic_block bb, def_bb;
459 gimple *def_stmt;
460 enum vect_def_type dt;
462 /* case 1: we are only interested in uses that need to be vectorized. Uses
463 that are used for address computation are not considered relevant. */
464 if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
465 return true;
467 if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt))
469 if (dump_enabled_p ())
470 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
471 "not vectorized: unsupported use in stmt.\n");
472 return false;
475 if (!def_stmt || gimple_nop_p (def_stmt))
476 return true;
478 def_bb = gimple_bb (def_stmt);
479 if (!flow_bb_inside_loop_p (loop, def_bb))
481 if (dump_enabled_p ())
482 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
483 return true;
486 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
487 DEF_STMT must have already been processed, because this should be the
488 only way that STMT, which is a reduction-phi, was put in the worklist,
489 as there should be no other uses for DEF_STMT in the loop. So we just
490 check that everything is as expected, and we are done. */
491 dstmt_vinfo = vinfo_for_stmt (def_stmt);
492 bb = gimple_bb (stmt);
493 if (gimple_code (stmt) == GIMPLE_PHI
494 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
495 && gimple_code (def_stmt) != GIMPLE_PHI
496 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
497 && bb->loop_father == def_bb->loop_father)
499 if (dump_enabled_p ())
500 dump_printf_loc (MSG_NOTE, vect_location,
501 "reduc-stmt defining reduc-phi in the same nest.\n");
502 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
503 dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
504 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
505 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
506 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
507 return true;
510 /* case 3a: outer-loop stmt defining an inner-loop stmt:
511 outer-loop-header-bb:
512 d = def_stmt
513 inner-loop:
514 stmt # use (d)
515 outer-loop-tail-bb:
516 ... */
517 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
519 if (dump_enabled_p ())
520 dump_printf_loc (MSG_NOTE, vect_location,
521 "outer-loop def-stmt defining inner-loop stmt.\n");
523 switch (relevant)
525 case vect_unused_in_scope:
526 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
527 vect_used_in_scope : vect_unused_in_scope;
528 break;
530 case vect_used_in_outer_by_reduction:
531 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
532 relevant = vect_used_by_reduction;
533 break;
535 case vect_used_in_outer:
536 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
537 relevant = vect_used_in_scope;
538 break;
540 case vect_used_in_scope:
541 break;
543 default:
544 gcc_unreachable ();
548 /* case 3b: inner-loop stmt defining an outer-loop stmt:
549 outer-loop-header-bb:
551 inner-loop:
552 d = def_stmt
553 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
554 stmt # use (d) */
555 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
557 if (dump_enabled_p ())
558 dump_printf_loc (MSG_NOTE, vect_location,
559 "inner-loop def-stmt defining outer-loop stmt.\n");
561 switch (relevant)
563 case vect_unused_in_scope:
564 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
565 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
566 vect_used_in_outer_by_reduction : vect_unused_in_scope;
567 break;
569 case vect_used_by_reduction:
570 case vect_used_only_live:
571 relevant = vect_used_in_outer_by_reduction;
572 break;
574 case vect_used_in_scope:
575 relevant = vect_used_in_outer;
576 break;
578 default:
579 gcc_unreachable ();
582 /* We are also not interested in uses on loop PHI backedges that are
583 inductions. Otherwise we'll needlessly vectorize the IV increment
584 and cause hybrid SLP for SLP inductions. Unless the PHI is live
585 of course. */
586 else if (gimple_code (stmt) == GIMPLE_PHI
587 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_induction_def
588 && ! STMT_VINFO_LIVE_P (stmt_vinfo)
589 && (PHI_ARG_DEF_FROM_EDGE (stmt, loop_latch_edge (bb->loop_father))
590 == use))
592 if (dump_enabled_p ())
593 dump_printf_loc (MSG_NOTE, vect_location,
594 "induction value on backedge.\n");
595 return true;
599 vect_mark_relevant (worklist, def_stmt, relevant, false);
600 return true;
604 /* Function vect_mark_stmts_to_be_vectorized.
606 Not all stmts in the loop need to be vectorized. For example:
608 for i...
609 for j...
610 1. T0 = i + j
611 2. T1 = a[T0]
613 3. j = j + 1
615 Stmt 1 and 3 do not need to be vectorized, because loop control and
616 addressing of vectorized data-refs are handled differently.
618 This pass detects such stmts. */
620 bool
621 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
623 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
624 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
625 unsigned int nbbs = loop->num_nodes;
626 gimple_stmt_iterator si;
627 gimple *stmt;
628 unsigned int i;
629 stmt_vec_info stmt_vinfo;
630 basic_block bb;
631 gimple *phi;
632 bool live_p;
633 enum vect_relevant relevant;
635 if (dump_enabled_p ())
636 dump_printf_loc (MSG_NOTE, vect_location,
637 "=== vect_mark_stmts_to_be_vectorized ===\n");
639 auto_vec<gimple *, 64> worklist;
641 /* 1. Init worklist. */
642 for (i = 0; i < nbbs; i++)
644 bb = bbs[i];
645 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
647 phi = gsi_stmt (si);
648 if (dump_enabled_p ())
650 dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
651 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
654 if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
655 vect_mark_relevant (&worklist, phi, relevant, live_p);
657 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
659 stmt = gsi_stmt (si);
660 if (dump_enabled_p ())
662 dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
663 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
666 if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
667 vect_mark_relevant (&worklist, stmt, relevant, live_p);
671 /* 2. Process_worklist */
672 while (worklist.length () > 0)
674 use_operand_p use_p;
675 ssa_op_iter iter;
677 stmt = worklist.pop ();
678 if (dump_enabled_p ())
680 dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
681 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
684 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
685 (DEF_STMT) as relevant/irrelevant according to the relevance property
686 of STMT. */
687 stmt_vinfo = vinfo_for_stmt (stmt);
688 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
690 /* Generally, the relevance property of STMT (in STMT_VINFO_RELEVANT) is
691 propagated as is to the DEF_STMTs of its USEs.
693 One exception is when STMT has been identified as defining a reduction
694 variable; in this case we set the relevance to vect_used_by_reduction.
695 This is because we distinguish between two kinds of relevant stmts -
696 those that are used by a reduction computation, and those that are
697 (also) used by a regular computation. This allows us later on to
698 identify stmts that are used solely by a reduction, and therefore the
699 order of the results that they produce does not have to be kept. */
701 switch (STMT_VINFO_DEF_TYPE (stmt_vinfo))
703 case vect_reduction_def:
704 gcc_assert (relevant != vect_unused_in_scope);
705 if (relevant != vect_unused_in_scope
706 && relevant != vect_used_in_scope
707 && relevant != vect_used_by_reduction
708 && relevant != vect_used_only_live)
710 if (dump_enabled_p ())
711 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
712 "unsupported use of reduction.\n");
713 return false;
715 break;
717 case vect_nested_cycle:
718 if (relevant != vect_unused_in_scope
719 && relevant != vect_used_in_outer_by_reduction
720 && relevant != vect_used_in_outer)
722 if (dump_enabled_p ())
723 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
724 "unsupported use of nested cycle.\n");
726 return false;
728 break;
730 case vect_double_reduction_def:
731 if (relevant != vect_unused_in_scope
732 && relevant != vect_used_by_reduction
733 && relevant != vect_used_only_live)
735 if (dump_enabled_p ())
736 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
737 "unsupported use of double reduction.\n");
739 return false;
741 break;
743 default:
744 break;
747 if (is_pattern_stmt_p (stmt_vinfo))
749 /* Pattern statements are not inserted into the code, so
750 FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
751 have to scan the RHS or function arguments instead. */
752 if (is_gimple_assign (stmt))
754 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
755 tree op = gimple_assign_rhs1 (stmt);
757 i = 1;
758 if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
760 if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
761 relevant, &worklist, false)
762 || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
763 relevant, &worklist, false))
764 return false;
765 i = 2;
767 for (; i < gimple_num_ops (stmt); i++)
769 op = gimple_op (stmt, i);
770 if (TREE_CODE (op) == SSA_NAME
771 && !process_use (stmt, op, loop_vinfo, relevant,
772 &worklist, false))
773 return false;
776 else if (is_gimple_call (stmt))
778 for (i = 0; i < gimple_call_num_args (stmt); i++)
780 tree arg = gimple_call_arg (stmt, i);
781 if (!process_use (stmt, arg, loop_vinfo, relevant,
782 &worklist, false))
783 return false;
787 else
788 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
790 tree op = USE_FROM_PTR (use_p);
791 if (!process_use (stmt, op, loop_vinfo, relevant,
792 &worklist, false))
793 return false;
796 if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
798 gather_scatter_info gs_info;
799 if (!vect_check_gather_scatter (stmt, loop_vinfo, &gs_info))
800 gcc_unreachable ();
801 if (!process_use (stmt, gs_info.offset, loop_vinfo, relevant,
802 &worklist, true))
803 return false;
805 } /* while worklist */
807 return true;
811 /* Function vect_model_simple_cost.
813 Models cost for simple operations, i.e. those that only emit ncopies of a
814 single op. Right now, this does not account for multiple insns that could
815 be generated for the single vector op. We will handle that shortly. */
817 void
818 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
819 enum vect_def_type *dt,
820 int ndts,
821 stmt_vector_for_cost *prologue_cost_vec,
822 stmt_vector_for_cost *body_cost_vec)
824 int i;
825 int inside_cost = 0, prologue_cost = 0;
827 /* The SLP costs were already calculated during SLP tree build. */
828 if (PURE_SLP_STMT (stmt_info))
829 return;
831 /* Cost the "broadcast" of a scalar operand in to a vector operand.
832 Use scalar_to_vec to cost the broadcast, as elsewhere in the vector
833 cost model. */
834 for (i = 0; i < ndts; i++)
835 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
836 prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
837 stmt_info, 0, vect_prologue);
839 /* Pass the inside-of-loop statements to the target-specific cost model. */
840 inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
841 stmt_info, 0, vect_body);
843 if (dump_enabled_p ())
844 dump_printf_loc (MSG_NOTE, vect_location,
845 "vect_model_simple_cost: inside_cost = %d, "
846 "prologue_cost = %d .\n", inside_cost, prologue_cost);
850 /* Model cost for type demotion and promotion operations. PWR is normally
851 zero for single-step promotions and demotions. It will be one if
852 two-step promotion/demotion is required, and so on. Each additional
853 step doubles the number of instructions required. */
855 static void
856 vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
857 enum vect_def_type *dt, int pwr)
859 int i, tmp;
860 int inside_cost = 0, prologue_cost = 0;
861 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
862 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
863 void *target_cost_data;
865 /* The SLP costs were already calculated during SLP tree build. */
866 if (PURE_SLP_STMT (stmt_info))
867 return;
869 if (loop_vinfo)
870 target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
871 else
872 target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);
874 for (i = 0; i < pwr + 1; i++)
876 tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
877 (i + 1) : i;
878 inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
879 vec_promote_demote, stmt_info, 0,
880 vect_body);
883 /* FORNOW: Assuming maximum 2 args per stmts. */
884 for (i = 0; i < 2; i++)
885 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
886 prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
887 stmt_info, 0, vect_prologue);
889 if (dump_enabled_p ())
890 dump_printf_loc (MSG_NOTE, vect_location,
891 "vect_model_promotion_demotion_cost: inside_cost = %d, "
892 "prologue_cost = %d .\n", inside_cost, prologue_cost);
895 /* Function vect_model_store_cost
897 Models cost for stores. In the case of grouped accesses, one access
898 has the overhead of the grouped access attributed to it. */
900 void
901 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
902 vect_memory_access_type memory_access_type,
903 vec_load_store_type vls_type, slp_tree slp_node,
904 stmt_vector_for_cost *prologue_cost_vec,
905 stmt_vector_for_cost *body_cost_vec)
907 unsigned int inside_cost = 0, prologue_cost = 0;
908 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
909 gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
910 bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);
912 if (vls_type == VLS_STORE_INVARIANT)
913 prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
914 stmt_info, 0, vect_prologue);
916 /* Grouped stores update all elements in the group at once,
917 so we want the DR for the first statement. */
918 if (!slp_node && grouped_access_p)
920 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
921 dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
924 /* True if we should include any once-per-group costs as well as
925 the cost of the statement itself. For SLP we only get called
926 once per group anyhow. */
927 bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));
929 /* We assume that the cost of a single store-lanes instruction is
930 equivalent to the cost of GROUP_SIZE separate stores. If a grouped
931 access is instead being provided by a permute-and-store operation,
932 include the cost of the permutes. */
933 if (first_stmt_p
934 && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
936 /* Uses a high and low interleave or shuffle operations for each
937 needed permute. */
938 int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
939 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
940 inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
941 stmt_info, 0, vect_body);
943 if (dump_enabled_p ())
944 dump_printf_loc (MSG_NOTE, vect_location,
945 "vect_model_store_cost: strided group_size = %d .\n",
946 group_size);
949 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
950 /* Costs of the stores. */
951 if (memory_access_type == VMAT_ELEMENTWISE
952 || memory_access_type == VMAT_GATHER_SCATTER)
954 /* N scalar stores plus extracting the elements. */
955 unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
956 inside_cost += record_stmt_cost (body_cost_vec,
957 ncopies * assumed_nunits,
958 scalar_store, stmt_info, 0, vect_body);
960 else
961 vect_get_store_cost (dr, ncopies, &inside_cost, body_cost_vec);
963 if (memory_access_type == VMAT_ELEMENTWISE
964 || memory_access_type == VMAT_STRIDED_SLP)
966 /* N scalar stores plus extracting the elements. */
967 unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
968 inside_cost += record_stmt_cost (body_cost_vec,
969 ncopies * assumed_nunits,
970 vec_to_scalar, stmt_info, 0, vect_body);
973 if (dump_enabled_p ())
974 dump_printf_loc (MSG_NOTE, vect_location,
975 "vect_model_store_cost: inside_cost = %d, "
976 "prologue_cost = %d .\n", inside_cost, prologue_cost);
980 /* Calculate cost of DR's memory access. */
981 void
982 vect_get_store_cost (struct data_reference *dr, int ncopies,
983 unsigned int *inside_cost,
984 stmt_vector_for_cost *body_cost_vec)
986 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
987 gimple *stmt = DR_STMT (dr);
988 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
990 switch (alignment_support_scheme)
992 case dr_aligned:
994 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
995 vector_store, stmt_info, 0,
996 vect_body);
998 if (dump_enabled_p ())
999 dump_printf_loc (MSG_NOTE, vect_location,
1000 "vect_model_store_cost: aligned.\n");
1001 break;
1004 case dr_unaligned_supported:
1006 /* Here, we assign an additional cost for the unaligned store. */
1007 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1008 unaligned_store, stmt_info,
1009 DR_MISALIGNMENT (dr), vect_body);
1010 if (dump_enabled_p ())
1011 dump_printf_loc (MSG_NOTE, vect_location,
1012 "vect_model_store_cost: unaligned supported by "
1013 "hardware.\n");
1014 break;
1017 case dr_unaligned_unsupported:
1019 *inside_cost = VECT_MAX_COST;
1021 if (dump_enabled_p ())
1022 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1023 "vect_model_store_cost: unsupported access.\n");
1024 break;
1027 default:
1028 gcc_unreachable ();
1033 /* Function vect_model_load_cost
1035 Models cost for loads. In the case of grouped accesses, one access has
1036 the overhead of the grouped access attributed to it. Since unaligned
1037 accesses are supported for loads, we also account for the costs of the
1038 access scheme chosen. */
1040 void
1041 vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
1042 vect_memory_access_type memory_access_type,
1043 slp_tree slp_node,
1044 stmt_vector_for_cost *prologue_cost_vec,
1045 stmt_vector_for_cost *body_cost_vec)
1047 gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
1048 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
1049 unsigned int inside_cost = 0, prologue_cost = 0;
1050 bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);
1052 /* Grouped loads read all elements in the group at once,
1053 so we want the DR for the first statement. */
1054 if (!slp_node && grouped_access_p)
1056 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
1057 dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
1060 /* True if we should include any once-per-group costs as well as
1061 the cost of the statement itself. For SLP we only get called
1062 once per group anyhow. */
1063 bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));
1065 /* We assume that the cost of a single load-lanes instruction is
1066 equivalent to the cost of GROUP_SIZE separate loads. If a grouped
1067 access is instead being provided by a load-and-permute operation,
1068 include the cost of the permutes. */
1069 if (first_stmt_p
1070 && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
1072 /* Uses an even and odd extract operations or shuffle operations
1073 for each needed permute. */
1074 int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
1075 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
1076 inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
1077 stmt_info, 0, vect_body);
1079 if (dump_enabled_p ())
1080 dump_printf_loc (MSG_NOTE, vect_location,
1081 "vect_model_load_cost: strided group_size = %d .\n",
1082 group_size);
1085 /* The loads themselves. */
1086 if (memory_access_type == VMAT_ELEMENTWISE
1087 || memory_access_type == VMAT_GATHER_SCATTER)
1089 /* N scalar loads plus gathering them into a vector. */
1090 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1091 unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
1092 inside_cost += record_stmt_cost (body_cost_vec,
1093 ncopies * assumed_nunits,
1094 scalar_load, stmt_info, 0, vect_body);
1096 else
1097 vect_get_load_cost (dr, ncopies, first_stmt_p,
1098 &inside_cost, &prologue_cost,
1099 prologue_cost_vec, body_cost_vec, true);
1100 if (memory_access_type == VMAT_ELEMENTWISE
1101 || memory_access_type == VMAT_STRIDED_SLP)
1102 inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
1103 stmt_info, 0, vect_body);
1105 if (dump_enabled_p ())
1106 dump_printf_loc (MSG_NOTE, vect_location,
1107 "vect_model_load_cost: inside_cost = %d, "
1108 "prologue_cost = %d .\n", inside_cost, prologue_cost);
1112 /* Calculate cost of DR's memory access. */
1113 void
1114 vect_get_load_cost (struct data_reference *dr, int ncopies,
1115 bool add_realign_cost, unsigned int *inside_cost,
1116 unsigned int *prologue_cost,
1117 stmt_vector_for_cost *prologue_cost_vec,
1118 stmt_vector_for_cost *body_cost_vec,
1119 bool record_prologue_costs)
1121 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
1122 gimple *stmt = DR_STMT (dr);
1123 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1125 switch (alignment_support_scheme)
1127 case dr_aligned:
1129 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1130 stmt_info, 0, vect_body);
1132 if (dump_enabled_p ())
1133 dump_printf_loc (MSG_NOTE, vect_location,
1134 "vect_model_load_cost: aligned.\n");
1136 break;
1138 case dr_unaligned_supported:
1140 /* Here, we assign an additional cost for the unaligned load. */
1141 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1142 unaligned_load, stmt_info,
1143 DR_MISALIGNMENT (dr), vect_body);
1145 if (dump_enabled_p ())
1146 dump_printf_loc (MSG_NOTE, vect_location,
1147 "vect_model_load_cost: unaligned supported by "
1148 "hardware.\n");
1150 break;
1152 case dr_explicit_realign:
1154 *inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
1155 vector_load, stmt_info, 0, vect_body);
1156 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1157 vec_perm, stmt_info, 0, vect_body);
1159 /* FIXME: If the misalignment remains fixed across the iterations of
1160 the containing loop, the following cost should be added to the
1161 prologue costs. */
1162 if (targetm.vectorize.builtin_mask_for_load)
1163 *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
1164 stmt_info, 0, vect_body);
1166 if (dump_enabled_p ())
1167 dump_printf_loc (MSG_NOTE, vect_location,
1168 "vect_model_load_cost: explicit realign\n");
1170 break;
1172 case dr_explicit_realign_optimized:
1174 if (dump_enabled_p ())
1175 dump_printf_loc (MSG_NOTE, vect_location,
1176 "vect_model_load_cost: unaligned software "
1177 "pipelined.\n");
1179 /* Unaligned software pipeline has a load of an address, an initial
1180 load, and possibly a mask operation to "prime" the loop. However,
1181 if this is an access in a group of loads, which provide grouped
1182 access, then the above cost should only be considered for one
1183 access in the group. Inside the loop, there is a load op
1184 and a realignment op. */
1186 if (add_realign_cost && record_prologue_costs)
1188 *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
1189 vector_stmt, stmt_info,
1190 0, vect_prologue);
1191 if (targetm.vectorize.builtin_mask_for_load)
1192 *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
1193 vector_stmt, stmt_info,
1194 0, vect_prologue);
1197 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1198 stmt_info, 0, vect_body);
1199 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
1200 stmt_info, 0, vect_body);
1202 if (dump_enabled_p ())
1203 dump_printf_loc (MSG_NOTE, vect_location,
1204 "vect_model_load_cost: explicit realign optimized"
1205 "\n");
1207 break;
1210 case dr_unaligned_unsupported:
1212 *inside_cost = VECT_MAX_COST;
1214 if (dump_enabled_p ())
1215 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1216 "vect_model_load_cost: unsupported access.\n");
1217 break;
1220 default:
1221 gcc_unreachable ();
1225 /* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
1226 the loop preheader for the vectorized stmt STMT. */
1228 static void
1229 vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
1231 if (gsi)
1232 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1233 else
1235 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1236 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1238 if (loop_vinfo)
1240 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1241 basic_block new_bb;
1242 edge pe;
1244 if (nested_in_vect_loop_p (loop, stmt))
1245 loop = loop->inner;
1247 pe = loop_preheader_edge (loop);
1248 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
1249 gcc_assert (!new_bb);
1251 else
1253 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
1254 basic_block bb;
1255 gimple_stmt_iterator gsi_bb_start;
1257 gcc_assert (bb_vinfo);
1258 bb = BB_VINFO_BB (bb_vinfo);
1259 gsi_bb_start = gsi_after_labels (bb);
1260 gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
1264 if (dump_enabled_p ())
1266 dump_printf_loc (MSG_NOTE, vect_location,
1267 "created new init_stmt: ");
1268 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
1272 /* Function vect_init_vector.
1274 Insert a new stmt (INIT_STMT) that initializes a new variable of type
1275 TYPE with the value VAL. If TYPE is a vector type and VAL does not have
1276 vector type a vector with all elements equal to VAL is created first.
1277 Place the initialization at BSI if it is not NULL. Otherwise, place the
1278 initialization at the loop preheader.
1279 Return the DEF of INIT_STMT.
1280 It will be used in the vectorization of STMT. */
1282 tree
1283 vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi)
1285 gimple *init_stmt;
1286 tree new_temp;
1288 /* We abuse this function to push sth to a SSA name with initial 'val'. */
1289 if (! useless_type_conversion_p (type, TREE_TYPE (val)))
1291 gcc_assert (TREE_CODE (type) == VECTOR_TYPE);
1292 if (! types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
1294 /* Scalar boolean value should be transformed into
1295 all zeros or all ones value before building a vector. */
1296 if (VECTOR_BOOLEAN_TYPE_P (type))
1298 tree true_val = build_all_ones_cst (TREE_TYPE (type));
1299 tree false_val = build_zero_cst (TREE_TYPE (type));
1301 if (CONSTANT_CLASS_P (val))
1302 val = integer_zerop (val) ? false_val : true_val;
1303 else
1305 new_temp = make_ssa_name (TREE_TYPE (type));
1306 init_stmt = gimple_build_assign (new_temp, COND_EXPR,
1307 val, true_val, false_val);
1308 vect_init_vector_1 (stmt, init_stmt, gsi);
1309 val = new_temp;
1312 else if (CONSTANT_CLASS_P (val))
1313 val = fold_convert (TREE_TYPE (type), val);
1314 else
1316 new_temp = make_ssa_name (TREE_TYPE (type));
1317 if (! INTEGRAL_TYPE_P (TREE_TYPE (val)))
1318 init_stmt = gimple_build_assign (new_temp,
1319 fold_build1 (VIEW_CONVERT_EXPR,
1320 TREE_TYPE (type),
1321 val));
1322 else
1323 init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
1324 vect_init_vector_1 (stmt, init_stmt, gsi);
1325 val = new_temp;
1328 val = build_vector_from_val (type, val);
1331 new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
1332 init_stmt = gimple_build_assign (new_temp, val);
1333 vect_init_vector_1 (stmt, init_stmt, gsi);
1334 return new_temp;
1337 /* Function vect_get_vec_def_for_operand_1.
1339 For a defining stmt DEF_STMT of a scalar stmt, return a vector def with type
1340 DT that will be used in the vectorized stmt. */
1342 tree
1343 vect_get_vec_def_for_operand_1 (gimple *def_stmt, enum vect_def_type dt)
1345 tree vec_oprnd;
1346 gimple *vec_stmt;
1347 stmt_vec_info def_stmt_info = NULL;
1349 switch (dt)
1351 /* operand is a constant or a loop invariant. */
1352 case vect_constant_def:
1353 case vect_external_def:
1354 /* Code should use vect_get_vec_def_for_operand. */
1355 gcc_unreachable ();
1357 /* operand is defined inside the loop. */
1358 case vect_internal_def:
1360 /* Get the def from the vectorized stmt. */
1361 def_stmt_info = vinfo_for_stmt (def_stmt);
1363 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1364 /* Get vectorized pattern statement. */
1365 if (!vec_stmt
1366 && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
1367 && !STMT_VINFO_RELEVANT (def_stmt_info))
1368 vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
1369 STMT_VINFO_RELATED_STMT (def_stmt_info)));
1370 gcc_assert (vec_stmt);
1371 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1372 vec_oprnd = PHI_RESULT (vec_stmt);
1373 else if (is_gimple_call (vec_stmt))
1374 vec_oprnd = gimple_call_lhs (vec_stmt);
1375 else
1376 vec_oprnd = gimple_assign_lhs (vec_stmt);
1377 return vec_oprnd;
1380 /* operand is defined by a loop header phi. */
1381 case vect_reduction_def:
1382 case vect_double_reduction_def:
1383 case vect_nested_cycle:
1384 case vect_induction_def:
1386 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1388 /* Get the def from the vectorized stmt. */
1389 def_stmt_info = vinfo_for_stmt (def_stmt);
1390 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1391 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1392 vec_oprnd = PHI_RESULT (vec_stmt);
1393 else
1394 vec_oprnd = gimple_get_lhs (vec_stmt);
1395 return vec_oprnd;
1398 default:
1399 gcc_unreachable ();
1404 /* Function vect_get_vec_def_for_operand.
1406 OP is an operand in STMT. This function returns a (vector) def that will be
1407 used in the vectorized stmt for STMT.
1409 In the case that OP is an SSA_NAME which is defined in the loop, then
1410 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
1412 In case OP is an invariant or constant, a new stmt that creates a vector def
1413 needs to be introduced. VECTYPE may be used to specify a required type for
1414 vector invariant. */
1416 tree
1417 vect_get_vec_def_for_operand (tree op, gimple *stmt, tree vectype)
1419 gimple *def_stmt;
1420 enum vect_def_type dt;
1421 bool is_simple_use;
1422 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1423 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1425 if (dump_enabled_p ())
1427 dump_printf_loc (MSG_NOTE, vect_location,
1428 "vect_get_vec_def_for_operand: ");
1429 dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
1430 dump_printf (MSG_NOTE, "\n");
1433 is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt);
1434 gcc_assert (is_simple_use);
1435 if (def_stmt && dump_enabled_p ())
1437 dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = ");
1438 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
1441 if (dt == vect_constant_def || dt == vect_external_def)
1443 tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
1444 tree vector_type;
1446 if (vectype)
1447 vector_type = vectype;
1448 else if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op))
1449 && VECTOR_BOOLEAN_TYPE_P (stmt_vectype))
1450 vector_type = build_same_sized_truth_vector_type (stmt_vectype);
1451 else
1452 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
1454 gcc_assert (vector_type);
1455 return vect_init_vector (stmt, op, vector_type, NULL);
1457 else
1458 return vect_get_vec_def_for_operand_1 (def_stmt, dt);
1462 /* Function vect_get_vec_def_for_stmt_copy
1464 Return a vector-def for an operand. This function is used when the
1465 vectorized stmt to be created (by the caller to this function) is a "copy"
1466 created in case the vectorized result cannot fit in one vector, and several
1467 copies of the vector-stmt are required. In this case the vector-def is
1468 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1469 of the stmt that defines VEC_OPRND.
1470 DT is the type of the vector def VEC_OPRND.
1472 Context:
1473 In case the vectorization factor (VF) is bigger than the number
1474 of elements that can fit in a vectype (nunits), we have to generate
1475 more than one vector stmt to vectorize the scalar stmt. This situation
1476 arises when there are multiple data-types operated upon in the loop; the
1477 smallest data-type determines the VF, and as a result, when vectorizing
1478 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1479 vector stmt (each computing a vector of 'nunits' results, and together
1480 computing 'VF' results in each iteration). This function is called when
1481 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1482 which VF=16 and nunits=4, so the number of copies required is 4):
1484 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1486 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1487 VS1.1: vx.1 = memref1 VS1.2
1488 VS1.2: vx.2 = memref2 VS1.3
1489 VS1.3: vx.3 = memref3
1491 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1492 VSnew.1: vz1 = vx.1 + ... VSnew.2
1493 VSnew.2: vz2 = vx.2 + ... VSnew.3
1494 VSnew.3: vz3 = vx.3 + ...
1496 The vectorization of S1 is explained in vectorizable_load.
1497 The vectorization of S2:
1498 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1499 the function 'vect_get_vec_def_for_operand' is called to
1500 get the relevant vector-def for each operand of S2. For operand x it
1501 returns the vector-def 'vx.0'.
1503 To create the remaining copies of the vector-stmt (VSnew.j), this
1504 function is called to get the relevant vector-def for each operand. It is
1505 obtained from the respective VS1.j stmt, which is recorded in the
1506 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1508 For example, to obtain the vector-def 'vx.1' in order to create the
1509 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1510 Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
1511 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1512 and return its def ('vx.1').
1513 Overall, to create the above sequence this function will be called 3 times:
1514 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1515 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1516 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1518 tree
1519 vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1521 gimple *vec_stmt_for_operand;
1522 stmt_vec_info def_stmt_info;
1524 /* Do nothing; can reuse same def. */
1525 if (dt == vect_external_def || dt == vect_constant_def )
1526 return vec_oprnd;
1528 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1529 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1530 gcc_assert (def_stmt_info);
1531 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1532 gcc_assert (vec_stmt_for_operand);
1533 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1534 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1535 else
1536 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1537 return vec_oprnd;
1541 /* Get vectorized definitions for the operands to create a copy of an original
1542 stmt. See vect_get_vec_def_for_stmt_copy () for details. */
1544 void
1545 vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
1546 vec<tree> *vec_oprnds0,
1547 vec<tree> *vec_oprnds1)
1549 tree vec_oprnd = vec_oprnds0->pop ();
1551 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
1552 vec_oprnds0->quick_push (vec_oprnd);
1554 if (vec_oprnds1 && vec_oprnds1->length ())
1556 vec_oprnd = vec_oprnds1->pop ();
1557 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
1558 vec_oprnds1->quick_push (vec_oprnd);
1563 /* Get vectorized definitions for OP0 and OP1. */
1565 void
1566 vect_get_vec_defs (tree op0, tree op1, gimple *stmt,
1567 vec<tree> *vec_oprnds0,
1568 vec<tree> *vec_oprnds1,
1569 slp_tree slp_node)
1571 if (slp_node)
1573 int nops = (op1 == NULL_TREE) ? 1 : 2;
1574 auto_vec<tree> ops (nops);
1575 auto_vec<vec<tree> > vec_defs (nops);
1577 ops.quick_push (op0);
1578 if (op1)
1579 ops.quick_push (op1);
1581 vect_get_slp_defs (ops, slp_node, &vec_defs);
1583 *vec_oprnds0 = vec_defs[0];
1584 if (op1)
1585 *vec_oprnds1 = vec_defs[1];
1587 else
1589 tree vec_oprnd;
1591 vec_oprnds0->create (1);
1592 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt);
1593 vec_oprnds0->quick_push (vec_oprnd);
1595 if (op1)
1597 vec_oprnds1->create (1);
1598 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt);
1599 vec_oprnds1->quick_push (vec_oprnd);
1605 /* Function vect_finish_stmt_generation.
1607 Insert a new stmt. */
1609 void
1610 vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt,
1611 gimple_stmt_iterator *gsi)
1613 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1614 vec_info *vinfo = stmt_info->vinfo;
1616 gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
1618 if (!gsi_end_p (*gsi)
1619 && gimple_has_mem_ops (vec_stmt))
1621 gimple *at_stmt = gsi_stmt (*gsi);
1622 tree vuse = gimple_vuse (at_stmt);
1623 if (vuse && TREE_CODE (vuse) == SSA_NAME)
1625 tree vdef = gimple_vdef (at_stmt);
1626 gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
1627 /* If we have an SSA vuse and insert a store, update virtual
1628 SSA form to avoid triggering the renamer. Do so only
1629 if we can easily see all uses - which is what almost always
1630 happens with the way vectorized stmts are inserted. */
1631 if ((vdef && TREE_CODE (vdef) == SSA_NAME)
1632 && ((is_gimple_assign (vec_stmt)
1633 && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
1634 || (is_gimple_call (vec_stmt)
1635 && !(gimple_call_flags (vec_stmt)
1636 & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
1638 tree new_vdef = copy_ssa_name (vuse, vec_stmt);
1639 gimple_set_vdef (vec_stmt, new_vdef);
1640 SET_USE (gimple_vuse_op (at_stmt), new_vdef);
1644 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1646 set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo));
1648 if (dump_enabled_p ())
1650 dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
1651 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
1654 gimple_set_location (vec_stmt, gimple_location (stmt));
1656 /* While EH edges will generally prevent vectorization, stmt might
1657 e.g. be in a must-not-throw region. Ensure newly created stmts
1658 that could throw are part of the same region. */
1659 int lp_nr = lookup_stmt_eh_lp (stmt);
1660 if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
1661 add_stmt_to_eh_lp (vec_stmt, lp_nr);
1664 /* We want to vectorize a call to combined function CFN with function
1665 decl FNDECL, using VECTYPE_OUT as the type of the output and VECTYPE_IN
1666 as the types of all inputs. Check whether this is possible using
1667 an internal function, returning its code if so or IFN_LAST if not. */
1669 static internal_fn
1670 vectorizable_internal_function (combined_fn cfn, tree fndecl,
1671 tree vectype_out, tree vectype_in)
1673 internal_fn ifn;
1674 if (internal_fn_p (cfn))
1675 ifn = as_internal_fn (cfn);
1676 else
1677 ifn = associated_internal_fn (fndecl);
1678 if (ifn != IFN_LAST && direct_internal_fn_p (ifn))
1680 const direct_internal_fn_info &info = direct_internal_fn (ifn);
1681 if (info.vectorizable)
1683 tree type0 = (info.type0 < 0 ? vectype_out : vectype_in);
1684 tree type1 = (info.type1 < 0 ? vectype_out : vectype_in);
1685 if (direct_internal_fn_supported_p (ifn, tree_pair (type0, type1),
1686 OPTIMIZE_FOR_SPEED))
1687 return ifn;
1690 return IFN_LAST;
1694 static tree permute_vec_elements (tree, tree, tree, gimple *,
1695 gimple_stmt_iterator *);
1697 /* STMT is a non-strided load or store, meaning that it accesses
1698 elements with a known constant step. Return -1 if that step
1699 is negative, 0 if it is zero, and 1 if it is greater than zero. */
1701 static int
1702 compare_step_with_zero (gimple *stmt)
1704 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1705 data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
1706 return tree_int_cst_compare (vect_dr_behavior (dr)->step,
1707 size_zero_node);
1710 /* If the target supports a permute mask that reverses the elements in
1711 a vector of type VECTYPE, return that mask, otherwise return null. */
1713 static tree
1714 perm_mask_for_reverse (tree vectype)
1716 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
1718 /* The encoding has a single stepped pattern. */
1719 vec_perm_builder sel (nunits, 1, 3);
1720 for (int i = 0; i < 3; ++i)
1721 sel.quick_push (nunits - 1 - i);
1723 vec_perm_indices indices (sel, 1, nunits);
1724 if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
1725 return NULL_TREE;
1726 return vect_gen_perm_mask_checked (vectype, indices);
1729 /* A subroutine of get_load_store_type, with a subset of the same
1730 arguments. Handle the case where STMT is part of a grouped load
1731 or store.
1733 For stores, the statements in the group are all consecutive
1734 and there is no gap at the end. For loads, the statements in the
1735 group might not be consecutive; there can be gaps between statements
1736 as well as at the end. */
1738 static bool
1739 get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
1740 vec_load_store_type vls_type,
1741 vect_memory_access_type *memory_access_type)
1743 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1744 vec_info *vinfo = stmt_info->vinfo;
1745 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1746 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
1747 gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
1748 data_reference *first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
1749 unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
1750 bool single_element_p = (stmt == first_stmt
1751 && !GROUP_NEXT_ELEMENT (stmt_info));
1752 unsigned HOST_WIDE_INT gap = GROUP_GAP (vinfo_for_stmt (first_stmt));
1753 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
1755 /* True if the vectorized statements would access beyond the last
1756 statement in the group. */
1757 bool overrun_p = false;
1759 /* True if we can cope with such overrun by peeling for gaps, so that
1760 there is at least one final scalar iteration after the vector loop. */
1761 bool can_overrun_p = (vls_type == VLS_LOAD && loop_vinfo && !loop->inner);
1763 /* There can only be a gap at the end of the group if the stride is
1764 known at compile time. */
1765 gcc_assert (!STMT_VINFO_STRIDED_P (stmt_info) || gap == 0);
1767 /* Stores can't yet have gaps. */
1768 gcc_assert (slp || vls_type == VLS_LOAD || gap == 0);
1770 if (slp)
1772 if (STMT_VINFO_STRIDED_P (stmt_info))
1774 /* Try to use consecutive accesses of GROUP_SIZE elements,
1775 separated by the stride, until we have a complete vector.
1776 Fall back to scalar accesses if that isn't possible. */
1777 if (multiple_p (nunits, group_size))
1778 *memory_access_type = VMAT_STRIDED_SLP;
1779 else
1780 *memory_access_type = VMAT_ELEMENTWISE;
1782 else
1784 overrun_p = loop_vinfo && gap != 0;
1785 if (overrun_p && vls_type != VLS_LOAD)
1787 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1788 "Grouped store with gaps requires"
1789 " non-consecutive accesses\n");
1790 return false;
1792 /* An overrun is fine if the trailing elements are smaller
1793 than the alignment boundary B. Every vector access will
1794 be a multiple of B and so we are guaranteed to access a
1795 non-gap element in the same B-sized block. */
1796 if (overrun_p
1797 && gap < (vect_known_alignment_in_bytes (first_dr)
1798 / vect_get_scalar_dr_size (first_dr)))
1799 overrun_p = false;
1800 if (overrun_p && !can_overrun_p)
1802 if (dump_enabled_p ())
1803 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1804 "Peeling for outer loop is not supported\n");
1805 return false;
1807 *memory_access_type = VMAT_CONTIGUOUS;
1810 else
1812 /* We can always handle this case using elementwise accesses,
1813 but see if something more efficient is available. */
1814 *memory_access_type = VMAT_ELEMENTWISE;
1816 /* If there is a gap at the end of the group then these optimizations
1817 would access excess elements in the last iteration. */
1818 bool would_overrun_p = (gap != 0);
1819 /* An overrun is fine if the trailing elements are smaller than the
1820 alignment boundary B. Every vector access will be a multiple of B
1821 and so we are guaranteed to access a non-gap element in the
1822 same B-sized block. */
1823 if (would_overrun_p
1824 && gap < (vect_known_alignment_in_bytes (first_dr)
1825 / vect_get_scalar_dr_size (first_dr)))
1826 would_overrun_p = false;
1828 if (!STMT_VINFO_STRIDED_P (stmt_info)
1829 && (can_overrun_p || !would_overrun_p)
1830 && compare_step_with_zero (stmt) > 0)
1832 /* First try using LOAD/STORE_LANES. */
1833 if (vls_type == VLS_LOAD
1834 ? vect_load_lanes_supported (vectype, group_size)
1835 : vect_store_lanes_supported (vectype, group_size))
1837 *memory_access_type = VMAT_LOAD_STORE_LANES;
1838 overrun_p = would_overrun_p;
1841 /* If that fails, try using permuting loads. */
1842 if (*memory_access_type == VMAT_ELEMENTWISE
1843 && (vls_type == VLS_LOAD
1844 ? vect_grouped_load_supported (vectype, single_element_p,
1845 group_size)
1846 : vect_grouped_store_supported (vectype, group_size)))
1848 *memory_access_type = VMAT_CONTIGUOUS_PERMUTE;
1849 overrun_p = would_overrun_p;
1854 if (vls_type != VLS_LOAD && first_stmt == stmt)
1856 /* STMT is the leader of the group. Check the operands of all the
1857 stmts of the group. */
1858 gimple *next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
1859 while (next_stmt)
1861 gcc_assert (gimple_assign_single_p (next_stmt));
1862 tree op = gimple_assign_rhs1 (next_stmt);
1863 gimple *def_stmt;
1864 enum vect_def_type dt;
1865 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
1867 if (dump_enabled_p ())
1868 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1869 "use not simple.\n");
1870 return false;
1872 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
1876 if (overrun_p)
1878 gcc_assert (can_overrun_p);
1879 if (dump_enabled_p ())
1880 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1881 "Data access with gaps requires scalar "
1882 "epilogue loop\n");
1883 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
1886 return true;
1889 /* A subroutine of get_load_store_type, with a subset of the same
1890 arguments. Handle the case where STMT is a load or store that
1891 accesses consecutive elements with a negative step. */
1893 static vect_memory_access_type
1894 get_negative_load_store_type (gimple *stmt, tree vectype,
1895 vec_load_store_type vls_type,
1896 unsigned int ncopies)
1898 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1899 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
1900 dr_alignment_support alignment_support_scheme;
1902 if (ncopies > 1)
1904 if (dump_enabled_p ())
1905 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1906 "multiple types with negative step.\n");
1907 return VMAT_ELEMENTWISE;
1910 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
1911 if (alignment_support_scheme != dr_aligned
1912 && alignment_support_scheme != dr_unaligned_supported)
1914 if (dump_enabled_p ())
1915 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1916 "negative step but alignment required.\n");
1917 return VMAT_ELEMENTWISE;
1920 if (vls_type == VLS_STORE_INVARIANT)
1922 if (dump_enabled_p ())
1923 dump_printf_loc (MSG_NOTE, vect_location,
1924 "negative step with invariant source;"
1925 " no permute needed.\n");
1926 return VMAT_CONTIGUOUS_DOWN;
1929 if (!perm_mask_for_reverse (vectype))
1931 if (dump_enabled_p ())
1932 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1933 "negative step and reversing not supported.\n");
1934 return VMAT_ELEMENTWISE;
1937 return VMAT_CONTIGUOUS_REVERSE;
1940 /* Analyze load or store statement STMT of type VLS_TYPE. Return true
1941 if there is a memory access type that the vectorized form can use,
1942 storing it in *MEMORY_ACCESS_TYPE if so. If we decide to use gathers
1943 or scatters, fill in GS_INFO accordingly.
1945 SLP says whether we're performing SLP rather than loop vectorization.
1946 VECTYPE is the vector type that the vectorized statements will use.
1947 NCOPIES is the number of vector statements that will be needed. */
1949 static bool
1950 get_load_store_type (gimple *stmt, tree vectype, bool slp,
1951 vec_load_store_type vls_type, unsigned int ncopies,
1952 vect_memory_access_type *memory_access_type,
1953 gather_scatter_info *gs_info)
1955 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1956 vec_info *vinfo = stmt_info->vinfo;
1957 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1958 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
1959 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
1961 *memory_access_type = VMAT_GATHER_SCATTER;
1962 gimple *def_stmt;
1963 if (!vect_check_gather_scatter (stmt, loop_vinfo, gs_info))
1964 gcc_unreachable ();
1965 else if (!vect_is_simple_use (gs_info->offset, vinfo, &def_stmt,
1966 &gs_info->offset_dt,
1967 &gs_info->offset_vectype))
1969 if (dump_enabled_p ())
1970 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1971 "%s index use not simple.\n",
1972 vls_type == VLS_LOAD ? "gather" : "scatter");
1973 return false;
1976 else if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1978 if (!get_group_load_store_type (stmt, vectype, slp, vls_type,
1979 memory_access_type))
1980 return false;
1982 else if (STMT_VINFO_STRIDED_P (stmt_info))
1984 gcc_assert (!slp);
1985 *memory_access_type = VMAT_ELEMENTWISE;
1987 else
1989 int cmp = compare_step_with_zero (stmt);
1990 if (cmp < 0)
1991 *memory_access_type = get_negative_load_store_type
1992 (stmt, vectype, vls_type, ncopies);
1993 else if (cmp == 0)
1995 gcc_assert (vls_type == VLS_LOAD);
1996 *memory_access_type = VMAT_INVARIANT;
1998 else
1999 *memory_access_type = VMAT_CONTIGUOUS;
2002 if ((*memory_access_type == VMAT_ELEMENTWISE
2003 || *memory_access_type == VMAT_STRIDED_SLP)
2004 && !nunits.is_constant ())
2006 if (dump_enabled_p ())
2007 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2008 "Not using elementwise accesses due to variable "
2009 "vectorization factor.\n");
2010 return false;
2013 /* FIXME: At the moment the cost model seems to underestimate the
2014 cost of using elementwise accesses. This check preserves the
2015 traditional behavior until that can be fixed. */
2016 if (*memory_access_type == VMAT_ELEMENTWISE
2017 && !STMT_VINFO_STRIDED_P (stmt_info))
2019 if (dump_enabled_p ())
2020 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2021 "not falling back to elementwise accesses\n");
2022 return false;
2024 return true;
2027 /* Function vectorizable_mask_load_store.
2029 Check if STMT performs a conditional load or store that can be vectorized.
2030 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2031 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
2032 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2034 static bool
2035 vectorizable_mask_load_store (gimple *stmt, gimple_stmt_iterator *gsi,
2036 gimple **vec_stmt, slp_tree slp_node)
2038 tree vec_dest = NULL;
2039 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2040 stmt_vec_info prev_stmt_info;
2041 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2042 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2043 bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
2044 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
2045 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2046 tree rhs_vectype = NULL_TREE;
2047 tree mask_vectype;
2048 tree elem_type;
2049 gimple *new_stmt;
2050 tree dummy;
2051 tree dataref_ptr = NULL_TREE;
2052 gimple *ptr_incr;
2053 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2054 int ncopies;
2055 int i, j;
2056 bool inv_p;
2057 gather_scatter_info gs_info;
2058 vec_load_store_type vls_type;
2059 tree mask;
2060 gimple *def_stmt;
2061 enum vect_def_type dt;
2063 if (slp_node != NULL)
2064 return false;
2066 ncopies = vect_get_num_copies (loop_vinfo, vectype);
2067 gcc_assert (ncopies >= 1);
2069 mask = gimple_call_arg (stmt, 2);
2071 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (mask)))
2072 return false;
2074 /* FORNOW. This restriction should be relaxed. */
2075 if (nested_in_vect_loop && ncopies > 1)
2077 if (dump_enabled_p ())
2078 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2079 "multiple types in nested loop.");
2080 return false;
2083 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2084 return false;
2086 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
2087 && ! vec_stmt)
2088 return false;
2090 if (!STMT_VINFO_DATA_REF (stmt_info))
2091 return false;
2093 elem_type = TREE_TYPE (vectype);
2095 if (TREE_CODE (mask) != SSA_NAME)
2096 return false;
2098 if (!vect_is_simple_use (mask, loop_vinfo, &def_stmt, &dt, &mask_vectype))
2099 return false;
2101 if (!mask_vectype)
2102 mask_vectype = get_mask_type_for_scalar_type (TREE_TYPE (vectype));
2104 if (!mask_vectype || !VECTOR_BOOLEAN_TYPE_P (mask_vectype)
2105 || maybe_ne (TYPE_VECTOR_SUBPARTS (mask_vectype),
2106 TYPE_VECTOR_SUBPARTS (vectype)))
2107 return false;
2109 if (gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
2111 tree rhs = gimple_call_arg (stmt, 3);
2112 if (!vect_is_simple_use (rhs, loop_vinfo, &def_stmt, &dt, &rhs_vectype))
2113 return false;
2114 if (dt == vect_constant_def || dt == vect_external_def)
2115 vls_type = VLS_STORE_INVARIANT;
2116 else
2117 vls_type = VLS_STORE;
2119 else
2120 vls_type = VLS_LOAD;
2122 vect_memory_access_type memory_access_type;
2123 if (!get_load_store_type (stmt, vectype, false, vls_type, ncopies,
2124 &memory_access_type, &gs_info))
2125 return false;
2127 if (memory_access_type == VMAT_GATHER_SCATTER)
2129 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
2130 tree masktype
2131 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
2132 if (TREE_CODE (masktype) == INTEGER_TYPE)
2134 if (dump_enabled_p ())
2135 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2136 "masked gather with integer mask not supported.");
2137 return false;
2140 else if (memory_access_type != VMAT_CONTIGUOUS)
2142 if (dump_enabled_p ())
2143 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2144 "unsupported access type for masked %s.\n",
2145 vls_type == VLS_LOAD ? "load" : "store");
2146 return false;
2148 else if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2149 || !can_vec_mask_load_store_p (TYPE_MODE (vectype),
2150 TYPE_MODE (mask_vectype),
2151 vls_type == VLS_LOAD)
2152 || (rhs_vectype
2153 && !useless_type_conversion_p (vectype, rhs_vectype)))
2154 return false;
2156 if (!vec_stmt) /* transformation not required. */
2158 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
2159 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2160 if (vls_type == VLS_LOAD)
2161 vect_model_load_cost (stmt_info, ncopies, memory_access_type,
2162 NULL, NULL, NULL);
2163 else
2164 vect_model_store_cost (stmt_info, ncopies, memory_access_type,
2165 vls_type, NULL, NULL, NULL);
2166 return true;
2168 gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
2170 /* Transform. */
2172 if (memory_access_type == VMAT_GATHER_SCATTER)
2174 tree vec_oprnd0 = NULL_TREE, op;
2175 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
2176 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
2177 tree ptr, vec_mask = NULL_TREE, mask_op = NULL_TREE, var, scale;
2178 tree perm_mask = NULL_TREE, prev_res = NULL_TREE;
2179 tree mask_perm_mask = NULL_TREE;
2180 edge pe = loop_preheader_edge (loop);
2181 gimple_seq seq;
2182 basic_block new_bb;
2183 enum { NARROW, NONE, WIDEN } modifier;
2184 poly_uint64 gather_off_nunits
2185 = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);
2187 rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
2188 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2189 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2190 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2191 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2192 scaletype = TREE_VALUE (arglist);
2193 gcc_checking_assert (types_compatible_p (srctype, rettype)
2194 && types_compatible_p (srctype, masktype));
2196 if (known_eq (nunits, gather_off_nunits))
2197 modifier = NONE;
2198 else if (known_eq (nunits * 2, gather_off_nunits))
2200 modifier = WIDEN;
2202 /* Currently widening gathers and scatters are only supported for
2203 fixed-length vectors. */
2204 int count = gather_off_nunits.to_constant ();
2205 vec_perm_builder sel (count, count, 1);
2206 for (i = 0; i < count; ++i)
2207 sel.quick_push (i | (count / 2));
2209 vec_perm_indices indices (sel, 1, count);
2210 perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype,
2211 indices);
2213 else if (known_eq (nunits, gather_off_nunits * 2))
2215 modifier = NARROW;
2217 /* Currently narrowing gathers and scatters are only supported for
2218 fixed-length vectors. */
2219 int count = nunits.to_constant ();
2220 vec_perm_builder sel (count, count, 1);
2221 sel.quick_grow (count);
2222 for (i = 0; i < count; ++i)
2223 sel[i] = i < count / 2 ? i : i + count / 2;
2224 vec_perm_indices indices (sel, 2, count);
2225 perm_mask = vect_gen_perm_mask_checked (vectype, indices);
2227 ncopies *= 2;
2228 for (i = 0; i < count; ++i)
2229 sel[i] = i | (count / 2);
2230 indices.new_vector (sel, 2, count);
2231 mask_perm_mask = vect_gen_perm_mask_checked (masktype, indices);
2233 else
2234 gcc_unreachable ();
2236 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
2238 ptr = fold_convert (ptrtype, gs_info.base);
2239 if (!is_gimple_min_invariant (ptr))
2241 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
2242 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
2243 gcc_assert (!new_bb);
2246 scale = build_int_cst (scaletype, gs_info.scale);
2248 prev_stmt_info = NULL;
2249 for (j = 0; j < ncopies; ++j)
2251 if (modifier == WIDEN && (j & 1))
2252 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
2253 perm_mask, stmt, gsi);
2254 else if (j == 0)
2255 op = vec_oprnd0
2256 = vect_get_vec_def_for_operand (gs_info.offset, stmt);
2257 else
2258 op = vec_oprnd0
2259 = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt, vec_oprnd0);
2261 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
2263 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
2264 TYPE_VECTOR_SUBPARTS (idxtype)));
2265 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
2266 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
2267 new_stmt
2268 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
2269 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2270 op = var;
2273 if (mask_perm_mask && (j & 1))
2274 mask_op = permute_vec_elements (mask_op, mask_op,
2275 mask_perm_mask, stmt, gsi);
2276 else
2278 if (j == 0)
2279 vec_mask = vect_get_vec_def_for_operand (mask, stmt);
2280 else
2282 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
2283 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2286 mask_op = vec_mask;
2287 if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
2289 gcc_assert
2290 (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op)),
2291 TYPE_VECTOR_SUBPARTS (masktype)));
2292 var = vect_get_new_ssa_name (masktype, vect_simple_var);
2293 mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
2294 new_stmt
2295 = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op);
2296 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2297 mask_op = var;
2301 new_stmt
2302 = gimple_build_call (gs_info.decl, 5, mask_op, ptr, op, mask_op,
2303 scale);
2305 if (!useless_type_conversion_p (vectype, rettype))
2307 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype),
2308 TYPE_VECTOR_SUBPARTS (rettype)));
2309 op = vect_get_new_ssa_name (rettype, vect_simple_var);
2310 gimple_call_set_lhs (new_stmt, op);
2311 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2312 var = make_ssa_name (vec_dest);
2313 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
2314 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
2316 else
2318 var = make_ssa_name (vec_dest, new_stmt);
2319 gimple_call_set_lhs (new_stmt, var);
2322 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2324 if (modifier == NARROW)
2326 if ((j & 1) == 0)
2328 prev_res = var;
2329 continue;
2331 var = permute_vec_elements (prev_res, var,
2332 perm_mask, stmt, gsi);
2333 new_stmt = SSA_NAME_DEF_STMT (var);
2336 if (prev_stmt_info == NULL)
2337 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2338 else
2339 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2340 prev_stmt_info = vinfo_for_stmt (new_stmt);
2342 return true;
2344 else if (vls_type != VLS_LOAD)
2346 tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
2347 prev_stmt_info = NULL;
2348 LOOP_VINFO_HAS_MASK_STORE (loop_vinfo) = true;
2349 for (i = 0; i < ncopies; i++)
2351 unsigned align, misalign;
2353 if (i == 0)
2355 tree rhs = gimple_call_arg (stmt, 3);
2356 vec_rhs = vect_get_vec_def_for_operand (rhs, stmt);
2357 vec_mask = vect_get_vec_def_for_operand (mask, stmt,
2358 mask_vectype);
2359 /* We should have catched mismatched types earlier. */
2360 gcc_assert (useless_type_conversion_p (vectype,
2361 TREE_TYPE (vec_rhs)));
2362 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
2363 NULL_TREE, &dummy, gsi,
2364 &ptr_incr, false, &inv_p);
2365 gcc_assert (!inv_p);
2367 else
2369 vect_is_simple_use (vec_rhs, loop_vinfo, &def_stmt, &dt);
2370 vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs);
2371 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
2372 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2373 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2374 TYPE_SIZE_UNIT (vectype));
2377 align = DR_TARGET_ALIGNMENT (dr);
2378 if (aligned_access_p (dr))
2379 misalign = 0;
2380 else if (DR_MISALIGNMENT (dr) == -1)
2382 align = TYPE_ALIGN_UNIT (elem_type);
2383 misalign = 0;
2385 else
2386 misalign = DR_MISALIGNMENT (dr);
2387 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2388 misalign);
2389 tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)),
2390 misalign ? least_bit_hwi (misalign) : align);
2391 gcall *call
2392 = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
2393 ptr, vec_mask, vec_rhs);
2394 gimple_call_set_nothrow (call, true);
2395 new_stmt = call;
2396 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2397 if (i == 0)
2398 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2399 else
2400 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2401 prev_stmt_info = vinfo_for_stmt (new_stmt);
2404 else
2406 tree vec_mask = NULL_TREE;
2407 prev_stmt_info = NULL;
2408 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
2409 for (i = 0; i < ncopies; i++)
2411 unsigned align, misalign;
2413 if (i == 0)
2415 vec_mask = vect_get_vec_def_for_operand (mask, stmt,
2416 mask_vectype);
2417 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
2418 NULL_TREE, &dummy, gsi,
2419 &ptr_incr, false, &inv_p);
2420 gcc_assert (!inv_p);
2422 else
2424 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
2425 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2426 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2427 TYPE_SIZE_UNIT (vectype));
2430 align = DR_TARGET_ALIGNMENT (dr);
2431 if (aligned_access_p (dr))
2432 misalign = 0;
2433 else if (DR_MISALIGNMENT (dr) == -1)
2435 align = TYPE_ALIGN_UNIT (elem_type);
2436 misalign = 0;
2438 else
2439 misalign = DR_MISALIGNMENT (dr);
2440 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2441 misalign);
2442 tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)),
2443 misalign ? least_bit_hwi (misalign) : align);
2444 gcall *call
2445 = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
2446 ptr, vec_mask);
2447 gimple_call_set_lhs (call, make_ssa_name (vec_dest));
2448 gimple_call_set_nothrow (call, true);
2449 vect_finish_stmt_generation (stmt, call, gsi);
2450 if (i == 0)
2451 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = call;
2452 else
2453 STMT_VINFO_RELATED_STMT (prev_stmt_info) = call;
2454 prev_stmt_info = vinfo_for_stmt (call);
2458 return true;
2461 /* Check and perform vectorization of BUILT_IN_BSWAP{16,32,64}. */
2463 static bool
2464 vectorizable_bswap (gimple *stmt, gimple_stmt_iterator *gsi,
2465 gimple **vec_stmt, slp_tree slp_node,
2466 tree vectype_in, enum vect_def_type *dt)
2468 tree op, vectype;
2469 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2470 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2471 unsigned ncopies;
2472 unsigned HOST_WIDE_INT nunits, num_bytes;
2474 op = gimple_call_arg (stmt, 0);
2475 vectype = STMT_VINFO_VECTYPE (stmt_info);
2477 if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nunits))
2478 return false;
2480 /* Multiple types in SLP are handled by creating the appropriate number of
2481 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2482 case of SLP. */
2483 if (slp_node)
2484 ncopies = 1;
2485 else
2486 ncopies = vect_get_num_copies (loop_vinfo, vectype);
2488 gcc_assert (ncopies >= 1);
2490 tree char_vectype = get_same_sized_vectype (char_type_node, vectype_in);
2491 if (! char_vectype)
2492 return false;
2494 if (!TYPE_VECTOR_SUBPARTS (char_vectype).is_constant (&num_bytes))
2495 return false;
2497 unsigned word_bytes = num_bytes / nunits;
2499 /* The encoding uses one stepped pattern for each byte in the word. */
2500 vec_perm_builder elts (num_bytes, word_bytes, 3);
2501 for (unsigned i = 0; i < 3; ++i)
2502 for (unsigned j = 0; j < word_bytes; ++j)
2503 elts.quick_push ((i + 1) * word_bytes - j - 1);
2505 vec_perm_indices indices (elts, 1, num_bytes);
2506 if (!can_vec_perm_const_p (TYPE_MODE (char_vectype), indices))
2507 return false;
2509 if (! vec_stmt)
2511 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2512 if (dump_enabled_p ())
2513 dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_bswap ==="
2514 "\n");
2515 if (! PURE_SLP_STMT (stmt_info))
2517 add_stmt_cost (stmt_info->vinfo->target_cost_data,
2518 1, vector_stmt, stmt_info, 0, vect_prologue);
2519 add_stmt_cost (stmt_info->vinfo->target_cost_data,
2520 ncopies, vec_perm, stmt_info, 0, vect_body);
2522 return true;
2525 tree bswap_vconst = vec_perm_indices_to_tree (char_vectype, indices);
2527 /* Transform. */
2528 vec<tree> vec_oprnds = vNULL;
2529 gimple *new_stmt = NULL;
2530 stmt_vec_info prev_stmt_info = NULL;
2531 for (unsigned j = 0; j < ncopies; j++)
2533 /* Handle uses. */
2534 if (j == 0)
2535 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
2536 else
2537 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
2539 /* Arguments are ready. create the new vector stmt. */
2540 unsigned i;
2541 tree vop;
2542 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
2544 tree tem = make_ssa_name (char_vectype);
2545 new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
2546 char_vectype, vop));
2547 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2548 tree tem2 = make_ssa_name (char_vectype);
2549 new_stmt = gimple_build_assign (tem2, VEC_PERM_EXPR,
2550 tem, tem, bswap_vconst);
2551 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2552 tem = make_ssa_name (vectype);
2553 new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
2554 vectype, tem2));
2555 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2556 if (slp_node)
2557 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2560 if (slp_node)
2561 continue;
2563 if (j == 0)
2564 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2565 else
2566 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2568 prev_stmt_info = vinfo_for_stmt (new_stmt);
2571 vec_oprnds.release ();
2572 return true;
2575 /* Return true if vector types VECTYPE_IN and VECTYPE_OUT have
2576 integer elements and if we can narrow VECTYPE_IN to VECTYPE_OUT
2577 in a single step. On success, store the binary pack code in
2578 *CONVERT_CODE. */
2580 static bool
2581 simple_integer_narrowing (tree vectype_out, tree vectype_in,
2582 tree_code *convert_code)
2584 if (!INTEGRAL_TYPE_P (TREE_TYPE (vectype_out))
2585 || !INTEGRAL_TYPE_P (TREE_TYPE (vectype_in)))
2586 return false;
2588 tree_code code;
2589 int multi_step_cvt = 0;
2590 auto_vec <tree, 8> interm_types;
2591 if (!supportable_narrowing_operation (NOP_EXPR, vectype_out, vectype_in,
2592 &code, &multi_step_cvt,
2593 &interm_types)
2594 || multi_step_cvt)
2595 return false;
2597 *convert_code = code;
2598 return true;
2601 /* Function vectorizable_call.
2603 Check if GS performs a function call that can be vectorized.
2604 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2605 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2606 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2608 static bool
2609 vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
2610 slp_tree slp_node)
2612 gcall *stmt;
2613 tree vec_dest;
2614 tree scalar_dest;
2615 tree op, type;
2616 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
2617 stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info;
2618 tree vectype_out, vectype_in;
2619 poly_uint64 nunits_in;
2620 poly_uint64 nunits_out;
2621 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2622 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2623 vec_info *vinfo = stmt_info->vinfo;
2624 tree fndecl, new_temp, rhs_type;
2625 gimple *def_stmt;
2626 enum vect_def_type dt[3]
2627 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
2628 int ndts = 3;
2629 gimple *new_stmt = NULL;
2630 int ncopies, j;
2631 vec<tree> vargs = vNULL;
2632 enum { NARROW, NONE, WIDEN } modifier;
2633 size_t i, nargs;
2634 tree lhs;
2636 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2637 return false;
2639 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
2640 && ! vec_stmt)
2641 return false;
2643 /* Is GS a vectorizable call? */
2644 stmt = dyn_cast <gcall *> (gs);
2645 if (!stmt)
2646 return false;
2648 if (gimple_call_internal_p (stmt)
2649 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
2650 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
2651 return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
2652 slp_node);
2654 if (gimple_call_lhs (stmt) == NULL_TREE
2655 || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
2656 return false;
2658 gcc_checking_assert (!stmt_can_throw_internal (stmt));
2660 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2662 /* Process function arguments. */
2663 rhs_type = NULL_TREE;
2664 vectype_in = NULL_TREE;
2665 nargs = gimple_call_num_args (stmt);
2667 /* Bail out if the function has more than three arguments, we do not have
2668 interesting builtin functions to vectorize with more than two arguments
2669 except for fma. No arguments is also not good. */
2670 if (nargs == 0 || nargs > 3)
2671 return false;
2673 /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic. */
2674 if (gimple_call_internal_p (stmt)
2675 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2677 nargs = 0;
2678 rhs_type = unsigned_type_node;
2681 for (i = 0; i < nargs; i++)
2683 tree opvectype;
2685 op = gimple_call_arg (stmt, i);
2687 /* We can only handle calls with arguments of the same type. */
2688 if (rhs_type
2689 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
2691 if (dump_enabled_p ())
2692 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2693 "argument types differ.\n");
2694 return false;
2696 if (!rhs_type)
2697 rhs_type = TREE_TYPE (op);
2699 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[i], &opvectype))
2701 if (dump_enabled_p ())
2702 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2703 "use not simple.\n");
2704 return false;
2707 if (!vectype_in)
2708 vectype_in = opvectype;
2709 else if (opvectype
2710 && opvectype != vectype_in)
2712 if (dump_enabled_p ())
2713 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2714 "argument vector types differ.\n");
2715 return false;
2718 /* If all arguments are external or constant defs use a vector type with
2719 the same size as the output vector type. */
2720 if (!vectype_in)
2721 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
2722 if (vec_stmt)
2723 gcc_assert (vectype_in);
2724 if (!vectype_in)
2726 if (dump_enabled_p ())
2728 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2729 "no vectype for scalar type ");
2730 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
2731 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2734 return false;
2737 /* FORNOW */
2738 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2739 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2740 if (known_eq (nunits_in * 2, nunits_out))
2741 modifier = NARROW;
2742 else if (known_eq (nunits_out, nunits_in))
2743 modifier = NONE;
2744 else if (known_eq (nunits_out * 2, nunits_in))
2745 modifier = WIDEN;
2746 else
2747 return false;
2749 /* We only handle functions that do not read or clobber memory. */
2750 if (gimple_vuse (stmt))
2752 if (dump_enabled_p ())
2753 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2754 "function reads from or writes to memory.\n");
2755 return false;
2758 /* For now, we only vectorize functions if a target specific builtin
2759 is available. TODO -- in some cases, it might be profitable to
2760 insert the calls for pieces of the vector, in order to be able
2761 to vectorize other operations in the loop. */
2762 fndecl = NULL_TREE;
2763 internal_fn ifn = IFN_LAST;
2764 combined_fn cfn = gimple_call_combined_fn (stmt);
2765 tree callee = gimple_call_fndecl (stmt);
2767 /* First try using an internal function. */
2768 tree_code convert_code = ERROR_MARK;
2769 if (cfn != CFN_LAST
2770 && (modifier == NONE
2771 || (modifier == NARROW
2772 && simple_integer_narrowing (vectype_out, vectype_in,
2773 &convert_code))))
2774 ifn = vectorizable_internal_function (cfn, callee, vectype_out,
2775 vectype_in);
2777 /* If that fails, try asking for a target-specific built-in function. */
2778 if (ifn == IFN_LAST)
2780 if (cfn != CFN_LAST)
2781 fndecl = targetm.vectorize.builtin_vectorized_function
2782 (cfn, vectype_out, vectype_in);
2783 else
2784 fndecl = targetm.vectorize.builtin_md_vectorized_function
2785 (callee, vectype_out, vectype_in);
2788 if (ifn == IFN_LAST && !fndecl)
2790 if (cfn == CFN_GOMP_SIMD_LANE
2791 && !slp_node
2792 && loop_vinfo
2793 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2794 && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
2795 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2796 == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
2798 /* We can handle IFN_GOMP_SIMD_LANE by returning a
2799 { 0, 1, 2, ... vf - 1 } vector. */
2800 gcc_assert (nargs == 0);
2802 else if (modifier == NONE
2803 && (gimple_call_builtin_p (stmt, BUILT_IN_BSWAP16)
2804 || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP32)
2805 || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP64)))
2806 return vectorizable_bswap (stmt, gsi, vec_stmt, slp_node,
2807 vectype_in, dt);
2808 else
2810 if (dump_enabled_p ())
2811 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2812 "function is not vectorizable.\n");
2813 return false;
2817 if (slp_node)
2818 ncopies = 1;
2819 else if (modifier == NARROW && ifn == IFN_LAST)
2820 ncopies = vect_get_num_copies (loop_vinfo, vectype_out);
2821 else
2822 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
2824 /* Sanity check: make sure that at least one copy of the vectorized stmt
2825 needs to be generated. */
2826 gcc_assert (ncopies >= 1);
2828 if (!vec_stmt) /* transformation not required. */
2830 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2831 if (dump_enabled_p ())
2832 dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
2833 "\n");
2834 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
2835 if (ifn != IFN_LAST && modifier == NARROW && !slp_node)
2836 add_stmt_cost (stmt_info->vinfo->target_cost_data, ncopies / 2,
2837 vec_promote_demote, stmt_info, 0, vect_body);
2839 return true;
2842 /* Transform. */
2844 if (dump_enabled_p ())
2845 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
2847 /* Handle def. */
2848 scalar_dest = gimple_call_lhs (stmt);
2849 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2851 prev_stmt_info = NULL;
2852 if (modifier == NONE || ifn != IFN_LAST)
2854 tree prev_res = NULL_TREE;
2855 for (j = 0; j < ncopies; ++j)
2857 /* Build argument list for the vectorized call. */
2858 if (j == 0)
2859 vargs.create (nargs);
2860 else
2861 vargs.truncate (0);
2863 if (slp_node)
2865 auto_vec<vec<tree> > vec_defs (nargs);
2866 vec<tree> vec_oprnds0;
2868 for (i = 0; i < nargs; i++)
2869 vargs.quick_push (gimple_call_arg (stmt, i));
2870 vect_get_slp_defs (vargs, slp_node, &vec_defs);
2871 vec_oprnds0 = vec_defs[0];
2873 /* Arguments are ready. Create the new vector stmt. */
2874 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
2876 size_t k;
2877 for (k = 0; k < nargs; k++)
2879 vec<tree> vec_oprndsk = vec_defs[k];
2880 vargs[k] = vec_oprndsk[i];
2882 if (modifier == NARROW)
2884 tree half_res = make_ssa_name (vectype_in);
2885 gcall *call
2886 = gimple_build_call_internal_vec (ifn, vargs);
2887 gimple_call_set_lhs (call, half_res);
2888 gimple_call_set_nothrow (call, true);
2889 new_stmt = call;
2890 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2891 if ((i & 1) == 0)
2893 prev_res = half_res;
2894 continue;
2896 new_temp = make_ssa_name (vec_dest);
2897 new_stmt = gimple_build_assign (new_temp, convert_code,
2898 prev_res, half_res);
2900 else
2902 gcall *call;
2903 if (ifn != IFN_LAST)
2904 call = gimple_build_call_internal_vec (ifn, vargs);
2905 else
2906 call = gimple_build_call_vec (fndecl, vargs);
2907 new_temp = make_ssa_name (vec_dest, call);
2908 gimple_call_set_lhs (call, new_temp);
2909 gimple_call_set_nothrow (call, true);
2910 new_stmt = call;
2912 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2913 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2916 for (i = 0; i < nargs; i++)
2918 vec<tree> vec_oprndsi = vec_defs[i];
2919 vec_oprndsi.release ();
2921 continue;
2924 for (i = 0; i < nargs; i++)
2926 op = gimple_call_arg (stmt, i);
2927 if (j == 0)
2928 vec_oprnd0
2929 = vect_get_vec_def_for_operand (op, stmt);
2930 else
2932 vec_oprnd0 = gimple_call_arg (new_stmt, i);
2933 vec_oprnd0
2934 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2937 vargs.quick_push (vec_oprnd0);
2940 if (gimple_call_internal_p (stmt)
2941 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2943 tree cst = build_index_vector (vectype_out, j * nunits_out, 1);
2944 tree new_var
2945 = vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_");
2946 gimple *init_stmt = gimple_build_assign (new_var, cst);
2947 vect_init_vector_1 (stmt, init_stmt, NULL);
2948 new_temp = make_ssa_name (vec_dest);
2949 new_stmt = gimple_build_assign (new_temp, new_var);
2951 else if (modifier == NARROW)
2953 tree half_res = make_ssa_name (vectype_in);
2954 gcall *call = gimple_build_call_internal_vec (ifn, vargs);
2955 gimple_call_set_lhs (call, half_res);
2956 gimple_call_set_nothrow (call, true);
2957 new_stmt = call;
2958 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2959 if ((j & 1) == 0)
2961 prev_res = half_res;
2962 continue;
2964 new_temp = make_ssa_name (vec_dest);
2965 new_stmt = gimple_build_assign (new_temp, convert_code,
2966 prev_res, half_res);
2968 else
2970 gcall *call;
2971 if (ifn != IFN_LAST)
2972 call = gimple_build_call_internal_vec (ifn, vargs);
2973 else
2974 call = gimple_build_call_vec (fndecl, vargs);
2975 new_temp = make_ssa_name (vec_dest, new_stmt);
2976 gimple_call_set_lhs (call, new_temp);
2977 gimple_call_set_nothrow (call, true);
2978 new_stmt = call;
2980 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2982 if (j == (modifier == NARROW ? 1 : 0))
2983 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2984 else
2985 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2987 prev_stmt_info = vinfo_for_stmt (new_stmt);
2990 else if (modifier == NARROW)
2992 for (j = 0; j < ncopies; ++j)
2994 /* Build argument list for the vectorized call. */
2995 if (j == 0)
2996 vargs.create (nargs * 2);
2997 else
2998 vargs.truncate (0);
3000 if (slp_node)
3002 auto_vec<vec<tree> > vec_defs (nargs);
3003 vec<tree> vec_oprnds0;
3005 for (i = 0; i < nargs; i++)
3006 vargs.quick_push (gimple_call_arg (stmt, i));
3007 vect_get_slp_defs (vargs, slp_node, &vec_defs);
3008 vec_oprnds0 = vec_defs[0];
3010 /* Arguments are ready. Create the new vector stmt. */
3011 for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
3013 size_t k;
3014 vargs.truncate (0);
3015 for (k = 0; k < nargs; k++)
3017 vec<tree> vec_oprndsk = vec_defs[k];
3018 vargs.quick_push (vec_oprndsk[i]);
3019 vargs.quick_push (vec_oprndsk[i + 1]);
3021 gcall *call;
3022 if (ifn != IFN_LAST)
3023 call = gimple_build_call_internal_vec (ifn, vargs);
3024 else
3025 call = gimple_build_call_vec (fndecl, vargs);
3026 new_temp = make_ssa_name (vec_dest, call);
3027 gimple_call_set_lhs (call, new_temp);
3028 gimple_call_set_nothrow (call, true);
3029 new_stmt = call;
3030 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3031 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3034 for (i = 0; i < nargs; i++)
3036 vec<tree> vec_oprndsi = vec_defs[i];
3037 vec_oprndsi.release ();
3039 continue;
3042 for (i = 0; i < nargs; i++)
3044 op = gimple_call_arg (stmt, i);
3045 if (j == 0)
3047 vec_oprnd0
3048 = vect_get_vec_def_for_operand (op, stmt);
3049 vec_oprnd1
3050 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
3052 else
3054 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
3055 vec_oprnd0
3056 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
3057 vec_oprnd1
3058 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
3061 vargs.quick_push (vec_oprnd0);
3062 vargs.quick_push (vec_oprnd1);
3065 new_stmt = gimple_build_call_vec (fndecl, vargs);
3066 new_temp = make_ssa_name (vec_dest, new_stmt);
3067 gimple_call_set_lhs (new_stmt, new_temp);
3068 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3070 if (j == 0)
3071 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3072 else
3073 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3075 prev_stmt_info = vinfo_for_stmt (new_stmt);
3078 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3080 else
3081 /* No current target implements this case. */
3082 return false;
3084 vargs.release ();
3086 /* The call in STMT might prevent it from being removed in dce.
3087 We however cannot remove it here, due to the way the ssa name
3088 it defines is mapped to the new definition. So just replace
3089 rhs of the statement with something harmless. */
3091 if (slp_node)
3092 return true;
3094 type = TREE_TYPE (scalar_dest);
3095 if (is_pattern_stmt_p (stmt_info))
3096 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
3097 else
3098 lhs = gimple_call_lhs (stmt);
3100 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
3101 set_vinfo_for_stmt (new_stmt, stmt_info);
3102 set_vinfo_for_stmt (stmt, NULL);
3103 STMT_VINFO_STMT (stmt_info) = new_stmt;
3104 gsi_replace (gsi, new_stmt, false);
3106 return true;
3110 struct simd_call_arg_info
3112 tree vectype;
3113 tree op;
3114 HOST_WIDE_INT linear_step;
3115 enum vect_def_type dt;
3116 unsigned int align;
3117 bool simd_lane_linear;
3120 /* Helper function of vectorizable_simd_clone_call. If OP, an SSA_NAME,
3121 is linear within simd lane (but not within whole loop), note it in
3122 *ARGINFO. */
3124 static void
3125 vect_simd_lane_linear (tree op, struct loop *loop,
3126 struct simd_call_arg_info *arginfo)
3128 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
3130 if (!is_gimple_assign (def_stmt)
3131 || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
3132 || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
3133 return;
3135 tree base = gimple_assign_rhs1 (def_stmt);
3136 HOST_WIDE_INT linear_step = 0;
3137 tree v = gimple_assign_rhs2 (def_stmt);
3138 while (TREE_CODE (v) == SSA_NAME)
3140 tree t;
3141 def_stmt = SSA_NAME_DEF_STMT (v);
3142 if (is_gimple_assign (def_stmt))
3143 switch (gimple_assign_rhs_code (def_stmt))
3145 case PLUS_EXPR:
3146 t = gimple_assign_rhs2 (def_stmt);
3147 if (linear_step || TREE_CODE (t) != INTEGER_CST)
3148 return;
3149 base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
3150 v = gimple_assign_rhs1 (def_stmt);
3151 continue;
3152 case MULT_EXPR:
3153 t = gimple_assign_rhs2 (def_stmt);
3154 if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
3155 return;
3156 linear_step = tree_to_shwi (t);
3157 v = gimple_assign_rhs1 (def_stmt);
3158 continue;
3159 CASE_CONVERT:
3160 t = gimple_assign_rhs1 (def_stmt);
3161 if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
3162 || (TYPE_PRECISION (TREE_TYPE (v))
3163 < TYPE_PRECISION (TREE_TYPE (t))))
3164 return;
3165 if (!linear_step)
3166 linear_step = 1;
3167 v = t;
3168 continue;
3169 default:
3170 return;
3172 else if (gimple_call_internal_p (def_stmt, IFN_GOMP_SIMD_LANE)
3173 && loop->simduid
3174 && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
3175 && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
3176 == loop->simduid))
3178 if (!linear_step)
3179 linear_step = 1;
3180 arginfo->linear_step = linear_step;
3181 arginfo->op = base;
3182 arginfo->simd_lane_linear = true;
3183 return;
3188 /* Return the number of elements in vector type VECTYPE, which is associated
3189 with a SIMD clone. At present these vectors always have a constant
3190 length. */
3192 static unsigned HOST_WIDE_INT
3193 simd_clone_subparts (tree vectype)
3195 return TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
3198 /* Function vectorizable_simd_clone_call.
3200 Check if STMT performs a function call that can be vectorized
3201 by calling a simd clone of the function.
3202 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3203 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3204 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3206 static bool
3207 vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
3208 gimple **vec_stmt, slp_tree slp_node)
3210 tree vec_dest;
3211 tree scalar_dest;
3212 tree op, type;
3213 tree vec_oprnd0 = NULL_TREE;
3214 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
3215 tree vectype;
3216 unsigned int nunits;
3217 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3218 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3219 vec_info *vinfo = stmt_info->vinfo;
3220 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
3221 tree fndecl, new_temp;
3222 gimple *def_stmt;
3223 gimple *new_stmt = NULL;
3224 int ncopies, j;
3225 auto_vec<simd_call_arg_info> arginfo;
3226 vec<tree> vargs = vNULL;
3227 size_t i, nargs;
3228 tree lhs, rtype, ratype;
3229 vec<constructor_elt, va_gc> *ret_ctor_elts = NULL;
3231 /* Is STMT a vectorizable call? */
3232 if (!is_gimple_call (stmt))
3233 return false;
3235 fndecl = gimple_call_fndecl (stmt);
3236 if (fndecl == NULL_TREE)
3237 return false;
3239 struct cgraph_node *node = cgraph_node::get (fndecl);
3240 if (node == NULL || node->simd_clones == NULL)
3241 return false;
3243 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3244 return false;
3246 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
3247 && ! vec_stmt)
3248 return false;
3250 if (gimple_call_lhs (stmt)
3251 && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
3252 return false;
3254 gcc_checking_assert (!stmt_can_throw_internal (stmt));
3256 vectype = STMT_VINFO_VECTYPE (stmt_info);
3258 if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
3259 return false;
3261 /* FORNOW */
3262 if (slp_node)
3263 return false;
3265 /* Process function arguments. */
3266 nargs = gimple_call_num_args (stmt);
3268 /* Bail out if the function has zero arguments. */
3269 if (nargs == 0)
3270 return false;
3272 arginfo.reserve (nargs, true);
3274 for (i = 0; i < nargs; i++)
3276 simd_call_arg_info thisarginfo;
3277 affine_iv iv;
3279 thisarginfo.linear_step = 0;
3280 thisarginfo.align = 0;
3281 thisarginfo.op = NULL_TREE;
3282 thisarginfo.simd_lane_linear = false;
3284 op = gimple_call_arg (stmt, i);
3285 if (!vect_is_simple_use (op, vinfo, &def_stmt, &thisarginfo.dt,
3286 &thisarginfo.vectype)
3287 || thisarginfo.dt == vect_uninitialized_def)
3289 if (dump_enabled_p ())
3290 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3291 "use not simple.\n");
3292 return false;
3295 if (thisarginfo.dt == vect_constant_def
3296 || thisarginfo.dt == vect_external_def)
3297 gcc_assert (thisarginfo.vectype == NULL_TREE);
3298 else
3299 gcc_assert (thisarginfo.vectype != NULL_TREE);
3301 /* For linear arguments, the analyze phase should have saved
3302 the base and step in STMT_VINFO_SIMD_CLONE_INFO. */
3303 if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
3304 && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
3306 gcc_assert (vec_stmt);
3307 thisarginfo.linear_step
3308 = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
3309 thisarginfo.op
3310 = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
3311 thisarginfo.simd_lane_linear
3312 = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
3313 == boolean_true_node);
3314 /* If loop has been peeled for alignment, we need to adjust it. */
3315 tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
3316 tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
3317 if (n1 != n2 && !thisarginfo.simd_lane_linear)
3319 tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
3320 tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
3321 tree opt = TREE_TYPE (thisarginfo.op);
3322 bias = fold_convert (TREE_TYPE (step), bias);
3323 bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
3324 thisarginfo.op
3325 = fold_build2 (POINTER_TYPE_P (opt)
3326 ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
3327 thisarginfo.op, bias);
3330 else if (!vec_stmt
3331 && thisarginfo.dt != vect_constant_def
3332 && thisarginfo.dt != vect_external_def
3333 && loop_vinfo
3334 && TREE_CODE (op) == SSA_NAME
3335 && simple_iv (loop, loop_containing_stmt (stmt), op,
3336 &iv, false)
3337 && tree_fits_shwi_p (iv.step))
3339 thisarginfo.linear_step = tree_to_shwi (iv.step);
3340 thisarginfo.op = iv.base;
3342 else if ((thisarginfo.dt == vect_constant_def
3343 || thisarginfo.dt == vect_external_def)
3344 && POINTER_TYPE_P (TREE_TYPE (op)))
3345 thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
3346 /* Addresses of array elements indexed by GOMP_SIMD_LANE are
3347 linear too. */
3348 if (POINTER_TYPE_P (TREE_TYPE (op))
3349 && !thisarginfo.linear_step
3350 && !vec_stmt
3351 && thisarginfo.dt != vect_constant_def
3352 && thisarginfo.dt != vect_external_def
3353 && loop_vinfo
3354 && !slp_node
3355 && TREE_CODE (op) == SSA_NAME)
3356 vect_simd_lane_linear (op, loop, &thisarginfo);
3358 arginfo.quick_push (thisarginfo);
3361 unsigned HOST_WIDE_INT vf;
3362 if (!LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
3364 if (dump_enabled_p ())
3365 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3366 "not considering SIMD clones; not yet supported"
3367 " for variable-width vectors.\n");
3368 return NULL;
3371 unsigned int badness = 0;
3372 struct cgraph_node *bestn = NULL;
3373 if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
3374 bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
3375 else
3376 for (struct cgraph_node *n = node->simd_clones; n != NULL;
3377 n = n->simdclone->next_clone)
3379 unsigned int this_badness = 0;
3380 if (n->simdclone->simdlen > vf
3381 || n->simdclone->nargs != nargs)
3382 continue;
3383 if (n->simdclone->simdlen < vf)
3384 this_badness += (exact_log2 (vf)
3385 - exact_log2 (n->simdclone->simdlen)) * 1024;
3386 if (n->simdclone->inbranch)
3387 this_badness += 2048;
3388 int target_badness = targetm.simd_clone.usable (n);
3389 if (target_badness < 0)
3390 continue;
3391 this_badness += target_badness * 512;
3392 /* FORNOW: Have to add code to add the mask argument. */
3393 if (n->simdclone->inbranch)
3394 continue;
3395 for (i = 0; i < nargs; i++)
3397 switch (n->simdclone->args[i].arg_type)
3399 case SIMD_CLONE_ARG_TYPE_VECTOR:
3400 if (!useless_type_conversion_p
3401 (n->simdclone->args[i].orig_type,
3402 TREE_TYPE (gimple_call_arg (stmt, i))))
3403 i = -1;
3404 else if (arginfo[i].dt == vect_constant_def
3405 || arginfo[i].dt == vect_external_def
3406 || arginfo[i].linear_step)
3407 this_badness += 64;
3408 break;
3409 case SIMD_CLONE_ARG_TYPE_UNIFORM:
3410 if (arginfo[i].dt != vect_constant_def
3411 && arginfo[i].dt != vect_external_def)
3412 i = -1;
3413 break;
3414 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
3415 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
3416 if (arginfo[i].dt == vect_constant_def
3417 || arginfo[i].dt == vect_external_def
3418 || (arginfo[i].linear_step
3419 != n->simdclone->args[i].linear_step))
3420 i = -1;
3421 break;
3422 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
3423 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
3424 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
3425 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
3426 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
3427 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
3428 /* FORNOW */
3429 i = -1;
3430 break;
3431 case SIMD_CLONE_ARG_TYPE_MASK:
3432 gcc_unreachable ();
3434 if (i == (size_t) -1)
3435 break;
3436 if (n->simdclone->args[i].alignment > arginfo[i].align)
3438 i = -1;
3439 break;
3441 if (arginfo[i].align)
3442 this_badness += (exact_log2 (arginfo[i].align)
3443 - exact_log2 (n->simdclone->args[i].alignment));
3445 if (i == (size_t) -1)
3446 continue;
3447 if (bestn == NULL || this_badness < badness)
3449 bestn = n;
3450 badness = this_badness;
3454 if (bestn == NULL)
3455 return false;
3457 for (i = 0; i < nargs; i++)
3458 if ((arginfo[i].dt == vect_constant_def
3459 || arginfo[i].dt == vect_external_def)
3460 && bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
3462 arginfo[i].vectype
3463 = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
3464 i)));
3465 if (arginfo[i].vectype == NULL
3466 || (simd_clone_subparts (arginfo[i].vectype)
3467 > bestn->simdclone->simdlen))
3468 return false;
3471 fndecl = bestn->decl;
3472 nunits = bestn->simdclone->simdlen;
3473 ncopies = vf / nunits;
3475 /* If the function isn't const, only allow it in simd loops where user
3476 has asserted that at least nunits consecutive iterations can be
3477 performed using SIMD instructions. */
3478 if ((loop == NULL || (unsigned) loop->safelen < nunits)
3479 && gimple_vuse (stmt))
3480 return false;
3482 /* Sanity check: make sure that at least one copy of the vectorized stmt
3483 needs to be generated. */
3484 gcc_assert (ncopies >= 1);
3486 if (!vec_stmt) /* transformation not required. */
3488 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
3489 for (i = 0; i < nargs; i++)
3490 if ((bestn->simdclone->args[i].arg_type
3491 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
3492 || (bestn->simdclone->args[i].arg_type
3493 == SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP))
3495 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
3496 + 1);
3497 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
3498 tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
3499 ? size_type_node : TREE_TYPE (arginfo[i].op);
3500 tree ls = build_int_cst (lst, arginfo[i].linear_step);
3501 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
3502 tree sll = arginfo[i].simd_lane_linear
3503 ? boolean_true_node : boolean_false_node;
3504 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
3506 STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
3507 if (dump_enabled_p ())
3508 dump_printf_loc (MSG_NOTE, vect_location,
3509 "=== vectorizable_simd_clone_call ===\n");
3510 /* vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
3511 return true;
3514 /* Transform. */
3516 if (dump_enabled_p ())
3517 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
3519 /* Handle def. */
3520 scalar_dest = gimple_call_lhs (stmt);
3521 vec_dest = NULL_TREE;
3522 rtype = NULL_TREE;
3523 ratype = NULL_TREE;
3524 if (scalar_dest)
3526 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3527 rtype = TREE_TYPE (TREE_TYPE (fndecl));
3528 if (TREE_CODE (rtype) == ARRAY_TYPE)
3530 ratype = rtype;
3531 rtype = TREE_TYPE (ratype);
3535 prev_stmt_info = NULL;
3536 for (j = 0; j < ncopies; ++j)
3538 /* Build argument list for the vectorized call. */
3539 if (j == 0)
3540 vargs.create (nargs);
3541 else
3542 vargs.truncate (0);
3544 for (i = 0; i < nargs; i++)
3546 unsigned int k, l, m, o;
3547 tree atype;
3548 op = gimple_call_arg (stmt, i);
3549 switch (bestn->simdclone->args[i].arg_type)
3551 case SIMD_CLONE_ARG_TYPE_VECTOR:
3552 atype = bestn->simdclone->args[i].vector_type;
3553 o = nunits / simd_clone_subparts (atype);
3554 for (m = j * o; m < (j + 1) * o; m++)
3556 if (simd_clone_subparts (atype)
3557 < simd_clone_subparts (arginfo[i].vectype))
3559 poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
3560 k = (simd_clone_subparts (arginfo[i].vectype)
3561 / simd_clone_subparts (atype));
3562 gcc_assert ((k & (k - 1)) == 0);
3563 if (m == 0)
3564 vec_oprnd0
3565 = vect_get_vec_def_for_operand (op, stmt);
3566 else
3568 vec_oprnd0 = arginfo[i].op;
3569 if ((m & (k - 1)) == 0)
3570 vec_oprnd0
3571 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
3572 vec_oprnd0);
3574 arginfo[i].op = vec_oprnd0;
3575 vec_oprnd0
3576 = build3 (BIT_FIELD_REF, atype, vec_oprnd0,
3577 bitsize_int (prec),
3578 bitsize_int ((m & (k - 1)) * prec));
3579 new_stmt
3580 = gimple_build_assign (make_ssa_name (atype),
3581 vec_oprnd0);
3582 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3583 vargs.safe_push (gimple_assign_lhs (new_stmt));
3585 else
3587 k = (simd_clone_subparts (atype)
3588 / simd_clone_subparts (arginfo[i].vectype));
3589 gcc_assert ((k & (k - 1)) == 0);
3590 vec<constructor_elt, va_gc> *ctor_elts;
3591 if (k != 1)
3592 vec_alloc (ctor_elts, k);
3593 else
3594 ctor_elts = NULL;
3595 for (l = 0; l < k; l++)
3597 if (m == 0 && l == 0)
3598 vec_oprnd0
3599 = vect_get_vec_def_for_operand (op, stmt);
3600 else
3601 vec_oprnd0
3602 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
3603 arginfo[i].op);
3604 arginfo[i].op = vec_oprnd0;
3605 if (k == 1)
3606 break;
3607 CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
3608 vec_oprnd0);
3610 if (k == 1)
3611 vargs.safe_push (vec_oprnd0);
3612 else
3614 vec_oprnd0 = build_constructor (atype, ctor_elts);
3615 new_stmt
3616 = gimple_build_assign (make_ssa_name (atype),
3617 vec_oprnd0);
3618 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3619 vargs.safe_push (gimple_assign_lhs (new_stmt));
3623 break;
3624 case SIMD_CLONE_ARG_TYPE_UNIFORM:
3625 vargs.safe_push (op);
3626 break;
3627 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
3628 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
3629 if (j == 0)
3631 gimple_seq stmts;
3632 arginfo[i].op
3633 = force_gimple_operand (arginfo[i].op, &stmts, true,
3634 NULL_TREE);
3635 if (stmts != NULL)
3637 basic_block new_bb;
3638 edge pe = loop_preheader_edge (loop);
3639 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
3640 gcc_assert (!new_bb);
3642 if (arginfo[i].simd_lane_linear)
3644 vargs.safe_push (arginfo[i].op);
3645 break;
3647 tree phi_res = copy_ssa_name (op);
3648 gphi *new_phi = create_phi_node (phi_res, loop->header);
3649 set_vinfo_for_stmt (new_phi,
3650 new_stmt_vec_info (new_phi, loop_vinfo));
3651 add_phi_arg (new_phi, arginfo[i].op,
3652 loop_preheader_edge (loop), UNKNOWN_LOCATION);
3653 enum tree_code code
3654 = POINTER_TYPE_P (TREE_TYPE (op))
3655 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3656 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3657 ? sizetype : TREE_TYPE (op);
3658 widest_int cst
3659 = wi::mul (bestn->simdclone->args[i].linear_step,
3660 ncopies * nunits);
3661 tree tcst = wide_int_to_tree (type, cst);
3662 tree phi_arg = copy_ssa_name (op);
3663 new_stmt
3664 = gimple_build_assign (phi_arg, code, phi_res, tcst);
3665 gimple_stmt_iterator si = gsi_after_labels (loop->header);
3666 gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
3667 set_vinfo_for_stmt (new_stmt,
3668 new_stmt_vec_info (new_stmt, loop_vinfo));
3669 add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
3670 UNKNOWN_LOCATION);
3671 arginfo[i].op = phi_res;
3672 vargs.safe_push (phi_res);
3674 else
3676 enum tree_code code
3677 = POINTER_TYPE_P (TREE_TYPE (op))
3678 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3679 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3680 ? sizetype : TREE_TYPE (op);
3681 widest_int cst
3682 = wi::mul (bestn->simdclone->args[i].linear_step,
3683 j * nunits);
3684 tree tcst = wide_int_to_tree (type, cst);
3685 new_temp = make_ssa_name (TREE_TYPE (op));
3686 new_stmt = gimple_build_assign (new_temp, code,
3687 arginfo[i].op, tcst);
3688 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3689 vargs.safe_push (new_temp);
3691 break;
3692 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
3693 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
3694 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
3695 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
3696 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
3697 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
3698 default:
3699 gcc_unreachable ();
3703 new_stmt = gimple_build_call_vec (fndecl, vargs);
3704 if (vec_dest)
3706 gcc_assert (ratype || simd_clone_subparts (rtype) == nunits);
3707 if (ratype)
3708 new_temp = create_tmp_var (ratype);
3709 else if (simd_clone_subparts (vectype)
3710 == simd_clone_subparts (rtype))
3711 new_temp = make_ssa_name (vec_dest, new_stmt);
3712 else
3713 new_temp = make_ssa_name (rtype, new_stmt);
3714 gimple_call_set_lhs (new_stmt, new_temp);
3716 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3718 if (vec_dest)
3720 if (simd_clone_subparts (vectype) < nunits)
3722 unsigned int k, l;
3723 poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
3724 poly_uint64 bytes = GET_MODE_SIZE (TYPE_MODE (vectype));
3725 k = nunits / simd_clone_subparts (vectype);
3726 gcc_assert ((k & (k - 1)) == 0);
3727 for (l = 0; l < k; l++)
3729 tree t;
3730 if (ratype)
3732 t = build_fold_addr_expr (new_temp);
3733 t = build2 (MEM_REF, vectype, t,
3734 build_int_cst (TREE_TYPE (t), l * bytes));
3736 else
3737 t = build3 (BIT_FIELD_REF, vectype, new_temp,
3738 bitsize_int (prec), bitsize_int (l * prec));
3739 new_stmt
3740 = gimple_build_assign (make_ssa_name (vectype), t);
3741 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3742 if (j == 0 && l == 0)
3743 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3744 else
3745 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3747 prev_stmt_info = vinfo_for_stmt (new_stmt);
3750 if (ratype)
3752 tree clobber = build_constructor (ratype, NULL);
3753 TREE_THIS_VOLATILE (clobber) = 1;
3754 new_stmt = gimple_build_assign (new_temp, clobber);
3755 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3757 continue;
3759 else if (simd_clone_subparts (vectype) > nunits)
3761 unsigned int k = (simd_clone_subparts (vectype)
3762 / simd_clone_subparts (rtype));
3763 gcc_assert ((k & (k - 1)) == 0);
3764 if ((j & (k - 1)) == 0)
3765 vec_alloc (ret_ctor_elts, k);
3766 if (ratype)
3768 unsigned int m, o = nunits / simd_clone_subparts (rtype);
3769 for (m = 0; m < o; m++)
3771 tree tem = build4 (ARRAY_REF, rtype, new_temp,
3772 size_int (m), NULL_TREE, NULL_TREE);
3773 new_stmt
3774 = gimple_build_assign (make_ssa_name (rtype), tem);
3775 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3776 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
3777 gimple_assign_lhs (new_stmt));
3779 tree clobber = build_constructor (ratype, NULL);
3780 TREE_THIS_VOLATILE (clobber) = 1;
3781 new_stmt = gimple_build_assign (new_temp, clobber);
3782 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3784 else
3785 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
3786 if ((j & (k - 1)) != k - 1)
3787 continue;
3788 vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
3789 new_stmt
3790 = gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
3791 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3793 if ((unsigned) j == k - 1)
3794 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3795 else
3796 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3798 prev_stmt_info = vinfo_for_stmt (new_stmt);
3799 continue;
3801 else if (ratype)
3803 tree t = build_fold_addr_expr (new_temp);
3804 t = build2 (MEM_REF, vectype, t,
3805 build_int_cst (TREE_TYPE (t), 0));
3806 new_stmt
3807 = gimple_build_assign (make_ssa_name (vec_dest), t);
3808 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3809 tree clobber = build_constructor (ratype, NULL);
3810 TREE_THIS_VOLATILE (clobber) = 1;
3811 vect_finish_stmt_generation (stmt,
3812 gimple_build_assign (new_temp,
3813 clobber), gsi);
3817 if (j == 0)
3818 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3819 else
3820 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3822 prev_stmt_info = vinfo_for_stmt (new_stmt);
3825 vargs.release ();
3827 /* The call in STMT might prevent it from being removed in dce.
3828 We however cannot remove it here, due to the way the ssa name
3829 it defines is mapped to the new definition. So just replace
3830 rhs of the statement with something harmless. */
3832 if (slp_node)
3833 return true;
3835 if (scalar_dest)
3837 type = TREE_TYPE (scalar_dest);
3838 if (is_pattern_stmt_p (stmt_info))
3839 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
3840 else
3841 lhs = gimple_call_lhs (stmt);
3842 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
3844 else
3845 new_stmt = gimple_build_nop ();
3846 set_vinfo_for_stmt (new_stmt, stmt_info);
3847 set_vinfo_for_stmt (stmt, NULL);
3848 STMT_VINFO_STMT (stmt_info) = new_stmt;
3849 gsi_replace (gsi, new_stmt, true);
3850 unlink_stmt_vdef (stmt);
3852 return true;
3856 /* Function vect_gen_widened_results_half
3858 Create a vector stmt whose code, type, number of arguments, and result
3859 variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
3860 VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at BSI.
3861 In the case that CODE is a CALL_EXPR, this means that a call to DECL
3862 needs to be created (DECL is a function-decl of a target-builtin).
3863 STMT is the original scalar stmt that we are vectorizing. */
3865 static gimple *
3866 vect_gen_widened_results_half (enum tree_code code,
3867 tree decl,
3868 tree vec_oprnd0, tree vec_oprnd1, int op_type,
3869 tree vec_dest, gimple_stmt_iterator *gsi,
3870 gimple *stmt)
3872 gimple *new_stmt;
3873 tree new_temp;
3875 /* Generate half of the widened result: */
3876 if (code == CALL_EXPR)
3878 /* Target specific support */
3879 if (op_type == binary_op)
3880 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
3881 else
3882 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
3883 new_temp = make_ssa_name (vec_dest, new_stmt);
3884 gimple_call_set_lhs (new_stmt, new_temp);
3886 else
3888 /* Generic support */
3889 gcc_assert (op_type == TREE_CODE_LENGTH (code));
3890 if (op_type != binary_op)
3891 vec_oprnd1 = NULL;
3892 new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
3893 new_temp = make_ssa_name (vec_dest, new_stmt);
3894 gimple_assign_set_lhs (new_stmt, new_temp);
3896 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3898 return new_stmt;
3902 /* Get vectorized definitions for loop-based vectorization. For the first
3903 operand we call vect_get_vec_def_for_operand() (with OPRND containing
3904 scalar operand), and for the rest we get a copy with
3905 vect_get_vec_def_for_stmt_copy() using the previous vector definition
3906 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
3907 The vectors are collected into VEC_OPRNDS. */
3909 static void
3910 vect_get_loop_based_defs (tree *oprnd, gimple *stmt, enum vect_def_type dt,
3911 vec<tree> *vec_oprnds, int multi_step_cvt)
3913 tree vec_oprnd;
3915 /* Get first vector operand. */
3916 /* All the vector operands except the very first one (that is scalar oprnd)
3917 are stmt copies. */
3918 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
3919 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt);
3920 else
3921 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
3923 vec_oprnds->quick_push (vec_oprnd);
3925 /* Get second vector operand. */
3926 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
3927 vec_oprnds->quick_push (vec_oprnd);
3929 *oprnd = vec_oprnd;
3931 /* For conversion in multiple steps, continue to get operands
3932 recursively. */
3933 if (multi_step_cvt)
3934 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
3938 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
3939 For multi-step conversions store the resulting vectors and call the function
3940 recursively. */
3942 static void
3943 vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
3944 int multi_step_cvt, gimple *stmt,
3945 vec<tree> vec_dsts,
3946 gimple_stmt_iterator *gsi,
3947 slp_tree slp_node, enum tree_code code,
3948 stmt_vec_info *prev_stmt_info)
3950 unsigned int i;
3951 tree vop0, vop1, new_tmp, vec_dest;
3952 gimple *new_stmt;
3953 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3955 vec_dest = vec_dsts.pop ();
3957 for (i = 0; i < vec_oprnds->length (); i += 2)
3959 /* Create demotion operation. */
3960 vop0 = (*vec_oprnds)[i];
3961 vop1 = (*vec_oprnds)[i + 1];
3962 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
3963 new_tmp = make_ssa_name (vec_dest, new_stmt);
3964 gimple_assign_set_lhs (new_stmt, new_tmp);
3965 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3967 if (multi_step_cvt)
3968 /* Store the resulting vector for next recursive call. */
3969 (*vec_oprnds)[i/2] = new_tmp;
3970 else
3972 /* This is the last step of the conversion sequence. Store the
3973 vectors in SLP_NODE or in vector info of the scalar statement
3974 (or in STMT_VINFO_RELATED_STMT chain). */
3975 if (slp_node)
3976 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3977 else
3979 if (!*prev_stmt_info)
3980 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3981 else
3982 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
3984 *prev_stmt_info = vinfo_for_stmt (new_stmt);
3989 /* For multi-step demotion operations we first generate demotion operations
3990 from the source type to the intermediate types, and then combine the
3991 results (stored in VEC_OPRNDS) in demotion operation to the destination
3992 type. */
3993 if (multi_step_cvt)
3995 /* At each level of recursion we have half of the operands we had at the
3996 previous level. */
3997 vec_oprnds->truncate ((i+1)/2);
3998 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
3999 stmt, vec_dsts, gsi, slp_node,
4000 VEC_PACK_TRUNC_EXPR,
4001 prev_stmt_info);
4004 vec_dsts.quick_push (vec_dest);
4008 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
4009 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
4010 the resulting vectors and call the function recursively. */
4012 static void
4013 vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
4014 vec<tree> *vec_oprnds1,
4015 gimple *stmt, tree vec_dest,
4016 gimple_stmt_iterator *gsi,
4017 enum tree_code code1,
4018 enum tree_code code2, tree decl1,
4019 tree decl2, int op_type)
4021 int i;
4022 tree vop0, vop1, new_tmp1, new_tmp2;
4023 gimple *new_stmt1, *new_stmt2;
4024 vec<tree> vec_tmp = vNULL;
4026 vec_tmp.create (vec_oprnds0->length () * 2);
4027 FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
4029 if (op_type == binary_op)
4030 vop1 = (*vec_oprnds1)[i];
4031 else
4032 vop1 = NULL_TREE;
4034 /* Generate the two halves of promotion operation. */
4035 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
4036 op_type, vec_dest, gsi, stmt);
4037 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
4038 op_type, vec_dest, gsi, stmt);
4039 if (is_gimple_call (new_stmt1))
4041 new_tmp1 = gimple_call_lhs (new_stmt1);
4042 new_tmp2 = gimple_call_lhs (new_stmt2);
4044 else
4046 new_tmp1 = gimple_assign_lhs (new_stmt1);
4047 new_tmp2 = gimple_assign_lhs (new_stmt2);
4050 /* Store the results for the next step. */
4051 vec_tmp.quick_push (new_tmp1);
4052 vec_tmp.quick_push (new_tmp2);
4055 vec_oprnds0->release ();
4056 *vec_oprnds0 = vec_tmp;
4060 /* Check if STMT performs a conversion operation, that can be vectorized.
4061 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4062 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4063 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4065 static bool
4066 vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
4067 gimple **vec_stmt, slp_tree slp_node)
4069 tree vec_dest;
4070 tree scalar_dest;
4071 tree op0, op1 = NULL_TREE;
4072 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
4073 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4074 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4075 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
4076 enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
4077 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
4078 tree new_temp;
4079 gimple *def_stmt;
4080 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4081 int ndts = 2;
4082 gimple *new_stmt = NULL;
4083 stmt_vec_info prev_stmt_info;
4084 poly_uint64 nunits_in;
4085 poly_uint64 nunits_out;
4086 tree vectype_out, vectype_in;
4087 int ncopies, i, j;
4088 tree lhs_type, rhs_type;
4089 enum { NARROW, NONE, WIDEN } modifier;
4090 vec<tree> vec_oprnds0 = vNULL;
4091 vec<tree> vec_oprnds1 = vNULL;
4092 tree vop0;
4093 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4094 vec_info *vinfo = stmt_info->vinfo;
4095 int multi_step_cvt = 0;
4096 vec<tree> interm_types = vNULL;
4097 tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
4098 int op_type;
4099 unsigned short fltsz;
4101 /* Is STMT a vectorizable conversion? */
4103 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4104 return false;
4106 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4107 && ! vec_stmt)
4108 return false;
4110 if (!is_gimple_assign (stmt))
4111 return false;
4113 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4114 return false;
4116 code = gimple_assign_rhs_code (stmt);
4117 if (!CONVERT_EXPR_CODE_P (code)
4118 && code != FIX_TRUNC_EXPR
4119 && code != FLOAT_EXPR
4120 && code != WIDEN_MULT_EXPR
4121 && code != WIDEN_LSHIFT_EXPR)
4122 return false;
4124 op_type = TREE_CODE_LENGTH (code);
4126 /* Check types of lhs and rhs. */
4127 scalar_dest = gimple_assign_lhs (stmt);
4128 lhs_type = TREE_TYPE (scalar_dest);
4129 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4131 op0 = gimple_assign_rhs1 (stmt);
4132 rhs_type = TREE_TYPE (op0);
4134 if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
4135 && !((INTEGRAL_TYPE_P (lhs_type)
4136 && INTEGRAL_TYPE_P (rhs_type))
4137 || (SCALAR_FLOAT_TYPE_P (lhs_type)
4138 && SCALAR_FLOAT_TYPE_P (rhs_type))))
4139 return false;
4141 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
4142 && ((INTEGRAL_TYPE_P (lhs_type)
4143 && !type_has_mode_precision_p (lhs_type))
4144 || (INTEGRAL_TYPE_P (rhs_type)
4145 && !type_has_mode_precision_p (rhs_type))))
4147 if (dump_enabled_p ())
4148 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4149 "type conversion to/from bit-precision unsupported."
4150 "\n");
4151 return false;
4154 /* Check the operands of the operation. */
4155 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype_in))
4157 if (dump_enabled_p ())
4158 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4159 "use not simple.\n");
4160 return false;
4162 if (op_type == binary_op)
4164 bool ok;
4166 op1 = gimple_assign_rhs2 (stmt);
4167 gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
4168 /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
4169 OP1. */
4170 if (CONSTANT_CLASS_P (op0))
4171 ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &vectype_in);
4172 else
4173 ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]);
4175 if (!ok)
4177 if (dump_enabled_p ())
4178 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4179 "use not simple.\n");
4180 return false;
4184 /* If op0 is an external or constant defs use a vector type of
4185 the same size as the output vector type. */
4186 if (!vectype_in)
4187 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
4188 if (vec_stmt)
4189 gcc_assert (vectype_in);
4190 if (!vectype_in)
4192 if (dump_enabled_p ())
4194 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4195 "no vectype for scalar type ");
4196 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
4197 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4200 return false;
4203 if (VECTOR_BOOLEAN_TYPE_P (vectype_out)
4204 && !VECTOR_BOOLEAN_TYPE_P (vectype_in))
4206 if (dump_enabled_p ())
4208 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4209 "can't convert between boolean and non "
4210 "boolean vectors");
4211 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
4212 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4215 return false;
4218 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
4219 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4220 if (known_eq (nunits_out, nunits_in))
4221 modifier = NONE;
4222 else if (multiple_p (nunits_out, nunits_in))
4223 modifier = NARROW;
4224 else
4226 gcc_checking_assert (multiple_p (nunits_in, nunits_out));
4227 modifier = WIDEN;
4230 /* Multiple types in SLP are handled by creating the appropriate number of
4231 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4232 case of SLP. */
4233 if (slp_node)
4234 ncopies = 1;
4235 else if (modifier == NARROW)
4236 ncopies = vect_get_num_copies (loop_vinfo, vectype_out);
4237 else
4238 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
4240 /* Sanity check: make sure that at least one copy of the vectorized stmt
4241 needs to be generated. */
4242 gcc_assert (ncopies >= 1);
4244 bool found_mode = false;
4245 scalar_mode lhs_mode = SCALAR_TYPE_MODE (lhs_type);
4246 scalar_mode rhs_mode = SCALAR_TYPE_MODE (rhs_type);
4247 opt_scalar_mode rhs_mode_iter;
4249 /* Supportable by target? */
4250 switch (modifier)
4252 case NONE:
4253 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
4254 return false;
4255 if (supportable_convert_operation (code, vectype_out, vectype_in,
4256 &decl1, &code1))
4257 break;
4258 /* FALLTHRU */
4259 unsupported:
4260 if (dump_enabled_p ())
4261 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4262 "conversion not supported by target.\n");
4263 return false;
4265 case WIDEN:
4266 if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
4267 &code1, &code2, &multi_step_cvt,
4268 &interm_types))
4270 /* Binary widening operation can only be supported directly by the
4271 architecture. */
4272 gcc_assert (!(multi_step_cvt && op_type == binary_op));
4273 break;
4276 if (code != FLOAT_EXPR
4277 || GET_MODE_SIZE (lhs_mode) <= GET_MODE_SIZE (rhs_mode))
4278 goto unsupported;
4280 fltsz = GET_MODE_SIZE (lhs_mode);
4281 FOR_EACH_2XWIDER_MODE (rhs_mode_iter, rhs_mode)
4283 rhs_mode = rhs_mode_iter.require ();
4284 if (GET_MODE_SIZE (rhs_mode) > fltsz)
4285 break;
4287 cvt_type
4288 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
4289 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
4290 if (cvt_type == NULL_TREE)
4291 goto unsupported;
4293 if (GET_MODE_SIZE (rhs_mode) == fltsz)
4295 if (!supportable_convert_operation (code, vectype_out,
4296 cvt_type, &decl1, &codecvt1))
4297 goto unsupported;
4299 else if (!supportable_widening_operation (code, stmt, vectype_out,
4300 cvt_type, &codecvt1,
4301 &codecvt2, &multi_step_cvt,
4302 &interm_types))
4303 continue;
4304 else
4305 gcc_assert (multi_step_cvt == 0);
4307 if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
4308 vectype_in, &code1, &code2,
4309 &multi_step_cvt, &interm_types))
4311 found_mode = true;
4312 break;
4316 if (!found_mode)
4317 goto unsupported;
4319 if (GET_MODE_SIZE (rhs_mode) == fltsz)
4320 codecvt2 = ERROR_MARK;
4321 else
4323 multi_step_cvt++;
4324 interm_types.safe_push (cvt_type);
4325 cvt_type = NULL_TREE;
4327 break;
4329 case NARROW:
4330 gcc_assert (op_type == unary_op);
4331 if (supportable_narrowing_operation (code, vectype_out, vectype_in,
4332 &code1, &multi_step_cvt,
4333 &interm_types))
4334 break;
4336 if (code != FIX_TRUNC_EXPR
4337 || GET_MODE_SIZE (lhs_mode) >= GET_MODE_SIZE (rhs_mode))
4338 goto unsupported;
4340 cvt_type
4341 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
4342 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
4343 if (cvt_type == NULL_TREE)
4344 goto unsupported;
4345 if (!supportable_convert_operation (code, cvt_type, vectype_in,
4346 &decl1, &codecvt1))
4347 goto unsupported;
4348 if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
4349 &code1, &multi_step_cvt,
4350 &interm_types))
4351 break;
4352 goto unsupported;
4354 default:
4355 gcc_unreachable ();
4358 if (!vec_stmt) /* transformation not required. */
4360 if (dump_enabled_p ())
4361 dump_printf_loc (MSG_NOTE, vect_location,
4362 "=== vectorizable_conversion ===\n");
4363 if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
4365 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
4366 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
4368 else if (modifier == NARROW)
4370 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
4371 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
4373 else
4375 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
4376 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
4378 interm_types.release ();
4379 return true;
4382 /* Transform. */
4383 if (dump_enabled_p ())
4384 dump_printf_loc (MSG_NOTE, vect_location,
4385 "transform conversion. ncopies = %d.\n", ncopies);
4387 if (op_type == binary_op)
4389 if (CONSTANT_CLASS_P (op0))
4390 op0 = fold_convert (TREE_TYPE (op1), op0);
4391 else if (CONSTANT_CLASS_P (op1))
4392 op1 = fold_convert (TREE_TYPE (op0), op1);
4395 /* In case of multi-step conversion, we first generate conversion operations
4396 to the intermediate types, and then from that types to the final one.
4397 We create vector destinations for the intermediate type (TYPES) received
4398 from supportable_*_operation, and store them in the correct order
4399 for future use in vect_create_vectorized_*_stmts (). */
4400 auto_vec<tree> vec_dsts (multi_step_cvt + 1);
4401 vec_dest = vect_create_destination_var (scalar_dest,
4402 (cvt_type && modifier == WIDEN)
4403 ? cvt_type : vectype_out);
4404 vec_dsts.quick_push (vec_dest);
4406 if (multi_step_cvt)
4408 for (i = interm_types.length () - 1;
4409 interm_types.iterate (i, &intermediate_type); i--)
4411 vec_dest = vect_create_destination_var (scalar_dest,
4412 intermediate_type);
4413 vec_dsts.quick_push (vec_dest);
4417 if (cvt_type)
4418 vec_dest = vect_create_destination_var (scalar_dest,
4419 modifier == WIDEN
4420 ? vectype_out : cvt_type);
4422 if (!slp_node)
4424 if (modifier == WIDEN)
4426 vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
4427 if (op_type == binary_op)
4428 vec_oprnds1.create (1);
4430 else if (modifier == NARROW)
4431 vec_oprnds0.create (
4432 2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
4434 else if (code == WIDEN_LSHIFT_EXPR)
4435 vec_oprnds1.create (slp_node->vec_stmts_size);
4437 last_oprnd = op0;
4438 prev_stmt_info = NULL;
4439 switch (modifier)
4441 case NONE:
4442 for (j = 0; j < ncopies; j++)
4444 if (j == 0)
4445 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
4446 else
4447 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
4449 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4451 /* Arguments are ready, create the new vector stmt. */
4452 if (code1 == CALL_EXPR)
4454 new_stmt = gimple_build_call (decl1, 1, vop0);
4455 new_temp = make_ssa_name (vec_dest, new_stmt);
4456 gimple_call_set_lhs (new_stmt, new_temp);
4458 else
4460 gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
4461 new_stmt = gimple_build_assign (vec_dest, code1, vop0);
4462 new_temp = make_ssa_name (vec_dest, new_stmt);
4463 gimple_assign_set_lhs (new_stmt, new_temp);
4466 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4467 if (slp_node)
4468 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4469 else
4471 if (!prev_stmt_info)
4472 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4473 else
4474 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4475 prev_stmt_info = vinfo_for_stmt (new_stmt);
4479 break;
4481 case WIDEN:
4482 /* In case the vectorization factor (VF) is bigger than the number
4483 of elements that we can fit in a vectype (nunits), we have to
4484 generate more than one vector stmt - i.e - we need to "unroll"
4485 the vector stmt by a factor VF/nunits. */
4486 for (j = 0; j < ncopies; j++)
4488 /* Handle uses. */
4489 if (j == 0)
4491 if (slp_node)
4493 if (code == WIDEN_LSHIFT_EXPR)
4495 unsigned int k;
4497 vec_oprnd1 = op1;
4498 /* Store vec_oprnd1 for every vector stmt to be created
4499 for SLP_NODE. We check during the analysis that all
4500 the shift arguments are the same. */
4501 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
4502 vec_oprnds1.quick_push (vec_oprnd1);
4504 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4505 slp_node);
4507 else
4508 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
4509 &vec_oprnds1, slp_node);
4511 else
4513 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt);
4514 vec_oprnds0.quick_push (vec_oprnd0);
4515 if (op_type == binary_op)
4517 if (code == WIDEN_LSHIFT_EXPR)
4518 vec_oprnd1 = op1;
4519 else
4520 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt);
4521 vec_oprnds1.quick_push (vec_oprnd1);
4525 else
4527 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
4528 vec_oprnds0.truncate (0);
4529 vec_oprnds0.quick_push (vec_oprnd0);
4530 if (op_type == binary_op)
4532 if (code == WIDEN_LSHIFT_EXPR)
4533 vec_oprnd1 = op1;
4534 else
4535 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
4536 vec_oprnd1);
4537 vec_oprnds1.truncate (0);
4538 vec_oprnds1.quick_push (vec_oprnd1);
4542 /* Arguments are ready. Create the new vector stmts. */
4543 for (i = multi_step_cvt; i >= 0; i--)
4545 tree this_dest = vec_dsts[i];
4546 enum tree_code c1 = code1, c2 = code2;
4547 if (i == 0 && codecvt2 != ERROR_MARK)
4549 c1 = codecvt1;
4550 c2 = codecvt2;
4552 vect_create_vectorized_promotion_stmts (&vec_oprnds0,
4553 &vec_oprnds1,
4554 stmt, this_dest, gsi,
4555 c1, c2, decl1, decl2,
4556 op_type);
4559 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4561 if (cvt_type)
4563 if (codecvt1 == CALL_EXPR)
4565 new_stmt = gimple_build_call (decl1, 1, vop0);
4566 new_temp = make_ssa_name (vec_dest, new_stmt);
4567 gimple_call_set_lhs (new_stmt, new_temp);
4569 else
4571 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
4572 new_temp = make_ssa_name (vec_dest);
4573 new_stmt = gimple_build_assign (new_temp, codecvt1,
4574 vop0);
4577 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4579 else
4580 new_stmt = SSA_NAME_DEF_STMT (vop0);
4582 if (slp_node)
4583 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4584 else
4586 if (!prev_stmt_info)
4587 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
4588 else
4589 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4590 prev_stmt_info = vinfo_for_stmt (new_stmt);
4595 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4596 break;
4598 case NARROW:
4599 /* In case the vectorization factor (VF) is bigger than the number
4600 of elements that we can fit in a vectype (nunits), we have to
4601 generate more than one vector stmt - i.e - we need to "unroll"
4602 the vector stmt by a factor VF/nunits. */
4603 for (j = 0; j < ncopies; j++)
4605 /* Handle uses. */
4606 if (slp_node)
4607 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4608 slp_node);
4609 else
4611 vec_oprnds0.truncate (0);
4612 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
4613 vect_pow2 (multi_step_cvt) - 1);
4616 /* Arguments are ready. Create the new vector stmts. */
4617 if (cvt_type)
4618 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4620 if (codecvt1 == CALL_EXPR)
4622 new_stmt = gimple_build_call (decl1, 1, vop0);
4623 new_temp = make_ssa_name (vec_dest, new_stmt);
4624 gimple_call_set_lhs (new_stmt, new_temp);
4626 else
4628 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
4629 new_temp = make_ssa_name (vec_dest);
4630 new_stmt = gimple_build_assign (new_temp, codecvt1,
4631 vop0);
4634 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4635 vec_oprnds0[i] = new_temp;
4638 vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
4639 stmt, vec_dsts, gsi,
4640 slp_node, code1,
4641 &prev_stmt_info);
4644 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4645 break;
4648 vec_oprnds0.release ();
4649 vec_oprnds1.release ();
4650 interm_types.release ();
4652 return true;
4656 /* Function vectorizable_assignment.
4658 Check if STMT performs an assignment (copy) that can be vectorized.
4659 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4660 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4661 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4663 static bool
4664 vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
4665 gimple **vec_stmt, slp_tree slp_node)
4667 tree vec_dest;
4668 tree scalar_dest;
4669 tree op;
4670 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4671 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4672 tree new_temp;
4673 gimple *def_stmt;
4674 enum vect_def_type dt[1] = {vect_unknown_def_type};
4675 int ndts = 1;
4676 int ncopies;
4677 int i, j;
4678 vec<tree> vec_oprnds = vNULL;
4679 tree vop;
4680 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4681 vec_info *vinfo = stmt_info->vinfo;
4682 gimple *new_stmt = NULL;
4683 stmt_vec_info prev_stmt_info = NULL;
4684 enum tree_code code;
4685 tree vectype_in;
4687 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4688 return false;
4690 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4691 && ! vec_stmt)
4692 return false;
4694 /* Is vectorizable assignment? */
4695 if (!is_gimple_assign (stmt))
4696 return false;
4698 scalar_dest = gimple_assign_lhs (stmt);
4699 if (TREE_CODE (scalar_dest) != SSA_NAME)
4700 return false;
4702 code = gimple_assign_rhs_code (stmt);
4703 if (gimple_assign_single_p (stmt)
4704 || code == PAREN_EXPR
4705 || CONVERT_EXPR_CODE_P (code))
4706 op = gimple_assign_rhs1 (stmt);
4707 else
4708 return false;
4710 if (code == VIEW_CONVERT_EXPR)
4711 op = TREE_OPERAND (op, 0);
4713 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4714 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
4716 /* Multiple types in SLP are handled by creating the appropriate number of
4717 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4718 case of SLP. */
4719 if (slp_node)
4720 ncopies = 1;
4721 else
4722 ncopies = vect_get_num_copies (loop_vinfo, vectype);
4724 gcc_assert (ncopies >= 1);
4726 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[0], &vectype_in))
4728 if (dump_enabled_p ())
4729 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4730 "use not simple.\n");
4731 return false;
4734 /* We can handle NOP_EXPR conversions that do not change the number
4735 of elements or the vector size. */
4736 if ((CONVERT_EXPR_CODE_P (code)
4737 || code == VIEW_CONVERT_EXPR)
4738 && (!vectype_in
4739 || maybe_ne (TYPE_VECTOR_SUBPARTS (vectype_in), nunits)
4740 || maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype)),
4741 GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
4742 return false;
4744 /* We do not handle bit-precision changes. */
4745 if ((CONVERT_EXPR_CODE_P (code)
4746 || code == VIEW_CONVERT_EXPR)
4747 && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
4748 && (!type_has_mode_precision_p (TREE_TYPE (scalar_dest))
4749 || !type_has_mode_precision_p (TREE_TYPE (op)))
4750 /* But a conversion that does not change the bit-pattern is ok. */
4751 && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4752 > TYPE_PRECISION (TREE_TYPE (op)))
4753 && TYPE_UNSIGNED (TREE_TYPE (op)))
4754 /* Conversion between boolean types of different sizes is
4755 a simple assignment in case their vectypes are same
4756 boolean vectors. */
4757 && (!VECTOR_BOOLEAN_TYPE_P (vectype)
4758 || !VECTOR_BOOLEAN_TYPE_P (vectype_in)))
4760 if (dump_enabled_p ())
4761 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4762 "type conversion to/from bit-precision "
4763 "unsupported.\n");
4764 return false;
4767 if (!vec_stmt) /* transformation not required. */
4769 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
4770 if (dump_enabled_p ())
4771 dump_printf_loc (MSG_NOTE, vect_location,
4772 "=== vectorizable_assignment ===\n");
4773 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
4774 return true;
4777 /* Transform. */
4778 if (dump_enabled_p ())
4779 dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");
4781 /* Handle def. */
4782 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4784 /* Handle use. */
4785 for (j = 0; j < ncopies; j++)
4787 /* Handle uses. */
4788 if (j == 0)
4789 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
4790 else
4791 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
4793 /* Arguments are ready. create the new vector stmt. */
4794 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
4796 if (CONVERT_EXPR_CODE_P (code)
4797 || code == VIEW_CONVERT_EXPR)
4798 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
4799 new_stmt = gimple_build_assign (vec_dest, vop);
4800 new_temp = make_ssa_name (vec_dest, new_stmt);
4801 gimple_assign_set_lhs (new_stmt, new_temp);
4802 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4803 if (slp_node)
4804 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4807 if (slp_node)
4808 continue;
4810 if (j == 0)
4811 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4812 else
4813 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4815 prev_stmt_info = vinfo_for_stmt (new_stmt);
4818 vec_oprnds.release ();
4819 return true;
4823 /* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
4824 either as shift by a scalar or by a vector. */
4826 bool
4827 vect_supportable_shift (enum tree_code code, tree scalar_type)
4830 machine_mode vec_mode;
4831 optab optab;
4832 int icode;
4833 tree vectype;
4835 vectype = get_vectype_for_scalar_type (scalar_type);
4836 if (!vectype)
4837 return false;
4839 optab = optab_for_tree_code (code, vectype, optab_scalar);
4840 if (!optab
4841 || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
4843 optab = optab_for_tree_code (code, vectype, optab_vector);
4844 if (!optab
4845 || (optab_handler (optab, TYPE_MODE (vectype))
4846 == CODE_FOR_nothing))
4847 return false;
4850 vec_mode = TYPE_MODE (vectype);
4851 icode = (int) optab_handler (optab, vec_mode);
4852 if (icode == CODE_FOR_nothing)
4853 return false;
4855 return true;
4859 /* Function vectorizable_shift.
4861 Check if STMT performs a shift operation that can be vectorized.
4862 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4863 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4864 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4866 static bool
4867 vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
4868 gimple **vec_stmt, slp_tree slp_node)
4870 tree vec_dest;
4871 tree scalar_dest;
4872 tree op0, op1 = NULL;
4873 tree vec_oprnd1 = NULL_TREE;
4874 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4875 tree vectype;
4876 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4877 enum tree_code code;
4878 machine_mode vec_mode;
4879 tree new_temp;
4880 optab optab;
4881 int icode;
4882 machine_mode optab_op2_mode;
4883 gimple *def_stmt;
4884 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4885 int ndts = 2;
4886 gimple *new_stmt = NULL;
4887 stmt_vec_info prev_stmt_info;
4888 poly_uint64 nunits_in;
4889 poly_uint64 nunits_out;
4890 tree vectype_out;
4891 tree op1_vectype;
4892 int ncopies;
4893 int j, i;
4894 vec<tree> vec_oprnds0 = vNULL;
4895 vec<tree> vec_oprnds1 = vNULL;
4896 tree vop0, vop1;
4897 unsigned int k;
4898 bool scalar_shift_arg = true;
4899 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4900 vec_info *vinfo = stmt_info->vinfo;
4902 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4903 return false;
4905 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4906 && ! vec_stmt)
4907 return false;
4909 /* Is STMT a vectorizable binary/unary operation? */
4910 if (!is_gimple_assign (stmt))
4911 return false;
4913 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4914 return false;
4916 code = gimple_assign_rhs_code (stmt);
4918 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
4919 || code == RROTATE_EXPR))
4920 return false;
4922 scalar_dest = gimple_assign_lhs (stmt);
4923 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4924 if (!type_has_mode_precision_p (TREE_TYPE (scalar_dest)))
4926 if (dump_enabled_p ())
4927 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4928 "bit-precision shifts not supported.\n");
4929 return false;
4932 op0 = gimple_assign_rhs1 (stmt);
4933 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
4935 if (dump_enabled_p ())
4936 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4937 "use not simple.\n");
4938 return false;
4940 /* If op0 is an external or constant def use a vector type with
4941 the same size as the output vector type. */
4942 if (!vectype)
4943 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
4944 if (vec_stmt)
4945 gcc_assert (vectype);
4946 if (!vectype)
4948 if (dump_enabled_p ())
4949 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4950 "no vectype for scalar type\n");
4951 return false;
4954 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4955 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
4956 if (maybe_ne (nunits_out, nunits_in))
4957 return false;
4959 op1 = gimple_assign_rhs2 (stmt);
4960 if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &op1_vectype))
4962 if (dump_enabled_p ())
4963 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4964 "use not simple.\n");
4965 return false;
4968 /* Multiple types in SLP are handled by creating the appropriate number of
4969 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4970 case of SLP. */
4971 if (slp_node)
4972 ncopies = 1;
4973 else
4974 ncopies = vect_get_num_copies (loop_vinfo, vectype);
4976 gcc_assert (ncopies >= 1);
4978 /* Determine whether the shift amount is a vector, or scalar. If the
4979 shift/rotate amount is a vector, use the vector/vector shift optabs. */
4981 if ((dt[1] == vect_internal_def
4982 || dt[1] == vect_induction_def)
4983 && !slp_node)
4984 scalar_shift_arg = false;
4985 else if (dt[1] == vect_constant_def
4986 || dt[1] == vect_external_def
4987 || dt[1] == vect_internal_def)
4989 /* In SLP, need to check whether the shift count is the same,
4990 in loops if it is a constant or invariant, it is always
4991 a scalar shift. */
4992 if (slp_node)
4994 vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
4995 gimple *slpstmt;
4997 FOR_EACH_VEC_ELT (stmts, k, slpstmt)
4998 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
4999 scalar_shift_arg = false;
5002 /* If the shift amount is computed by a pattern stmt we cannot
5003 use the scalar amount directly thus give up and use a vector
5004 shift. */
5005 if (dt[1] == vect_internal_def)
5007 gimple *def = SSA_NAME_DEF_STMT (op1);
5008 if (is_pattern_stmt_p (vinfo_for_stmt (def)))
5009 scalar_shift_arg = false;
5012 else
5014 if (dump_enabled_p ())
5015 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5016 "operand mode requires invariant argument.\n");
5017 return false;
5020 /* Vector shifted by vector. */
5021 if (!scalar_shift_arg)
5023 optab = optab_for_tree_code (code, vectype, optab_vector);
5024 if (dump_enabled_p ())
5025 dump_printf_loc (MSG_NOTE, vect_location,
5026 "vector/vector shift/rotate found.\n");
5028 if (!op1_vectype)
5029 op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
5030 if (op1_vectype == NULL_TREE
5031 || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
5033 if (dump_enabled_p ())
5034 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5035 "unusable type for last operand in"
5036 " vector/vector shift/rotate.\n");
5037 return false;
5040 /* See if the machine has a vector shifted by scalar insn and if not
5041 then see if it has a vector shifted by vector insn. */
5042 else
5044 optab = optab_for_tree_code (code, vectype, optab_scalar);
5045 if (optab
5046 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
5048 if (dump_enabled_p ())
5049 dump_printf_loc (MSG_NOTE, vect_location,
5050 "vector/scalar shift/rotate found.\n");
5052 else
5054 optab = optab_for_tree_code (code, vectype, optab_vector);
5055 if (optab
5056 && (optab_handler (optab, TYPE_MODE (vectype))
5057 != CODE_FOR_nothing))
5059 scalar_shift_arg = false;
5061 if (dump_enabled_p ())
5062 dump_printf_loc (MSG_NOTE, vect_location,
5063 "vector/vector shift/rotate found.\n");
5065 /* Unlike the other binary operators, shifts/rotates have
5066 the rhs being int, instead of the same type as the lhs,
5067 so make sure the scalar is the right type if we are
5068 dealing with vectors of long long/long/short/char. */
5069 if (dt[1] == vect_constant_def)
5070 op1 = fold_convert (TREE_TYPE (vectype), op1);
5071 else if (!useless_type_conversion_p (TREE_TYPE (vectype),
5072 TREE_TYPE (op1)))
5074 if (slp_node
5075 && TYPE_MODE (TREE_TYPE (vectype))
5076 != TYPE_MODE (TREE_TYPE (op1)))
5078 if (dump_enabled_p ())
5079 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5080 "unusable type for last operand in"
5081 " vector/vector shift/rotate.\n");
5082 return false;
5084 if (vec_stmt && !slp_node)
5086 op1 = fold_convert (TREE_TYPE (vectype), op1);
5087 op1 = vect_init_vector (stmt, op1,
5088 TREE_TYPE (vectype), NULL);
5095 /* Supportable by target? */
5096 if (!optab)
5098 if (dump_enabled_p ())
5099 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5100 "no optab.\n");
5101 return false;
5103 vec_mode = TYPE_MODE (vectype);
5104 icode = (int) optab_handler (optab, vec_mode);
5105 if (icode == CODE_FOR_nothing)
5107 if (dump_enabled_p ())
5108 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5109 "op not supported by target.\n");
5110 /* Check only during analysis. */
5111 if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
5112 || (!vec_stmt
5113 && !vect_worthwhile_without_simd_p (vinfo, code)))
5114 return false;
5115 if (dump_enabled_p ())
5116 dump_printf_loc (MSG_NOTE, vect_location,
5117 "proceeding using word mode.\n");
5120 /* Worthwhile without SIMD support? Check only during analysis. */
5121 if (!vec_stmt
5122 && !VECTOR_MODE_P (TYPE_MODE (vectype))
5123 && !vect_worthwhile_without_simd_p (vinfo, code))
5125 if (dump_enabled_p ())
5126 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5127 "not worthwhile without SIMD support.\n");
5128 return false;
5131 if (!vec_stmt) /* transformation not required. */
5133 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
5134 if (dump_enabled_p ())
5135 dump_printf_loc (MSG_NOTE, vect_location,
5136 "=== vectorizable_shift ===\n");
5137 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
5138 return true;
5141 /* Transform. */
5143 if (dump_enabled_p ())
5144 dump_printf_loc (MSG_NOTE, vect_location,
5145 "transform binary/unary operation.\n");
5147 /* Handle def. */
5148 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5150 prev_stmt_info = NULL;
5151 for (j = 0; j < ncopies; j++)
5153 /* Handle uses. */
5154 if (j == 0)
5156 if (scalar_shift_arg)
5158 /* Vector shl and shr insn patterns can be defined with scalar
5159 operand 2 (shift operand). In this case, use constant or loop
5160 invariant op1 directly, without extending it to vector mode
5161 first. */
5162 optab_op2_mode = insn_data[icode].operand[2].mode;
5163 if (!VECTOR_MODE_P (optab_op2_mode))
5165 if (dump_enabled_p ())
5166 dump_printf_loc (MSG_NOTE, vect_location,
5167 "operand 1 using scalar mode.\n");
5168 vec_oprnd1 = op1;
5169 vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
5170 vec_oprnds1.quick_push (vec_oprnd1);
5171 if (slp_node)
5173 /* Store vec_oprnd1 for every vector stmt to be created
5174 for SLP_NODE. We check during the analysis that all
5175 the shift arguments are the same.
5176 TODO: Allow different constants for different vector
5177 stmts generated for an SLP instance. */
5178 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
5179 vec_oprnds1.quick_push (vec_oprnd1);
5184 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
5185 (a special case for certain kind of vector shifts); otherwise,
5186 operand 1 should be of a vector type (the usual case). */
5187 if (vec_oprnd1)
5188 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
5189 slp_node);
5190 else
5191 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
5192 slp_node);
5194 else
5195 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
5197 /* Arguments are ready. Create the new vector stmt. */
5198 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
5200 vop1 = vec_oprnds1[i];
5201 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
5202 new_temp = make_ssa_name (vec_dest, new_stmt);
5203 gimple_assign_set_lhs (new_stmt, new_temp);
5204 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5205 if (slp_node)
5206 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
5209 if (slp_node)
5210 continue;
5212 if (j == 0)
5213 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5214 else
5215 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5216 prev_stmt_info = vinfo_for_stmt (new_stmt);
5219 vec_oprnds0.release ();
5220 vec_oprnds1.release ();
5222 return true;
5226 /* Function vectorizable_operation.
5228 Check if STMT performs a binary, unary or ternary operation that can
5229 be vectorized.
5230 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5231 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
5232 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5234 static bool
5235 vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
5236 gimple **vec_stmt, slp_tree slp_node)
5238 tree vec_dest;
5239 tree scalar_dest;
5240 tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
5241 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5242 tree vectype;
5243 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5244 enum tree_code code, orig_code;
5245 machine_mode vec_mode;
5246 tree new_temp;
5247 int op_type;
5248 optab optab;
5249 bool target_support_p;
5250 gimple *def_stmt;
5251 enum vect_def_type dt[3]
5252 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
5253 int ndts = 3;
5254 gimple *new_stmt = NULL;
5255 stmt_vec_info prev_stmt_info;
5256 poly_uint64 nunits_in;
5257 poly_uint64 nunits_out;
5258 tree vectype_out;
5259 int ncopies;
5260 int j, i;
5261 vec<tree> vec_oprnds0 = vNULL;
5262 vec<tree> vec_oprnds1 = vNULL;
5263 vec<tree> vec_oprnds2 = vNULL;
5264 tree vop0, vop1, vop2;
5265 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5266 vec_info *vinfo = stmt_info->vinfo;
5268 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5269 return false;
5271 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5272 && ! vec_stmt)
5273 return false;
5275 /* Is STMT a vectorizable binary/unary operation? */
5276 if (!is_gimple_assign (stmt))
5277 return false;
5279 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
5280 return false;
5282 orig_code = code = gimple_assign_rhs_code (stmt);
5284 /* For pointer addition and subtraction, we should use the normal
5285 plus and minus for the vector operation. */
5286 if (code == POINTER_PLUS_EXPR)
5287 code = PLUS_EXPR;
5288 if (code == POINTER_DIFF_EXPR)
5289 code = MINUS_EXPR;
5291 /* Support only unary or binary operations. */
5292 op_type = TREE_CODE_LENGTH (code);
5293 if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
5295 if (dump_enabled_p ())
5296 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5297 "num. args = %d (not unary/binary/ternary op).\n",
5298 op_type);
5299 return false;
5302 scalar_dest = gimple_assign_lhs (stmt);
5303 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
5305 /* Most operations cannot handle bit-precision types without extra
5306 truncations. */
5307 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
5308 && !type_has_mode_precision_p (TREE_TYPE (scalar_dest))
5309 /* Exception are bitwise binary operations. */
5310 && code != BIT_IOR_EXPR
5311 && code != BIT_XOR_EXPR
5312 && code != BIT_AND_EXPR)
5314 if (dump_enabled_p ())
5315 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5316 "bit-precision arithmetic not supported.\n");
5317 return false;
5320 op0 = gimple_assign_rhs1 (stmt);
5321 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
5323 if (dump_enabled_p ())
5324 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5325 "use not simple.\n");
5326 return false;
5328 /* If op0 is an external or constant def use a vector type with
5329 the same size as the output vector type. */
5330 if (!vectype)
5332 /* For boolean type we cannot determine vectype by
5333 invariant value (don't know whether it is a vector
5334 of booleans or vector of integers). We use output
5335 vectype because operations on boolean don't change
5336 type. */
5337 if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op0)))
5339 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (scalar_dest)))
5341 if (dump_enabled_p ())
5342 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5343 "not supported operation on bool value.\n");
5344 return false;
5346 vectype = vectype_out;
5348 else
5349 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
5351 if (vec_stmt)
5352 gcc_assert (vectype);
5353 if (!vectype)
5355 if (dump_enabled_p ())
5357 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5358 "no vectype for scalar type ");
5359 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
5360 TREE_TYPE (op0));
5361 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
5364 return false;
5367 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
5368 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
5369 if (maybe_ne (nunits_out, nunits_in))
5370 return false;
5372 if (op_type == binary_op || op_type == ternary_op)
5374 op1 = gimple_assign_rhs2 (stmt);
5375 if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]))
5377 if (dump_enabled_p ())
5378 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5379 "use not simple.\n");
5380 return false;
5383 if (op_type == ternary_op)
5385 op2 = gimple_assign_rhs3 (stmt);
5386 if (!vect_is_simple_use (op2, vinfo, &def_stmt, &dt[2]))
5388 if (dump_enabled_p ())
5389 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5390 "use not simple.\n");
5391 return false;
5395 /* Multiple types in SLP are handled by creating the appropriate number of
5396 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5397 case of SLP. */
5398 if (slp_node)
5399 ncopies = 1;
5400 else
5401 ncopies = vect_get_num_copies (loop_vinfo, vectype);
5403 gcc_assert (ncopies >= 1);
5405 /* Shifts are handled in vectorizable_shift (). */
5406 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
5407 || code == RROTATE_EXPR)
5408 return false;
5410 /* Supportable by target? */
5412 vec_mode = TYPE_MODE (vectype);
5413 if (code == MULT_HIGHPART_EXPR)
5414 target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
5415 else
5417 optab = optab_for_tree_code (code, vectype, optab_default);
5418 if (!optab)
5420 if (dump_enabled_p ())
5421 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5422 "no optab.\n");
5423 return false;
5425 target_support_p = (optab_handler (optab, vec_mode)
5426 != CODE_FOR_nothing);
5429 if (!target_support_p)
5431 if (dump_enabled_p ())
5432 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5433 "op not supported by target.\n");
5434 /* Check only during analysis. */
5435 if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
5436 || (!vec_stmt && !vect_worthwhile_without_simd_p (vinfo, code)))
5437 return false;
5438 if (dump_enabled_p ())
5439 dump_printf_loc (MSG_NOTE, vect_location,
5440 "proceeding using word mode.\n");
5443 /* Worthwhile without SIMD support? Check only during analysis. */
5444 if (!VECTOR_MODE_P (vec_mode)
5445 && !vec_stmt
5446 && !vect_worthwhile_without_simd_p (vinfo, code))
5448 if (dump_enabled_p ())
5449 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5450 "not worthwhile without SIMD support.\n");
5451 return false;
5454 if (!vec_stmt) /* transformation not required. */
5456 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
5457 if (dump_enabled_p ())
5458 dump_printf_loc (MSG_NOTE, vect_location,
5459 "=== vectorizable_operation ===\n");
5460 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
5461 return true;
5464 /* Transform. */
5466 if (dump_enabled_p ())
5467 dump_printf_loc (MSG_NOTE, vect_location,
5468 "transform binary/unary operation.\n");
5470 /* Handle def. */
5471 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5473 /* POINTER_DIFF_EXPR has pointer arguments which are vectorized as
5474 vectors with unsigned elements, but the result is signed. So, we
5475 need to compute the MINUS_EXPR into vectype temporary and
5476 VIEW_CONVERT_EXPR it into the final vectype_out result. */
5477 tree vec_cvt_dest = NULL_TREE;
5478 if (orig_code == POINTER_DIFF_EXPR)
5479 vec_cvt_dest = vect_create_destination_var (scalar_dest, vectype_out);
5481 /* In case the vectorization factor (VF) is bigger than the number
5482 of elements that we can fit in a vectype (nunits), we have to generate
5483 more than one vector stmt - i.e - we need to "unroll" the
5484 vector stmt by a factor VF/nunits. In doing so, we record a pointer
5485 from one copy of the vector stmt to the next, in the field
5486 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
5487 stages to find the correct vector defs to be used when vectorizing
5488 stmts that use the defs of the current stmt. The example below
5489 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
5490 we need to create 4 vectorized stmts):
5492 before vectorization:
5493 RELATED_STMT VEC_STMT
5494 S1: x = memref - -
5495 S2: z = x + 1 - -
5497 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
5498 there):
5499 RELATED_STMT VEC_STMT
5500 VS1_0: vx0 = memref0 VS1_1 -
5501 VS1_1: vx1 = memref1 VS1_2 -
5502 VS1_2: vx2 = memref2 VS1_3 -
5503 VS1_3: vx3 = memref3 - -
5504 S1: x = load - VS1_0
5505 S2: z = x + 1 - -
5507 step2: vectorize stmt S2 (done here):
5508 To vectorize stmt S2 we first need to find the relevant vector
5509 def for the first operand 'x'. This is, as usual, obtained from
5510 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
5511 that defines 'x' (S1). This way we find the stmt VS1_0, and the
5512 relevant vector def 'vx0'. Having found 'vx0' we can generate
5513 the vector stmt VS2_0, and as usual, record it in the
5514 STMT_VINFO_VEC_STMT of stmt S2.
5515 When creating the second copy (VS2_1), we obtain the relevant vector
5516 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
5517 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
5518 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
5519 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
5520 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
5521 chain of stmts and pointers:
5522 RELATED_STMT VEC_STMT
5523 VS1_0: vx0 = memref0 VS1_1 -
5524 VS1_1: vx1 = memref1 VS1_2 -
5525 VS1_2: vx2 = memref2 VS1_3 -
5526 VS1_3: vx3 = memref3 - -
5527 S1: x = load - VS1_0
5528 VS2_0: vz0 = vx0 + v1 VS2_1 -
5529 VS2_1: vz1 = vx1 + v1 VS2_2 -
5530 VS2_2: vz2 = vx2 + v1 VS2_3 -
5531 VS2_3: vz3 = vx3 + v1 - -
5532 S2: z = x + 1 - VS2_0 */
5534 prev_stmt_info = NULL;
5535 for (j = 0; j < ncopies; j++)
5537 /* Handle uses. */
5538 if (j == 0)
5540 if (op_type == binary_op || op_type == ternary_op)
5541 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
5542 slp_node);
5543 else
5544 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
5545 slp_node);
5546 if (op_type == ternary_op)
5547 vect_get_vec_defs (op2, NULL_TREE, stmt, &vec_oprnds2, NULL,
5548 slp_node);
5550 else
5552 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
5553 if (op_type == ternary_op)
5555 tree vec_oprnd = vec_oprnds2.pop ();
5556 vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
5557 vec_oprnd));
5561 /* Arguments are ready. Create the new vector stmt. */
5562 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
5564 vop1 = ((op_type == binary_op || op_type == ternary_op)
5565 ? vec_oprnds1[i] : NULL_TREE);
5566 vop2 = ((op_type == ternary_op)
5567 ? vec_oprnds2[i] : NULL_TREE);
5568 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2);
5569 new_temp = make_ssa_name (vec_dest, new_stmt);
5570 gimple_assign_set_lhs (new_stmt, new_temp);
5571 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5572 if (vec_cvt_dest)
5574 new_temp = build1 (VIEW_CONVERT_EXPR, vectype_out, new_temp);
5575 new_stmt = gimple_build_assign (vec_cvt_dest, VIEW_CONVERT_EXPR,
5576 new_temp);
5577 new_temp = make_ssa_name (vec_cvt_dest, new_stmt);
5578 gimple_assign_set_lhs (new_stmt, new_temp);
5579 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5581 if (slp_node)
5582 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
5585 if (slp_node)
5586 continue;
5588 if (j == 0)
5589 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5590 else
5591 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5592 prev_stmt_info = vinfo_for_stmt (new_stmt);
5595 vec_oprnds0.release ();
5596 vec_oprnds1.release ();
5597 vec_oprnds2.release ();
5599 return true;
5602 /* A helper function to ensure data reference DR's base alignment. */
5604 static void
5605 ensure_base_align (struct data_reference *dr)
5607 if (!dr->aux)
5608 return;
5610 if (DR_VECT_AUX (dr)->base_misaligned)
5612 tree base_decl = DR_VECT_AUX (dr)->base_decl;
5614 unsigned int align_base_to = DR_TARGET_ALIGNMENT (dr) * BITS_PER_UNIT;
5616 if (decl_in_symtab_p (base_decl))
5617 symtab_node::get (base_decl)->increase_alignment (align_base_to);
5618 else
5620 SET_DECL_ALIGN (base_decl, align_base_to);
5621 DECL_USER_ALIGN (base_decl) = 1;
5623 DR_VECT_AUX (dr)->base_misaligned = false;
5628 /* Function get_group_alias_ptr_type.
5630 Return the alias type for the group starting at FIRST_STMT. */
5632 static tree
5633 get_group_alias_ptr_type (gimple *first_stmt)
5635 struct data_reference *first_dr, *next_dr;
5636 gimple *next_stmt;
5638 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5639 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first_stmt));
5640 while (next_stmt)
5642 next_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (next_stmt));
5643 if (get_alias_set (DR_REF (first_dr))
5644 != get_alias_set (DR_REF (next_dr)))
5646 if (dump_enabled_p ())
5647 dump_printf_loc (MSG_NOTE, vect_location,
5648 "conflicting alias set types.\n");
5649 return ptr_type_node;
5651 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5653 return reference_alias_ptr_type (DR_REF (first_dr));
5657 /* Function vectorizable_store.
5659 Check if STMT defines a non scalar data-ref (array/pointer/structure) that
5660 can be vectorized.
5661 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5662 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
5663 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5665 static bool
5666 vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
5667 slp_tree slp_node)
5669 tree scalar_dest;
5670 tree data_ref;
5671 tree op;
5672 tree vec_oprnd = NULL_TREE;
5673 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5674 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
5675 tree elem_type;
5676 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5677 struct loop *loop = NULL;
5678 machine_mode vec_mode;
5679 tree dummy;
5680 enum dr_alignment_support alignment_support_scheme;
5681 gimple *def_stmt;
5682 enum vect_def_type dt;
5683 stmt_vec_info prev_stmt_info = NULL;
5684 tree dataref_ptr = NULL_TREE;
5685 tree dataref_offset = NULL_TREE;
5686 gimple *ptr_incr = NULL;
5687 int ncopies;
5688 int j;
5689 gimple *next_stmt, *first_stmt;
5690 bool grouped_store;
5691 unsigned int group_size, i;
5692 vec<tree> oprnds = vNULL;
5693 vec<tree> result_chain = vNULL;
5694 bool inv_p;
5695 tree offset = NULL_TREE;
5696 vec<tree> vec_oprnds = vNULL;
5697 bool slp = (slp_node != NULL);
5698 unsigned int vec_num;
5699 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5700 vec_info *vinfo = stmt_info->vinfo;
5701 tree aggr_type;
5702 gather_scatter_info gs_info;
5703 enum vect_def_type scatter_src_dt = vect_unknown_def_type;
5704 gimple *new_stmt;
5705 poly_uint64 vf;
5706 vec_load_store_type vls_type;
5707 tree ref_type;
5709 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5710 return false;
5712 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5713 && ! vec_stmt)
5714 return false;
5716 /* Is vectorizable store? */
5718 if (!is_gimple_assign (stmt))
5719 return false;
5721 scalar_dest = gimple_assign_lhs (stmt);
5722 if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
5723 && is_pattern_stmt_p (stmt_info))
5724 scalar_dest = TREE_OPERAND (scalar_dest, 0);
5725 if (TREE_CODE (scalar_dest) != ARRAY_REF
5726 && TREE_CODE (scalar_dest) != BIT_FIELD_REF
5727 && TREE_CODE (scalar_dest) != INDIRECT_REF
5728 && TREE_CODE (scalar_dest) != COMPONENT_REF
5729 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
5730 && TREE_CODE (scalar_dest) != REALPART_EXPR
5731 && TREE_CODE (scalar_dest) != MEM_REF)
5732 return false;
5734 /* Cannot have hybrid store SLP -- that would mean storing to the
5735 same location twice. */
5736 gcc_assert (slp == PURE_SLP_STMT (stmt_info));
5738 gcc_assert (gimple_assign_single_p (stmt));
5740 tree vectype = STMT_VINFO_VECTYPE (stmt_info), rhs_vectype = NULL_TREE;
5741 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
5743 if (loop_vinfo)
5745 loop = LOOP_VINFO_LOOP (loop_vinfo);
5746 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
5748 else
5749 vf = 1;
5751 /* Multiple types in SLP are handled by creating the appropriate number of
5752 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5753 case of SLP. */
5754 if (slp)
5755 ncopies = 1;
5756 else
5757 ncopies = vect_get_num_copies (loop_vinfo, vectype);
5759 gcc_assert (ncopies >= 1);
5761 /* FORNOW. This restriction should be relaxed. */
5762 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
5764 if (dump_enabled_p ())
5765 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5766 "multiple types in nested loop.\n");
5767 return false;
5770 op = gimple_assign_rhs1 (stmt);
5772 /* In the case this is a store from a constant make sure
5773 native_encode_expr can handle it. */
5774 if (CONSTANT_CLASS_P (op) && native_encode_expr (op, NULL, 64) == 0)
5775 return false;
5777 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt, &rhs_vectype))
5779 if (dump_enabled_p ())
5780 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5781 "use not simple.\n");
5782 return false;
5785 if (dt == vect_constant_def || dt == vect_external_def)
5786 vls_type = VLS_STORE_INVARIANT;
5787 else
5788 vls_type = VLS_STORE;
5790 if (rhs_vectype && !useless_type_conversion_p (vectype, rhs_vectype))
5791 return false;
5793 elem_type = TREE_TYPE (vectype);
5794 vec_mode = TYPE_MODE (vectype);
5796 /* FORNOW. In some cases can vectorize even if data-type not supported
5797 (e.g. - array initialization with 0). */
5798 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
5799 return false;
5801 if (!STMT_VINFO_DATA_REF (stmt_info))
5802 return false;
5804 vect_memory_access_type memory_access_type;
5805 if (!get_load_store_type (stmt, vectype, slp, vls_type, ncopies,
5806 &memory_access_type, &gs_info))
5807 return false;
5809 if (!vec_stmt) /* transformation not required. */
5811 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
5812 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
5813 /* The SLP costs are calculated during SLP analysis. */
5814 if (!PURE_SLP_STMT (stmt_info))
5815 vect_model_store_cost (stmt_info, ncopies, memory_access_type,
5816 vls_type, NULL, NULL, NULL);
5817 return true;
5819 gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
5821 /* Transform. */
5823 ensure_base_align (dr);
5825 if (memory_access_type == VMAT_GATHER_SCATTER)
5827 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, op, src;
5828 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
5829 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
5830 tree ptr, mask, var, scale, perm_mask = NULL_TREE;
5831 edge pe = loop_preheader_edge (loop);
5832 gimple_seq seq;
5833 basic_block new_bb;
5834 enum { NARROW, NONE, WIDEN } modifier;
5835 poly_uint64 scatter_off_nunits
5836 = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);
5838 if (known_eq (nunits, scatter_off_nunits))
5839 modifier = NONE;
5840 else if (known_eq (nunits * 2, scatter_off_nunits))
5842 modifier = WIDEN;
5844 /* Currently gathers and scatters are only supported for
5845 fixed-length vectors. */
5846 unsigned int count = scatter_off_nunits.to_constant ();
5847 vec_perm_builder sel (count, count, 1);
5848 for (i = 0; i < (unsigned int) count; ++i)
5849 sel.quick_push (i | (count / 2));
5851 vec_perm_indices indices (sel, 1, count);
5852 perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype,
5853 indices);
5854 gcc_assert (perm_mask != NULL_TREE);
5856 else if (known_eq (nunits, scatter_off_nunits * 2))
5858 modifier = NARROW;
5860 /* Currently gathers and scatters are only supported for
5861 fixed-length vectors. */
5862 unsigned int count = nunits.to_constant ();
5863 vec_perm_builder sel (count, count, 1);
5864 for (i = 0; i < (unsigned int) count; ++i)
5865 sel.quick_push (i | (count / 2));
5867 vec_perm_indices indices (sel, 2, count);
5868 perm_mask = vect_gen_perm_mask_checked (vectype, indices);
5869 gcc_assert (perm_mask != NULL_TREE);
5870 ncopies *= 2;
5872 else
5873 gcc_unreachable ();
5875 rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
5876 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5877 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5878 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5879 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5880 scaletype = TREE_VALUE (arglist);
5882 gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
5883 && TREE_CODE (rettype) == VOID_TYPE);
5885 ptr = fold_convert (ptrtype, gs_info.base);
5886 if (!is_gimple_min_invariant (ptr))
5888 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
5889 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
5890 gcc_assert (!new_bb);
5893 /* Currently we support only unconditional scatter stores,
5894 so mask should be all ones. */
5895 mask = build_int_cst (masktype, -1);
5896 mask = vect_init_vector (stmt, mask, masktype, NULL);
5898 scale = build_int_cst (scaletype, gs_info.scale);
5900 prev_stmt_info = NULL;
5901 for (j = 0; j < ncopies; ++j)
5903 if (j == 0)
5905 src = vec_oprnd1
5906 = vect_get_vec_def_for_operand (gimple_assign_rhs1 (stmt), stmt);
5907 op = vec_oprnd0
5908 = vect_get_vec_def_for_operand (gs_info.offset, stmt);
5910 else if (modifier != NONE && (j & 1))
5912 if (modifier == WIDEN)
5914 src = vec_oprnd1
5915 = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
5916 op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
5917 stmt, gsi);
5919 else if (modifier == NARROW)
5921 src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask,
5922 stmt, gsi);
5923 op = vec_oprnd0
5924 = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt,
5925 vec_oprnd0);
5927 else
5928 gcc_unreachable ();
5930 else
5932 src = vec_oprnd1
5933 = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
5934 op = vec_oprnd0
5935 = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt,
5936 vec_oprnd0);
5939 if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
5941 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src)),
5942 TYPE_VECTOR_SUBPARTS (srctype)));
5943 var = vect_get_new_ssa_name (srctype, vect_simple_var);
5944 src = build1 (VIEW_CONVERT_EXPR, srctype, src);
5945 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
5946 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5947 src = var;
5950 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
5952 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
5953 TYPE_VECTOR_SUBPARTS (idxtype)));
5954 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
5955 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
5956 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
5957 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5958 op = var;
5961 new_stmt
5962 = gimple_build_call (gs_info.decl, 5, ptr, mask, op, src, scale);
5964 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5966 if (prev_stmt_info == NULL)
5967 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5968 else
5969 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5970 prev_stmt_info = vinfo_for_stmt (new_stmt);
5972 return true;
5975 grouped_store = STMT_VINFO_GROUPED_ACCESS (stmt_info);
5976 if (grouped_store)
5978 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
5979 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5980 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
5982 GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
5984 /* FORNOW */
5985 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
5987 /* We vectorize all the stmts of the interleaving group when we
5988 reach the last stmt in the group. */
5989 if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
5990 < GROUP_SIZE (vinfo_for_stmt (first_stmt))
5991 && !slp)
5993 *vec_stmt = NULL;
5994 return true;
5997 if (slp)
5999 grouped_store = false;
6000 /* VEC_NUM is the number of vect stmts to be created for this
6001 group. */
6002 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6003 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
6004 gcc_assert (GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt)) == first_stmt);
6005 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
6006 op = gimple_assign_rhs1 (first_stmt);
6008 else
6009 /* VEC_NUM is the number of vect stmts to be created for this
6010 group. */
6011 vec_num = group_size;
6013 ref_type = get_group_alias_ptr_type (first_stmt);
6015 else
6017 first_stmt = stmt;
6018 first_dr = dr;
6019 group_size = vec_num = 1;
6020 ref_type = reference_alias_ptr_type (DR_REF (first_dr));
6023 if (dump_enabled_p ())
6024 dump_printf_loc (MSG_NOTE, vect_location,
6025 "transform store. ncopies = %d\n", ncopies);
6027 if (memory_access_type == VMAT_ELEMENTWISE
6028 || memory_access_type == VMAT_STRIDED_SLP)
6030 gimple_stmt_iterator incr_gsi;
6031 bool insert_after;
6032 gimple *incr;
6033 tree offvar;
6034 tree ivstep;
6035 tree running_off;
6036 gimple_seq stmts = NULL;
6037 tree stride_base, stride_step, alias_off;
6038 tree vec_oprnd;
6039 unsigned int g;
6040 /* Checked by get_load_store_type. */
6041 unsigned int const_nunits = nunits.to_constant ();
6043 gcc_assert (!nested_in_vect_loop_p (loop, stmt));
6045 stride_base
6046 = fold_build_pointer_plus
6047 (unshare_expr (DR_BASE_ADDRESS (first_dr)),
6048 size_binop (PLUS_EXPR,
6049 convert_to_ptrofftype (unshare_expr (DR_OFFSET (first_dr))),
6050 convert_to_ptrofftype (DR_INIT (first_dr))));
6051 stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (first_dr)));
6053 /* For a store with loop-invariant (but other than power-of-2)
6054 stride (i.e. not a grouped access) like so:
6056 for (i = 0; i < n; i += stride)
6057 array[i] = ...;
6059 we generate a new induction variable and new stores from
6060 the components of the (vectorized) rhs:
6062 for (j = 0; ; j += VF*stride)
6063 vectemp = ...;
6064 tmp1 = vectemp[0];
6065 array[j] = tmp1;
6066 tmp2 = vectemp[1];
6067 array[j + stride] = tmp2;
6071 unsigned nstores = const_nunits;
6072 unsigned lnel = 1;
6073 tree ltype = elem_type;
6074 tree lvectype = vectype;
6075 if (slp)
6077 if (group_size < const_nunits
6078 && const_nunits % group_size == 0)
6080 nstores = const_nunits / group_size;
6081 lnel = group_size;
6082 ltype = build_vector_type (elem_type, group_size);
6083 lvectype = vectype;
6085 /* First check if vec_extract optab doesn't support extraction
6086 of vector elts directly. */
6087 scalar_mode elmode = SCALAR_TYPE_MODE (elem_type);
6088 machine_mode vmode;
6089 if (!mode_for_vector (elmode, group_size).exists (&vmode)
6090 || !VECTOR_MODE_P (vmode)
6091 || (convert_optab_handler (vec_extract_optab,
6092 TYPE_MODE (vectype), vmode)
6093 == CODE_FOR_nothing))
6095 /* Try to avoid emitting an extract of vector elements
6096 by performing the extracts using an integer type of the
6097 same size, extracting from a vector of those and then
6098 re-interpreting it as the original vector type if
6099 supported. */
6100 unsigned lsize
6101 = group_size * GET_MODE_BITSIZE (elmode);
6102 elmode = int_mode_for_size (lsize, 0).require ();
6103 unsigned int lnunits = const_nunits / group_size;
6104 /* If we can't construct such a vector fall back to
6105 element extracts from the original vector type and
6106 element size stores. */
6107 if (mode_for_vector (elmode, lnunits).exists (&vmode)
6108 && VECTOR_MODE_P (vmode)
6109 && (convert_optab_handler (vec_extract_optab,
6110 vmode, elmode)
6111 != CODE_FOR_nothing))
6113 nstores = lnunits;
6114 lnel = group_size;
6115 ltype = build_nonstandard_integer_type (lsize, 1);
6116 lvectype = build_vector_type (ltype, nstores);
6118 /* Else fall back to vector extraction anyway.
6119 Fewer stores are more important than avoiding spilling
6120 of the vector we extract from. Compared to the
6121 construction case in vectorizable_load no store-forwarding
6122 issue exists here for reasonable archs. */
6125 else if (group_size >= const_nunits
6126 && group_size % const_nunits == 0)
6128 nstores = 1;
6129 lnel = const_nunits;
6130 ltype = vectype;
6131 lvectype = vectype;
6133 ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
6134 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6137 ivstep = stride_step;
6138 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
6139 build_int_cst (TREE_TYPE (ivstep), vf));
6141 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
6143 create_iv (stride_base, ivstep, NULL,
6144 loop, &incr_gsi, insert_after,
6145 &offvar, NULL);
6146 incr = gsi_stmt (incr_gsi);
6147 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
6149 stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
6150 if (stmts)
6151 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
6153 prev_stmt_info = NULL;
6154 alias_off = build_int_cst (ref_type, 0);
6155 next_stmt = first_stmt;
6156 for (g = 0; g < group_size; g++)
6158 running_off = offvar;
6159 if (g)
6161 tree size = TYPE_SIZE_UNIT (ltype);
6162 tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
6163 size);
6164 tree newoff = copy_ssa_name (running_off, NULL);
6165 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6166 running_off, pos);
6167 vect_finish_stmt_generation (stmt, incr, gsi);
6168 running_off = newoff;
6170 unsigned int group_el = 0;
6171 unsigned HOST_WIDE_INT
6172 elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
6173 for (j = 0; j < ncopies; j++)
6175 /* We've set op and dt above, from gimple_assign_rhs1(stmt),
6176 and first_stmt == stmt. */
6177 if (j == 0)
6179 if (slp)
6181 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL,
6182 slp_node);
6183 vec_oprnd = vec_oprnds[0];
6185 else
6187 gcc_assert (gimple_assign_single_p (next_stmt));
6188 op = gimple_assign_rhs1 (next_stmt);
6189 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
6192 else
6194 if (slp)
6195 vec_oprnd = vec_oprnds[j];
6196 else
6198 vect_is_simple_use (vec_oprnd, vinfo, &def_stmt, &dt);
6199 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
6202 /* Pun the vector to extract from if necessary. */
6203 if (lvectype != vectype)
6205 tree tem = make_ssa_name (lvectype);
6206 gimple *pun
6207 = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
6208 lvectype, vec_oprnd));
6209 vect_finish_stmt_generation (stmt, pun, gsi);
6210 vec_oprnd = tem;
6212 for (i = 0; i < nstores; i++)
6214 tree newref, newoff;
6215 gimple *incr, *assign;
6216 tree size = TYPE_SIZE (ltype);
6217 /* Extract the i'th component. */
6218 tree pos = fold_build2 (MULT_EXPR, bitsizetype,
6219 bitsize_int (i), size);
6220 tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
6221 size, pos);
6223 elem = force_gimple_operand_gsi (gsi, elem, true,
6224 NULL_TREE, true,
6225 GSI_SAME_STMT);
6227 tree this_off = build_int_cst (TREE_TYPE (alias_off),
6228 group_el * elsz);
6229 newref = build2 (MEM_REF, ltype,
6230 running_off, this_off);
6232 /* And store it to *running_off. */
6233 assign = gimple_build_assign (newref, elem);
6234 vect_finish_stmt_generation (stmt, assign, gsi);
6236 group_el += lnel;
6237 if (! slp
6238 || group_el == group_size)
6240 newoff = copy_ssa_name (running_off, NULL);
6241 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6242 running_off, stride_step);
6243 vect_finish_stmt_generation (stmt, incr, gsi);
6245 running_off = newoff;
6246 group_el = 0;
6248 if (g == group_size - 1
6249 && !slp)
6251 if (j == 0 && i == 0)
6252 STMT_VINFO_VEC_STMT (stmt_info)
6253 = *vec_stmt = assign;
6254 else
6255 STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign;
6256 prev_stmt_info = vinfo_for_stmt (assign);
6260 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
6261 if (slp)
6262 break;
6265 vec_oprnds.release ();
6266 return true;
6269 auto_vec<tree> dr_chain (group_size);
6270 oprnds.create (group_size);
6272 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
6273 gcc_assert (alignment_support_scheme);
6274 /* Targets with store-lane instructions must not require explicit
6275 realignment. */
6276 gcc_assert (memory_access_type != VMAT_LOAD_STORE_LANES
6277 || alignment_support_scheme == dr_aligned
6278 || alignment_support_scheme == dr_unaligned_supported);
6280 if (memory_access_type == VMAT_CONTIGUOUS_DOWN
6281 || memory_access_type == VMAT_CONTIGUOUS_REVERSE)
6282 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
6284 if (memory_access_type == VMAT_LOAD_STORE_LANES)
6285 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
6286 else
6287 aggr_type = vectype;
6289 /* In case the vectorization factor (VF) is bigger than the number
6290 of elements that we can fit in a vectype (nunits), we have to generate
6291 more than one vector stmt - i.e - we need to "unroll" the
6292 vector stmt by a factor VF/nunits. For more details see documentation in
6293 vect_get_vec_def_for_copy_stmt. */
6295 /* In case of interleaving (non-unit grouped access):
6297 S1: &base + 2 = x2
6298 S2: &base = x0
6299 S3: &base + 1 = x1
6300 S4: &base + 3 = x3
6302 We create vectorized stores starting from base address (the access of the
6303 first stmt in the chain (S2 in the above example), when the last store stmt
6304 of the chain (S4) is reached:
6306 VS1: &base = vx2
6307 VS2: &base + vec_size*1 = vx0
6308 VS3: &base + vec_size*2 = vx1
6309 VS4: &base + vec_size*3 = vx3
6311 Then permutation statements are generated:
6313 VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
6314 VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
6317 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
6318 (the order of the data-refs in the output of vect_permute_store_chain
6319 corresponds to the order of scalar stmts in the interleaving chain - see
6320 the documentation of vect_permute_store_chain()).
6322 In case of both multiple types and interleaving, above vector stores and
6323 permutation stmts are created for every copy. The result vector stmts are
6324 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
6325 STMT_VINFO_RELATED_STMT for the next copies.
6328 prev_stmt_info = NULL;
6329 for (j = 0; j < ncopies; j++)
6332 if (j == 0)
6334 if (slp)
6336 /* Get vectorized arguments for SLP_NODE. */
6337 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
6338 NULL, slp_node);
6340 vec_oprnd = vec_oprnds[0];
6342 else
6344 /* For interleaved stores we collect vectorized defs for all the
6345 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
6346 used as an input to vect_permute_store_chain(), and OPRNDS as
6347 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
6349 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
6350 OPRNDS are of size 1. */
6351 next_stmt = first_stmt;
6352 for (i = 0; i < group_size; i++)
6354 /* Since gaps are not supported for interleaved stores,
6355 GROUP_SIZE is the exact number of stmts in the chain.
6356 Therefore, NEXT_STMT can't be NULL_TREE. In case that
6357 there is no interleaving, GROUP_SIZE is 1, and only one
6358 iteration of the loop will be executed. */
6359 gcc_assert (next_stmt
6360 && gimple_assign_single_p (next_stmt));
6361 op = gimple_assign_rhs1 (next_stmt);
6363 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
6364 dr_chain.quick_push (vec_oprnd);
6365 oprnds.quick_push (vec_oprnd);
6366 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
6370 /* We should have catched mismatched types earlier. */
6371 gcc_assert (useless_type_conversion_p (vectype,
6372 TREE_TYPE (vec_oprnd)));
6373 bool simd_lane_access_p
6374 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
6375 if (simd_lane_access_p
6376 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
6377 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
6378 && integer_zerop (DR_OFFSET (first_dr))
6379 && integer_zerop (DR_INIT (first_dr))
6380 && alias_sets_conflict_p (get_alias_set (aggr_type),
6381 get_alias_set (TREE_TYPE (ref_type))))
6383 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
6384 dataref_offset = build_int_cst (ref_type, 0);
6385 inv_p = false;
6387 else
6388 dataref_ptr
6389 = vect_create_data_ref_ptr (first_stmt, aggr_type,
6390 simd_lane_access_p ? loop : NULL,
6391 offset, &dummy, gsi, &ptr_incr,
6392 simd_lane_access_p, &inv_p);
6393 gcc_assert (bb_vinfo || !inv_p);
6395 else
6397 /* For interleaved stores we created vectorized defs for all the
6398 defs stored in OPRNDS in the previous iteration (previous copy).
6399 DR_CHAIN is then used as an input to vect_permute_store_chain(),
6400 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
6401 next copy.
6402 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
6403 OPRNDS are of size 1. */
6404 for (i = 0; i < group_size; i++)
6406 op = oprnds[i];
6407 vect_is_simple_use (op, vinfo, &def_stmt, &dt);
6408 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
6409 dr_chain[i] = vec_oprnd;
6410 oprnds[i] = vec_oprnd;
6412 if (dataref_offset)
6413 dataref_offset
6414 = int_const_binop (PLUS_EXPR, dataref_offset,
6415 TYPE_SIZE_UNIT (aggr_type));
6416 else
6417 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
6418 TYPE_SIZE_UNIT (aggr_type));
6421 if (memory_access_type == VMAT_LOAD_STORE_LANES)
6423 tree vec_array;
6425 /* Combine all the vectors into an array. */
6426 vec_array = create_vector_array (vectype, vec_num);
6427 for (i = 0; i < vec_num; i++)
6429 vec_oprnd = dr_chain[i];
6430 write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
6433 /* Emit:
6434 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
6435 data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
6436 gcall *call = gimple_build_call_internal (IFN_STORE_LANES, 1,
6437 vec_array);
6438 gimple_call_set_lhs (call, data_ref);
6439 gimple_call_set_nothrow (call, true);
6440 new_stmt = call;
6441 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6443 else
6445 new_stmt = NULL;
6446 if (grouped_store)
6448 if (j == 0)
6449 result_chain.create (group_size);
6450 /* Permute. */
6451 vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
6452 &result_chain);
6455 next_stmt = first_stmt;
6456 for (i = 0; i < vec_num; i++)
6458 unsigned align, misalign;
6460 if (i > 0)
6461 /* Bump the vector pointer. */
6462 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
6463 stmt, NULL_TREE);
6465 if (slp)
6466 vec_oprnd = vec_oprnds[i];
6467 else if (grouped_store)
6468 /* For grouped stores vectorized defs are interleaved in
6469 vect_permute_store_chain(). */
6470 vec_oprnd = result_chain[i];
6472 data_ref = fold_build2 (MEM_REF, vectype,
6473 dataref_ptr,
6474 dataref_offset
6475 ? dataref_offset
6476 : build_int_cst (ref_type, 0));
6477 align = DR_TARGET_ALIGNMENT (first_dr);
6478 if (aligned_access_p (first_dr))
6479 misalign = 0;
6480 else if (DR_MISALIGNMENT (first_dr) == -1)
6482 align = dr_alignment (vect_dr_behavior (first_dr));
6483 misalign = 0;
6484 TREE_TYPE (data_ref)
6485 = build_aligned_type (TREE_TYPE (data_ref),
6486 align * BITS_PER_UNIT);
6488 else
6490 TREE_TYPE (data_ref)
6491 = build_aligned_type (TREE_TYPE (data_ref),
6492 TYPE_ALIGN (elem_type));
6493 misalign = DR_MISALIGNMENT (first_dr);
6495 if (dataref_offset == NULL_TREE
6496 && TREE_CODE (dataref_ptr) == SSA_NAME)
6497 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
6498 misalign);
6500 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
6502 tree perm_mask = perm_mask_for_reverse (vectype);
6503 tree perm_dest
6504 = vect_create_destination_var (gimple_assign_rhs1 (stmt),
6505 vectype);
6506 tree new_temp = make_ssa_name (perm_dest);
6508 /* Generate the permute statement. */
6509 gimple *perm_stmt
6510 = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
6511 vec_oprnd, perm_mask);
6512 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
6514 perm_stmt = SSA_NAME_DEF_STMT (new_temp);
6515 vec_oprnd = new_temp;
6518 /* Arguments are ready. Create the new vector stmt. */
6519 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
6520 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6522 if (slp)
6523 continue;
6525 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
6526 if (!next_stmt)
6527 break;
6530 if (!slp)
6532 if (j == 0)
6533 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6534 else
6535 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6536 prev_stmt_info = vinfo_for_stmt (new_stmt);
6540 oprnds.release ();
6541 result_chain.release ();
6542 vec_oprnds.release ();
6544 return true;
6547 /* Given a vector type VECTYPE, turns permutation SEL into the equivalent
6548 VECTOR_CST mask. No checks are made that the target platform supports the
6549 mask, so callers may wish to test can_vec_perm_const_p separately, or use
6550 vect_gen_perm_mask_checked. */
6552 tree
6553 vect_gen_perm_mask_any (tree vectype, const vec_perm_indices &sel)
6555 tree mask_type;
6557 poly_uint64 nunits = sel.length ();
6558 gcc_assert (known_eq (nunits, TYPE_VECTOR_SUBPARTS (vectype)));
6560 mask_type = build_vector_type (ssizetype, nunits);
6561 return vec_perm_indices_to_tree (mask_type, sel);
6564 /* Checked version of vect_gen_perm_mask_any. Asserts can_vec_perm_const_p,
6565 i.e. that the target supports the pattern _for arbitrary input vectors_. */
6567 tree
6568 vect_gen_perm_mask_checked (tree vectype, const vec_perm_indices &sel)
6570 gcc_assert (can_vec_perm_const_p (TYPE_MODE (vectype), sel));
6571 return vect_gen_perm_mask_any (vectype, sel);
6574 /* Given a vector variable X and Y, that was generated for the scalar
6575 STMT, generate instructions to permute the vector elements of X and Y
6576 using permutation mask MASK_VEC, insert them at *GSI and return the
6577 permuted vector variable. */
6579 static tree
6580 permute_vec_elements (tree x, tree y, tree mask_vec, gimple *stmt,
6581 gimple_stmt_iterator *gsi)
6583 tree vectype = TREE_TYPE (x);
6584 tree perm_dest, data_ref;
6585 gimple *perm_stmt;
6587 perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
6588 data_ref = make_ssa_name (perm_dest);
6590 /* Generate the permute statement. */
6591 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
6592 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
6594 return data_ref;
6597 /* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
6598 inserting them on the loops preheader edge. Returns true if we
6599 were successful in doing so (and thus STMT can be moved then),
6600 otherwise returns false. */
6602 static bool
6603 hoist_defs_of_uses (gimple *stmt, struct loop *loop)
6605 ssa_op_iter i;
6606 tree op;
6607 bool any = false;
6609 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
6611 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
6612 if (!gimple_nop_p (def_stmt)
6613 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
6615 /* Make sure we don't need to recurse. While we could do
6616 so in simple cases when there are more complex use webs
6617 we don't have an easy way to preserve stmt order to fulfil
6618 dependencies within them. */
6619 tree op2;
6620 ssa_op_iter i2;
6621 if (gimple_code (def_stmt) == GIMPLE_PHI)
6622 return false;
6623 FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
6625 gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
6626 if (!gimple_nop_p (def_stmt2)
6627 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
6628 return false;
6630 any = true;
6634 if (!any)
6635 return true;
6637 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
6639 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
6640 if (!gimple_nop_p (def_stmt)
6641 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
6643 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
6644 gsi_remove (&gsi, false);
6645 gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
6649 return true;
6652 /* vectorizable_load.
6654 Check if STMT reads a non scalar data-ref (array/pointer/structure) that
6655 can be vectorized.
6656 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
6657 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
6658 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
6660 static bool
6661 vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
6662 slp_tree slp_node, slp_instance slp_node_instance)
6664 tree scalar_dest;
6665 tree vec_dest = NULL;
6666 tree data_ref = NULL;
6667 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
6668 stmt_vec_info prev_stmt_info;
6669 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6670 struct loop *loop = NULL;
6671 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
6672 bool nested_in_vect_loop = false;
6673 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
6674 tree elem_type;
6675 tree new_temp;
6676 machine_mode mode;
6677 gimple *new_stmt = NULL;
6678 tree dummy;
6679 enum dr_alignment_support alignment_support_scheme;
6680 tree dataref_ptr = NULL_TREE;
6681 tree dataref_offset = NULL_TREE;
6682 gimple *ptr_incr = NULL;
6683 int ncopies;
6684 int i, j;
6685 unsigned int group_size;
6686 poly_uint64 group_gap_adj;
6687 tree msq = NULL_TREE, lsq;
6688 tree offset = NULL_TREE;
6689 tree byte_offset = NULL_TREE;
6690 tree realignment_token = NULL_TREE;
6691 gphi *phi = NULL;
6692 vec<tree> dr_chain = vNULL;
6693 bool grouped_load = false;
6694 gimple *first_stmt;
6695 gimple *first_stmt_for_drptr = NULL;
6696 bool inv_p;
6697 bool compute_in_loop = false;
6698 struct loop *at_loop;
6699 int vec_num;
6700 bool slp = (slp_node != NULL);
6701 bool slp_perm = false;
6702 enum tree_code code;
6703 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
6704 poly_uint64 vf;
6705 tree aggr_type;
6706 gather_scatter_info gs_info;
6707 vec_info *vinfo = stmt_info->vinfo;
6708 tree ref_type;
6710 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
6711 return false;
6713 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
6714 && ! vec_stmt)
6715 return false;
6717 /* Is vectorizable load? */
6718 if (!is_gimple_assign (stmt))
6719 return false;
6721 scalar_dest = gimple_assign_lhs (stmt);
6722 if (TREE_CODE (scalar_dest) != SSA_NAME)
6723 return false;
6725 code = gimple_assign_rhs_code (stmt);
6726 if (code != ARRAY_REF
6727 && code != BIT_FIELD_REF
6728 && code != INDIRECT_REF
6729 && code != COMPONENT_REF
6730 && code != IMAGPART_EXPR
6731 && code != REALPART_EXPR
6732 && code != MEM_REF
6733 && TREE_CODE_CLASS (code) != tcc_declaration)
6734 return false;
6736 if (!STMT_VINFO_DATA_REF (stmt_info))
6737 return false;
6739 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
6740 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
6742 if (loop_vinfo)
6744 loop = LOOP_VINFO_LOOP (loop_vinfo);
6745 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
6746 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
6748 else
6749 vf = 1;
6751 /* Multiple types in SLP are handled by creating the appropriate number of
6752 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
6753 case of SLP. */
6754 if (slp)
6755 ncopies = 1;
6756 else
6757 ncopies = vect_get_num_copies (loop_vinfo, vectype);
6759 gcc_assert (ncopies >= 1);
6761 /* FORNOW. This restriction should be relaxed. */
6762 if (nested_in_vect_loop && ncopies > 1)
6764 if (dump_enabled_p ())
6765 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6766 "multiple types in nested loop.\n");
6767 return false;
6770 /* Invalidate assumptions made by dependence analysis when vectorization
6771 on the unrolled body effectively re-orders stmts. */
6772 if (ncopies > 1
6773 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
6774 && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
6775 STMT_VINFO_MIN_NEG_DIST (stmt_info)))
6777 if (dump_enabled_p ())
6778 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6779 "cannot perform implicit CSE when unrolling "
6780 "with negative dependence distance\n");
6781 return false;
6784 elem_type = TREE_TYPE (vectype);
6785 mode = TYPE_MODE (vectype);
6787 /* FORNOW. In some cases can vectorize even if data-type not supported
6788 (e.g. - data copies). */
6789 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
6791 if (dump_enabled_p ())
6792 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6793 "Aligned load, but unsupported type.\n");
6794 return false;
6797 /* Check if the load is a part of an interleaving chain. */
6798 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
6800 grouped_load = true;
6801 /* FORNOW */
6802 gcc_assert (!nested_in_vect_loop);
6803 gcc_assert (!STMT_VINFO_GATHER_SCATTER_P (stmt_info));
6805 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
6806 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
6808 if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
6809 slp_perm = true;
6811 /* Invalidate assumptions made by dependence analysis when vectorization
6812 on the unrolled body effectively re-orders stmts. */
6813 if (!PURE_SLP_STMT (stmt_info)
6814 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
6815 && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
6816 STMT_VINFO_MIN_NEG_DIST (stmt_info)))
6818 if (dump_enabled_p ())
6819 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6820 "cannot perform implicit CSE when performing "
6821 "group loads with negative dependence distance\n");
6822 return false;
6825 /* Similarly when the stmt is a load that is both part of a SLP
6826 instance and a loop vectorized stmt via the same-dr mechanism
6827 we have to give up. */
6828 if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)
6829 && (STMT_SLP_TYPE (stmt_info)
6830 != STMT_SLP_TYPE (vinfo_for_stmt
6831 (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)))))
6833 if (dump_enabled_p ())
6834 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6835 "conflicting SLP types for CSEd load\n");
6836 return false;
6840 vect_memory_access_type memory_access_type;
6841 if (!get_load_store_type (stmt, vectype, slp, VLS_LOAD, ncopies,
6842 &memory_access_type, &gs_info))
6843 return false;
6845 if (!vec_stmt) /* transformation not required. */
6847 if (!slp)
6848 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
6849 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
6850 /* The SLP costs are calculated during SLP analysis. */
6851 if (!PURE_SLP_STMT (stmt_info))
6852 vect_model_load_cost (stmt_info, ncopies, memory_access_type,
6853 NULL, NULL, NULL);
6854 return true;
6857 if (!slp)
6858 gcc_assert (memory_access_type
6859 == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
6861 if (dump_enabled_p ())
6862 dump_printf_loc (MSG_NOTE, vect_location,
6863 "transform load. ncopies = %d\n", ncopies);
6865 /* Transform. */
6867 ensure_base_align (dr);
6869 if (memory_access_type == VMAT_GATHER_SCATTER)
6871 tree vec_oprnd0 = NULL_TREE, op;
6872 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
6873 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
6874 tree ptr, mask, var, scale, merge, perm_mask = NULL_TREE, prev_res = NULL_TREE;
6875 edge pe = loop_preheader_edge (loop);
6876 gimple_seq seq;
6877 basic_block new_bb;
6878 enum { NARROW, NONE, WIDEN } modifier;
6879 poly_uint64 gather_off_nunits
6880 = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);
6882 if (known_eq (nunits, gather_off_nunits))
6883 modifier = NONE;
6884 else if (known_eq (nunits * 2, gather_off_nunits))
6886 modifier = WIDEN;
6888 /* Currently widening gathers are only supported for
6889 fixed-length vectors. */
6890 int count = gather_off_nunits.to_constant ();
6891 vec_perm_builder sel (count, count, 1);
6892 for (i = 0; i < count; ++i)
6893 sel.quick_push (i | (count / 2));
6895 vec_perm_indices indices (sel, 1, count);
6896 perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype,
6897 indices);
6899 else if (known_eq (nunits, gather_off_nunits * 2))
6901 modifier = NARROW;
6903 /* Currently narrowing gathers are only supported for
6904 fixed-length vectors. */
6905 int count = nunits.to_constant ();
6906 vec_perm_builder sel (count, count, 1);
6907 for (i = 0; i < count; ++i)
6908 sel.quick_push (i < count / 2 ? i : i + count / 2);
6910 vec_perm_indices indices (sel, 2, count);
6911 perm_mask = vect_gen_perm_mask_checked (vectype, indices);
6912 ncopies *= 2;
6914 else
6915 gcc_unreachable ();
6917 rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
6918 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6919 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6920 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6921 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6922 scaletype = TREE_VALUE (arglist);
6923 gcc_checking_assert (types_compatible_p (srctype, rettype));
6925 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6927 ptr = fold_convert (ptrtype, gs_info.base);
6928 if (!is_gimple_min_invariant (ptr))
6930 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
6931 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
6932 gcc_assert (!new_bb);
6935 /* Currently we support only unconditional gather loads,
6936 so mask should be all ones. */
6937 if (TREE_CODE (masktype) == INTEGER_TYPE)
6938 mask = build_int_cst (masktype, -1);
6939 else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
6941 mask = build_int_cst (TREE_TYPE (masktype), -1);
6942 mask = build_vector_from_val (masktype, mask);
6943 mask = vect_init_vector (stmt, mask, masktype, NULL);
6945 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
6947 REAL_VALUE_TYPE r;
6948 long tmp[6];
6949 for (j = 0; j < 6; ++j)
6950 tmp[j] = -1;
6951 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
6952 mask = build_real (TREE_TYPE (masktype), r);
6953 mask = build_vector_from_val (masktype, mask);
6954 mask = vect_init_vector (stmt, mask, masktype, NULL);
6956 else
6957 gcc_unreachable ();
6959 scale = build_int_cst (scaletype, gs_info.scale);
6961 if (TREE_CODE (TREE_TYPE (rettype)) == INTEGER_TYPE)
6962 merge = build_int_cst (TREE_TYPE (rettype), 0);
6963 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype)))
6965 REAL_VALUE_TYPE r;
6966 long tmp[6];
6967 for (j = 0; j < 6; ++j)
6968 tmp[j] = 0;
6969 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (rettype)));
6970 merge = build_real (TREE_TYPE (rettype), r);
6972 else
6973 gcc_unreachable ();
6974 merge = build_vector_from_val (rettype, merge);
6975 merge = vect_init_vector (stmt, merge, rettype, NULL);
6977 prev_stmt_info = NULL;
6978 for (j = 0; j < ncopies; ++j)
6980 if (modifier == WIDEN && (j & 1))
6981 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
6982 perm_mask, stmt, gsi);
6983 else if (j == 0)
6984 op = vec_oprnd0
6985 = vect_get_vec_def_for_operand (gs_info.offset, stmt);
6986 else
6987 op = vec_oprnd0
6988 = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt, vec_oprnd0);
6990 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
6992 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
6993 TYPE_VECTOR_SUBPARTS (idxtype)));
6994 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
6995 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
6996 new_stmt
6997 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
6998 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6999 op = var;
7002 new_stmt
7003 = gimple_build_call (gs_info.decl, 5, merge, ptr, op, mask, scale);
7005 if (!useless_type_conversion_p (vectype, rettype))
7007 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype),
7008 TYPE_VECTOR_SUBPARTS (rettype)));
7009 op = vect_get_new_ssa_name (rettype, vect_simple_var);
7010 gimple_call_set_lhs (new_stmt, op);
7011 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7012 var = make_ssa_name (vec_dest);
7013 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
7014 new_stmt
7015 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
7017 else
7019 var = make_ssa_name (vec_dest, new_stmt);
7020 gimple_call_set_lhs (new_stmt, var);
7023 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7025 if (modifier == NARROW)
7027 if ((j & 1) == 0)
7029 prev_res = var;
7030 continue;
7032 var = permute_vec_elements (prev_res, var,
7033 perm_mask, stmt, gsi);
7034 new_stmt = SSA_NAME_DEF_STMT (var);
7037 if (prev_stmt_info == NULL)
7038 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7039 else
7040 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7041 prev_stmt_info = vinfo_for_stmt (new_stmt);
7043 return true;
7046 if (memory_access_type == VMAT_ELEMENTWISE
7047 || memory_access_type == VMAT_STRIDED_SLP)
7049 gimple_stmt_iterator incr_gsi;
7050 bool insert_after;
7051 gimple *incr;
7052 tree offvar;
7053 tree ivstep;
7054 tree running_off;
7055 vec<constructor_elt, va_gc> *v = NULL;
7056 gimple_seq stmts = NULL;
7057 tree stride_base, stride_step, alias_off;
7058 /* Checked by get_load_store_type. */
7059 unsigned int const_nunits = nunits.to_constant ();
7061 gcc_assert (!nested_in_vect_loop);
7063 if (slp && grouped_load)
7065 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
7066 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
7067 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
7068 ref_type = get_group_alias_ptr_type (first_stmt);
7070 else
7072 first_stmt = stmt;
7073 first_dr = dr;
7074 group_size = 1;
7075 ref_type = reference_alias_ptr_type (DR_REF (first_dr));
7078 stride_base
7079 = fold_build_pointer_plus
7080 (DR_BASE_ADDRESS (first_dr),
7081 size_binop (PLUS_EXPR,
7082 convert_to_ptrofftype (DR_OFFSET (first_dr)),
7083 convert_to_ptrofftype (DR_INIT (first_dr))));
7084 stride_step = fold_convert (sizetype, DR_STEP (first_dr));
7086 /* For a load with loop-invariant (but other than power-of-2)
7087 stride (i.e. not a grouped access) like so:
7089 for (i = 0; i < n; i += stride)
7090 ... = array[i];
7092 we generate a new induction variable and new accesses to
7093 form a new vector (or vectors, depending on ncopies):
7095 for (j = 0; ; j += VF*stride)
7096 tmp1 = array[j];
7097 tmp2 = array[j + stride];
7099 vectemp = {tmp1, tmp2, ...}
7102 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
7103 build_int_cst (TREE_TYPE (stride_step), vf));
7105 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
7107 create_iv (unshare_expr (stride_base), unshare_expr (ivstep), NULL,
7108 loop, &incr_gsi, insert_after,
7109 &offvar, NULL);
7110 incr = gsi_stmt (incr_gsi);
7111 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
7113 stride_step = force_gimple_operand (unshare_expr (stride_step),
7114 &stmts, true, NULL_TREE);
7115 if (stmts)
7116 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
7118 prev_stmt_info = NULL;
7119 running_off = offvar;
7120 alias_off = build_int_cst (ref_type, 0);
7121 int nloads = const_nunits;
7122 int lnel = 1;
7123 tree ltype = TREE_TYPE (vectype);
7124 tree lvectype = vectype;
7125 auto_vec<tree> dr_chain;
7126 if (memory_access_type == VMAT_STRIDED_SLP)
7128 if (group_size < const_nunits)
7130 /* First check if vec_init optab supports construction from
7131 vector elts directly. */
7132 scalar_mode elmode = SCALAR_TYPE_MODE (TREE_TYPE (vectype));
7133 machine_mode vmode;
7134 if (mode_for_vector (elmode, group_size).exists (&vmode)
7135 && VECTOR_MODE_P (vmode)
7136 && (convert_optab_handler (vec_init_optab,
7137 TYPE_MODE (vectype), vmode)
7138 != CODE_FOR_nothing))
7140 nloads = const_nunits / group_size;
7141 lnel = group_size;
7142 ltype = build_vector_type (TREE_TYPE (vectype), group_size);
7144 else
7146 /* Otherwise avoid emitting a constructor of vector elements
7147 by performing the loads using an integer type of the same
7148 size, constructing a vector of those and then
7149 re-interpreting it as the original vector type.
7150 This avoids a huge runtime penalty due to the general
7151 inability to perform store forwarding from smaller stores
7152 to a larger load. */
7153 unsigned lsize
7154 = group_size * TYPE_PRECISION (TREE_TYPE (vectype));
7155 elmode = int_mode_for_size (lsize, 0).require ();
7156 unsigned int lnunits = const_nunits / group_size;
7157 /* If we can't construct such a vector fall back to
7158 element loads of the original vector type. */
7159 if (mode_for_vector (elmode, lnunits).exists (&vmode)
7160 && VECTOR_MODE_P (vmode)
7161 && (convert_optab_handler (vec_init_optab, vmode, elmode)
7162 != CODE_FOR_nothing))
7164 nloads = lnunits;
7165 lnel = group_size;
7166 ltype = build_nonstandard_integer_type (lsize, 1);
7167 lvectype = build_vector_type (ltype, nloads);
7171 else
7173 nloads = 1;
7174 lnel = const_nunits;
7175 ltype = vectype;
7177 ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
7179 if (slp)
7181 /* For SLP permutation support we need to load the whole group,
7182 not only the number of vector stmts the permutation result
7183 fits in. */
7184 if (slp_perm)
7186 /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for
7187 variable VF. */
7188 unsigned int const_vf = vf.to_constant ();
7189 ncopies = CEIL (group_size * const_vf, const_nunits);
7190 dr_chain.create (ncopies);
7192 else
7193 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7195 unsigned int group_el = 0;
7196 unsigned HOST_WIDE_INT
7197 elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
7198 for (j = 0; j < ncopies; j++)
7200 if (nloads > 1)
7201 vec_alloc (v, nloads);
7202 for (i = 0; i < nloads; i++)
7204 tree this_off = build_int_cst (TREE_TYPE (alias_off),
7205 group_el * elsz);
7206 new_stmt = gimple_build_assign (make_ssa_name (ltype),
7207 build2 (MEM_REF, ltype,
7208 running_off, this_off));
7209 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7210 if (nloads > 1)
7211 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE,
7212 gimple_assign_lhs (new_stmt));
7214 group_el += lnel;
7215 if (! slp
7216 || group_el == group_size)
7218 tree newoff = copy_ssa_name (running_off);
7219 gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
7220 running_off, stride_step);
7221 vect_finish_stmt_generation (stmt, incr, gsi);
7223 running_off = newoff;
7224 group_el = 0;
7227 if (nloads > 1)
7229 tree vec_inv = build_constructor (lvectype, v);
7230 new_temp = vect_init_vector (stmt, vec_inv, lvectype, gsi);
7231 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7232 if (lvectype != vectype)
7234 new_stmt = gimple_build_assign (make_ssa_name (vectype),
7235 VIEW_CONVERT_EXPR,
7236 build1 (VIEW_CONVERT_EXPR,
7237 vectype, new_temp));
7238 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7242 if (slp)
7244 if (slp_perm)
7245 dr_chain.quick_push (gimple_assign_lhs (new_stmt));
7246 else
7247 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7249 else
7251 if (j == 0)
7252 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7253 else
7254 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7255 prev_stmt_info = vinfo_for_stmt (new_stmt);
7258 if (slp_perm)
7260 unsigned n_perms;
7261 vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
7262 slp_node_instance, false, &n_perms);
7264 return true;
7267 if (grouped_load)
7269 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
7270 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
7271 /* For SLP vectorization we directly vectorize a subchain
7272 without permutation. */
7273 if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
7274 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
7275 /* For BB vectorization always use the first stmt to base
7276 the data ref pointer on. */
7277 if (bb_vinfo)
7278 first_stmt_for_drptr = SLP_TREE_SCALAR_STMTS (slp_node)[0];
7280 /* Check if the chain of loads is already vectorized. */
7281 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
7282 /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
7283 ??? But we can only do so if there is exactly one
7284 as we have no way to get at the rest. Leave the CSE
7285 opportunity alone.
7286 ??? With the group load eventually participating
7287 in multiple different permutations (having multiple
7288 slp nodes which refer to the same group) the CSE
7289 is even wrong code. See PR56270. */
7290 && !slp)
7292 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
7293 return true;
7295 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
7296 group_gap_adj = 0;
7298 /* VEC_NUM is the number of vect stmts to be created for this group. */
7299 if (slp)
7301 grouped_load = false;
7302 /* For SLP permutation support we need to load the whole group,
7303 not only the number of vector stmts the permutation result
7304 fits in. */
7305 if (slp_perm)
7307 /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for
7308 variable VF. */
7309 unsigned int const_vf = vf.to_constant ();
7310 unsigned int const_nunits = nunits.to_constant ();
7311 vec_num = CEIL (group_size * const_vf, const_nunits);
7312 group_gap_adj = vf * group_size - nunits * vec_num;
7314 else
7316 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7317 group_gap_adj
7318 = group_size - SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
7321 else
7322 vec_num = group_size;
7324 ref_type = get_group_alias_ptr_type (first_stmt);
7326 else
7328 first_stmt = stmt;
7329 first_dr = dr;
7330 group_size = vec_num = 1;
7331 group_gap_adj = 0;
7332 ref_type = reference_alias_ptr_type (DR_REF (first_dr));
7335 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
7336 gcc_assert (alignment_support_scheme);
7337 /* Targets with load-lane instructions must not require explicit
7338 realignment. */
7339 gcc_assert (memory_access_type != VMAT_LOAD_STORE_LANES
7340 || alignment_support_scheme == dr_aligned
7341 || alignment_support_scheme == dr_unaligned_supported);
7343 /* In case the vectorization factor (VF) is bigger than the number
7344 of elements that we can fit in a vectype (nunits), we have to generate
7345 more than one vector stmt - i.e - we need to "unroll" the
7346 vector stmt by a factor VF/nunits. In doing so, we record a pointer
7347 from one copy of the vector stmt to the next, in the field
7348 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
7349 stages to find the correct vector defs to be used when vectorizing
7350 stmts that use the defs of the current stmt. The example below
7351 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
7352 need to create 4 vectorized stmts):
7354 before vectorization:
7355 RELATED_STMT VEC_STMT
7356 S1: x = memref - -
7357 S2: z = x + 1 - -
7359 step 1: vectorize stmt S1:
7360 We first create the vector stmt VS1_0, and, as usual, record a
7361 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
7362 Next, we create the vector stmt VS1_1, and record a pointer to
7363 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
7364 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
7365 stmts and pointers:
7366 RELATED_STMT VEC_STMT
7367 VS1_0: vx0 = memref0 VS1_1 -
7368 VS1_1: vx1 = memref1 VS1_2 -
7369 VS1_2: vx2 = memref2 VS1_3 -
7370 VS1_3: vx3 = memref3 - -
7371 S1: x = load - VS1_0
7372 S2: z = x + 1 - -
7374 See in documentation in vect_get_vec_def_for_stmt_copy for how the
7375 information we recorded in RELATED_STMT field is used to vectorize
7376 stmt S2. */
7378 /* In case of interleaving (non-unit grouped access):
7380 S1: x2 = &base + 2
7381 S2: x0 = &base
7382 S3: x1 = &base + 1
7383 S4: x3 = &base + 3
7385 Vectorized loads are created in the order of memory accesses
7386 starting from the access of the first stmt of the chain:
7388 VS1: vx0 = &base
7389 VS2: vx1 = &base + vec_size*1
7390 VS3: vx3 = &base + vec_size*2
7391 VS4: vx4 = &base + vec_size*3
7393 Then permutation statements are generated:
7395 VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
7396 VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
7399 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
7400 (the order of the data-refs in the output of vect_permute_load_chain
7401 corresponds to the order of scalar stmts in the interleaving chain - see
7402 the documentation of vect_permute_load_chain()).
7403 The generation of permutation stmts and recording them in
7404 STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().
7406 In case of both multiple types and interleaving, the vector loads and
7407 permutation stmts above are created for every copy. The result vector
7408 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
7409 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
7411 /* If the data reference is aligned (dr_aligned) or potentially unaligned
7412 on a target that supports unaligned accesses (dr_unaligned_supported)
7413 we generate the following code:
7414 p = initial_addr;
7415 indx = 0;
7416 loop {
7417 p = p + indx * vectype_size;
7418 vec_dest = *(p);
7419 indx = indx + 1;
7422 Otherwise, the data reference is potentially unaligned on a target that
7423 does not support unaligned accesses (dr_explicit_realign_optimized) -
7424 then generate the following code, in which the data in each iteration is
7425 obtained by two vector loads, one from the previous iteration, and one
7426 from the current iteration:
7427 p1 = initial_addr;
7428 msq_init = *(floor(p1))
7429 p2 = initial_addr + VS - 1;
7430 realignment_token = call target_builtin;
7431 indx = 0;
7432 loop {
7433 p2 = p2 + indx * vectype_size
7434 lsq = *(floor(p2))
7435 vec_dest = realign_load (msq, lsq, realignment_token)
7436 indx = indx + 1;
7437 msq = lsq;
7438 } */
7440 /* If the misalignment remains the same throughout the execution of the
7441 loop, we can create the init_addr and permutation mask at the loop
7442 preheader. Otherwise, it needs to be created inside the loop.
7443 This can only occur when vectorizing memory accesses in the inner-loop
7444 nested within an outer-loop that is being vectorized. */
7446 if (nested_in_vect_loop
7447 && !multiple_p (DR_STEP_ALIGNMENT (dr),
7448 GET_MODE_SIZE (TYPE_MODE (vectype))))
7450 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
7451 compute_in_loop = true;
7454 if ((alignment_support_scheme == dr_explicit_realign_optimized
7455 || alignment_support_scheme == dr_explicit_realign)
7456 && !compute_in_loop)
7458 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
7459 alignment_support_scheme, NULL_TREE,
7460 &at_loop);
7461 if (alignment_support_scheme == dr_explicit_realign_optimized)
7463 phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
7464 byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
7465 size_one_node);
7468 else
7469 at_loop = loop;
7471 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
7472 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
7474 if (memory_access_type == VMAT_LOAD_STORE_LANES)
7475 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
7476 else
7477 aggr_type = vectype;
7479 prev_stmt_info = NULL;
7480 poly_uint64 group_elt = 0;
7481 for (j = 0; j < ncopies; j++)
7483 /* 1. Create the vector or array pointer update chain. */
7484 if (j == 0)
7486 bool simd_lane_access_p
7487 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
7488 if (simd_lane_access_p
7489 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
7490 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
7491 && integer_zerop (DR_OFFSET (first_dr))
7492 && integer_zerop (DR_INIT (first_dr))
7493 && alias_sets_conflict_p (get_alias_set (aggr_type),
7494 get_alias_set (TREE_TYPE (ref_type)))
7495 && (alignment_support_scheme == dr_aligned
7496 || alignment_support_scheme == dr_unaligned_supported))
7498 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
7499 dataref_offset = build_int_cst (ref_type, 0);
7500 inv_p = false;
7502 else if (first_stmt_for_drptr
7503 && first_stmt != first_stmt_for_drptr)
7505 dataref_ptr
7506 = vect_create_data_ref_ptr (first_stmt_for_drptr, aggr_type,
7507 at_loop, offset, &dummy, gsi,
7508 &ptr_incr, simd_lane_access_p,
7509 &inv_p, byte_offset);
7510 /* Adjust the pointer by the difference to first_stmt. */
7511 data_reference_p ptrdr
7512 = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt_for_drptr));
7513 tree diff = fold_convert (sizetype,
7514 size_binop (MINUS_EXPR,
7515 DR_INIT (first_dr),
7516 DR_INIT (ptrdr)));
7517 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7518 stmt, diff);
7520 else
7521 dataref_ptr
7522 = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
7523 offset, &dummy, gsi, &ptr_incr,
7524 simd_lane_access_p, &inv_p,
7525 byte_offset);
7527 else if (dataref_offset)
7528 dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
7529 TYPE_SIZE_UNIT (aggr_type));
7530 else
7531 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
7532 TYPE_SIZE_UNIT (aggr_type));
7534 if (grouped_load || slp_perm)
7535 dr_chain.create (vec_num);
7537 if (memory_access_type == VMAT_LOAD_STORE_LANES)
7539 tree vec_array;
7541 vec_array = create_vector_array (vectype, vec_num);
7543 /* Emit:
7544 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
7545 data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
7546 gcall *call = gimple_build_call_internal (IFN_LOAD_LANES, 1,
7547 data_ref);
7548 gimple_call_set_lhs (call, vec_array);
7549 gimple_call_set_nothrow (call, true);
7550 new_stmt = call;
7551 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7553 /* Extract each vector into an SSA_NAME. */
7554 for (i = 0; i < vec_num; i++)
7556 new_temp = read_vector_array (stmt, gsi, scalar_dest,
7557 vec_array, i);
7558 dr_chain.quick_push (new_temp);
7561 /* Record the mapping between SSA_NAMEs and statements. */
7562 vect_record_grouped_load_vectors (stmt, dr_chain);
7564 else
7566 for (i = 0; i < vec_num; i++)
7568 if (i > 0)
7569 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7570 stmt, NULL_TREE);
7572 /* 2. Create the vector-load in the loop. */
7573 switch (alignment_support_scheme)
7575 case dr_aligned:
7576 case dr_unaligned_supported:
7578 unsigned int align, misalign;
7580 data_ref
7581 = fold_build2 (MEM_REF, vectype, dataref_ptr,
7582 dataref_offset
7583 ? dataref_offset
7584 : build_int_cst (ref_type, 0));
7585 align = DR_TARGET_ALIGNMENT (dr);
7586 if (alignment_support_scheme == dr_aligned)
7588 gcc_assert (aligned_access_p (first_dr));
7589 misalign = 0;
7591 else if (DR_MISALIGNMENT (first_dr) == -1)
7593 align = dr_alignment (vect_dr_behavior (first_dr));
7594 misalign = 0;
7595 TREE_TYPE (data_ref)
7596 = build_aligned_type (TREE_TYPE (data_ref),
7597 align * BITS_PER_UNIT);
7599 else
7601 TREE_TYPE (data_ref)
7602 = build_aligned_type (TREE_TYPE (data_ref),
7603 TYPE_ALIGN (elem_type));
7604 misalign = DR_MISALIGNMENT (first_dr);
7606 if (dataref_offset == NULL_TREE
7607 && TREE_CODE (dataref_ptr) == SSA_NAME)
7608 set_ptr_info_alignment (get_ptr_info (dataref_ptr),
7609 align, misalign);
7610 break;
7612 case dr_explicit_realign:
7614 tree ptr, bump;
7616 tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));
7618 if (compute_in_loop)
7619 msq = vect_setup_realignment (first_stmt, gsi,
7620 &realignment_token,
7621 dr_explicit_realign,
7622 dataref_ptr, NULL);
7624 if (TREE_CODE (dataref_ptr) == SSA_NAME)
7625 ptr = copy_ssa_name (dataref_ptr);
7626 else
7627 ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
7628 unsigned int align = DR_TARGET_ALIGNMENT (first_dr);
7629 new_stmt = gimple_build_assign
7630 (ptr, BIT_AND_EXPR, dataref_ptr,
7631 build_int_cst
7632 (TREE_TYPE (dataref_ptr),
7633 -(HOST_WIDE_INT) align));
7634 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7635 data_ref
7636 = build2 (MEM_REF, vectype, ptr,
7637 build_int_cst (ref_type, 0));
7638 vec_dest = vect_create_destination_var (scalar_dest,
7639 vectype);
7640 new_stmt = gimple_build_assign (vec_dest, data_ref);
7641 new_temp = make_ssa_name (vec_dest, new_stmt);
7642 gimple_assign_set_lhs (new_stmt, new_temp);
7643 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
7644 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
7645 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7646 msq = new_temp;
7648 bump = size_binop (MULT_EXPR, vs,
7649 TYPE_SIZE_UNIT (elem_type));
7650 bump = size_binop (MINUS_EXPR, bump, size_one_node);
7651 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
7652 new_stmt = gimple_build_assign
7653 (NULL_TREE, BIT_AND_EXPR, ptr,
7654 build_int_cst
7655 (TREE_TYPE (ptr), -(HOST_WIDE_INT) align));
7656 ptr = copy_ssa_name (ptr, new_stmt);
7657 gimple_assign_set_lhs (new_stmt, ptr);
7658 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7659 data_ref
7660 = build2 (MEM_REF, vectype, ptr,
7661 build_int_cst (ref_type, 0));
7662 break;
7664 case dr_explicit_realign_optimized:
7666 if (TREE_CODE (dataref_ptr) == SSA_NAME)
7667 new_temp = copy_ssa_name (dataref_ptr);
7668 else
7669 new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
7670 unsigned int align = DR_TARGET_ALIGNMENT (first_dr);
7671 new_stmt = gimple_build_assign
7672 (new_temp, BIT_AND_EXPR, dataref_ptr,
7673 build_int_cst (TREE_TYPE (dataref_ptr),
7674 -(HOST_WIDE_INT) align));
7675 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7676 data_ref
7677 = build2 (MEM_REF, vectype, new_temp,
7678 build_int_cst (ref_type, 0));
7679 break;
7681 default:
7682 gcc_unreachable ();
7684 vec_dest = vect_create_destination_var (scalar_dest, vectype);
7685 new_stmt = gimple_build_assign (vec_dest, data_ref);
7686 new_temp = make_ssa_name (vec_dest, new_stmt);
7687 gimple_assign_set_lhs (new_stmt, new_temp);
7688 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7690 /* 3. Handle explicit realignment if necessary/supported.
7691 Create in loop:
7692 vec_dest = realign_load (msq, lsq, realignment_token) */
7693 if (alignment_support_scheme == dr_explicit_realign_optimized
7694 || alignment_support_scheme == dr_explicit_realign)
7696 lsq = gimple_assign_lhs (new_stmt);
7697 if (!realignment_token)
7698 realignment_token = dataref_ptr;
7699 vec_dest = vect_create_destination_var (scalar_dest, vectype);
7700 new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
7701 msq, lsq, realignment_token);
7702 new_temp = make_ssa_name (vec_dest, new_stmt);
7703 gimple_assign_set_lhs (new_stmt, new_temp);
7704 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7706 if (alignment_support_scheme == dr_explicit_realign_optimized)
7708 gcc_assert (phi);
7709 if (i == vec_num - 1 && j == ncopies - 1)
7710 add_phi_arg (phi, lsq,
7711 loop_latch_edge (containing_loop),
7712 UNKNOWN_LOCATION);
7713 msq = lsq;
7717 /* 4. Handle invariant-load. */
7718 if (inv_p && !bb_vinfo)
7720 gcc_assert (!grouped_load);
7721 /* If we have versioned for aliasing or the loop doesn't
7722 have any data dependencies that would preclude this,
7723 then we are sure this is a loop invariant load and
7724 thus we can insert it on the preheader edge. */
7725 if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
7726 && !nested_in_vect_loop
7727 && hoist_defs_of_uses (stmt, loop))
7729 if (dump_enabled_p ())
7731 dump_printf_loc (MSG_NOTE, vect_location,
7732 "hoisting out of the vectorized "
7733 "loop: ");
7734 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7736 tree tem = copy_ssa_name (scalar_dest);
7737 gsi_insert_on_edge_immediate
7738 (loop_preheader_edge (loop),
7739 gimple_build_assign (tem,
7740 unshare_expr
7741 (gimple_assign_rhs1 (stmt))));
7742 new_temp = vect_init_vector (stmt, tem, vectype, NULL);
7743 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7744 set_vinfo_for_stmt (new_stmt,
7745 new_stmt_vec_info (new_stmt, vinfo));
7747 else
7749 gimple_stmt_iterator gsi2 = *gsi;
7750 gsi_next (&gsi2);
7751 new_temp = vect_init_vector (stmt, scalar_dest,
7752 vectype, &gsi2);
7753 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7757 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
7759 tree perm_mask = perm_mask_for_reverse (vectype);
7760 new_temp = permute_vec_elements (new_temp, new_temp,
7761 perm_mask, stmt, gsi);
7762 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7765 /* Collect vector loads and later create their permutation in
7766 vect_transform_grouped_load (). */
7767 if (grouped_load || slp_perm)
7768 dr_chain.quick_push (new_temp);
7770 /* Store vector loads in the corresponding SLP_NODE. */
7771 if (slp && !slp_perm)
7772 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7774 /* With SLP permutation we load the gaps as well, without
7775 we need to skip the gaps after we manage to fully load
7776 all elements. group_gap_adj is GROUP_SIZE here. */
7777 group_elt += nunits;
7778 if (maybe_ne (group_gap_adj, 0U)
7779 && !slp_perm
7780 && known_eq (group_elt, group_size - group_gap_adj))
7782 poly_wide_int bump_val
7783 = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
7784 * group_gap_adj);
7785 tree bump = wide_int_to_tree (sizetype, bump_val);
7786 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7787 stmt, bump);
7788 group_elt = 0;
7791 /* Bump the vector pointer to account for a gap or for excess
7792 elements loaded for a permuted SLP load. */
7793 if (maybe_ne (group_gap_adj, 0U) && slp_perm)
7795 poly_wide_int bump_val
7796 = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
7797 * group_gap_adj);
7798 tree bump = wide_int_to_tree (sizetype, bump_val);
7799 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7800 stmt, bump);
7804 if (slp && !slp_perm)
7805 continue;
7807 if (slp_perm)
7809 unsigned n_perms;
7810 if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
7811 slp_node_instance, false,
7812 &n_perms))
7814 dr_chain.release ();
7815 return false;
7818 else
7820 if (grouped_load)
7822 if (memory_access_type != VMAT_LOAD_STORE_LANES)
7823 vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
7824 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
7826 else
7828 if (j == 0)
7829 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7830 else
7831 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7832 prev_stmt_info = vinfo_for_stmt (new_stmt);
7835 dr_chain.release ();
7838 return true;
7841 /* Function vect_is_simple_cond.
7843 Input:
7844 LOOP - the loop that is being vectorized.
7845 COND - Condition that is checked for simple use.
7847 Output:
7848 *COMP_VECTYPE - the vector type for the comparison.
7849 *DTS - The def types for the arguments of the comparison
7851 Returns whether a COND can be vectorized. Checks whether
7852 condition operands are supportable using vec_is_simple_use. */
7854 static bool
7855 vect_is_simple_cond (tree cond, vec_info *vinfo,
7856 tree *comp_vectype, enum vect_def_type *dts,
7857 tree vectype)
7859 tree lhs, rhs;
7860 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
7862 /* Mask case. */
7863 if (TREE_CODE (cond) == SSA_NAME
7864 && VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (cond)))
7866 gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (cond);
7867 if (!vect_is_simple_use (cond, vinfo, &lhs_def_stmt,
7868 &dts[0], comp_vectype)
7869 || !*comp_vectype
7870 || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype))
7871 return false;
7872 return true;
7875 if (!COMPARISON_CLASS_P (cond))
7876 return false;
7878 lhs = TREE_OPERAND (cond, 0);
7879 rhs = TREE_OPERAND (cond, 1);
7881 if (TREE_CODE (lhs) == SSA_NAME)
7883 gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
7884 if (!vect_is_simple_use (lhs, vinfo, &lhs_def_stmt, &dts[0], &vectype1))
7885 return false;
7887 else if (TREE_CODE (lhs) == INTEGER_CST || TREE_CODE (lhs) == REAL_CST
7888 || TREE_CODE (lhs) == FIXED_CST)
7889 dts[0] = vect_constant_def;
7890 else
7891 return false;
7893 if (TREE_CODE (rhs) == SSA_NAME)
7895 gimple *rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
7896 if (!vect_is_simple_use (rhs, vinfo, &rhs_def_stmt, &dts[1], &vectype2))
7897 return false;
7899 else if (TREE_CODE (rhs) == INTEGER_CST || TREE_CODE (rhs) == REAL_CST
7900 || TREE_CODE (rhs) == FIXED_CST)
7901 dts[1] = vect_constant_def;
7902 else
7903 return false;
7905 if (vectype1 && vectype2
7906 && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1),
7907 TYPE_VECTOR_SUBPARTS (vectype2)))
7908 return false;
7910 *comp_vectype = vectype1 ? vectype1 : vectype2;
7911 /* Invariant comparison. */
7912 if (! *comp_vectype)
7914 tree scalar_type = TREE_TYPE (lhs);
7915 /* If we can widen the comparison to match vectype do so. */
7916 if (INTEGRAL_TYPE_P (scalar_type)
7917 && tree_int_cst_lt (TYPE_SIZE (scalar_type),
7918 TYPE_SIZE (TREE_TYPE (vectype))))
7919 scalar_type = build_nonstandard_integer_type
7920 (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (vectype))),
7921 TYPE_UNSIGNED (scalar_type));
7922 *comp_vectype = get_vectype_for_scalar_type (scalar_type);
7925 return true;
7928 /* vectorizable_condition.
7930 Check if STMT is conditional modify expression that can be vectorized.
7931 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
7932 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
7933 at GSI.
7935 When STMT is vectorized as nested cycle, REDUC_DEF is the vector variable
7936 to be used at REDUC_INDEX (in then clause if REDUC_INDEX is 1, and in
7937 else clause if it is 2).
7939 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
7941 bool
7942 vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
7943 gimple **vec_stmt, tree reduc_def, int reduc_index,
7944 slp_tree slp_node)
7946 tree scalar_dest = NULL_TREE;
7947 tree vec_dest = NULL_TREE;
7948 tree cond_expr, cond_expr0 = NULL_TREE, cond_expr1 = NULL_TREE;
7949 tree then_clause, else_clause;
7950 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7951 tree comp_vectype = NULL_TREE;
7952 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
7953 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
7954 tree vec_compare;
7955 tree new_temp;
7956 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7957 enum vect_def_type dts[4]
7958 = {vect_unknown_def_type, vect_unknown_def_type,
7959 vect_unknown_def_type, vect_unknown_def_type};
7960 int ndts = 4;
7961 int ncopies;
7962 enum tree_code code, cond_code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
7963 stmt_vec_info prev_stmt_info = NULL;
7964 int i, j;
7965 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7966 vec<tree> vec_oprnds0 = vNULL;
7967 vec<tree> vec_oprnds1 = vNULL;
7968 vec<tree> vec_oprnds2 = vNULL;
7969 vec<tree> vec_oprnds3 = vNULL;
7970 tree vec_cmp_type;
7971 bool masked = false;
7973 if (reduc_index && STMT_SLP_TYPE (stmt_info))
7974 return false;
7976 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == TREE_CODE_REDUCTION)
7978 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
7979 return false;
7981 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
7982 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
7983 && reduc_def))
7984 return false;
7986 /* FORNOW: not yet supported. */
7987 if (STMT_VINFO_LIVE_P (stmt_info))
7989 if (dump_enabled_p ())
7990 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7991 "value used after loop.\n");
7992 return false;
7996 /* Is vectorizable conditional operation? */
7997 if (!is_gimple_assign (stmt))
7998 return false;
8000 code = gimple_assign_rhs_code (stmt);
8002 if (code != COND_EXPR)
8003 return false;
8005 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
8006 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
8008 if (slp_node)
8009 ncopies = 1;
8010 else
8011 ncopies = vect_get_num_copies (loop_vinfo, vectype);
8013 gcc_assert (ncopies >= 1);
8014 if (reduc_index && ncopies > 1)
8015 return false; /* FORNOW */
8017 cond_expr = gimple_assign_rhs1 (stmt);
8018 then_clause = gimple_assign_rhs2 (stmt);
8019 else_clause = gimple_assign_rhs3 (stmt);
8021 if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo,
8022 &comp_vectype, &dts[0], vectype)
8023 || !comp_vectype)
8024 return false;
8026 gimple *def_stmt;
8027 if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &def_stmt, &dts[2],
8028 &vectype1))
8029 return false;
8030 if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &def_stmt, &dts[3],
8031 &vectype2))
8032 return false;
8034 if (vectype1 && !useless_type_conversion_p (vectype, vectype1))
8035 return false;
8037 if (vectype2 && !useless_type_conversion_p (vectype, vectype2))
8038 return false;
8040 masked = !COMPARISON_CLASS_P (cond_expr);
8041 vec_cmp_type = build_same_sized_truth_vector_type (comp_vectype);
8043 if (vec_cmp_type == NULL_TREE)
8044 return false;
8046 cond_code = TREE_CODE (cond_expr);
8047 if (!masked)
8049 cond_expr0 = TREE_OPERAND (cond_expr, 0);
8050 cond_expr1 = TREE_OPERAND (cond_expr, 1);
8053 if (!masked && VECTOR_BOOLEAN_TYPE_P (comp_vectype))
8055 /* Boolean values may have another representation in vectors
8056 and therefore we prefer bit operations over comparison for
8057 them (which also works for scalar masks). We store opcodes
8058 to use in bitop1 and bitop2. Statement is vectorized as
8059 BITOP2 (rhs1 BITOP1 rhs2) or rhs1 BITOP2 (BITOP1 rhs2)
8060 depending on bitop1 and bitop2 arity. */
8061 switch (cond_code)
8063 case GT_EXPR:
8064 bitop1 = BIT_NOT_EXPR;
8065 bitop2 = BIT_AND_EXPR;
8066 break;
8067 case GE_EXPR:
8068 bitop1 = BIT_NOT_EXPR;
8069 bitop2 = BIT_IOR_EXPR;
8070 break;
8071 case LT_EXPR:
8072 bitop1 = BIT_NOT_EXPR;
8073 bitop2 = BIT_AND_EXPR;
8074 std::swap (cond_expr0, cond_expr1);
8075 break;
8076 case LE_EXPR:
8077 bitop1 = BIT_NOT_EXPR;
8078 bitop2 = BIT_IOR_EXPR;
8079 std::swap (cond_expr0, cond_expr1);
8080 break;
8081 case NE_EXPR:
8082 bitop1 = BIT_XOR_EXPR;
8083 break;
8084 case EQ_EXPR:
8085 bitop1 = BIT_XOR_EXPR;
8086 bitop2 = BIT_NOT_EXPR;
8087 break;
8088 default:
8089 return false;
8091 cond_code = SSA_NAME;
8094 if (!vec_stmt)
8096 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
8097 if (bitop1 != NOP_EXPR)
8099 machine_mode mode = TYPE_MODE (comp_vectype);
8100 optab optab;
8102 optab = optab_for_tree_code (bitop1, comp_vectype, optab_default);
8103 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8104 return false;
8106 if (bitop2 != NOP_EXPR)
8108 optab = optab_for_tree_code (bitop2, comp_vectype,
8109 optab_default);
8110 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8111 return false;
8114 if (expand_vec_cond_expr_p (vectype, comp_vectype,
8115 cond_code))
8117 vect_model_simple_cost (stmt_info, ncopies, dts, ndts, NULL, NULL);
8118 return true;
8120 return false;
8123 /* Transform. */
8125 if (!slp_node)
8127 vec_oprnds0.create (1);
8128 vec_oprnds1.create (1);
8129 vec_oprnds2.create (1);
8130 vec_oprnds3.create (1);
8133 /* Handle def. */
8134 scalar_dest = gimple_assign_lhs (stmt);
8135 vec_dest = vect_create_destination_var (scalar_dest, vectype);
8137 /* Handle cond expr. */
8138 for (j = 0; j < ncopies; j++)
8140 gassign *new_stmt = NULL;
8141 if (j == 0)
8143 if (slp_node)
8145 auto_vec<tree, 4> ops;
8146 auto_vec<vec<tree>, 4> vec_defs;
8148 if (masked)
8149 ops.safe_push (cond_expr);
8150 else
8152 ops.safe_push (cond_expr0);
8153 ops.safe_push (cond_expr1);
8155 ops.safe_push (then_clause);
8156 ops.safe_push (else_clause);
8157 vect_get_slp_defs (ops, slp_node, &vec_defs);
8158 vec_oprnds3 = vec_defs.pop ();
8159 vec_oprnds2 = vec_defs.pop ();
8160 if (!masked)
8161 vec_oprnds1 = vec_defs.pop ();
8162 vec_oprnds0 = vec_defs.pop ();
8164 else
8166 gimple *gtemp;
8167 if (masked)
8169 vec_cond_lhs
8170 = vect_get_vec_def_for_operand (cond_expr, stmt,
8171 comp_vectype);
8172 vect_is_simple_use (cond_expr, stmt_info->vinfo,
8173 &gtemp, &dts[0]);
8175 else
8177 vec_cond_lhs
8178 = vect_get_vec_def_for_operand (cond_expr0,
8179 stmt, comp_vectype);
8180 vect_is_simple_use (cond_expr0, loop_vinfo, &gtemp, &dts[0]);
8182 vec_cond_rhs
8183 = vect_get_vec_def_for_operand (cond_expr1,
8184 stmt, comp_vectype);
8185 vect_is_simple_use (cond_expr1, loop_vinfo, &gtemp, &dts[1]);
8187 if (reduc_index == 1)
8188 vec_then_clause = reduc_def;
8189 else
8191 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
8192 stmt);
8193 vect_is_simple_use (then_clause, loop_vinfo,
8194 &gtemp, &dts[2]);
8196 if (reduc_index == 2)
8197 vec_else_clause = reduc_def;
8198 else
8200 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
8201 stmt);
8202 vect_is_simple_use (else_clause, loop_vinfo, &gtemp, &dts[3]);
8206 else
8208 vec_cond_lhs
8209 = vect_get_vec_def_for_stmt_copy (dts[0],
8210 vec_oprnds0.pop ());
8211 if (!masked)
8212 vec_cond_rhs
8213 = vect_get_vec_def_for_stmt_copy (dts[1],
8214 vec_oprnds1.pop ());
8216 vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
8217 vec_oprnds2.pop ());
8218 vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
8219 vec_oprnds3.pop ());
8222 if (!slp_node)
8224 vec_oprnds0.quick_push (vec_cond_lhs);
8225 if (!masked)
8226 vec_oprnds1.quick_push (vec_cond_rhs);
8227 vec_oprnds2.quick_push (vec_then_clause);
8228 vec_oprnds3.quick_push (vec_else_clause);
8231 /* Arguments are ready. Create the new vector stmt. */
8232 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
8234 vec_then_clause = vec_oprnds2[i];
8235 vec_else_clause = vec_oprnds3[i];
8237 if (masked)
8238 vec_compare = vec_cond_lhs;
8239 else
8241 vec_cond_rhs = vec_oprnds1[i];
8242 if (bitop1 == NOP_EXPR)
8243 vec_compare = build2 (cond_code, vec_cmp_type,
8244 vec_cond_lhs, vec_cond_rhs);
8245 else
8247 new_temp = make_ssa_name (vec_cmp_type);
8248 if (bitop1 == BIT_NOT_EXPR)
8249 new_stmt = gimple_build_assign (new_temp, bitop1,
8250 vec_cond_rhs);
8251 else
8252 new_stmt
8253 = gimple_build_assign (new_temp, bitop1, vec_cond_lhs,
8254 vec_cond_rhs);
8255 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8256 if (bitop2 == NOP_EXPR)
8257 vec_compare = new_temp;
8258 else if (bitop2 == BIT_NOT_EXPR)
8260 /* Instead of doing ~x ? y : z do x ? z : y. */
8261 vec_compare = new_temp;
8262 std::swap (vec_then_clause, vec_else_clause);
8264 else
8266 vec_compare = make_ssa_name (vec_cmp_type);
8267 new_stmt
8268 = gimple_build_assign (vec_compare, bitop2,
8269 vec_cond_lhs, new_temp);
8270 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8274 new_temp = make_ssa_name (vec_dest);
8275 new_stmt = gimple_build_assign (new_temp, VEC_COND_EXPR,
8276 vec_compare, vec_then_clause,
8277 vec_else_clause);
8278 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8279 if (slp_node)
8280 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
8283 if (slp_node)
8284 continue;
8286 if (j == 0)
8287 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
8288 else
8289 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
8291 prev_stmt_info = vinfo_for_stmt (new_stmt);
8294 vec_oprnds0.release ();
8295 vec_oprnds1.release ();
8296 vec_oprnds2.release ();
8297 vec_oprnds3.release ();
8299 return true;
8302 /* vectorizable_comparison.
8304 Check if STMT is comparison expression that can be vectorized.
8305 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
8306 comparison, put it in VEC_STMT, and insert it at GSI.
8308 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
8310 static bool
8311 vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
8312 gimple **vec_stmt, tree reduc_def,
8313 slp_tree slp_node)
8315 tree lhs, rhs1, rhs2;
8316 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8317 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
8318 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
8319 tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE;
8320 tree new_temp;
8321 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
8322 enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type};
8323 int ndts = 2;
8324 poly_uint64 nunits;
8325 int ncopies;
8326 enum tree_code code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
8327 stmt_vec_info prev_stmt_info = NULL;
8328 int i, j;
8329 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
8330 vec<tree> vec_oprnds0 = vNULL;
8331 vec<tree> vec_oprnds1 = vNULL;
8332 gimple *def_stmt;
8333 tree mask_type;
8334 tree mask;
8336 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
8337 return false;
8339 if (!vectype || !VECTOR_BOOLEAN_TYPE_P (vectype))
8340 return false;
8342 mask_type = vectype;
8343 nunits = TYPE_VECTOR_SUBPARTS (vectype);
8345 if (slp_node)
8346 ncopies = 1;
8347 else
8348 ncopies = vect_get_num_copies (loop_vinfo, vectype);
8350 gcc_assert (ncopies >= 1);
8351 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
8352 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
8353 && reduc_def))
8354 return false;
8356 if (STMT_VINFO_LIVE_P (stmt_info))
8358 if (dump_enabled_p ())
8359 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8360 "value used after loop.\n");
8361 return false;
8364 if (!is_gimple_assign (stmt))
8365 return false;
8367 code = gimple_assign_rhs_code (stmt);
8369 if (TREE_CODE_CLASS (code) != tcc_comparison)
8370 return false;
8372 rhs1 = gimple_assign_rhs1 (stmt);
8373 rhs2 = gimple_assign_rhs2 (stmt);
8375 if (!vect_is_simple_use (rhs1, stmt_info->vinfo, &def_stmt,
8376 &dts[0], &vectype1))
8377 return false;
8379 if (!vect_is_simple_use (rhs2, stmt_info->vinfo, &def_stmt,
8380 &dts[1], &vectype2))
8381 return false;
8383 if (vectype1 && vectype2
8384 && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1),
8385 TYPE_VECTOR_SUBPARTS (vectype2)))
8386 return false;
8388 vectype = vectype1 ? vectype1 : vectype2;
8390 /* Invariant comparison. */
8391 if (!vectype)
8393 vectype = get_vectype_for_scalar_type (TREE_TYPE (rhs1));
8394 if (maybe_ne (TYPE_VECTOR_SUBPARTS (vectype), nunits))
8395 return false;
8397 else if (maybe_ne (nunits, TYPE_VECTOR_SUBPARTS (vectype)))
8398 return false;
8400 /* Can't compare mask and non-mask types. */
8401 if (vectype1 && vectype2
8402 && (VECTOR_BOOLEAN_TYPE_P (vectype1) ^ VECTOR_BOOLEAN_TYPE_P (vectype2)))
8403 return false;
8405 /* Boolean values may have another representation in vectors
8406 and therefore we prefer bit operations over comparison for
8407 them (which also works for scalar masks). We store opcodes
8408 to use in bitop1 and bitop2. Statement is vectorized as
8409 BITOP2 (rhs1 BITOP1 rhs2) or
8410 rhs1 BITOP2 (BITOP1 rhs2)
8411 depending on bitop1 and bitop2 arity. */
8412 if (VECTOR_BOOLEAN_TYPE_P (vectype))
8414 if (code == GT_EXPR)
8416 bitop1 = BIT_NOT_EXPR;
8417 bitop2 = BIT_AND_EXPR;
8419 else if (code == GE_EXPR)
8421 bitop1 = BIT_NOT_EXPR;
8422 bitop2 = BIT_IOR_EXPR;
8424 else if (code == LT_EXPR)
8426 bitop1 = BIT_NOT_EXPR;
8427 bitop2 = BIT_AND_EXPR;
8428 std::swap (rhs1, rhs2);
8429 std::swap (dts[0], dts[1]);
8431 else if (code == LE_EXPR)
8433 bitop1 = BIT_NOT_EXPR;
8434 bitop2 = BIT_IOR_EXPR;
8435 std::swap (rhs1, rhs2);
8436 std::swap (dts[0], dts[1]);
8438 else
8440 bitop1 = BIT_XOR_EXPR;
8441 if (code == EQ_EXPR)
8442 bitop2 = BIT_NOT_EXPR;
8446 if (!vec_stmt)
8448 STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type;
8449 vect_model_simple_cost (stmt_info, ncopies * (1 + (bitop2 != NOP_EXPR)),
8450 dts, ndts, NULL, NULL);
8451 if (bitop1 == NOP_EXPR)
8452 return expand_vec_cmp_expr_p (vectype, mask_type, code);
8453 else
8455 machine_mode mode = TYPE_MODE (vectype);
8456 optab optab;
8458 optab = optab_for_tree_code (bitop1, vectype, optab_default);
8459 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8460 return false;
8462 if (bitop2 != NOP_EXPR)
8464 optab = optab_for_tree_code (bitop2, vectype, optab_default);
8465 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8466 return false;
8468 return true;
8472 /* Transform. */
8473 if (!slp_node)
8475 vec_oprnds0.create (1);
8476 vec_oprnds1.create (1);
8479 /* Handle def. */
8480 lhs = gimple_assign_lhs (stmt);
8481 mask = vect_create_destination_var (lhs, mask_type);
8483 /* Handle cmp expr. */
8484 for (j = 0; j < ncopies; j++)
8486 gassign *new_stmt = NULL;
8487 if (j == 0)
8489 if (slp_node)
8491 auto_vec<tree, 2> ops;
8492 auto_vec<vec<tree>, 2> vec_defs;
8494 ops.safe_push (rhs1);
8495 ops.safe_push (rhs2);
8496 vect_get_slp_defs (ops, slp_node, &vec_defs);
8497 vec_oprnds1 = vec_defs.pop ();
8498 vec_oprnds0 = vec_defs.pop ();
8500 else
8502 vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt, vectype);
8503 vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt, vectype);
8506 else
8508 vec_rhs1 = vect_get_vec_def_for_stmt_copy (dts[0],
8509 vec_oprnds0.pop ());
8510 vec_rhs2 = vect_get_vec_def_for_stmt_copy (dts[1],
8511 vec_oprnds1.pop ());
8514 if (!slp_node)
8516 vec_oprnds0.quick_push (vec_rhs1);
8517 vec_oprnds1.quick_push (vec_rhs2);
8520 /* Arguments are ready. Create the new vector stmt. */
8521 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_rhs1)
8523 vec_rhs2 = vec_oprnds1[i];
8525 new_temp = make_ssa_name (mask);
8526 if (bitop1 == NOP_EXPR)
8528 new_stmt = gimple_build_assign (new_temp, code,
8529 vec_rhs1, vec_rhs2);
8530 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8532 else
8534 if (bitop1 == BIT_NOT_EXPR)
8535 new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs2);
8536 else
8537 new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs1,
8538 vec_rhs2);
8539 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8540 if (bitop2 != NOP_EXPR)
8542 tree res = make_ssa_name (mask);
8543 if (bitop2 == BIT_NOT_EXPR)
8544 new_stmt = gimple_build_assign (res, bitop2, new_temp);
8545 else
8546 new_stmt = gimple_build_assign (res, bitop2, vec_rhs1,
8547 new_temp);
8548 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8551 if (slp_node)
8552 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
8555 if (slp_node)
8556 continue;
8558 if (j == 0)
8559 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
8560 else
8561 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
8563 prev_stmt_info = vinfo_for_stmt (new_stmt);
8566 vec_oprnds0.release ();
8567 vec_oprnds1.release ();
8569 return true;
8572 /* If SLP_NODE is nonnull, return true if vectorizable_live_operation
8573 can handle all live statements in the node. Otherwise return true
8574 if STMT is not live or if vectorizable_live_operation can handle it.
8575 GSI and VEC_STMT are as for vectorizable_live_operation. */
8577 static bool
8578 can_vectorize_live_stmts (gimple *stmt, gimple_stmt_iterator *gsi,
8579 slp_tree slp_node, gimple **vec_stmt)
8581 if (slp_node)
8583 gimple *slp_stmt;
8584 unsigned int i;
8585 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node), i, slp_stmt)
8587 stmt_vec_info slp_stmt_info = vinfo_for_stmt (slp_stmt);
8588 if (STMT_VINFO_LIVE_P (slp_stmt_info)
8589 && !vectorizable_live_operation (slp_stmt, gsi, slp_node, i,
8590 vec_stmt))
8591 return false;
8594 else if (STMT_VINFO_LIVE_P (vinfo_for_stmt (stmt))
8595 && !vectorizable_live_operation (stmt, gsi, slp_node, -1, vec_stmt))
8596 return false;
8598 return true;
8601 /* Make sure the statement is vectorizable. */
8603 bool
8604 vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
8605 slp_instance node_instance)
8607 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8608 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
8609 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
8610 bool ok;
8611 gimple *pattern_stmt;
8612 gimple_seq pattern_def_seq;
8614 if (dump_enabled_p ())
8616 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
8617 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
8620 if (gimple_has_volatile_ops (stmt))
8622 if (dump_enabled_p ())
8623 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8624 "not vectorized: stmt has volatile operands\n");
8626 return false;
8629 /* Skip stmts that do not need to be vectorized. In loops this is expected
8630 to include:
8631 - the COND_EXPR which is the loop exit condition
8632 - any LABEL_EXPRs in the loop
8633 - computations that are used only for array indexing or loop control.
8634 In basic blocks we only analyze statements that are a part of some SLP
8635 instance, therefore, all the statements are relevant.
8637 Pattern statement needs to be analyzed instead of the original statement
8638 if the original statement is not relevant. Otherwise, we analyze both
8639 statements. In basic blocks we are called from some SLP instance
8640 traversal, don't analyze pattern stmts instead, the pattern stmts
8641 already will be part of SLP instance. */
8643 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
8644 if (!STMT_VINFO_RELEVANT_P (stmt_info)
8645 && !STMT_VINFO_LIVE_P (stmt_info))
8647 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
8648 && pattern_stmt
8649 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
8650 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
8652 /* Analyze PATTERN_STMT instead of the original stmt. */
8653 stmt = pattern_stmt;
8654 stmt_info = vinfo_for_stmt (pattern_stmt);
8655 if (dump_enabled_p ())
8657 dump_printf_loc (MSG_NOTE, vect_location,
8658 "==> examining pattern statement: ");
8659 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
8662 else
8664 if (dump_enabled_p ())
8665 dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");
8667 return true;
8670 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
8671 && node == NULL
8672 && pattern_stmt
8673 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
8674 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
8676 /* Analyze PATTERN_STMT too. */
8677 if (dump_enabled_p ())
8679 dump_printf_loc (MSG_NOTE, vect_location,
8680 "==> examining pattern statement: ");
8681 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
8684 if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node,
8685 node_instance))
8686 return false;
8689 if (is_pattern_stmt_p (stmt_info)
8690 && node == NULL
8691 && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
8693 gimple_stmt_iterator si;
8695 for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
8697 gimple *pattern_def_stmt = gsi_stmt (si);
8698 if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
8699 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
8701 /* Analyze def stmt of STMT if it's a pattern stmt. */
8702 if (dump_enabled_p ())
8704 dump_printf_loc (MSG_NOTE, vect_location,
8705 "==> examining pattern def statement: ");
8706 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
8709 if (!vect_analyze_stmt (pattern_def_stmt,
8710 need_to_vectorize, node, node_instance))
8711 return false;
8716 switch (STMT_VINFO_DEF_TYPE (stmt_info))
8718 case vect_internal_def:
8719 break;
8721 case vect_reduction_def:
8722 case vect_nested_cycle:
8723 gcc_assert (!bb_vinfo
8724 && (relevance == vect_used_in_outer
8725 || relevance == vect_used_in_outer_by_reduction
8726 || relevance == vect_used_by_reduction
8727 || relevance == vect_unused_in_scope
8728 || relevance == vect_used_only_live));
8729 break;
8731 case vect_induction_def:
8732 gcc_assert (!bb_vinfo);
8733 break;
8735 case vect_constant_def:
8736 case vect_external_def:
8737 case vect_unknown_def_type:
8738 default:
8739 gcc_unreachable ();
8742 if (STMT_VINFO_RELEVANT_P (stmt_info))
8744 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
8745 gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
8746 || (is_gimple_call (stmt)
8747 && gimple_call_lhs (stmt) == NULL_TREE));
8748 *need_to_vectorize = true;
8751 if (PURE_SLP_STMT (stmt_info) && !node)
8753 dump_printf_loc (MSG_NOTE, vect_location,
8754 "handled only by SLP analysis\n");
8755 return true;
8758 ok = true;
8759 if (!bb_vinfo
8760 && (STMT_VINFO_RELEVANT_P (stmt_info)
8761 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
8762 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
8763 || vectorizable_conversion (stmt, NULL, NULL, node)
8764 || vectorizable_shift (stmt, NULL, NULL, node)
8765 || vectorizable_operation (stmt, NULL, NULL, node)
8766 || vectorizable_assignment (stmt, NULL, NULL, node)
8767 || vectorizable_load (stmt, NULL, NULL, node, NULL)
8768 || vectorizable_call (stmt, NULL, NULL, node)
8769 || vectorizable_store (stmt, NULL, NULL, node)
8770 || vectorizable_reduction (stmt, NULL, NULL, node, node_instance)
8771 || vectorizable_induction (stmt, NULL, NULL, node)
8772 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
8773 || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
8774 else
8776 if (bb_vinfo)
8777 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
8778 || vectorizable_conversion (stmt, NULL, NULL, node)
8779 || vectorizable_shift (stmt, NULL, NULL, node)
8780 || vectorizable_operation (stmt, NULL, NULL, node)
8781 || vectorizable_assignment (stmt, NULL, NULL, node)
8782 || vectorizable_load (stmt, NULL, NULL, node, NULL)
8783 || vectorizable_call (stmt, NULL, NULL, node)
8784 || vectorizable_store (stmt, NULL, NULL, node)
8785 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
8786 || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
8789 if (!ok)
8791 if (dump_enabled_p ())
8793 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8794 "not vectorized: relevant stmt not ");
8795 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
8796 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
8799 return false;
8802 if (bb_vinfo)
8803 return true;
8805 /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
8806 need extra handling, except for vectorizable reductions. */
8807 if (STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
8808 && !can_vectorize_live_stmts (stmt, NULL, node, NULL))
8810 if (dump_enabled_p ())
8812 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8813 "not vectorized: live stmt not supported: ");
8814 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
8817 return false;
8820 return true;
8824 /* Function vect_transform_stmt.
8826 Create a vectorized stmt to replace STMT, and insert it at BSI. */
8828 bool
8829 vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
8830 bool *grouped_store, slp_tree slp_node,
8831 slp_instance slp_node_instance)
8833 bool is_store = false;
8834 gimple *vec_stmt = NULL;
8835 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8836 bool done;
8838 gcc_assert (slp_node || !PURE_SLP_STMT (stmt_info));
8839 gimple *old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
8841 switch (STMT_VINFO_TYPE (stmt_info))
8843 case type_demotion_vec_info_type:
8844 case type_promotion_vec_info_type:
8845 case type_conversion_vec_info_type:
8846 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
8847 gcc_assert (done);
8848 break;
8850 case induc_vec_info_type:
8851 done = vectorizable_induction (stmt, gsi, &vec_stmt, slp_node);
8852 gcc_assert (done);
8853 break;
8855 case shift_vec_info_type:
8856 done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
8857 gcc_assert (done);
8858 break;
8860 case op_vec_info_type:
8861 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
8862 gcc_assert (done);
8863 break;
8865 case assignment_vec_info_type:
8866 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
8867 gcc_assert (done);
8868 break;
8870 case load_vec_info_type:
8871 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
8872 slp_node_instance);
8873 gcc_assert (done);
8874 break;
8876 case store_vec_info_type:
8877 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
8878 gcc_assert (done);
8879 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
8881 /* In case of interleaving, the whole chain is vectorized when the
8882 last store in the chain is reached. Store stmts before the last
8883 one are skipped, and there vec_stmt_info shouldn't be freed
8884 meanwhile. */
8885 *grouped_store = true;
8886 if (STMT_VINFO_VEC_STMT (stmt_info))
8887 is_store = true;
8889 else
8890 is_store = true;
8891 break;
8893 case condition_vec_info_type:
8894 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
8895 gcc_assert (done);
8896 break;
8898 case comparison_vec_info_type:
8899 done = vectorizable_comparison (stmt, gsi, &vec_stmt, NULL, slp_node);
8900 gcc_assert (done);
8901 break;
8903 case call_vec_info_type:
8904 done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
8905 stmt = gsi_stmt (*gsi);
8906 if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
8907 is_store = true;
8908 break;
8910 case call_simd_clone_vec_info_type:
8911 done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
8912 stmt = gsi_stmt (*gsi);
8913 break;
8915 case reduc_vec_info_type:
8916 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node,
8917 slp_node_instance);
8918 gcc_assert (done);
8919 break;
8921 default:
8922 if (!STMT_VINFO_LIVE_P (stmt_info))
8924 if (dump_enabled_p ())
8925 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8926 "stmt not supported.\n");
8927 gcc_unreachable ();
8931 /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
8932 This would break hybrid SLP vectorization. */
8933 if (slp_node)
8934 gcc_assert (!vec_stmt
8935 && STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt);
8937 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
8938 is being vectorized, but outside the immediately enclosing loop. */
8939 if (vec_stmt
8940 && STMT_VINFO_LOOP_VINFO (stmt_info)
8941 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
8942 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
8943 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
8944 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
8945 || STMT_VINFO_RELEVANT (stmt_info) ==
8946 vect_used_in_outer_by_reduction))
8948 struct loop *innerloop = LOOP_VINFO_LOOP (
8949 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
8950 imm_use_iterator imm_iter;
8951 use_operand_p use_p;
8952 tree scalar_dest;
8953 gimple *exit_phi;
8955 if (dump_enabled_p ())
8956 dump_printf_loc (MSG_NOTE, vect_location,
8957 "Record the vdef for outer-loop vectorization.\n");
8959 /* Find the relevant loop-exit phi-node, and reord the vec_stmt there
8960 (to be used when vectorizing outer-loop stmts that use the DEF of
8961 STMT). */
8962 if (gimple_code (stmt) == GIMPLE_PHI)
8963 scalar_dest = PHI_RESULT (stmt);
8964 else
8965 scalar_dest = gimple_assign_lhs (stmt);
8967 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
8969 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
8971 exit_phi = USE_STMT (use_p);
8972 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
8977 /* Handle stmts whose DEF is used outside the loop-nest that is
8978 being vectorized. */
8979 if (STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
8981 done = can_vectorize_live_stmts (stmt, gsi, slp_node, &vec_stmt);
8982 gcc_assert (done);
8985 if (vec_stmt)
8986 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
8988 return is_store;
8992 /* Remove a group of stores (for SLP or interleaving), free their
8993 stmt_vec_info. */
8995 void
8996 vect_remove_stores (gimple *first_stmt)
8998 gimple *next = first_stmt;
8999 gimple *tmp;
9000 gimple_stmt_iterator next_si;
9002 while (next)
9004 stmt_vec_info stmt_info = vinfo_for_stmt (next);
9006 tmp = GROUP_NEXT_ELEMENT (stmt_info);
9007 if (is_pattern_stmt_p (stmt_info))
9008 next = STMT_VINFO_RELATED_STMT (stmt_info);
9009 /* Free the attached stmt_vec_info and remove the stmt. */
9010 next_si = gsi_for_stmt (next);
9011 unlink_stmt_vdef (next);
9012 gsi_remove (&next_si, true);
9013 release_defs (next);
9014 free_stmt_vec_info (next);
9015 next = tmp;
9020 /* Function new_stmt_vec_info.
9022 Create and initialize a new stmt_vec_info struct for STMT. */
9024 stmt_vec_info
9025 new_stmt_vec_info (gimple *stmt, vec_info *vinfo)
9027 stmt_vec_info res;
9028 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
9030 STMT_VINFO_TYPE (res) = undef_vec_info_type;
9031 STMT_VINFO_STMT (res) = stmt;
9032 res->vinfo = vinfo;
9033 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
9034 STMT_VINFO_LIVE_P (res) = false;
9035 STMT_VINFO_VECTYPE (res) = NULL;
9036 STMT_VINFO_VEC_STMT (res) = NULL;
9037 STMT_VINFO_VECTORIZABLE (res) = true;
9038 STMT_VINFO_IN_PATTERN_P (res) = false;
9039 STMT_VINFO_RELATED_STMT (res) = NULL;
9040 STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
9041 STMT_VINFO_DATA_REF (res) = NULL;
9042 STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION;
9043 STMT_VINFO_VEC_CONST_COND_REDUC_CODE (res) = ERROR_MARK;
9045 if (gimple_code (stmt) == GIMPLE_PHI
9046 && is_loop_header_bb_p (gimple_bb (stmt)))
9047 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
9048 else
9049 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
9051 STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
9052 STMT_SLP_TYPE (res) = loop_vect;
9053 STMT_VINFO_NUM_SLP_USES (res) = 0;
9055 GROUP_FIRST_ELEMENT (res) = NULL;
9056 GROUP_NEXT_ELEMENT (res) = NULL;
9057 GROUP_SIZE (res) = 0;
9058 GROUP_STORE_COUNT (res) = 0;
9059 GROUP_GAP (res) = 0;
9060 GROUP_SAME_DR_STMT (res) = NULL;
9062 return res;
9066 /* Create a hash table for stmt_vec_info. */
9068 void
9069 init_stmt_vec_info_vec (void)
9071 gcc_assert (!stmt_vec_info_vec.exists ());
9072 stmt_vec_info_vec.create (50);
9076 /* Free hash table for stmt_vec_info. */
9078 void
9079 free_stmt_vec_info_vec (void)
9081 unsigned int i;
9082 stmt_vec_info info;
9083 FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
9084 if (info != NULL)
9085 free_stmt_vec_info (STMT_VINFO_STMT (info));
9086 gcc_assert (stmt_vec_info_vec.exists ());
9087 stmt_vec_info_vec.release ();
9091 /* Free stmt vectorization related info. */
9093 void
9094 free_stmt_vec_info (gimple *stmt)
9096 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
9098 if (!stmt_info)
9099 return;
9101 /* Check if this statement has a related "pattern stmt"
9102 (introduced by the vectorizer during the pattern recognition
9103 pass). Free pattern's stmt_vec_info and def stmt's stmt_vec_info
9104 too. */
9105 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
9107 stmt_vec_info patt_info
9108 = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
9109 if (patt_info)
9111 gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
9112 gimple *patt_stmt = STMT_VINFO_STMT (patt_info);
9113 gimple_set_bb (patt_stmt, NULL);
9114 tree lhs = gimple_get_lhs (patt_stmt);
9115 if (lhs && TREE_CODE (lhs) == SSA_NAME)
9116 release_ssa_name (lhs);
9117 if (seq)
9119 gimple_stmt_iterator si;
9120 for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
9122 gimple *seq_stmt = gsi_stmt (si);
9123 gimple_set_bb (seq_stmt, NULL);
9124 lhs = gimple_get_lhs (seq_stmt);
9125 if (lhs && TREE_CODE (lhs) == SSA_NAME)
9126 release_ssa_name (lhs);
9127 free_stmt_vec_info (seq_stmt);
9130 free_stmt_vec_info (patt_stmt);
9134 STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
9135 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
9136 set_vinfo_for_stmt (stmt, NULL);
9137 free (stmt_info);
9141 /* Function get_vectype_for_scalar_type_and_size.
9143 Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
9144 by the target. */
9146 static tree
9147 get_vectype_for_scalar_type_and_size (tree scalar_type, poly_uint64 size)
9149 tree orig_scalar_type = scalar_type;
9150 scalar_mode inner_mode;
9151 machine_mode simd_mode;
9152 poly_uint64 nunits;
9153 tree vectype;
9155 if (!is_int_mode (TYPE_MODE (scalar_type), &inner_mode)
9156 && !is_float_mode (TYPE_MODE (scalar_type), &inner_mode))
9157 return NULL_TREE;
9159 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
9161 /* For vector types of elements whose mode precision doesn't
9162 match their types precision we use a element type of mode
9163 precision. The vectorization routines will have to make sure
9164 they support the proper result truncation/extension.
9165 We also make sure to build vector types with INTEGER_TYPE
9166 component type only. */
9167 if (INTEGRAL_TYPE_P (scalar_type)
9168 && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
9169 || TREE_CODE (scalar_type) != INTEGER_TYPE))
9170 scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
9171 TYPE_UNSIGNED (scalar_type));
9173 /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
9174 When the component mode passes the above test simply use a type
9175 corresponding to that mode. The theory is that any use that
9176 would cause problems with this will disable vectorization anyway. */
9177 else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
9178 && !INTEGRAL_TYPE_P (scalar_type))
9179 scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);
9181 /* We can't build a vector type of elements with alignment bigger than
9182 their size. */
9183 else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
9184 scalar_type = lang_hooks.types.type_for_mode (inner_mode,
9185 TYPE_UNSIGNED (scalar_type));
9187 /* If we felt back to using the mode fail if there was
9188 no scalar type for it. */
9189 if (scalar_type == NULL_TREE)
9190 return NULL_TREE;
9192 /* If no size was supplied use the mode the target prefers. Otherwise
9193 lookup a vector mode of the specified size. */
9194 if (known_eq (size, 0U))
9195 simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
9196 else if (!multiple_p (size, nbytes, &nunits)
9197 || !mode_for_vector (inner_mode, nunits).exists (&simd_mode))
9198 return NULL_TREE;
9199 /* NOTE: nunits == 1 is allowed to support single element vector types. */
9200 if (!multiple_p (GET_MODE_SIZE (simd_mode), nbytes, &nunits))
9201 return NULL_TREE;
9203 vectype = build_vector_type (scalar_type, nunits);
9205 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
9206 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
9207 return NULL_TREE;
9209 /* Re-attach the address-space qualifier if we canonicalized the scalar
9210 type. */
9211 if (TYPE_ADDR_SPACE (orig_scalar_type) != TYPE_ADDR_SPACE (vectype))
9212 return build_qualified_type
9213 (vectype, KEEP_QUAL_ADDR_SPACE (TYPE_QUALS (orig_scalar_type)));
9215 return vectype;
9218 poly_uint64 current_vector_size;
9220 /* Function get_vectype_for_scalar_type.
9222 Returns the vector type corresponding to SCALAR_TYPE as supported
9223 by the target. */
9225 tree
9226 get_vectype_for_scalar_type (tree scalar_type)
9228 tree vectype;
9229 vectype = get_vectype_for_scalar_type_and_size (scalar_type,
9230 current_vector_size);
9231 if (vectype
9232 && known_eq (current_vector_size, 0U))
9233 current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
9234 return vectype;
9237 /* Function get_mask_type_for_scalar_type.
9239 Returns the mask type corresponding to a result of comparison
9240 of vectors of specified SCALAR_TYPE as supported by target. */
9242 tree
9243 get_mask_type_for_scalar_type (tree scalar_type)
9245 tree vectype = get_vectype_for_scalar_type (scalar_type);
9247 if (!vectype)
9248 return NULL;
9250 return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype),
9251 current_vector_size);
9254 /* Function get_same_sized_vectype
9256 Returns a vector type corresponding to SCALAR_TYPE of size
9257 VECTOR_TYPE if supported by the target. */
9259 tree
9260 get_same_sized_vectype (tree scalar_type, tree vector_type)
9262 if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type))
9263 return build_same_sized_truth_vector_type (vector_type);
9265 return get_vectype_for_scalar_type_and_size
9266 (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
9269 /* Function vect_is_simple_use.
9271 Input:
9272 VINFO - the vect info of the loop or basic block that is being vectorized.
9273 OPERAND - operand in the loop or bb.
9274 Output:
9275 DEF_STMT - the defining stmt in case OPERAND is an SSA_NAME.
9276 DT - the type of definition
9278 Returns whether a stmt with OPERAND can be vectorized.
9279 For loops, supportable operands are constants, loop invariants, and operands
9280 that are defined by the current iteration of the loop. Unsupportable
9281 operands are those that are defined by a previous iteration of the loop (as
9282 is the case in reduction/induction computations).
9283 For basic blocks, supportable operands are constants and bb invariants.
9284 For now, operands defined outside the basic block are not supported. */
9286 bool
9287 vect_is_simple_use (tree operand, vec_info *vinfo,
9288 gimple **def_stmt, enum vect_def_type *dt)
9290 *def_stmt = NULL;
9291 *dt = vect_unknown_def_type;
9293 if (dump_enabled_p ())
9295 dump_printf_loc (MSG_NOTE, vect_location,
9296 "vect_is_simple_use: operand ");
9297 dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
9298 dump_printf (MSG_NOTE, "\n");
9301 if (CONSTANT_CLASS_P (operand))
9303 *dt = vect_constant_def;
9304 return true;
9307 if (is_gimple_min_invariant (operand))
9309 *dt = vect_external_def;
9310 return true;
9313 if (TREE_CODE (operand) != SSA_NAME)
9315 if (dump_enabled_p ())
9316 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9317 "not ssa-name.\n");
9318 return false;
9321 if (SSA_NAME_IS_DEFAULT_DEF (operand))
9323 *dt = vect_external_def;
9324 return true;
9327 *def_stmt = SSA_NAME_DEF_STMT (operand);
9328 if (dump_enabled_p ())
9330 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
9331 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
9334 if (! vect_stmt_in_region_p (vinfo, *def_stmt))
9335 *dt = vect_external_def;
9336 else
9338 stmt_vec_info stmt_vinfo = vinfo_for_stmt (*def_stmt);
9339 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
9342 if (dump_enabled_p ())
9344 dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
9345 switch (*dt)
9347 case vect_uninitialized_def:
9348 dump_printf (MSG_NOTE, "uninitialized\n");
9349 break;
9350 case vect_constant_def:
9351 dump_printf (MSG_NOTE, "constant\n");
9352 break;
9353 case vect_external_def:
9354 dump_printf (MSG_NOTE, "external\n");
9355 break;
9356 case vect_internal_def:
9357 dump_printf (MSG_NOTE, "internal\n");
9358 break;
9359 case vect_induction_def:
9360 dump_printf (MSG_NOTE, "induction\n");
9361 break;
9362 case vect_reduction_def:
9363 dump_printf (MSG_NOTE, "reduction\n");
9364 break;
9365 case vect_double_reduction_def:
9366 dump_printf (MSG_NOTE, "double reduction\n");
9367 break;
9368 case vect_nested_cycle:
9369 dump_printf (MSG_NOTE, "nested cycle\n");
9370 break;
9371 case vect_unknown_def_type:
9372 dump_printf (MSG_NOTE, "unknown\n");
9373 break;
9377 if (*dt == vect_unknown_def_type)
9379 if (dump_enabled_p ())
9380 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9381 "Unsupported pattern.\n");
9382 return false;
9385 switch (gimple_code (*def_stmt))
9387 case GIMPLE_PHI:
9388 case GIMPLE_ASSIGN:
9389 case GIMPLE_CALL:
9390 break;
9391 default:
9392 if (dump_enabled_p ())
9393 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9394 "unsupported defining stmt:\n");
9395 return false;
9398 return true;
9401 /* Function vect_is_simple_use.
9403 Same as vect_is_simple_use but also determines the vector operand
9404 type of OPERAND and stores it to *VECTYPE. If the definition of
9405 OPERAND is vect_uninitialized_def, vect_constant_def or
9406 vect_external_def *VECTYPE will be set to NULL_TREE and the caller
9407 is responsible to compute the best suited vector type for the
9408 scalar operand. */
9410 bool
9411 vect_is_simple_use (tree operand, vec_info *vinfo,
9412 gimple **def_stmt, enum vect_def_type *dt, tree *vectype)
9414 if (!vect_is_simple_use (operand, vinfo, def_stmt, dt))
9415 return false;
9417 /* Now get a vector type if the def is internal, otherwise supply
9418 NULL_TREE and leave it up to the caller to figure out a proper
9419 type for the use stmt. */
9420 if (*dt == vect_internal_def
9421 || *dt == vect_induction_def
9422 || *dt == vect_reduction_def
9423 || *dt == vect_double_reduction_def
9424 || *dt == vect_nested_cycle)
9426 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
9428 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
9429 && !STMT_VINFO_RELEVANT (stmt_info)
9430 && !STMT_VINFO_LIVE_P (stmt_info))
9431 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
9433 *vectype = STMT_VINFO_VECTYPE (stmt_info);
9434 gcc_assert (*vectype != NULL_TREE);
9436 else if (*dt == vect_uninitialized_def
9437 || *dt == vect_constant_def
9438 || *dt == vect_external_def)
9439 *vectype = NULL_TREE;
9440 else
9441 gcc_unreachable ();
9443 return true;
9447 /* Function supportable_widening_operation
9449 Check whether an operation represented by the code CODE is a
9450 widening operation that is supported by the target platform in
9451 vector form (i.e., when operating on arguments of type VECTYPE_IN
9452 producing a result of type VECTYPE_OUT).
9454 Widening operations we currently support are NOP (CONVERT), FLOAT
9455 and WIDEN_MULT. This function checks if these operations are supported
9456 by the target platform either directly (via vector tree-codes), or via
9457 target builtins.
9459 Output:
9460 - CODE1 and CODE2 are codes of vector operations to be used when
9461 vectorizing the operation, if available.
9462 - MULTI_STEP_CVT determines the number of required intermediate steps in
9463 case of multi-step conversion (like char->short->int - in that case
9464 MULTI_STEP_CVT will be 1).
9465 - INTERM_TYPES contains the intermediate type required to perform the
9466 widening operation (short in the above example). */
9468 bool
9469 supportable_widening_operation (enum tree_code code, gimple *stmt,
9470 tree vectype_out, tree vectype_in,
9471 enum tree_code *code1, enum tree_code *code2,
9472 int *multi_step_cvt,
9473 vec<tree> *interm_types)
9475 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
9476 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
9477 struct loop *vect_loop = NULL;
9478 machine_mode vec_mode;
9479 enum insn_code icode1, icode2;
9480 optab optab1, optab2;
9481 tree vectype = vectype_in;
9482 tree wide_vectype = vectype_out;
9483 enum tree_code c1, c2;
9484 int i;
9485 tree prev_type, intermediate_type;
9486 machine_mode intermediate_mode, prev_mode;
9487 optab optab3, optab4;
9489 *multi_step_cvt = 0;
9490 if (loop_info)
9491 vect_loop = LOOP_VINFO_LOOP (loop_info);
9493 switch (code)
9495 case WIDEN_MULT_EXPR:
9496 /* The result of a vectorized widening operation usually requires
9497 two vectors (because the widened results do not fit into one vector).
9498 The generated vector results would normally be expected to be
9499 generated in the same order as in the original scalar computation,
9500 i.e. if 8 results are generated in each vector iteration, they are
9501 to be organized as follows:
9502 vect1: [res1,res2,res3,res4],
9503 vect2: [res5,res6,res7,res8].
9505 However, in the special case that the result of the widening
9506 operation is used in a reduction computation only, the order doesn't
9507 matter (because when vectorizing a reduction we change the order of
9508 the computation). Some targets can take advantage of this and
9509 generate more efficient code. For example, targets like Altivec,
9510 that support widen_mult using a sequence of {mult_even,mult_odd}
9511 generate the following vectors:
9512 vect1: [res1,res3,res5,res7],
9513 vect2: [res2,res4,res6,res8].
9515 When vectorizing outer-loops, we execute the inner-loop sequentially
9516 (each vectorized inner-loop iteration contributes to VF outer-loop
9517 iterations in parallel). We therefore don't allow to change the
9518 order of the computation in the inner-loop during outer-loop
9519 vectorization. */
9520 /* TODO: Another case in which order doesn't *really* matter is when we
9521 widen and then contract again, e.g. (short)((int)x * y >> 8).
9522 Normally, pack_trunc performs an even/odd permute, whereas the
9523 repack from an even/odd expansion would be an interleave, which
9524 would be significantly simpler for e.g. AVX2. */
9525 /* In any case, in order to avoid duplicating the code below, recurse
9526 on VEC_WIDEN_MULT_EVEN_EXPR. If it succeeds, all the return values
9527 are properly set up for the caller. If we fail, we'll continue with
9528 a VEC_WIDEN_MULT_LO/HI_EXPR check. */
9529 if (vect_loop
9530 && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
9531 && !nested_in_vect_loop_p (vect_loop, stmt)
9532 && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
9533 stmt, vectype_out, vectype_in,
9534 code1, code2, multi_step_cvt,
9535 interm_types))
9537 /* Elements in a vector with vect_used_by_reduction property cannot
9538 be reordered if the use chain with this property does not have the
9539 same operation. One such an example is s += a * b, where elements
9540 in a and b cannot be reordered. Here we check if the vector defined
9541 by STMT is only directly used in the reduction statement. */
9542 tree lhs = gimple_assign_lhs (stmt);
9543 use_operand_p dummy;
9544 gimple *use_stmt;
9545 stmt_vec_info use_stmt_info = NULL;
9546 if (single_imm_use (lhs, &dummy, &use_stmt)
9547 && (use_stmt_info = vinfo_for_stmt (use_stmt))
9548 && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
9549 return true;
9551 c1 = VEC_WIDEN_MULT_LO_EXPR;
9552 c2 = VEC_WIDEN_MULT_HI_EXPR;
9553 break;
9555 case DOT_PROD_EXPR:
9556 c1 = DOT_PROD_EXPR;
9557 c2 = DOT_PROD_EXPR;
9558 break;
9560 case SAD_EXPR:
9561 c1 = SAD_EXPR;
9562 c2 = SAD_EXPR;
9563 break;
9565 case VEC_WIDEN_MULT_EVEN_EXPR:
9566 /* Support the recursion induced just above. */
9567 c1 = VEC_WIDEN_MULT_EVEN_EXPR;
9568 c2 = VEC_WIDEN_MULT_ODD_EXPR;
9569 break;
9571 case WIDEN_LSHIFT_EXPR:
9572 c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
9573 c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
9574 break;
9576 CASE_CONVERT:
9577 c1 = VEC_UNPACK_LO_EXPR;
9578 c2 = VEC_UNPACK_HI_EXPR;
9579 break;
9581 case FLOAT_EXPR:
9582 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
9583 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
9584 break;
9586 case FIX_TRUNC_EXPR:
9587 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
9588 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
9589 computing the operation. */
9590 return false;
9592 default:
9593 gcc_unreachable ();
9596 if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
9597 std::swap (c1, c2);
9599 if (code == FIX_TRUNC_EXPR)
9601 /* The signedness is determined from output operand. */
9602 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
9603 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
9605 else
9607 optab1 = optab_for_tree_code (c1, vectype, optab_default);
9608 optab2 = optab_for_tree_code (c2, vectype, optab_default);
9611 if (!optab1 || !optab2)
9612 return false;
9614 vec_mode = TYPE_MODE (vectype);
9615 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
9616 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
9617 return false;
9619 *code1 = c1;
9620 *code2 = c2;
9622 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
9623 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
9624 /* For scalar masks we may have different boolean
9625 vector types having the same QImode. Thus we
9626 add additional check for elements number. */
9627 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
9628 || known_eq (TYPE_VECTOR_SUBPARTS (vectype),
9629 TYPE_VECTOR_SUBPARTS (wide_vectype) * 2));
9631 /* Check if it's a multi-step conversion that can be done using intermediate
9632 types. */
9634 prev_type = vectype;
9635 prev_mode = vec_mode;
9637 if (!CONVERT_EXPR_CODE_P (code))
9638 return false;
9640 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
9641 intermediate steps in promotion sequence. We try
9642 MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do
9643 not. */
9644 interm_types->create (MAX_INTERM_CVT_STEPS);
9645 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
9647 intermediate_mode = insn_data[icode1].operand[0].mode;
9648 if (VECTOR_BOOLEAN_TYPE_P (prev_type))
9650 poly_uint64 intermediate_nelts
9651 = exact_div (TYPE_VECTOR_SUBPARTS (prev_type), 2);
9652 intermediate_type
9653 = build_truth_vector_type (intermediate_nelts,
9654 current_vector_size);
9655 if (intermediate_mode != TYPE_MODE (intermediate_type))
9656 return false;
9658 else
9659 intermediate_type
9660 = lang_hooks.types.type_for_mode (intermediate_mode,
9661 TYPE_UNSIGNED (prev_type));
9663 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
9664 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
9666 if (!optab3 || !optab4
9667 || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
9668 || insn_data[icode1].operand[0].mode != intermediate_mode
9669 || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
9670 || insn_data[icode2].operand[0].mode != intermediate_mode
9671 || ((icode1 = optab_handler (optab3, intermediate_mode))
9672 == CODE_FOR_nothing)
9673 || ((icode2 = optab_handler (optab4, intermediate_mode))
9674 == CODE_FOR_nothing))
9675 break;
9677 interm_types->quick_push (intermediate_type);
9678 (*multi_step_cvt)++;
9680 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
9681 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
9682 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
9683 || known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type),
9684 TYPE_VECTOR_SUBPARTS (wide_vectype) * 2));
9686 prev_type = intermediate_type;
9687 prev_mode = intermediate_mode;
9690 interm_types->release ();
9691 return false;
9695 /* Function supportable_narrowing_operation
9697 Check whether an operation represented by the code CODE is a
9698 narrowing operation that is supported by the target platform in
9699 vector form (i.e., when operating on arguments of type VECTYPE_IN
9700 and producing a result of type VECTYPE_OUT).
9702 Narrowing operations we currently support are NOP (CONVERT) and
9703 FIX_TRUNC. This function checks if these operations are supported by
9704 the target platform directly via vector tree-codes.
9706 Output:
9707 - CODE1 is the code of a vector operation to be used when
9708 vectorizing the operation, if available.
9709 - MULTI_STEP_CVT determines the number of required intermediate steps in
9710 case of multi-step conversion (like int->short->char - in that case
9711 MULTI_STEP_CVT will be 1).
9712 - INTERM_TYPES contains the intermediate type required to perform the
9713 narrowing operation (short in the above example). */
9715 bool
9716 supportable_narrowing_operation (enum tree_code code,
9717 tree vectype_out, tree vectype_in,
9718 enum tree_code *code1, int *multi_step_cvt,
9719 vec<tree> *interm_types)
9721 machine_mode vec_mode;
9722 enum insn_code icode1;
9723 optab optab1, interm_optab;
9724 tree vectype = vectype_in;
9725 tree narrow_vectype = vectype_out;
9726 enum tree_code c1;
9727 tree intermediate_type, prev_type;
9728 machine_mode intermediate_mode, prev_mode;
9729 int i;
9730 bool uns;
9732 *multi_step_cvt = 0;
9733 switch (code)
9735 CASE_CONVERT:
9736 c1 = VEC_PACK_TRUNC_EXPR;
9737 break;
9739 case FIX_TRUNC_EXPR:
9740 c1 = VEC_PACK_FIX_TRUNC_EXPR;
9741 break;
9743 case FLOAT_EXPR:
9744 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
9745 tree code and optabs used for computing the operation. */
9746 return false;
9748 default:
9749 gcc_unreachable ();
9752 if (code == FIX_TRUNC_EXPR)
9753 /* The signedness is determined from output operand. */
9754 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
9755 else
9756 optab1 = optab_for_tree_code (c1, vectype, optab_default);
9758 if (!optab1)
9759 return false;
9761 vec_mode = TYPE_MODE (vectype);
9762 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
9763 return false;
9765 *code1 = c1;
9767 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
9768 /* For scalar masks we may have different boolean
9769 vector types having the same QImode. Thus we
9770 add additional check for elements number. */
9771 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
9772 || known_eq (TYPE_VECTOR_SUBPARTS (vectype) * 2,
9773 TYPE_VECTOR_SUBPARTS (narrow_vectype)));
9775 /* Check if it's a multi-step conversion that can be done using intermediate
9776 types. */
9777 prev_mode = vec_mode;
9778 prev_type = vectype;
9779 if (code == FIX_TRUNC_EXPR)
9780 uns = TYPE_UNSIGNED (vectype_out);
9781 else
9782 uns = TYPE_UNSIGNED (vectype);
9784 /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
9785 conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
9786 costly than signed. */
9787 if (code == FIX_TRUNC_EXPR && uns)
9789 enum insn_code icode2;
9791 intermediate_type
9792 = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
9793 interm_optab
9794 = optab_for_tree_code (c1, intermediate_type, optab_default);
9795 if (interm_optab != unknown_optab
9796 && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
9797 && insn_data[icode1].operand[0].mode
9798 == insn_data[icode2].operand[0].mode)
9800 uns = false;
9801 optab1 = interm_optab;
9802 icode1 = icode2;
9806 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
9807 intermediate steps in promotion sequence. We try
9808 MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not. */
9809 interm_types->create (MAX_INTERM_CVT_STEPS);
9810 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
9812 intermediate_mode = insn_data[icode1].operand[0].mode;
9813 if (VECTOR_BOOLEAN_TYPE_P (prev_type))
9815 intermediate_type
9816 = build_truth_vector_type (TYPE_VECTOR_SUBPARTS (prev_type) * 2,
9817 current_vector_size);
9818 if (intermediate_mode != TYPE_MODE (intermediate_type))
9819 return false;
9821 else
9822 intermediate_type
9823 = lang_hooks.types.type_for_mode (intermediate_mode, uns);
9824 interm_optab
9825 = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
9826 optab_default);
9827 if (!interm_optab
9828 || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
9829 || insn_data[icode1].operand[0].mode != intermediate_mode
9830 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
9831 == CODE_FOR_nothing))
9832 break;
9834 interm_types->quick_push (intermediate_type);
9835 (*multi_step_cvt)++;
9837 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
9838 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
9839 || known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type) * 2,
9840 TYPE_VECTOR_SUBPARTS (narrow_vectype)));
9842 prev_mode = intermediate_mode;
9843 prev_type = intermediate_type;
9844 optab1 = interm_optab;
9847 interm_types->release ();
9848 return false;