/* gcc/tree-vect-stmts.c  */
/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003-2018 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "cgraph.h"
#include "dumpfile.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "tree-eh.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "cfgloop.h"
#include "tree-ssa-loop.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "builtins.h"
#include "internal-fn.h"
#include "tree-vector-builder.h"
#include "vec-perm-indices.h"

/* For lang_hooks.types.type_for_mode.  */
#include "langhooks.h"
/* Says whether a statement is a load, a store of a vectorized statement
   result, or a store of an invariant value.  */
enum vec_load_store_type {
  VLS_LOAD,
  VLS_STORE,
  VLS_STORE_INVARIANT
};

/* Return the vectorized type for the given statement.  */

tree
stmt_vectype (struct _stmt_vec_info *stmt_info)
{
  return STMT_VINFO_VECTYPE (stmt_info);
}
/* Return TRUE iff the given statement is in an inner loop relative to
   the loop being vectorized.  */
bool
stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
{
  gimple *stmt = STMT_VINFO_STMT (stmt_info);
  basic_block bb = gimple_bb (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop* loop;

  if (!loop_vinfo)
    return false;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  return (bb->loop_father == loop->inner);
}
/* Record the cost of a statement, either by directly informing the
   target model or by saving it in a vector for later processing.
   Return a preliminary estimate of the statement's cost.  */

unsigned
record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
		  enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
		  int misalign, enum vect_cost_model_location where)
{
  if ((kind == vector_load || kind == unaligned_load)
      && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    kind = vector_gather_load;
  if ((kind == vector_store || kind == unaligned_store)
      && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    kind = vector_scatter_store;
  if (body_cost_vec)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      stmt_info_for_cost si = { count, kind,
				stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
				misalign };
      body_cost_vec->safe_push (si);
      return (unsigned)
	(builtin_vectorization_cost (kind, vectype, misalign) * count);
    }
  else
    return add_stmt_cost (stmt_info->vinfo->target_cost_data,
			  count, kind, stmt_info, misalign, where);
}
/* Return a variable of type ELEM_TYPE[NELEMS].  */

static tree
create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
			 "vect_array");
}

/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT and the vector is associated
   with scalar destination SCALAR_DEST.  */

static tree
read_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
		   tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;
  gimple *new_stmt;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return vect_name;
}

/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT.  */

static void
write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect,
		    tree array, unsigned HOST_WIDE_INT n)
{
  tree array_ref;
  gimple *new_stmt;

  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
}

/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

static tree
create_array_ref (tree type, tree ptr, tree alias_ptr_type)
{
  tree mem_ref;

  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
  return mem_ref;
}
/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
		    enum vect_relevant relevant, bool live_p)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
  gimple *pattern_stmt;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "mark relevant %d, live %d: ", relevant, live_p);
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
    }

  /* If this stmt is an original stmt in a pattern, we might need to mark its
     related pattern stmt instead of the original stmt.  However, such stmts
     may have their own uses that are not in any pattern, in such cases the
     stmt itself should be marked.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      /* This is the last stmt in a sequence that was detected as a
	 pattern that can potentially be vectorized.  Don't mark the stmt
	 as relevant/live because it's not going to be vectorized.
	 Instead mark the pattern-stmt that replaces it.  */

      pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "last stmt in pattern. don't mark"
			 " relevant/live.\n");
      stmt_info = vinfo_for_stmt (pattern_stmt);
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
      save_relevant = STMT_VINFO_RELEVANT (stmt_info);
      save_live_p = STMT_VINFO_LIVE_P (stmt_info);
      stmt = pattern_stmt;
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "already marked relevant/live.\n");
      return;
    }

  worklist->safe_push (stmt);
}
/* Function is_simple_and_all_uses_invariant

   Return true if STMT is simple and all uses of it are invariant.  */

bool
is_simple_and_all_uses_invariant (gimple *stmt, loop_vec_info loop_vinfo)
{
  tree op;
  gimple *def_stmt;
  ssa_op_iter iter;

  if (!is_gimple_assign (stmt))
    return false;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
    {
      enum vect_def_type dt = vect_uninitialized_def;

      if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}

      if (dt != vect_external_def && dt != vect_constant_def)
	return false;
    }
  return true;
}
/* Function vect_stmt_relevant_p.

   Return true if STMT in loop that is represented by LOOP_VINFO is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - control stmts in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */

static bool
vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
		      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
	 != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt)
	&& !gimple_clobber_p (stmt))
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vec_stmt_relevant_p: stmt has vdefs.\n");
	*relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
	{
	  basic_block bb = gimple_bb (USE_STMT (use_p));
	  if (!flow_bb_inside_loop_p (loop, bb))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "vec_stmt_relevant_p: used out of loop.\n");

	      if (is_gimple_debug (USE_STMT (use_p)))
		continue;

	      /* We expect all such uses to be in the loop exit phis
		 (because of loop closed form)   */
	      gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
	      gcc_assert (bb == single_exit (loop)->dest);

	      *live_p = true;
	    }
	}
    }

  if (*live_p && *relevant == vect_unused_in_scope
      && !is_simple_and_all_uses_invariant (stmt, loop_vinfo))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vec_stmt_relevant_p: stmt live but not relevant.\n");
      *relevant = vect_used_only_live;
    }

  return (*live_p || *relevant);
}
/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref.  FORNOW this means that it's of one of
     the following forms:
     -1- ARRAY_REF = var
     -2- var = ARRAY_REF
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))
    {
      if (is_gimple_call (stmt)
	  && gimple_call_internal_p (stmt))
	switch (gimple_call_internal_fn (stmt))
	  {
	  case IFN_MASK_STORE:
	    operand = gimple_call_arg (stmt, 3);
	    if (operand == use)
	      return true;
	    /* FALLTHRU */
	  case IFN_MASK_LOAD:
	    operand = gimple_call_arg (stmt, 2);
	    if (operand == use)
	      return true;
	    break;
	  default:
	    break;
	  }
      return false;
    }

  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}
/* Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - RELEVANT - enum value to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
   - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
     be performed.

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
       STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array indexing),
     which does not need to be directly vectorized, then the liveness/relevance
     of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
     skip DEF_STMT because it has already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
     be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */

static bool
process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo,
	     enum vect_relevant relevant, vec<gimple *> *worklist,
	     bool force)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  gimple *def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: unsupported use in stmt.\n");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "reduc-stmt defining reduc-phi in the same nest.\n");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
	dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
		  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
	outer-loop-header-bb:
		d = def_stmt
	inner-loop:
		stmt # use (d)
	outer-loop-tail-bb:
		...		  */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "outer-loop def-stmt defining inner-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
		      vect_used_in_scope : vect_unused_in_scope;
	  break;

	case vect_used_in_outer_by_reduction:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_by_reduction;
	  break;

	case vect_used_in_outer:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_in_scope;
	  break;

	case vect_used_in_scope:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
	outer-loop-header-bb:
		...
	inner-loop:
		d = def_stmt
	outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
		stmt # use (d)		*/
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "inner-loop def-stmt defining outer-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
	    || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
		      vect_used_in_outer_by_reduction : vect_unused_in_scope;
	  break;

	case vect_used_by_reduction:
	case vect_used_only_live:
	  relevant = vect_used_in_outer_by_reduction;
	  break;

	case vect_used_in_scope:
	  relevant = vect_used_in_outer;
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  /* We are also not interested in uses on loop PHI backedges that are
     inductions.  Otherwise we'll needlessly vectorize the IV increment
     and cause hybrid SLP for SLP inductions.  Unless the PHI is live
     of course.  */
  else if (gimple_code (stmt) == GIMPLE_PHI
	   && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_induction_def
	   && ! STMT_VINFO_LIVE_P (stmt_vinfo)
	   && (PHI_ARG_DEF_FROM_EDGE (stmt, loop_latch_edge (bb->loop_father))
	       == use))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "induction value on backedge.\n");
      return true;
    }

  vect_mark_relevant (worklist, def_stmt, relevant, false);
  return true;
}
/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.	 T1 = a[T0]

   3.    j = j + 1

   Stmts 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */

bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple *stmt;
  unsigned int i;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple *phi;
  bool live_p;
  enum vect_relevant relevant;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "=== vect_mark_stmts_to_be_vectorized ===\n");

  auto_vec<gimple *, 64> worklist;

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  phi = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	    }

	  if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, phi, relevant, live_p);
	}
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  stmt = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	    }

	  if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, stmt, relevant, live_p);
	}
    }

  /* 2. Process_worklist */
  while (worklist.length () > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = worklist.pop ();
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	}

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines it
	 (DEF_STMT) as relevant/irrelevant according to the relevance property
	 of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);

      /* Generally, the relevance property of STMT (in STMT_VINFO_RELEVANT) is
	 propagated as is to the DEF_STMTs of its USEs.

	 One exception is when STMT has been identified as defining a reduction
	 variable; in this case we set the relevance to vect_used_by_reduction.
	 This is because we distinguish between two kinds of relevant stmts -
	 those that are used by a reduction computation, and those that are
	 (also) used by a regular computation.  This allows us later on to
	 identify stmts that are used solely by a reduction, and therefore the
	 order of the results that they produce does not have to be kept.  */

      switch (STMT_VINFO_DEF_TYPE (stmt_vinfo))
	{
	case vect_reduction_def:
	  gcc_assert (relevant != vect_unused_in_scope);
	  if (relevant != vect_unused_in_scope
	      && relevant != vect_used_in_scope
	      && relevant != vect_used_by_reduction
	      && relevant != vect_used_only_live)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of reduction.\n");
	      return false;
	    }
	  break;

	case vect_nested_cycle:
	  if (relevant != vect_unused_in_scope
	      && relevant != vect_used_in_outer_by_reduction
	      && relevant != vect_used_in_outer)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of nested cycle.\n");

	      return false;
	    }
	  break;

	case vect_double_reduction_def:
	  if (relevant != vect_unused_in_scope
	      && relevant != vect_used_by_reduction
	      && relevant != vect_used_only_live)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of double reduction.\n");

	      return false;
	    }
	  break;

	default:
	  break;
	}

      if (is_pattern_stmt_p (stmt_vinfo))
	{
	  /* Pattern statements are not inserted into the code, so
	     FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
	     have to scan the RHS or function arguments instead.  */
	  if (is_gimple_assign (stmt))
	    {
	      enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
	      tree op = gimple_assign_rhs1 (stmt);

	      i = 1;
	      if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
		{
		  if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
				    relevant, &worklist, false)
		      || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
				       relevant, &worklist, false))
		    return false;
		  i = 2;
		}
	      for (; i < gimple_num_ops (stmt); i++)
		{
		  op = gimple_op (stmt, i);
		  if (TREE_CODE (op) == SSA_NAME
		      && !process_use (stmt, op, loop_vinfo, relevant,
				       &worklist, false))
		    return false;
		}
	    }
	  else if (is_gimple_call (stmt))
	    {
	      for (i = 0; i < gimple_call_num_args (stmt); i++)
		{
		  tree arg = gimple_call_arg (stmt, i);
		  if (!process_use (stmt, arg, loop_vinfo, relevant,
				    &worklist, false))
		    return false;
		}
	    }
	}
      else
	FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
	  {
	    tree op = USE_FROM_PTR (use_p);
	    if (!process_use (stmt, op, loop_vinfo, relevant,
			      &worklist, false))
	      return false;
	  }

      if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
	{
	  gather_scatter_info gs_info;
	  if (!vect_check_gather_scatter (stmt, loop_vinfo, &gs_info))
	    gcc_unreachable ();
	  if (!process_use (stmt, gs_info.offset, loop_vinfo, relevant,
			    &worklist, true))
	    return false;
	}
    } /* while worklist */

  return true;
}
/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
			enum vect_def_type *dt,
			int ndts,
			stmt_vector_for_cost *prologue_cost_vec,
			stmt_vector_for_cost *body_cost_vec)
{
  int i;
  int inside_cost = 0, prologue_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* Cost the "broadcast" of a scalar operand into a vector operand.
     Use scalar_to_vec to cost the broadcast, as elsewhere in the vector
     cost model.  */
  for (i = 0; i < ndts; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
					 stmt_info, 0, vect_prologue);

  /* Pass the inside-of-loop statements to the target-specific cost model.  */
  inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
				  stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_simple_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Model cost for type demotion and promotion operations.  PWR is normally
   zero for single-step promotions and demotions.  It will be one if
   two-step promotion/demotion is required, and so on.  Each additional
   step doubles the number of instructions required.  */

static void
vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
				    enum vect_def_type *dt, int pwr)
{
  int i, tmp;
  int inside_cost = 0, prologue_cost = 0;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  void *target_cost_data;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (loop_vinfo)
    target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
  else
    target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);

  for (i = 0; i < pwr + 1; i++)
    {
      tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
	(i + 1) : i;
      inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
				    vec_promote_demote, stmt_info, 0,
				    vect_body);
    }
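  /* For instance, for a two-step promotion (PWR == 1) the loop above
     charges vect_pow2 (1) + vect_pow2 (2) = 2 + 4 = 6 vec_promote_demote
     operations.  */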

  /* FORNOW: Assuming maximum 2 args per stmt.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
				      stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_promotion_demotion_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Function vect_model_store_cost

   Models cost for stores.  In the case of grouped accesses, one access
   has the overhead of the grouped access attributed to it.  */

void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
		       vect_memory_access_type memory_access_type,
		       enum vect_def_type dt, slp_tree slp_node,
		       stmt_vector_for_cost *prologue_cost_vec,
		       stmt_vector_for_cost *body_cost_vec)
{
  unsigned int inside_cost = 0, prologue_cost = 0;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
  bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);

  if (dt == vect_constant_def || dt == vect_external_def)
    prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
				       stmt_info, 0, vect_prologue);

  /* Grouped stores update all elements in the group at once,
     so we want the DR for the first statement.  */
  if (!slp_node && grouped_access_p)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }

  /* True if we should include any once-per-group costs as well as
     the cost of the statement itself.  For SLP we only get called
     once per group anyhow.  */
  bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));

  /* We assume that the cost of a single store-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate stores.  If a grouped
     access is instead being provided by a permute-and-store operation,
     include the cost of the permutes.  */
  if (first_stmt_p
      && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
    {
      /* Uses high and low interleave or shuffle operations for each
	 needed permute.  */
      int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
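      /* For example, a group of 4 stores costs ceil_log2 (4) * 4 = 8
	 permute stmts per copy.  */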
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_store_cost: strided group_size = %d .\n",
			 group_size);
    }

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  /* Costs of the stores.  */
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_GATHER_SCATTER)
    {
      /* N scalar stores plus extracting the elements.  */
      unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * assumed_nunits,
				       scalar_store, stmt_info, 0, vect_body);
    }
  else
    vect_get_store_cost (dr, ncopies, &inside_cost, body_cost_vec);

  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_STRIDED_SLP)
    {
      /* N scalar stores plus extracting the elements.  */
      unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * assumed_nunits,
				       vec_to_scalar, stmt_info, 0, vect_body);
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_store_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Calculate cost of DR's memory access.  */
void
vect_get_store_cost (struct data_reference *dr, int ncopies,
		     unsigned int *inside_cost,
		     stmt_vector_for_cost *body_cost_vec)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vector_store, stmt_info, 0,
					  vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: aligned.\n");
	break;
      }

    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned store.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_store, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: unaligned supported by "
			   "hardware.\n");
	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_store_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_model_load_cost

   Models cost for loads.  In the case of grouped accesses, one access has
   the overhead of the grouped access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
		      vect_memory_access_type memory_access_type,
		      slp_tree slp_node,
		      stmt_vector_for_cost *prologue_cost_vec,
		      stmt_vector_for_cost *body_cost_vec)
{
  gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  unsigned int inside_cost = 0, prologue_cost = 0;
  bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);

  /* Grouped loads read all elements in the group at once,
     so we want the DR for the first statement.  */
  if (!slp_node && grouped_access_p)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }

  /* True if we should include any once-per-group costs as well as
     the cost of the statement itself.  For SLP we only get called
     once per group anyhow.  */
  bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));

  /* We assume that the cost of a single load-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate loads.  If a grouped
     access is instead being provided by a load-and-permute operation,
     include the cost of the permutes.  */
  if (first_stmt_p
      && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
    {
      /* Uses even and odd extract operations or shuffle operations
	 for each needed permute.  */
      int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_load_cost: strided group_size = %d .\n",
			 group_size);
    }

  /* The loads themselves.  */
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_GATHER_SCATTER)
    {
      /* N scalar loads plus gathering them into a vector.  */
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * assumed_nunits,
				       scalar_load, stmt_info, 0, vect_body);
    }
  else
    vect_get_load_cost (dr, ncopies, first_stmt_p,
			&inside_cost, &prologue_cost,
			prologue_cost_vec, body_cost_vec, true);
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_STRIDED_SLP)
    inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
				     stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_load_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Calculate cost of DR's memory access.  */
void
vect_get_load_cost (struct data_reference *dr, int ncopies,
		    bool add_realign_cost, unsigned int *inside_cost,
		    unsigned int *prologue_cost,
		    stmt_vector_for_cost *prologue_cost_vec,
		    stmt_vector_for_cost *body_cost_vec,
		    bool record_prologue_costs)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: aligned.\n");

	break;
      }
    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned load.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_load, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned supported by "
			   "hardware.\n");

	break;
      }
    case dr_explicit_realign:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
					  vector_load, stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vec_perm, stmt_info, 0, vect_body);

	/* FIXME: If the misalignment remains fixed across the iterations of
	   the containing loop, the following cost should be added to the
	   prologue costs.  */
	if (targetm.vectorize.builtin_mask_for_load)
	  *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
					    stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign\n");

	break;
      }
    case dr_explicit_realign_optimized:
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned software "
			   "pipelined.\n");

	/* Unaligned software pipeline has a load of an address, an initial
	   load, and possibly a mask operation to "prime" the loop.  However,
	   if this is an access in a group of loads, which provide grouped
	   access, then the above cost should only be considered for one
	   access in the group.  Inside the loop, there is a load op
	   and a realignment op.  */

	if (add_realign_cost && record_prologue_costs)
	  {
	    *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
						vector_stmt, stmt_info,
						0, vect_prologue);
	    if (targetm.vectorize.builtin_mask_for_load)
	      *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
						  vector_stmt, stmt_info,
						  0, vect_prologue);
	  }

	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign optimized"
			   "\n");

	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_load_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
   the loop preheader for the vectorized stmt STMT.  */

static void
vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
{
  if (gsi)
    vect_finish_stmt_generation (stmt, new_stmt, gsi);
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
	{
	  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
	  basic_block new_bb;
	  edge pe;

	  if (nested_in_vect_loop_p (loop, stmt))
	    loop = loop->inner;

	  pe = loop_preheader_edge (loop);
	  new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
	  gcc_assert (!new_bb);
	}
      else
	{
	  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
	  basic_block bb;
	  gimple_stmt_iterator gsi_bb_start;

	  gcc_assert (bb_vinfo);
	  bb = BB_VINFO_BB (bb_vinfo);
	  gsi_bb_start = gsi_after_labels (bb);
	  gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
	}
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "created new init_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
    }
}
/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new variable of type
   TYPE with the value VAL.  If TYPE is a vector type and VAL does not have
   vector type a vector with all elements equal to VAL is created first.
   Place the initialization at BSI if it is not NULL.  Otherwise, place the
   initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */

tree
vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi)
{
  gimple *init_stmt;
  tree new_temp;

  /* We abuse this function to push sth to a SSA name with initial 'val'.  */
  if (! useless_type_conversion_p (type, TREE_TYPE (val)))
    {
      gcc_assert (TREE_CODE (type) == VECTOR_TYPE);
      if (! types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
	{
	  /* Scalar boolean value should be transformed into
	     all zeros or all ones value before building a vector.  */
	  if (VECTOR_BOOLEAN_TYPE_P (type))
	    {
	      tree true_val = build_all_ones_cst (TREE_TYPE (type));
	      tree false_val = build_zero_cst (TREE_TYPE (type));

	      if (CONSTANT_CLASS_P (val))
		val = integer_zerop (val) ? false_val : true_val;
	      else
		{
		  new_temp = make_ssa_name (TREE_TYPE (type));
		  init_stmt = gimple_build_assign (new_temp, COND_EXPR,
						   val, true_val, false_val);
		  vect_init_vector_1 (stmt, init_stmt, gsi);
		  val = new_temp;
		}
	    }
	  else if (CONSTANT_CLASS_P (val))
	    val = fold_convert (TREE_TYPE (type), val);
	  else
	    {
	      new_temp = make_ssa_name (TREE_TYPE (type));
	      if (! INTEGRAL_TYPE_P (TREE_TYPE (val)))
		init_stmt = gimple_build_assign (new_temp,
						 fold_build1 (VIEW_CONVERT_EXPR,
							      TREE_TYPE (type),
							      val));
	      else
		init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
	      vect_init_vector_1 (stmt, init_stmt, gsi);
	      val = new_temp;
	    }
	}
      val = build_vector_from_val (type, val);
    }

  new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
  init_stmt = gimple_build_assign (new_temp, val);
  vect_init_vector_1 (stmt, init_stmt, gsi);
  return new_temp;
}
/* Function vect_get_vec_def_for_operand_1.

   For a defining stmt DEF_STMT of a scalar stmt, return a vector def with type
   DT that will be used in the vectorized stmt.  */

tree
vect_get_vec_def_for_operand_1 (gimple *def_stmt, enum vect_def_type dt)
{
  tree vec_oprnd;
  gimple *vec_stmt;
  stmt_vec_info def_stmt_info = NULL;

  switch (dt)
    {
    /* operand is a constant or a loop invariant.  */
    case vect_constant_def:
    case vect_external_def:
      /* Code should use vect_get_vec_def_for_operand.  */
      gcc_unreachable ();

    /* operand is defined inside the loop.  */
    case vect_internal_def:
      {
	/* Get the def from the vectorized stmt.  */
	def_stmt_info = vinfo_for_stmt (def_stmt);

	vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	/* Get vectorized pattern statement.  */
	if (!vec_stmt
	    && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
	    && !STMT_VINFO_RELEVANT (def_stmt_info))
	  vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
		       STMT_VINFO_RELATED_STMT (def_stmt_info)));
	gcc_assert (vec_stmt);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else if (is_gimple_call (vec_stmt))
	  vec_oprnd = gimple_call_lhs (vec_stmt);
	else
	  vec_oprnd = gimple_assign_lhs (vec_stmt);
	return vec_oprnd;
      }

    /* operand is defined by a loop header phi.  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
    case vect_induction_def:
      {
	gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);

	/* Get the def from the vectorized stmt.  */
	def_stmt_info = vinfo_for_stmt (def_stmt);
	vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else
	  vec_oprnd = gimple_get_lhs (vec_stmt);
	return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT.  This function returns a (vector) def that will be
   used in the vectorized stmt for STMT.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector def
   needs to be introduced.  VECTYPE may be used to specify a required type for
   vector invariant.  */

tree
vect_get_vec_def_for_operand (tree op, gimple *stmt, tree vectype)
{
  gimple *def_stmt;
  enum vect_def_type dt;
  bool is_simple_use;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_get_vec_def_for_operand: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
      dump_printf (MSG_NOTE, "\n");
    }

  is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt);
  gcc_assert (is_simple_use);
  if (def_stmt && dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "  def_stmt = ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
    }

  if (dt == vect_constant_def || dt == vect_external_def)
    {
      tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
      tree vector_type;

      if (vectype)
	vector_type = vectype;
      else if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op))
	       && VECTOR_BOOLEAN_TYPE_P (stmt_vectype))
	vector_type = build_same_sized_truth_vector_type (stmt_vectype);
      else
	vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));

      gcc_assert (vector_type);
      return vect_init_vector (stmt, op, vector_type, NULL);
    }
  else
    return vect_get_vec_def_for_operand_1 (def_stmt, dt);
}
/* Function vect_get_vec_def_for_stmt_copy

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
   of the stmt that defines VEC_OPRND.
   DT is the type of the vector def VEC_OPRND.

   Context:
   In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of the
   vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:       vectorized into:           STMT_VINFO_RELATED_STMT

   S1: x = load       VS1.0: vx.0 = memref0      VS1.1
                      VS1.1: vx.1 = memref1      VS1.2
                      VS1.2: vx.2 = memref2      VS1.3
                      VS1.3: vx.3 = memref3

   S2: z = x + ...    VSnew.0: vz0 = vx.0 + ...  VSnew.1
                      VSnew.1: vz1 = vx.1 + ...  VSnew.2
                      VSnew.2: vz2 = vx.2 + ...  VSnew.3
                      VSnew.3: vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
   To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

   To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

   For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
	vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
	vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
	vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2);  */

tree
vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
{
  gimple *vec_stmt_for_operand;
  stmt_vec_info def_stmt_info;

  /* Do nothing; can reuse same def.  */
  if (dt == vect_external_def || dt == vect_constant_def )
    return vec_oprnd;

  vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
  def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
  gcc_assert (def_stmt_info);
  vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (vec_stmt_for_operand);
  if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
    vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
  else
    vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  return vec_oprnd;
}
/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */

void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
				 vec<tree> *vec_oprnds0,
				 vec<tree> *vec_oprnds1)
{
  tree vec_oprnd = vec_oprnds0->pop ();

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  vec_oprnds0->quick_push (vec_oprnd);

  if (vec_oprnds1 && vec_oprnds1->length ())
    {
      vec_oprnd = vec_oprnds1->pop ();
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      vec_oprnds1->quick_push (vec_oprnd);
    }
}


/* Get vectorized definitions for OP0 and OP1.  */

void
vect_get_vec_defs (tree op0, tree op1, gimple *stmt,
		   vec<tree> *vec_oprnds0,
		   vec<tree> *vec_oprnds1,
		   slp_tree slp_node)
{
  if (slp_node)
    {
      int nops = (op1 == NULL_TREE) ? 1 : 2;
      auto_vec<tree> ops (nops);
      auto_vec<vec<tree> > vec_defs (nops);

      ops.quick_push (op0);
      if (op1)
	ops.quick_push (op1);

      vect_get_slp_defs (ops, slp_node, &vec_defs);

      *vec_oprnds0 = vec_defs[0];
      if (op1)
	*vec_oprnds1 = vec_defs[1];
    }
  else
    {
      tree vec_oprnd;

      vec_oprnds0->create (1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt);
      vec_oprnds0->quick_push (vec_oprnd);

      if (op1)
	{
	  vec_oprnds1->create (1);
	  vec_oprnd = vect_get_vec_def_for_operand (op1, stmt);
	  vec_oprnds1->quick_push (vec_oprnd);
	}
    }
}
/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

void
vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt,
			     gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  vec_info *vinfo = stmt_info->vinfo;

  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  if (!gsi_end_p (*gsi)
      && gimple_has_mem_ops (vec_stmt))
    {
      gimple *at_stmt = gsi_stmt (*gsi);
      tree vuse = gimple_vuse (at_stmt);
      if (vuse && TREE_CODE (vuse) == SSA_NAME)
	{
	  tree vdef = gimple_vdef (at_stmt);
	  gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
	  /* If we have an SSA vuse and insert a store, update virtual
	     SSA form to avoid triggering the renamer.  Do so only
	     if we can easily see all uses - which is what almost always
	     happens with the way vectorized stmts are inserted.  */
	  if ((vdef && TREE_CODE (vdef) == SSA_NAME)
	      && ((is_gimple_assign (vec_stmt)
		   && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
		  || (is_gimple_call (vec_stmt)
		      && !(gimple_call_flags (vec_stmt)
			   & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
	    {
	      tree new_vdef = copy_ssa_name (vuse, vec_stmt);
	      gimple_set_vdef (vec_stmt, new_vdef);
	      SET_USE (gimple_vuse_op (at_stmt), new_vdef);
	    }
	}
    }
  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
    }

  gimple_set_location (vec_stmt, gimple_location (stmt));

  /* While EH edges will generally prevent vectorization, stmt might
     e.g. be in a must-not-throw region.  Ensure newly created stmts
     that could throw are part of the same region.  */
  int lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
    add_stmt_to_eh_lp (vec_stmt, lp_nr);
}
/* We want to vectorize a call to combined function CFN with function
   decl FNDECL, using VECTYPE_OUT as the type of the output and VECTYPE_IN
   as the types of all inputs.  Check whether this is possible using
   an internal function, returning its code if so or IFN_LAST if not.  */

static internal_fn
vectorizable_internal_function (combined_fn cfn, tree fndecl,
				tree vectype_out, tree vectype_in)
{
  internal_fn ifn;
  if (internal_fn_p (cfn))
    ifn = as_internal_fn (cfn);
  else
    ifn = associated_internal_fn (fndecl);
  if (ifn != IFN_LAST && direct_internal_fn_p (ifn))
    {
      const direct_internal_fn_info &info = direct_internal_fn (ifn);
      if (info.vectorizable)
	{
	  tree type0 = (info.type0 < 0 ? vectype_out : vectype_in);
	  tree type1 = (info.type1 < 0 ? vectype_out : vectype_in);
	  if (direct_internal_fn_supported_p (ifn, tree_pair (type0, type1),
					      OPTIMIZE_FOR_SPEED))
	    return ifn;
	}
    }
  return IFN_LAST;
}
static tree permute_vec_elements (tree, tree, tree, gimple *,
				  gimple_stmt_iterator *);

/* STMT is a non-strided load or store, meaning that it accesses
   elements with a known constant step.  Return -1 if that step
   is negative, 0 if it is zero, and 1 if it is greater than zero.  */

static int
compare_step_with_zero (gimple *stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  return tree_int_cst_compare (vect_dr_behavior (dr)->step,
			       size_zero_node);
}
/* If the target supports a permute mask that reverses the elements in
   a vector of type VECTYPE, return that mask, otherwise return null.  */

static tree
perm_mask_for_reverse (tree vectype)
{
  int i, nunits;

  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* The encoding has a single stepped pattern.  */
  vec_perm_builder sel (nunits, 1, 3);
  for (i = 0; i < 3; ++i)
    sel.quick_push (nunits - 1 - i);
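  /* For example, with nunits == 4 the three encoded elements are 3, 2, 1,
     and the stepped series extends to the full reversal mask
     { 3, 2, 1, 0 }.  */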

  vec_perm_indices indices (sel, 1, nunits);
  if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
    return NULL_TREE;
  return vect_gen_perm_mask_checked (vectype, indices);
}
/* A subroutine of get_load_store_type, with a subset of the same
   arguments.  Handle the case where STMT is part of a grouped load
   or store.

   For stores, the statements in the group are all consecutive
   and there is no gap at the end.  For loads, the statements in the
   group might not be consecutive; there can be gaps between statements
   as well as at the end.  */

static bool
get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
			   vec_load_store_type vls_type,
			   vect_memory_access_type *memory_access_type)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  vec_info *vinfo = stmt_info->vinfo;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
  gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
  data_reference *first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
  unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
  bool single_element_p = (stmt == first_stmt
			   && !GROUP_NEXT_ELEMENT (stmt_info));
  unsigned HOST_WIDE_INT gap = GROUP_GAP (vinfo_for_stmt (first_stmt));
  unsigned nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* True if the vectorized statements would access beyond the last
     statement in the group.  */
  bool overrun_p = false;

  /* True if we can cope with such overrun by peeling for gaps, so that
     there is at least one final scalar iteration after the vector loop.  */
  bool can_overrun_p = (vls_type == VLS_LOAD && loop_vinfo && !loop->inner);

  /* There can only be a gap at the end of the group if the stride is
     known at compile time.  */
  gcc_assert (!STMT_VINFO_STRIDED_P (stmt_info) || gap == 0);

  /* Stores can't yet have gaps.  */
  gcc_assert (slp || vls_type == VLS_LOAD || gap == 0);

  if (slp)
    {
      if (STMT_VINFO_STRIDED_P (stmt_info))
	{
	  /* Try to use consecutive accesses of GROUP_SIZE elements,
	     separated by the stride, until we have a complete vector.
	     Fall back to scalar accesses if that isn't possible.  */
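	  /* For instance, a group of 2 elements with 8-element vectors
	     (8 % 2 == 0) can use VMAT_STRIDED_SLP, while a group of 3
	     cannot and falls back to elementwise accesses.  */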
	  if (nunits % group_size == 0)
	    *memory_access_type = VMAT_STRIDED_SLP;
	  else
	    *memory_access_type = VMAT_ELEMENTWISE;
	}
      else
	{
	  overrun_p = loop_vinfo && gap != 0;
	  if (overrun_p && vls_type != VLS_LOAD)
	    {
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "Grouped store with gaps requires"
			       " non-consecutive accesses\n");
	      return false;
	    }
	  /* An overrun is fine if the trailing elements are smaller
	     than the alignment boundary B.  Every vector access will
	     be a multiple of B and so we are guaranteed to access a
	     non-gap element in the same B-sized block.  */
1806 if (overrun_p
1807 && gap < (vect_known_alignment_in_bytes (first_dr)
1808 / vect_get_scalar_dr_size (first_dr)))
1809 overrun_p = false;
1810 if (overrun_p && !can_overrun_p)
1812 if (dump_enabled_p ())
1813 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1814 "Peeling for outer loop is not supported\n");
1815 return false;
1817 *memory_access_type = VMAT_CONTIGUOUS;
1820 else
1822 /* We can always handle this case using elementwise accesses,
1823 but see if something more efficient is available. */
1824 *memory_access_type = VMAT_ELEMENTWISE;
1826 /* If there is a gap at the end of the group then these optimizations
1827 would access excess elements in the last iteration. */
1828 bool would_overrun_p = (gap != 0);
1829 /* An overrun is fine if the trailing elements are smaller than the
1830 alignment boundary B. Every vector access will be a multiple of B
1831 and so we are guaranteed to access a non-gap element in the
1832 same B-sized block. */
1833 if (would_overrun_p
1834 && gap < (vect_known_alignment_in_bytes (first_dr)
1835 / vect_get_scalar_dr_size (first_dr)))
1836 would_overrun_p = false;
1838 if (!STMT_VINFO_STRIDED_P (stmt_info)
1839 && (can_overrun_p || !would_overrun_p)
1840 && compare_step_with_zero (stmt) > 0)
1842 /* First try using LOAD/STORE_LANES. */
1843 if (vls_type == VLS_LOAD
1844 ? vect_load_lanes_supported (vectype, group_size)
1845 : vect_store_lanes_supported (vectype, group_size))
1847 *memory_access_type = VMAT_LOAD_STORE_LANES;
1848 overrun_p = would_overrun_p;
1851 /* If that fails, try using permuting loads or stores. */
1852 if (*memory_access_type == VMAT_ELEMENTWISE
1853 && (vls_type == VLS_LOAD
1854 ? vect_grouped_load_supported (vectype, single_element_p,
1855 group_size)
1856 : vect_grouped_store_supported (vectype, group_size)))
1858 *memory_access_type = VMAT_CONTIGUOUS_PERMUTE;
1859 overrun_p = would_overrun_p;
1864 if (vls_type != VLS_LOAD && first_stmt == stmt)
1866 /* STMT is the leader of the group. Check the operands of all the
1867 stmts of the group. */
1868 gimple *next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
1869 while (next_stmt)
1871 gcc_assert (gimple_assign_single_p (next_stmt));
1872 tree op = gimple_assign_rhs1 (next_stmt);
1873 gimple *def_stmt;
1874 enum vect_def_type dt;
1875 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
1877 if (dump_enabled_p ())
1878 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1879 "use not simple.\n");
1880 return false;
1882 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
1886 if (overrun_p)
1888 gcc_assert (can_overrun_p);
1889 if (dump_enabled_p ())
1890 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1891 "Data access with gaps requires scalar "
1892 "epilogue loop\n");
1893 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
1896 return true;
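/* Illustrative sketch only (hypothetical helper, plain integers instead
   of the data_reference queries used above): a trailing gap of GAP scalar
   elements is harmless when those elements still fall inside the same
   ALIGN_BYTES-sized block as the last real element, e.g. gap == 1 with a
   16-byte known alignment and 4-byte scalars, since 1 < 16 / 4.  */

static bool
example_gap_within_aligned_block_p (unsigned int gap,
                                    unsigned int align_bytes,
                                    unsigned int scalar_size)
{
  /* GAP is counted in scalar elements; ALIGN_BYTES / SCALAR_SIZE is the
     number of scalar elements per aligned block.  */
  return gap < align_bytes / scalar_size;
}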
1899 /* A subroutine of get_load_store_type, with a subset of the same
1900 arguments. Handle the case where STMT is a load or store that
1901 accesses consecutive elements with a negative step. */
1903 static vect_memory_access_type
1904 get_negative_load_store_type (gimple *stmt, tree vectype,
1905 vec_load_store_type vls_type,
1906 unsigned int ncopies)
1908 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1909 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
1910 dr_alignment_support alignment_support_scheme;
1912 if (ncopies > 1)
1914 if (dump_enabled_p ())
1915 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1916 "multiple types with negative step.\n");
1917 return VMAT_ELEMENTWISE;
1920 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
1921 if (alignment_support_scheme != dr_aligned
1922 && alignment_support_scheme != dr_unaligned_supported)
1924 if (dump_enabled_p ())
1925 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1926 "negative step but alignment required.\n");
1927 return VMAT_ELEMENTWISE;
1930 if (vls_type == VLS_STORE_INVARIANT)
1932 if (dump_enabled_p ())
1933 dump_printf_loc (MSG_NOTE, vect_location,
1934 "negative step with invariant source;"
1935 " no permute needed.\n");
1936 return VMAT_CONTIGUOUS_DOWN;
1939 if (!perm_mask_for_reverse (vectype))
1941 if (dump_enabled_p ())
1942 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1943 "negative step and reversing not supported.\n");
1944 return VMAT_ELEMENTWISE;
1947 return VMAT_CONTIGUOUS_REVERSE;
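/* Illustrative sketch only (hypothetical helper mirroring the decision
   ladder above with plain flags): more than one vector copy or an
   unsupported alignment falls back to elementwise accesses, an invariant
   store needs no permute, and anything else needs a reverse permute.  */

static vect_memory_access_type
example_negative_step_choice (bool single_copy_p, bool alignment_ok_p,
                              bool invariant_store_p, bool reverse_ok_p)
{
  if (!single_copy_p || !alignment_ok_p)
    return VMAT_ELEMENTWISE;
  if (invariant_store_p)
    return VMAT_CONTIGUOUS_DOWN;
  return reverse_ok_p ? VMAT_CONTIGUOUS_REVERSE : VMAT_ELEMENTWISE;
}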
1950 /* Analyze load or store statement STMT of type VLS_TYPE. Return true
1951 if there is a memory access type that the vectorized form can use,
1952 storing it in *MEMORY_ACCESS_TYPE if so. If we decide to use gathers
1953 or scatters, fill in GS_INFO accordingly.
1955 SLP says whether we're performing SLP rather than loop vectorization.
1956 VECTYPE is the vector type that the vectorized statements will use.
1957 NCOPIES is the number of vector statements that will be needed. */
1959 static bool
1960 get_load_store_type (gimple *stmt, tree vectype, bool slp,
1961 vec_load_store_type vls_type, unsigned int ncopies,
1962 vect_memory_access_type *memory_access_type,
1963 gather_scatter_info *gs_info)
1965 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1966 vec_info *vinfo = stmt_info->vinfo;
1967 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1968 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
1969 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
1971 *memory_access_type = VMAT_GATHER_SCATTER;
1972 gimple *def_stmt;
1973 if (!vect_check_gather_scatter (stmt, loop_vinfo, gs_info))
1974 gcc_unreachable ();
1975 else if (!vect_is_simple_use (gs_info->offset, vinfo, &def_stmt,
1976 &gs_info->offset_dt,
1977 &gs_info->offset_vectype))
1979 if (dump_enabled_p ())
1980 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1981 "%s index use not simple.\n",
1982 vls_type == VLS_LOAD ? "gather" : "scatter");
1983 return false;
1986 else if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1988 if (!get_group_load_store_type (stmt, vectype, slp, vls_type,
1989 memory_access_type))
1990 return false;
1992 else if (STMT_VINFO_STRIDED_P (stmt_info))
1994 gcc_assert (!slp);
1995 *memory_access_type = VMAT_ELEMENTWISE;
1997 else
1999 int cmp = compare_step_with_zero (stmt);
2000 if (cmp < 0)
2001 *memory_access_type = get_negative_load_store_type
2002 (stmt, vectype, vls_type, ncopies);
2003 else if (cmp == 0)
2005 gcc_assert (vls_type == VLS_LOAD);
2006 *memory_access_type = VMAT_INVARIANT;
2008 else
2009 *memory_access_type = VMAT_CONTIGUOUS;
2012 if ((*memory_access_type == VMAT_ELEMENTWISE
2013 || *memory_access_type == VMAT_STRIDED_SLP)
2014 && !nunits.is_constant ())
2016 if (dump_enabled_p ())
2017 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2018 "Not using elementwise accesses due to variable "
2019 "vectorization factor.\n");
2020 return false;
2023 /* FIXME: At the moment the cost model seems to underestimate the
2024 cost of using elementwise accesses. This check preserves the
2025 traditional behavior until that can be fixed. */
2026 if (*memory_access_type == VMAT_ELEMENTWISE
2027 && !STMT_VINFO_STRIDED_P (stmt_info))
2029 if (dump_enabled_p ())
2030 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2031 "not falling back to elementwise accesses\n");
2032 return false;
2034 return true;
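/* Illustrative sketch only (hypothetical helper): the classification
   order used above, with the grouped/strided refinements and the final
   sanity checks left out.  Gather/scatter wins, then grouped accesses,
   then strided ones, and otherwise the sign of the step decides.  */

static vect_memory_access_type
example_classify_access (bool gather_scatter_p, bool grouped_p,
                         bool strided_p, int step_sign)
{
  if (gather_scatter_p)
    return VMAT_GATHER_SCATTER;
  if (grouped_p)
    return VMAT_CONTIGUOUS;   /* Refined further by the group handling.  */
  if (strided_p)
    return VMAT_ELEMENTWISE;
  if (step_sign < 0)
    return VMAT_CONTIGUOUS_REVERSE;   /* Or elementwise, see above.  */
  return step_sign == 0 ? VMAT_INVARIANT : VMAT_CONTIGUOUS;
}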
2037 /* Function vectorizable_mask_load_store.
2039 Check if STMT performs a conditional load or store that can be vectorized.
2040 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2041 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
2042 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2044 static bool
2045 vectorizable_mask_load_store (gimple *stmt, gimple_stmt_iterator *gsi,
2046 gimple **vec_stmt, slp_tree slp_node)
2048 tree vec_dest = NULL;
2049 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2050 stmt_vec_info prev_stmt_info;
2051 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2052 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2053 bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
2054 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
2055 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2056 tree rhs_vectype = NULL_TREE;
2057 tree mask_vectype;
2058 tree elem_type;
2059 gimple *new_stmt;
2060 tree dummy;
2061 tree dataref_ptr = NULL_TREE;
2062 gimple *ptr_incr;
2063 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2064 int ncopies;
2065 int i, j;
2066 bool inv_p;
2067 gather_scatter_info gs_info;
2068 vec_load_store_type vls_type;
2069 tree mask;
2070 gimple *def_stmt;
2071 enum vect_def_type dt;
2073 if (slp_node != NULL)
2074 return false;
2076 ncopies = vect_get_num_copies (loop_vinfo, vectype);
2077 gcc_assert (ncopies >= 1);
2079 mask = gimple_call_arg (stmt, 2);
2081 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (mask)))
2082 return false;
2084 /* FORNOW. This restriction should be relaxed. */
2085 if (nested_in_vect_loop && ncopies > 1)
2087 if (dump_enabled_p ())
2088 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2089 "multiple types in nested loop.");
2090 return false;
2093 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2094 return false;
2096 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
2097 && ! vec_stmt)
2098 return false;
2100 if (!STMT_VINFO_DATA_REF (stmt_info))
2101 return false;
2103 elem_type = TREE_TYPE (vectype);
2105 if (TREE_CODE (mask) != SSA_NAME)
2106 return false;
2108 if (!vect_is_simple_use (mask, loop_vinfo, &def_stmt, &dt, &mask_vectype))
2109 return false;
2111 if (!mask_vectype)
2112 mask_vectype = get_mask_type_for_scalar_type (TREE_TYPE (vectype));
2114 if (!mask_vectype || !VECTOR_BOOLEAN_TYPE_P (mask_vectype)
2115 || TYPE_VECTOR_SUBPARTS (mask_vectype) != TYPE_VECTOR_SUBPARTS (vectype))
2116 return false;
2118 if (gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
2120 tree rhs = gimple_call_arg (stmt, 3);
2121 if (!vect_is_simple_use (rhs, loop_vinfo, &def_stmt, &dt, &rhs_vectype))
2122 return false;
2123 if (dt == vect_constant_def || dt == vect_external_def)
2124 vls_type = VLS_STORE_INVARIANT;
2125 else
2126 vls_type = VLS_STORE;
2128 else
2129 vls_type = VLS_LOAD;
2131 vect_memory_access_type memory_access_type;
2132 if (!get_load_store_type (stmt, vectype, false, vls_type, ncopies,
2133 &memory_access_type, &gs_info))
2134 return false;
2136 if (memory_access_type == VMAT_GATHER_SCATTER)
2138 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
2139 tree masktype
2140 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
2141 if (TREE_CODE (masktype) == INTEGER_TYPE)
2143 if (dump_enabled_p ())
2144 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2145 "masked gather with integer mask not supported.");
2146 return false;
2149 else if (memory_access_type != VMAT_CONTIGUOUS)
2151 if (dump_enabled_p ())
2152 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2153 "unsupported access type for masked %s.\n",
2154 vls_type == VLS_LOAD ? "load" : "store");
2155 return false;
2157 else if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2158 || !can_vec_mask_load_store_p (TYPE_MODE (vectype),
2159 TYPE_MODE (mask_vectype),
2160 vls_type == VLS_LOAD)
2161 || (rhs_vectype
2162 && !useless_type_conversion_p (vectype, rhs_vectype)))
2163 return false;
2165 if (!vec_stmt) /* transformation not required. */
2167 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
2168 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2169 if (vls_type == VLS_LOAD)
2170 vect_model_load_cost (stmt_info, ncopies, memory_access_type,
2171 NULL, NULL, NULL);
2172 else
2173 vect_model_store_cost (stmt_info, ncopies, memory_access_type,
2174 dt, NULL, NULL, NULL);
2175 return true;
2177 gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
2179 /* Transform. */
2181 if (memory_access_type == VMAT_GATHER_SCATTER)
2183 tree vec_oprnd0 = NULL_TREE, op;
2184 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
2185 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
2186 tree ptr, vec_mask = NULL_TREE, mask_op = NULL_TREE, var, scale;
2187 tree perm_mask = NULL_TREE, prev_res = NULL_TREE;
2188 tree mask_perm_mask = NULL_TREE;
2189 edge pe = loop_preheader_edge (loop);
2190 gimple_seq seq;
2191 basic_block new_bb;
2192 enum { NARROW, NONE, WIDEN } modifier;
2193 poly_uint64 gather_off_nunits
2194 = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);
2196 rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
2197 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2198 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2199 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2200 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2201 scaletype = TREE_VALUE (arglist);
2202 gcc_checking_assert (types_compatible_p (srctype, rettype)
2203 && types_compatible_p (srctype, masktype));
2205 if (known_eq (nunits, gather_off_nunits))
2206 modifier = NONE;
2207 else if (known_eq (nunits * 2, gather_off_nunits))
2209 modifier = WIDEN;
2211 /* Currently widening gathers and scatters are only supported for
2212 fixed-length vectors. */
2213 int count = gather_off_nunits.to_constant ();
2214 vec_perm_builder sel (count, count, 1);
2215 for (i = 0; i < count; ++i)
2216 sel.quick_push (i | (count / 2));
2218 vec_perm_indices indices (sel, 1, count);
2219 perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype,
2220 indices);
2222 else if (known_eq (nunits, gather_off_nunits * 2))
2224 modifier = NARROW;
2226 /* Currently narrowing gathers and scatters are only supported for
2227 fixed-length vectors. */
2228 int count = nunits.to_constant ();
2229 vec_perm_builder sel (count, count, 1);
2230 sel.quick_grow (count);
2231 for (i = 0; i < count; ++i)
2232 sel[i] = i < count / 2 ? i : i + count / 2;
2233 vec_perm_indices indices (sel, 2, count);
2234 perm_mask = vect_gen_perm_mask_checked (vectype, indices);
2236 ncopies *= 2;
2237 for (i = 0; i < count; ++i)
2238 sel[i] = i | (count / 2);
2239 indices.new_vector (sel, 2, count);
2240 mask_perm_mask = vect_gen_perm_mask_checked (masktype, indices);
2242 else
2243 gcc_unreachable ();
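/* Worked example of the selectors built above, for count == 8: the WIDEN
   selector pushes i | 4 and becomes { 4, 5, 6, 7, 4, 5, 6, 7 }, so the
   odd-numbered copies read the upper half of the offset vector; the
   NARROW selector is { 0, 1, 2, 3, 8, 9, 10, 11 } over two inputs,
   concatenating the low halves of two consecutive gather results, while
   the mask selector { 4, 5, 6, 7, 4, 5, 6, 7 } picks the upper half of
   the mask for the odd-numbered copies.  */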
2245 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
2247 ptr = fold_convert (ptrtype, gs_info.base);
2248 if (!is_gimple_min_invariant (ptr))
2250 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
2251 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
2252 gcc_assert (!new_bb);
2255 scale = build_int_cst (scaletype, gs_info.scale);
2257 prev_stmt_info = NULL;
2258 for (j = 0; j < ncopies; ++j)
2260 if (modifier == WIDEN && (j & 1))
2261 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
2262 perm_mask, stmt, gsi);
2263 else if (j == 0)
2264 op = vec_oprnd0
2265 = vect_get_vec_def_for_operand (gs_info.offset, stmt);
2266 else
2267 op = vec_oprnd0
2268 = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt, vec_oprnd0);
2270 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
2272 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
2273 == TYPE_VECTOR_SUBPARTS (idxtype));
2274 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
2275 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
2276 new_stmt
2277 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
2278 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2279 op = var;
2282 if (mask_perm_mask && (j & 1))
2283 mask_op = permute_vec_elements (mask_op, mask_op,
2284 mask_perm_mask, stmt, gsi);
2285 else
2287 if (j == 0)
2288 vec_mask = vect_get_vec_def_for_operand (mask, stmt);
2289 else
2291 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
2292 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2295 mask_op = vec_mask;
2296 if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
2298 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op))
2299 == TYPE_VECTOR_SUBPARTS (masktype));
2300 var = vect_get_new_ssa_name (masktype, vect_simple_var);
2301 mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
2302 new_stmt
2303 = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op);
2304 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2305 mask_op = var;
2309 new_stmt
2310 = gimple_build_call (gs_info.decl, 5, mask_op, ptr, op, mask_op,
2311 scale);
2313 if (!useless_type_conversion_p (vectype, rettype))
2315 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
2316 == TYPE_VECTOR_SUBPARTS (rettype));
2317 op = vect_get_new_ssa_name (rettype, vect_simple_var);
2318 gimple_call_set_lhs (new_stmt, op);
2319 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2320 var = make_ssa_name (vec_dest);
2321 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
2322 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
2324 else
2326 var = make_ssa_name (vec_dest, new_stmt);
2327 gimple_call_set_lhs (new_stmt, var);
2330 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2332 if (modifier == NARROW)
2334 if ((j & 1) == 0)
2336 prev_res = var;
2337 continue;
2339 var = permute_vec_elements (prev_res, var,
2340 perm_mask, stmt, gsi);
2341 new_stmt = SSA_NAME_DEF_STMT (var);
2344 if (prev_stmt_info == NULL)
2345 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2346 else
2347 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2348 prev_stmt_info = vinfo_for_stmt (new_stmt);
2351 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
2352 from the IL. */
2353 if (STMT_VINFO_RELATED_STMT (stmt_info))
2355 stmt = STMT_VINFO_RELATED_STMT (stmt_info);
2356 stmt_info = vinfo_for_stmt (stmt);
2358 tree lhs = gimple_call_lhs (stmt);
2359 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
2360 set_vinfo_for_stmt (new_stmt, stmt_info);
2361 set_vinfo_for_stmt (stmt, NULL);
2362 STMT_VINFO_STMT (stmt_info) = new_stmt;
2363 gsi_replace (gsi, new_stmt, true);
2364 return true;
2366 else if (vls_type != VLS_LOAD)
2368 tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
2369 prev_stmt_info = NULL;
2370 LOOP_VINFO_HAS_MASK_STORE (loop_vinfo) = true;
2371 for (i = 0; i < ncopies; i++)
2373 unsigned align, misalign;
2375 if (i == 0)
2377 tree rhs = gimple_call_arg (stmt, 3);
2378 vec_rhs = vect_get_vec_def_for_operand (rhs, stmt);
2379 vec_mask = vect_get_vec_def_for_operand (mask, stmt,
2380 mask_vectype);
2381 /* We should have caught mismatched types earlier. */
2382 gcc_assert (useless_type_conversion_p (vectype,
2383 TREE_TYPE (vec_rhs)));
2384 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
2385 NULL_TREE, &dummy, gsi,
2386 &ptr_incr, false, &inv_p);
2387 gcc_assert (!inv_p);
2389 else
2391 vect_is_simple_use (vec_rhs, loop_vinfo, &def_stmt, &dt);
2392 vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs);
2393 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
2394 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2395 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2396 TYPE_SIZE_UNIT (vectype));
2399 align = DR_TARGET_ALIGNMENT (dr);
2400 if (aligned_access_p (dr))
2401 misalign = 0;
2402 else if (DR_MISALIGNMENT (dr) == -1)
2404 align = TYPE_ALIGN_UNIT (elem_type);
2405 misalign = 0;
2407 else
2408 misalign = DR_MISALIGNMENT (dr);
2409 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2410 misalign);
2411 tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)),
2412 misalign ? least_bit_hwi (misalign) : align);
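/* For example, a data reference with a 32-byte target alignment and a
   known misalignment of 8 passes 8 here (the largest power of two that
   divides the misalignment); a fully aligned access passes 32, and an
   unknown misalignment falls back to the element alignment.  */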
2413 gcall *call
2414 = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
2415 ptr, vec_mask, vec_rhs);
2416 gimple_call_set_nothrow (call, true);
2417 new_stmt = call;
2418 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2419 if (i == 0)
2420 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2421 else
2422 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2423 prev_stmt_info = vinfo_for_stmt (new_stmt);
2426 else
2428 tree vec_mask = NULL_TREE;
2429 prev_stmt_info = NULL;
2430 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
2431 for (i = 0; i < ncopies; i++)
2433 unsigned align, misalign;
2435 if (i == 0)
2437 vec_mask = vect_get_vec_def_for_operand (mask, stmt,
2438 mask_vectype);
2439 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
2440 NULL_TREE, &dummy, gsi,
2441 &ptr_incr, false, &inv_p);
2442 gcc_assert (!inv_p);
2444 else
2446 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
2447 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2448 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2449 TYPE_SIZE_UNIT (vectype));
2452 align = DR_TARGET_ALIGNMENT (dr);
2453 if (aligned_access_p (dr))
2454 misalign = 0;
2455 else if (DR_MISALIGNMENT (dr) == -1)
2457 align = TYPE_ALIGN_UNIT (elem_type);
2458 misalign = 0;
2460 else
2461 misalign = DR_MISALIGNMENT (dr);
2462 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2463 misalign);
2464 tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)),
2465 misalign ? least_bit_hwi (misalign) : align);
2466 gcall *call
2467 = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
2468 ptr, vec_mask);
2469 gimple_call_set_lhs (call, make_ssa_name (vec_dest));
2470 gimple_call_set_nothrow (call, true);
2471 vect_finish_stmt_generation (stmt, call, gsi);
2472 if (i == 0)
2473 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = call;
2474 else
2475 STMT_VINFO_RELATED_STMT (prev_stmt_info) = call;
2476 prev_stmt_info = vinfo_for_stmt (call);
2480 if (vls_type == VLS_LOAD)
2482 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
2483 from the IL. */
2484 if (STMT_VINFO_RELATED_STMT (stmt_info))
2486 stmt = STMT_VINFO_RELATED_STMT (stmt_info);
2487 stmt_info = vinfo_for_stmt (stmt);
2489 tree lhs = gimple_call_lhs (stmt);
2490 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
2491 set_vinfo_for_stmt (new_stmt, stmt_info);
2492 set_vinfo_for_stmt (stmt, NULL);
2493 STMT_VINFO_STMT (stmt_info) = new_stmt;
2494 gsi_replace (gsi, new_stmt, true);
2497 return true;
2500 /* Check and perform vectorization of BUILT_IN_BSWAP{16,32,64}. */
2502 static bool
2503 vectorizable_bswap (gimple *stmt, gimple_stmt_iterator *gsi,
2504 gimple **vec_stmt, slp_tree slp_node,
2505 tree vectype_in, enum vect_def_type *dt)
2507 tree op, vectype;
2508 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2509 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2510 unsigned ncopies, nunits;
2512 op = gimple_call_arg (stmt, 0);
2513 vectype = STMT_VINFO_VECTYPE (stmt_info);
2514 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2516 /* Multiple types in SLP are handled by creating the appropriate number of
2517 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2518 case of SLP. */
2519 if (slp_node)
2520 ncopies = 1;
2521 else
2522 ncopies = vect_get_num_copies (loop_vinfo, vectype);
2524 gcc_assert (ncopies >= 1);
2526 tree char_vectype = get_same_sized_vectype (char_type_node, vectype_in);
2527 if (! char_vectype)
2528 return false;
2530 unsigned int num_bytes = TYPE_VECTOR_SUBPARTS (char_vectype);
2531 unsigned word_bytes = num_bytes / nunits;
2533 /* The encoding uses one stepped pattern for each byte in the word. */
2534 vec_perm_builder elts (num_bytes, word_bytes, 3);
2535 for (unsigned i = 0; i < 3; ++i)
2536 for (unsigned j = 0; j < word_bytes; ++j)
2537 elts.quick_push ((i + 1) * word_bytes - j - 1);
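/* Worked example: for a 32-bit bswap on a 16-byte vector (nunits == 4,
   num_bytes == 16, word_bytes == 4) the three pushed groups are
   { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8 }, and the stepped encoding
   extends them with { 15, 14, 13, 12 }, i.e. the bytes of each 32-bit
   word are reversed in place.  */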
2539 vec_perm_indices indices (elts, 1, num_bytes);
2540 if (!can_vec_perm_const_p (TYPE_MODE (char_vectype), indices))
2541 return false;
2543 if (! vec_stmt)
2545 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2546 if (dump_enabled_p ())
2547 dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_bswap ==="
2548 "\n");
2549 if (! PURE_SLP_STMT (stmt_info))
2551 add_stmt_cost (stmt_info->vinfo->target_cost_data,
2552 1, vector_stmt, stmt_info, 0, vect_prologue);
2553 add_stmt_cost (stmt_info->vinfo->target_cost_data,
2554 ncopies, vec_perm, stmt_info, 0, vect_body);
2556 return true;
2559 tree bswap_vconst = vec_perm_indices_to_tree (char_vectype, indices);
2561 /* Transform. */
2562 vec<tree> vec_oprnds = vNULL;
2563 gimple *new_stmt = NULL;
2564 stmt_vec_info prev_stmt_info = NULL;
2565 for (unsigned j = 0; j < ncopies; j++)
2567 /* Handle uses. */
2568 if (j == 0)
2569 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
2570 else
2571 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
2573 /* Arguments are ready. Create the new vector stmt. */
2574 unsigned i;
2575 tree vop;
2576 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
2578 tree tem = make_ssa_name (char_vectype);
2579 new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
2580 char_vectype, vop));
2581 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2582 tree tem2 = make_ssa_name (char_vectype);
2583 new_stmt = gimple_build_assign (tem2, VEC_PERM_EXPR,
2584 tem, tem, bswap_vconst);
2585 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2586 tem = make_ssa_name (vectype);
2587 new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
2588 vectype, tem2));
2589 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2590 if (slp_node)
2591 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2594 if (slp_node)
2595 continue;
2597 if (j == 0)
2598 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2599 else
2600 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2602 prev_stmt_info = vinfo_for_stmt (new_stmt);
2605 vec_oprnds.release ();
2606 return true;
2609 /* Return true if vector types VECTYPE_IN and VECTYPE_OUT have
2610 integer elements and if we can narrow VECTYPE_IN to VECTYPE_OUT
2611 in a single step. On success, store the binary pack code in
2612 *CONVERT_CODE. */
2614 static bool
2615 simple_integer_narrowing (tree vectype_out, tree vectype_in,
2616 tree_code *convert_code)
2618 if (!INTEGRAL_TYPE_P (TREE_TYPE (vectype_out))
2619 || !INTEGRAL_TYPE_P (TREE_TYPE (vectype_in)))
2620 return false;
2622 tree_code code;
2623 int multi_step_cvt = 0;
2624 auto_vec <tree, 8> interm_types;
2625 if (!supportable_narrowing_operation (NOP_EXPR, vectype_out, vectype_in,
2626 &code, &multi_step_cvt,
2627 &interm_types)
2628 || multi_step_cvt)
2629 return false;
2631 *convert_code = code;
2632 return true;
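/* For example, on a target that provides the corresponding pack
   instruction, narrowing a V4DI result to V8SI succeeds in a single step
   and *CONVERT_CODE becomes VEC_PACK_TRUNC_EXPR; vectorizable_call then
   combines each pair of wide half-results with that code.  */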
2635 /* Function vectorizable_call.
2637 Check if GS performs a function call that can be vectorized.
2638 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2639 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2640 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2642 static bool
2643 vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
2644 slp_tree slp_node)
2646 gcall *stmt;
2647 tree vec_dest;
2648 tree scalar_dest;
2649 tree op, type;
2650 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
2651 stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info;
2652 tree vectype_out, vectype_in;
2653 poly_uint64 nunits_in;
2654 poly_uint64 nunits_out;
2655 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2656 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2657 vec_info *vinfo = stmt_info->vinfo;
2658 tree fndecl, new_temp, rhs_type;
2659 gimple *def_stmt;
2660 enum vect_def_type dt[3]
2661 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
2662 int ndts = 3;
2663 gimple *new_stmt = NULL;
2664 int ncopies, j;
2665 vec<tree> vargs = vNULL;
2666 enum { NARROW, NONE, WIDEN } modifier;
2667 size_t i, nargs;
2668 tree lhs;
2670 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2671 return false;
2673 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
2674 && ! vec_stmt)
2675 return false;
2677 /* Is GS a vectorizable call? */
2678 stmt = dyn_cast <gcall *> (gs);
2679 if (!stmt)
2680 return false;
2682 if (gimple_call_internal_p (stmt)
2683 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
2684 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
2685 return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
2686 slp_node);
2688 if (gimple_call_lhs (stmt) == NULL_TREE
2689 || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
2690 return false;
2692 gcc_checking_assert (!stmt_can_throw_internal (stmt));
2694 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2696 /* Process function arguments. */
2697 rhs_type = NULL_TREE;
2698 vectype_in = NULL_TREE;
2699 nargs = gimple_call_num_args (stmt);
2701 /* Bail out if the function has more than three arguments; we do not have
2702 interesting builtin functions to vectorize with more than two arguments
2703 except for fma. A call with no arguments is not handled either. */
2704 if (nargs == 0 || nargs > 3)
2705 return false;
2707 /* Ignore the argument of IFN_GOMP_SIMD_LANE; it is magic. */
2708 if (gimple_call_internal_p (stmt)
2709 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2711 nargs = 0;
2712 rhs_type = unsigned_type_node;
2715 for (i = 0; i < nargs; i++)
2717 tree opvectype;
2719 op = gimple_call_arg (stmt, i);
2721 /* We can only handle calls with arguments of the same type. */
2722 if (rhs_type
2723 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
2725 if (dump_enabled_p ())
2726 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2727 "argument types differ.\n");
2728 return false;
2730 if (!rhs_type)
2731 rhs_type = TREE_TYPE (op);
2733 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[i], &opvectype))
2735 if (dump_enabled_p ())
2736 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2737 "use not simple.\n");
2738 return false;
2741 if (!vectype_in)
2742 vectype_in = opvectype;
2743 else if (opvectype
2744 && opvectype != vectype_in)
2746 if (dump_enabled_p ())
2747 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2748 "argument vector types differ.\n");
2749 return false;
2752 /* If all arguments are external or constant defs, use a vector type with
2753 the same size as the output vector type. */
2754 if (!vectype_in)
2755 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
2756 if (vec_stmt)
2757 gcc_assert (vectype_in);
2758 if (!vectype_in)
2760 if (dump_enabled_p ())
2762 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2763 "no vectype for scalar type ");
2764 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
2765 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2768 return false;
2771 /* FORNOW */
2772 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2773 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2774 if (known_eq (nunits_in * 2, nunits_out))
2775 modifier = NARROW;
2776 else if (known_eq (nunits_out, nunits_in))
2777 modifier = NONE;
2778 else if (known_eq (nunits_out * 2, nunits_in))
2779 modifier = WIDEN;
2780 else
2781 return false;
2783 /* We only handle functions that do not read or clobber memory. */
2784 if (gimple_vuse (stmt))
2786 if (dump_enabled_p ())
2787 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2788 "function reads from or writes to memory.\n");
2789 return false;
2792 /* For now, we only vectorize functions if a target specific builtin
2793 is available. TODO -- in some cases, it might be profitable to
2794 insert the calls for pieces of the vector, in order to be able
2795 to vectorize other operations in the loop. */
2796 fndecl = NULL_TREE;
2797 internal_fn ifn = IFN_LAST;
2798 combined_fn cfn = gimple_call_combined_fn (stmt);
2799 tree callee = gimple_call_fndecl (stmt);
2801 /* First try using an internal function. */
2802 tree_code convert_code = ERROR_MARK;
2803 if (cfn != CFN_LAST
2804 && (modifier == NONE
2805 || (modifier == NARROW
2806 && simple_integer_narrowing (vectype_out, vectype_in,
2807 &convert_code))))
2808 ifn = vectorizable_internal_function (cfn, callee, vectype_out,
2809 vectype_in);
2811 /* If that fails, try asking for a target-specific built-in function. */
2812 if (ifn == IFN_LAST)
2814 if (cfn != CFN_LAST)
2815 fndecl = targetm.vectorize.builtin_vectorized_function
2816 (cfn, vectype_out, vectype_in);
2817 else
2818 fndecl = targetm.vectorize.builtin_md_vectorized_function
2819 (callee, vectype_out, vectype_in);
2822 if (ifn == IFN_LAST && !fndecl)
2824 if (cfn == CFN_GOMP_SIMD_LANE
2825 && !slp_node
2826 && loop_vinfo
2827 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2828 && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
2829 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2830 == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
2832 /* We can handle IFN_GOMP_SIMD_LANE by returning a
2833 { 0, 1, 2, ... vf - 1 } vector. */
2834 gcc_assert (nargs == 0);
2836 else if (modifier == NONE
2837 && (gimple_call_builtin_p (stmt, BUILT_IN_BSWAP16)
2838 || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP32)
2839 || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP64)))
2840 return vectorizable_bswap (stmt, gsi, vec_stmt, slp_node,
2841 vectype_in, dt);
2842 else
2844 if (dump_enabled_p ())
2845 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2846 "function is not vectorizable.\n");
2847 return false;
2851 if (slp_node)
2852 ncopies = 1;
2853 else if (modifier == NARROW && ifn == IFN_LAST)
2854 ncopies = vect_get_num_copies (loop_vinfo, vectype_out);
2855 else
2856 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
2858 /* Sanity check: make sure that at least one copy of the vectorized stmt
2859 needs to be generated. */
2860 gcc_assert (ncopies >= 1);
2862 if (!vec_stmt) /* transformation not required. */
2864 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2865 if (dump_enabled_p ())
2866 dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
2867 "\n");
2868 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
2869 if (ifn != IFN_LAST && modifier == NARROW && !slp_node)
2870 add_stmt_cost (stmt_info->vinfo->target_cost_data, ncopies / 2,
2871 vec_promote_demote, stmt_info, 0, vect_body);
2873 return true;
2876 /* Transform. */
2878 if (dump_enabled_p ())
2879 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
2881 /* Handle def. */
2882 scalar_dest = gimple_call_lhs (stmt);
2883 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2885 prev_stmt_info = NULL;
2886 if (modifier == NONE || ifn != IFN_LAST)
2888 tree prev_res = NULL_TREE;
2889 for (j = 0; j < ncopies; ++j)
2891 /* Build argument list for the vectorized call. */
2892 if (j == 0)
2893 vargs.create (nargs);
2894 else
2895 vargs.truncate (0);
2897 if (slp_node)
2899 auto_vec<vec<tree> > vec_defs (nargs);
2900 vec<tree> vec_oprnds0;
2902 for (i = 0; i < nargs; i++)
2903 vargs.quick_push (gimple_call_arg (stmt, i));
2904 vect_get_slp_defs (vargs, slp_node, &vec_defs);
2905 vec_oprnds0 = vec_defs[0];
2907 /* Arguments are ready. Create the new vector stmt. */
2908 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
2910 size_t k;
2911 for (k = 0; k < nargs; k++)
2913 vec<tree> vec_oprndsk = vec_defs[k];
2914 vargs[k] = vec_oprndsk[i];
2916 if (modifier == NARROW)
2918 tree half_res = make_ssa_name (vectype_in);
2919 gcall *call
2920 = gimple_build_call_internal_vec (ifn, vargs);
2921 gimple_call_set_lhs (call, half_res);
2922 gimple_call_set_nothrow (call, true);
2923 new_stmt = call;
2924 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2925 if ((i & 1) == 0)
2927 prev_res = half_res;
2928 continue;
2930 new_temp = make_ssa_name (vec_dest);
2931 new_stmt = gimple_build_assign (new_temp, convert_code,
2932 prev_res, half_res);
2934 else
2936 gcall *call;
2937 if (ifn != IFN_LAST)
2938 call = gimple_build_call_internal_vec (ifn, vargs);
2939 else
2940 call = gimple_build_call_vec (fndecl, vargs);
2941 new_temp = make_ssa_name (vec_dest, call);
2942 gimple_call_set_lhs (call, new_temp);
2943 gimple_call_set_nothrow (call, true);
2944 new_stmt = call;
2946 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2947 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2950 for (i = 0; i < nargs; i++)
2952 vec<tree> vec_oprndsi = vec_defs[i];
2953 vec_oprndsi.release ();
2955 continue;
2958 for (i = 0; i < nargs; i++)
2960 op = gimple_call_arg (stmt, i);
2961 if (j == 0)
2962 vec_oprnd0
2963 = vect_get_vec_def_for_operand (op, stmt);
2964 else
2966 vec_oprnd0 = gimple_call_arg (new_stmt, i);
2967 vec_oprnd0
2968 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2971 vargs.quick_push (vec_oprnd0);
2974 if (gimple_call_internal_p (stmt)
2975 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2977 tree cst = build_index_vector (vectype_out, j * nunits_out, 1);
2978 tree new_var
2979 = vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_");
2980 gimple *init_stmt = gimple_build_assign (new_var, cst);
2981 vect_init_vector_1 (stmt, init_stmt, NULL);
2982 new_temp = make_ssa_name (vec_dest);
2983 new_stmt = gimple_build_assign (new_temp, new_var);
2985 else if (modifier == NARROW)
2987 tree half_res = make_ssa_name (vectype_in);
2988 gcall *call = gimple_build_call_internal_vec (ifn, vargs);
2989 gimple_call_set_lhs (call, half_res);
2990 gimple_call_set_nothrow (call, true);
2991 new_stmt = call;
2992 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2993 if ((j & 1) == 0)
2995 prev_res = half_res;
2996 continue;
2998 new_temp = make_ssa_name (vec_dest);
2999 new_stmt = gimple_build_assign (new_temp, convert_code,
3000 prev_res, half_res);
3002 else
3004 gcall *call;
3005 if (ifn != IFN_LAST)
3006 call = gimple_build_call_internal_vec (ifn, vargs);
3007 else
3008 call = gimple_build_call_vec (fndecl, vargs);
3009 new_temp = make_ssa_name (vec_dest, new_stmt);
3010 gimple_call_set_lhs (call, new_temp);
3011 gimple_call_set_nothrow (call, true);
3012 new_stmt = call;
3014 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3016 if (j == (modifier == NARROW ? 1 : 0))
3017 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3018 else
3019 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3021 prev_stmt_info = vinfo_for_stmt (new_stmt);
3024 else if (modifier == NARROW)
3026 for (j = 0; j < ncopies; ++j)
3028 /* Build argument list for the vectorized call. */
3029 if (j == 0)
3030 vargs.create (nargs * 2);
3031 else
3032 vargs.truncate (0);
3034 if (slp_node)
3036 auto_vec<vec<tree> > vec_defs (nargs);
3037 vec<tree> vec_oprnds0;
3039 for (i = 0; i < nargs; i++)
3040 vargs.quick_push (gimple_call_arg (stmt, i));
3041 vect_get_slp_defs (vargs, slp_node, &vec_defs);
3042 vec_oprnds0 = vec_defs[0];
3044 /* Arguments are ready. Create the new vector stmt. */
3045 for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
3047 size_t k;
3048 vargs.truncate (0);
3049 for (k = 0; k < nargs; k++)
3051 vec<tree> vec_oprndsk = vec_defs[k];
3052 vargs.quick_push (vec_oprndsk[i]);
3053 vargs.quick_push (vec_oprndsk[i + 1]);
3055 gcall *call;
3056 if (ifn != IFN_LAST)
3057 call = gimple_build_call_internal_vec (ifn, vargs);
3058 else
3059 call = gimple_build_call_vec (fndecl, vargs);
3060 new_temp = make_ssa_name (vec_dest, call);
3061 gimple_call_set_lhs (call, new_temp);
3062 gimple_call_set_nothrow (call, true);
3063 new_stmt = call;
3064 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3065 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3068 for (i = 0; i < nargs; i++)
3070 vec<tree> vec_oprndsi = vec_defs[i];
3071 vec_oprndsi.release ();
3073 continue;
3076 for (i = 0; i < nargs; i++)
3078 op = gimple_call_arg (stmt, i);
3079 if (j == 0)
3081 vec_oprnd0
3082 = vect_get_vec_def_for_operand (op, stmt);
3083 vec_oprnd1
3084 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
3086 else
3088 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
3089 vec_oprnd0
3090 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
3091 vec_oprnd1
3092 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
3095 vargs.quick_push (vec_oprnd0);
3096 vargs.quick_push (vec_oprnd1);
3099 new_stmt = gimple_build_call_vec (fndecl, vargs);
3100 new_temp = make_ssa_name (vec_dest, new_stmt);
3101 gimple_call_set_lhs (new_stmt, new_temp);
3102 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3104 if (j == 0)
3105 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3106 else
3107 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3109 prev_stmt_info = vinfo_for_stmt (new_stmt);
3112 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3114 else
3115 /* No current target implements this case. */
3116 return false;
3118 vargs.release ();
3120 /* The call in STMT might prevent it from being removed in DCE.
3121 We however cannot remove it here, due to the way the SSA name
3122 it defines is mapped to the new definition. So just replace
3123 the rhs of the statement with something harmless. */
3125 if (slp_node)
3126 return true;
3128 type = TREE_TYPE (scalar_dest);
3129 if (is_pattern_stmt_p (stmt_info))
3130 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
3131 else
3132 lhs = gimple_call_lhs (stmt);
3134 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
3135 set_vinfo_for_stmt (new_stmt, stmt_info);
3136 set_vinfo_for_stmt (stmt, NULL);
3137 STMT_VINFO_STMT (stmt_info) = new_stmt;
3138 gsi_replace (gsi, new_stmt, false);
3140 return true;
3144 struct simd_call_arg_info
3146 tree vectype;
3147 tree op;
3148 HOST_WIDE_INT linear_step;
3149 enum vect_def_type dt;
3150 unsigned int align;
3151 bool simd_lane_linear;
3154 /* Helper function of vectorizable_simd_clone_call. If OP, an SSA_NAME,
3155 is linear within a simd lane (but not within the whole loop), note it in
3156 *ARGINFO. */
3158 static void
3159 vect_simd_lane_linear (tree op, struct loop *loop,
3160 struct simd_call_arg_info *arginfo)
3162 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
3164 if (!is_gimple_assign (def_stmt)
3165 || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
3166 || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
3167 return;
3169 tree base = gimple_assign_rhs1 (def_stmt);
3170 HOST_WIDE_INT linear_step = 0;
3171 tree v = gimple_assign_rhs2 (def_stmt);
3172 while (TREE_CODE (v) == SSA_NAME)
3174 tree t;
3175 def_stmt = SSA_NAME_DEF_STMT (v);
3176 if (is_gimple_assign (def_stmt))
3177 switch (gimple_assign_rhs_code (def_stmt))
3179 case PLUS_EXPR:
3180 t = gimple_assign_rhs2 (def_stmt);
3181 if (linear_step || TREE_CODE (t) != INTEGER_CST)
3182 return;
3183 base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
3184 v = gimple_assign_rhs1 (def_stmt);
3185 continue;
3186 case MULT_EXPR:
3187 t = gimple_assign_rhs2 (def_stmt);
3188 if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
3189 return;
3190 linear_step = tree_to_shwi (t);
3191 v = gimple_assign_rhs1 (def_stmt);
3192 continue;
3193 CASE_CONVERT:
3194 t = gimple_assign_rhs1 (def_stmt);
3195 if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
3196 || (TYPE_PRECISION (TREE_TYPE (v))
3197 < TYPE_PRECISION (TREE_TYPE (t))))
3198 return;
3199 if (!linear_step)
3200 linear_step = 1;
3201 v = t;
3202 continue;
3203 default:
3204 return;
3206 else if (gimple_call_internal_p (def_stmt, IFN_GOMP_SIMD_LANE)
3207 && loop->simduid
3208 && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
3209 && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
3210 == loop->simduid))
3212 if (!linear_step)
3213 linear_step = 1;
3214 arginfo->linear_step = linear_step;
3215 arginfo->op = base;
3216 arginfo->simd_lane_linear = true;
3217 return;
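/* The chain recognized above corresponds to an address computed as

     p = base + (sizetype) GOMP_SIMD_LANE (simduid) * 16;

   possibly with conversions and constant additions folded into BASE along
   the way; such an OP is linear within one simd lane with linear_step 16
   and op set to the invariant base.  */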
3222 /* Return the number of elements in vector type VECTYPE, which is associated
3223 with a SIMD clone. At present these vectors always have a constant
3224 length. */
3226 static unsigned HOST_WIDE_INT
3227 simd_clone_subparts (tree vectype)
3229 return TYPE_VECTOR_SUBPARTS (vectype);
3232 /* Function vectorizable_simd_clone_call.
3234 Check if STMT performs a function call that can be vectorized
3235 by calling a simd clone of the function.
3236 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3237 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3238 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3240 static bool
3241 vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
3242 gimple **vec_stmt, slp_tree slp_node)
3244 tree vec_dest;
3245 tree scalar_dest;
3246 tree op, type;
3247 tree vec_oprnd0 = NULL_TREE;
3248 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
3249 tree vectype;
3250 unsigned int nunits;
3251 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3252 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3253 vec_info *vinfo = stmt_info->vinfo;
3254 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
3255 tree fndecl, new_temp;
3256 gimple *def_stmt;
3257 gimple *new_stmt = NULL;
3258 int ncopies, j;
3259 auto_vec<simd_call_arg_info> arginfo;
3260 vec<tree> vargs = vNULL;
3261 size_t i, nargs;
3262 tree lhs, rtype, ratype;
3263 vec<constructor_elt, va_gc> *ret_ctor_elts = NULL;
3265 /* Is STMT a vectorizable call? */
3266 if (!is_gimple_call (stmt))
3267 return false;
3269 fndecl = gimple_call_fndecl (stmt);
3270 if (fndecl == NULL_TREE)
3271 return false;
3273 struct cgraph_node *node = cgraph_node::get (fndecl);
3274 if (node == NULL || node->simd_clones == NULL)
3275 return false;
3277 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3278 return false;
3280 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
3281 && ! vec_stmt)
3282 return false;
3284 if (gimple_call_lhs (stmt)
3285 && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
3286 return false;
3288 gcc_checking_assert (!stmt_can_throw_internal (stmt));
3290 vectype = STMT_VINFO_VECTYPE (stmt_info);
3292 if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
3293 return false;
3295 /* FORNOW */
3296 if (slp_node)
3297 return false;
3299 /* Process function arguments. */
3300 nargs = gimple_call_num_args (stmt);
3302 /* Bail out if the function has zero arguments. */
3303 if (nargs == 0)
3304 return false;
3306 arginfo.reserve (nargs, true);
3308 for (i = 0; i < nargs; i++)
3310 simd_call_arg_info thisarginfo;
3311 affine_iv iv;
3313 thisarginfo.linear_step = 0;
3314 thisarginfo.align = 0;
3315 thisarginfo.op = NULL_TREE;
3316 thisarginfo.simd_lane_linear = false;
3318 op = gimple_call_arg (stmt, i);
3319 if (!vect_is_simple_use (op, vinfo, &def_stmt, &thisarginfo.dt,
3320 &thisarginfo.vectype)
3321 || thisarginfo.dt == vect_uninitialized_def)
3323 if (dump_enabled_p ())
3324 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3325 "use not simple.\n");
3326 return false;
3329 if (thisarginfo.dt == vect_constant_def
3330 || thisarginfo.dt == vect_external_def)
3331 gcc_assert (thisarginfo.vectype == NULL_TREE);
3332 else
3333 gcc_assert (thisarginfo.vectype != NULL_TREE);
3335 /* For linear arguments, the analyze phase should have saved
3336 the base and step in STMT_VINFO_SIMD_CLONE_INFO. */
3337 if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
3338 && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
3340 gcc_assert (vec_stmt);
3341 thisarginfo.linear_step
3342 = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
3343 thisarginfo.op
3344 = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
3345 thisarginfo.simd_lane_linear
3346 = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
3347 == boolean_true_node);
3348 /* If the loop has been peeled for alignment, we need to adjust it. */
3349 tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
3350 tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
3351 if (n1 != n2 && !thisarginfo.simd_lane_linear)
3353 tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
3354 tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
3355 tree opt = TREE_TYPE (thisarginfo.op);
3356 bias = fold_convert (TREE_TYPE (step), bias);
3357 bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
3358 thisarginfo.op
3359 = fold_build2 (POINTER_TYPE_P (opt)
3360 ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
3361 thisarginfo.op, bias);
3364 else if (!vec_stmt
3365 && thisarginfo.dt != vect_constant_def
3366 && thisarginfo.dt != vect_external_def
3367 && loop_vinfo
3368 && TREE_CODE (op) == SSA_NAME
3369 && simple_iv (loop, loop_containing_stmt (stmt), op,
3370 &iv, false)
3371 && tree_fits_shwi_p (iv.step))
3373 thisarginfo.linear_step = tree_to_shwi (iv.step);
3374 thisarginfo.op = iv.base;
3376 else if ((thisarginfo.dt == vect_constant_def
3377 || thisarginfo.dt == vect_external_def)
3378 && POINTER_TYPE_P (TREE_TYPE (op)))
3379 thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
3380 /* Addresses of array elements indexed by GOMP_SIMD_LANE are
3381 linear too. */
3382 if (POINTER_TYPE_P (TREE_TYPE (op))
3383 && !thisarginfo.linear_step
3384 && !vec_stmt
3385 && thisarginfo.dt != vect_constant_def
3386 && thisarginfo.dt != vect_external_def
3387 && loop_vinfo
3388 && !slp_node
3389 && TREE_CODE (op) == SSA_NAME)
3390 vect_simd_lane_linear (op, loop, &thisarginfo);
3392 arginfo.quick_push (thisarginfo);
3395 unsigned HOST_WIDE_INT vf;
3396 if (!LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
3398 if (dump_enabled_p ())
3399 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3400 "not considering SIMD clones; not yet supported"
3401 " for variable-width vectors.\n");
3402 return false;
3405 unsigned int badness = 0;
3406 struct cgraph_node *bestn = NULL;
3407 if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
3408 bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
3409 else
3410 for (struct cgraph_node *n = node->simd_clones; n != NULL;
3411 n = n->simdclone->next_clone)
3413 unsigned int this_badness = 0;
3414 if (n->simdclone->simdlen > vf
3415 || n->simdclone->nargs != nargs)
3416 continue;
3417 if (n->simdclone->simdlen < vf)
3418 this_badness += (exact_log2 (vf)
3419 - exact_log2 (n->simdclone->simdlen)) * 1024;
3420 if (n->simdclone->inbranch)
3421 this_badness += 2048;
3422 int target_badness = targetm.simd_clone.usable (n);
3423 if (target_badness < 0)
3424 continue;
3425 this_badness += target_badness * 512;
3426 /* FORNOW: Have to add code to add the mask argument. */
3427 if (n->simdclone->inbranch)
3428 continue;
3429 for (i = 0; i < nargs; i++)
3431 switch (n->simdclone->args[i].arg_type)
3433 case SIMD_CLONE_ARG_TYPE_VECTOR:
3434 if (!useless_type_conversion_p
3435 (n->simdclone->args[i].orig_type,
3436 TREE_TYPE (gimple_call_arg (stmt, i))))
3437 i = -1;
3438 else if (arginfo[i].dt == vect_constant_def
3439 || arginfo[i].dt == vect_external_def
3440 || arginfo[i].linear_step)
3441 this_badness += 64;
3442 break;
3443 case SIMD_CLONE_ARG_TYPE_UNIFORM:
3444 if (arginfo[i].dt != vect_constant_def
3445 && arginfo[i].dt != vect_external_def)
3446 i = -1;
3447 break;
3448 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
3449 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
3450 if (arginfo[i].dt == vect_constant_def
3451 || arginfo[i].dt == vect_external_def
3452 || (arginfo[i].linear_step
3453 != n->simdclone->args[i].linear_step))
3454 i = -1;
3455 break;
3456 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
3457 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
3458 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
3459 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
3460 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
3461 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
3462 /* FORNOW */
3463 i = -1;
3464 break;
3465 case SIMD_CLONE_ARG_TYPE_MASK:
3466 gcc_unreachable ();
3468 if (i == (size_t) -1)
3469 break;
3470 if (n->simdclone->args[i].alignment > arginfo[i].align)
3472 i = -1;
3473 break;
3475 if (arginfo[i].align)
3476 this_badness += (exact_log2 (arginfo[i].align)
3477 - exact_log2 (n->simdclone->args[i].alignment));
3479 if (i == (size_t) -1)
3480 continue;
3481 if (bestn == NULL || this_badness < badness)
3483 bestn = n;
3484 badness = this_badness;
3488 if (bestn == NULL)
3489 return false;
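/* For example, with vf == 8 the scoring above gives an exactly matching,
   target-usable clone (simdlen 8) badness 0, while a simdlen-4 clone gets
   1024 for being one halving step below the vectorization factor; the
   clone with the lowest badness is chosen.  */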
3491 for (i = 0; i < nargs; i++)
3492 if ((arginfo[i].dt == vect_constant_def
3493 || arginfo[i].dt == vect_external_def)
3494 && bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
3496 arginfo[i].vectype
3497 = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
3498 i)));
3499 if (arginfo[i].vectype == NULL
3500 || (simd_clone_subparts (arginfo[i].vectype)
3501 > bestn->simdclone->simdlen))
3502 return false;
3505 fndecl = bestn->decl;
3506 nunits = bestn->simdclone->simdlen;
3507 ncopies = vf / nunits;
3509 /* If the function isn't const, only allow it in simd loops where the user
3510 has asserted that at least nunits consecutive iterations can be
3511 performed using SIMD instructions. */
3512 if ((loop == NULL || (unsigned) loop->safelen < nunits)
3513 && gimple_vuse (stmt))
3514 return false;
3516 /* Sanity check: make sure that at least one copy of the vectorized stmt
3517 needs to be generated. */
3518 gcc_assert (ncopies >= 1);
3520 if (!vec_stmt) /* transformation not required. */
3522 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
3523 for (i = 0; i < nargs; i++)
3524 if ((bestn->simdclone->args[i].arg_type
3525 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
3526 || (bestn->simdclone->args[i].arg_type
3527 == SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP))
3529 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
3530 + 1);
3531 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
3532 tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
3533 ? size_type_node : TREE_TYPE (arginfo[i].op);
3534 tree ls = build_int_cst (lst, arginfo[i].linear_step);
3535 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
3536 tree sll = arginfo[i].simd_lane_linear
3537 ? boolean_true_node : boolean_false_node;
3538 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
3540 STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
3541 if (dump_enabled_p ())
3542 dump_printf_loc (MSG_NOTE, vect_location,
3543 "=== vectorizable_simd_clone_call ===\n");
3544 /* vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
3545 return true;
3548 /* Transform. */
3550 if (dump_enabled_p ())
3551 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
3553 /* Handle def. */
3554 scalar_dest = gimple_call_lhs (stmt);
3555 vec_dest = NULL_TREE;
3556 rtype = NULL_TREE;
3557 ratype = NULL_TREE;
3558 if (scalar_dest)
3560 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3561 rtype = TREE_TYPE (TREE_TYPE (fndecl));
3562 if (TREE_CODE (rtype) == ARRAY_TYPE)
3564 ratype = rtype;
3565 rtype = TREE_TYPE (ratype);
3569 prev_stmt_info = NULL;
3570 for (j = 0; j < ncopies; ++j)
3572 /* Build argument list for the vectorized call. */
3573 if (j == 0)
3574 vargs.create (nargs);
3575 else
3576 vargs.truncate (0);
3578 for (i = 0; i < nargs; i++)
3580 unsigned int k, l, m, o;
3581 tree atype;
3582 op = gimple_call_arg (stmt, i);
3583 switch (bestn->simdclone->args[i].arg_type)
3585 case SIMD_CLONE_ARG_TYPE_VECTOR:
3586 atype = bestn->simdclone->args[i].vector_type;
3587 o = nunits / simd_clone_subparts (atype);
3588 for (m = j * o; m < (j + 1) * o; m++)
3590 if (simd_clone_subparts (atype)
3591 < simd_clone_subparts (arginfo[i].vectype))
3593 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
3594 k = (simd_clone_subparts (arginfo[i].vectype)
3595 / simd_clone_subparts (atype));
3596 gcc_assert ((k & (k - 1)) == 0);
3597 if (m == 0)
3598 vec_oprnd0
3599 = vect_get_vec_def_for_operand (op, stmt);
3600 else
3602 vec_oprnd0 = arginfo[i].op;
3603 if ((m & (k - 1)) == 0)
3604 vec_oprnd0
3605 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
3606 vec_oprnd0);
3608 arginfo[i].op = vec_oprnd0;
3609 vec_oprnd0
3610 = build3 (BIT_FIELD_REF, atype, vec_oprnd0,
3611 bitsize_int (prec),
3612 bitsize_int ((m & (k - 1)) * prec));
3613 new_stmt
3614 = gimple_build_assign (make_ssa_name (atype),
3615 vec_oprnd0);
3616 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3617 vargs.safe_push (gimple_assign_lhs (new_stmt));
3619 else
3621 k = (simd_clone_subparts (atype)
3622 / simd_clone_subparts (arginfo[i].vectype));
3623 gcc_assert ((k & (k - 1)) == 0);
3624 vec<constructor_elt, va_gc> *ctor_elts;
3625 if (k != 1)
3626 vec_alloc (ctor_elts, k);
3627 else
3628 ctor_elts = NULL;
3629 for (l = 0; l < k; l++)
3631 if (m == 0 && l == 0)
3632 vec_oprnd0
3633 = vect_get_vec_def_for_operand (op, stmt);
3634 else
3635 vec_oprnd0
3636 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
3637 arginfo[i].op);
3638 arginfo[i].op = vec_oprnd0;
3639 if (k == 1)
3640 break;
3641 CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
3642 vec_oprnd0);
3644 if (k == 1)
3645 vargs.safe_push (vec_oprnd0);
3646 else
3648 vec_oprnd0 = build_constructor (atype, ctor_elts);
3649 new_stmt
3650 = gimple_build_assign (make_ssa_name (atype),
3651 vec_oprnd0);
3652 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3653 vargs.safe_push (gimple_assign_lhs (new_stmt));
3657 break;
3658 case SIMD_CLONE_ARG_TYPE_UNIFORM:
3659 vargs.safe_push (op);
3660 break;
3661 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
3662 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
3663 if (j == 0)
3665 gimple_seq stmts;
3666 arginfo[i].op
3667 = force_gimple_operand (arginfo[i].op, &stmts, true,
3668 NULL_TREE);
3669 if (stmts != NULL)
3671 basic_block new_bb;
3672 edge pe = loop_preheader_edge (loop);
3673 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
3674 gcc_assert (!new_bb);
3676 if (arginfo[i].simd_lane_linear)
3678 vargs.safe_push (arginfo[i].op);
3679 break;
3681 tree phi_res = copy_ssa_name (op);
3682 gphi *new_phi = create_phi_node (phi_res, loop->header);
3683 set_vinfo_for_stmt (new_phi,
3684 new_stmt_vec_info (new_phi, loop_vinfo));
3685 add_phi_arg (new_phi, arginfo[i].op,
3686 loop_preheader_edge (loop), UNKNOWN_LOCATION);
3687 enum tree_code code
3688 = POINTER_TYPE_P (TREE_TYPE (op))
3689 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3690 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3691 ? sizetype : TREE_TYPE (op);
3692 widest_int cst
3693 = wi::mul (bestn->simdclone->args[i].linear_step,
3694 ncopies * nunits);
3695 tree tcst = wide_int_to_tree (type, cst);
3696 tree phi_arg = copy_ssa_name (op);
3697 new_stmt
3698 = gimple_build_assign (phi_arg, code, phi_res, tcst);
3699 gimple_stmt_iterator si = gsi_after_labels (loop->header);
3700 gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
3701 set_vinfo_for_stmt (new_stmt,
3702 new_stmt_vec_info (new_stmt, loop_vinfo));
3703 add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
3704 UNKNOWN_LOCATION);
3705 arginfo[i].op = phi_res;
3706 vargs.safe_push (phi_res);
3708 else
3710 enum tree_code code
3711 = POINTER_TYPE_P (TREE_TYPE (op))
3712 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3713 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3714 ? sizetype : TREE_TYPE (op);
3715 widest_int cst
3716 = wi::mul (bestn->simdclone->args[i].linear_step,
3717 j * nunits);
3718 tree tcst = wide_int_to_tree (type, cst);
3719 new_temp = make_ssa_name (TREE_TYPE (op));
3720 new_stmt = gimple_build_assign (new_temp, code,
3721 arginfo[i].op, tcst);
3722 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3723 vargs.safe_push (new_temp);
3725 break;
3726 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
3727 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
3728 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
3729 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
3730 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
3731 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
3732 default:
3733 gcc_unreachable ();
3737 new_stmt = gimple_build_call_vec (fndecl, vargs);
3738 if (vec_dest)
3740 gcc_assert (ratype || simd_clone_subparts (rtype) == nunits);
3741 if (ratype)
3742 new_temp = create_tmp_var (ratype);
3743 else if (simd_clone_subparts (vectype)
3744 == simd_clone_subparts (rtype))
3745 new_temp = make_ssa_name (vec_dest, new_stmt);
3746 else
3747 new_temp = make_ssa_name (rtype, new_stmt);
3748 gimple_call_set_lhs (new_stmt, new_temp);
3750 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3752 if (vec_dest)
3754 if (simd_clone_subparts (vectype) < nunits)
3756 unsigned int k, l;
3757 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
3758 k = nunits / simd_clone_subparts (vectype);
3759 gcc_assert ((k & (k - 1)) == 0);
3760 for (l = 0; l < k; l++)
3762 tree t;
3763 if (ratype)
3765 t = build_fold_addr_expr (new_temp);
3766 t = build2 (MEM_REF, vectype, t,
3767 build_int_cst (TREE_TYPE (t),
3768 l * prec / BITS_PER_UNIT));
3770 else
3771 t = build3 (BIT_FIELD_REF, vectype, new_temp,
3772 bitsize_int (prec), bitsize_int (l * prec));
3773 new_stmt
3774 = gimple_build_assign (make_ssa_name (vectype), t);
3775 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3776 if (j == 0 && l == 0)
3777 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3778 else
3779 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3781 prev_stmt_info = vinfo_for_stmt (new_stmt);
3784 if (ratype)
3786 tree clobber = build_constructor (ratype, NULL);
3787 TREE_THIS_VOLATILE (clobber) = 1;
3788 new_stmt = gimple_build_assign (new_temp, clobber);
3789 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3791 continue;
3793 else if (simd_clone_subparts (vectype) > nunits)
3795 unsigned int k = (simd_clone_subparts (vectype)
3796 / simd_clone_subparts (rtype));
3797 gcc_assert ((k & (k - 1)) == 0);
3798 if ((j & (k - 1)) == 0)
3799 vec_alloc (ret_ctor_elts, k);
3800 if (ratype)
3802 unsigned int m, o = nunits / simd_clone_subparts (rtype);
3803 for (m = 0; m < o; m++)
3805 tree tem = build4 (ARRAY_REF, rtype, new_temp,
3806 size_int (m), NULL_TREE, NULL_TREE);
3807 new_stmt
3808 = gimple_build_assign (make_ssa_name (rtype), tem);
3809 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3810 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
3811 gimple_assign_lhs (new_stmt));
3813 tree clobber = build_constructor (ratype, NULL);
3814 TREE_THIS_VOLATILE (clobber) = 1;
3815 new_stmt = gimple_build_assign (new_temp, clobber);
3816 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3818 else
3819 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
3820 if ((j & (k - 1)) != k - 1)
3821 continue;
3822 vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
3823 new_stmt
3824 = gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
3825 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3827 if ((unsigned) j == k - 1)
3828 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3829 else
3830 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3832 prev_stmt_info = vinfo_for_stmt (new_stmt);
3833 continue;
3835 else if (ratype)
3837 tree t = build_fold_addr_expr (new_temp);
3838 t = build2 (MEM_REF, vectype, t,
3839 build_int_cst (TREE_TYPE (t), 0));
3840 new_stmt
3841 = gimple_build_assign (make_ssa_name (vec_dest), t);
3842 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3843 tree clobber = build_constructor (ratype, NULL);
3844 TREE_THIS_VOLATILE (clobber) = 1;
3845 vect_finish_stmt_generation (stmt,
3846 gimple_build_assign (new_temp,
3847 clobber), gsi);
3851 if (j == 0)
3852 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3853 else
3854 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3856 prev_stmt_info = vinfo_for_stmt (new_stmt);
3859 vargs.release ();
3861 /* The call in STMT might prevent it from being removed by DCE.
3862 However, we cannot remove it here, due to the way the SSA name
3863 it defines is mapped to the new definition. So just replace the
3864 rhs of the statement with something harmless. */
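/* For example (names illustrative), a scalar call like
     x_1 = foo (a_2);
   whose value is now produced by the vectorized clone calls emitted
   above is rewritten below as  x_1 = 0,  or into a gimple nop when
   there is no lhs. */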
3866 if (slp_node)
3867 return true;
3869 if (scalar_dest)
3871 type = TREE_TYPE (scalar_dest);
3872 if (is_pattern_stmt_p (stmt_info))
3873 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
3874 else
3875 lhs = gimple_call_lhs (stmt);
3876 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
3878 else
3879 new_stmt = gimple_build_nop ();
3880 set_vinfo_for_stmt (new_stmt, stmt_info);
3881 set_vinfo_for_stmt (stmt, NULL);
3882 STMT_VINFO_STMT (stmt_info) = new_stmt;
3883 gsi_replace (gsi, new_stmt, true);
3884 unlink_stmt_vdef (stmt);
3886 return true;
3890 /* Function vect_gen_widened_results_half
3892 Create a vector stmt whose code is CODE, whose number of arguments
3893 is OP_TYPE, whose result variable is VEC_DEST, and whose arguments
3894 are VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted
3895 at GSI. In the case that CODE is a CALL_EXPR, this means that a call
3896 to DECL needs to be created (DECL is the function decl of a target
3897 builtin). STMT is the original scalar stmt that we are vectorizing. */
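/* As a rough illustration (the modes are only an example): to widen
   a V8HI operand into V4SI results, the caller invokes this twice,
   e.g. once with CODE = VEC_UNPACK_LO_EXPR and once with
   CODE = VEC_UNPACK_HI_EXPR, each call producing one V4SI definition
   in a new SSA name based on VEC_DEST. */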
3899 static gimple *
3900 vect_gen_widened_results_half (enum tree_code code,
3901 tree decl,
3902 tree vec_oprnd0, tree vec_oprnd1, int op_type,
3903 tree vec_dest, gimple_stmt_iterator *gsi,
3904 gimple *stmt)
3906 gimple *new_stmt;
3907 tree new_temp;
3909 /* Generate half of the widened result: */
3910 if (code == CALL_EXPR)
3912 /* Target specific support */
3913 if (op_type == binary_op)
3914 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
3915 else
3916 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
3917 new_temp = make_ssa_name (vec_dest, new_stmt);
3918 gimple_call_set_lhs (new_stmt, new_temp);
3920 else
3922 /* Generic support */
3923 gcc_assert (op_type == TREE_CODE_LENGTH (code));
3924 if (op_type != binary_op)
3925 vec_oprnd1 = NULL;
3926 new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
3927 new_temp = make_ssa_name (vec_dest, new_stmt);
3928 gimple_assign_set_lhs (new_stmt, new_temp);
3930 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3932 return new_stmt;
3936 /* Get vectorized definitions for loop-based vectorization. For the first
3937 operand we call vect_get_vec_def_for_operand() (with OPRND containing
3938 the scalar operand), and for the rest we get a copy with
3939 vect_get_vec_def_for_stmt_copy() using the previous vector definition
3940 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
3941 The vectors are collected into VEC_OPRNDS. */
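/* For instance (a sketch), with MULTI_STEP_CVT == 1 this collects
   four vector defs in VEC_OPRNDS: the def obtained for the scalar
   OPRND followed by three successive copies obtained with
   vect_get_vec_def_for_stmt_copy. */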
3943 static void
3944 vect_get_loop_based_defs (tree *oprnd, gimple *stmt, enum vect_def_type dt,
3945 vec<tree> *vec_oprnds, int multi_step_cvt)
3947 tree vec_oprnd;
3949 /* Get first vector operand. */
3950 /* All the vector operands except the very first one (that is the scalar oprnd)
3951 are stmt copies. */
3952 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
3953 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt);
3954 else
3955 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
3957 vec_oprnds->quick_push (vec_oprnd);
3959 /* Get second vector operand. */
3960 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
3961 vec_oprnds->quick_push (vec_oprnd);
3963 *oprnd = vec_oprnd;
3965 /* For conversion in multiple steps, continue to get operands
3966 recursively. */
3967 if (multi_step_cvt)
3968 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
3972 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
3973 For multi-step conversions store the resulting vectors and call the function
3974 recursively. */
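/* A small example (element types are illustrative): to demote four
   V4SI vectors into a single V16QI vector, the first invocation packs
   pairs of V4SI operands into two V8HI vectors using CODE, and the
   recursive invocation packs those two into one V16QI vector with
   VEC_PACK_TRUNC_EXPR. */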
3976 static void
3977 vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
3978 int multi_step_cvt, gimple *stmt,
3979 vec<tree> vec_dsts,
3980 gimple_stmt_iterator *gsi,
3981 slp_tree slp_node, enum tree_code code,
3982 stmt_vec_info *prev_stmt_info)
3984 unsigned int i;
3985 tree vop0, vop1, new_tmp, vec_dest;
3986 gimple *new_stmt;
3987 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3989 vec_dest = vec_dsts.pop ();
3991 for (i = 0; i < vec_oprnds->length (); i += 2)
3993 /* Create demotion operation. */
3994 vop0 = (*vec_oprnds)[i];
3995 vop1 = (*vec_oprnds)[i + 1];
3996 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
3997 new_tmp = make_ssa_name (vec_dest, new_stmt);
3998 gimple_assign_set_lhs (new_stmt, new_tmp);
3999 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4001 if (multi_step_cvt)
4002 /* Store the resulting vector for next recursive call. */
4003 (*vec_oprnds)[i/2] = new_tmp;
4004 else
4006 /* This is the last step of the conversion sequence. Store the
4007 vectors in SLP_NODE or in the vector info of the scalar statement
4008 (or in the STMT_VINFO_RELATED_STMT chain). */
4009 if (slp_node)
4010 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4011 else
4013 if (!*prev_stmt_info)
4014 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
4015 else
4016 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
4018 *prev_stmt_info = vinfo_for_stmt (new_stmt);
4023 /* For multi-step demotion operations we first generate demotion operations
4024 from the source type to the intermediate types, and then combine the
4025 results (stored in VEC_OPRNDS) with a demotion operation to the
4026 destination type. */
4027 if (multi_step_cvt)
4029 /* At each level of recursion we have half of the operands we had at the
4030 previous level. */
4031 vec_oprnds->truncate ((i+1)/2);
4032 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
4033 stmt, vec_dsts, gsi, slp_node,
4034 VEC_PACK_TRUNC_EXPR,
4035 prev_stmt_info);
4038 vec_dsts.quick_push (vec_dest);
4042 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
4043 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
4044 the resulting vectors and call the function recursively. */
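/* Roughly: each vector in VEC_OPRNDS0 yields two wider result
   vectors, one produced with CODE1/DECL1 (the low half) and one with
   CODE2/DECL2 (the high half), so on return VEC_OPRNDS0 holds twice
   as many defs as it did on entry. */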
4046 static void
4047 vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
4048 vec<tree> *vec_oprnds1,
4049 gimple *stmt, tree vec_dest,
4050 gimple_stmt_iterator *gsi,
4051 enum tree_code code1,
4052 enum tree_code code2, tree decl1,
4053 tree decl2, int op_type)
4055 int i;
4056 tree vop0, vop1, new_tmp1, new_tmp2;
4057 gimple *new_stmt1, *new_stmt2;
4058 vec<tree> vec_tmp = vNULL;
4060 vec_tmp.create (vec_oprnds0->length () * 2);
4061 FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
4063 if (op_type == binary_op)
4064 vop1 = (*vec_oprnds1)[i];
4065 else
4066 vop1 = NULL_TREE;
4068 /* Generate the two halves of the promotion operation. */
4069 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
4070 op_type, vec_dest, gsi, stmt);
4071 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
4072 op_type, vec_dest, gsi, stmt);
4073 if (is_gimple_call (new_stmt1))
4075 new_tmp1 = gimple_call_lhs (new_stmt1);
4076 new_tmp2 = gimple_call_lhs (new_stmt2);
4078 else
4080 new_tmp1 = gimple_assign_lhs (new_stmt1);
4081 new_tmp2 = gimple_assign_lhs (new_stmt2);
4084 /* Store the results for the next step. */
4085 vec_tmp.quick_push (new_tmp1);
4086 vec_tmp.quick_push (new_tmp2);
4089 vec_oprnds0->release ();
4090 *vec_oprnds0 = vec_tmp;
4094 /* Check if STMT performs a conversion operation that can be vectorized.
4095 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4096 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4097 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
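/* For example (a sketch; the exact codes depend on the target), a
   widening conversion such as  int_8 = (int) short_7  is vectorized
   with VEC_UNPACK_LO_EXPR/VEC_UNPACK_HI_EXPR, a narrowing one such as
   short_8 = (short) int_7  with VEC_PACK_TRUNC_EXPR, and a conversion
   like char -> float may need an intermediate widening step to int
   before the FLOAT_EXPR. */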
4099 static bool
4100 vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
4101 gimple **vec_stmt, slp_tree slp_node)
4103 tree vec_dest;
4104 tree scalar_dest;
4105 tree op0, op1 = NULL_TREE;
4106 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
4107 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4108 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4109 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
4110 enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
4111 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
4112 tree new_temp;
4113 gimple *def_stmt;
4114 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4115 int ndts = 2;
4116 gimple *new_stmt = NULL;
4117 stmt_vec_info prev_stmt_info;
4118 poly_uint64 nunits_in;
4119 poly_uint64 nunits_out;
4120 tree vectype_out, vectype_in;
4121 int ncopies, i, j;
4122 tree lhs_type, rhs_type;
4123 enum { NARROW, NONE, WIDEN } modifier;
4124 vec<tree> vec_oprnds0 = vNULL;
4125 vec<tree> vec_oprnds1 = vNULL;
4126 tree vop0;
4127 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4128 vec_info *vinfo = stmt_info->vinfo;
4129 int multi_step_cvt = 0;
4130 vec<tree> interm_types = vNULL;
4131 tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
4132 int op_type;
4133 unsigned short fltsz;
4135 /* Is STMT a vectorizable conversion? */
4137 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4138 return false;
4140 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4141 && ! vec_stmt)
4142 return false;
4144 if (!is_gimple_assign (stmt))
4145 return false;
4147 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4148 return false;
4150 code = gimple_assign_rhs_code (stmt);
4151 if (!CONVERT_EXPR_CODE_P (code)
4152 && code != FIX_TRUNC_EXPR
4153 && code != FLOAT_EXPR
4154 && code != WIDEN_MULT_EXPR
4155 && code != WIDEN_LSHIFT_EXPR)
4156 return false;
4158 op_type = TREE_CODE_LENGTH (code);
4160 /* Check types of lhs and rhs. */
4161 scalar_dest = gimple_assign_lhs (stmt);
4162 lhs_type = TREE_TYPE (scalar_dest);
4163 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4165 op0 = gimple_assign_rhs1 (stmt);
4166 rhs_type = TREE_TYPE (op0);
4168 if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
4169 && !((INTEGRAL_TYPE_P (lhs_type)
4170 && INTEGRAL_TYPE_P (rhs_type))
4171 || (SCALAR_FLOAT_TYPE_P (lhs_type)
4172 && SCALAR_FLOAT_TYPE_P (rhs_type))))
4173 return false;
4175 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
4176 && ((INTEGRAL_TYPE_P (lhs_type)
4177 && !type_has_mode_precision_p (lhs_type))
4178 || (INTEGRAL_TYPE_P (rhs_type)
4179 && !type_has_mode_precision_p (rhs_type))))
4181 if (dump_enabled_p ())
4182 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4183 "type conversion to/from bit-precision unsupported."
4184 "\n");
4185 return false;
4188 /* Check the operands of the operation. */
4189 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype_in))
4191 if (dump_enabled_p ())
4192 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4193 "use not simple.\n");
4194 return false;
4196 if (op_type == binary_op)
4198 bool ok;
4200 op1 = gimple_assign_rhs2 (stmt);
4201 gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
4202 /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
4203 OP1. */
4204 if (CONSTANT_CLASS_P (op0))
4205 ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &vectype_in);
4206 else
4207 ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]);
4209 if (!ok)
4211 if (dump_enabled_p ())
4212 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4213 "use not simple.\n");
4214 return false;
4218 /* If op0 is an external or constant def use a vector type of
4219 the same size as the output vector type. */
4220 if (!vectype_in)
4221 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
4222 if (vec_stmt)
4223 gcc_assert (vectype_in);
4224 if (!vectype_in)
4226 if (dump_enabled_p ())
4228 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4229 "no vectype for scalar type ");
4230 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
4231 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4234 return false;
4237 if (VECTOR_BOOLEAN_TYPE_P (vectype_out)
4238 && !VECTOR_BOOLEAN_TYPE_P (vectype_in))
4240 if (dump_enabled_p ())
4242 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4243 "can't convert between boolean and non "
4244 "boolean vectors");
4245 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
4246 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4249 return false;
4252 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
4253 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4254 if (known_eq (nunits_out, nunits_in))
4255 modifier = NONE;
4256 else if (multiple_p (nunits_out, nunits_in))
4257 modifier = NARROW;
4258 else
4260 gcc_checking_assert (multiple_p (nunits_in, nunits_out));
4261 modifier = WIDEN;
4264 /* Multiple types in SLP are handled by creating the appropriate number of
4265 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4266 case of SLP. */
4267 if (slp_node)
4268 ncopies = 1;
4269 else if (modifier == NARROW)
4270 ncopies = vect_get_num_copies (loop_vinfo, vectype_out);
4271 else
4272 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
4274 /* Sanity check: make sure that at least one copy of the vectorized stmt
4275 needs to be generated. */
4276 gcc_assert (ncopies >= 1);
4278 bool found_mode = false;
4279 scalar_mode lhs_mode = SCALAR_TYPE_MODE (lhs_type);
4280 scalar_mode rhs_mode = SCALAR_TYPE_MODE (rhs_type);
4281 opt_scalar_mode rhs_mode_iter;
4283 /* Supportable by target? */
4284 switch (modifier)
4286 case NONE:
4287 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
4288 return false;
4289 if (supportable_convert_operation (code, vectype_out, vectype_in,
4290 &decl1, &code1))
4291 break;
4292 /* FALLTHRU */
4293 unsupported:
4294 if (dump_enabled_p ())
4295 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4296 "conversion not supported by target.\n");
4297 return false;
4299 case WIDEN:
4300 if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
4301 &code1, &code2, &multi_step_cvt,
4302 &interm_types))
4304 /* A binary widening operation can only be supported directly by the
4305 architecture. */
4306 gcc_assert (!(multi_step_cvt && op_type == binary_op));
4307 break;
4310 if (code != FLOAT_EXPR
4311 || GET_MODE_SIZE (lhs_mode) <= GET_MODE_SIZE (rhs_mode))
4312 goto unsupported;
4314 fltsz = GET_MODE_SIZE (lhs_mode);
4315 FOR_EACH_2XWIDER_MODE (rhs_mode_iter, rhs_mode)
4317 rhs_mode = rhs_mode_iter.require ();
4318 if (GET_MODE_SIZE (rhs_mode) > fltsz)
4319 break;
4321 cvt_type
4322 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
4323 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
4324 if (cvt_type == NULL_TREE)
4325 goto unsupported;
4327 if (GET_MODE_SIZE (rhs_mode) == fltsz)
4329 if (!supportable_convert_operation (code, vectype_out,
4330 cvt_type, &decl1, &codecvt1))
4331 goto unsupported;
4333 else if (!supportable_widening_operation (code, stmt, vectype_out,
4334 cvt_type, &codecvt1,
4335 &codecvt2, &multi_step_cvt,
4336 &interm_types))
4337 continue;
4338 else
4339 gcc_assert (multi_step_cvt == 0);
4341 if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
4342 vectype_in, &code1, &code2,
4343 &multi_step_cvt, &interm_types))
4345 found_mode = true;
4346 break;
4350 if (!found_mode)
4351 goto unsupported;
4353 if (GET_MODE_SIZE (rhs_mode) == fltsz)
4354 codecvt2 = ERROR_MARK;
4355 else
4357 multi_step_cvt++;
4358 interm_types.safe_push (cvt_type);
4359 cvt_type = NULL_TREE;
4361 break;
4363 case NARROW:
4364 gcc_assert (op_type == unary_op);
4365 if (supportable_narrowing_operation (code, vectype_out, vectype_in,
4366 &code1, &multi_step_cvt,
4367 &interm_types))
4368 break;
4370 if (code != FIX_TRUNC_EXPR
4371 || GET_MODE_SIZE (lhs_mode) >= GET_MODE_SIZE (rhs_mode))
4372 goto unsupported;
4374 cvt_type
4375 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
4376 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
4377 if (cvt_type == NULL_TREE)
4378 goto unsupported;
4379 if (!supportable_convert_operation (code, cvt_type, vectype_in,
4380 &decl1, &codecvt1))
4381 goto unsupported;
4382 if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
4383 &code1, &multi_step_cvt,
4384 &interm_types))
4385 break;
4386 goto unsupported;
4388 default:
4389 gcc_unreachable ();
4392 if (!vec_stmt) /* transformation not required. */
4394 if (dump_enabled_p ())
4395 dump_printf_loc (MSG_NOTE, vect_location,
4396 "=== vectorizable_conversion ===\n");
4397 if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
4399 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
4400 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
4402 else if (modifier == NARROW)
4404 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
4405 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
4407 else
4409 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
4410 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
4412 interm_types.release ();
4413 return true;
4416 /* Transform. */
4417 if (dump_enabled_p ())
4418 dump_printf_loc (MSG_NOTE, vect_location,
4419 "transform conversion. ncopies = %d.\n", ncopies);
4421 if (op_type == binary_op)
4423 if (CONSTANT_CLASS_P (op0))
4424 op0 = fold_convert (TREE_TYPE (op1), op0);
4425 else if (CONSTANT_CLASS_P (op1))
4426 op1 = fold_convert (TREE_TYPE (op0), op1);
4429 /* In case of multi-step conversion, we first generate conversion operations
4430 to the intermediate types, and then from those types to the final one.
4431 We create vector destinations for the intermediate types (TYPES) received
4432 from supportable_*_operation, and store them in the correct order
4433 for future use in vect_create_vectorized_*_stmts (). */
4434 auto_vec<tree> vec_dsts (multi_step_cvt + 1);
4435 vec_dest = vect_create_destination_var (scalar_dest,
4436 (cvt_type && modifier == WIDEN)
4437 ? cvt_type : vectype_out);
4438 vec_dsts.quick_push (vec_dest);
4440 if (multi_step_cvt)
4442 for (i = interm_types.length () - 1;
4443 interm_types.iterate (i, &intermediate_type); i--)
4445 vec_dest = vect_create_destination_var (scalar_dest,
4446 intermediate_type);
4447 vec_dsts.quick_push (vec_dest);
4451 if (cvt_type)
4452 vec_dest = vect_create_destination_var (scalar_dest,
4453 modifier == WIDEN
4454 ? vectype_out : cvt_type);
4456 if (!slp_node)
4458 if (modifier == WIDEN)
4460 vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
4461 if (op_type == binary_op)
4462 vec_oprnds1.create (1);
4464 else if (modifier == NARROW)
4465 vec_oprnds0.create (
4466 2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
4468 else if (code == WIDEN_LSHIFT_EXPR)
4469 vec_oprnds1.create (slp_node->vec_stmts_size);
4471 last_oprnd = op0;
4472 prev_stmt_info = NULL;
4473 switch (modifier)
4475 case NONE:
4476 for (j = 0; j < ncopies; j++)
4478 if (j == 0)
4479 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
4480 else
4481 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
4483 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4485 /* Arguments are ready. Create the new vector stmt. */
4486 if (code1 == CALL_EXPR)
4488 new_stmt = gimple_build_call (decl1, 1, vop0);
4489 new_temp = make_ssa_name (vec_dest, new_stmt);
4490 gimple_call_set_lhs (new_stmt, new_temp);
4492 else
4494 gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
4495 new_stmt = gimple_build_assign (vec_dest, code1, vop0);
4496 new_temp = make_ssa_name (vec_dest, new_stmt);
4497 gimple_assign_set_lhs (new_stmt, new_temp);
4500 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4501 if (slp_node)
4502 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4503 else
4505 if (!prev_stmt_info)
4506 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4507 else
4508 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4509 prev_stmt_info = vinfo_for_stmt (new_stmt);
4513 break;
4515 case WIDEN:
4516 /* In case the vectorization factor (VF) is bigger than the number
4517 of elements that we can fit in a vectype (nunits), we have to
4518 generate more than one vector stmt - i.e., we need to "unroll"
4519 the vector stmt by a factor of VF/nunits. */
4520 for (j = 0; j < ncopies; j++)
4522 /* Handle uses. */
4523 if (j == 0)
4525 if (slp_node)
4527 if (code == WIDEN_LSHIFT_EXPR)
4529 unsigned int k;
4531 vec_oprnd1 = op1;
4532 /* Store vec_oprnd1 for every vector stmt to be created
4533 for SLP_NODE. We check during the analysis that all
4534 the shift arguments are the same. */
4535 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
4536 vec_oprnds1.quick_push (vec_oprnd1);
4538 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4539 slp_node);
4541 else
4542 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
4543 &vec_oprnds1, slp_node);
4545 else
4547 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt);
4548 vec_oprnds0.quick_push (vec_oprnd0);
4549 if (op_type == binary_op)
4551 if (code == WIDEN_LSHIFT_EXPR)
4552 vec_oprnd1 = op1;
4553 else
4554 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt);
4555 vec_oprnds1.quick_push (vec_oprnd1);
4559 else
4561 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
4562 vec_oprnds0.truncate (0);
4563 vec_oprnds0.quick_push (vec_oprnd0);
4564 if (op_type == binary_op)
4566 if (code == WIDEN_LSHIFT_EXPR)
4567 vec_oprnd1 = op1;
4568 else
4569 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
4570 vec_oprnd1);
4571 vec_oprnds1.truncate (0);
4572 vec_oprnds1.quick_push (vec_oprnd1);
4576 /* Arguments are ready. Create the new vector stmts. */
4577 for (i = multi_step_cvt; i >= 0; i--)
4579 tree this_dest = vec_dsts[i];
4580 enum tree_code c1 = code1, c2 = code2;
4581 if (i == 0 && codecvt2 != ERROR_MARK)
4583 c1 = codecvt1;
4584 c2 = codecvt2;
4586 vect_create_vectorized_promotion_stmts (&vec_oprnds0,
4587 &vec_oprnds1,
4588 stmt, this_dest, gsi,
4589 c1, c2, decl1, decl2,
4590 op_type);
4593 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4595 if (cvt_type)
4597 if (codecvt1 == CALL_EXPR)
4599 new_stmt = gimple_build_call (decl1, 1, vop0);
4600 new_temp = make_ssa_name (vec_dest, new_stmt);
4601 gimple_call_set_lhs (new_stmt, new_temp);
4603 else
4605 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
4606 new_temp = make_ssa_name (vec_dest);
4607 new_stmt = gimple_build_assign (new_temp, codecvt1,
4608 vop0);
4611 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4613 else
4614 new_stmt = SSA_NAME_DEF_STMT (vop0);
4616 if (slp_node)
4617 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4618 else
4620 if (!prev_stmt_info)
4621 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
4622 else
4623 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4624 prev_stmt_info = vinfo_for_stmt (new_stmt);
4629 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4630 break;
4632 case NARROW:
4633 /* In case the vectorization factor (VF) is bigger than the number
4634 of elements that we can fit in a vectype (nunits), we have to
4635 generate more than one vector stmt - i.e., we need to "unroll"
4636 the vector stmt by a factor of VF/nunits. */
4637 for (j = 0; j < ncopies; j++)
4639 /* Handle uses. */
4640 if (slp_node)
4641 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4642 slp_node);
4643 else
4645 vec_oprnds0.truncate (0);
4646 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
4647 vect_pow2 (multi_step_cvt) - 1);
4650 /* Arguments are ready. Create the new vector stmts. */
4651 if (cvt_type)
4652 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4654 if (codecvt1 == CALL_EXPR)
4656 new_stmt = gimple_build_call (decl1, 1, vop0);
4657 new_temp = make_ssa_name (vec_dest, new_stmt);
4658 gimple_call_set_lhs (new_stmt, new_temp);
4660 else
4662 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
4663 new_temp = make_ssa_name (vec_dest);
4664 new_stmt = gimple_build_assign (new_temp, codecvt1,
4665 vop0);
4668 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4669 vec_oprnds0[i] = new_temp;
4672 vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
4673 stmt, vec_dsts, gsi,
4674 slp_node, code1,
4675 &prev_stmt_info);
4678 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4679 break;
4682 vec_oprnds0.release ();
4683 vec_oprnds1.release ();
4684 interm_types.release ();
4686 return true;
4690 /* Function vectorizable_assignment.
4692 Check if STMT performs an assignment (copy) that can be vectorized.
4693 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4694 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4695 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
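/* For example, a plain copy  a_2 = b_1  or a conversion that only
   changes the sign such as  a_2 = (unsigned int) b_1  is vectorized
   as a simple vector copy, with the operand wrapped in a
   VIEW_CONVERT_EXPR to the destination vectype when the rhs is a
   conversion. */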
4697 static bool
4698 vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
4699 gimple **vec_stmt, slp_tree slp_node)
4701 tree vec_dest;
4702 tree scalar_dest;
4703 tree op;
4704 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4705 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4706 tree new_temp;
4707 gimple *def_stmt;
4708 enum vect_def_type dt[1] = {vect_unknown_def_type};
4709 int ndts = 1;
4710 int ncopies;
4711 int i, j;
4712 vec<tree> vec_oprnds = vNULL;
4713 tree vop;
4714 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4715 vec_info *vinfo = stmt_info->vinfo;
4716 gimple *new_stmt = NULL;
4717 stmt_vec_info prev_stmt_info = NULL;
4718 enum tree_code code;
4719 tree vectype_in;
4721 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4722 return false;
4724 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4725 && ! vec_stmt)
4726 return false;
4728 /* Is vectorizable assignment? */
4729 if (!is_gimple_assign (stmt))
4730 return false;
4732 scalar_dest = gimple_assign_lhs (stmt);
4733 if (TREE_CODE (scalar_dest) != SSA_NAME)
4734 return false;
4736 code = gimple_assign_rhs_code (stmt);
4737 if (gimple_assign_single_p (stmt)
4738 || code == PAREN_EXPR
4739 || CONVERT_EXPR_CODE_P (code))
4740 op = gimple_assign_rhs1 (stmt);
4741 else
4742 return false;
4744 if (code == VIEW_CONVERT_EXPR)
4745 op = TREE_OPERAND (op, 0);
4747 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4748 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4750 /* Multiple types in SLP are handled by creating the appropriate number of
4751 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4752 case of SLP. */
4753 if (slp_node)
4754 ncopies = 1;
4755 else
4756 ncopies = vect_get_num_copies (loop_vinfo, vectype);
4758 gcc_assert (ncopies >= 1);
4760 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[0], &vectype_in))
4762 if (dump_enabled_p ())
4763 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4764 "use not simple.\n");
4765 return false;
4768 /* We can handle NOP_EXPR conversions that do not change the number
4769 of elements or the vector size. */
4770 if ((CONVERT_EXPR_CODE_P (code)
4771 || code == VIEW_CONVERT_EXPR)
4772 && (!vectype_in
4773 || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
4774 || (GET_MODE_SIZE (TYPE_MODE (vectype))
4775 != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
4776 return false;
4778 /* We do not handle bit-precision changes. */
4779 if ((CONVERT_EXPR_CODE_P (code)
4780 || code == VIEW_CONVERT_EXPR)
4781 && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
4782 && (!type_has_mode_precision_p (TREE_TYPE (scalar_dest))
4783 || !type_has_mode_precision_p (TREE_TYPE (op)))
4784 /* But a conversion that does not change the bit-pattern is ok. */
4785 && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4786 > TYPE_PRECISION (TREE_TYPE (op)))
4787 && TYPE_UNSIGNED (TREE_TYPE (op)))
4788 /* Conversion between boolean types of different sizes is
4789 a simple assignment in case their vectypes are the same
4790 boolean vectors. */
4791 && (!VECTOR_BOOLEAN_TYPE_P (vectype)
4792 || !VECTOR_BOOLEAN_TYPE_P (vectype_in)))
4794 if (dump_enabled_p ())
4795 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4796 "type conversion to/from bit-precision "
4797 "unsupported.\n");
4798 return false;
4801 if (!vec_stmt) /* transformation not required. */
4803 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
4804 if (dump_enabled_p ())
4805 dump_printf_loc (MSG_NOTE, vect_location,
4806 "=== vectorizable_assignment ===\n");
4807 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
4808 return true;
4811 /* Transform. */
4812 if (dump_enabled_p ())
4813 dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");
4815 /* Handle def. */
4816 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4818 /* Handle use. */
4819 for (j = 0; j < ncopies; j++)
4821 /* Handle uses. */
4822 if (j == 0)
4823 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
4824 else
4825 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
4827 /* Arguments are ready. Create the new vector stmt. */
4828 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
4830 if (CONVERT_EXPR_CODE_P (code)
4831 || code == VIEW_CONVERT_EXPR)
4832 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
4833 new_stmt = gimple_build_assign (vec_dest, vop);
4834 new_temp = make_ssa_name (vec_dest, new_stmt);
4835 gimple_assign_set_lhs (new_stmt, new_temp);
4836 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4837 if (slp_node)
4838 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4841 if (slp_node)
4842 continue;
4844 if (j == 0)
4845 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4846 else
4847 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4849 prev_stmt_info = vinfo_for_stmt (new_stmt);
4852 vec_oprnds.release ();
4853 return true;
4857 /* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
4858 either as a shift by a scalar or as a shift by a vector. */
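/* E.g. vect_supportable_shift (LSHIFT_EXPR, short_integer_type_node)
   returns true if the target can left-shift the corresponding vector
   mode either by a scalar or by a vector shift amount. */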
4860 bool
4861 vect_supportable_shift (enum tree_code code, tree scalar_type)
4864 machine_mode vec_mode;
4865 optab optab;
4866 int icode;
4867 tree vectype;
4869 vectype = get_vectype_for_scalar_type (scalar_type);
4870 if (!vectype)
4871 return false;
4873 optab = optab_for_tree_code (code, vectype, optab_scalar);
4874 if (!optab
4875 || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
4877 optab = optab_for_tree_code (code, vectype, optab_vector);
4878 if (!optab
4879 || (optab_handler (optab, TYPE_MODE (vectype))
4880 == CODE_FOR_nothing))
4881 return false;
4884 vec_mode = TYPE_MODE (vectype);
4885 icode = (int) optab_handler (optab, vec_mode);
4886 if (icode == CODE_FOR_nothing)
4887 return false;
4889 return true;
4893 /* Function vectorizable_shift.
4895 Check if STMT performs a shift operation that can be vectorized.
4896 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4897 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4898 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
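/* For instance (a sketch),  x_3 = y_2 << n_1  with a loop-invariant
   n_1 is vectorized as a vector-by-scalar shift when the target
   provides one; otherwise the shift amount is broadcast into a vector
   and a vector-by-vector shift is used. */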
4900 static bool
4901 vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
4902 gimple **vec_stmt, slp_tree slp_node)
4904 tree vec_dest;
4905 tree scalar_dest;
4906 tree op0, op1 = NULL;
4907 tree vec_oprnd1 = NULL_TREE;
4908 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4909 tree vectype;
4910 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4911 enum tree_code code;
4912 machine_mode vec_mode;
4913 tree new_temp;
4914 optab optab;
4915 int icode;
4916 machine_mode optab_op2_mode;
4917 gimple *def_stmt;
4918 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4919 int ndts = 2;
4920 gimple *new_stmt = NULL;
4921 stmt_vec_info prev_stmt_info;
4922 int nunits_in;
4923 int nunits_out;
4924 tree vectype_out;
4925 tree op1_vectype;
4926 int ncopies;
4927 int j, i;
4928 vec<tree> vec_oprnds0 = vNULL;
4929 vec<tree> vec_oprnds1 = vNULL;
4930 tree vop0, vop1;
4931 unsigned int k;
4932 bool scalar_shift_arg = true;
4933 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4934 vec_info *vinfo = stmt_info->vinfo;
4936 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4937 return false;
4939 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4940 && ! vec_stmt)
4941 return false;
4943 /* Is STMT a vectorizable binary/unary operation? */
4944 if (!is_gimple_assign (stmt))
4945 return false;
4947 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4948 return false;
4950 code = gimple_assign_rhs_code (stmt);
4952 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
4953 || code == RROTATE_EXPR))
4954 return false;
4956 scalar_dest = gimple_assign_lhs (stmt);
4957 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4958 if (!type_has_mode_precision_p (TREE_TYPE (scalar_dest)))
4960 if (dump_enabled_p ())
4961 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4962 "bit-precision shifts not supported.\n");
4963 return false;
4966 op0 = gimple_assign_rhs1 (stmt);
4967 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
4969 if (dump_enabled_p ())
4970 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4971 "use not simple.\n");
4972 return false;
4974 /* If op0 is an external or constant def use a vector type with
4975 the same size as the output vector type. */
4976 if (!vectype)
4977 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
4978 if (vec_stmt)
4979 gcc_assert (vectype);
4980 if (!vectype)
4982 if (dump_enabled_p ())
4983 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4984 "no vectype for scalar type\n");
4985 return false;
4988 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4989 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
4990 if (nunits_out != nunits_in)
4991 return false;
4993 op1 = gimple_assign_rhs2 (stmt);
4994 if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &op1_vectype))
4996 if (dump_enabled_p ())
4997 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4998 "use not simple.\n");
4999 return false;
5002 /* Multiple types in SLP are handled by creating the appropriate number of
5003 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5004 case of SLP. */
5005 if (slp_node)
5006 ncopies = 1;
5007 else
5008 ncopies = vect_get_num_copies (loop_vinfo, vectype);
5010 gcc_assert (ncopies >= 1);
5012 /* Determine whether the shift amount is a vector or a scalar. If the
5013 shift/rotate amount is a vector, use the vector/vector shift optabs. */
5015 if ((dt[1] == vect_internal_def
5016 || dt[1] == vect_induction_def)
5017 && !slp_node)
5018 scalar_shift_arg = false;
5019 else if (dt[1] == vect_constant_def
5020 || dt[1] == vect_external_def
5021 || dt[1] == vect_internal_def)
5023 /* In SLP, we need to check whether the shift count is the same;
5024 in loops, if it is a constant or invariant, it is always
5025 a scalar shift. */
5026 if (slp_node)
5028 vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
5029 gimple *slpstmt;
5031 FOR_EACH_VEC_ELT (stmts, k, slpstmt)
5032 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
5033 scalar_shift_arg = false;
5036 /* If the shift amount is computed by a pattern stmt we cannot
5037 use the scalar amount directly, thus give up and use a vector
5038 shift. */
5039 if (dt[1] == vect_internal_def)
5041 gimple *def = SSA_NAME_DEF_STMT (op1);
5042 if (is_pattern_stmt_p (vinfo_for_stmt (def)))
5043 scalar_shift_arg = false;
5046 else
5048 if (dump_enabled_p ())
5049 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5050 "operand mode requires invariant argument.\n");
5051 return false;
5054 /* Vector shifted by vector. */
5055 if (!scalar_shift_arg)
5057 optab = optab_for_tree_code (code, vectype, optab_vector);
5058 if (dump_enabled_p ())
5059 dump_printf_loc (MSG_NOTE, vect_location,
5060 "vector/vector shift/rotate found.\n");
5062 if (!op1_vectype)
5063 op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
5064 if (op1_vectype == NULL_TREE
5065 || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
5067 if (dump_enabled_p ())
5068 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5069 "unusable type for last operand in"
5070 " vector/vector shift/rotate.\n");
5071 return false;
5074 /* See if the machine has a vector-shift-by-scalar insn, and if not,
5075 then see if it has a vector-shift-by-vector insn. */
5076 else
5078 optab = optab_for_tree_code (code, vectype, optab_scalar);
5079 if (optab
5080 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
5082 if (dump_enabled_p ())
5083 dump_printf_loc (MSG_NOTE, vect_location,
5084 "vector/scalar shift/rotate found.\n");
5086 else
5088 optab = optab_for_tree_code (code, vectype, optab_vector);
5089 if (optab
5090 && (optab_handler (optab, TYPE_MODE (vectype))
5091 != CODE_FOR_nothing))
5093 scalar_shift_arg = false;
5095 if (dump_enabled_p ())
5096 dump_printf_loc (MSG_NOTE, vect_location,
5097 "vector/vector shift/rotate found.\n");
5099 /* Unlike the other binary operators, shifts/rotates have
5100 an rhs of type int, instead of the same type as the lhs,
5101 so make sure the scalar is the right type if we are
5102 dealing with vectors of long long/long/short/char. */
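/* E.g. for a vector of long long shifted by an int constant, op1 is
   converted to long long below so that the invariant shift vector
   gets the right element type. */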
5103 if (dt[1] == vect_constant_def)
5104 op1 = fold_convert (TREE_TYPE (vectype), op1);
5105 else if (!useless_type_conversion_p (TREE_TYPE (vectype),
5106 TREE_TYPE (op1)))
5108 if (slp_node
5109 && TYPE_MODE (TREE_TYPE (vectype))
5110 != TYPE_MODE (TREE_TYPE (op1)))
5112 if (dump_enabled_p ())
5113 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5114 "unusable type for last operand in"
5115 " vector/vector shift/rotate.\n");
5116 return false;
5118 if (vec_stmt && !slp_node)
5120 op1 = fold_convert (TREE_TYPE (vectype), op1);
5121 op1 = vect_init_vector (stmt, op1,
5122 TREE_TYPE (vectype), NULL);
5129 /* Supportable by target? */
5130 if (!optab)
5132 if (dump_enabled_p ())
5133 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5134 "no optab.\n");
5135 return false;
5137 vec_mode = TYPE_MODE (vectype);
5138 icode = (int) optab_handler (optab, vec_mode);
5139 if (icode == CODE_FOR_nothing)
5141 if (dump_enabled_p ())
5142 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5143 "op not supported by target.\n");
5144 /* Check only during analysis. */
5145 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
5146 || (!vec_stmt
5147 && !vect_worthwhile_without_simd_p (vinfo, code)))
5148 return false;
5149 if (dump_enabled_p ())
5150 dump_printf_loc (MSG_NOTE, vect_location,
5151 "proceeding using word mode.\n");
5154 /* Worthwhile without SIMD support? Check only during analysis. */
5155 if (!vec_stmt
5156 && !VECTOR_MODE_P (TYPE_MODE (vectype))
5157 && !vect_worthwhile_without_simd_p (vinfo, code))
5159 if (dump_enabled_p ())
5160 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5161 "not worthwhile without SIMD support.\n");
5162 return false;
5165 if (!vec_stmt) /* transformation not required. */
5167 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
5168 if (dump_enabled_p ())
5169 dump_printf_loc (MSG_NOTE, vect_location,
5170 "=== vectorizable_shift ===\n");
5171 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
5172 return true;
5175 /* Transform. */
5177 if (dump_enabled_p ())
5178 dump_printf_loc (MSG_NOTE, vect_location,
5179 "transform binary/unary operation.\n");
5181 /* Handle def. */
5182 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5184 prev_stmt_info = NULL;
5185 for (j = 0; j < ncopies; j++)
5187 /* Handle uses. */
5188 if (j == 0)
5190 if (scalar_shift_arg)
5192 /* Vector shl and shr insn patterns can be defined with scalar
5193 operand 2 (shift operand). In this case, use the constant or
5194 loop-invariant op1 directly, without extending it to vector mode
5195 first. */
5196 optab_op2_mode = insn_data[icode].operand[2].mode;
5197 if (!VECTOR_MODE_P (optab_op2_mode))
5199 if (dump_enabled_p ())
5200 dump_printf_loc (MSG_NOTE, vect_location,
5201 "operand 1 using scalar mode.\n");
5202 vec_oprnd1 = op1;
5203 vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
5204 vec_oprnds1.quick_push (vec_oprnd1);
5205 if (slp_node)
5207 /* Store vec_oprnd1 for every vector stmt to be created
5208 for SLP_NODE. We check during the analysis that all
5209 the shift arguments are the same.
5210 TODO: Allow different constants for different vector
5211 stmts generated for an SLP instance. */
5212 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
5213 vec_oprnds1.quick_push (vec_oprnd1);
5218 /* vec_oprnd1 is available if operand 1 should be of a scalar type
5219 (a special case for certain kinds of vector shifts); otherwise,
5220 operand 1 should be of a vector type (the usual case). */
5221 if (vec_oprnd1)
5222 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
5223 slp_node);
5224 else
5225 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
5226 slp_node);
5228 else
5229 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
5231 /* Arguments are ready. Create the new vector stmt. */
5232 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
5234 vop1 = vec_oprnds1[i];
5235 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
5236 new_temp = make_ssa_name (vec_dest, new_stmt);
5237 gimple_assign_set_lhs (new_stmt, new_temp);
5238 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5239 if (slp_node)
5240 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
5243 if (slp_node)
5244 continue;
5246 if (j == 0)
5247 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5248 else
5249 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5250 prev_stmt_info = vinfo_for_stmt (new_stmt);
5253 vec_oprnds0.release ();
5254 vec_oprnds1.release ();
5256 return true;
5260 /* Function vectorizable_operation.
5262 Check if STMT performs a binary, unary or ternary operation that can
5263 be vectorized.
5264 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5265 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
5266 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
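/* For example, statements such as  z_3 = x_1 & y_2  or  z_3 = -x_1
   are handled here by emitting the same operation on vector operands,
   provided the target has an optab for the operation in the chosen
   vector mode. */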
5268 static bool
5269 vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
5270 gimple **vec_stmt, slp_tree slp_node)
5272 tree vec_dest;
5273 tree scalar_dest;
5274 tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
5275 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5276 tree vectype;
5277 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5278 enum tree_code code, orig_code;
5279 machine_mode vec_mode;
5280 tree new_temp;
5281 int op_type;
5282 optab optab;
5283 bool target_support_p;
5284 gimple *def_stmt;
5285 enum vect_def_type dt[3]
5286 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
5287 int ndts = 3;
5288 gimple *new_stmt = NULL;
5289 stmt_vec_info prev_stmt_info;
5290 int nunits_in;
5291 int nunits_out;
5292 tree vectype_out;
5293 int ncopies;
5294 int j, i;
5295 vec<tree> vec_oprnds0 = vNULL;
5296 vec<tree> vec_oprnds1 = vNULL;
5297 vec<tree> vec_oprnds2 = vNULL;
5298 tree vop0, vop1, vop2;
5299 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5300 vec_info *vinfo = stmt_info->vinfo;
5302 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5303 return false;
5305 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5306 && ! vec_stmt)
5307 return false;
5309 /* Is STMT a vectorizable binary/unary operation? */
5310 if (!is_gimple_assign (stmt))
5311 return false;
5313 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
5314 return false;
5316 orig_code = code = gimple_assign_rhs_code (stmt);
5318 /* For pointer addition and subtraction, we should use the normal
5319 plus and minus for the vector operation. */
5320 if (code == POINTER_PLUS_EXPR)
5321 code = PLUS_EXPR;
5322 if (code == POINTER_DIFF_EXPR)
5323 code = MINUS_EXPR;
5325 /* Support only unary, binary or ternary operations. */
5326 op_type = TREE_CODE_LENGTH (code);
5327 if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
5329 if (dump_enabled_p ())
5330 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5331 "num. args = %d (not unary/binary/ternary op).\n",
5332 op_type);
5333 return false;
5336 scalar_dest = gimple_assign_lhs (stmt);
5337 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
5339 /* Most operations cannot handle bit-precision types without extra
5340 truncations. */
5341 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
5342 && !type_has_mode_precision_p (TREE_TYPE (scalar_dest))
5343 /* Exceptions are bitwise binary operations. */
5344 && code != BIT_IOR_EXPR
5345 && code != BIT_XOR_EXPR
5346 && code != BIT_AND_EXPR)
5348 if (dump_enabled_p ())
5349 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5350 "bit-precision arithmetic not supported.\n");
5351 return false;
5354 op0 = gimple_assign_rhs1 (stmt);
5355 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
5357 if (dump_enabled_p ())
5358 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5359 "use not simple.\n");
5360 return false;
5362 /* If op0 is an external or constant def use a vector type with
5363 the same size as the output vector type. */
5364 if (!vectype)
5366 /* For a boolean type we cannot determine the vectype from an
5367 invariant value (we don't know whether it is a vector
5368 of booleans or a vector of integers). We use the output
5369 vectype because operations on booleans don't change the
5370 type. */
5371 if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op0)))
5373 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (scalar_dest)))
5375 if (dump_enabled_p ())
5376 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5377 "not supported operation on bool value.\n");
5378 return false;
5380 vectype = vectype_out;
5382 else
5383 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
5385 if (vec_stmt)
5386 gcc_assert (vectype);
5387 if (!vectype)
5389 if (dump_enabled_p ())
5391 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5392 "no vectype for scalar type ");
5393 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
5394 TREE_TYPE (op0));
5395 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
5398 return false;
5401 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
5402 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
5403 if (nunits_out != nunits_in)
5404 return false;
5406 if (op_type == binary_op || op_type == ternary_op)
5408 op1 = gimple_assign_rhs2 (stmt);
5409 if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]))
5411 if (dump_enabled_p ())
5412 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5413 "use not simple.\n");
5414 return false;
5417 if (op_type == ternary_op)
5419 op2 = gimple_assign_rhs3 (stmt);
5420 if (!vect_is_simple_use (op2, vinfo, &def_stmt, &dt[2]))
5422 if (dump_enabled_p ())
5423 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5424 "use not simple.\n");
5425 return false;
5429 /* Multiple types in SLP are handled by creating the appropriate number of
5430 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5431 case of SLP. */
5432 if (slp_node)
5433 ncopies = 1;
5434 else
5435 ncopies = vect_get_num_copies (loop_vinfo, vectype);
5437 gcc_assert (ncopies >= 1);
5439 /* Shifts are handled in vectorizable_shift (). */
5440 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
5441 || code == RROTATE_EXPR)
5442 return false;
5444 /* Supportable by target? */
5446 vec_mode = TYPE_MODE (vectype);
5447 if (code == MULT_HIGHPART_EXPR)
5448 target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
5449 else
5451 optab = optab_for_tree_code (code, vectype, optab_default);
5452 if (!optab)
5454 if (dump_enabled_p ())
5455 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5456 "no optab.\n");
5457 return false;
5459 target_support_p = (optab_handler (optab, vec_mode)
5460 != CODE_FOR_nothing);
5463 if (!target_support_p)
5465 if (dump_enabled_p ())
5466 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5467 "op not supported by target.\n");
5468 /* Check only during analysis. */
5469 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
5470 || (!vec_stmt && !vect_worthwhile_without_simd_p (vinfo, code)))
5471 return false;
5472 if (dump_enabled_p ())
5473 dump_printf_loc (MSG_NOTE, vect_location,
5474 "proceeding using word mode.\n");
5477 /* Worthwhile without SIMD support? Check only during analysis. */
5478 if (!VECTOR_MODE_P (vec_mode)
5479 && !vec_stmt
5480 && !vect_worthwhile_without_simd_p (vinfo, code))
5482 if (dump_enabled_p ())
5483 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5484 "not worthwhile without SIMD support.\n");
5485 return false;
5488 if (!vec_stmt) /* transformation not required. */
5490 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
5491 if (dump_enabled_p ())
5492 dump_printf_loc (MSG_NOTE, vect_location,
5493 "=== vectorizable_operation ===\n");
5494 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
5495 return true;
5498 /* Transform. */
5500 if (dump_enabled_p ())
5501 dump_printf_loc (MSG_NOTE, vect_location,
5502 "transform binary/unary operation.\n");
5504 /* Handle def. */
5505 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5507 /* POINTER_DIFF_EXPR has pointer arguments which are vectorized as
5508 vectors with unsigned elements, but the result is signed. So, we
5509 need to compute the MINUS_EXPR into a vectype temporary and
5510 VIEW_CONVERT_EXPR it into the final vectype_out result. */
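/* E.g. the scalar  d_3 = p_1 - q_2  (POINTER_DIFF_EXPR) becomes an
   unsigned vector MINUS_EXPR whose result is then view-converted to
   the signed VECTYPE_OUT. */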
5511 tree vec_cvt_dest = NULL_TREE;
5512 if (orig_code == POINTER_DIFF_EXPR)
5513 vec_cvt_dest = vect_create_destination_var (scalar_dest, vectype_out);
5515 /* In case the vectorization factor (VF) is bigger than the number
5516 of elements that we can fit in a vectype (nunits), we have to generate
5517 more than one vector stmt - i.e., we need to "unroll" the
5518 vector stmt by a factor of VF/nunits. In doing so, we record a pointer
5519 from one copy of the vector stmt to the next, in the field
5520 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
5521 stages to find the correct vector defs to be used when vectorizing
5522 stmts that use the defs of the current stmt. The example below
5523 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
5524 we need to create 4 vectorized stmts):
5526 before vectorization:
5527 RELATED_STMT VEC_STMT
5528 S1: x = memref - -
5529 S2: z = x + 1 - -
5531 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
5532 there):
5533 RELATED_STMT VEC_STMT
5534 VS1_0: vx0 = memref0 VS1_1 -
5535 VS1_1: vx1 = memref1 VS1_2 -
5536 VS1_2: vx2 = memref2 VS1_3 -
5537 VS1_3: vx3 = memref3 - -
5538 S1: x = load - VS1_0
5539 S2: z = x + 1 - -
5541 step 2: vectorize stmt S2 (done here):
5542 To vectorize stmt S2 we first need to find the relevant vector
5543 def for the first operand 'x'. This is, as usual, obtained from
5544 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
5545 that defines 'x' (S1). This way we find the stmt VS1_0, and the
5546 relevant vector def 'vx0'. Having found 'vx0' we can generate
5547 the vector stmt VS2_0, and as usual, record it in the
5548 STMT_VINFO_VEC_STMT of stmt S2.
5549 When creating the second copy (VS2_1), we obtain the relevant vector
5550 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
5551 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
5552 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
5553 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
5554 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
5555 chain of stmts and pointers:
5556 RELATED_STMT VEC_STMT
5557 VS1_0: vx0 = memref0 VS1_1 -
5558 VS1_1: vx1 = memref1 VS1_2 -
5559 VS1_2: vx2 = memref2 VS1_3 -
5560 VS1_3: vx3 = memref3 - -
5561 S1: x = load - VS1_0
5562 VS2_0: vz0 = vx0 + v1 VS2_1 -
5563 VS2_1: vz1 = vx1 + v1 VS2_2 -
5564 VS2_2: vz2 = vx2 + v1 VS2_3 -
5565 VS2_3: vz3 = vx3 + v1 - -
5566 S2: z = x + 1 - VS2_0 */
5568 prev_stmt_info = NULL;
5569 for (j = 0; j < ncopies; j++)
5571 /* Handle uses. */
5572 if (j == 0)
5574 if (op_type == binary_op || op_type == ternary_op)
5575 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
5576 slp_node);
5577 else
5578 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
5579 slp_node);
5580 if (op_type == ternary_op)
5581 vect_get_vec_defs (op2, NULL_TREE, stmt, &vec_oprnds2, NULL,
5582 slp_node);
5584 else
5586 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
5587 if (op_type == ternary_op)
5589 tree vec_oprnd = vec_oprnds2.pop ();
5590 vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
5591 vec_oprnd));
5595 /* Arguments are ready. Create the new vector stmt. */
5596 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
5598 vop1 = ((op_type == binary_op || op_type == ternary_op)
5599 ? vec_oprnds1[i] : NULL_TREE);
5600 vop2 = ((op_type == ternary_op)
5601 ? vec_oprnds2[i] : NULL_TREE);
5602 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2);
5603 new_temp = make_ssa_name (vec_dest, new_stmt);
5604 gimple_assign_set_lhs (new_stmt, new_temp);
5605 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5606 if (vec_cvt_dest)
5608 new_temp = build1 (VIEW_CONVERT_EXPR, vectype_out, new_temp);
5609 new_stmt = gimple_build_assign (vec_cvt_dest, VIEW_CONVERT_EXPR,
5610 new_temp);
5611 new_temp = make_ssa_name (vec_cvt_dest, new_stmt);
5612 gimple_assign_set_lhs (new_stmt, new_temp);
5613 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5615 if (slp_node)
5616 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
5619 if (slp_node)
5620 continue;
5622 if (j == 0)
5623 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5624 else
5625 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5626 prev_stmt_info = vinfo_for_stmt (new_stmt);
5629 vec_oprnds0.release ();
5630 vec_oprnds1.release ();
5631 vec_oprnds2.release ();
5633 return true;
5636 /* A helper function to ensure data reference DR's base alignment. */
5638 static void
5639 ensure_base_align (struct data_reference *dr)
5641 if (!dr->aux)
5642 return;
5644 if (DR_VECT_AUX (dr)->base_misaligned)
5646 tree base_decl = DR_VECT_AUX (dr)->base_decl;
5648 unsigned int align_base_to = DR_TARGET_ALIGNMENT (dr) * BITS_PER_UNIT;
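/* E.g. (illustrative figures) a DR_TARGET_ALIGNMENT of 16 bytes becomes a
   request for 128-bit alignment of the base declaration. */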
5650 if (decl_in_symtab_p (base_decl))
5651 symtab_node::get (base_decl)->increase_alignment (align_base_to);
5652 else
5654 SET_DECL_ALIGN (base_decl, align_base_to);
5655 DECL_USER_ALIGN (base_decl) = 1;
5657 DR_VECT_AUX (dr)->base_misaligned = false;
5662 /* Function get_group_alias_ptr_type.
5664 Return the alias type for the group starting at FIRST_STMT. */
5666 static tree
5667 get_group_alias_ptr_type (gimple *first_stmt)
5669 struct data_reference *first_dr, *next_dr;
5670 gimple *next_stmt;
5672 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5673 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first_stmt));
5674 while (next_stmt)
5676 next_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (next_stmt));
5677 if (get_alias_set (DR_REF (first_dr))
5678 != get_alias_set (DR_REF (next_dr)))
5680 if (dump_enabled_p ())
5681 dump_printf_loc (MSG_NOTE, vect_location,
5682 "conflicting alias set types.\n");
5683 return ptr_type_node;
5685 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5687 return reference_alias_ptr_type (DR_REF (first_dr));
5691 /* Function vectorizable_store.
5693 Check if STMT defines a non-scalar data-ref (array/pointer/structure) that
5694 can be vectorized.
5695 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5696 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
5697 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5699 static bool
5700 vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
5701 slp_tree slp_node)
5703 tree scalar_dest;
5704 tree data_ref;
5705 tree op;
5706 tree vec_oprnd = NULL_TREE;
5707 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5708 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
5709 tree elem_type;
5710 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5711 struct loop *loop = NULL;
5712 machine_mode vec_mode;
5713 tree dummy;
5714 enum dr_alignment_support alignment_support_scheme;
5715 gimple *def_stmt;
5716 enum vect_def_type dt;
5717 stmt_vec_info prev_stmt_info = NULL;
5718 tree dataref_ptr = NULL_TREE;
5719 tree dataref_offset = NULL_TREE;
5720 gimple *ptr_incr = NULL;
5721 int ncopies;
5722 int j;
5723 gimple *next_stmt, *first_stmt;
5724 bool grouped_store;
5725 unsigned int group_size, i;
5726 vec<tree> oprnds = vNULL;
5727 vec<tree> result_chain = vNULL;
5728 bool inv_p;
5729 tree offset = NULL_TREE;
5730 vec<tree> vec_oprnds = vNULL;
5731 bool slp = (slp_node != NULL);
5732 unsigned int vec_num;
5733 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5734 vec_info *vinfo = stmt_info->vinfo;
5735 tree aggr_type;
5736 gather_scatter_info gs_info;
5737 enum vect_def_type scatter_src_dt = vect_unknown_def_type;
5738 gimple *new_stmt;
5739 poly_uint64 vf;
5740 vec_load_store_type vls_type;
5741 tree ref_type;
5743 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5744 return false;
5746 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5747 && ! vec_stmt)
5748 return false;
5750 /* Is vectorizable store? */
5752 if (!is_gimple_assign (stmt))
5753 return false;
5755 scalar_dest = gimple_assign_lhs (stmt);
5756 if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
5757 && is_pattern_stmt_p (stmt_info))
5758 scalar_dest = TREE_OPERAND (scalar_dest, 0);
5759 if (TREE_CODE (scalar_dest) != ARRAY_REF
5760 && TREE_CODE (scalar_dest) != BIT_FIELD_REF
5761 && TREE_CODE (scalar_dest) != INDIRECT_REF
5762 && TREE_CODE (scalar_dest) != COMPONENT_REF
5763 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
5764 && TREE_CODE (scalar_dest) != REALPART_EXPR
5765 && TREE_CODE (scalar_dest) != MEM_REF)
5766 return false;
5768 /* Cannot have hybrid store SLP -- that would mean storing to the
5769 same location twice. */
5770 gcc_assert (slp == PURE_SLP_STMT (stmt_info));
5772 gcc_assert (gimple_assign_single_p (stmt));
5774 tree vectype = STMT_VINFO_VECTYPE (stmt_info), rhs_vectype = NULL_TREE;
5775 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
5777 if (loop_vinfo)
5779 loop = LOOP_VINFO_LOOP (loop_vinfo);
5780 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
5782 else
5783 vf = 1;
5785 /* Multiple types in SLP are handled by creating the appropriate number of
5786 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5787 case of SLP. */
5788 if (slp)
5789 ncopies = 1;
5790 else
5791 ncopies = vect_get_num_copies (loop_vinfo, vectype);
5793 gcc_assert (ncopies >= 1);
5795 /* FORNOW. This restriction should be relaxed. */
5796 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
5798 if (dump_enabled_p ())
5799 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5800 "multiple types in nested loop.\n");
5801 return false;
5804 op = gimple_assign_rhs1 (stmt);
5806 /* In the case this is a store from a constant make sure
5807 native_encode_expr can handle it. */
5808 if (CONSTANT_CLASS_P (op) && native_encode_expr (op, NULL, 64) == 0)
5809 return false;
5811 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt, &rhs_vectype))
5813 if (dump_enabled_p ())
5814 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5815 "use not simple.\n");
5816 return false;
5819 if (dt == vect_constant_def || dt == vect_external_def)
5820 vls_type = VLS_STORE_INVARIANT;
5821 else
5822 vls_type = VLS_STORE;
5824 if (rhs_vectype && !useless_type_conversion_p (vectype, rhs_vectype))
5825 return false;
5827 elem_type = TREE_TYPE (vectype);
5828 vec_mode = TYPE_MODE (vectype);
5830 /* FORNOW. In some cases can vectorize even if data-type not supported
5831 (e.g. - array initialization with 0). */
5832 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
5833 return false;
5835 if (!STMT_VINFO_DATA_REF (stmt_info))
5836 return false;
5838 vect_memory_access_type memory_access_type;
5839 if (!get_load_store_type (stmt, vectype, slp, vls_type, ncopies,
5840 &memory_access_type, &gs_info))
5841 return false;
5843 if (!vec_stmt) /* transformation not required. */
5845 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
5846 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
5847 /* The SLP costs are calculated during SLP analysis. */
5848 if (!PURE_SLP_STMT (stmt_info))
5849 vect_model_store_cost (stmt_info, ncopies, memory_access_type, dt,
5850 NULL, NULL, NULL);
5851 return true;
5853 gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
5855 /* Transform. */
5857 ensure_base_align (dr);
5859 if (memory_access_type == VMAT_GATHER_SCATTER)
5861 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, op, src;
5862 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
5863 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
5864 tree ptr, mask, var, scale, perm_mask = NULL_TREE;
5865 edge pe = loop_preheader_edge (loop);
5866 gimple_seq seq;
5867 basic_block new_bb;
5868 enum { NARROW, NONE, WIDEN } modifier;
5869 poly_uint64 scatter_off_nunits
5870 = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);
5872 if (known_eq (nunits, scatter_off_nunits))
5873 modifier = NONE;
5874 else if (known_eq (nunits * 2, scatter_off_nunits))
5876 modifier = WIDEN;
5878 /* Currently gathers and scatters are only supported for
5879 fixed-length vectors. */
5880 unsigned int count = scatter_off_nunits.to_constant ();
5881 vec_perm_builder sel (count, count, 1);
5882 for (i = 0; i < (unsigned int) count; ++i)
5883 sel.quick_push (i | (count / 2));
5885 vec_perm_indices indices (sel, 1, count);
5886 perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype,
5887 indices);
5888 gcc_assert (perm_mask != NULL_TREE);
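/* Illustrative example: for SCATTER_OFF_NUNITS == 8 the selector built
   above is { 4, 5, 6, 7, 4, 5, 6, 7 }, which replicates the high half of
   the offset vector so that the odd-numbered copies in the loop below can
   reuse the upper offsets. */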
5890 else if (known_eq (nunits, scatter_off_nunits * 2))
5892 modifier = NARROW;
5894 /* Currently gathers and scatters are only supported for
5895 fixed-length vectors. */
5896 unsigned int count = nunits.to_constant ();
5897 vec_perm_builder sel (count, count, 1);
5898 for (i = 0; i < (unsigned int) count; ++i)
5899 sel.quick_push (i | (count / 2));
5901 vec_perm_indices indices (sel, 2, count);
5902 perm_mask = vect_gen_perm_mask_checked (vectype, indices);
5903 gcc_assert (perm_mask != NULL_TREE);
5904 ncopies *= 2;
5906 else
5907 gcc_unreachable ();
5909 rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
5910 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5911 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5912 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5913 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5914 scaletype = TREE_VALUE (arglist);
5916 gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
5917 && TREE_CODE (rettype) == VOID_TYPE);
5919 ptr = fold_convert (ptrtype, gs_info.base);
5920 if (!is_gimple_min_invariant (ptr))
5922 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
5923 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
5924 gcc_assert (!new_bb);
5927 /* Currently we support only unconditional scatter stores,
5928 so mask should be all ones. */
5929 mask = build_int_cst (masktype, -1);
5930 mask = vect_init_vector (stmt, mask, masktype, NULL);
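/* With an integer MASKTYPE (asserted above) the all-ones constant simply
   sets every mask bit, e.g. 0xff for an 8-bit mask type (illustrative
   width), so every lane of the scatter is active. */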
5932 scale = build_int_cst (scaletype, gs_info.scale);
5934 prev_stmt_info = NULL;
5935 for (j = 0; j < ncopies; ++j)
5937 if (j == 0)
5939 src = vec_oprnd1
5940 = vect_get_vec_def_for_operand (gimple_assign_rhs1 (stmt), stmt);
5941 op = vec_oprnd0
5942 = vect_get_vec_def_for_operand (gs_info.offset, stmt);
5944 else if (modifier != NONE && (j & 1))
5946 if (modifier == WIDEN)
5948 src = vec_oprnd1
5949 = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
5950 op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
5951 stmt, gsi);
5953 else if (modifier == NARROW)
5955 src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask,
5956 stmt, gsi);
5957 op = vec_oprnd0
5958 = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt,
5959 vec_oprnd0);
5961 else
5962 gcc_unreachable ();
5964 else
5966 src = vec_oprnd1
5967 = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
5968 op = vec_oprnd0
5969 = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt,
5970 vec_oprnd0);
5973 if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
5975 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src))
5976 == TYPE_VECTOR_SUBPARTS (srctype));
5977 var = vect_get_new_ssa_name (srctype, vect_simple_var);
5978 src = build1 (VIEW_CONVERT_EXPR, srctype, src);
5979 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
5980 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5981 src = var;
5984 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
5986 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
5987 == TYPE_VECTOR_SUBPARTS (idxtype));
5988 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
5989 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
5990 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
5991 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5992 op = var;
5995 new_stmt
5996 = gimple_build_call (gs_info.decl, 5, ptr, mask, op, src, scale);
5998 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6000 if (prev_stmt_info == NULL)
6001 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6002 else
6003 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6004 prev_stmt_info = vinfo_for_stmt (new_stmt);
6006 return true;
6009 grouped_store = STMT_VINFO_GROUPED_ACCESS (stmt_info);
6010 if (grouped_store)
6012 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
6013 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
6014 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
6016 GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
6018 /* FORNOW */
6019 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
6021 /* We vectorize all the stmts of the interleaving group when we
6022 reach the last stmt in the group. */
6023 if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
6024 < GROUP_SIZE (vinfo_for_stmt (first_stmt))
6025 && !slp)
6027 *vec_stmt = NULL;
6028 return true;
6031 if (slp)
6033 grouped_store = false;
6034 /* VEC_NUM is the number of vect stmts to be created for this
6035 group. */
6036 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6037 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
6038 gcc_assert (GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt)) == first_stmt);
6039 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
6040 op = gimple_assign_rhs1 (first_stmt);
6042 else
6043 /* VEC_NUM is the number of vect stmts to be created for this
6044 group. */
6045 vec_num = group_size;
6047 ref_type = get_group_alias_ptr_type (first_stmt);
6049 else
6051 first_stmt = stmt;
6052 first_dr = dr;
6053 group_size = vec_num = 1;
6054 ref_type = reference_alias_ptr_type (DR_REF (first_dr));
6057 if (dump_enabled_p ())
6058 dump_printf_loc (MSG_NOTE, vect_location,
6059 "transform store. ncopies = %d\n", ncopies);
6061 if (memory_access_type == VMAT_ELEMENTWISE
6062 || memory_access_type == VMAT_STRIDED_SLP)
6064 gimple_stmt_iterator incr_gsi;
6065 bool insert_after;
6066 gimple *incr;
6067 tree offvar;
6068 tree ivstep;
6069 tree running_off;
6070 gimple_seq stmts = NULL;
6071 tree stride_base, stride_step, alias_off;
6072 tree vec_oprnd;
6073 unsigned int g;
6074 /* Checked by get_load_store_type. */
6075 unsigned int const_nunits = nunits.to_constant ();
6077 gcc_assert (!nested_in_vect_loop_p (loop, stmt));
6079 stride_base
6080 = fold_build_pointer_plus
6081 (unshare_expr (DR_BASE_ADDRESS (first_dr)),
6082 size_binop (PLUS_EXPR,
6083 convert_to_ptrofftype (unshare_expr (DR_OFFSET (first_dr))),
6084 convert_to_ptrofftype (DR_INIT (first_dr))));
6085 stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (first_dr)));
6087 /* For a store with loop-invariant (but other than power-of-2)
6088 stride (i.e. not a grouped access) like so:
6090 for (i = 0; i < n; i += stride)
6091 array[i] = ...;
6093 we generate a new induction variable and new stores from
6094 the components of the (vectorized) rhs:
6096 for (j = 0; ; j += VF*stride)
6097 vectemp = ...;
6098 tmp1 = vectemp[0];
6099 array[j] = tmp1;
6100 tmp2 = vectemp[1];
6101 array[j + stride] = tmp2;
6105 unsigned nstores = const_nunits;
6106 unsigned lnel = 1;
6107 tree ltype = elem_type;
6108 tree lvectype = vectype;
6109 if (slp)
6111 if (group_size < const_nunits
6112 && const_nunits % group_size == 0)
6114 nstores = const_nunits / group_size;
6115 lnel = group_size;
6116 ltype = build_vector_type (elem_type, group_size);
6117 lvectype = vectype;
6119 /* First check whether the vec_extract optab supports extraction
6120 of the group-sized sub-vectors directly; if not, try the
integer-mode fallback below. */
6121 scalar_mode elmode = SCALAR_TYPE_MODE (elem_type);
6122 machine_mode vmode;
6123 if (!mode_for_vector (elmode, group_size).exists (&vmode)
6124 || !VECTOR_MODE_P (vmode)
6125 || (convert_optab_handler (vec_extract_optab,
6126 TYPE_MODE (vectype), vmode)
6127 == CODE_FOR_nothing))
6129 /* Try to avoid emitting an extract of vector elements
6130 by performing the extracts using an integer type of the
6131 same size, extracting from a vector of those and then
6132 re-interpreting it as the original vector type if
6133 supported. */
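/* Worked example with illustrative numbers: for a group of two SImode
   elements stored from a V8SI vector, LSIZE is 64, so we try DImode
   extracts from a V4DI view of the vector; each extracted DImode value
   then stores two group elements at once. */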
6134 unsigned lsize
6135 = group_size * GET_MODE_BITSIZE (elmode);
6136 elmode = int_mode_for_size (lsize, 0).require ();
6137 unsigned int lnunits = const_nunits / group_size;
6138 /* If we can't construct such a vector fall back to
6139 element extracts from the original vector type and
6140 element size stores. */
6141 if (mode_for_vector (elmode, lnunits).exists (&vmode)
6142 && VECTOR_MODE_P (vmode)
6143 && (convert_optab_handler (vec_extract_optab,
6144 vmode, elmode)
6145 != CODE_FOR_nothing))
6147 nstores = lnunits;
6148 lnel = group_size;
6149 ltype = build_nonstandard_integer_type (lsize, 1);
6150 lvectype = build_vector_type (ltype, nstores);
6152 /* Else fall back to vector extraction anyway.
6153 Fewer stores are more important than avoiding spilling
6154 of the vector we extract from. Compared to the
6155 construction case in vectorizable_load no store-forwarding
6156 issue exists here for reasonable archs. */
6159 else if (group_size >= const_nunits
6160 && group_size % const_nunits == 0)
6162 nstores = 1;
6163 lnel = const_nunits;
6164 ltype = vectype;
6165 lvectype = vectype;
6167 ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
6168 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6171 ivstep = stride_step;
6172 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
6173 build_int_cst (TREE_TYPE (ivstep), vf));
6175 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
6177 create_iv (stride_base, ivstep, NULL,
6178 loop, &incr_gsi, insert_after,
6179 &offvar, NULL);
6180 incr = gsi_stmt (incr_gsi);
6181 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
6183 stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
6184 if (stmts)
6185 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
6187 prev_stmt_info = NULL;
6188 alias_off = build_int_cst (ref_type, 0);
6189 next_stmt = first_stmt;
6190 for (g = 0; g < group_size; g++)
6192 running_off = offvar;
6193 if (g)
6195 tree size = TYPE_SIZE_UNIT (ltype);
6196 tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
6197 size);
6198 tree newoff = copy_ssa_name (running_off, NULL);
6199 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6200 running_off, pos);
6201 vect_finish_stmt_generation (stmt, incr, gsi);
6202 running_off = newoff;
6204 unsigned int group_el = 0;
6205 unsigned HOST_WIDE_INT
6206 elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
6207 for (j = 0; j < ncopies; j++)
6209 /* We've set op and dt above, from gimple_assign_rhs1(stmt),
6210 and first_stmt == stmt. */
6211 if (j == 0)
6213 if (slp)
6215 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL,
6216 slp_node);
6217 vec_oprnd = vec_oprnds[0];
6219 else
6221 gcc_assert (gimple_assign_single_p (next_stmt));
6222 op = gimple_assign_rhs1 (next_stmt);
6223 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
6226 else
6228 if (slp)
6229 vec_oprnd = vec_oprnds[j];
6230 else
6232 vect_is_simple_use (vec_oprnd, vinfo, &def_stmt, &dt);
6233 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
6236 /* Pun the vector to extract from if necessary. */
6237 if (lvectype != vectype)
6239 tree tem = make_ssa_name (lvectype);
6240 gimple *pun
6241 = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
6242 lvectype, vec_oprnd));
6243 vect_finish_stmt_generation (stmt, pun, gsi);
6244 vec_oprnd = tem;
6246 for (i = 0; i < nstores; i++)
6248 tree newref, newoff;
6249 gimple *incr, *assign;
6250 tree size = TYPE_SIZE (ltype);
6251 /* Extract the i'th component. */
6252 tree pos = fold_build2 (MULT_EXPR, bitsizetype,
6253 bitsize_int (i), size);
6254 tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
6255 size, pos);
6257 elem = force_gimple_operand_gsi (gsi, elem, true,
6258 NULL_TREE, true,
6259 GSI_SAME_STMT);
6261 tree this_off = build_int_cst (TREE_TYPE (alias_off),
6262 group_el * elsz);
6263 newref = build2 (MEM_REF, ltype,
6264 running_off, this_off);
6266 /* And store it to *running_off. */
6267 assign = gimple_build_assign (newref, elem);
6268 vect_finish_stmt_generation (stmt, assign, gsi);
6270 group_el += lnel;
6271 if (! slp
6272 || group_el == group_size)
6274 newoff = copy_ssa_name (running_off, NULL);
6275 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6276 running_off, stride_step);
6277 vect_finish_stmt_generation (stmt, incr, gsi);
6279 running_off = newoff;
6280 group_el = 0;
6282 if (g == group_size - 1
6283 && !slp)
6285 if (j == 0 && i == 0)
6286 STMT_VINFO_VEC_STMT (stmt_info)
6287 = *vec_stmt = assign;
6288 else
6289 STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign;
6290 prev_stmt_info = vinfo_for_stmt (assign);
6294 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
6295 if (slp)
6296 break;
6299 vec_oprnds.release ();
6300 return true;
6303 auto_vec<tree> dr_chain (group_size);
6304 oprnds.create (group_size);
6306 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
6307 gcc_assert (alignment_support_scheme);
6308 /* Targets with store-lane instructions must not require explicit
6309 realignment. */
6310 gcc_assert (memory_access_type != VMAT_LOAD_STORE_LANES
6311 || alignment_support_scheme == dr_aligned
6312 || alignment_support_scheme == dr_unaligned_supported);
6314 if (memory_access_type == VMAT_CONTIGUOUS_DOWN
6315 || memory_access_type == VMAT_CONTIGUOUS_REVERSE)
6316 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
6318 if (memory_access_type == VMAT_LOAD_STORE_LANES)
6319 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
6320 else
6321 aggr_type = vectype;
6323 /* In case the vectorization factor (VF) is bigger than the number
6324 of elements that we can fit in a vectype (nunits), we have to generate
6325 more than one vector stmt - i.e - we need to "unroll" the
6326 vector stmt by a factor VF/nunits. For more details see documentation in
6327 vect_get_vec_def_for_stmt_copy. */
6329 /* In case of interleaving (non-unit grouped access):
6331 S1: &base + 2 = x2
6332 S2: &base = x0
6333 S3: &base + 1 = x1
6334 S4: &base + 3 = x3
6336 We create vectorized stores starting from base address (the access of the
6337 first stmt in the chain (S2 in the above example), when the last store stmt
6338 of the chain (S4) is reached:
6340 VS1: &base = vx2
6341 VS2: &base + vec_size*1 = vx0
6342 VS3: &base + vec_size*2 = vx1
6343 VS4: &base + vec_size*3 = vx3
6345 Then permutation statements are generated:
6347 VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
6348 VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
6351 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
6352 (the order of the data-refs in the output of vect_permute_store_chain
6353 corresponds to the order of scalar stmts in the interleaving chain - see
6354 the documentation of vect_permute_store_chain()).
6356 In case of both multiple types and interleaving, above vector stores and
6357 permutation stmts are created for every copy. The result vector stmts are
6358 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
6359 STMT_VINFO_RELATED_STMT for the next copies.
6362 prev_stmt_info = NULL;
6363 for (j = 0; j < ncopies; j++)
6366 if (j == 0)
6368 if (slp)
6370 /* Get vectorized arguments for SLP_NODE. */
6371 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
6372 NULL, slp_node);
6374 vec_oprnd = vec_oprnds[0];
6376 else
6378 /* For interleaved stores we collect vectorized defs for all the
6379 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
6380 used as an input to vect_permute_store_chain(), and OPRNDS as
6381 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
6383 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
6384 OPRNDS are of size 1. */
6385 next_stmt = first_stmt;
6386 for (i = 0; i < group_size; i++)
6388 /* Since gaps are not supported for interleaved stores,
6389 GROUP_SIZE is the exact number of stmts in the chain.
6390 Therefore, NEXT_STMT can't be NULL_TREE. In case that
6391 there is no interleaving, GROUP_SIZE is 1, and only one
6392 iteration of the loop will be executed. */
6393 gcc_assert (next_stmt
6394 && gimple_assign_single_p (next_stmt));
6395 op = gimple_assign_rhs1 (next_stmt);
6397 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
6398 dr_chain.quick_push (vec_oprnd);
6399 oprnds.quick_push (vec_oprnd);
6400 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
6404 /* We should have caught mismatched types earlier. */
6405 gcc_assert (useless_type_conversion_p (vectype,
6406 TREE_TYPE (vec_oprnd)));
6407 bool simd_lane_access_p
6408 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
6409 if (simd_lane_access_p
6410 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
6411 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
6412 && integer_zerop (DR_OFFSET (first_dr))
6413 && integer_zerop (DR_INIT (first_dr))
6414 && alias_sets_conflict_p (get_alias_set (aggr_type),
6415 get_alias_set (TREE_TYPE (ref_type))))
6417 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
6418 dataref_offset = build_int_cst (ref_type, 0);
6419 inv_p = false;
6421 else
6422 dataref_ptr
6423 = vect_create_data_ref_ptr (first_stmt, aggr_type,
6424 simd_lane_access_p ? loop : NULL,
6425 offset, &dummy, gsi, &ptr_incr,
6426 simd_lane_access_p, &inv_p);
6427 gcc_assert (bb_vinfo || !inv_p);
6429 else
6431 /* For interleaved stores we created vectorized defs for all the
6432 defs stored in OPRNDS in the previous iteration (previous copy).
6433 DR_CHAIN is then used as an input to vect_permute_store_chain(),
6434 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
6435 next copy.
6436 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
6437 OPRNDS are of size 1. */
6438 for (i = 0; i < group_size; i++)
6440 op = oprnds[i];
6441 vect_is_simple_use (op, vinfo, &def_stmt, &dt);
6442 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
6443 dr_chain[i] = vec_oprnd;
6444 oprnds[i] = vec_oprnd;
6446 if (dataref_offset)
6447 dataref_offset
6448 = int_const_binop (PLUS_EXPR, dataref_offset,
6449 TYPE_SIZE_UNIT (aggr_type));
6450 else
6451 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
6452 TYPE_SIZE_UNIT (aggr_type));
6455 if (memory_access_type == VMAT_LOAD_STORE_LANES)
6457 tree vec_array;
6459 /* Combine all the vectors into an array. */
6460 vec_array = create_vector_array (vectype, vec_num);
6461 for (i = 0; i < vec_num; i++)
6463 vec_oprnd = dr_chain[i];
6464 write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
6467 /* Emit:
6468 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
6469 data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
6470 gcall *call = gimple_build_call_internal (IFN_STORE_LANES, 1,
6471 vec_array);
6472 gimple_call_set_lhs (call, data_ref);
6473 gimple_call_set_nothrow (call, true);
6474 new_stmt = call;
6475 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6477 else
6479 new_stmt = NULL;
6480 if (grouped_store)
6482 if (j == 0)
6483 result_chain.create (group_size);
6484 /* Permute. */
6485 vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
6486 &result_chain);
6489 next_stmt = first_stmt;
6490 for (i = 0; i < vec_num; i++)
6492 unsigned align, misalign;
6494 if (i > 0)
6495 /* Bump the vector pointer. */
6496 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
6497 stmt, NULL_TREE);
6499 if (slp)
6500 vec_oprnd = vec_oprnds[i];
6501 else if (grouped_store)
6502 /* For grouped stores vectorized defs are interleaved in
6503 vect_permute_store_chain(). */
6504 vec_oprnd = result_chain[i];
6506 data_ref = fold_build2 (MEM_REF, vectype,
6507 dataref_ptr,
6508 dataref_offset
6509 ? dataref_offset
6510 : build_int_cst (ref_type, 0));
6511 align = DR_TARGET_ALIGNMENT (first_dr);
6512 if (aligned_access_p (first_dr))
6513 misalign = 0;
6514 else if (DR_MISALIGNMENT (first_dr) == -1)
6516 align = dr_alignment (vect_dr_behavior (first_dr));
6517 misalign = 0;
6518 TREE_TYPE (data_ref)
6519 = build_aligned_type (TREE_TYPE (data_ref),
6520 align * BITS_PER_UNIT);
6522 else
6524 TREE_TYPE (data_ref)
6525 = build_aligned_type (TREE_TYPE (data_ref),
6526 TYPE_ALIGN (elem_type));
6527 misalign = DR_MISALIGNMENT (first_dr);
6529 if (dataref_offset == NULL_TREE
6530 && TREE_CODE (dataref_ptr) == SSA_NAME)
6531 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
6532 misalign);
6534 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
6536 tree perm_mask = perm_mask_for_reverse (vectype);
6537 tree perm_dest
6538 = vect_create_destination_var (gimple_assign_rhs1 (stmt),
6539 vectype);
6540 tree new_temp = make_ssa_name (perm_dest);
6542 /* Generate the permute statement. */
6543 gimple *perm_stmt
6544 = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
6545 vec_oprnd, perm_mask);
6546 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
6548 perm_stmt = SSA_NAME_DEF_STMT (new_temp);
6549 vec_oprnd = new_temp;
6552 /* Arguments are ready. Create the new vector stmt. */
6553 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
6554 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6556 if (slp)
6557 continue;
6559 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
6560 if (!next_stmt)
6561 break;
6564 if (!slp)
6566 if (j == 0)
6567 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6568 else
6569 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6570 prev_stmt_info = vinfo_for_stmt (new_stmt);
6574 oprnds.release ();
6575 result_chain.release ();
6576 vec_oprnds.release ();
6578 return true;
6581 /* Given a vector type VECTYPE, turns permutation SEL into the equivalent
6582 VECTOR_CST mask. No checks are made that the target platform supports the
6583 mask, so callers may wish to test can_vec_perm_const_p separately, or use
6584 vect_gen_perm_mask_checked. */
6586 tree
6587 vect_gen_perm_mask_any (tree vectype, const vec_perm_indices &sel)
6589 tree mask_type;
6591 poly_uint64 nunits = sel.length ();
6592 gcc_assert (known_eq (nunits, TYPE_VECTOR_SUBPARTS (vectype)));
6594 mask_type = build_vector_type (ssizetype, nunits);
6595 return vec_perm_indices_to_tree (mask_type, sel);
6598 /* Checked version of vect_gen_perm_mask_any. Asserts can_vec_perm_const_p,
6599 i.e. that the target supports the pattern _for arbitrary input vectors_. */
6601 tree
6602 vect_gen_perm_mask_checked (tree vectype, const vec_perm_indices &sel)
6604 gcc_assert (can_vec_perm_const_p (TYPE_MODE (vectype), sel));
6605 return vect_gen_perm_mask_any (vectype, sel);
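/* Minimal usage sketch (illustrative only, mirroring how selectors are
   built elsewhere in this file): reverse the elements of a fixed-length
   vector and let the checked variant assert target support.

     vec_perm_builder sel (nunits, nunits, 1);
     for (unsigned int i = 0; i < nunits; ++i)
       sel.quick_push (nunits - 1 - i);
     vec_perm_indices indices (sel, 1, nunits);
     tree mask = vect_gen_perm_mask_checked (vectype, indices);  */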
6608 /* Given vector variables X and Y that were generated for the scalar
6609 STMT, generate instructions to permute the vector elements of X and Y
6610 using permutation mask MASK_VEC, insert them at *GSI and return the
6611 permuted vector variable. */
6613 static tree
6614 permute_vec_elements (tree x, tree y, tree mask_vec, gimple *stmt,
6615 gimple_stmt_iterator *gsi)
6617 tree vectype = TREE_TYPE (x);
6618 tree perm_dest, data_ref;
6619 gimple *perm_stmt;
6621 perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
6622 data_ref = make_ssa_name (perm_dest);
6624 /* Generate the permute statement. */
6625 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
6626 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
6628 return data_ref;
6631 /* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
6632 inserting them on the loop's preheader edge. Returns true if we
6633 were successful in doing so (and thus STMT itself can then be moved),
6634 otherwise returns false. */
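/* Illustrative example: if STMT is a loop-invariant load "x = a[j]" whose
   address computation is defined inside LOOP but uses only loop-invariant
   values, moving those defining stmts to the preheader allows the caller
   to hoist STMT itself as well. */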
6636 static bool
6637 hoist_defs_of_uses (gimple *stmt, struct loop *loop)
6639 ssa_op_iter i;
6640 tree op;
6641 bool any = false;
6643 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
6645 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
6646 if (!gimple_nop_p (def_stmt)
6647 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
6649 /* Make sure we don't need to recurse. While we could do
6650 so in simple cases when there are more complex use webs
6651 we don't have an easy way to preserve stmt order to fulfil
6652 dependencies within them. */
6653 tree op2;
6654 ssa_op_iter i2;
6655 if (gimple_code (def_stmt) == GIMPLE_PHI)
6656 return false;
6657 FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
6659 gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
6660 if (!gimple_nop_p (def_stmt2)
6661 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
6662 return false;
6664 any = true;
6668 if (!any)
6669 return true;
6671 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
6673 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
6674 if (!gimple_nop_p (def_stmt)
6675 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
6677 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
6678 gsi_remove (&gsi, false);
6679 gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
6683 return true;
6686 /* vectorizable_load.
6688 Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
6689 can be vectorized.
6690 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
6691 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
6692 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
6694 static bool
6695 vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
6696 slp_tree slp_node, slp_instance slp_node_instance)
6698 tree scalar_dest;
6699 tree vec_dest = NULL;
6700 tree data_ref = NULL;
6701 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
6702 stmt_vec_info prev_stmt_info;
6703 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6704 struct loop *loop = NULL;
6705 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
6706 bool nested_in_vect_loop = false;
6707 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
6708 tree elem_type;
6709 tree new_temp;
6710 machine_mode mode;
6711 gimple *new_stmt = NULL;
6712 tree dummy;
6713 enum dr_alignment_support alignment_support_scheme;
6714 tree dataref_ptr = NULL_TREE;
6715 tree dataref_offset = NULL_TREE;
6716 gimple *ptr_incr = NULL;
6717 int ncopies;
6718 int i, j;
6719 unsigned int group_size;
6720 poly_uint64 group_gap_adj;
6721 tree msq = NULL_TREE, lsq;
6722 tree offset = NULL_TREE;
6723 tree byte_offset = NULL_TREE;
6724 tree realignment_token = NULL_TREE;
6725 gphi *phi = NULL;
6726 vec<tree> dr_chain = vNULL;
6727 bool grouped_load = false;
6728 gimple *first_stmt;
6729 gimple *first_stmt_for_drptr = NULL;
6730 bool inv_p;
6731 bool compute_in_loop = false;
6732 struct loop *at_loop;
6733 int vec_num;
6734 bool slp = (slp_node != NULL);
6735 bool slp_perm = false;
6736 enum tree_code code;
6737 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
6738 poly_uint64 vf;
6739 tree aggr_type;
6740 gather_scatter_info gs_info;
6741 vec_info *vinfo = stmt_info->vinfo;
6742 tree ref_type;
6744 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
6745 return false;
6747 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
6748 && ! vec_stmt)
6749 return false;
6751 /* Is vectorizable load? */
6752 if (!is_gimple_assign (stmt))
6753 return false;
6755 scalar_dest = gimple_assign_lhs (stmt);
6756 if (TREE_CODE (scalar_dest) != SSA_NAME)
6757 return false;
6759 code = gimple_assign_rhs_code (stmt);
6760 if (code != ARRAY_REF
6761 && code != BIT_FIELD_REF
6762 && code != INDIRECT_REF
6763 && code != COMPONENT_REF
6764 && code != IMAGPART_EXPR
6765 && code != REALPART_EXPR
6766 && code != MEM_REF
6767 && TREE_CODE_CLASS (code) != tcc_declaration)
6768 return false;
6770 if (!STMT_VINFO_DATA_REF (stmt_info))
6771 return false;
6773 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
6774 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
6776 if (loop_vinfo)
6778 loop = LOOP_VINFO_LOOP (loop_vinfo);
6779 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
6780 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
6782 else
6783 vf = 1;
6785 /* Multiple types in SLP are handled by creating the appropriate number of
6786 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
6787 case of SLP. */
6788 if (slp)
6789 ncopies = 1;
6790 else
6791 ncopies = vect_get_num_copies (loop_vinfo, vectype);
6793 gcc_assert (ncopies >= 1);
6795 /* FORNOW. This restriction should be relaxed. */
6796 if (nested_in_vect_loop && ncopies > 1)
6798 if (dump_enabled_p ())
6799 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6800 "multiple types in nested loop.\n");
6801 return false;
6804 /* Invalidate assumptions made by dependence analysis when vectorization
6805 on the unrolled body effectively re-orders stmts. */
6806 if (ncopies > 1
6807 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
6808 && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
6809 STMT_VINFO_MIN_NEG_DIST (stmt_info)))
6811 if (dump_enabled_p ())
6812 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6813 "cannot perform implicit CSE when unrolling "
6814 "with negative dependence distance\n");
6815 return false;
6818 elem_type = TREE_TYPE (vectype);
6819 mode = TYPE_MODE (vectype);
6821 /* FORNOW. In some cases can vectorize even if data-type not supported
6822 (e.g. - data copies). */
6823 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
6825 if (dump_enabled_p ())
6826 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6827 "Aligned load, but unsupported type.\n");
6828 return false;
6831 /* Check if the load is a part of an interleaving chain. */
6832 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
6834 grouped_load = true;
6835 /* FORNOW */
6836 gcc_assert (!nested_in_vect_loop);
6837 gcc_assert (!STMT_VINFO_GATHER_SCATTER_P (stmt_info));
6839 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
6840 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
6842 if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
6843 slp_perm = true;
6845 /* Invalidate assumptions made by dependence analysis when vectorization
6846 on the unrolled body effectively re-orders stmts. */
6847 if (!PURE_SLP_STMT (stmt_info)
6848 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
6849 && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
6850 STMT_VINFO_MIN_NEG_DIST (stmt_info)))
6852 if (dump_enabled_p ())
6853 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6854 "cannot perform implicit CSE when performing "
6855 "group loads with negative dependence distance\n");
6856 return false;
6859 /* Similarly when the stmt is a load that is both part of a SLP
6860 instance and a loop vectorized stmt via the same-dr mechanism
6861 we have to give up. */
6862 if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)
6863 && (STMT_SLP_TYPE (stmt_info)
6864 != STMT_SLP_TYPE (vinfo_for_stmt
6865 (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)))))
6867 if (dump_enabled_p ())
6868 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6869 "conflicting SLP types for CSEd load\n");
6870 return false;
6874 vect_memory_access_type memory_access_type;
6875 if (!get_load_store_type (stmt, vectype, slp, VLS_LOAD, ncopies,
6876 &memory_access_type, &gs_info))
6877 return false;
6879 if (!vec_stmt) /* transformation not required. */
6881 if (!slp)
6882 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
6883 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
6884 /* The SLP costs are calculated during SLP analysis. */
6885 if (!PURE_SLP_STMT (stmt_info))
6886 vect_model_load_cost (stmt_info, ncopies, memory_access_type,
6887 NULL, NULL, NULL);
6888 return true;
6891 if (!slp)
6892 gcc_assert (memory_access_type
6893 == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
6895 if (dump_enabled_p ())
6896 dump_printf_loc (MSG_NOTE, vect_location,
6897 "transform load. ncopies = %d\n", ncopies);
6899 /* Transform. */
6901 ensure_base_align (dr);
6903 if (memory_access_type == VMAT_GATHER_SCATTER)
6905 tree vec_oprnd0 = NULL_TREE, op;
6906 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
6907 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
6908 tree ptr, mask, var, scale, merge, perm_mask = NULL_TREE, prev_res = NULL_TREE;
6909 edge pe = loop_preheader_edge (loop);
6910 gimple_seq seq;
6911 basic_block new_bb;
6912 enum { NARROW, NONE, WIDEN } modifier;
6913 poly_uint64 gather_off_nunits
6914 = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);
6916 if (known_eq (nunits, gather_off_nunits))
6917 modifier = NONE;
6918 else if (known_eq (nunits * 2, gather_off_nunits))
6920 modifier = WIDEN;
6922 /* Currently widening gathers are only supported for
6923 fixed-length vectors. */
6924 int count = gather_off_nunits.to_constant ();
6925 vec_perm_builder sel (count, count, 1);
6926 for (i = 0; i < count; ++i)
6927 sel.quick_push (i | (count / 2));
6929 vec_perm_indices indices (sel, 1, count);
6930 perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype,
6931 indices);
6933 else if (known_eq (nunits, gather_off_nunits * 2))
6935 modifier = NARROW;
6937 /* Currently narrowing gathers are only supported for
6938 fixed-length vectors. */
6939 int count = nunits.to_constant ();
6940 vec_perm_builder sel (count, count, 1);
6941 for (i = 0; i < count; ++i)
6942 sel.quick_push (i < count / 2 ? i : i + count / 2);
6944 vec_perm_indices indices (sel, 2, count);
6945 perm_mask = vect_gen_perm_mask_checked (vectype, indices);
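/* Illustrative example: for NUNITS == 8 the selector built above is
   { 0, 1, 2, 3, 8, 9, 10, 11 }, i.e. the low halves of two successive
   gather results (each of which only produces half a vector's worth of
   elements) are concatenated into one full destination vector. */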
6946 ncopies *= 2;
6948 else
6949 gcc_unreachable ();
6951 rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
6952 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6953 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6954 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6955 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6956 scaletype = TREE_VALUE (arglist);
6957 gcc_checking_assert (types_compatible_p (srctype, rettype));
6959 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6961 ptr = fold_convert (ptrtype, gs_info.base);
6962 if (!is_gimple_min_invariant (ptr))
6964 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
6965 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
6966 gcc_assert (!new_bb);
6969 /* Currently we support only unconditional gather loads,
6970 so mask should be all ones. */
6971 if (TREE_CODE (masktype) == INTEGER_TYPE)
6972 mask = build_int_cst (masktype, -1);
6973 else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
6975 mask = build_int_cst (TREE_TYPE (masktype), -1);
6976 mask = build_vector_from_val (masktype, mask);
6977 mask = vect_init_vector (stmt, mask, masktype, NULL);
6979 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
6981 REAL_VALUE_TYPE r;
6982 long tmp[6];
6983 for (j = 0; j < 6; ++j)
6984 tmp[j] = -1;
6985 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
6986 mask = build_real (TREE_TYPE (masktype), r);
6987 mask = build_vector_from_val (masktype, mask);
6988 mask = vect_init_vector (stmt, mask, masktype, NULL);
6990 else
6991 gcc_unreachable ();
6993 scale = build_int_cst (scaletype, gs_info.scale);
6995 if (TREE_CODE (TREE_TYPE (rettype)) == INTEGER_TYPE)
6996 merge = build_int_cst (TREE_TYPE (rettype), 0);
6997 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype)))
6999 REAL_VALUE_TYPE r;
7000 long tmp[6];
7001 for (j = 0; j < 6; ++j)
7002 tmp[j] = 0;
7003 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (rettype)));
7004 merge = build_real (TREE_TYPE (rettype), r);
7006 else
7007 gcc_unreachable ();
7008 merge = build_vector_from_val (rettype, merge);
7009 merge = vect_init_vector (stmt, merge, rettype, NULL);
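/* MERGE is the pass-through operand of the gather builtin: lanes whose
   mask bit is clear would receive its value. Since the mask built above
   is all ones, a zero vector is sufficient here. */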
7011 prev_stmt_info = NULL;
7012 for (j = 0; j < ncopies; ++j)
7014 if (modifier == WIDEN && (j & 1))
7015 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
7016 perm_mask, stmt, gsi);
7017 else if (j == 0)
7018 op = vec_oprnd0
7019 = vect_get_vec_def_for_operand (gs_info.offset, stmt);
7020 else
7021 op = vec_oprnd0
7022 = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt, vec_oprnd0);
7024 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
7026 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
7027 == TYPE_VECTOR_SUBPARTS (idxtype));
7028 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
7029 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
7030 new_stmt
7031 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
7032 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7033 op = var;
7036 new_stmt
7037 = gimple_build_call (gs_info.decl, 5, merge, ptr, op, mask, scale);
7039 if (!useless_type_conversion_p (vectype, rettype))
7041 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
7042 == TYPE_VECTOR_SUBPARTS (rettype));
7043 op = vect_get_new_ssa_name (rettype, vect_simple_var);
7044 gimple_call_set_lhs (new_stmt, op);
7045 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7046 var = make_ssa_name (vec_dest);
7047 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
7048 new_stmt
7049 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
7051 else
7053 var = make_ssa_name (vec_dest, new_stmt);
7054 gimple_call_set_lhs (new_stmt, var);
7057 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7059 if (modifier == NARROW)
7061 if ((j & 1) == 0)
7063 prev_res = var;
7064 continue;
7066 var = permute_vec_elements (prev_res, var,
7067 perm_mask, stmt, gsi);
7068 new_stmt = SSA_NAME_DEF_STMT (var);
7071 if (prev_stmt_info == NULL)
7072 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7073 else
7074 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7075 prev_stmt_info = vinfo_for_stmt (new_stmt);
7077 return true;
7080 if (memory_access_type == VMAT_ELEMENTWISE
7081 || memory_access_type == VMAT_STRIDED_SLP)
7083 gimple_stmt_iterator incr_gsi;
7084 bool insert_after;
7085 gimple *incr;
7086 tree offvar;
7087 tree ivstep;
7088 tree running_off;
7089 vec<constructor_elt, va_gc> *v = NULL;
7090 gimple_seq stmts = NULL;
7091 tree stride_base, stride_step, alias_off;
7092 /* Checked by get_load_store_type. */
7093 unsigned int const_nunits = nunits.to_constant ();
7095 gcc_assert (!nested_in_vect_loop);
7097 if (slp && grouped_load)
7099 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
7100 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
7101 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
7102 ref_type = get_group_alias_ptr_type (first_stmt);
7104 else
7106 first_stmt = stmt;
7107 first_dr = dr;
7108 group_size = 1;
7109 ref_type = reference_alias_ptr_type (DR_REF (first_dr));
7112 stride_base
7113 = fold_build_pointer_plus
7114 (DR_BASE_ADDRESS (first_dr),
7115 size_binop (PLUS_EXPR,
7116 convert_to_ptrofftype (DR_OFFSET (first_dr)),
7117 convert_to_ptrofftype (DR_INIT (first_dr))));
7118 stride_step = fold_convert (sizetype, DR_STEP (first_dr));
7120 /* For a load with loop-invariant (but other than power-of-2)
7121 stride (i.e. not a grouped access) like so:
7123 for (i = 0; i < n; i += stride)
7124 ... = array[i];
7126 we generate a new induction variable and new accesses to
7127 form a new vector (or vectors, depending on ncopies):
7129 for (j = 0; ; j += VF*stride)
7130 tmp1 = array[j];
7131 tmp2 = array[j + stride];
7133 vectemp = {tmp1, tmp2, ...}
7136 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
7137 build_int_cst (TREE_TYPE (stride_step), vf));
7139 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
7141 create_iv (unshare_expr (stride_base), unshare_expr (ivstep), NULL,
7142 loop, &incr_gsi, insert_after,
7143 &offvar, NULL);
7144 incr = gsi_stmt (incr_gsi);
7145 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
7147 stride_step = force_gimple_operand (unshare_expr (stride_step),
7148 &stmts, true, NULL_TREE);
7149 if (stmts)
7150 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
7152 prev_stmt_info = NULL;
7153 running_off = offvar;
7154 alias_off = build_int_cst (ref_type, 0);
7155 int nloads = const_nunits;
7156 int lnel = 1;
7157 tree ltype = TREE_TYPE (vectype);
7158 tree lvectype = vectype;
7159 auto_vec<tree> dr_chain;
7160 if (memory_access_type == VMAT_STRIDED_SLP)
7162 if (group_size < const_nunits)
7164 /* First check if vec_init optab supports construction from
7165 vector elts directly. */
7166 scalar_mode elmode = SCALAR_TYPE_MODE (TREE_TYPE (vectype));
7167 machine_mode vmode;
7168 if (mode_for_vector (elmode, group_size).exists (&vmode)
7169 && VECTOR_MODE_P (vmode)
7170 && (convert_optab_handler (vec_init_optab,
7171 TYPE_MODE (vectype), vmode)
7172 != CODE_FOR_nothing))
7174 nloads = const_nunits / group_size;
7175 lnel = group_size;
7176 ltype = build_vector_type (TREE_TYPE (vectype), group_size);
7178 else
7180 /* Otherwise avoid emitting a constructor of vector elements
7181 by performing the loads using an integer type of the same
7182 size, constructing a vector of those and then
7183 re-interpreting it as the original vector type.
7184 This avoids a huge runtime penalty due to the general
7185 inability to perform store forwarding from smaller stores
7186 to a larger load. */
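/* Worked example with illustrative numbers: for a group of two SImode
   elements loaded into a V8SI vector, LSIZE is 64, so we emit four DImode
   loads, build a V4DI from them and view-convert the result to the V8SI
   that the consumers expect. */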
7187 unsigned lsize
7188 = group_size * TYPE_PRECISION (TREE_TYPE (vectype));
7189 elmode = int_mode_for_size (lsize, 0).require ();
7190 unsigned int lnunits = const_nunits / group_size;
7191 /* If we can't construct such a vector fall back to
7192 element loads of the original vector type. */
7193 if (mode_for_vector (elmode, lnunits).exists (&vmode)
7194 && VECTOR_MODE_P (vmode)
7195 && (convert_optab_handler (vec_init_optab, vmode, elmode)
7196 != CODE_FOR_nothing))
7198 nloads = lnunits;
7199 lnel = group_size;
7200 ltype = build_nonstandard_integer_type (lsize, 1);
7201 lvectype = build_vector_type (ltype, nloads);
7205 else
7207 nloads = 1;
7208 lnel = const_nunits;
7209 ltype = vectype;
7211 ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
7213 if (slp)
7215 /* For SLP permutation support we need to load the whole group,
7216 not only the number of vector stmts the permutation result
7217 fits in. */
7218 if (slp_perm)
7220 /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for
7221 variable VF. */
7222 unsigned int const_vf = vf.to_constant ();
7223 ncopies = CEIL (group_size * const_vf, const_nunits);
7224 dr_chain.create (ncopies);
7226 else
7227 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7229 unsigned int group_el = 0;
7230 unsigned HOST_WIDE_INT
7231 elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
7232 for (j = 0; j < ncopies; j++)
7234 if (nloads > 1)
7235 vec_alloc (v, nloads);
7236 for (i = 0; i < nloads; i++)
7238 tree this_off = build_int_cst (TREE_TYPE (alias_off),
7239 group_el * elsz);
7240 new_stmt = gimple_build_assign (make_ssa_name (ltype),
7241 build2 (MEM_REF, ltype,
7242 running_off, this_off));
7243 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7244 if (nloads > 1)
7245 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE,
7246 gimple_assign_lhs (new_stmt));
7248 group_el += lnel;
7249 if (! slp
7250 || group_el == group_size)
7252 tree newoff = copy_ssa_name (running_off);
7253 gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
7254 running_off, stride_step);
7255 vect_finish_stmt_generation (stmt, incr, gsi);
7257 running_off = newoff;
7258 group_el = 0;
7261 if (nloads > 1)
7263 tree vec_inv = build_constructor (lvectype, v);
7264 new_temp = vect_init_vector (stmt, vec_inv, lvectype, gsi);
7265 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7266 if (lvectype != vectype)
7268 new_stmt = gimple_build_assign (make_ssa_name (vectype),
7269 VIEW_CONVERT_EXPR,
7270 build1 (VIEW_CONVERT_EXPR,
7271 vectype, new_temp));
7272 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7276 if (slp)
7278 if (slp_perm)
7279 dr_chain.quick_push (gimple_assign_lhs (new_stmt));
7280 else
7281 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7283 else
7285 if (j == 0)
7286 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7287 else
7288 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7289 prev_stmt_info = vinfo_for_stmt (new_stmt);
7292 if (slp_perm)
7294 unsigned n_perms;
7295 vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
7296 slp_node_instance, false, &n_perms);
7298 return true;
7301 if (grouped_load)
7303 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
7304 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
7305 /* For SLP vectorization we directly vectorize a subchain
7306 without permutation. */
7307 if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
7308 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
7309 /* For BB vectorization always use the first stmt to base
7310 the data ref pointer on. */
7311 if (bb_vinfo)
7312 first_stmt_for_drptr = SLP_TREE_SCALAR_STMTS (slp_node)[0];
7314 /* Check if the chain of loads is already vectorized. */
7315 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
7316 /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
7317 ??? But we can only do so if there is exactly one
7318 as we have no way to get at the rest. Leave the CSE
7319 opportunity alone.
7320 ??? With the group load eventually participating
7321 in multiple different permutations (having multiple
7322 slp nodes which refer to the same group) the CSE
7323 is even wrong code. See PR56270. */
7324 && !slp)
7326 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
7327 return true;
7329 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
7330 group_gap_adj = 0;
7332 /* VEC_NUM is the number of vect stmts to be created for this group. */
7333 if (slp)
7335 grouped_load = false;
7336 /* For SLP permutation support we need to load the whole group,
7337 not only the number of vector stmts the permutation result
7338 fits in. */
7339 if (slp_perm)
7341 /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for
7342 variable VF. */
7343 unsigned int const_vf = vf.to_constant ();
7344 unsigned int const_nunits = nunits.to_constant ();
7345 vec_num = CEIL (group_size * const_vf, const_nunits);
7346 group_gap_adj = vf * group_size - nunits * vec_num;
7348 else
7350 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7351 group_gap_adj
7352 = group_size - SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
7355 else
7356 vec_num = group_size;
7358 ref_type = get_group_alias_ptr_type (first_stmt);
7360 else
7362 first_stmt = stmt;
7363 first_dr = dr;
7364 group_size = vec_num = 1;
7365 group_gap_adj = 0;
7366 ref_type = reference_alias_ptr_type (DR_REF (first_dr));
7369 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
7370 gcc_assert (alignment_support_scheme);
7371 /* Targets with load-lane instructions must not require explicit
7372 realignment. */
7373 gcc_assert (memory_access_type != VMAT_LOAD_STORE_LANES
7374 || alignment_support_scheme == dr_aligned
7375 || alignment_support_scheme == dr_unaligned_supported);
7377 /* In case the vectorization factor (VF) is bigger than the number
7378 of elements that we can fit in a vectype (nunits), we have to generate
7379 more than one vector stmt - i.e - we need to "unroll" the
7380 vector stmt by a factor VF/nunits. In doing so, we record a pointer
7381 from one copy of the vector stmt to the next, in the field
7382 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
7383 stages to find the correct vector defs to be used when vectorizing
7384 stmts that use the defs of the current stmt. The example below
7385 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
7386 need to create 4 vectorized stmts):
7388 before vectorization:
7389 RELATED_STMT VEC_STMT
7390 S1: x = memref - -
7391 S2: z = x + 1 - -
7393 step 1: vectorize stmt S1:
7394 We first create the vector stmt VS1_0, and, as usual, record a
7395 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
7396 Next, we create the vector stmt VS1_1, and record a pointer to
7397 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
7398 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
7399 stmts and pointers:
7400 RELATED_STMT VEC_STMT
7401 VS1_0: vx0 = memref0 VS1_1 -
7402 VS1_1: vx1 = memref1 VS1_2 -
7403 VS1_2: vx2 = memref2 VS1_3 -
7404 VS1_3: vx3 = memref3 - -
7405 S1: x = load - VS1_0
7406 S2: z = x + 1 - -
7408 See the documentation of vect_get_vec_def_for_stmt_copy for how the
7409 information we recorded in the RELATED_STMT field is used to vectorize
7410 stmt S2. */
7412 /* In case of interleaving (non-unit grouped access):
7414 S1: x2 = &base + 2
7415 S2: x0 = &base
7416 S3: x1 = &base + 1
7417 S4: x3 = &base + 3
7419 Vectorized loads are created in the order of memory accesses
7420 starting from the access of the first stmt of the chain:
7422 VS1: vx0 = &base
7423 VS2: vx1 = &base + vec_size*1
7424 VS3: vx2 = &base + vec_size*2
7425 VS4: vx3 = &base + vec_size*3
7427 Then permutation statements are generated:
7429 VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
7430 VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
7433 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
7434 (the order of the data-refs in the output of vect_permute_load_chain
7435 corresponds to the order of scalar stmts in the interleaving chain - see
7436 the documentation of vect_permute_load_chain()).
7437 The generation of permutation stmts and recording them in
7438 STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().
7440 In case of both multiple types and interleaving, the vector loads and
7441 permutation stmts above are created for every copy. The result vector
7442 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
7443 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
7445 /* If the data reference is aligned (dr_aligned) or potentially unaligned
7446 on a target that supports unaligned accesses (dr_unaligned_supported)
7447 we generate the following code:
7448 p = initial_addr;
7449 indx = 0;
7450 loop {
7451 p = p + indx * vectype_size;
7452 vec_dest = *(p);
7453 indx = indx + 1;
7456 Otherwise, the data reference is potentially unaligned on a target that
7457 does not support unaligned accesses (dr_explicit_realign_optimized) -
7458 then generate the following code, in which the data in each iteration is
7459 obtained by two vector loads, one from the previous iteration, and one
7460 from the current iteration:
7461 p1 = initial_addr;
7462 msq_init = *(floor(p1))
7463 p2 = initial_addr + VS - 1;
7464 realignment_token = call target_builtin;
7465 indx = 0;
7466 loop {
7467 p2 = p2 + indx * vectype_size
7468 lsq = *(floor(p2))
7469 vec_dest = realign_load (msq, lsq, realignment_token)
7470 indx = indx + 1;
7471 msq = lsq;
7472 } */
7474 /* If the misalignment remains the same throughout the execution of the
7475 loop, we can create the init_addr and permutation mask at the loop
7476 preheader. Otherwise, it needs to be created inside the loop.
7477 This can only occur when vectorizing memory accesses in the inner-loop
7478 nested within an outer-loop that is being vectorized. */
7480 if (nested_in_vect_loop
7481 && (DR_STEP_ALIGNMENT (dr) % GET_MODE_SIZE (TYPE_MODE (vectype))) != 0)
7483 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
7484 compute_in_loop = true;
7487 if ((alignment_support_scheme == dr_explicit_realign_optimized
7488 || alignment_support_scheme == dr_explicit_realign)
7489 && !compute_in_loop)
7491 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
7492 alignment_support_scheme, NULL_TREE,
7493 &at_loop);
7494 if (alignment_support_scheme == dr_explicit_realign_optimized)
7496 phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
7497 byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
7498 size_one_node);
7501 else
7502 at_loop = loop;
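/* For a contiguous reverse (negative step) access adjust the initial
   address so that the vector load covers the group of elements the
   next scalar iterations would access, lowest address first; the
   loaded vector is reversed with a permute further below.  */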
7504 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
7505 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
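/* For load-lanes the data-ref pointer addresses the whole group at
   once, as an array of vec_num * nunits scalar elements; otherwise
   each copy addresses a single vector.  */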
7507 if (memory_access_type == VMAT_LOAD_STORE_LANES)
7508 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
7509 else
7510 aggr_type = vectype;
7512 prev_stmt_info = NULL;
7513 poly_uint64 group_elt = 0;
7514 for (j = 0; j < ncopies; j++)
7516 /* 1. Create the vector or array pointer update chain. */
7517 if (j == 0)
7519 bool simd_lane_access_p
7520 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
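/* For a SIMD lane access to a local variable at a known zero offset,
   with a compatible alias set and supported alignment, the DR base
   address can be used directly and the running offset kept as a
   compile-time constant (dataref_offset) instead of creating a
   pointer induction variable.  */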
7521 if (simd_lane_access_p
7522 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
7523 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
7524 && integer_zerop (DR_OFFSET (first_dr))
7525 && integer_zerop (DR_INIT (first_dr))
7526 && alias_sets_conflict_p (get_alias_set (aggr_type),
7527 get_alias_set (TREE_TYPE (ref_type)))
7528 && (alignment_support_scheme == dr_aligned
7529 || alignment_support_scheme == dr_unaligned_supported))
7531 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
7532 dataref_offset = build_int_cst (ref_type, 0);
7533 inv_p = false;
7535 else if (first_stmt_for_drptr
7536 && first_stmt != first_stmt_for_drptr)
7538 dataref_ptr
7539 = vect_create_data_ref_ptr (first_stmt_for_drptr, aggr_type,
7540 at_loop, offset, &dummy, gsi,
7541 &ptr_incr, simd_lane_access_p,
7542 &inv_p, byte_offset);
7543 /* Adjust the pointer by the difference to first_stmt. */
7544 data_reference_p ptrdr
7545 = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt_for_drptr));
7546 tree diff = fold_convert (sizetype,
7547 size_binop (MINUS_EXPR,
7548 DR_INIT (first_dr),
7549 DR_INIT (ptrdr)));
7550 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7551 stmt, diff);
7553 else
7554 dataref_ptr
7555 = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
7556 offset, &dummy, gsi, &ptr_incr,
7557 simd_lane_access_p, &inv_p,
7558 byte_offset);
7560 else if (dataref_offset)
7561 dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
7562 TYPE_SIZE_UNIT (aggr_type));
7563 else
7564 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
7565 TYPE_SIZE_UNIT (aggr_type));
7567 if (grouped_load || slp_perm)
7568 dr_chain.create (vec_num);
7570 if (memory_access_type == VMAT_LOAD_STORE_LANES)
7572 tree vec_array;
7574 vec_array = create_vector_array (vectype, vec_num);
7576 /* Emit:
7577 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
7578 data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
7579 gcall *call = gimple_build_call_internal (IFN_LOAD_LANES, 1,
7580 data_ref);
7581 gimple_call_set_lhs (call, vec_array);
7582 gimple_call_set_nothrow (call, true);
7583 new_stmt = call;
7584 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7586 /* Extract each vector into an SSA_NAME. */
7587 for (i = 0; i < vec_num; i++)
7589 new_temp = read_vector_array (stmt, gsi, scalar_dest,
7590 vec_array, i);
7591 dr_chain.quick_push (new_temp);
7594 /* Record the mapping between SSA_NAMEs and statements. */
7595 vect_record_grouped_load_vectors (stmt, dr_chain);
7597 else
7599 for (i = 0; i < vec_num; i++)
7601 if (i > 0)
7602 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7603 stmt, NULL_TREE);
7605 /* 2. Create the vector-load in the loop. */
7606 switch (alignment_support_scheme)
7608 case dr_aligned:
7609 case dr_unaligned_supported:
7611 unsigned int align, misalign;
7613 data_ref
7614 = fold_build2 (MEM_REF, vectype, dataref_ptr,
7615 dataref_offset
7616 ? dataref_offset
7617 : build_int_cst (ref_type, 0));
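/* Decide what alignment to record on the access: the full target
   alignment when the access is known to be aligned, whatever
   alignment can be derived from the data-ref when the misalignment
   is unknown (DR_MISALIGNMENT == -1), and element alignment plus
   the known misalignment otherwise.  */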
7618 align = DR_TARGET_ALIGNMENT (dr);
7619 if (alignment_support_scheme == dr_aligned)
7621 gcc_assert (aligned_access_p (first_dr));
7622 misalign = 0;
7624 else if (DR_MISALIGNMENT (first_dr) == -1)
7626 align = dr_alignment (vect_dr_behavior (first_dr));
7627 misalign = 0;
7628 TREE_TYPE (data_ref)
7629 = build_aligned_type (TREE_TYPE (data_ref),
7630 align * BITS_PER_UNIT);
7632 else
7634 TREE_TYPE (data_ref)
7635 = build_aligned_type (TREE_TYPE (data_ref),
7636 TYPE_ALIGN (elem_type));
7637 misalign = DR_MISALIGNMENT (first_dr);
7639 if (dataref_offset == NULL_TREE
7640 && TREE_CODE (dataref_ptr) == SSA_NAME)
7641 set_ptr_info_alignment (get_ptr_info (dataref_ptr),
7642 align, misalign);
7643 break;
7645 case dr_explicit_realign:
7647 tree ptr, bump;
7649 tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));
7651 if (compute_in_loop)
7652 msq = vect_setup_realignment (first_stmt, gsi,
7653 &realignment_token,
7654 dr_explicit_realign,
7655 dataref_ptr, NULL);
7657 if (TREE_CODE (dataref_ptr) == SSA_NAME)
7658 ptr = copy_ssa_name (dataref_ptr);
7659 else
7660 ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
7661 unsigned int align = DR_TARGET_ALIGNMENT (first_dr);
7662 new_stmt = gimple_build_assign
7663 (ptr, BIT_AND_EXPR, dataref_ptr,
7664 build_int_cst
7665 (TREE_TYPE (dataref_ptr),
7666 -(HOST_WIDE_INT) align));
7667 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7668 data_ref
7669 = build2 (MEM_REF, vectype, ptr,
7670 build_int_cst (ref_type, 0));
7671 vec_dest = vect_create_destination_var (scalar_dest,
7672 vectype);
7673 new_stmt = gimple_build_assign (vec_dest, data_ref);
7674 new_temp = make_ssa_name (vec_dest, new_stmt);
7675 gimple_assign_set_lhs (new_stmt, new_temp);
7676 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
7677 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
7678 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7679 msq = new_temp;
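/* The second load has to cover the last byte of the unaligned
   access: bump the pointer by one vector size minus one byte and
   align it downwards, i.e. lsq is loaded from floor (p + VS - 1) as
   in the scheme described above.  */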
7681 bump = size_binop (MULT_EXPR, vs,
7682 TYPE_SIZE_UNIT (elem_type));
7683 bump = size_binop (MINUS_EXPR, bump, size_one_node);
7684 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
7685 new_stmt = gimple_build_assign
7686 (NULL_TREE, BIT_AND_EXPR, ptr,
7687 build_int_cst
7688 (TREE_TYPE (ptr), -(HOST_WIDE_INT) align));
7689 ptr = copy_ssa_name (ptr, new_stmt);
7690 gimple_assign_set_lhs (new_stmt, ptr);
7691 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7692 data_ref
7693 = build2 (MEM_REF, vectype, ptr,
7694 build_int_cst (ref_type, 0));
7695 break;
7697 case dr_explicit_realign_optimized:
7699 if (TREE_CODE (dataref_ptr) == SSA_NAME)
7700 new_temp = copy_ssa_name (dataref_ptr);
7701 else
7702 new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
7703 unsigned int align = DR_TARGET_ALIGNMENT (first_dr);
7704 new_stmt = gimple_build_assign
7705 (new_temp, BIT_AND_EXPR, dataref_ptr,
7706 build_int_cst (TREE_TYPE (dataref_ptr),
7707 -(HOST_WIDE_INT) align));
7708 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7709 data_ref
7710 = build2 (MEM_REF, vectype, new_temp,
7711 build_int_cst (ref_type, 0));
7712 break;
7714 default:
7715 gcc_unreachable ();
7717 vec_dest = vect_create_destination_var (scalar_dest, vectype);
7718 new_stmt = gimple_build_assign (vec_dest, data_ref);
7719 new_temp = make_ssa_name (vec_dest, new_stmt);
7720 gimple_assign_set_lhs (new_stmt, new_temp);
7721 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7723 /* 3. Handle explicit realignment if necessary/supported.
7724 Create in loop:
7725 vec_dest = realign_load (msq, lsq, realignment_token) */
7726 if (alignment_support_scheme == dr_explicit_realign_optimized
7727 || alignment_support_scheme == dr_explicit_realign)
7729 lsq = gimple_assign_lhs (new_stmt);
7730 if (!realignment_token)
7731 realignment_token = dataref_ptr;
7732 vec_dest = vect_create_destination_var (scalar_dest, vectype);
7733 new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
7734 msq, lsq, realignment_token);
7735 new_temp = make_ssa_name (vec_dest, new_stmt);
7736 gimple_assign_set_lhs (new_stmt, new_temp);
7737 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7739 if (alignment_support_scheme == dr_explicit_realign_optimized)
7741 gcc_assert (phi);
7742 if (i == vec_num - 1 && j == ncopies - 1)
7743 add_phi_arg (phi, lsq,
7744 loop_latch_edge (containing_loop),
7745 UNKNOWN_LOCATION);
7746 msq = lsq;
7750 /* 4. Handle invariant-load. */
7751 if (inv_p && !bb_vinfo)
7753 gcc_assert (!grouped_load);
7754 /* If we have versioned for aliasing or the loop doesn't
7755 have any data dependencies that would preclude this,
7756 then we are sure this is a loop invariant load and
7757 thus we can insert it on the preheader edge. */
7758 if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
7759 && !nested_in_vect_loop
7760 && hoist_defs_of_uses (stmt, loop))
7762 if (dump_enabled_p ())
7764 dump_printf_loc (MSG_NOTE, vect_location,
7765 "hoisting out of the vectorized "
7766 "loop: ");
7767 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7769 tree tem = copy_ssa_name (scalar_dest);
7770 gsi_insert_on_edge_immediate
7771 (loop_preheader_edge (loop),
7772 gimple_build_assign (tem,
7773 unshare_expr
7774 (gimple_assign_rhs1 (stmt))));
7775 new_temp = vect_init_vector (stmt, tem, vectype, NULL);
7776 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7777 set_vinfo_for_stmt (new_stmt,
7778 new_stmt_vec_info (new_stmt, vinfo));
7780 else
7782 gimple_stmt_iterator gsi2 = *gsi;
7783 gsi_next (&gsi2);
7784 new_temp = vect_init_vector (stmt, scalar_dest,
7785 vectype, &gsi2);
7786 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7790 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
7792 tree perm_mask = perm_mask_for_reverse (vectype);
7793 new_temp = permute_vec_elements (new_temp, new_temp,
7794 perm_mask, stmt, gsi);
7795 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7798 /* Collect vector loads and later create their permutation in
7799 vect_transform_grouped_load (). */
7800 if (grouped_load || slp_perm)
7801 dr_chain.quick_push (new_temp);
7803 /* Store vector loads in the corresponding SLP_NODE. */
7804 if (slp && !slp_perm)
7805 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7807 /* With SLP permutation we load the gaps as well; without it
7808 we need to skip the gaps after we manage to fully load
7809 all elements. group_gap_adj is GROUP_SIZE here. */
7810 group_elt += nunits;
7811 if (maybe_ne (group_gap_adj, 0U)
7812 && !slp_perm
7813 && known_eq (group_elt, group_size - group_gap_adj))
7815 poly_wide_int bump_val
7816 = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
7817 * group_gap_adj);
7818 tree bump = wide_int_to_tree (sizetype, bump_val);
7819 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7820 stmt, bump);
7821 group_elt = 0;
7824 /* Bump the vector pointer to account for a gap or for excess
7825 elements loaded for a permuted SLP load. */
7826 if (maybe_ne (group_gap_adj, 0U) && slp_perm)
7828 poly_wide_int bump_val
7829 = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
7830 * group_gap_adj);
7831 tree bump = wide_int_to_tree (sizetype, bump_val);
7832 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7833 stmt, bump);
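/* For a plain SLP load the vector stmts have already been recorded
   in SLP_TREE_VEC_STMTS above, so the permutation and RELATED_STMT
   bookkeeping below is not needed.  */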
7837 if (slp && !slp_perm)
7838 continue;
7840 if (slp_perm)
7842 unsigned n_perms;
7843 if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
7844 slp_node_instance, false,
7845 &n_perms))
7847 dr_chain.release ();
7848 return false;
7851 else
7853 if (grouped_load)
7855 if (memory_access_type != VMAT_LOAD_STORE_LANES)
7856 vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
7857 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
7859 else
7861 if (j == 0)
7862 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7863 else
7864 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7865 prev_stmt_info = vinfo_for_stmt (new_stmt);
7868 dr_chain.release ();
7871 return true;
7874 /* Function vect_is_simple_cond.
7876 Input:
7877 VINFO - the vect info of the loop or basic block that is being vectorized.
7878 COND - Condition that is checked for simple use.
7880 Output:
7881 *COMP_VECTYPE - the vector type for the comparison.
7882 *DTS - The def types for the arguments of the comparison
7884 Returns whether a COND can be vectorized. Checks whether
7885 condition operands are supportable using vect_is_simple_use. */
7887 static bool
7888 vect_is_simple_cond (tree cond, vec_info *vinfo,
7889 tree *comp_vectype, enum vect_def_type *dts,
7890 tree vectype)
7892 tree lhs, rhs;
7893 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
7895 /* Mask case. */
7896 if (TREE_CODE (cond) == SSA_NAME
7897 && VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (cond)))
7899 gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (cond);
7900 if (!vect_is_simple_use (cond, vinfo, &lhs_def_stmt,
7901 &dts[0], comp_vectype)
7902 || !*comp_vectype
7903 || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype))
7904 return false;
7905 return true;
7908 if (!COMPARISON_CLASS_P (cond))
7909 return false;
7911 lhs = TREE_OPERAND (cond, 0);
7912 rhs = TREE_OPERAND (cond, 1);
7914 if (TREE_CODE (lhs) == SSA_NAME)
7916 gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
7917 if (!vect_is_simple_use (lhs, vinfo, &lhs_def_stmt, &dts[0], &vectype1))
7918 return false;
7920 else if (TREE_CODE (lhs) == INTEGER_CST || TREE_CODE (lhs) == REAL_CST
7921 || TREE_CODE (lhs) == FIXED_CST)
7922 dts[0] = vect_constant_def;
7923 else
7924 return false;
7926 if (TREE_CODE (rhs) == SSA_NAME)
7928 gimple *rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
7929 if (!vect_is_simple_use (rhs, vinfo, &rhs_def_stmt, &dts[1], &vectype2))
7930 return false;
7932 else if (TREE_CODE (rhs) == INTEGER_CST || TREE_CODE (rhs) == REAL_CST
7933 || TREE_CODE (rhs) == FIXED_CST)
7934 dts[1] = vect_constant_def;
7935 else
7936 return false;
7938 if (vectype1 && vectype2
7939 && TYPE_VECTOR_SUBPARTS (vectype1) != TYPE_VECTOR_SUBPARTS (vectype2))
7940 return false;
7942 *comp_vectype = vectype1 ? vectype1 : vectype2;
7943 /* Invariant comparison. */
7944 if (! *comp_vectype)
7946 tree scalar_type = TREE_TYPE (lhs);
7947 /* If we can widen the comparison to match vectype do so. */
7948 if (INTEGRAL_TYPE_P (scalar_type)
7949 && tree_int_cst_lt (TYPE_SIZE (scalar_type),
7950 TYPE_SIZE (TREE_TYPE (vectype))))
7951 scalar_type = build_nonstandard_integer_type
7952 (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (vectype))),
7953 TYPE_UNSIGNED (scalar_type));
7954 *comp_vectype = get_vectype_for_scalar_type (scalar_type);
7957 return true;
7960 /* vectorizable_condition.
7962 Check if STMT is conditional modify expression that can be vectorized.
7963 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
7964 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
7965 at GSI.
7967 When STMT is vectorized as nested cycle, REDUC_DEF is the vector variable
7968 to be used at REDUC_INDEX (in then clause if REDUC_INDEX is 1, and in
7969 else clause if it is 2).
7971 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
7973 bool
7974 vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
7975 gimple **vec_stmt, tree reduc_def, int reduc_index,
7976 slp_tree slp_node)
7978 tree scalar_dest = NULL_TREE;
7979 tree vec_dest = NULL_TREE;
7980 tree cond_expr, cond_expr0 = NULL_TREE, cond_expr1 = NULL_TREE;
7981 tree then_clause, else_clause;
7982 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7983 tree comp_vectype = NULL_TREE;
7984 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
7985 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
7986 tree vec_compare;
7987 tree new_temp;
7988 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7989 enum vect_def_type dts[4]
7990 = {vect_unknown_def_type, vect_unknown_def_type,
7991 vect_unknown_def_type, vect_unknown_def_type};
7992 int ndts = 4;
7993 int ncopies;
7994 enum tree_code code, cond_code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
7995 stmt_vec_info prev_stmt_info = NULL;
7996 int i, j;
7997 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7998 vec<tree> vec_oprnds0 = vNULL;
7999 vec<tree> vec_oprnds1 = vNULL;
8000 vec<tree> vec_oprnds2 = vNULL;
8001 vec<tree> vec_oprnds3 = vNULL;
8002 tree vec_cmp_type;
8003 bool masked = false;
8005 if (reduc_index && STMT_SLP_TYPE (stmt_info))
8006 return false;
8008 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == TREE_CODE_REDUCTION)
8010 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
8011 return false;
8013 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
8014 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
8015 && reduc_def))
8016 return false;
8018 /* FORNOW: not yet supported. */
8019 if (STMT_VINFO_LIVE_P (stmt_info))
8021 if (dump_enabled_p ())
8022 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8023 "value used after loop.\n");
8024 return false;
8028 /* Is vectorizable conditional operation? */
8029 if (!is_gimple_assign (stmt))
8030 return false;
8032 code = gimple_assign_rhs_code (stmt);
8034 if (code != COND_EXPR)
8035 return false;
8037 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
8038 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
8040 if (slp_node)
8041 ncopies = 1;
8042 else
8043 ncopies = vect_get_num_copies (loop_vinfo, vectype);
8045 gcc_assert (ncopies >= 1);
8046 if (reduc_index && ncopies > 1)
8047 return false; /* FORNOW */
8049 cond_expr = gimple_assign_rhs1 (stmt);
8050 then_clause = gimple_assign_rhs2 (stmt);
8051 else_clause = gimple_assign_rhs3 (stmt);
8053 if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo,
8054 &comp_vectype, &dts[0], vectype)
8055 || !comp_vectype)
8056 return false;
8058 gimple *def_stmt;
8059 if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &def_stmt, &dts[2],
8060 &vectype1))
8061 return false;
8062 if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &def_stmt, &dts[3],
8063 &vectype2))
8064 return false;
8066 if (vectype1 && !useless_type_conversion_p (vectype, vectype1))
8067 return false;
8069 if (vectype2 && !useless_type_conversion_p (vectype, vectype2))
8070 return false;
8072 masked = !COMPARISON_CLASS_P (cond_expr);
8073 vec_cmp_type = build_same_sized_truth_vector_type (comp_vectype);
8075 if (vec_cmp_type == NULL_TREE)
8076 return false;
8078 cond_code = TREE_CODE (cond_expr);
8079 if (!masked)
8081 cond_expr0 = TREE_OPERAND (cond_expr, 0);
8082 cond_expr1 = TREE_OPERAND (cond_expr, 1);
8085 if (!masked && VECTOR_BOOLEAN_TYPE_P (comp_vectype))
8087 /* Boolean values may have another representation in vectors
8088 and therefore we prefer bit operations over comparison for
8089 them (which also works for scalar masks). We store opcodes
8090 to use in bitop1 and bitop2. Statement is vectorized as
8091 BITOP2 (rhs1 BITOP1 rhs2) or rhs1 BITOP2 (BITOP1 rhs2)
8092 depending on bitop1 and bitop2 arity. */
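/* For example, on boolean vectors a GT_EXPR mask is computed as
   rhs1 & ~rhs2, i.e. bitop1 == BIT_NOT_EXPR (applied to rhs2) and
   bitop2 == BIT_AND_EXPR; LT_EXPR uses the same opcodes with the
   operands swapped.  */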
8093 switch (cond_code)
8095 case GT_EXPR:
8096 bitop1 = BIT_NOT_EXPR;
8097 bitop2 = BIT_AND_EXPR;
8098 break;
8099 case GE_EXPR:
8100 bitop1 = BIT_NOT_EXPR;
8101 bitop2 = BIT_IOR_EXPR;
8102 break;
8103 case LT_EXPR:
8104 bitop1 = BIT_NOT_EXPR;
8105 bitop2 = BIT_AND_EXPR;
8106 std::swap (cond_expr0, cond_expr1);
8107 break;
8108 case LE_EXPR:
8109 bitop1 = BIT_NOT_EXPR;
8110 bitop2 = BIT_IOR_EXPR;
8111 std::swap (cond_expr0, cond_expr1);
8112 break;
8113 case NE_EXPR:
8114 bitop1 = BIT_XOR_EXPR;
8115 break;
8116 case EQ_EXPR:
8117 bitop1 = BIT_XOR_EXPR;
8118 bitop2 = BIT_NOT_EXPR;
8119 break;
8120 default:
8121 return false;
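/* The comparison has been lowered to bit operations on the mask, so
   from here on the condition is handled like an SSA_NAME mask
   operand of the VEC_COND_EXPR.  */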
8123 cond_code = SSA_NAME;
8126 if (!vec_stmt)
8128 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
8129 if (bitop1 != NOP_EXPR)
8131 machine_mode mode = TYPE_MODE (comp_vectype);
8132 optab optab;
8134 optab = optab_for_tree_code (bitop1, comp_vectype, optab_default);
8135 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8136 return false;
8138 if (bitop2 != NOP_EXPR)
8140 optab = optab_for_tree_code (bitop2, comp_vectype,
8141 optab_default);
8142 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8143 return false;
8146 if (expand_vec_cond_expr_p (vectype, comp_vectype,
8147 cond_code))
8149 vect_model_simple_cost (stmt_info, ncopies, dts, ndts, NULL, NULL);
8150 return true;
8152 return false;
8155 /* Transform. */
8157 if (!slp_node)
8159 vec_oprnds0.create (1);
8160 vec_oprnds1.create (1);
8161 vec_oprnds2.create (1);
8162 vec_oprnds3.create (1);
8165 /* Handle def. */
8166 scalar_dest = gimple_assign_lhs (stmt);
8167 vec_dest = vect_create_destination_var (scalar_dest, vectype);
8169 /* Handle cond expr. */
8170 for (j = 0; j < ncopies; j++)
8172 gassign *new_stmt = NULL;
8173 if (j == 0)
8175 if (slp_node)
8177 auto_vec<tree, 4> ops;
8178 auto_vec<vec<tree>, 4> vec_defs;
8180 if (masked)
8181 ops.safe_push (cond_expr);
8182 else
8184 ops.safe_push (cond_expr0);
8185 ops.safe_push (cond_expr1);
8187 ops.safe_push (then_clause);
8188 ops.safe_push (else_clause);
8189 vect_get_slp_defs (ops, slp_node, &vec_defs);
8190 vec_oprnds3 = vec_defs.pop ();
8191 vec_oprnds2 = vec_defs.pop ();
8192 if (!masked)
8193 vec_oprnds1 = vec_defs.pop ();
8194 vec_oprnds0 = vec_defs.pop ();
8196 else
8198 gimple *gtemp;
8199 if (masked)
8201 vec_cond_lhs
8202 = vect_get_vec_def_for_operand (cond_expr, stmt,
8203 comp_vectype);
8204 vect_is_simple_use (cond_expr, stmt_info->vinfo,
8205 &gtemp, &dts[0]);
8207 else
8209 vec_cond_lhs
8210 = vect_get_vec_def_for_operand (cond_expr0,
8211 stmt, comp_vectype);
8212 vect_is_simple_use (cond_expr0, loop_vinfo, &gtemp, &dts[0]);
8214 vec_cond_rhs
8215 = vect_get_vec_def_for_operand (cond_expr1,
8216 stmt, comp_vectype);
8217 vect_is_simple_use (cond_expr1, loop_vinfo, &gtemp, &dts[1]);
8219 if (reduc_index == 1)
8220 vec_then_clause = reduc_def;
8221 else
8223 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
8224 stmt);
8225 vect_is_simple_use (then_clause, loop_vinfo,
8226 &gtemp, &dts[2]);
8228 if (reduc_index == 2)
8229 vec_else_clause = reduc_def;
8230 else
8232 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
8233 stmt);
8234 vect_is_simple_use (else_clause, loop_vinfo, &gtemp, &dts[3]);
8238 else
8240 vec_cond_lhs
8241 = vect_get_vec_def_for_stmt_copy (dts[0],
8242 vec_oprnds0.pop ());
8243 if (!masked)
8244 vec_cond_rhs
8245 = vect_get_vec_def_for_stmt_copy (dts[1],
8246 vec_oprnds1.pop ());
8248 vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
8249 vec_oprnds2.pop ());
8250 vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
8251 vec_oprnds3.pop ());
8254 if (!slp_node)
8256 vec_oprnds0.quick_push (vec_cond_lhs);
8257 if (!masked)
8258 vec_oprnds1.quick_push (vec_cond_rhs);
8259 vec_oprnds2.quick_push (vec_then_clause);
8260 vec_oprnds3.quick_push (vec_else_clause);
8263 /* Arguments are ready. Create the new vector stmt. */
8264 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
8266 vec_then_clause = vec_oprnds2[i];
8267 vec_else_clause = vec_oprnds3[i];
8269 if (masked)
8270 vec_compare = vec_cond_lhs;
8271 else
8273 vec_cond_rhs = vec_oprnds1[i];
8274 if (bitop1 == NOP_EXPR)
8275 vec_compare = build2 (cond_code, vec_cmp_type,
8276 vec_cond_lhs, vec_cond_rhs);
8277 else
8279 new_temp = make_ssa_name (vec_cmp_type);
8280 if (bitop1 == BIT_NOT_EXPR)
8281 new_stmt = gimple_build_assign (new_temp, bitop1,
8282 vec_cond_rhs);
8283 else
8284 new_stmt
8285 = gimple_build_assign (new_temp, bitop1, vec_cond_lhs,
8286 vec_cond_rhs);
8287 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8288 if (bitop2 == NOP_EXPR)
8289 vec_compare = new_temp;
8290 else if (bitop2 == BIT_NOT_EXPR)
8292 /* Instead of doing ~x ? y : z do x ? z : y. */
8293 vec_compare = new_temp;
8294 std::swap (vec_then_clause, vec_else_clause);
8296 else
8298 vec_compare = make_ssa_name (vec_cmp_type);
8299 new_stmt
8300 = gimple_build_assign (vec_compare, bitop2,
8301 vec_cond_lhs, new_temp);
8302 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8306 new_temp = make_ssa_name (vec_dest);
8307 new_stmt = gimple_build_assign (new_temp, VEC_COND_EXPR,
8308 vec_compare, vec_then_clause,
8309 vec_else_clause);
8310 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8311 if (slp_node)
8312 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
8315 if (slp_node)
8316 continue;
8318 if (j == 0)
8319 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
8320 else
8321 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
8323 prev_stmt_info = vinfo_for_stmt (new_stmt);
8326 vec_oprnds0.release ();
8327 vec_oprnds1.release ();
8328 vec_oprnds2.release ();
8329 vec_oprnds3.release ();
8331 return true;
8334 /* vectorizable_comparison.
8336 Check if STMT is comparison expression that can be vectorized.
8337 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
8338 comparison, put it in VEC_STMT, and insert it at GSI.
8340 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
8342 static bool
8343 vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
8344 gimple **vec_stmt, tree reduc_def,
8345 slp_tree slp_node)
8347 tree lhs, rhs1, rhs2;
8348 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8349 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
8350 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
8351 tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE;
8352 tree new_temp;
8353 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
8354 enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type};
8355 int ndts = 2;
8356 unsigned nunits;
8357 int ncopies;
8358 enum tree_code code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
8359 stmt_vec_info prev_stmt_info = NULL;
8360 int i, j;
8361 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
8362 vec<tree> vec_oprnds0 = vNULL;
8363 vec<tree> vec_oprnds1 = vNULL;
8364 gimple *def_stmt;
8365 tree mask_type;
8366 tree mask;
8368 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
8369 return false;
8371 if (!vectype || !VECTOR_BOOLEAN_TYPE_P (vectype))
8372 return false;
8374 mask_type = vectype;
8375 nunits = TYPE_VECTOR_SUBPARTS (vectype);
8377 if (slp_node)
8378 ncopies = 1;
8379 else
8380 ncopies = vect_get_num_copies (loop_vinfo, vectype);
8382 gcc_assert (ncopies >= 1);
8383 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
8384 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
8385 && reduc_def))
8386 return false;
8388 if (STMT_VINFO_LIVE_P (stmt_info))
8390 if (dump_enabled_p ())
8391 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8392 "value used after loop.\n");
8393 return false;
8396 if (!is_gimple_assign (stmt))
8397 return false;
8399 code = gimple_assign_rhs_code (stmt);
8401 if (TREE_CODE_CLASS (code) != tcc_comparison)
8402 return false;
8404 rhs1 = gimple_assign_rhs1 (stmt);
8405 rhs2 = gimple_assign_rhs2 (stmt);
8407 if (!vect_is_simple_use (rhs1, stmt_info->vinfo, &def_stmt,
8408 &dts[0], &vectype1))
8409 return false;
8411 if (!vect_is_simple_use (rhs2, stmt_info->vinfo, &def_stmt,
8412 &dts[1], &vectype2))
8413 return false;
8415 if (vectype1 && vectype2
8416 && TYPE_VECTOR_SUBPARTS (vectype1) != TYPE_VECTOR_SUBPARTS (vectype2))
8417 return false;
8419 vectype = vectype1 ? vectype1 : vectype2;
8421 /* Invariant comparison. */
8422 if (!vectype)
8424 vectype = get_vectype_for_scalar_type (TREE_TYPE (rhs1));
8425 if (TYPE_VECTOR_SUBPARTS (vectype) != nunits)
8426 return false;
8428 else if (nunits != TYPE_VECTOR_SUBPARTS (vectype))
8429 return false;
8431 /* Can't compare mask and non-mask types. */
8432 if (vectype1 && vectype2
8433 && (VECTOR_BOOLEAN_TYPE_P (vectype1) ^ VECTOR_BOOLEAN_TYPE_P (vectype2)))
8434 return false;
8436 /* Boolean values may have another representation in vectors
8437 and therefore we prefer bit operations over comparison for
8438 them (which also works for scalar masks). We store opcodes
8439 to use in bitop1 and bitop2. Statement is vectorized as
8440 BITOP2 (rhs1 BITOP1 rhs2) or
8441 rhs1 BITOP2 (BITOP1 rhs2)
8442 depending on bitop1 and bitop2 arity. */
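/* E.g. a GE_EXPR mask becomes rhs1 | ~rhs2 and an EQ_EXPR mask
   becomes ~(rhs1 ^ rhs2): bitop1 produces the intermediate value
   and bitop2, when present, combines it with rhs1 or inverts it.  */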
8443 if (VECTOR_BOOLEAN_TYPE_P (vectype))
8445 if (code == GT_EXPR)
8447 bitop1 = BIT_NOT_EXPR;
8448 bitop2 = BIT_AND_EXPR;
8450 else if (code == GE_EXPR)
8452 bitop1 = BIT_NOT_EXPR;
8453 bitop2 = BIT_IOR_EXPR;
8455 else if (code == LT_EXPR)
8457 bitop1 = BIT_NOT_EXPR;
8458 bitop2 = BIT_AND_EXPR;
8459 std::swap (rhs1, rhs2);
8460 std::swap (dts[0], dts[1]);
8462 else if (code == LE_EXPR)
8464 bitop1 = BIT_NOT_EXPR;
8465 bitop2 = BIT_IOR_EXPR;
8466 std::swap (rhs1, rhs2);
8467 std::swap (dts[0], dts[1]);
8469 else
8471 bitop1 = BIT_XOR_EXPR;
8472 if (code == EQ_EXPR)
8473 bitop2 = BIT_NOT_EXPR;
8477 if (!vec_stmt)
8479 STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type;
8480 vect_model_simple_cost (stmt_info, ncopies * (1 + (bitop2 != NOP_EXPR)),
8481 dts, ndts, NULL, NULL);
8482 if (bitop1 == NOP_EXPR)
8483 return expand_vec_cmp_expr_p (vectype, mask_type, code);
8484 else
8486 machine_mode mode = TYPE_MODE (vectype);
8487 optab optab;
8489 optab = optab_for_tree_code (bitop1, vectype, optab_default);
8490 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8491 return false;
8493 if (bitop2 != NOP_EXPR)
8495 optab = optab_for_tree_code (bitop2, vectype, optab_default);
8496 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8497 return false;
8499 return true;
8503 /* Transform. */
8504 if (!slp_node)
8506 vec_oprnds0.create (1);
8507 vec_oprnds1.create (1);
8510 /* Handle def. */
8511 lhs = gimple_assign_lhs (stmt);
8512 mask = vect_create_destination_var (lhs, mask_type);
8514 /* Handle cmp expr. */
8515 for (j = 0; j < ncopies; j++)
8517 gassign *new_stmt = NULL;
8518 if (j == 0)
8520 if (slp_node)
8522 auto_vec<tree, 2> ops;
8523 auto_vec<vec<tree>, 2> vec_defs;
8525 ops.safe_push (rhs1);
8526 ops.safe_push (rhs2);
8527 vect_get_slp_defs (ops, slp_node, &vec_defs);
8528 vec_oprnds1 = vec_defs.pop ();
8529 vec_oprnds0 = vec_defs.pop ();
8531 else
8533 vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt, vectype);
8534 vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt, vectype);
8537 else
8539 vec_rhs1 = vect_get_vec_def_for_stmt_copy (dts[0],
8540 vec_oprnds0.pop ());
8541 vec_rhs2 = vect_get_vec_def_for_stmt_copy (dts[1],
8542 vec_oprnds1.pop ());
8545 if (!slp_node)
8547 vec_oprnds0.quick_push (vec_rhs1);
8548 vec_oprnds1.quick_push (vec_rhs2);
8551 /* Arguments are ready. Create the new vector stmt. */
8552 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_rhs1)
8554 vec_rhs2 = vec_oprnds1[i];
8556 new_temp = make_ssa_name (mask);
8557 if (bitop1 == NOP_EXPR)
8559 new_stmt = gimple_build_assign (new_temp, code,
8560 vec_rhs1, vec_rhs2);
8561 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8563 else
8565 if (bitop1 == BIT_NOT_EXPR)
8566 new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs2);
8567 else
8568 new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs1,
8569 vec_rhs2);
8570 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8571 if (bitop2 != NOP_EXPR)
8573 tree res = make_ssa_name (mask);
8574 if (bitop2 == BIT_NOT_EXPR)
8575 new_stmt = gimple_build_assign (res, bitop2, new_temp);
8576 else
8577 new_stmt = gimple_build_assign (res, bitop2, vec_rhs1,
8578 new_temp);
8579 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8582 if (slp_node)
8583 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
8586 if (slp_node)
8587 continue;
8589 if (j == 0)
8590 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
8591 else
8592 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
8594 prev_stmt_info = vinfo_for_stmt (new_stmt);
8597 vec_oprnds0.release ();
8598 vec_oprnds1.release ();
8600 return true;
8603 /* If SLP_NODE is nonnull, return true if vectorizable_live_operation
8604 can handle all live statements in the node. Otherwise return true
8605 if STMT is not live or if vectorizable_live_operation can handle it.
8606 GSI and VEC_STMT are as for vectorizable_live_operation. */
8608 static bool
8609 can_vectorize_live_stmts (gimple *stmt, gimple_stmt_iterator *gsi,
8610 slp_tree slp_node, gimple **vec_stmt)
8612 if (slp_node)
8614 gimple *slp_stmt;
8615 unsigned int i;
8616 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node), i, slp_stmt)
8618 stmt_vec_info slp_stmt_info = vinfo_for_stmt (slp_stmt);
8619 if (STMT_VINFO_LIVE_P (slp_stmt_info)
8620 && !vectorizable_live_operation (slp_stmt, gsi, slp_node, i,
8621 vec_stmt))
8622 return false;
8625 else if (STMT_VINFO_LIVE_P (vinfo_for_stmt (stmt))
8626 && !vectorizable_live_operation (stmt, gsi, slp_node, -1, vec_stmt))
8627 return false;
8629 return true;
8632 /* Make sure the statement is vectorizable. */
8634 bool
8635 vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
8636 slp_instance node_instance)
8638 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8639 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
8640 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
8641 bool ok;
8642 gimple *pattern_stmt;
8643 gimple_seq pattern_def_seq;
8645 if (dump_enabled_p ())
8647 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
8648 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
8651 if (gimple_has_volatile_ops (stmt))
8653 if (dump_enabled_p ())
8654 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8655 "not vectorized: stmt has volatile operands\n");
8657 return false;
8660 /* Skip stmts that do not need to be vectorized. In loops this is expected
8661 to include:
8662 - the COND_EXPR which is the loop exit condition
8663 - any LABEL_EXPRs in the loop
8664 - computations that are used only for array indexing or loop control.
8665 In basic blocks we only analyze statements that are a part of some SLP
8666 instance, therefore, all the statements are relevant.
8668 Pattern statement needs to be analyzed instead of the original statement
8669 if the original statement is not relevant. Otherwise, we analyze both
8670 statements. In basic blocks we are called from some SLP instance
8671 traversal, don't analyze pattern stmts instead of the original; the
8672 pattern stmts will already be part of the SLP instance. */
8674 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
8675 if (!STMT_VINFO_RELEVANT_P (stmt_info)
8676 && !STMT_VINFO_LIVE_P (stmt_info))
8678 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
8679 && pattern_stmt
8680 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
8681 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
8683 /* Analyze PATTERN_STMT instead of the original stmt. */
8684 stmt = pattern_stmt;
8685 stmt_info = vinfo_for_stmt (pattern_stmt);
8686 if (dump_enabled_p ())
8688 dump_printf_loc (MSG_NOTE, vect_location,
8689 "==> examining pattern statement: ");
8690 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
8693 else
8695 if (dump_enabled_p ())
8696 dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");
8698 return true;
8701 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
8702 && node == NULL
8703 && pattern_stmt
8704 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
8705 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
8707 /* Analyze PATTERN_STMT too. */
8708 if (dump_enabled_p ())
8710 dump_printf_loc (MSG_NOTE, vect_location,
8711 "==> examining pattern statement: ");
8712 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
8715 if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node,
8716 node_instance))
8717 return false;
8720 if (is_pattern_stmt_p (stmt_info)
8721 && node == NULL
8722 && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
8724 gimple_stmt_iterator si;
8726 for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
8728 gimple *pattern_def_stmt = gsi_stmt (si);
8729 if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
8730 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
8732 /* Analyze def stmt of STMT if it's a pattern stmt. */
8733 if (dump_enabled_p ())
8735 dump_printf_loc (MSG_NOTE, vect_location,
8736 "==> examining pattern def statement: ");
8737 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
8740 if (!vect_analyze_stmt (pattern_def_stmt,
8741 need_to_vectorize, node, node_instance))
8742 return false;
8747 switch (STMT_VINFO_DEF_TYPE (stmt_info))
8749 case vect_internal_def:
8750 break;
8752 case vect_reduction_def:
8753 case vect_nested_cycle:
8754 gcc_assert (!bb_vinfo
8755 && (relevance == vect_used_in_outer
8756 || relevance == vect_used_in_outer_by_reduction
8757 || relevance == vect_used_by_reduction
8758 || relevance == vect_unused_in_scope
8759 || relevance == vect_used_only_live));
8760 break;
8762 case vect_induction_def:
8763 gcc_assert (!bb_vinfo);
8764 break;
8766 case vect_constant_def:
8767 case vect_external_def:
8768 case vect_unknown_def_type:
8769 default:
8770 gcc_unreachable ();
8773 if (STMT_VINFO_RELEVANT_P (stmt_info))
8775 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
8776 gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
8777 || (is_gimple_call (stmt)
8778 && gimple_call_lhs (stmt) == NULL_TREE));
8779 *need_to_vectorize = true;
8782 if (PURE_SLP_STMT (stmt_info) && !node)
8784 dump_printf_loc (MSG_NOTE, vect_location,
8785 "handled only by SLP analysis\n");
8786 return true;
8789 ok = true;
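/* During analysis each vectorizable_* routine below is called with a
   NULL vec_stmt; the first one that accepts the stmt records the kind
   of vectorization in STMT_VINFO_TYPE for the transform phase.  */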
8790 if (!bb_vinfo
8791 && (STMT_VINFO_RELEVANT_P (stmt_info)
8792 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
8793 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
8794 || vectorizable_conversion (stmt, NULL, NULL, node)
8795 || vectorizable_shift (stmt, NULL, NULL, node)
8796 || vectorizable_operation (stmt, NULL, NULL, node)
8797 || vectorizable_assignment (stmt, NULL, NULL, node)
8798 || vectorizable_load (stmt, NULL, NULL, node, NULL)
8799 || vectorizable_call (stmt, NULL, NULL, node)
8800 || vectorizable_store (stmt, NULL, NULL, node)
8801 || vectorizable_reduction (stmt, NULL, NULL, node, node_instance)
8802 || vectorizable_induction (stmt, NULL, NULL, node)
8803 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
8804 || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
8805 else
8807 if (bb_vinfo)
8808 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
8809 || vectorizable_conversion (stmt, NULL, NULL, node)
8810 || vectorizable_shift (stmt, NULL, NULL, node)
8811 || vectorizable_operation (stmt, NULL, NULL, node)
8812 || vectorizable_assignment (stmt, NULL, NULL, node)
8813 || vectorizable_load (stmt, NULL, NULL, node, NULL)
8814 || vectorizable_call (stmt, NULL, NULL, node)
8815 || vectorizable_store (stmt, NULL, NULL, node)
8816 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
8817 || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
8820 if (!ok)
8822 if (dump_enabled_p ())
8824 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8825 "not vectorized: relevant stmt not ");
8826 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
8827 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
8830 return false;
8833 if (bb_vinfo)
8834 return true;
8836 /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
8837 need extra handling, except for vectorizable reductions. */
8838 if (STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
8839 && !can_vectorize_live_stmts (stmt, NULL, node, NULL))
8841 if (dump_enabled_p ())
8843 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8844 "not vectorized: live stmt not supported: ");
8845 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
8848 return false;
8851 return true;
8855 /* Function vect_transform_stmt.
8857 Create a vectorized stmt to replace STMT, and insert it at BSI. */
8859 bool
8860 vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
8861 bool *grouped_store, slp_tree slp_node,
8862 slp_instance slp_node_instance)
8864 bool is_store = false;
8865 gimple *vec_stmt = NULL;
8866 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8867 bool done;
8869 gcc_assert (slp_node || !PURE_SLP_STMT (stmt_info));
8870 gimple *old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
8872 switch (STMT_VINFO_TYPE (stmt_info))
8874 case type_demotion_vec_info_type:
8875 case type_promotion_vec_info_type:
8876 case type_conversion_vec_info_type:
8877 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
8878 gcc_assert (done);
8879 break;
8881 case induc_vec_info_type:
8882 done = vectorizable_induction (stmt, gsi, &vec_stmt, slp_node);
8883 gcc_assert (done);
8884 break;
8886 case shift_vec_info_type:
8887 done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
8888 gcc_assert (done);
8889 break;
8891 case op_vec_info_type:
8892 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
8893 gcc_assert (done);
8894 break;
8896 case assignment_vec_info_type:
8897 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
8898 gcc_assert (done);
8899 break;
8901 case load_vec_info_type:
8902 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
8903 slp_node_instance);
8904 gcc_assert (done);
8905 break;
8907 case store_vec_info_type:
8908 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
8909 gcc_assert (done);
8910 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
8912 /* In case of interleaving, the whole chain is vectorized when the
8913 last store in the chain is reached. Store stmts before the last
8914 one are skipped, and their stmt_vec_info shouldn't be freed
8915 meanwhile. */
8916 *grouped_store = true;
8917 if (STMT_VINFO_VEC_STMT (stmt_info))
8918 is_store = true;
8920 else
8921 is_store = true;
8922 break;
8924 case condition_vec_info_type:
8925 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
8926 gcc_assert (done);
8927 break;
8929 case comparison_vec_info_type:
8930 done = vectorizable_comparison (stmt, gsi, &vec_stmt, NULL, slp_node);
8931 gcc_assert (done);
8932 break;
8934 case call_vec_info_type:
8935 done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
8936 stmt = gsi_stmt (*gsi);
8937 if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
8938 is_store = true;
8939 break;
8941 case call_simd_clone_vec_info_type:
8942 done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
8943 stmt = gsi_stmt (*gsi);
8944 break;
8946 case reduc_vec_info_type:
8947 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node,
8948 slp_node_instance);
8949 gcc_assert (done);
8950 break;
8952 default:
8953 if (!STMT_VINFO_LIVE_P (stmt_info))
8955 if (dump_enabled_p ())
8956 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8957 "stmt not supported.\n");
8958 gcc_unreachable ();
8962 /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
8963 This would break hybrid SLP vectorization. */
8964 if (slp_node)
8965 gcc_assert (!vec_stmt
8966 && STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt);
8968 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
8969 is being vectorized, but outside the immediately enclosing loop. */
8970 if (vec_stmt
8971 && STMT_VINFO_LOOP_VINFO (stmt_info)
8972 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
8973 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
8974 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
8975 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
8976 || STMT_VINFO_RELEVANT (stmt_info) ==
8977 vect_used_in_outer_by_reduction))
8979 struct loop *innerloop = LOOP_VINFO_LOOP (
8980 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
8981 imm_use_iterator imm_iter;
8982 use_operand_p use_p;
8983 tree scalar_dest;
8984 gimple *exit_phi;
8986 if (dump_enabled_p ())
8987 dump_printf_loc (MSG_NOTE, vect_location,
8988 "Record the vdef for outer-loop vectorization.\n");
8990 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
8991 (to be used when vectorizing outer-loop stmts that use the DEF of
8992 STMT). */
8993 if (gimple_code (stmt) == GIMPLE_PHI)
8994 scalar_dest = PHI_RESULT (stmt);
8995 else
8996 scalar_dest = gimple_assign_lhs (stmt);
8998 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
9000 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
9002 exit_phi = USE_STMT (use_p);
9003 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
9008 /* Handle stmts whose DEF is used outside the loop-nest that is
9009 being vectorized. */
9010 if (STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
9012 done = can_vectorize_live_stmts (stmt, gsi, slp_node, &vec_stmt);
9013 gcc_assert (done);
9016 if (vec_stmt)
9017 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
9019 return is_store;
9023 /* Remove a group of stores (for SLP or interleaving), free their
9024 stmt_vec_info. */
9026 void
9027 vect_remove_stores (gimple *first_stmt)
9029 gimple *next = first_stmt;
9030 gimple *tmp;
9031 gimple_stmt_iterator next_si;
9033 while (next)
9035 stmt_vec_info stmt_info = vinfo_for_stmt (next);
9037 tmp = GROUP_NEXT_ELEMENT (stmt_info);
9038 if (is_pattern_stmt_p (stmt_info))
9039 next = STMT_VINFO_RELATED_STMT (stmt_info);
9040 /* Free the attached stmt_vec_info and remove the stmt. */
9041 next_si = gsi_for_stmt (next);
9042 unlink_stmt_vdef (next);
9043 gsi_remove (&next_si, true);
9044 release_defs (next);
9045 free_stmt_vec_info (next);
9046 next = tmp;
9051 /* Function new_stmt_vec_info.
9053 Create and initialize a new stmt_vec_info struct for STMT. */
9055 stmt_vec_info
9056 new_stmt_vec_info (gimple *stmt, vec_info *vinfo)
9058 stmt_vec_info res;
9059 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
9061 STMT_VINFO_TYPE (res) = undef_vec_info_type;
9062 STMT_VINFO_STMT (res) = stmt;
9063 res->vinfo = vinfo;
9064 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
9065 STMT_VINFO_LIVE_P (res) = false;
9066 STMT_VINFO_VECTYPE (res) = NULL;
9067 STMT_VINFO_VEC_STMT (res) = NULL;
9068 STMT_VINFO_VECTORIZABLE (res) = true;
9069 STMT_VINFO_IN_PATTERN_P (res) = false;
9070 STMT_VINFO_RELATED_STMT (res) = NULL;
9071 STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
9072 STMT_VINFO_DATA_REF (res) = NULL;
9073 STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION;
9074 STMT_VINFO_VEC_CONST_COND_REDUC_CODE (res) = ERROR_MARK;
9076 if (gimple_code (stmt) == GIMPLE_PHI
9077 && is_loop_header_bb_p (gimple_bb (stmt)))
9078 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
9079 else
9080 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
9082 STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
9083 STMT_SLP_TYPE (res) = loop_vect;
9084 STMT_VINFO_NUM_SLP_USES (res) = 0;
9086 GROUP_FIRST_ELEMENT (res) = NULL;
9087 GROUP_NEXT_ELEMENT (res) = NULL;
9088 GROUP_SIZE (res) = 0;
9089 GROUP_STORE_COUNT (res) = 0;
9090 GROUP_GAP (res) = 0;
9091 GROUP_SAME_DR_STMT (res) = NULL;
9093 return res;
9097 /* Create a hash table for stmt_vec_info. */
9099 void
9100 init_stmt_vec_info_vec (void)
9102 gcc_assert (!stmt_vec_info_vec.exists ());
9103 stmt_vec_info_vec.create (50);
9107 /* Free hash table for stmt_vec_info. */
9109 void
9110 free_stmt_vec_info_vec (void)
9112 unsigned int i;
9113 stmt_vec_info info;
9114 FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
9115 if (info != NULL)
9116 free_stmt_vec_info (STMT_VINFO_STMT (info));
9117 gcc_assert (stmt_vec_info_vec.exists ());
9118 stmt_vec_info_vec.release ();
9122 /* Free stmt vectorization related info. */
9124 void
9125 free_stmt_vec_info (gimple *stmt)
9127 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
9129 if (!stmt_info)
9130 return;
9132 /* Check if this statement has a related "pattern stmt"
9133 (introduced by the vectorizer during the pattern recognition
9134 pass). Free pattern's stmt_vec_info and def stmt's stmt_vec_info
9135 too. */
9136 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
9138 stmt_vec_info patt_info
9139 = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
9140 if (patt_info)
9142 gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
9143 gimple *patt_stmt = STMT_VINFO_STMT (patt_info);
9144 gimple_set_bb (patt_stmt, NULL);
9145 tree lhs = gimple_get_lhs (patt_stmt);
9146 if (lhs && TREE_CODE (lhs) == SSA_NAME)
9147 release_ssa_name (lhs);
9148 if (seq)
9150 gimple_stmt_iterator si;
9151 for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
9153 gimple *seq_stmt = gsi_stmt (si);
9154 gimple_set_bb (seq_stmt, NULL);
9155 lhs = gimple_get_lhs (seq_stmt);
9156 if (lhs && TREE_CODE (lhs) == SSA_NAME)
9157 release_ssa_name (lhs);
9158 free_stmt_vec_info (seq_stmt);
9161 free_stmt_vec_info (patt_stmt);
9165 STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
9166 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
9167 set_vinfo_for_stmt (stmt, NULL);
9168 free (stmt_info);
9172 /* Function get_vectype_for_scalar_type_and_size.
9174 Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
9175 by the target. */
9177 static tree
9178 get_vectype_for_scalar_type_and_size (tree scalar_type, poly_uint64 size)
9180 tree orig_scalar_type = scalar_type;
9181 scalar_mode inner_mode;
9182 machine_mode simd_mode;
9183 poly_uint64 nunits;
9184 tree vectype;
9186 if (!is_int_mode (TYPE_MODE (scalar_type), &inner_mode)
9187 && !is_float_mode (TYPE_MODE (scalar_type), &inner_mode))
9188 return NULL_TREE;
9190 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
9192 /* For vector types of elements whose mode precision doesn't
9193 match their type's precision we use an element type of mode
9194 precision. The vectorization routines will have to make sure
9195 they support the proper result truncation/extension.
9196 We also make sure to build vector types with INTEGER_TYPE
9197 component type only. */
9198 if (INTEGRAL_TYPE_P (scalar_type)
9199 && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
9200 || TREE_CODE (scalar_type) != INTEGER_TYPE))
9201 scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
9202 TYPE_UNSIGNED (scalar_type));
9204 /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
9205 When the component mode passes the above test simply use a type
9206 corresponding to that mode. The theory is that any use that
9207 would cause problems with this will disable vectorization anyway. */
9208 else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
9209 && !INTEGRAL_TYPE_P (scalar_type))
9210 scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);
9212 /* We can't build a vector type of elements with alignment bigger than
9213 their size. */
9214 else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
9215 scalar_type = lang_hooks.types.type_for_mode (inner_mode,
9216 TYPE_UNSIGNED (scalar_type));
9218 /* If we fell back to using the mode, fail if there was
9219 no scalar type for it. */
9220 if (scalar_type == NULL_TREE)
9221 return NULL_TREE;
9223 /* If no size was supplied, use the mode the target prefers. Otherwise
9224 look up a vector mode of the specified size. */
9225 if (known_eq (size, 0U))
9226 simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
9227 else if (!multiple_p (size, nbytes, &nunits)
9228 || !mode_for_vector (inner_mode, nunits).exists (&simd_mode))
9229 return NULL_TREE;
9230 /* NOTE: nunits == 1 is allowed to support single element vector types. */
9231 if (!multiple_p (GET_MODE_SIZE (simd_mode), nbytes, &nunits))
9232 return NULL_TREE;
9234 vectype = build_vector_type (scalar_type, nunits);
9236 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
9237 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
9238 return NULL_TREE;
9240 /* Re-attach the address-space qualifier if we canonicalized the scalar
9241 type. */
9242 if (TYPE_ADDR_SPACE (orig_scalar_type) != TYPE_ADDR_SPACE (vectype))
9243 return build_qualified_type
9244 (vectype, KEEP_QUAL_ADDR_SPACE (TYPE_QUALS (orig_scalar_type)));
9246 return vectype;
9249 poly_uint64 current_vector_size;
9251 /* Function get_vectype_for_scalar_type.
9253 Returns the vector type corresponding to SCALAR_TYPE as supported
9254 by the target. */
9256 tree
9257 get_vectype_for_scalar_type (tree scalar_type)
9259 tree vectype;
9260 vectype = get_vectype_for_scalar_type_and_size (scalar_type,
9261 current_vector_size);
9262 if (vectype
9263 && known_eq (current_vector_size, 0U))
9264 current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
9265 return vectype;
9268 /* Function get_mask_type_for_scalar_type.
9270 Returns the mask type corresponding to a result of comparison
9271 of vectors of specified SCALAR_TYPE as supported by target. */
9273 tree
9274 get_mask_type_for_scalar_type (tree scalar_type)
9276 tree vectype = get_vectype_for_scalar_type (scalar_type);
9278 if (!vectype)
9279 return NULL;
9281 return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype),
9282 current_vector_size);
9285 /* Function get_same_sized_vectype
9287 Returns a vector type corresponding to SCALAR_TYPE of size
9288 VECTOR_TYPE if supported by the target. */
9290 tree
9291 get_same_sized_vectype (tree scalar_type, tree vector_type)
9293 if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type))
9294 return build_same_sized_truth_vector_type (vector_type);
9296 return get_vectype_for_scalar_type_and_size
9297 (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
9300 /* Function vect_is_simple_use.
9302 Input:
9303 VINFO - the vect info of the loop or basic block that is being vectorized.
9304 OPERAND - operand in the loop or bb.
9305 Output:
9306 DEF_STMT - the defining stmt in case OPERAND is an SSA_NAME.
9307 DT - the type of definition
9309 Returns whether a stmt with OPERAND can be vectorized.
9310 For loops, supportable operands are constants, loop invariants, and operands
9311 that are defined by the current iteration of the loop. Unsupportable
9312 operands are those that are defined by a previous iteration of the loop (as
9313 is the case in reduction/induction computations).
9314 For basic blocks, supportable operands are constants and bb invariants.
9315 For now, operands defined outside the basic block are not supported. */
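/* Editorial sketch of the classification, for a loop like (names are
   hypothetical):

     for (i = 0; i < n; i++)
       a[i] = b[i] * c + 1;

   When analysing the multiplication, b[i] is defined by a load in the
   current iteration (vect_internal_def), c is defined outside the loop
   (vect_external_def) and the constant 1 is vect_constant_def.  Operands
   defined by the phi of a reduction or induction cycle are instead
   classified as vect_reduction_def resp. vect_induction_def below.  */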
9317 bool
9318 vect_is_simple_use (tree operand, vec_info *vinfo,
9319 gimple **def_stmt, enum vect_def_type *dt)
9321 *def_stmt = NULL;
9322 *dt = vect_unknown_def_type;
9324 if (dump_enabled_p ())
9326 dump_printf_loc (MSG_NOTE, vect_location,
9327 "vect_is_simple_use: operand ");
9328 dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
9329 dump_printf (MSG_NOTE, "\n");
9332 if (CONSTANT_CLASS_P (operand))
9334 *dt = vect_constant_def;
9335 return true;
9338 if (is_gimple_min_invariant (operand))
9340 *dt = vect_external_def;
9341 return true;
9344 if (TREE_CODE (operand) != SSA_NAME)
9346 if (dump_enabled_p ())
9347 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9348 "not ssa-name.\n");
9349 return false;
9352 if (SSA_NAME_IS_DEFAULT_DEF (operand))
9354 *dt = vect_external_def;
9355 return true;
9358 *def_stmt = SSA_NAME_DEF_STMT (operand);
9359 if (dump_enabled_p ())
9361 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
9362 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
9365 if (! vect_stmt_in_region_p (vinfo, *def_stmt))
9366 *dt = vect_external_def;
9367 else
9369 stmt_vec_info stmt_vinfo = vinfo_for_stmt (*def_stmt);
9370 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
9373 if (dump_enabled_p ())
9375 dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
9376 switch (*dt)
9378 case vect_uninitialized_def:
9379 dump_printf (MSG_NOTE, "uninitialized\n");
9380 break;
9381 case vect_constant_def:
9382 dump_printf (MSG_NOTE, "constant\n");
9383 break;
9384 case vect_external_def:
9385 dump_printf (MSG_NOTE, "external\n");
9386 break;
9387 case vect_internal_def:
9388 dump_printf (MSG_NOTE, "internal\n");
9389 break;
9390 case vect_induction_def:
9391 dump_printf (MSG_NOTE, "induction\n");
9392 break;
9393 case vect_reduction_def:
9394 dump_printf (MSG_NOTE, "reduction\n");
9395 break;
9396 case vect_double_reduction_def:
9397 dump_printf (MSG_NOTE, "double reduction\n");
9398 break;
9399 case vect_nested_cycle:
9400 dump_printf (MSG_NOTE, "nested cycle\n");
9401 break;
9402 case vect_unknown_def_type:
9403 dump_printf (MSG_NOTE, "unknown\n");
9404 break;
9408 if (*dt == vect_unknown_def_type)
9410 if (dump_enabled_p ())
9411 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9412 "Unsupported pattern.\n");
9413 return false;
9416 switch (gimple_code (*def_stmt))
9418 case GIMPLE_PHI:
9419 case GIMPLE_ASSIGN:
9420 case GIMPLE_CALL:
9421 break;
9422 default:
9423 if (dump_enabled_p ())
9424 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9425 "unsupported defining stmt:\n");
9426 return false;
9429 return true;
9432 /* Function vect_is_simple_use.
9434 Same as vect_is_simple_use but also determines the vector operand
9435 type of OPERAND and stores it in *VECTYPE. If the definition of
9436 OPERAND is vect_uninitialized_def, vect_constant_def or
9437 vect_external_def, *VECTYPE will be set to NULL_TREE and the caller
9438 is responsible for computing the best-suited vector type for the
9439 scalar operand. */
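/* Editorial usage sketch (variable names are hypothetical):

     gimple *def_stmt;
     enum vect_def_type dt;
     tree vectype;
     if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt, &vectype))
       return false;
     if (!vectype)
       vectype = get_vectype_for_scalar_type (TREE_TYPE (op));

   i.e. for constant and external defs the caller derives a vector type
   from the scalar type itself, as described above.  */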
9441 bool
9442 vect_is_simple_use (tree operand, vec_info *vinfo,
9443 gimple **def_stmt, enum vect_def_type *dt, tree *vectype)
9445 if (!vect_is_simple_use (operand, vinfo, def_stmt, dt))
9446 return false;
9448 /* Now get a vector type if the def is internal, otherwise supply
9449 NULL_TREE and leave it up to the caller to figure out a proper
9450 type for the use stmt. */
9451 if (*dt == vect_internal_def
9452 || *dt == vect_induction_def
9453 || *dt == vect_reduction_def
9454 || *dt == vect_double_reduction_def
9455 || *dt == vect_nested_cycle)
9457 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
9459 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
9460 && !STMT_VINFO_RELEVANT (stmt_info)
9461 && !STMT_VINFO_LIVE_P (stmt_info))
9462 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
9464 *vectype = STMT_VINFO_VECTYPE (stmt_info);
9465 gcc_assert (*vectype != NULL_TREE);
9467 else if (*dt == vect_uninitialized_def
9468 || *dt == vect_constant_def
9469 || *dt == vect_external_def)
9470 *vectype = NULL_TREE;
9471 else
9472 gcc_unreachable ();
9474 return true;
9478 /* Function supportable_widening_operation
9480 Check whether an operation represented by the code CODE is a
9481 widening operation that is supported by the target platform in
9482 vector form (i.e., when operating on arguments of type VECTYPE_IN
9483 producing a result of type VECTYPE_OUT).
9485 Widening operations we currently support are NOP (CONVERT), FLOAT, WIDEN_MULT,
9486 WIDEN_LSHIFT, DOT_PROD and SAD. This function checks if these operations are supported
9487 by the target platform either directly (via vector tree-codes), or via
9488 target builtins.
9490 Output:
9491 - CODE1 and CODE2 are codes of vector operations to be used when
9492 vectorizing the operation, if available.
9493 - MULTI_STEP_CVT determines the number of required intermediate steps in
9494 case of multi-step conversion (like char->short->int - in that case
9495 MULTI_STEP_CVT will be 1).
9496 - INTERM_TYPES contains the intermediate type required to perform the
9497 widening operation (short in the above example). */
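/* Editorial sketch of the outputs for a char->int conversion, which needs
   two widening steps:

     CODE1/CODE2     = VEC_UNPACK_LO_EXPR / VEC_UNPACK_HI_EXPR
     MULTI_STEP_CVT  = 1
     INTERM_TYPES    = { vector of short }

   i.e. each input vector of chars is first unpacked to vectors of shorts
   and those are unpacked again to vectors of ints.  */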
9499 bool
9500 supportable_widening_operation (enum tree_code code, gimple *stmt,
9501 tree vectype_out, tree vectype_in,
9502 enum tree_code *code1, enum tree_code *code2,
9503 int *multi_step_cvt,
9504 vec<tree> *interm_types)
9506 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
9507 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
9508 struct loop *vect_loop = NULL;
9509 machine_mode vec_mode;
9510 enum insn_code icode1, icode2;
9511 optab optab1, optab2;
9512 tree vectype = vectype_in;
9513 tree wide_vectype = vectype_out;
9514 enum tree_code c1, c2;
9515 int i;
9516 tree prev_type, intermediate_type;
9517 machine_mode intermediate_mode, prev_mode;
9518 optab optab3, optab4;
9520 *multi_step_cvt = 0;
9521 if (loop_info)
9522 vect_loop = LOOP_VINFO_LOOP (loop_info);
9524 switch (code)
9526 case WIDEN_MULT_EXPR:
9527 /* The result of a vectorized widening operation usually requires
9528 two vectors (because the widened results do not fit into one vector).
9529 The generated vector results would normally be expected to be
9530 generated in the same order as in the original scalar computation,
9531 i.e. if 8 results are generated in each vector iteration, they are
9532 to be organized as follows:
9533 vect1: [res1,res2,res3,res4],
9534 vect2: [res5,res6,res7,res8].
9536 However, in the special case that the result of the widening
9537 operation is used in a reduction computation only, the order doesn't
9538 matter (because when vectorizing a reduction we change the order of
9539 the computation). Some targets can take advantage of this and
9540 generate more efficient code. For example, targets like Altivec,
9541 that support widen_mult using a sequence of {mult_even,mult_odd}
9542 generate the following vectors:
9543 vect1: [res1,res3,res5,res7],
9544 vect2: [res2,res4,res6,res8].
9546 When vectorizing outer-loops, we execute the inner-loop sequentially
9547 (each vectorized inner-loop iteration contributes to VF outer-loop
9548 iterations in parallel). We therefore don't allow changing the
9549 order of the computation in the inner-loop during outer-loop
9550 vectorization. */
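/* Editorial illustration of the even/odd layout above for a widening
   multiply of eight shorts a0..a7 by b0..b7 into ints:

     vect1: [a0*b0, a2*b2, a4*b4, a6*b6],
     vect2: [a1*b1, a3*b3, a5*b5, a7*b7].

   This is acceptable when the products are only accumulated into a
   reduction, since the final sum does not depend on their order.  */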
9551 /* TODO: Another case in which order doesn't *really* matter is when we
9552 widen and then contract again, e.g. (short)((int)x * y >> 8).
9553 Normally, pack_trunc performs an even/odd permute, whereas the
9554 repack from an even/odd expansion would be an interleave, which
9555 would be significantly simpler for e.g. AVX2. */
9556 /* In any case, in order to avoid duplicating the code below, recurse
9557 on VEC_WIDEN_MULT_EVEN_EXPR. If it succeeds, all the return values
9558 are properly set up for the caller. If we fail, we'll continue with
9559 a VEC_WIDEN_MULT_LO/HI_EXPR check. */
9560 if (vect_loop
9561 && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
9562 && !nested_in_vect_loop_p (vect_loop, stmt)
9563 && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
9564 stmt, vectype_out, vectype_in,
9565 code1, code2, multi_step_cvt,
9566 interm_types))
9568 /* Elements in a vector with the vect_used_by_reduction property cannot
9569 be reordered if the use chain with this property does not have the
9570 same operation. One such example is s += a * b, where elements
9571 in a and b cannot be reordered. Here we check if the vector defined
9572 by STMT is only directly used in the reduction statement. */
9573 tree lhs = gimple_assign_lhs (stmt);
9574 use_operand_p dummy;
9575 gimple *use_stmt;
9576 stmt_vec_info use_stmt_info = NULL;
9577 if (single_imm_use (lhs, &dummy, &use_stmt)
9578 && (use_stmt_info = vinfo_for_stmt (use_stmt))
9579 && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
9580 return true;
9582 c1 = VEC_WIDEN_MULT_LO_EXPR;
9583 c2 = VEC_WIDEN_MULT_HI_EXPR;
9584 break;
9586 case DOT_PROD_EXPR:
9587 c1 = DOT_PROD_EXPR;
9588 c2 = DOT_PROD_EXPR;
9589 break;
9591 case SAD_EXPR:
9592 c1 = SAD_EXPR;
9593 c2 = SAD_EXPR;
9594 break;
9596 case VEC_WIDEN_MULT_EVEN_EXPR:
9597 /* Support the recursion induced just above. */
9598 c1 = VEC_WIDEN_MULT_EVEN_EXPR;
9599 c2 = VEC_WIDEN_MULT_ODD_EXPR;
9600 break;
9602 case WIDEN_LSHIFT_EXPR:
9603 c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
9604 c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
9605 break;
9607 CASE_CONVERT:
9608 c1 = VEC_UNPACK_LO_EXPR;
9609 c2 = VEC_UNPACK_HI_EXPR;
9610 break;
9612 case FLOAT_EXPR:
9613 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
9614 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
9615 break;
9617 case FIX_TRUNC_EXPR:
9618 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
9619 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
9620 computing the operation. */
9621 return false;
9623 default:
9624 gcc_unreachable ();
9627 if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
9628 std::swap (c1, c2);
9630 if (code == FIX_TRUNC_EXPR)
9633 /* The signedness is determined from the output operand. */
9633 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
9634 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
9636 else
9638 optab1 = optab_for_tree_code (c1, vectype, optab_default);
9639 optab2 = optab_for_tree_code (c2, vectype, optab_default);
9642 if (!optab1 || !optab2)
9643 return false;
9645 vec_mode = TYPE_MODE (vectype);
9646 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
9647 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
9648 return false;
9650 *code1 = c1;
9651 *code2 = c2;
9653 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
9654 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
9655 /* For scalar masks we may have different boolean
9656 vector types having the same QImode. Thus we
9657 add an additional check for the number of elements. */
9658 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
9659 || (TYPE_VECTOR_SUBPARTS (vectype) / 2
9660 == TYPE_VECTOR_SUBPARTS (wide_vectype)));
9662 /* Check if it's a multi-step conversion that can be done using intermediate
9663 types. */
9665 prev_type = vectype;
9666 prev_mode = vec_mode;
9668 if (!CONVERT_EXPR_CODE_P (code))
9669 return false;
9671 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
9672 intermediate steps in the promotion sequence. We try
9673 MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
9674 not. */
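/* Editorial sketch of one trip through the loop below, widening chars to
   ints: prev_type is the char vector type and intermediate_type the
   corresponding short vector type.  The step is recorded only if the
   char->short handlers of optab1/optab2 produce exactly intermediate_mode
   and the short vector type itself has LO/HI optabs (optab3/optab4);
   otherwise we give up on a multi-step conversion.  */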
9675 interm_types->create (MAX_INTERM_CVT_STEPS);
9676 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
9678 intermediate_mode = insn_data[icode1].operand[0].mode;
9679 if (VECTOR_BOOLEAN_TYPE_P (prev_type))
9681 intermediate_type
9682 = build_truth_vector_type (TYPE_VECTOR_SUBPARTS (prev_type) / 2,
9683 current_vector_size);
9684 if (intermediate_mode != TYPE_MODE (intermediate_type))
9685 return false;
9687 else
9688 intermediate_type
9689 = lang_hooks.types.type_for_mode (intermediate_mode,
9690 TYPE_UNSIGNED (prev_type));
9692 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
9693 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
9695 if (!optab3 || !optab4
9696 || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
9697 || insn_data[icode1].operand[0].mode != intermediate_mode
9698 || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
9699 || insn_data[icode2].operand[0].mode != intermediate_mode
9700 || ((icode1 = optab_handler (optab3, intermediate_mode))
9701 == CODE_FOR_nothing)
9702 || ((icode2 = optab_handler (optab4, intermediate_mode))
9703 == CODE_FOR_nothing))
9704 break;
9706 interm_types->quick_push (intermediate_type);
9707 (*multi_step_cvt)++;
9709 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
9710 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
9711 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
9712 || (TYPE_VECTOR_SUBPARTS (intermediate_type) / 2
9713 == TYPE_VECTOR_SUBPARTS (wide_vectype)));
9715 prev_type = intermediate_type;
9716 prev_mode = intermediate_mode;
9719 interm_types->release ();
9720 return false;
9724 /* Function supportable_narrowing_operation
9726 Check whether an operation represented by the code CODE is a
9727 narrowing operation that is supported by the target platform in
9728 vector form (i.e., when operating on arguments of type VECTYPE_IN
9729 and producing a result of type VECTYPE_OUT).
9731 Narrowing operations we currently support are NOP (CONVERT) and
9732 FIX_TRUNC. This function checks if these operations are supported by
9733 the target platform directly via vector tree-codes.
9735 Output:
9736 - CODE1 is the code of a vector operation to be used when
9737 vectorizing the operation, if available.
9738 - MULTI_STEP_CVT determines the number of required intermediate steps in
9739 case of multi-step conversion (like int->short->char - in that case
9740 MULTI_STEP_CVT will be 1).
9741 - INTERM_TYPES contains the intermediate type required to perform the
9742 narrowing operation (short in the above example). */
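/* Editorial sketch of the outputs for an int->char conversion, which needs
   two narrowing steps:

     CODE1          = VEC_PACK_TRUNC_EXPR
     MULTI_STEP_CVT = 1
     INTERM_TYPES   = { vector of short }

   i.e. pairs of int vectors are first packed into vectors of shorts and
   pairs of those are packed again into vectors of chars.  */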
9744 bool
9745 supportable_narrowing_operation (enum tree_code code,
9746 tree vectype_out, tree vectype_in,
9747 enum tree_code *code1, int *multi_step_cvt,
9748 vec<tree> *interm_types)
9750 machine_mode vec_mode;
9751 enum insn_code icode1;
9752 optab optab1, interm_optab;
9753 tree vectype = vectype_in;
9754 tree narrow_vectype = vectype_out;
9755 enum tree_code c1;
9756 tree intermediate_type, prev_type;
9757 machine_mode intermediate_mode, prev_mode;
9758 int i;
9759 bool uns;
9761 *multi_step_cvt = 0;
9762 switch (code)
9764 CASE_CONVERT:
9765 c1 = VEC_PACK_TRUNC_EXPR;
9766 break;
9768 case FIX_TRUNC_EXPR:
9769 c1 = VEC_PACK_FIX_TRUNC_EXPR;
9770 break;
9772 case FLOAT_EXPR:
9773 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
9774 tree code and optabs used for computing the operation. */
9775 return false;
9777 default:
9778 gcc_unreachable ();
9781 if (code == FIX_TRUNC_EXPR)
9782 /* The signedness is determined from the output operand. */
9783 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
9784 else
9785 optab1 = optab_for_tree_code (c1, vectype, optab_default);
9787 if (!optab1)
9788 return false;
9790 vec_mode = TYPE_MODE (vectype);
9791 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
9792 return false;
9794 *code1 = c1;
9796 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
9797 /* For scalar masks we may have different boolean
9798 vector types having the same QImode. Thus we
9799 add an additional check for the number of elements. */
9800 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
9801 || (TYPE_VECTOR_SUBPARTS (vectype) * 2
9802 == TYPE_VECTOR_SUBPARTS (narrow_vectype)));
9804 /* Check if it's a multi-step conversion that can be done using intermediate
9805 types. */
9806 prev_mode = vec_mode;
9807 prev_type = vectype;
9808 if (code == FIX_TRUNC_EXPR)
9809 uns = TYPE_UNSIGNED (vectype_out);
9810 else
9811 uns = TYPE_UNSIGNED (vectype);
9813 /* For multi-step FIX_TRUNC_EXPR prefer signed floating-point to integer
9814 conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
9815 costly than signed. */
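/* Editorial note: e.g. for a multi-step conversion from a vector of
   doubles to a vector of unsigned shorts, the block below checks whether
   the signed variant of the VEC_PACK_FIX_TRUNC optab is available with
   the same result mode; if so, the conversion and the intermediate
   integer types are treated as signed, which many targets implement
   more cheaply.  */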
9816 if (code == FIX_TRUNC_EXPR && uns)
9818 enum insn_code icode2;
9820 intermediate_type
9821 = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
9822 interm_optab
9823 = optab_for_tree_code (c1, intermediate_type, optab_default);
9824 if (interm_optab != unknown_optab
9825 && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
9826 && insn_data[icode1].operand[0].mode
9827 == insn_data[icode2].operand[0].mode)
9829 uns = false;
9830 optab1 = interm_optab;
9831 icode1 = icode2;
9835 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
9836 intermediate steps in the narrowing sequence. We try
9837 MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not. */
9838 interm_types->create (MAX_INTERM_CVT_STEPS);
9839 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
9841 intermediate_mode = insn_data[icode1].operand[0].mode;
9842 if (VECTOR_BOOLEAN_TYPE_P (prev_type))
9844 intermediate_type
9845 = build_truth_vector_type (TYPE_VECTOR_SUBPARTS (prev_type) * 2,
9846 current_vector_size);
9847 if (intermediate_mode != TYPE_MODE (intermediate_type))
9848 return false;
9850 else
9851 intermediate_type
9852 = lang_hooks.types.type_for_mode (intermediate_mode, uns);
9853 interm_optab
9854 = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
9855 optab_default);
9856 if (!interm_optab
9857 || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
9858 || insn_data[icode1].operand[0].mode != intermediate_mode
9859 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
9860 == CODE_FOR_nothing))
9861 break;
9863 interm_types->quick_push (intermediate_type);
9864 (*multi_step_cvt)++;
9866 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
9867 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
9868 || (TYPE_VECTOR_SUBPARTS (intermediate_type) * 2
9869 == TYPE_VECTOR_SUBPARTS (narrow_vectype)));
9871 prev_mode = intermediate_mode;
9872 prev_type = intermediate_type;
9873 optab1 = interm_optab;
9876 interm_types->release ();
9877 return false;