Add a vec_perm_indices_to_tree helper function
[official-gcc.git] / gcc / tree-vect-stmts.c
blob 8f26320f01803eb62d4266d1cac1373d4f485fd7
1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003-2017 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
4 and Ira Rosen <irar@il.ibm.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "ssa.h"
31 #include "optabs-tree.h"
32 #include "insn-config.h"
33 #include "recog.h" /* FIXME: for insn_data */
34 #include "cgraph.h"
35 #include "dumpfile.h"
36 #include "alias.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
39 #include "tree-eh.h"
40 #include "gimplify.h"
41 #include "gimple-iterator.h"
42 #include "gimplify-me.h"
43 #include "tree-cfg.h"
44 #include "tree-ssa-loop-manip.h"
45 #include "cfgloop.h"
46 #include "tree-ssa-loop.h"
47 #include "tree-scalar-evolution.h"
48 #include "tree-vectorizer.h"
49 #include "builtins.h"
50 #include "internal-fn.h"
51 #include "tree-vector-builder.h"
52 #include "vec-perm-indices.h"
54 /* For lang_hooks.types.type_for_mode. */
55 #include "langhooks.h"
57 /* Says whether a statement is a load, a store of a vectorized statement
58 result, or a store of an invariant value. */
59 enum vec_load_store_type {
60 VLS_LOAD,
61 VLS_STORE,
62 VLS_STORE_INVARIANT
65 /* Return the vectorized type for the given statement. */
67 tree
68 stmt_vectype (struct _stmt_vec_info *stmt_info)
70 return STMT_VINFO_VECTYPE (stmt_info);
73 /* Return TRUE iff the given statement is in an inner loop relative to
74 the loop being vectorized. */
75 bool
76 stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
78 gimple *stmt = STMT_VINFO_STMT (stmt_info);
79 basic_block bb = gimple_bb (stmt);
80 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
81 struct loop* loop;
83 if (!loop_vinfo)
84 return false;
86 loop = LOOP_VINFO_LOOP (loop_vinfo);
88 return (bb->loop_father == loop->inner);
91 /* Record the cost of a statement, either by directly informing the
92 target model or by saving it in a vector for later processing.
93 Return a preliminary estimate of the statement's cost. */
95 unsigned
96 record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
97 enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
98 int misalign, enum vect_cost_model_location where)
100 if ((kind == vector_load || kind == unaligned_load)
101 && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
102 kind = vector_gather_load;
103 if ((kind == vector_store || kind == unaligned_store)
104 && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
105 kind = vector_scatter_store;
106 if (body_cost_vec)
108 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
109 stmt_info_for_cost si = { count, kind,
110 stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
111 misalign };
112 body_cost_vec->safe_push (si);
113 return (unsigned)
114 (builtin_vectorization_cost (kind, vectype, misalign) * count);
116 else
117 return add_stmt_cost (stmt_info->vinfo->target_cost_data,
118 count, kind, stmt_info, misalign, where);
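/* Usage sketch (illustrative; the local names are placeholders): a caller
   that wants the costs accumulated for later processing passes a cost
   vector, e.g.

     stmt_vector_for_cost cost_vec = vNULL;
     unsigned inside_cost
       = record_stmt_cost (&cost_vec, ncopies, vector_stmt,
                           stmt_info, 0, vect_body);

   whereas passing a NULL vector hands the cost straight to add_stmt_cost
   on the target cost data, as the body above shows.  */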
121 /* Return a variable of type ELEM_TYPE[NELEMS]. */
123 static tree
124 create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
126 return create_tmp_var (build_array_type_nelts (elem_type, nelems),
127 "vect_array");
130 /* ARRAY is an array of vectors created by create_vector_array.
131 Return an SSA_NAME for the vector in index N. The reference
132 is part of the vectorization of STMT and the vector is associated
133 with scalar destination SCALAR_DEST. */
135 static tree
136 read_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
137 tree array, unsigned HOST_WIDE_INT n)
139 tree vect_type, vect, vect_name, array_ref;
140 gimple *new_stmt;
142 gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
143 vect_type = TREE_TYPE (TREE_TYPE (array));
144 vect = vect_create_destination_var (scalar_dest, vect_type);
145 array_ref = build4 (ARRAY_REF, vect_type, array,
146 build_int_cst (size_type_node, n),
147 NULL_TREE, NULL_TREE);
149 new_stmt = gimple_build_assign (vect, array_ref);
150 vect_name = make_ssa_name (vect, new_stmt);
151 gimple_assign_set_lhs (new_stmt, vect_name);
152 vect_finish_stmt_generation (stmt, new_stmt, gsi);
154 return vect_name;
157 /* ARRAY is an array of vectors created by create_vector_array.
158 Emit code to store SSA_NAME VECT in index N of the array.
159 The store is part of the vectorization of STMT. */
161 static void
162 write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect,
163 tree array, unsigned HOST_WIDE_INT n)
165 tree array_ref;
166 gimple *new_stmt;
168 array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
169 build_int_cst (size_type_node, n),
170 NULL_TREE, NULL_TREE);
172 new_stmt = gimple_build_assign (array_ref, vect);
173 vect_finish_stmt_generation (stmt, new_stmt, gsi);
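/* Illustrative sketch (placeholder index and operand names): together with
   create_vector_array, the two helpers above shuttle vectors through a
   temporary array variable, much as a load/store-lanes expansion would:

     tree array = create_vector_array (vectype, vec_num);
     write_vector_array (stmt, gsi, vec_oprnd, array, i);
     ...
     tree new_name = read_vector_array (stmt, gsi, scalar_dest, array, i);  */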
176 /* PTR is a pointer to an array of type TYPE. Return a representation
177 of *PTR. The memory reference replaces those in FIRST_DR
178 (and its group). */
180 static tree
181 create_array_ref (tree type, tree ptr, tree alias_ptr_type)
183 tree mem_ref;
185 mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
186 /* Arrays have the same alignment as their type. */
187 set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
188 return mem_ref;
191 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
193 /* Function vect_mark_relevant.
195 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
197 static void
198 vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
199 enum vect_relevant relevant, bool live_p)
201 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
202 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
203 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
204 gimple *pattern_stmt;
206 if (dump_enabled_p ())
208 dump_printf_loc (MSG_NOTE, vect_location,
209 "mark relevant %d, live %d: ", relevant, live_p);
210 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
213 /* If this stmt is an original stmt in a pattern, we might need to mark its
214 related pattern stmt instead of the original stmt. However, such stmts
 215   may have their own uses that are not in any pattern; in such cases the
216 stmt itself should be marked. */
217 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
219 /* This is the last stmt in a sequence that was detected as a
220 pattern that can potentially be vectorized. Don't mark the stmt
221 as relevant/live because it's not going to be vectorized.
222 Instead mark the pattern-stmt that replaces it. */
224 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
226 if (dump_enabled_p ())
227 dump_printf_loc (MSG_NOTE, vect_location,
228 "last stmt in pattern. don't mark"
229 " relevant/live.\n");
230 stmt_info = vinfo_for_stmt (pattern_stmt);
231 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
232 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
233 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
234 stmt = pattern_stmt;
237 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
238 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
239 STMT_VINFO_RELEVANT (stmt_info) = relevant;
241 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
242 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
244 if (dump_enabled_p ())
245 dump_printf_loc (MSG_NOTE, vect_location,
246 "already marked relevant/live.\n");
247 return;
250 worklist->safe_push (stmt);
254 /* Function is_simple_and_all_uses_invariant
256 Return true if STMT is simple and all uses of it are invariant. */
258 bool
259 is_simple_and_all_uses_invariant (gimple *stmt, loop_vec_info loop_vinfo)
261 tree op;
262 gimple *def_stmt;
263 ssa_op_iter iter;
265 if (!is_gimple_assign (stmt))
266 return false;
268 FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
270 enum vect_def_type dt = vect_uninitialized_def;
272 if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt))
274 if (dump_enabled_p ())
275 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
276 "use not simple.\n");
277 return false;
280 if (dt != vect_external_def && dt != vect_constant_def)
281 return false;
283 return true;
286 /* Function vect_stmt_relevant_p.
288 Return true if STMT in loop that is represented by LOOP_VINFO is
289 "relevant for vectorization".
291 A stmt is considered "relevant for vectorization" if:
292 - it has uses outside the loop.
293 - it has vdefs (it alters memory).
 294    - it is a control stmt in the loop (except for the exit condition).
296 CHECKME: what other side effects would the vectorizer allow? */
298 static bool
299 vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
300 enum vect_relevant *relevant, bool *live_p)
302 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
303 ssa_op_iter op_iter;
304 imm_use_iterator imm_iter;
305 use_operand_p use_p;
306 def_operand_p def_p;
308 *relevant = vect_unused_in_scope;
309 *live_p = false;
311 /* cond stmt other than loop exit cond. */
312 if (is_ctrl_stmt (stmt)
313 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
314 != loop_exit_ctrl_vec_info_type)
315 *relevant = vect_used_in_scope;
317 /* changing memory. */
318 if (gimple_code (stmt) != GIMPLE_PHI)
319 if (gimple_vdef (stmt)
320 && !gimple_clobber_p (stmt))
322 if (dump_enabled_p ())
323 dump_printf_loc (MSG_NOTE, vect_location,
324 "vec_stmt_relevant_p: stmt has vdefs.\n");
325 *relevant = vect_used_in_scope;
328 /* uses outside the loop. */
329 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
331 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
333 basic_block bb = gimple_bb (USE_STMT (use_p));
334 if (!flow_bb_inside_loop_p (loop, bb))
336 if (dump_enabled_p ())
337 dump_printf_loc (MSG_NOTE, vect_location,
338 "vec_stmt_relevant_p: used out of loop.\n");
340 if (is_gimple_debug (USE_STMT (use_p)))
341 continue;
343 /* We expect all such uses to be in the loop exit phis
344 (because of loop closed form) */
345 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
346 gcc_assert (bb == single_exit (loop)->dest);
348 *live_p = true;
353 if (*live_p && *relevant == vect_unused_in_scope
354 && !is_simple_and_all_uses_invariant (stmt, loop_vinfo))
356 if (dump_enabled_p ())
357 dump_printf_loc (MSG_NOTE, vect_location,
358 "vec_stmt_relevant_p: stmt live but not relevant.\n");
359 *relevant = vect_used_only_live;
362 return (*live_p || *relevant);
366 /* Function exist_non_indexing_operands_for_use_p
368 USE is one of the uses attached to STMT. Check if USE is
369 used in STMT for anything other than indexing an array. */
371 static bool
372 exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)
374 tree operand;
375 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
377 /* USE corresponds to some operand in STMT. If there is no data
378 reference in STMT, then any operand that corresponds to USE
379 is not indexing an array. */
380 if (!STMT_VINFO_DATA_REF (stmt_info))
381 return true;
 383   /* STMT has a data_ref. FORNOW this means that it is of one of
384 the following forms:
385 -1- ARRAY_REF = var
386 -2- var = ARRAY_REF
387 (This should have been verified in analyze_data_refs).
389 'var' in the second case corresponds to a def, not a use,
390 so USE cannot correspond to any operands that are not used
391 for array indexing.
393 Therefore, all we need to check is if STMT falls into the
394 first case, and whether var corresponds to USE. */
396 if (!gimple_assign_copy_p (stmt))
398 if (is_gimple_call (stmt)
399 && gimple_call_internal_p (stmt))
400 switch (gimple_call_internal_fn (stmt))
402 case IFN_MASK_STORE:
403 operand = gimple_call_arg (stmt, 3);
404 if (operand == use)
405 return true;
406 /* FALLTHRU */
407 case IFN_MASK_LOAD:
408 operand = gimple_call_arg (stmt, 2);
409 if (operand == use)
410 return true;
411 break;
412 default:
413 break;
415 return false;
418 if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
419 return false;
420 operand = gimple_assign_rhs1 (stmt);
421 if (TREE_CODE (operand) != SSA_NAME)
422 return false;
424 if (operand == use)
425 return true;
427 return false;
432 Function process_use.
434 Inputs:
435 - a USE in STMT in a loop represented by LOOP_VINFO
436 - RELEVANT - enum value to be set in the STMT_VINFO of the stmt
437 that defined USE. This is done by calling mark_relevant and passing it
438 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
439 - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
440 be performed.
442 Outputs:
443 Generally, LIVE_P and RELEVANT are used to define the liveness and
444 relevance info of the DEF_STMT of this USE:
445 STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
446 STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
447 Exceptions:
448 - case 1: If USE is used only for address computations (e.g. array indexing),
449 which does not need to be directly vectorized, then the liveness/relevance
450 of the respective DEF_STMT is left unchanged.
451 - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
 452      skip DEF_STMT because it has already been processed.
453 - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
454 be modified accordingly.
456 Return true if everything is as expected. Return false otherwise. */
458 static bool
459 process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo,
460 enum vect_relevant relevant, vec<gimple *> *worklist,
461 bool force)
463 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
464 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
465 stmt_vec_info dstmt_vinfo;
466 basic_block bb, def_bb;
467 gimple *def_stmt;
468 enum vect_def_type dt;
470 /* case 1: we are only interested in uses that need to be vectorized. Uses
471 that are used for address computation are not considered relevant. */
472 if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
473 return true;
475 if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt))
477 if (dump_enabled_p ())
478 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
479 "not vectorized: unsupported use in stmt.\n");
480 return false;
483 if (!def_stmt || gimple_nop_p (def_stmt))
484 return true;
486 def_bb = gimple_bb (def_stmt);
487 if (!flow_bb_inside_loop_p (loop, def_bb))
489 if (dump_enabled_p ())
490 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
491 return true;
494 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
495 DEF_STMT must have already been processed, because this should be the
496 only way that STMT, which is a reduction-phi, was put in the worklist,
497 as there should be no other uses for DEF_STMT in the loop. So we just
498 check that everything is as expected, and we are done. */
499 dstmt_vinfo = vinfo_for_stmt (def_stmt);
500 bb = gimple_bb (stmt);
501 if (gimple_code (stmt) == GIMPLE_PHI
502 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
503 && gimple_code (def_stmt) != GIMPLE_PHI
504 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
505 && bb->loop_father == def_bb->loop_father)
507 if (dump_enabled_p ())
508 dump_printf_loc (MSG_NOTE, vect_location,
509 "reduc-stmt defining reduc-phi in the same nest.\n");
510 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
511 dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
512 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
513 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
514 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
515 return true;
518 /* case 3a: outer-loop stmt defining an inner-loop stmt:
519 outer-loop-header-bb:
520 d = def_stmt
521 inner-loop:
522 stmt # use (d)
523 outer-loop-tail-bb:
524 ... */
525 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
527 if (dump_enabled_p ())
528 dump_printf_loc (MSG_NOTE, vect_location,
529 "outer-loop def-stmt defining inner-loop stmt.\n");
531 switch (relevant)
533 case vect_unused_in_scope:
534 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
535 vect_used_in_scope : vect_unused_in_scope;
536 break;
538 case vect_used_in_outer_by_reduction:
539 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
540 relevant = vect_used_by_reduction;
541 break;
543 case vect_used_in_outer:
544 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
545 relevant = vect_used_in_scope;
546 break;
548 case vect_used_in_scope:
549 break;
551 default:
552 gcc_unreachable ();
556 /* case 3b: inner-loop stmt defining an outer-loop stmt:
557 outer-loop-header-bb:
559 inner-loop:
560 d = def_stmt
561 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
562 stmt # use (d) */
563 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
565 if (dump_enabled_p ())
566 dump_printf_loc (MSG_NOTE, vect_location,
567 "inner-loop def-stmt defining outer-loop stmt.\n");
569 switch (relevant)
571 case vect_unused_in_scope:
572 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
573 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
574 vect_used_in_outer_by_reduction : vect_unused_in_scope;
575 break;
577 case vect_used_by_reduction:
578 case vect_used_only_live:
579 relevant = vect_used_in_outer_by_reduction;
580 break;
582 case vect_used_in_scope:
583 relevant = vect_used_in_outer;
584 break;
586 default:
587 gcc_unreachable ();
590 /* We are also not interested in uses on loop PHI backedges that are
591 inductions. Otherwise we'll needlessly vectorize the IV increment
592 and cause hybrid SLP for SLP inductions. Unless the PHI is live
593 of course. */
594 else if (gimple_code (stmt) == GIMPLE_PHI
595 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_induction_def
596 && ! STMT_VINFO_LIVE_P (stmt_vinfo)
597 && (PHI_ARG_DEF_FROM_EDGE (stmt, loop_latch_edge (bb->loop_father))
598 == use))
600 if (dump_enabled_p ())
601 dump_printf_loc (MSG_NOTE, vect_location,
602 "induction value on backedge.\n");
603 return true;
607 vect_mark_relevant (worklist, def_stmt, relevant, false);
608 return true;
612 /* Function vect_mark_stmts_to_be_vectorized.
614 Not all stmts in the loop need to be vectorized. For example:
616 for i...
617 for j...
618 1. T0 = i + j
619 2. T1 = a[T0]
621 3. j = j + 1
 623    Stmts 1 and 3 do not need to be vectorized, because loop control and
624 addressing of vectorized data-refs are handled differently.
626 This pass detects such stmts. */
628 bool
629 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
631 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
632 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
633 unsigned int nbbs = loop->num_nodes;
634 gimple_stmt_iterator si;
635 gimple *stmt;
636 unsigned int i;
637 stmt_vec_info stmt_vinfo;
638 basic_block bb;
639 gimple *phi;
640 bool live_p;
641 enum vect_relevant relevant;
643 if (dump_enabled_p ())
644 dump_printf_loc (MSG_NOTE, vect_location,
645 "=== vect_mark_stmts_to_be_vectorized ===\n");
647 auto_vec<gimple *, 64> worklist;
649 /* 1. Init worklist. */
650 for (i = 0; i < nbbs; i++)
652 bb = bbs[i];
653 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
655 phi = gsi_stmt (si);
656 if (dump_enabled_p ())
658 dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
659 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
662 if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
663 vect_mark_relevant (&worklist, phi, relevant, live_p);
665 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
667 stmt = gsi_stmt (si);
668 if (dump_enabled_p ())
670 dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
671 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
674 if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
675 vect_mark_relevant (&worklist, stmt, relevant, live_p);
679 /* 2. Process_worklist */
680 while (worklist.length () > 0)
682 use_operand_p use_p;
683 ssa_op_iter iter;
685 stmt = worklist.pop ();
686 if (dump_enabled_p ())
688 dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
689 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
692 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
693 (DEF_STMT) as relevant/irrelevant according to the relevance property
694 of STMT. */
695 stmt_vinfo = vinfo_for_stmt (stmt);
696 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
698 /* Generally, the relevance property of STMT (in STMT_VINFO_RELEVANT) is
699 propagated as is to the DEF_STMTs of its USEs.
701 One exception is when STMT has been identified as defining a reduction
702 variable; in this case we set the relevance to vect_used_by_reduction.
703 This is because we distinguish between two kinds of relevant stmts -
704 those that are used by a reduction computation, and those that are
705 (also) used by a regular computation. This allows us later on to
706 identify stmts that are used solely by a reduction, and therefore the
707 order of the results that they produce does not have to be kept. */
709 switch (STMT_VINFO_DEF_TYPE (stmt_vinfo))
711 case vect_reduction_def:
712 gcc_assert (relevant != vect_unused_in_scope);
713 if (relevant != vect_unused_in_scope
714 && relevant != vect_used_in_scope
715 && relevant != vect_used_by_reduction
716 && relevant != vect_used_only_live)
718 if (dump_enabled_p ())
719 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
720 "unsupported use of reduction.\n");
721 return false;
723 break;
725 case vect_nested_cycle:
726 if (relevant != vect_unused_in_scope
727 && relevant != vect_used_in_outer_by_reduction
728 && relevant != vect_used_in_outer)
730 if (dump_enabled_p ())
731 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
732 "unsupported use of nested cycle.\n");
734 return false;
736 break;
738 case vect_double_reduction_def:
739 if (relevant != vect_unused_in_scope
740 && relevant != vect_used_by_reduction
741 && relevant != vect_used_only_live)
743 if (dump_enabled_p ())
744 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
745 "unsupported use of double reduction.\n");
747 return false;
749 break;
751 default:
752 break;
755 if (is_pattern_stmt_p (stmt_vinfo))
757 /* Pattern statements are not inserted into the code, so
758 FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
759 have to scan the RHS or function arguments instead. */
760 if (is_gimple_assign (stmt))
762 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
763 tree op = gimple_assign_rhs1 (stmt);
765 i = 1;
766 if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
768 if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
769 relevant, &worklist, false)
770 || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
771 relevant, &worklist, false))
772 return false;
773 i = 2;
775 for (; i < gimple_num_ops (stmt); i++)
777 op = gimple_op (stmt, i);
778 if (TREE_CODE (op) == SSA_NAME
779 && !process_use (stmt, op, loop_vinfo, relevant,
780 &worklist, false))
781 return false;
784 else if (is_gimple_call (stmt))
786 for (i = 0; i < gimple_call_num_args (stmt); i++)
788 tree arg = gimple_call_arg (stmt, i);
789 if (!process_use (stmt, arg, loop_vinfo, relevant,
790 &worklist, false))
791 return false;
795 else
796 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
798 tree op = USE_FROM_PTR (use_p);
799 if (!process_use (stmt, op, loop_vinfo, relevant,
800 &worklist, false))
801 return false;
804 if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
806 gather_scatter_info gs_info;
807 if (!vect_check_gather_scatter (stmt, loop_vinfo, &gs_info))
808 gcc_unreachable ();
809 if (!process_use (stmt, gs_info.offset, loop_vinfo, relevant,
810 &worklist, true))
811 return false;
813 } /* while worklist */
815 return true;
819 /* Function vect_model_simple_cost.
821 Models cost for simple operations, i.e. those that only emit ncopies of a
822 single op. Right now, this does not account for multiple insns that could
823 be generated for the single vector op. We will handle that shortly. */
825 void
826 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
827 enum vect_def_type *dt,
828 int ndts,
829 stmt_vector_for_cost *prologue_cost_vec,
830 stmt_vector_for_cost *body_cost_vec)
832 int i;
833 int inside_cost = 0, prologue_cost = 0;
835 /* The SLP costs were already calculated during SLP tree build. */
836 if (PURE_SLP_STMT (stmt_info))
837 return;
 839   /* Cost the "broadcast" of a scalar operand into a vector operand.
840 Use scalar_to_vec to cost the broadcast, as elsewhere in the vector
841 cost model. */
842 for (i = 0; i < ndts; i++)
843 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
844 prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
845 stmt_info, 0, vect_prologue);
847 /* Pass the inside-of-loop statements to the target-specific cost model. */
848 inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
849 stmt_info, 0, vect_body);
851 if (dump_enabled_p ())
852 dump_printf_loc (MSG_NOTE, vect_location,
853 "vect_model_simple_cost: inside_cost = %d, "
854 "prologue_cost = %d .\n", inside_cost, prologue_cost);
858 /* Model cost for type demotion and promotion operations. PWR is normally
859 zero for single-step promotions and demotions. It will be one if
860 two-step promotion/demotion is required, and so on. Each additional
861 step doubles the number of instructions required. */
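/* For example, with the counts used in the loop below, a two-step
   promotion (PWR == 1) is costed as vect_pow2 (1) + vect_pow2 (2)
   = 2 + 4 = 6 vec_promote_demote stmts, while a two-step demotion is
   costed as vect_pow2 (0) + vect_pow2 (1) = 1 + 2 = 3.  */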
863 static void
864 vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
865 enum vect_def_type *dt, int pwr)
867 int i, tmp;
868 int inside_cost = 0, prologue_cost = 0;
869 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
870 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
871 void *target_cost_data;
873 /* The SLP costs were already calculated during SLP tree build. */
874 if (PURE_SLP_STMT (stmt_info))
875 return;
877 if (loop_vinfo)
878 target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
879 else
880 target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);
882 for (i = 0; i < pwr + 1; i++)
884 tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
885 (i + 1) : i;
886 inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
887 vec_promote_demote, stmt_info, 0,
888 vect_body);
 891   /* FORNOW: Assuming maximum 2 args per stmt.  */
892 for (i = 0; i < 2; i++)
893 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
894 prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
895 stmt_info, 0, vect_prologue);
897 if (dump_enabled_p ())
898 dump_printf_loc (MSG_NOTE, vect_location,
899 "vect_model_promotion_demotion_cost: inside_cost = %d, "
900 "prologue_cost = %d .\n", inside_cost, prologue_cost);
903 /* Function vect_model_store_cost
905 Models cost for stores. In the case of grouped accesses, one access
906 has the overhead of the grouped access attributed to it. */
908 void
909 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
910 vect_memory_access_type memory_access_type,
911 enum vect_def_type dt, slp_tree slp_node,
912 stmt_vector_for_cost *prologue_cost_vec,
913 stmt_vector_for_cost *body_cost_vec)
915 unsigned int inside_cost = 0, prologue_cost = 0;
916 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
917 gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
918 bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);
920 if (dt == vect_constant_def || dt == vect_external_def)
921 prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
922 stmt_info, 0, vect_prologue);
924 /* Grouped stores update all elements in the group at once,
925 so we want the DR for the first statement. */
926 if (!slp_node && grouped_access_p)
928 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
929 dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
932 /* True if we should include any once-per-group costs as well as
933 the cost of the statement itself. For SLP we only get called
934 once per group anyhow. */
935 bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));
937 /* We assume that the cost of a single store-lanes instruction is
938 equivalent to the cost of GROUP_SIZE separate stores. If a grouped
939 access is instead being provided by a permute-and-store operation,
940 include the cost of the permutes. */
941 if (first_stmt_p
942 && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
 944       /* Uses high and low interleave or shuffle operations for each
945 needed permute. */
946 int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
947 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
948 inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
949 stmt_info, 0, vect_body);
951 if (dump_enabled_p ())
952 dump_printf_loc (MSG_NOTE, vect_location,
953 "vect_model_store_cost: strided group_size = %d .\n",
954 group_size);
957 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
958 /* Costs of the stores. */
959 if (memory_access_type == VMAT_ELEMENTWISE
960 || memory_access_type == VMAT_GATHER_SCATTER)
961 /* N scalar stores plus extracting the elements. */
962 inside_cost += record_stmt_cost (body_cost_vec,
963 ncopies * TYPE_VECTOR_SUBPARTS (vectype),
964 scalar_store, stmt_info, 0, vect_body);
965 else
966 vect_get_store_cost (dr, ncopies, &inside_cost, body_cost_vec);
968 if (memory_access_type == VMAT_ELEMENTWISE
969 || memory_access_type == VMAT_STRIDED_SLP)
970 inside_cost += record_stmt_cost (body_cost_vec,
971 ncopies * TYPE_VECTOR_SUBPARTS (vectype),
972 vec_to_scalar, stmt_info, 0, vect_body);
974 if (dump_enabled_p ())
975 dump_printf_loc (MSG_NOTE, vect_location,
976 "vect_model_store_cost: inside_cost = %d, "
977 "prologue_cost = %d .\n", inside_cost, prologue_cost);
981 /* Calculate cost of DR's memory access. */
982 void
983 vect_get_store_cost (struct data_reference *dr, int ncopies,
984 unsigned int *inside_cost,
985 stmt_vector_for_cost *body_cost_vec)
987 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
988 gimple *stmt = DR_STMT (dr);
989 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
991 switch (alignment_support_scheme)
993 case dr_aligned:
995 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
996 vector_store, stmt_info, 0,
997 vect_body);
999 if (dump_enabled_p ())
1000 dump_printf_loc (MSG_NOTE, vect_location,
1001 "vect_model_store_cost: aligned.\n");
1002 break;
1005 case dr_unaligned_supported:
1007 /* Here, we assign an additional cost for the unaligned store. */
1008 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1009 unaligned_store, stmt_info,
1010 DR_MISALIGNMENT (dr), vect_body);
1011 if (dump_enabled_p ())
1012 dump_printf_loc (MSG_NOTE, vect_location,
1013 "vect_model_store_cost: unaligned supported by "
1014 "hardware.\n");
1015 break;
1018 case dr_unaligned_unsupported:
1020 *inside_cost = VECT_MAX_COST;
1022 if (dump_enabled_p ())
1023 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1024 "vect_model_store_cost: unsupported access.\n");
1025 break;
1028 default:
1029 gcc_unreachable ();
1034 /* Function vect_model_load_cost
1036 Models cost for loads. In the case of grouped accesses, one access has
1037 the overhead of the grouped access attributed to it. Since unaligned
1038 accesses are supported for loads, we also account for the costs of the
1039 access scheme chosen. */
1041 void
1042 vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
1043 vect_memory_access_type memory_access_type,
1044 slp_tree slp_node,
1045 stmt_vector_for_cost *prologue_cost_vec,
1046 stmt_vector_for_cost *body_cost_vec)
1048 gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
1049 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
1050 unsigned int inside_cost = 0, prologue_cost = 0;
1051 bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);
1053 /* Grouped loads read all elements in the group at once,
1054 so we want the DR for the first statement. */
1055 if (!slp_node && grouped_access_p)
1057 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
1058 dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
1061 /* True if we should include any once-per-group costs as well as
1062 the cost of the statement itself. For SLP we only get called
1063 once per group anyhow. */
1064 bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));
1066 /* We assume that the cost of a single load-lanes instruction is
1067 equivalent to the cost of GROUP_SIZE separate loads. If a grouped
1068 access is instead being provided by a load-and-permute operation,
1069 include the cost of the permutes. */
1070 if (first_stmt_p
1071 && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
1073       /* Uses even and odd extract operations or shuffle operations
1074 for each needed permute. */
1075 int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
1076 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
1077 inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
1078 stmt_info, 0, vect_body);
1080 if (dump_enabled_p ())
1081 dump_printf_loc (MSG_NOTE, vect_location,
1082 "vect_model_load_cost: strided group_size = %d .\n",
1083 group_size);
1086 /* The loads themselves. */
1087 if (memory_access_type == VMAT_ELEMENTWISE
1088 || memory_access_type == VMAT_GATHER_SCATTER)
1090 /* N scalar loads plus gathering them into a vector. */
1091 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1092 inside_cost += record_stmt_cost (body_cost_vec,
1093 ncopies * TYPE_VECTOR_SUBPARTS (vectype),
1094 scalar_load, stmt_info, 0, vect_body);
1096 else
1097 vect_get_load_cost (dr, ncopies, first_stmt_p,
1098 &inside_cost, &prologue_cost,
1099 prologue_cost_vec, body_cost_vec, true);
1100 if (memory_access_type == VMAT_ELEMENTWISE
1101 || memory_access_type == VMAT_STRIDED_SLP)
1102 inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
1103 stmt_info, 0, vect_body);
1105 if (dump_enabled_p ())
1106 dump_printf_loc (MSG_NOTE, vect_location,
1107 "vect_model_load_cost: inside_cost = %d, "
1108 "prologue_cost = %d .\n", inside_cost, prologue_cost);
1112 /* Calculate cost of DR's memory access. */
1113 void
1114 vect_get_load_cost (struct data_reference *dr, int ncopies,
1115 bool add_realign_cost, unsigned int *inside_cost,
1116 unsigned int *prologue_cost,
1117 stmt_vector_for_cost *prologue_cost_vec,
1118 stmt_vector_for_cost *body_cost_vec,
1119 bool record_prologue_costs)
1121 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
1122 gimple *stmt = DR_STMT (dr);
1123 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1125 switch (alignment_support_scheme)
1127 case dr_aligned:
1129 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1130 stmt_info, 0, vect_body);
1132 if (dump_enabled_p ())
1133 dump_printf_loc (MSG_NOTE, vect_location,
1134 "vect_model_load_cost: aligned.\n");
1136 break;
1138 case dr_unaligned_supported:
1140 /* Here, we assign an additional cost for the unaligned load. */
1141 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1142 unaligned_load, stmt_info,
1143 DR_MISALIGNMENT (dr), vect_body);
1145 if (dump_enabled_p ())
1146 dump_printf_loc (MSG_NOTE, vect_location,
1147 "vect_model_load_cost: unaligned supported by "
1148 "hardware.\n");
1150 break;
1152 case dr_explicit_realign:
1154 *inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
1155 vector_load, stmt_info, 0, vect_body);
1156 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1157 vec_perm, stmt_info, 0, vect_body);
1159 /* FIXME: If the misalignment remains fixed across the iterations of
1160 the containing loop, the following cost should be added to the
1161 prologue costs. */
1162 if (targetm.vectorize.builtin_mask_for_load)
1163 *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
1164 stmt_info, 0, vect_body);
1166 if (dump_enabled_p ())
1167 dump_printf_loc (MSG_NOTE, vect_location,
1168 "vect_model_load_cost: explicit realign\n");
1170 break;
1172 case dr_explicit_realign_optimized:
1174 if (dump_enabled_p ())
1175 dump_printf_loc (MSG_NOTE, vect_location,
1176 "vect_model_load_cost: unaligned software "
1177 "pipelined.\n");
1179 /* Unaligned software pipeline has a load of an address, an initial
1180 load, and possibly a mask operation to "prime" the loop. However,
1181 if this is an access in a group of loads, which provide grouped
1182 access, then the above cost should only be considered for one
1183 access in the group. Inside the loop, there is a load op
1184 and a realignment op. */
1186 if (add_realign_cost && record_prologue_costs)
1188 *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
1189 vector_stmt, stmt_info,
1190 0, vect_prologue);
1191 if (targetm.vectorize.builtin_mask_for_load)
1192 *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
1193 vector_stmt, stmt_info,
1194 0, vect_prologue);
1197 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1198 stmt_info, 0, vect_body);
1199 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
1200 stmt_info, 0, vect_body);
1202 if (dump_enabled_p ())
1203 dump_printf_loc (MSG_NOTE, vect_location,
1204 "vect_model_load_cost: explicit realign optimized"
1205 "\n");
1207 break;
1210 case dr_unaligned_unsupported:
1212 *inside_cost = VECT_MAX_COST;
1214 if (dump_enabled_p ())
1215 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1216 "vect_model_load_cost: unsupported access.\n");
1217 break;
1220 default:
1221 gcc_unreachable ();
1225 /* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
1226 the loop preheader for the vectorized stmt STMT. */
1228 static void
1229 vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
1231 if (gsi)
1232 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1233 else
1235 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1236 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1238 if (loop_vinfo)
1240 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1241 basic_block new_bb;
1242 edge pe;
1244 if (nested_in_vect_loop_p (loop, stmt))
1245 loop = loop->inner;
1247 pe = loop_preheader_edge (loop);
1248 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
1249 gcc_assert (!new_bb);
1251 else
1253 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
1254 basic_block bb;
1255 gimple_stmt_iterator gsi_bb_start;
1257 gcc_assert (bb_vinfo);
1258 bb = BB_VINFO_BB (bb_vinfo);
1259 gsi_bb_start = gsi_after_labels (bb);
1260 gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
1264 if (dump_enabled_p ())
1266 dump_printf_loc (MSG_NOTE, vect_location,
1267 "created new init_stmt: ");
1268 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
1272 /* Function vect_init_vector.
1274 Insert a new stmt (INIT_STMT) that initializes a new variable of type
1275 TYPE with the value VAL. If TYPE is a vector type and VAL does not have
1276    vector type, a vector with all elements equal to VAL is created first.
1277 Place the initialization at BSI if it is not NULL. Otherwise, place the
1278 initialization at the loop preheader.
1279 Return the DEF of INIT_STMT.
1280 It will be used in the vectorization of STMT. */
1282 tree
1283 vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi)
1285 gimple *init_stmt;
1286 tree new_temp;
1288   /* We abuse this function to push something to an SSA name with initial 'val'.  */
1289 if (! useless_type_conversion_p (type, TREE_TYPE (val)))
1291 gcc_assert (TREE_CODE (type) == VECTOR_TYPE);
1292 if (! types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
1294       /* A scalar boolean value should be transformed into an
1295          all-zeros or all-ones value before building a vector.  */
1296 if (VECTOR_BOOLEAN_TYPE_P (type))
1298 tree true_val = build_all_ones_cst (TREE_TYPE (type));
1299 tree false_val = build_zero_cst (TREE_TYPE (type));
1301 if (CONSTANT_CLASS_P (val))
1302 val = integer_zerop (val) ? false_val : true_val;
1303 else
1305 new_temp = make_ssa_name (TREE_TYPE (type));
1306 init_stmt = gimple_build_assign (new_temp, COND_EXPR,
1307 val, true_val, false_val);
1308 vect_init_vector_1 (stmt, init_stmt, gsi);
1309 val = new_temp;
1312 else if (CONSTANT_CLASS_P (val))
1313 val = fold_convert (TREE_TYPE (type), val);
1314 else
1316 new_temp = make_ssa_name (TREE_TYPE (type));
1317 if (! INTEGRAL_TYPE_P (TREE_TYPE (val)))
1318 init_stmt = gimple_build_assign (new_temp,
1319 fold_build1 (VIEW_CONVERT_EXPR,
1320 TREE_TYPE (type),
1321 val));
1322 else
1323 init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
1324 vect_init_vector_1 (stmt, init_stmt, gsi);
1325 val = new_temp;
1328 val = build_vector_from_val (type, val);
1331 new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
1332 init_stmt = gimple_build_assign (new_temp, val);
1333 vect_init_vector_1 (stmt, init_stmt, gsi);
1334 return new_temp;
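/* Usage sketch (illustrative): an invariant or constant operand OP is
   typically broadcast before the loop with

     tree vec_inv = vect_init_vector (stmt, op, vectype, NULL);

   as vect_get_vec_def_for_operand below does; the NULL iterator places the
   init stmt on the loop preheader edge (or after the labels of the basic
   block in the basic-block vectorization case).  */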
1337 /* Function vect_get_vec_def_for_operand_1.
1339 For a defining stmt DEF_STMT of a scalar stmt, return a vector def with type
1340 DT that will be used in the vectorized stmt. */
1342 tree
1343 vect_get_vec_def_for_operand_1 (gimple *def_stmt, enum vect_def_type dt)
1345 tree vec_oprnd;
1346 gimple *vec_stmt;
1347 stmt_vec_info def_stmt_info = NULL;
1349 switch (dt)
1351 /* operand is a constant or a loop invariant. */
1352 case vect_constant_def:
1353 case vect_external_def:
1354 /* Code should use vect_get_vec_def_for_operand. */
1355 gcc_unreachable ();
1357 /* operand is defined inside the loop. */
1358 case vect_internal_def:
1360 /* Get the def from the vectorized stmt. */
1361 def_stmt_info = vinfo_for_stmt (def_stmt);
1363 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1364 /* Get vectorized pattern statement. */
1365 if (!vec_stmt
1366 && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
1367 && !STMT_VINFO_RELEVANT (def_stmt_info))
1368 vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
1369 STMT_VINFO_RELATED_STMT (def_stmt_info)));
1370 gcc_assert (vec_stmt);
1371 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1372 vec_oprnd = PHI_RESULT (vec_stmt);
1373 else if (is_gimple_call (vec_stmt))
1374 vec_oprnd = gimple_call_lhs (vec_stmt);
1375 else
1376 vec_oprnd = gimple_assign_lhs (vec_stmt);
1377 return vec_oprnd;
1380 /* operand is defined by a loop header phi. */
1381 case vect_reduction_def:
1382 case vect_double_reduction_def:
1383 case vect_nested_cycle:
1384 case vect_induction_def:
1386 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1388 /* Get the def from the vectorized stmt. */
1389 def_stmt_info = vinfo_for_stmt (def_stmt);
1390 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1391 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1392 vec_oprnd = PHI_RESULT (vec_stmt);
1393 else
1394 vec_oprnd = gimple_get_lhs (vec_stmt);
1395 return vec_oprnd;
1398 default:
1399 gcc_unreachable ();
1404 /* Function vect_get_vec_def_for_operand.
1406 OP is an operand in STMT. This function returns a (vector) def that will be
1407 used in the vectorized stmt for STMT.
1409 In the case that OP is an SSA_NAME which is defined in the loop, then
1410 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
1412 In case OP is an invariant or constant, a new stmt that creates a vector def
1413 needs to be introduced. VECTYPE may be used to specify a required type for
1414 vector invariant. */
1416 tree
1417 vect_get_vec_def_for_operand (tree op, gimple *stmt, tree vectype)
1419 gimple *def_stmt;
1420 enum vect_def_type dt;
1421 bool is_simple_use;
1422 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1423 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1425 if (dump_enabled_p ())
1427 dump_printf_loc (MSG_NOTE, vect_location,
1428 "vect_get_vec_def_for_operand: ");
1429 dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
1430 dump_printf (MSG_NOTE, "\n");
1433 is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt);
1434 gcc_assert (is_simple_use);
1435 if (def_stmt && dump_enabled_p ())
1437 dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = ");
1438 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
1441 if (dt == vect_constant_def || dt == vect_external_def)
1443 tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
1444 tree vector_type;
1446 if (vectype)
1447 vector_type = vectype;
1448 else if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op))
1449 && VECTOR_BOOLEAN_TYPE_P (stmt_vectype))
1450 vector_type = build_same_sized_truth_vector_type (stmt_vectype);
1451 else
1452 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
1454 gcc_assert (vector_type);
1455 return vect_init_vector (stmt, op, vector_type, NULL);
1457 else
1458 return vect_get_vec_def_for_operand_1 (def_stmt, dt);
1462 /* Function vect_get_vec_def_for_stmt_copy
1464 Return a vector-def for an operand. This function is used when the
1465 vectorized stmt to be created (by the caller to this function) is a "copy"
1466 created in case the vectorized result cannot fit in one vector, and several
1467 copies of the vector-stmt are required. In this case the vector-def is
1468 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1469 of the stmt that defines VEC_OPRND.
1470 DT is the type of the vector def VEC_OPRND.
1472 Context:
1473 In case the vectorization factor (VF) is bigger than the number
1474 of elements that can fit in a vectype (nunits), we have to generate
1475 more than one vector stmt to vectorize the scalar stmt. This situation
1476 arises when there are multiple data-types operated upon in the loop; the
1477 smallest data-type determines the VF, and as a result, when vectorizing
1478 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1479 vector stmt (each computing a vector of 'nunits' results, and together
1480 computing 'VF' results in each iteration). This function is called when
1481 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1482 which VF=16 and nunits=4, so the number of copies required is 4):
1484 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1486 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1487 VS1.1: vx.1 = memref1 VS1.2
1488 VS1.2: vx.2 = memref2 VS1.3
1489 VS1.3: vx.3 = memref3
1491 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1492 VSnew.1: vz1 = vx.1 + ... VSnew.2
1493 VSnew.2: vz2 = vx.2 + ... VSnew.3
1494 VSnew.3: vz3 = vx.3 + ...
1496 The vectorization of S1 is explained in vectorizable_load.
1497 The vectorization of S2:
1498 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1499 the function 'vect_get_vec_def_for_operand' is called to
1500 get the relevant vector-def for each operand of S2. For operand x it
1501 returns the vector-def 'vx.0'.
1503 To create the remaining copies of the vector-stmt (VSnew.j), this
1504 function is called to get the relevant vector-def for each operand. It is
1505 obtained from the respective VS1.j stmt, which is recorded in the
1506 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1508 For example, to obtain the vector-def 'vx.1' in order to create the
1509 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1510 Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
1511 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1512 and return its def ('vx.1').
1513 Overall, to create the above sequence this function will be called 3 times:
1514 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1515 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1516 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1518 tree
1519 vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1521 gimple *vec_stmt_for_operand;
1522 stmt_vec_info def_stmt_info;
1524 /* Do nothing; can reuse same def. */
1525 if (dt == vect_external_def || dt == vect_constant_def )
1526 return vec_oprnd;
1528 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1529 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1530 gcc_assert (def_stmt_info);
1531 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1532 gcc_assert (vec_stmt_for_operand);
1533 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1534 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1535 else
1536 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1537 return vec_oprnd;
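/* Sketch of the calling pattern described above (illustrative loop
   variables):

     vec_oprnd = vect_get_vec_def_for_operand (op, stmt);
     for (j = 1; j < ncopies; j++)
       vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);

   Each call follows STMT_VINFO_RELATED_STMT one step to pick up the def
   produced by the next copy of the defining vector stmt.  */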
1541 /* Get vectorized definitions for the operands to create a copy of an original
1542 stmt. See vect_get_vec_def_for_stmt_copy () for details. */
1544 void
1545 vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
1546 vec<tree> *vec_oprnds0,
1547 vec<tree> *vec_oprnds1)
1549 tree vec_oprnd = vec_oprnds0->pop ();
1551 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
1552 vec_oprnds0->quick_push (vec_oprnd);
1554 if (vec_oprnds1 && vec_oprnds1->length ())
1556 vec_oprnd = vec_oprnds1->pop ();
1557 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
1558 vec_oprnds1->quick_push (vec_oprnd);
1563 /* Get vectorized definitions for OP0 and OP1. */
1565 void
1566 vect_get_vec_defs (tree op0, tree op1, gimple *stmt,
1567 vec<tree> *vec_oprnds0,
1568 vec<tree> *vec_oprnds1,
1569 slp_tree slp_node)
1571 if (slp_node)
1573 int nops = (op1 == NULL_TREE) ? 1 : 2;
1574 auto_vec<tree> ops (nops);
1575 auto_vec<vec<tree> > vec_defs (nops);
1577 ops.quick_push (op0);
1578 if (op1)
1579 ops.quick_push (op1);
1581 vect_get_slp_defs (ops, slp_node, &vec_defs);
1583 *vec_oprnds0 = vec_defs[0];
1584 if (op1)
1585 *vec_oprnds1 = vec_defs[1];
1587 else
1589 tree vec_oprnd;
1591 vec_oprnds0->create (1);
1592 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt);
1593 vec_oprnds0->quick_push (vec_oprnd);
1595 if (op1)
1597 vec_oprnds1->create (1);
1598 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt);
1599 vec_oprnds1->quick_push (vec_oprnd);
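/* Typical call (illustrative): for the first vector copy of a two-operand
   stmt the caller does

     vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1, slp_node);

   and for the following copies it switches to
   vect_get_vec_defs_for_stmt_copy, as explained above.  */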
1605 /* Function vect_finish_stmt_generation.
1607 Insert a new stmt. */
1609 void
1610 vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt,
1611 gimple_stmt_iterator *gsi)
1613 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1614 vec_info *vinfo = stmt_info->vinfo;
1616 gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
1618 if (!gsi_end_p (*gsi)
1619 && gimple_has_mem_ops (vec_stmt))
1621 gimple *at_stmt = gsi_stmt (*gsi);
1622 tree vuse = gimple_vuse (at_stmt);
1623 if (vuse && TREE_CODE (vuse) == SSA_NAME)
1625 tree vdef = gimple_vdef (at_stmt);
1626 gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
1627 /* If we have an SSA vuse and insert a store, update virtual
1628 SSA form to avoid triggering the renamer. Do so only
1629 if we can easily see all uses - which is what almost always
1630 happens with the way vectorized stmts are inserted. */
1631 if ((vdef && TREE_CODE (vdef) == SSA_NAME)
1632 && ((is_gimple_assign (vec_stmt)
1633 && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
1634 || (is_gimple_call (vec_stmt)
1635 && !(gimple_call_flags (vec_stmt)
1636 & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
1638 tree new_vdef = copy_ssa_name (vuse, vec_stmt);
1639 gimple_set_vdef (vec_stmt, new_vdef);
1640 SET_USE (gimple_vuse_op (at_stmt), new_vdef);
1644 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1646 set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo));
1648 if (dump_enabled_p ())
1650 dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
1651 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
1654 gimple_set_location (vec_stmt, gimple_location (stmt));
1656 /* While EH edges will generally prevent vectorization, stmt might
1657 e.g. be in a must-not-throw region. Ensure newly created stmts
1658 that could throw are part of the same region. */
1659 int lp_nr = lookup_stmt_eh_lp (stmt);
1660 if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
1661 add_stmt_to_eh_lp (vec_stmt, lp_nr);
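/* Usage sketch (illustrative names): the usual way a new vector stmt is
   built and emitted at the current position mirrors read_vector_array
   above:

     new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
     new_temp = make_ssa_name (vec_dest, new_stmt);
     gimple_assign_set_lhs (new_stmt, new_temp);
     vect_finish_stmt_generation (stmt, new_stmt, gsi);  */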
1664 /* We want to vectorize a call to combined function CFN with function
1665 decl FNDECL, using VECTYPE_OUT as the type of the output and VECTYPE_IN
1666 as the types of all inputs. Check whether this is possible using
1667 an internal function, returning its code if so or IFN_LAST if not. */
1669 static internal_fn
1670 vectorizable_internal_function (combined_fn cfn, tree fndecl,
1671 tree vectype_out, tree vectype_in)
1673 internal_fn ifn;
1674 if (internal_fn_p (cfn))
1675 ifn = as_internal_fn (cfn);
1676 else
1677 ifn = associated_internal_fn (fndecl);
1678 if (ifn != IFN_LAST && direct_internal_fn_p (ifn))
1680 const direct_internal_fn_info &info = direct_internal_fn (ifn);
1681 if (info.vectorizable)
1683 tree type0 = (info.type0 < 0 ? vectype_out : vectype_in);
1684 tree type1 = (info.type1 < 0 ? vectype_out : vectype_in);
1685 if (direct_internal_fn_supported_p (ifn, tree_pair (type0, type1),
1686 OPTIMIZE_FOR_SPEED))
1687 return ifn;
1690 return IFN_LAST;
1694 static tree permute_vec_elements (tree, tree, tree, gimple *,
1695 gimple_stmt_iterator *);
1697 /* STMT is a non-strided load or store, meaning that it accesses
1698 elements with a known constant step. Return -1 if that step
1699 is negative, 0 if it is zero, and 1 if it is greater than zero. */
1701 static int
1702 compare_step_with_zero (gimple *stmt)
1704 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1705 data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
1706 return tree_int_cst_compare (vect_dr_behavior (dr)->step,
1707 size_zero_node);
1710 /* If the target supports a permute mask that reverses the elements in
1711 a vector of type VECTYPE, return that mask, otherwise return null. */
1713 static tree
1714 perm_mask_for_reverse (tree vectype)
1716 int i, nunits;
1718 nunits = TYPE_VECTOR_SUBPARTS (vectype);
1720 vec_perm_builder sel (nunits, nunits, 1);
1721 for (i = 0; i < nunits; ++i)
1722 sel.quick_push (nunits - 1 - i);
1724 vec_perm_indices indices (sel, 1, nunits);
1725 if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
1726 return NULL_TREE;
1727 return vect_gen_perm_mask_checked (vectype, indices);
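/* Other permute masks are built the same way (illustrative sketch, e.g.
   selecting the even-numbered elements of a two-vector input):

     vec_perm_builder sel (nunits, nunits, 1);
     for (i = 0; i < nunits; ++i)
       sel.quick_push (i * 2);
     vec_perm_indices indices (sel, 2, nunits);
     if (can_vec_perm_const_p (TYPE_MODE (vectype), indices))
       mask = vect_gen_perm_mask_checked (vectype, indices);  */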
1730 /* A subroutine of get_load_store_type, with a subset of the same
1731 arguments. Handle the case where STMT is part of a grouped load
1732 or store.
1734 For stores, the statements in the group are all consecutive
1735 and there is no gap at the end. For loads, the statements in the
1736 group might not be consecutive; there can be gaps between statements
1737 as well as at the end. */
1739 static bool
1740 get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
1741 vec_load_store_type vls_type,
1742 vect_memory_access_type *memory_access_type)
1744 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1745 vec_info *vinfo = stmt_info->vinfo;
1746 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1747 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
1748 gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
1749 data_reference *first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
1750 unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
1751 bool single_element_p = (stmt == first_stmt
1752 && !GROUP_NEXT_ELEMENT (stmt_info));
1753 unsigned HOST_WIDE_INT gap = GROUP_GAP (vinfo_for_stmt (first_stmt));
1754 unsigned nunits = TYPE_VECTOR_SUBPARTS (vectype);
1756 /* True if the vectorized statements would access beyond the last
1757 statement in the group. */
1758 bool overrun_p = false;
1760 /* True if we can cope with such overrun by peeling for gaps, so that
1761 there is at least one final scalar iteration after the vector loop. */
1762 bool can_overrun_p = (vls_type == VLS_LOAD && loop_vinfo && !loop->inner);
1764 /* There can only be a gap at the end of the group if the stride is
1765 known at compile time. */
1766 gcc_assert (!STMT_VINFO_STRIDED_P (stmt_info) || gap == 0);
1768 /* Stores can't yet have gaps. */
1769 gcc_assert (slp || vls_type == VLS_LOAD || gap == 0);
1771 if (slp)
1773 if (STMT_VINFO_STRIDED_P (stmt_info))
1775 /* Try to use consecutive accesses of GROUP_SIZE elements,
1776 separated by the stride, until we have a complete vector.
1777 Fall back to scalar accesses if that isn't possible. */
1778 if (nunits % group_size == 0)
1779 *memory_access_type = VMAT_STRIDED_SLP;
1780 else
1781 *memory_access_type = VMAT_ELEMENTWISE;
1783 else
1785 overrun_p = loop_vinfo && gap != 0;
1786 if (overrun_p && vls_type != VLS_LOAD)
1788 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1789 "Grouped store with gaps requires"
1790 " non-consecutive accesses\n");
1791 return false;
1793 /* An overrun is fine if the trailing elements are smaller
1794 than the alignment boundary B. Every vector access will
1795 be a multiple of B and so we are guaranteed to access a
1796 non-gap element in the same B-sized block. */
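/* For example, with 4-byte scalar elements and a known alignment of
   16 bytes the quotient below is 4, so a trailing gap of up to three
   elements can be tolerated without peeling for gaps.  */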
1797 if (overrun_p
1798 && gap < (vect_known_alignment_in_bytes (first_dr)
1799 / vect_get_scalar_dr_size (first_dr)))
1800 overrun_p = false;
1801 if (overrun_p && !can_overrun_p)
1803 if (dump_enabled_p ())
1804 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1805 "Peeling for outer loop is not supported\n");
1806 return false;
1808 *memory_access_type = VMAT_CONTIGUOUS;
1811 else
1813 /* We can always handle this case using elementwise accesses,
1814 but see if something more efficient is available. */
1815 *memory_access_type = VMAT_ELEMENTWISE;
1817 /* If there is a gap at the end of the group then these optimizations
1818 would access excess elements in the last iteration. */
1819 bool would_overrun_p = (gap != 0);
1820 /* An overrun is fine if the trailing elements are smaller than the
1821 alignment boundary B. Every vector access will be a multiple of B
1822 and so we are guaranteed to access a non-gap element in the
1823 same B-sized block. */
1824 if (would_overrun_p
1825 && gap < (vect_known_alignment_in_bytes (first_dr)
1826 / vect_get_scalar_dr_size (first_dr)))
1827 would_overrun_p = false;
1829 if (!STMT_VINFO_STRIDED_P (stmt_info)
1830 && (can_overrun_p || !would_overrun_p)
1831 && compare_step_with_zero (stmt) > 0)
1833 /* First try using LOAD/STORE_LANES. */
1834 if (vls_type == VLS_LOAD
1835 ? vect_load_lanes_supported (vectype, group_size)
1836 : vect_store_lanes_supported (vectype, group_size))
1838 *memory_access_type = VMAT_LOAD_STORE_LANES;
1839 overrun_p = would_overrun_p;
1842 /* If that fails, try using permuting loads. */
1843 if (*memory_access_type == VMAT_ELEMENTWISE
1844 && (vls_type == VLS_LOAD
1845 ? vect_grouped_load_supported (vectype, single_element_p,
1846 group_size)
1847 : vect_grouped_store_supported (vectype, group_size)))
1849 *memory_access_type = VMAT_CONTIGUOUS_PERMUTE;
1850 overrun_p = would_overrun_p;
1855 if (vls_type != VLS_LOAD && first_stmt == stmt)
1857 /* STMT is the leader of the group. Check the operands of all the
1858 stmts of the group. */
1859 gimple *next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
1860 while (next_stmt)
1862 gcc_assert (gimple_assign_single_p (next_stmt));
1863 tree op = gimple_assign_rhs1 (next_stmt);
1864 gimple *def_stmt;
1865 enum vect_def_type dt;
1866 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
1868 if (dump_enabled_p ())
1869 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1870 "use not simple.\n");
1871 return false;
1873 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
1877 if (overrun_p)
1879 gcc_assert (can_overrun_p);
1880 if (dump_enabled_p ())
1881 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1882 "Data access with gaps requires scalar "
1883 "epilogue loop\n");
1884 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
1887 return true;
1890 /* A subroutine of get_load_store_type, with a subset of the same
1891 arguments. Handle the case where STMT is a load or store that
1892 accesses consecutive elements with a negative step. */
1894 static vect_memory_access_type
1895 get_negative_load_store_type (gimple *stmt, tree vectype,
1896 vec_load_store_type vls_type,
1897 unsigned int ncopies)
1899 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1900 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
1901 dr_alignment_support alignment_support_scheme;
1903 if (ncopies > 1)
1905 if (dump_enabled_p ())
1906 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1907 "multiple types with negative step.\n");
1908 return VMAT_ELEMENTWISE;
1911 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
1912 if (alignment_support_scheme != dr_aligned
1913 && alignment_support_scheme != dr_unaligned_supported)
1915 if (dump_enabled_p ())
1916 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1917 "negative step but alignment required.\n");
1918 return VMAT_ELEMENTWISE;
1921 if (vls_type == VLS_STORE_INVARIANT)
1923 if (dump_enabled_p ())
1924 dump_printf_loc (MSG_NOTE, vect_location,
1925 "negative step with invariant source;"
1926 " no permute needed.\n");
1927 return VMAT_CONTIGUOUS_DOWN;
1930 if (!perm_mask_for_reverse (vectype))
1932 if (dump_enabled_p ())
1933 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1934 "negative step and reversing not supported.\n");
1935 return VMAT_ELEMENTWISE;
1938 return VMAT_CONTIGUOUS_REVERSE;
1941 /* Analyze load or store statement STMT of type VLS_TYPE. Return true
1942 if there is a memory access type that the vectorized form can use,
1943 storing it in *MEMORY_ACCESS_TYPE if so. If we decide to use gathers
1944 or scatters, fill in GS_INFO accordingly.
1946 SLP says whether we're performing SLP rather than loop vectorization.
1947 VECTYPE is the vector type that the vectorized statements will use.
1948 NCOPIES is the number of vector statements that will be needed. */
1950 static bool
1951 get_load_store_type (gimple *stmt, tree vectype, bool slp,
1952 vec_load_store_type vls_type, unsigned int ncopies,
1953 vect_memory_access_type *memory_access_type,
1954 gather_scatter_info *gs_info)
1956 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1957 vec_info *vinfo = stmt_info->vinfo;
1958 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1959 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
1961 *memory_access_type = VMAT_GATHER_SCATTER;
1962 gimple *def_stmt;
1963 if (!vect_check_gather_scatter (stmt, loop_vinfo, gs_info))
1964 gcc_unreachable ();
1965 else if (!vect_is_simple_use (gs_info->offset, vinfo, &def_stmt,
1966 &gs_info->offset_dt,
1967 &gs_info->offset_vectype))
1969 if (dump_enabled_p ())
1970 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1971 "%s index use not simple.\n",
1972 vls_type == VLS_LOAD ? "gather" : "scatter");
1973 return false;
1976 else if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1978 if (!get_group_load_store_type (stmt, vectype, slp, vls_type,
1979 memory_access_type))
1980 return false;
1982 else if (STMT_VINFO_STRIDED_P (stmt_info))
1984 gcc_assert (!slp);
1985 *memory_access_type = VMAT_ELEMENTWISE;
1987 else
1989 int cmp = compare_step_with_zero (stmt);
1990 if (cmp < 0)
1991 *memory_access_type = get_negative_load_store_type
1992 (stmt, vectype, vls_type, ncopies);
1993 else if (cmp == 0)
1995 gcc_assert (vls_type == VLS_LOAD);
1996 *memory_access_type = VMAT_INVARIANT;
1998 else
1999 *memory_access_type = VMAT_CONTIGUOUS;
2002 /* FIXME: At the moment the cost model seems to underestimate the
2003 cost of using elementwise accesses. This check preserves the
2004 traditional behavior until that can be fixed. */
2005 if (*memory_access_type == VMAT_ELEMENTWISE
2006 && !STMT_VINFO_STRIDED_P (stmt_info))
2008 if (dump_enabled_p ())
2009 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2010 "not falling back to elementwise accesses\n");
2011 return false;
2013 return true;
2016 /* Function vectorizable_mask_load_store.
2018 Check if STMT performs a conditional load or store that can be vectorized.
2019 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2020 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
2021 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2023 static bool
2024 vectorizable_mask_load_store (gimple *stmt, gimple_stmt_iterator *gsi,
2025 gimple **vec_stmt, slp_tree slp_node)
2027 tree vec_dest = NULL;
2028 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2029 stmt_vec_info prev_stmt_info;
2030 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2031 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2032 bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
2033 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
2034 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2035 tree rhs_vectype = NULL_TREE;
2036 tree mask_vectype;
2037 tree elem_type;
2038 gimple *new_stmt;
2039 tree dummy;
2040 tree dataref_ptr = NULL_TREE;
2041 gimple *ptr_incr;
2042 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
2043 int ncopies;
2044 int i, j;
2045 bool inv_p;
2046 gather_scatter_info gs_info;
2047 vec_load_store_type vls_type;
2048 tree mask;
2049 gimple *def_stmt;
2050 enum vect_def_type dt;
2052 if (slp_node != NULL)
2053 return false;
2055 ncopies = vect_get_num_copies (loop_vinfo, vectype);
2056 gcc_assert (ncopies >= 1);
2058 mask = gimple_call_arg (stmt, 2);
2060 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (mask)))
2061 return false;
2063 /* FORNOW. This restriction should be relaxed. */
2064 if (nested_in_vect_loop && ncopies > 1)
2066 if (dump_enabled_p ())
2067 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2068 "multiple types in nested loop.");
2069 return false;
2072 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2073 return false;
2075 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
2076 && ! vec_stmt)
2077 return false;
2079 if (!STMT_VINFO_DATA_REF (stmt_info))
2080 return false;
2082 elem_type = TREE_TYPE (vectype);
2084 if (TREE_CODE (mask) != SSA_NAME)
2085 return false;
2087 if (!vect_is_simple_use (mask, loop_vinfo, &def_stmt, &dt, &mask_vectype))
2088 return false;
2090 if (!mask_vectype)
2091 mask_vectype = get_mask_type_for_scalar_type (TREE_TYPE (vectype));
2093 if (!mask_vectype || !VECTOR_BOOLEAN_TYPE_P (mask_vectype)
2094 || TYPE_VECTOR_SUBPARTS (mask_vectype) != TYPE_VECTOR_SUBPARTS (vectype))
2095 return false;
2097 if (gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
2099 tree rhs = gimple_call_arg (stmt, 3);
2100 if (!vect_is_simple_use (rhs, loop_vinfo, &def_stmt, &dt, &rhs_vectype))
2101 return false;
2102 if (dt == vect_constant_def || dt == vect_external_def)
2103 vls_type = VLS_STORE_INVARIANT;
2104 else
2105 vls_type = VLS_STORE;
2107 else
2108 vls_type = VLS_LOAD;
2110 vect_memory_access_type memory_access_type;
2111 if (!get_load_store_type (stmt, vectype, false, vls_type, ncopies,
2112 &memory_access_type, &gs_info))
2113 return false;
2115 if (memory_access_type == VMAT_GATHER_SCATTER)
2117 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
2118 tree masktype
2119 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
2120 if (TREE_CODE (masktype) == INTEGER_TYPE)
2122 if (dump_enabled_p ())
2123 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2124 "masked gather with integer mask not supported.");
2125 return false;
2128 else if (memory_access_type != VMAT_CONTIGUOUS)
2130 if (dump_enabled_p ())
2131 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2132 "unsupported access type for masked %s.\n",
2133 vls_type == VLS_LOAD ? "load" : "store");
2134 return false;
2136 else if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2137 || !can_vec_mask_load_store_p (TYPE_MODE (vectype),
2138 TYPE_MODE (mask_vectype),
2139 vls_type == VLS_LOAD)
2140 || (rhs_vectype
2141 && !useless_type_conversion_p (vectype, rhs_vectype)))
2142 return false;
2144 if (!vec_stmt) /* transformation not required. */
2146 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
2147 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2148 if (vls_type == VLS_LOAD)
2149 vect_model_load_cost (stmt_info, ncopies, memory_access_type,
2150 NULL, NULL, NULL);
2151 else
2152 vect_model_store_cost (stmt_info, ncopies, memory_access_type,
2153 dt, NULL, NULL, NULL);
2154 return true;
2156 gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
2158 /* Transform. */
2160 if (memory_access_type == VMAT_GATHER_SCATTER)
2162 tree vec_oprnd0 = NULL_TREE, op;
2163 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
2164 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
2165 tree ptr, vec_mask = NULL_TREE, mask_op = NULL_TREE, var, scale;
2166 tree perm_mask = NULL_TREE, prev_res = NULL_TREE;
2167 tree mask_perm_mask = NULL_TREE;
2168 edge pe = loop_preheader_edge (loop);
2169 gimple_seq seq;
2170 basic_block new_bb;
2171 enum { NARROW, NONE, WIDEN } modifier;
2172 int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);
2174 rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
2175 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2176 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2177 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2178 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2179 scaletype = TREE_VALUE (arglist);
2180 gcc_checking_assert (types_compatible_p (srctype, rettype)
2181 && types_compatible_p (srctype, masktype));
2183 if (nunits == gather_off_nunits)
2184 modifier = NONE;
2185 else if (nunits == gather_off_nunits / 2)
2187 modifier = WIDEN;
2189 vec_perm_builder sel (gather_off_nunits, gather_off_nunits, 1);
2190 for (i = 0; i < gather_off_nunits; ++i)
2191 sel.quick_push (i | nunits);
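/* For example, with nunits == 4 and gather_off_nunits == 8 this builds
   the selector { 4, 5, 6, 7, 4, 5, 6, 7 }, which replicates the high
   half of the offset vector for the odd-numbered copies.  */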
2193 vec_perm_indices indices (sel, 1, gather_off_nunits);
2194 perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype,
2195 indices);
2197 else if (nunits == gather_off_nunits * 2)
2199 modifier = NARROW;
2201 vec_perm_builder sel (nunits, nunits, 1);
2202 sel.quick_grow (nunits);
2203 for (i = 0; i < nunits; ++i)
2204 sel[i] = i < gather_off_nunits
2205 ? i : i + nunits - gather_off_nunits;
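/* For example, with nunits == 8 and gather_off_nunits == 4 this builds
   { 0, 1, 2, 3, 8, 9, 10, 11 }, which concatenates the low halves of
   two gather results (PREV_RES and VAR below).  */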
2206 vec_perm_indices indices (sel, 2, nunits);
2207 perm_mask = vect_gen_perm_mask_checked (vectype, indices);
2209 ncopies *= 2;
2211 for (i = 0; i < nunits; ++i)
2212 sel[i] = i | gather_off_nunits;
2213 indices.new_vector (sel, 2, gather_off_nunits);
2214 mask_perm_mask = vect_gen_perm_mask_checked (masktype, indices);
2216 else
2217 gcc_unreachable ();
2219 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
2221 ptr = fold_convert (ptrtype, gs_info.base);
2222 if (!is_gimple_min_invariant (ptr))
2224 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
2225 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
2226 gcc_assert (!new_bb);
2229 scale = build_int_cst (scaletype, gs_info.scale);
2231 prev_stmt_info = NULL;
2232 for (j = 0; j < ncopies; ++j)
2234 if (modifier == WIDEN && (j & 1))
2235 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
2236 perm_mask, stmt, gsi);
2237 else if (j == 0)
2238 op = vec_oprnd0
2239 = vect_get_vec_def_for_operand (gs_info.offset, stmt);
2240 else
2241 op = vec_oprnd0
2242 = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt, vec_oprnd0);
2244 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
2246 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
2247 == TYPE_VECTOR_SUBPARTS (idxtype));
2248 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
2249 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
2250 new_stmt
2251 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
2252 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2253 op = var;
2256 if (mask_perm_mask && (j & 1))
2257 mask_op = permute_vec_elements (mask_op, mask_op,
2258 mask_perm_mask, stmt, gsi);
2259 else
2261 if (j == 0)
2262 vec_mask = vect_get_vec_def_for_operand (mask, stmt);
2263 else
2265 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
2266 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2269 mask_op = vec_mask;
2270 if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
2272 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op))
2273 == TYPE_VECTOR_SUBPARTS (masktype));
2274 var = vect_get_new_ssa_name (masktype, vect_simple_var);
2275 mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
2276 new_stmt
2277 = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op);
2278 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2279 mask_op = var;
2283 new_stmt
2284 = gimple_build_call (gs_info.decl, 5, mask_op, ptr, op, mask_op,
2285 scale);
2287 if (!useless_type_conversion_p (vectype, rettype))
2289 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
2290 == TYPE_VECTOR_SUBPARTS (rettype));
2291 op = vect_get_new_ssa_name (rettype, vect_simple_var);
2292 gimple_call_set_lhs (new_stmt, op);
2293 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2294 var = make_ssa_name (vec_dest);
2295 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
2296 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
2298 else
2300 var = make_ssa_name (vec_dest, new_stmt);
2301 gimple_call_set_lhs (new_stmt, var);
2304 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2306 if (modifier == NARROW)
2308 if ((j & 1) == 0)
2310 prev_res = var;
2311 continue;
2313 var = permute_vec_elements (prev_res, var,
2314 perm_mask, stmt, gsi);
2315 new_stmt = SSA_NAME_DEF_STMT (var);
2318 if (prev_stmt_info == NULL)
2319 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2320 else
2321 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2322 prev_stmt_info = vinfo_for_stmt (new_stmt);
2325 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
2326 from the IL. */
2327 if (STMT_VINFO_RELATED_STMT (stmt_info))
2329 stmt = STMT_VINFO_RELATED_STMT (stmt_info);
2330 stmt_info = vinfo_for_stmt (stmt);
2332 tree lhs = gimple_call_lhs (stmt);
2333 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
2334 set_vinfo_for_stmt (new_stmt, stmt_info);
2335 set_vinfo_for_stmt (stmt, NULL);
2336 STMT_VINFO_STMT (stmt_info) = new_stmt;
2337 gsi_replace (gsi, new_stmt, true);
2338 return true;
2340 else if (vls_type != VLS_LOAD)
2342 tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
2343 prev_stmt_info = NULL;
2344 LOOP_VINFO_HAS_MASK_STORE (loop_vinfo) = true;
2345 for (i = 0; i < ncopies; i++)
2347 unsigned align, misalign;
2349 if (i == 0)
2351 tree rhs = gimple_call_arg (stmt, 3);
2352 vec_rhs = vect_get_vec_def_for_operand (rhs, stmt);
2353 vec_mask = vect_get_vec_def_for_operand (mask, stmt,
2354 mask_vectype);
2355 /* We should have caught mismatched types earlier. */
2356 gcc_assert (useless_type_conversion_p (vectype,
2357 TREE_TYPE (vec_rhs)));
2358 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
2359 NULL_TREE, &dummy, gsi,
2360 &ptr_incr, false, &inv_p);
2361 gcc_assert (!inv_p);
2363 else
2365 vect_is_simple_use (vec_rhs, loop_vinfo, &def_stmt, &dt);
2366 vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs);
2367 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
2368 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2369 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2370 TYPE_SIZE_UNIT (vectype));
2373 align = DR_TARGET_ALIGNMENT (dr);
2374 if (aligned_access_p (dr))
2375 misalign = 0;
2376 else if (DR_MISALIGNMENT (dr) == -1)
2378 align = TYPE_ALIGN_UNIT (elem_type);
2379 misalign = 0;
2381 else
2382 misalign = DR_MISALIGNMENT (dr);
2383 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2384 misalign);
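/* The second argument of the IFN_MASK_STORE call conveys the known
   alignment of the access in bytes: ALIGN when the access is aligned,
   otherwise the largest power of two dividing MISALIGN (which is what
   least_bit_hwi computes).  */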
2385 tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)),
2386 misalign ? least_bit_hwi (misalign) : align);
2387 gcall *call
2388 = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
2389 ptr, vec_mask, vec_rhs);
2390 gimple_call_set_nothrow (call, true);
2391 new_stmt = call;
2392 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2393 if (i == 0)
2394 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2395 else
2396 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2397 prev_stmt_info = vinfo_for_stmt (new_stmt);
2400 else
2402 tree vec_mask = NULL_TREE;
2403 prev_stmt_info = NULL;
2404 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
2405 for (i = 0; i < ncopies; i++)
2407 unsigned align, misalign;
2409 if (i == 0)
2411 vec_mask = vect_get_vec_def_for_operand (mask, stmt,
2412 mask_vectype);
2413 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
2414 NULL_TREE, &dummy, gsi,
2415 &ptr_incr, false, &inv_p);
2416 gcc_assert (!inv_p);
2418 else
2420 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
2421 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2422 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2423 TYPE_SIZE_UNIT (vectype));
2426 align = DR_TARGET_ALIGNMENT (dr);
2427 if (aligned_access_p (dr))
2428 misalign = 0;
2429 else if (DR_MISALIGNMENT (dr) == -1)
2431 align = TYPE_ALIGN_UNIT (elem_type);
2432 misalign = 0;
2434 else
2435 misalign = DR_MISALIGNMENT (dr);
2436 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2437 misalign);
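/* As in the masked-store case above, the second call argument encodes
   the known alignment of the access in bytes.  */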
2438 tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)),
2439 misalign ? least_bit_hwi (misalign) : align);
2440 gcall *call
2441 = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
2442 ptr, vec_mask);
2443 gimple_call_set_lhs (call, make_ssa_name (vec_dest));
2444 gimple_call_set_nothrow (call, true);
2445 vect_finish_stmt_generation (stmt, call, gsi);
2446 if (i == 0)
2447 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = call;
2448 else
2449 STMT_VINFO_RELATED_STMT (prev_stmt_info) = call;
2450 prev_stmt_info = vinfo_for_stmt (call);
2454 if (vls_type == VLS_LOAD)
2456 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
2457 from the IL. */
2458 if (STMT_VINFO_RELATED_STMT (stmt_info))
2460 stmt = STMT_VINFO_RELATED_STMT (stmt_info);
2461 stmt_info = vinfo_for_stmt (stmt);
2463 tree lhs = gimple_call_lhs (stmt);
2464 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
2465 set_vinfo_for_stmt (new_stmt, stmt_info);
2466 set_vinfo_for_stmt (stmt, NULL);
2467 STMT_VINFO_STMT (stmt_info) = new_stmt;
2468 gsi_replace (gsi, new_stmt, true);
2471 return true;
2474 /* Check and perform vectorization of BUILT_IN_BSWAP{16,32,64}. */
2476 static bool
2477 vectorizable_bswap (gimple *stmt, gimple_stmt_iterator *gsi,
2478 gimple **vec_stmt, slp_tree slp_node,
2479 tree vectype_in, enum vect_def_type *dt)
2481 tree op, vectype;
2482 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2483 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2484 unsigned ncopies, nunits;
2486 op = gimple_call_arg (stmt, 0);
2487 vectype = STMT_VINFO_VECTYPE (stmt_info);
2488 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2490 /* Multiple types in SLP are handled by creating the appropriate number of
2491 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2492 case of SLP. */
2493 if (slp_node)
2494 ncopies = 1;
2495 else
2496 ncopies = vect_get_num_copies (loop_vinfo, vectype);
2498 gcc_assert (ncopies >= 1);
2500 tree char_vectype = get_same_sized_vectype (char_type_node, vectype_in);
2501 if (! char_vectype)
2502 return false;
2504 unsigned int num_bytes = TYPE_VECTOR_SUBPARTS (char_vectype);
2505 unsigned word_bytes = num_bytes / nunits;
2507 vec_perm_builder elts (num_bytes, num_bytes, 1);
2508 for (unsigned i = 0; i < nunits; ++i)
2509 for (unsigned j = 0; j < word_bytes; ++j)
2510 elts.quick_push ((i + 1) * word_bytes - j - 1);
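/* For example, for BUILT_IN_BSWAP32 on a 16-byte vector (nunits == 4,
   word_bytes == 4) the selector built above is
   { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 },
   i.e. the bytes within each 4-byte word are reversed.  */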
2512 vec_perm_indices indices (elts, 1, num_bytes);
2513 if (!can_vec_perm_const_p (TYPE_MODE (char_vectype), indices))
2514 return false;
2516 if (! vec_stmt)
2518 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2519 if (dump_enabled_p ())
2520 dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_bswap ==="
2521 "\n");
2522 if (! PURE_SLP_STMT (stmt_info))
2524 add_stmt_cost (stmt_info->vinfo->target_cost_data,
2525 1, vector_stmt, stmt_info, 0, vect_prologue);
2526 add_stmt_cost (stmt_info->vinfo->target_cost_data,
2527 ncopies, vec_perm, stmt_info, 0, vect_body);
2529 return true;
2532 tree bswap_vconst = vec_perm_indices_to_tree (char_vectype, indices);
2534 /* Transform. */
2535 vec<tree> vec_oprnds = vNULL;
2536 gimple *new_stmt = NULL;
2537 stmt_vec_info prev_stmt_info = NULL;
2538 for (unsigned j = 0; j < ncopies; j++)
2540 /* Handle uses. */
2541 if (j == 0)
2542 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
2543 else
2544 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
2546 /* Arguments are ready. Create the new vector stmt. */
2547 unsigned i;
2548 tree vop;
2549 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
2551 tree tem = make_ssa_name (char_vectype);
2552 new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
2553 char_vectype, vop));
2554 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2555 tree tem2 = make_ssa_name (char_vectype);
2556 new_stmt = gimple_build_assign (tem2, VEC_PERM_EXPR,
2557 tem, tem, bswap_vconst);
2558 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2559 tem = make_ssa_name (vectype);
2560 new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
2561 vectype, tem2));
2562 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2563 if (slp_node)
2564 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2567 if (slp_node)
2568 continue;
2570 if (j == 0)
2571 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2572 else
2573 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2575 prev_stmt_info = vinfo_for_stmt (new_stmt);
2578 vec_oprnds.release ();
2579 return true;
2582 /* Return true if vector types VECTYPE_IN and VECTYPE_OUT have
2583 integer elements and if we can narrow VECTYPE_IN to VECTYPE_OUT
2584 in a single step. On success, store the binary pack code in
2585 *CONVERT_CODE. */
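/* For example, narrowing a V4DI input to a V8SI output in a single
   step would typically use the VEC_PACK_TRUNC_EXPR pack code.  */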
2587 static bool
2588 simple_integer_narrowing (tree vectype_out, tree vectype_in,
2589 tree_code *convert_code)
2591 if (!INTEGRAL_TYPE_P (TREE_TYPE (vectype_out))
2592 || !INTEGRAL_TYPE_P (TREE_TYPE (vectype_in)))
2593 return false;
2595 tree_code code;
2596 int multi_step_cvt = 0;
2597 auto_vec <tree, 8> interm_types;
2598 if (!supportable_narrowing_operation (NOP_EXPR, vectype_out, vectype_in,
2599 &code, &multi_step_cvt,
2600 &interm_types)
2601 || multi_step_cvt)
2602 return false;
2604 *convert_code = code;
2605 return true;
2608 /* Function vectorizable_call.
2610 Check if GS performs a function call that can be vectorized.
2611 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2612 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2613 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2615 static bool
2616 vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
2617 slp_tree slp_node)
2619 gcall *stmt;
2620 tree vec_dest;
2621 tree scalar_dest;
2622 tree op, type;
2623 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
2624 stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info;
2625 tree vectype_out, vectype_in;
2626 int nunits_in;
2627 int nunits_out;
2628 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2629 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2630 vec_info *vinfo = stmt_info->vinfo;
2631 tree fndecl, new_temp, rhs_type;
2632 gimple *def_stmt;
2633 enum vect_def_type dt[3]
2634 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
2635 int ndts = 3;
2636 gimple *new_stmt = NULL;
2637 int ncopies, j;
2638 vec<tree> vargs = vNULL;
2639 enum { NARROW, NONE, WIDEN } modifier;
2640 size_t i, nargs;
2641 tree lhs;
2643 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2644 return false;
2646 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
2647 && ! vec_stmt)
2648 return false;
2650 /* Is GS a vectorizable call? */
2651 stmt = dyn_cast <gcall *> (gs);
2652 if (!stmt)
2653 return false;
2655 if (gimple_call_internal_p (stmt)
2656 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
2657 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
2658 return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
2659 slp_node);
2661 if (gimple_call_lhs (stmt) == NULL_TREE
2662 || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
2663 return false;
2665 gcc_checking_assert (!stmt_can_throw_internal (stmt));
2667 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2669 /* Process function arguments. */
2670 rhs_type = NULL_TREE;
2671 vectype_in = NULL_TREE;
2672 nargs = gimple_call_num_args (stmt);
2674 /* Bail out if the function has more than three arguments; we do not have
2675 interesting builtin functions to vectorize with more than two arguments
2676 except for fma. Having no arguments is not supported either. */
2677 if (nargs == 0 || nargs > 3)
2678 return false;
2680 /* Ignore the argument of IFN_GOMP_SIMD_LANE; it is magic. */
2681 if (gimple_call_internal_p (stmt)
2682 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2684 nargs = 0;
2685 rhs_type = unsigned_type_node;
2688 for (i = 0; i < nargs; i++)
2690 tree opvectype;
2692 op = gimple_call_arg (stmt, i);
2694 /* We can only handle calls with arguments of the same type. */
2695 if (rhs_type
2696 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
2698 if (dump_enabled_p ())
2699 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2700 "argument types differ.\n");
2701 return false;
2703 if (!rhs_type)
2704 rhs_type = TREE_TYPE (op);
2706 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[i], &opvectype))
2708 if (dump_enabled_p ())
2709 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2710 "use not simple.\n");
2711 return false;
2714 if (!vectype_in)
2715 vectype_in = opvectype;
2716 else if (opvectype
2717 && opvectype != vectype_in)
2719 if (dump_enabled_p ())
2720 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2721 "argument vector types differ.\n");
2722 return false;
2725 /* If all arguments are external or constant defs use a vector type with
2726 the same size as the output vector type. */
2727 if (!vectype_in)
2728 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
2729 if (vec_stmt)
2730 gcc_assert (vectype_in);
2731 if (!vectype_in)
2733 if (dump_enabled_p ())
2735 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2736 "no vectype for scalar type ");
2737 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
2738 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2741 return false;
2744 /* FORNOW */
2745 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2746 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
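/* For example, V4DI inputs with a V8SI output give nunits_in == 4 and
   nunits_out == 8, i.e. a NARROW call; the opposite relation selects
   WIDEN, and equal counts select NONE.  */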
2747 if (nunits_in == nunits_out / 2)
2748 modifier = NARROW;
2749 else if (nunits_out == nunits_in)
2750 modifier = NONE;
2751 else if (nunits_out == nunits_in / 2)
2752 modifier = WIDEN;
2753 else
2754 return false;
2756 /* We only handle functions that do not read or clobber memory. */
2757 if (gimple_vuse (stmt))
2759 if (dump_enabled_p ())
2760 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2761 "function reads from or writes to memory.\n");
2762 return false;
2765 /* For now, we only vectorize functions if a target specific builtin
2766 is available. TODO -- in some cases, it might be profitable to
2767 insert the calls for pieces of the vector, in order to be able
2768 to vectorize other operations in the loop. */
2769 fndecl = NULL_TREE;
2770 internal_fn ifn = IFN_LAST;
2771 combined_fn cfn = gimple_call_combined_fn (stmt);
2772 tree callee = gimple_call_fndecl (stmt);
2774 /* First try using an internal function. */
2775 tree_code convert_code = ERROR_MARK;
2776 if (cfn != CFN_LAST
2777 && (modifier == NONE
2778 || (modifier == NARROW
2779 && simple_integer_narrowing (vectype_out, vectype_in,
2780 &convert_code))))
2781 ifn = vectorizable_internal_function (cfn, callee, vectype_out,
2782 vectype_in);
2784 /* If that fails, try asking for a target-specific built-in function. */
2785 if (ifn == IFN_LAST)
2787 if (cfn != CFN_LAST)
2788 fndecl = targetm.vectorize.builtin_vectorized_function
2789 (cfn, vectype_out, vectype_in);
2790 else
2791 fndecl = targetm.vectorize.builtin_md_vectorized_function
2792 (callee, vectype_out, vectype_in);
2795 if (ifn == IFN_LAST && !fndecl)
2797 if (cfn == CFN_GOMP_SIMD_LANE
2798 && !slp_node
2799 && loop_vinfo
2800 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2801 && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
2802 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2803 == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
2805 /* We can handle IFN_GOMP_SIMD_LANE by returning a
2806 { 0, 1, 2, ... vf - 1 } vector. */
2807 gcc_assert (nargs == 0);
2809 else if (modifier == NONE
2810 && (gimple_call_builtin_p (stmt, BUILT_IN_BSWAP16)
2811 || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP32)
2812 || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP64)))
2813 return vectorizable_bswap (stmt, gsi, vec_stmt, slp_node,
2814 vectype_in, dt);
2815 else
2817 if (dump_enabled_p ())
2818 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2819 "function is not vectorizable.\n");
2820 return false;
2824 if (slp_node)
2825 ncopies = 1;
2826 else if (modifier == NARROW && ifn == IFN_LAST)
2827 ncopies = vect_get_num_copies (loop_vinfo, vectype_out);
2828 else
2829 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
2831 /* Sanity check: make sure that at least one copy of the vectorized stmt
2832 needs to be generated. */
2833 gcc_assert (ncopies >= 1);
2835 if (!vec_stmt) /* transformation not required. */
2837 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2838 if (dump_enabled_p ())
2839 dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
2840 "\n");
2841 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
2842 if (ifn != IFN_LAST && modifier == NARROW && !slp_node)
2843 add_stmt_cost (stmt_info->vinfo->target_cost_data, ncopies / 2,
2844 vec_promote_demote, stmt_info, 0, vect_body);
2846 return true;
2849 /* Transform. */
2851 if (dump_enabled_p ())
2852 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
2854 /* Handle def. */
2855 scalar_dest = gimple_call_lhs (stmt);
2856 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2858 prev_stmt_info = NULL;
2859 if (modifier == NONE || ifn != IFN_LAST)
2861 tree prev_res = NULL_TREE;
2862 for (j = 0; j < ncopies; ++j)
2864 /* Build argument list for the vectorized call. */
2865 if (j == 0)
2866 vargs.create (nargs);
2867 else
2868 vargs.truncate (0);
2870 if (slp_node)
2872 auto_vec<vec<tree> > vec_defs (nargs);
2873 vec<tree> vec_oprnds0;
2875 for (i = 0; i < nargs; i++)
2876 vargs.quick_push (gimple_call_arg (stmt, i));
2877 vect_get_slp_defs (vargs, slp_node, &vec_defs);
2878 vec_oprnds0 = vec_defs[0];
2880 /* Arguments are ready. Create the new vector stmt. */
2881 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
2883 size_t k;
2884 for (k = 0; k < nargs; k++)
2886 vec<tree> vec_oprndsk = vec_defs[k];
2887 vargs[k] = vec_oprndsk[i];
2889 if (modifier == NARROW)
2891 tree half_res = make_ssa_name (vectype_in);
2892 gcall *call
2893 = gimple_build_call_internal_vec (ifn, vargs);
2894 gimple_call_set_lhs (call, half_res);
2895 gimple_call_set_nothrow (call, true);
2896 new_stmt = call;
2897 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2898 if ((i & 1) == 0)
2900 prev_res = half_res;
2901 continue;
2903 new_temp = make_ssa_name (vec_dest);
2904 new_stmt = gimple_build_assign (new_temp, convert_code,
2905 prev_res, half_res);
2907 else
2909 gcall *call;
2910 if (ifn != IFN_LAST)
2911 call = gimple_build_call_internal_vec (ifn, vargs);
2912 else
2913 call = gimple_build_call_vec (fndecl, vargs);
2914 new_temp = make_ssa_name (vec_dest, call);
2915 gimple_call_set_lhs (call, new_temp);
2916 gimple_call_set_nothrow (call, true);
2917 new_stmt = call;
2919 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2920 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2923 for (i = 0; i < nargs; i++)
2925 vec<tree> vec_oprndsi = vec_defs[i];
2926 vec_oprndsi.release ();
2928 continue;
2931 for (i = 0; i < nargs; i++)
2933 op = gimple_call_arg (stmt, i);
2934 if (j == 0)
2935 vec_oprnd0
2936 = vect_get_vec_def_for_operand (op, stmt);
2937 else
2939 vec_oprnd0 = gimple_call_arg (new_stmt, i);
2940 vec_oprnd0
2941 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2944 vargs.quick_push (vec_oprnd0);
2947 if (gimple_call_internal_p (stmt)
2948 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2950 tree_vector_builder v (vectype_out, 1, 3);
2951 for (int k = 0; k < 3; ++k)
2952 v.quick_push (build_int_cst (unsigned_type_node,
2953 j * nunits_out + k));
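/* With one pattern of three elements the builder extrapolates these to
   the full linear series { j * nunits_out, j * nunits_out + 1, ...,
   (j + 1) * nunits_out - 1 }, i.e. the lane indices for this copy.  */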
2954 tree cst = v.build ();
2955 tree new_var
2956 = vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_");
2957 gimple *init_stmt = gimple_build_assign (new_var, cst);
2958 vect_init_vector_1 (stmt, init_stmt, NULL);
2959 new_temp = make_ssa_name (vec_dest);
2960 new_stmt = gimple_build_assign (new_temp, new_var);
2962 else if (modifier == NARROW)
2964 tree half_res = make_ssa_name (vectype_in);
2965 gcall *call = gimple_build_call_internal_vec (ifn, vargs);
2966 gimple_call_set_lhs (call, half_res);
2967 gimple_call_set_nothrow (call, true);
2968 new_stmt = call;
2969 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2970 if ((j & 1) == 0)
2972 prev_res = half_res;
2973 continue;
2975 new_temp = make_ssa_name (vec_dest);
2976 new_stmt = gimple_build_assign (new_temp, convert_code,
2977 prev_res, half_res);
2979 else
2981 gcall *call;
2982 if (ifn != IFN_LAST)
2983 call = gimple_build_call_internal_vec (ifn, vargs);
2984 else
2985 call = gimple_build_call_vec (fndecl, vargs);
2986 new_temp = make_ssa_name (vec_dest, new_stmt);
2987 gimple_call_set_lhs (call, new_temp);
2988 gimple_call_set_nothrow (call, true);
2989 new_stmt = call;
2991 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2993 if (j == (modifier == NARROW ? 1 : 0))
2994 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2995 else
2996 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2998 prev_stmt_info = vinfo_for_stmt (new_stmt);
3001 else if (modifier == NARROW)
3003 for (j = 0; j < ncopies; ++j)
3005 /* Build argument list for the vectorized call. */
3006 if (j == 0)
3007 vargs.create (nargs * 2);
3008 else
3009 vargs.truncate (0);
3011 if (slp_node)
3013 auto_vec<vec<tree> > vec_defs (nargs);
3014 vec<tree> vec_oprnds0;
3016 for (i = 0; i < nargs; i++)
3017 vargs.quick_push (gimple_call_arg (stmt, i));
3018 vect_get_slp_defs (vargs, slp_node, &vec_defs);
3019 vec_oprnds0 = vec_defs[0];
3021 /* Arguments are ready. Create the new vector stmt. */
3022 for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
3024 size_t k;
3025 vargs.truncate (0);
3026 for (k = 0; k < nargs; k++)
3028 vec<tree> vec_oprndsk = vec_defs[k];
3029 vargs.quick_push (vec_oprndsk[i]);
3030 vargs.quick_push (vec_oprndsk[i + 1]);
3032 gcall *call;
3033 if (ifn != IFN_LAST)
3034 call = gimple_build_call_internal_vec (ifn, vargs);
3035 else
3036 call = gimple_build_call_vec (fndecl, vargs);
3037 new_temp = make_ssa_name (vec_dest, call);
3038 gimple_call_set_lhs (call, new_temp);
3039 gimple_call_set_nothrow (call, true);
3040 new_stmt = call;
3041 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3042 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3045 for (i = 0; i < nargs; i++)
3047 vec<tree> vec_oprndsi = vec_defs[i];
3048 vec_oprndsi.release ();
3050 continue;
3053 for (i = 0; i < nargs; i++)
3055 op = gimple_call_arg (stmt, i);
3056 if (j == 0)
3058 vec_oprnd0
3059 = vect_get_vec_def_for_operand (op, stmt);
3060 vec_oprnd1
3061 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
3063 else
3065 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
3066 vec_oprnd0
3067 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
3068 vec_oprnd1
3069 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
3072 vargs.quick_push (vec_oprnd0);
3073 vargs.quick_push (vec_oprnd1);
3076 new_stmt = gimple_build_call_vec (fndecl, vargs);
3077 new_temp = make_ssa_name (vec_dest, new_stmt);
3078 gimple_call_set_lhs (new_stmt, new_temp);
3079 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3081 if (j == 0)
3082 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3083 else
3084 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3086 prev_stmt_info = vinfo_for_stmt (new_stmt);
3089 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3091 else
3092 /* No current target implements this case. */
3093 return false;
3095 vargs.release ();
3097 /* The call in STMT might prevent it from being removed in DCE.
3098 We cannot remove it here, however, because of the way the SSA name
3099 it defines is mapped to the new definition. So just replace the
3100 rhs of the statement with something harmless. */
3102 if (slp_node)
3103 return true;
3105 type = TREE_TYPE (scalar_dest);
3106 if (is_pattern_stmt_p (stmt_info))
3107 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
3108 else
3109 lhs = gimple_call_lhs (stmt);
3111 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
3112 set_vinfo_for_stmt (new_stmt, stmt_info);
3113 set_vinfo_for_stmt (stmt, NULL);
3114 STMT_VINFO_STMT (stmt_info) = new_stmt;
3115 gsi_replace (gsi, new_stmt, false);
3117 return true;
3121 struct simd_call_arg_info
3123 tree vectype;
3124 tree op;
3125 HOST_WIDE_INT linear_step;
3126 enum vect_def_type dt;
3127 unsigned int align;
3128 bool simd_lane_linear;
3131 /* Helper function of vectorizable_simd_clone_call. If OP, an SSA_NAME,
3132 is linear within a simd lane (but not within the whole loop), note it
3133 in *ARGINFO. */
3135 static void
3136 vect_simd_lane_linear (tree op, struct loop *loop,
3137 struct simd_call_arg_info *arginfo)
3139 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
3141 if (!is_gimple_assign (def_stmt)
3142 || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
3143 || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
3144 return;
3146 tree base = gimple_assign_rhs1 (def_stmt);
3147 HOST_WIDE_INT linear_step = 0;
3148 tree v = gimple_assign_rhs2 (def_stmt);
3149 while (TREE_CODE (v) == SSA_NAME)
3151 tree t;
3152 def_stmt = SSA_NAME_DEF_STMT (v);
3153 if (is_gimple_assign (def_stmt))
3154 switch (gimple_assign_rhs_code (def_stmt))
3156 case PLUS_EXPR:
3157 t = gimple_assign_rhs2 (def_stmt);
3158 if (linear_step || TREE_CODE (t) != INTEGER_CST)
3159 return;
3160 base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
3161 v = gimple_assign_rhs1 (def_stmt);
3162 continue;
3163 case MULT_EXPR:
3164 t = gimple_assign_rhs2 (def_stmt);
3165 if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
3166 return;
3167 linear_step = tree_to_shwi (t);
3168 v = gimple_assign_rhs1 (def_stmt);
3169 continue;
3170 CASE_CONVERT:
3171 t = gimple_assign_rhs1 (def_stmt);
3172 if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
3173 || (TYPE_PRECISION (TREE_TYPE (v))
3174 < TYPE_PRECISION (TREE_TYPE (t))))
3175 return;
3176 if (!linear_step)
3177 linear_step = 1;
3178 v = t;
3179 continue;
3180 default:
3181 return;
3183 else if (gimple_call_internal_p (def_stmt, IFN_GOMP_SIMD_LANE)
3184 && loop->simduid
3185 && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
3186 && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
3187 == loop->simduid))
3189 if (!linear_step)
3190 linear_step = 1;
3191 arginfo->linear_step = linear_step;
3192 arginfo->op = base;
3193 arginfo->simd_lane_linear = true;
3194 return;
3199 /* Function vectorizable_simd_clone_call.
3201 Check if STMT performs a function call that can be vectorized
3202 by calling a simd clone of the function.
3203 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3204 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3205 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3207 static bool
3208 vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
3209 gimple **vec_stmt, slp_tree slp_node)
3211 tree vec_dest;
3212 tree scalar_dest;
3213 tree op, type;
3214 tree vec_oprnd0 = NULL_TREE;
3215 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
3216 tree vectype;
3217 unsigned int nunits;
3218 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3219 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3220 vec_info *vinfo = stmt_info->vinfo;
3221 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
3222 tree fndecl, new_temp;
3223 gimple *def_stmt;
3224 gimple *new_stmt = NULL;
3225 int ncopies, j;
3226 auto_vec<simd_call_arg_info> arginfo;
3227 vec<tree> vargs = vNULL;
3228 size_t i, nargs;
3229 tree lhs, rtype, ratype;
3230 vec<constructor_elt, va_gc> *ret_ctor_elts = NULL;
3232 /* Is STMT a vectorizable call? */
3233 if (!is_gimple_call (stmt))
3234 return false;
3236 fndecl = gimple_call_fndecl (stmt);
3237 if (fndecl == NULL_TREE)
3238 return false;
3240 struct cgraph_node *node = cgraph_node::get (fndecl);
3241 if (node == NULL || node->simd_clones == NULL)
3242 return false;
3244 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3245 return false;
3247 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
3248 && ! vec_stmt)
3249 return false;
3251 if (gimple_call_lhs (stmt)
3252 && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
3253 return false;
3255 gcc_checking_assert (!stmt_can_throw_internal (stmt));
3257 vectype = STMT_VINFO_VECTYPE (stmt_info);
3259 if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
3260 return false;
3262 /* FORNOW */
3263 if (slp_node)
3264 return false;
3266 /* Process function arguments. */
3267 nargs = gimple_call_num_args (stmt);
3269 /* Bail out if the function has zero arguments. */
3270 if (nargs == 0)
3271 return false;
3273 arginfo.reserve (nargs, true);
3275 for (i = 0; i < nargs; i++)
3277 simd_call_arg_info thisarginfo;
3278 affine_iv iv;
3280 thisarginfo.linear_step = 0;
3281 thisarginfo.align = 0;
3282 thisarginfo.op = NULL_TREE;
3283 thisarginfo.simd_lane_linear = false;
3285 op = gimple_call_arg (stmt, i);
3286 if (!vect_is_simple_use (op, vinfo, &def_stmt, &thisarginfo.dt,
3287 &thisarginfo.vectype)
3288 || thisarginfo.dt == vect_uninitialized_def)
3290 if (dump_enabled_p ())
3291 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3292 "use not simple.\n");
3293 return false;
3296 if (thisarginfo.dt == vect_constant_def
3297 || thisarginfo.dt == vect_external_def)
3298 gcc_assert (thisarginfo.vectype == NULL_TREE);
3299 else
3300 gcc_assert (thisarginfo.vectype != NULL_TREE);
3302 /* For linear arguments, the analyze phase should have saved
3303 the base and step in STMT_VINFO_SIMD_CLONE_INFO. */
3304 if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
3305 && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
3307 gcc_assert (vec_stmt);
3308 thisarginfo.linear_step
3309 = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
3310 thisarginfo.op
3311 = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
3312 thisarginfo.simd_lane_linear
3313 = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
3314 == boolean_true_node);
3315 /* If the loop has been peeled for alignment, we need to adjust it. */
3316 tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
3317 tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
3318 if (n1 != n2 && !thisarginfo.simd_lane_linear)
3320 tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
3321 tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
3322 tree opt = TREE_TYPE (thisarginfo.op);
3323 bias = fold_convert (TREE_TYPE (step), bias);
3324 bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
3325 thisarginfo.op
3326 = fold_build2 (POINTER_TYPE_P (opt)
3327 ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
3328 thisarginfo.op, bias);
3331 else if (!vec_stmt
3332 && thisarginfo.dt != vect_constant_def
3333 && thisarginfo.dt != vect_external_def
3334 && loop_vinfo
3335 && TREE_CODE (op) == SSA_NAME
3336 && simple_iv (loop, loop_containing_stmt (stmt), op,
3337 &iv, false)
3338 && tree_fits_shwi_p (iv.step))
3340 thisarginfo.linear_step = tree_to_shwi (iv.step);
3341 thisarginfo.op = iv.base;
3343 else if ((thisarginfo.dt == vect_constant_def
3344 || thisarginfo.dt == vect_external_def)
3345 && POINTER_TYPE_P (TREE_TYPE (op)))
3346 thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
3347 /* Addresses of array elements indexed by GOMP_SIMD_LANE are
3348 linear too. */
3349 if (POINTER_TYPE_P (TREE_TYPE (op))
3350 && !thisarginfo.linear_step
3351 && !vec_stmt
3352 && thisarginfo.dt != vect_constant_def
3353 && thisarginfo.dt != vect_external_def
3354 && loop_vinfo
3355 && !slp_node
3356 && TREE_CODE (op) == SSA_NAME)
3357 vect_simd_lane_linear (op, loop, &thisarginfo);
3359 arginfo.quick_push (thisarginfo);
3362 unsigned int badness = 0;
3363 struct cgraph_node *bestn = NULL;
3364 if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
3365 bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
3366 else
3367 for (struct cgraph_node *n = node->simd_clones; n != NULL;
3368 n = n->simdclone->next_clone)
3370 unsigned int this_badness = 0;
3371 if (n->simdclone->simdlen
3372 > (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
3373 || n->simdclone->nargs != nargs)
3374 continue;
3375 if (n->simdclone->simdlen
3376 < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo))
3377 this_badness += (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
3378 - exact_log2 (n->simdclone->simdlen)) * 1024;
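/* For example, a clone with simdlen 4 in a loop with vectorization
   factor 8 gets (3 - 2) * 1024 == 1024 added here, penalising clones
   that must be called more than once per vector iteration.  */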
3379 if (n->simdclone->inbranch)
3380 this_badness += 2048;
3381 int target_badness = targetm.simd_clone.usable (n);
3382 if (target_badness < 0)
3383 continue;
3384 this_badness += target_badness * 512;
3385 /* FORNOW: Have to add code to add the mask argument. */
3386 if (n->simdclone->inbranch)
3387 continue;
3388 for (i = 0; i < nargs; i++)
3390 switch (n->simdclone->args[i].arg_type)
3392 case SIMD_CLONE_ARG_TYPE_VECTOR:
3393 if (!useless_type_conversion_p
3394 (n->simdclone->args[i].orig_type,
3395 TREE_TYPE (gimple_call_arg (stmt, i))))
3396 i = -1;
3397 else if (arginfo[i].dt == vect_constant_def
3398 || arginfo[i].dt == vect_external_def
3399 || arginfo[i].linear_step)
3400 this_badness += 64;
3401 break;
3402 case SIMD_CLONE_ARG_TYPE_UNIFORM:
3403 if (arginfo[i].dt != vect_constant_def
3404 && arginfo[i].dt != vect_external_def)
3405 i = -1;
3406 break;
3407 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
3408 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
3409 if (arginfo[i].dt == vect_constant_def
3410 || arginfo[i].dt == vect_external_def
3411 || (arginfo[i].linear_step
3412 != n->simdclone->args[i].linear_step))
3413 i = -1;
3414 break;
3415 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
3416 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
3417 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
3418 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
3419 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
3420 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
3421 /* FORNOW */
3422 i = -1;
3423 break;
3424 case SIMD_CLONE_ARG_TYPE_MASK:
3425 gcc_unreachable ();
3427 if (i == (size_t) -1)
3428 break;
3429 if (n->simdclone->args[i].alignment > arginfo[i].align)
3431 i = -1;
3432 break;
3434 if (arginfo[i].align)
3435 this_badness += (exact_log2 (arginfo[i].align)
3436 - exact_log2 (n->simdclone->args[i].alignment));
3438 if (i == (size_t) -1)
3439 continue;
3440 if (bestn == NULL || this_badness < badness)
3442 bestn = n;
3443 badness = this_badness;
3447 if (bestn == NULL)
3448 return false;
3450 for (i = 0; i < nargs; i++)
3451 if ((arginfo[i].dt == vect_constant_def
3452 || arginfo[i].dt == vect_external_def)
3453 && bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
3455 arginfo[i].vectype
3456 = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
3457 i)));
3458 if (arginfo[i].vectype == NULL
3459 || (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
3460 > bestn->simdclone->simdlen))
3461 return false;
3464 fndecl = bestn->decl;
3465 nunits = bestn->simdclone->simdlen;
3466 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3468 /* If the function isn't const, only allow it in simd loops where the
3469 user has asserted that at least nunits consecutive iterations can be
3470 performed using SIMD instructions. */
3471 if ((loop == NULL || (unsigned) loop->safelen < nunits)
3472 && gimple_vuse (stmt))
3473 return false;
3475 /* Sanity check: make sure that at least one copy of the vectorized stmt
3476 needs to be generated. */
3477 gcc_assert (ncopies >= 1);
3479 if (!vec_stmt) /* transformation not required. */
3481 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
3482 for (i = 0; i < nargs; i++)
3483 if ((bestn->simdclone->args[i].arg_type
3484 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
3485 || (bestn->simdclone->args[i].arg_type
3486 == SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP))
3488 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
3489 + 1);
3490 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
3491 tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
3492 ? size_type_node : TREE_TYPE (arginfo[i].op);
3493 tree ls = build_int_cst (lst, arginfo[i].linear_step);
3494 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
3495 tree sll = arginfo[i].simd_lane_linear
3496 ? boolean_true_node : boolean_false_node;
3497 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
3499 STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
3500 if (dump_enabled_p ())
3501 dump_printf_loc (MSG_NOTE, vect_location,
3502 "=== vectorizable_simd_clone_call ===\n");
3503 /* vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
3504 return true;
3507 /* Transform. */
3509 if (dump_enabled_p ())
3510 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
3512 /* Handle def. */
3513 scalar_dest = gimple_call_lhs (stmt);
3514 vec_dest = NULL_TREE;
3515 rtype = NULL_TREE;
3516 ratype = NULL_TREE;
3517 if (scalar_dest)
3519 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3520 rtype = TREE_TYPE (TREE_TYPE (fndecl));
3521 if (TREE_CODE (rtype) == ARRAY_TYPE)
3523 ratype = rtype;
3524 rtype = TREE_TYPE (ratype);
3528 prev_stmt_info = NULL;
3529 for (j = 0; j < ncopies; ++j)
3531 /* Build argument list for the vectorized call. */
3532 if (j == 0)
3533 vargs.create (nargs);
3534 else
3535 vargs.truncate (0);
3537 for (i = 0; i < nargs; i++)
3539 unsigned int k, l, m, o;
3540 tree atype;
3541 op = gimple_call_arg (stmt, i);
3542 switch (bestn->simdclone->args[i].arg_type)
3544 case SIMD_CLONE_ARG_TYPE_VECTOR:
3545 atype = bestn->simdclone->args[i].vector_type;
3546 o = nunits / TYPE_VECTOR_SUBPARTS (atype);
3547 for (m = j * o; m < (j + 1) * o; m++)
3549 if (TYPE_VECTOR_SUBPARTS (atype)
3550 < TYPE_VECTOR_SUBPARTS (arginfo[i].vectype))
3552 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
3553 k = (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
3554 / TYPE_VECTOR_SUBPARTS (atype));
3555 gcc_assert ((k & (k - 1)) == 0);
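/* For example, if the clone takes V4SI arguments and the operand
   vector is V8SI, K is 2 and each half is extracted below with a
   BIT_FIELD_REF of PREC bits.  */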
3556 if (m == 0)
3557 vec_oprnd0
3558 = vect_get_vec_def_for_operand (op, stmt);
3559 else
3561 vec_oprnd0 = arginfo[i].op;
3562 if ((m & (k - 1)) == 0)
3563 vec_oprnd0
3564 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
3565 vec_oprnd0);
3567 arginfo[i].op = vec_oprnd0;
3568 vec_oprnd0
3569 = build3 (BIT_FIELD_REF, atype, vec_oprnd0,
3570 bitsize_int (prec),
3571 bitsize_int ((m & (k - 1)) * prec));
3572 new_stmt
3573 = gimple_build_assign (make_ssa_name (atype),
3574 vec_oprnd0);
3575 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3576 vargs.safe_push (gimple_assign_lhs (new_stmt));
3578 else
3580 k = (TYPE_VECTOR_SUBPARTS (atype)
3581 / TYPE_VECTOR_SUBPARTS (arginfo[i].vectype));
3582 gcc_assert ((k & (k - 1)) == 0);
3583 vec<constructor_elt, va_gc> *ctor_elts;
3584 if (k != 1)
3585 vec_alloc (ctor_elts, k);
3586 else
3587 ctor_elts = NULL;
3588 for (l = 0; l < k; l++)
3590 if (m == 0 && l == 0)
3591 vec_oprnd0
3592 = vect_get_vec_def_for_operand (op, stmt);
3593 else
3594 vec_oprnd0
3595 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
3596 arginfo[i].op);
3597 arginfo[i].op = vec_oprnd0;
3598 if (k == 1)
3599 break;
3600 CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
3601 vec_oprnd0);
3603 if (k == 1)
3604 vargs.safe_push (vec_oprnd0);
3605 else
3607 vec_oprnd0 = build_constructor (atype, ctor_elts);
3608 new_stmt
3609 = gimple_build_assign (make_ssa_name (atype),
3610 vec_oprnd0);
3611 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3612 vargs.safe_push (gimple_assign_lhs (new_stmt));
3616 break;
3617 case SIMD_CLONE_ARG_TYPE_UNIFORM:
3618 vargs.safe_push (op);
3619 break;
3620 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
3621 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
3622 if (j == 0)
3624 gimple_seq stmts;
3625 arginfo[i].op
3626 = force_gimple_operand (arginfo[i].op, &stmts, true,
3627 NULL_TREE);
3628 if (stmts != NULL)
3630 basic_block new_bb;
3631 edge pe = loop_preheader_edge (loop);
3632 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
3633 gcc_assert (!new_bb);
3635 if (arginfo[i].simd_lane_linear)
3637 vargs.safe_push (arginfo[i].op);
3638 break;
3640 tree phi_res = copy_ssa_name (op);
3641 gphi *new_phi = create_phi_node (phi_res, loop->header);
3642 set_vinfo_for_stmt (new_phi,
3643 new_stmt_vec_info (new_phi, loop_vinfo));
3644 add_phi_arg (new_phi, arginfo[i].op,
3645 loop_preheader_edge (loop), UNKNOWN_LOCATION);
3646 enum tree_code code
3647 = POINTER_TYPE_P (TREE_TYPE (op))
3648 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3649 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3650 ? sizetype : TREE_TYPE (op);
3651 widest_int cst
3652 = wi::mul (bestn->simdclone->args[i].linear_step,
3653 ncopies * nunits);
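/* One vector iteration executes ncopies calls of simdlen nunits each,
   i.e. ncopies * nunits scalar lanes, so the PHI result is advanced by
   linear_step times that amount per iteration.  */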
3654 tree tcst = wide_int_to_tree (type, cst);
3655 tree phi_arg = copy_ssa_name (op);
3656 new_stmt
3657 = gimple_build_assign (phi_arg, code, phi_res, tcst);
3658 gimple_stmt_iterator si = gsi_after_labels (loop->header);
3659 gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
3660 set_vinfo_for_stmt (new_stmt,
3661 new_stmt_vec_info (new_stmt, loop_vinfo));
3662 add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
3663 UNKNOWN_LOCATION);
3664 arginfo[i].op = phi_res;
3665 vargs.safe_push (phi_res);
3667 else
3669 enum tree_code code
3670 = POINTER_TYPE_P (TREE_TYPE (op))
3671 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3672 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3673 ? sizetype : TREE_TYPE (op);
3674 widest_int cst
3675 = wi::mul (bestn->simdclone->args[i].linear_step,
3676 j * nunits);
3677 tree tcst = wide_int_to_tree (type, cst);
3678 new_temp = make_ssa_name (TREE_TYPE (op));
3679 new_stmt = gimple_build_assign (new_temp, code,
3680 arginfo[i].op, tcst);
3681 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3682 vargs.safe_push (new_temp);
3684 break;
3685 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
3686 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
3687 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
3688 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
3689 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
3690 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
3691 default:
3692 gcc_unreachable ();
3696 new_stmt = gimple_build_call_vec (fndecl, vargs);
3697 if (vec_dest)
3699 gcc_assert (ratype || TYPE_VECTOR_SUBPARTS (rtype) == nunits);
3700 if (ratype)
3701 new_temp = create_tmp_var (ratype);
3702 else if (TYPE_VECTOR_SUBPARTS (vectype)
3703 == TYPE_VECTOR_SUBPARTS (rtype))
3704 new_temp = make_ssa_name (vec_dest, new_stmt);
3705 else
3706 new_temp = make_ssa_name (rtype, new_stmt);
3707 gimple_call_set_lhs (new_stmt, new_temp);
3709 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3711 if (vec_dest)
3713 if (TYPE_VECTOR_SUBPARTS (vectype) < nunits)
3715 unsigned int k, l;
3716 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
3717 k = nunits / TYPE_VECTOR_SUBPARTS (vectype);
3718 gcc_assert ((k & (k - 1)) == 0);
3719 for (l = 0; l < k; l++)
3721 tree t;
3722 if (ratype)
3724 t = build_fold_addr_expr (new_temp);
3725 t = build2 (MEM_REF, vectype, t,
3726 build_int_cst (TREE_TYPE (t),
3727 l * prec / BITS_PER_UNIT));
3729 else
3730 t = build3 (BIT_FIELD_REF, vectype, new_temp,
3731 bitsize_int (prec), bitsize_int (l * prec));
3732 new_stmt
3733 = gimple_build_assign (make_ssa_name (vectype), t);
3734 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3735 if (j == 0 && l == 0)
3736 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3737 else
3738 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3740 prev_stmt_info = vinfo_for_stmt (new_stmt);
3743 if (ratype)
3745 tree clobber = build_constructor (ratype, NULL);
3746 TREE_THIS_VOLATILE (clobber) = 1;
3747 new_stmt = gimple_build_assign (new_temp, clobber);
3748 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3750 continue;
3752 else if (TYPE_VECTOR_SUBPARTS (vectype) > nunits)
3754 unsigned int k = (TYPE_VECTOR_SUBPARTS (vectype)
3755 / TYPE_VECTOR_SUBPARTS (rtype));
3756 gcc_assert ((k & (k - 1)) == 0);
3757 if ((j & (k - 1)) == 0)
3758 vec_alloc (ret_ctor_elts, k);
3759 if (ratype)
3761 unsigned int m, o = nunits / TYPE_VECTOR_SUBPARTS (rtype);
3762 for (m = 0; m < o; m++)
3764 tree tem = build4 (ARRAY_REF, rtype, new_temp,
3765 size_int (m), NULL_TREE, NULL_TREE);
3766 new_stmt
3767 = gimple_build_assign (make_ssa_name (rtype), tem);
3768 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3769 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
3770 gimple_assign_lhs (new_stmt));
3772 tree clobber = build_constructor (ratype, NULL);
3773 TREE_THIS_VOLATILE (clobber) = 1;
3774 new_stmt = gimple_build_assign (new_temp, clobber);
3775 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3777 else
3778 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
3779 if ((j & (k - 1)) != k - 1)
3780 continue;
3781 vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
3782 new_stmt
3783 = gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
3784 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3786 if ((unsigned) j == k - 1)
3787 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3788 else
3789 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3791 prev_stmt_info = vinfo_for_stmt (new_stmt);
3792 continue;
3794 else if (ratype)
3796 tree t = build_fold_addr_expr (new_temp);
3797 t = build2 (MEM_REF, vectype, t,
3798 build_int_cst (TREE_TYPE (t), 0));
3799 new_stmt
3800 = gimple_build_assign (make_ssa_name (vec_dest), t);
3801 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3802 tree clobber = build_constructor (ratype, NULL);
3803 TREE_THIS_VOLATILE (clobber) = 1;
3804 vect_finish_stmt_generation (stmt,
3805 gimple_build_assign (new_temp,
3806 clobber), gsi);
3810 if (j == 0)
3811 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3812 else
3813 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3815 prev_stmt_info = vinfo_for_stmt (new_stmt);
3818 vargs.release ();
3820 /* The call in STMT might prevent it from being removed in DCE.
3821 We however cannot remove it here, due to the way the SSA name
3822 it defines is mapped to the new definition. So just replace the
3823 rhs of the statement with something harmless. */
3825 if (slp_node)
3826 return true;
3828 if (scalar_dest)
3830 type = TREE_TYPE (scalar_dest);
3831 if (is_pattern_stmt_p (stmt_info))
3832 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
3833 else
3834 lhs = gimple_call_lhs (stmt);
3835 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
3837 else
3838 new_stmt = gimple_build_nop ();
3839 set_vinfo_for_stmt (new_stmt, stmt_info);
3840 set_vinfo_for_stmt (stmt, NULL);
3841 STMT_VINFO_STMT (stmt_info) = new_stmt;
3842 gsi_replace (gsi, new_stmt, true);
3843 unlink_stmt_vdef (stmt);
3845 return true;
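/* (Editor's illustrative sketch; not part of the original file.)
   A plain-C picture of what the SIMD-clone transform above achieves.
   The names foo/foo_simd4 and the v4sf type are hypothetical stand-ins,
   not GCC internals: the scalar call in each iteration is replaced by
   one call to a clone that handles a whole vector of lanes.  */

typedef float v4sf __attribute__ ((vector_size (16)));

static float
foo (float x)                   /* the original scalar function  */
{
  return x * 2.0f + 1.0f;
}

static v4sf
foo_simd4 (v4sf x)              /* what a simdlen-4 clone computes  */
{
  for (int i = 0; i < 4; i++)
    x[i] = foo (x[i]);
  return x;
}

static void
simd_clone_before_sketch (float *out, const float *in, int n)
{
  for (int i = 0; i < n; i++)
    out[i] = foo (in[i]);       /* one scalar call per iteration  */
}

static void
simd_clone_after_sketch (float *out, const float *in, int n)
{
  /* Assuming n is a multiple of 4: one clone call per four lanes.  */
  for (int i = 0; i < n; i += 4)
    {
      v4sf x;
      __builtin_memcpy (&x, in + i, sizeof x);
      v4sf y = foo_simd4 (x);
      __builtin_memcpy (out + i, &y, sizeof y);
    }
}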
3849 /* Function vect_gen_widened_results_half
3851 Create a vector stmt whose code, operation type, and result
3852 variable are CODE, OP_TYPE, and VEC_DEST, and whose arguments are
3853 VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at GSI.
3854 In the case that CODE is a CALL_EXPR, this means that a call to DECL
3855 needs to be created (DECL is a function-decl of a target builtin).
3856 STMT is the original scalar stmt that we are vectorizing. */
3858 static gimple *
3859 vect_gen_widened_results_half (enum tree_code code,
3860 tree decl,
3861 tree vec_oprnd0, tree vec_oprnd1, int op_type,
3862 tree vec_dest, gimple_stmt_iterator *gsi,
3863 gimple *stmt)
3865 gimple *new_stmt;
3866 tree new_temp;
3868 /* Generate half of the widened result: */
3869 if (code == CALL_EXPR)
3871 /* Target specific support */
3872 if (op_type == binary_op)
3873 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
3874 else
3875 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
3876 new_temp = make_ssa_name (vec_dest, new_stmt);
3877 gimple_call_set_lhs (new_stmt, new_temp);
3879 else
3881 /* Generic support */
3882 gcc_assert (op_type == TREE_CODE_LENGTH (code));
3883 if (op_type != binary_op)
3884 vec_oprnd1 = NULL;
3885 new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
3886 new_temp = make_ssa_name (vec_dest, new_stmt);
3887 gimple_assign_set_lhs (new_stmt, new_temp);
3889 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3891 return new_stmt;
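/* (Editor's illustrative sketch; not part of the original file.)
   Element-wise view of the two "halves" built by
   vect_gen_widened_results_half: widening eight shorts to ints yields
   two int vectors, one from each half of the input lanes.  Plain arrays
   stand in for vector registers; which half maps to the LO variant and
   which to the HI variant is target and endian dependent.  */

static void
widen_halves_sketch (const short src[8], int half0[4], int half1[4])
{
  for (int i = 0; i < 4; i++)
    half0[i] = (int) src[i];       /* first widened result half  */
  for (int i = 0; i < 4; i++)
    half1[i] = (int) src[4 + i];   /* second widened result half  */
}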
3895 /* Get vectorized definitions for loop-based vectorization. For the first
3896 operand we call vect_get_vec_def_for_operand() (with OPRND containing
3897 the scalar operand), and for the rest we get a copy with
3898 vect_get_vec_def_for_stmt_copy() using the previous vector definition
3899 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
3900 The vectors are collected into VEC_OPRNDS. */
3902 static void
3903 vect_get_loop_based_defs (tree *oprnd, gimple *stmt, enum vect_def_type dt,
3904 vec<tree> *vec_oprnds, int multi_step_cvt)
3906 tree vec_oprnd;
3908 /* Get first vector operand. */
3909 /* All the vector operands except the very first one (that is, the scalar OPRND)
3910 are stmt copies. */
3911 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
3912 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt);
3913 else
3914 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
3916 vec_oprnds->quick_push (vec_oprnd);
3918 /* Get second vector operand. */
3919 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
3920 vec_oprnds->quick_push (vec_oprnd);
3922 *oprnd = vec_oprnd;
3924 /* For conversion in multiple steps, continue to get operands
3925 recursively. */
3926 if (multi_step_cvt)
3927 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
3931 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
3932 For multi-step conversions store the resulting vectors and call the function
3933 recursively. */
3935 static void
3936 vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
3937 int multi_step_cvt, gimple *stmt,
3938 vec<tree> vec_dsts,
3939 gimple_stmt_iterator *gsi,
3940 slp_tree slp_node, enum tree_code code,
3941 stmt_vec_info *prev_stmt_info)
3943 unsigned int i;
3944 tree vop0, vop1, new_tmp, vec_dest;
3945 gimple *new_stmt;
3946 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3948 vec_dest = vec_dsts.pop ();
3950 for (i = 0; i < vec_oprnds->length (); i += 2)
3952 /* Create demotion operation. */
3953 vop0 = (*vec_oprnds)[i];
3954 vop1 = (*vec_oprnds)[i + 1];
3955 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
3956 new_tmp = make_ssa_name (vec_dest, new_stmt);
3957 gimple_assign_set_lhs (new_stmt, new_tmp);
3958 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3960 if (multi_step_cvt)
3961 /* Store the resulting vector for next recursive call. */
3962 (*vec_oprnds)[i/2] = new_tmp;
3963 else
3965 /* This is the last step of the conversion sequence. Store the
3966 vectors in SLP_NODE or in the vector info of the scalar statement
3967 (or in the STMT_VINFO_RELATED_STMT chain). */
3968 if (slp_node)
3969 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3970 else
3972 if (!*prev_stmt_info)
3973 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3974 else
3975 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
3977 *prev_stmt_info = vinfo_for_stmt (new_stmt);
3982 /* For multi-step demotion operations we first generate demotion operations
3983 from the source type to the intermediate types, and then combine the
3984 results (stored in VEC_OPRNDS) in a demotion operation to the destination
3985 type. */
3986 if (multi_step_cvt)
3988 /* At each level of recursion we have half of the operands we had at the
3989 previous level. */
3990 vec_oprnds->truncate ((i+1)/2);
3991 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
3992 stmt, vec_dsts, gsi, slp_node,
3993 VEC_PACK_TRUNC_EXPR,
3994 prev_stmt_info);
3997 vec_dsts.quick_push (vec_dest);
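/* (Editor's illustrative sketch; not part of the original file.)
   The pairing done by vect_create_vectorized_demotion_stmts, in plain C:
   two four-lane int vectors are packed into one eight-lane short vector,
   which is what a VEC_PACK_TRUNC_EXPR-style statement produces.  A
   multi-step demotion simply repeats this on the packed results.  */

static void
demote_pair_sketch (const int a[4], const int b[4], short packed[8])
{
  for (int i = 0; i < 4; i++)
    packed[i] = (short) a[i];      /* truncated lanes of the first operand  */
  for (int i = 0; i < 4; i++)
    packed[4 + i] = (short) b[i];  /* truncated lanes of the second operand  */
}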
4001 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
4002 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
4003 the resulting vectors and call the function recursively. */
4005 static void
4006 vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
4007 vec<tree> *vec_oprnds1,
4008 gimple *stmt, tree vec_dest,
4009 gimple_stmt_iterator *gsi,
4010 enum tree_code code1,
4011 enum tree_code code2, tree decl1,
4012 tree decl2, int op_type)
4014 int i;
4015 tree vop0, vop1, new_tmp1, new_tmp2;
4016 gimple *new_stmt1, *new_stmt2;
4017 vec<tree> vec_tmp = vNULL;
4019 vec_tmp.create (vec_oprnds0->length () * 2);
4020 FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
4022 if (op_type == binary_op)
4023 vop1 = (*vec_oprnds1)[i];
4024 else
4025 vop1 = NULL_TREE;
4027 /* Generate the two halves of promotion operation. */
4028 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
4029 op_type, vec_dest, gsi, stmt);
4030 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
4031 op_type, vec_dest, gsi, stmt);
4032 if (is_gimple_call (new_stmt1))
4034 new_tmp1 = gimple_call_lhs (new_stmt1);
4035 new_tmp2 = gimple_call_lhs (new_stmt2);
4037 else
4039 new_tmp1 = gimple_assign_lhs (new_stmt1);
4040 new_tmp2 = gimple_assign_lhs (new_stmt2);
4043 /* Store the results for the next step. */
4044 vec_tmp.quick_push (new_tmp1);
4045 vec_tmp.quick_push (new_tmp2);
4048 vec_oprnds0->release ();
4049 *vec_oprnds0 = vec_tmp;
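/* (Editor's illustrative sketch; not part of the original file.)
   Bookkeeping view of vect_create_vectorized_promotion_stmts: each input
   vector yields two result vectors (the two widened halves), so every
   promotion step doubles the number of vector operands that feed the
   next step.  */

static int
promotion_operand_count_sketch (int ninputs, int nsteps)
{
  int n = ninputs;
  for (int i = 0; i < nsteps; i++)
    n *= 2;                        /* two halves per vector, per step  */
  return n;
}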
4053 /* Check if STMT performs a conversion operation that can be vectorized.
4054 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4055 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4056 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4058 static bool
4059 vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
4060 gimple **vec_stmt, slp_tree slp_node)
4062 tree vec_dest;
4063 tree scalar_dest;
4064 tree op0, op1 = NULL_TREE;
4065 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
4066 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4067 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4068 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
4069 enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
4070 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
4071 tree new_temp;
4072 gimple *def_stmt;
4073 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4074 int ndts = 2;
4075 gimple *new_stmt = NULL;
4076 stmt_vec_info prev_stmt_info;
4077 int nunits_in;
4078 int nunits_out;
4079 tree vectype_out, vectype_in;
4080 int ncopies, i, j;
4081 tree lhs_type, rhs_type;
4082 enum { NARROW, NONE, WIDEN } modifier;
4083 vec<tree> vec_oprnds0 = vNULL;
4084 vec<tree> vec_oprnds1 = vNULL;
4085 tree vop0;
4086 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4087 vec_info *vinfo = stmt_info->vinfo;
4088 int multi_step_cvt = 0;
4089 vec<tree> interm_types = vNULL;
4090 tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
4091 int op_type;
4092 unsigned short fltsz;
4094 /* Is STMT a vectorizable conversion? */
4096 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4097 return false;
4099 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4100 && ! vec_stmt)
4101 return false;
4103 if (!is_gimple_assign (stmt))
4104 return false;
4106 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4107 return false;
4109 code = gimple_assign_rhs_code (stmt);
4110 if (!CONVERT_EXPR_CODE_P (code)
4111 && code != FIX_TRUNC_EXPR
4112 && code != FLOAT_EXPR
4113 && code != WIDEN_MULT_EXPR
4114 && code != WIDEN_LSHIFT_EXPR)
4115 return false;
4117 op_type = TREE_CODE_LENGTH (code);
4119 /* Check types of lhs and rhs. */
4120 scalar_dest = gimple_assign_lhs (stmt);
4121 lhs_type = TREE_TYPE (scalar_dest);
4122 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4124 op0 = gimple_assign_rhs1 (stmt);
4125 rhs_type = TREE_TYPE (op0);
4127 if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
4128 && !((INTEGRAL_TYPE_P (lhs_type)
4129 && INTEGRAL_TYPE_P (rhs_type))
4130 || (SCALAR_FLOAT_TYPE_P (lhs_type)
4131 && SCALAR_FLOAT_TYPE_P (rhs_type))))
4132 return false;
4134 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
4135 && ((INTEGRAL_TYPE_P (lhs_type)
4136 && !type_has_mode_precision_p (lhs_type))
4137 || (INTEGRAL_TYPE_P (rhs_type)
4138 && !type_has_mode_precision_p (rhs_type))))
4140 if (dump_enabled_p ())
4141 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4142 "type conversion to/from bit-precision unsupported."
4143 "\n");
4144 return false;
4147 /* Check the operands of the operation. */
4148 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype_in))
4150 if (dump_enabled_p ())
4151 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4152 "use not simple.\n");
4153 return false;
4155 if (op_type == binary_op)
4157 bool ok;
4159 op1 = gimple_assign_rhs2 (stmt);
4160 gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
4161 /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
4162 OP1. */
4163 if (CONSTANT_CLASS_P (op0))
4164 ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &vectype_in);
4165 else
4166 ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]);
4168 if (!ok)
4170 if (dump_enabled_p ())
4171 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4172 "use not simple.\n");
4173 return false;
4177 /* If op0 is an external or constant def, use a vector type of
4178 the same size as the output vector type. */
4179 if (!vectype_in)
4180 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
4181 if (vec_stmt)
4182 gcc_assert (vectype_in);
4183 if (!vectype_in)
4185 if (dump_enabled_p ())
4187 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4188 "no vectype for scalar type ");
4189 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
4190 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4193 return false;
4196 if (VECTOR_BOOLEAN_TYPE_P (vectype_out)
4197 && !VECTOR_BOOLEAN_TYPE_P (vectype_in))
4199 if (dump_enabled_p ())
4201 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4202 "can't convert between boolean and non "
4203 "boolean vectors");
4204 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
4205 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4208 return false;
4211 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
4212 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4213 if (nunits_in < nunits_out)
4214 modifier = NARROW;
4215 else if (nunits_out == nunits_in)
4216 modifier = NONE;
4217 else
4218 modifier = WIDEN;
4220 /* Multiple types in SLP are handled by creating the appropriate number of
4221 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4222 case of SLP. */
4223 if (slp_node)
4224 ncopies = 1;
4225 else if (modifier == NARROW)
4226 ncopies = vect_get_num_copies (loop_vinfo, vectype_out);
4227 else
4228 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
4230 /* Sanity check: make sure that at least one copy of the vectorized stmt
4231 needs to be generated. */
4232 gcc_assert (ncopies >= 1);
4234 bool found_mode = false;
4235 scalar_mode lhs_mode = SCALAR_TYPE_MODE (lhs_type);
4236 scalar_mode rhs_mode = SCALAR_TYPE_MODE (rhs_type);
4237 opt_scalar_mode rhs_mode_iter;
4239 /* Supportable by target? */
4240 switch (modifier)
4242 case NONE:
4243 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
4244 return false;
4245 if (supportable_convert_operation (code, vectype_out, vectype_in,
4246 &decl1, &code1))
4247 break;
4248 /* FALLTHRU */
4249 unsupported:
4250 if (dump_enabled_p ())
4251 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4252 "conversion not supported by target.\n");
4253 return false;
4255 case WIDEN:
4256 if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
4257 &code1, &code2, &multi_step_cvt,
4258 &interm_types))
4260 /* A binary widening operation can only be supported directly by the
4261 architecture. */
4262 gcc_assert (!(multi_step_cvt && op_type == binary_op));
4263 break;
4266 if (code != FLOAT_EXPR
4267 || GET_MODE_SIZE (lhs_mode) <= GET_MODE_SIZE (rhs_mode))
4268 goto unsupported;
4270 fltsz = GET_MODE_SIZE (lhs_mode);
4271 FOR_EACH_2XWIDER_MODE (rhs_mode_iter, rhs_mode)
4273 rhs_mode = rhs_mode_iter.require ();
4274 if (GET_MODE_SIZE (rhs_mode) > fltsz)
4275 break;
4277 cvt_type
4278 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
4279 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
4280 if (cvt_type == NULL_TREE)
4281 goto unsupported;
4283 if (GET_MODE_SIZE (rhs_mode) == fltsz)
4285 if (!supportable_convert_operation (code, vectype_out,
4286 cvt_type, &decl1, &codecvt1))
4287 goto unsupported;
4289 else if (!supportable_widening_operation (code, stmt, vectype_out,
4290 cvt_type, &codecvt1,
4291 &codecvt2, &multi_step_cvt,
4292 &interm_types))
4293 continue;
4294 else
4295 gcc_assert (multi_step_cvt == 0);
4297 if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
4298 vectype_in, &code1, &code2,
4299 &multi_step_cvt, &interm_types))
4301 found_mode = true;
4302 break;
4306 if (!found_mode)
4307 goto unsupported;
4309 if (GET_MODE_SIZE (rhs_mode) == fltsz)
4310 codecvt2 = ERROR_MARK;
4311 else
4313 multi_step_cvt++;
4314 interm_types.safe_push (cvt_type);
4315 cvt_type = NULL_TREE;
4317 break;
4319 case NARROW:
4320 gcc_assert (op_type == unary_op);
4321 if (supportable_narrowing_operation (code, vectype_out, vectype_in,
4322 &code1, &multi_step_cvt,
4323 &interm_types))
4324 break;
4326 if (code != FIX_TRUNC_EXPR
4327 || GET_MODE_SIZE (lhs_mode) >= GET_MODE_SIZE (rhs_mode))
4328 goto unsupported;
4330 cvt_type
4331 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
4332 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
4333 if (cvt_type == NULL_TREE)
4334 goto unsupported;
4335 if (!supportable_convert_operation (code, cvt_type, vectype_in,
4336 &decl1, &codecvt1))
4337 goto unsupported;
4338 if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
4339 &code1, &multi_step_cvt,
4340 &interm_types))
4341 break;
4342 goto unsupported;
4344 default:
4345 gcc_unreachable ();
4348 if (!vec_stmt) /* transformation not required. */
4350 if (dump_enabled_p ())
4351 dump_printf_loc (MSG_NOTE, vect_location,
4352 "=== vectorizable_conversion ===\n");
4353 if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
4355 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
4356 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
4358 else if (modifier == NARROW)
4360 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
4361 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
4363 else
4365 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
4366 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
4368 interm_types.release ();
4369 return true;
4372 /* Transform. */
4373 if (dump_enabled_p ())
4374 dump_printf_loc (MSG_NOTE, vect_location,
4375 "transform conversion. ncopies = %d.\n", ncopies);
4377 if (op_type == binary_op)
4379 if (CONSTANT_CLASS_P (op0))
4380 op0 = fold_convert (TREE_TYPE (op1), op0);
4381 else if (CONSTANT_CLASS_P (op1))
4382 op1 = fold_convert (TREE_TYPE (op0), op1);
4385 /* In case of multi-step conversion, we first generate conversion operations
4386 to the intermediate types, and then from those types to the final one.
4387 We create vector destinations for the intermediate type (TYPES) received
4388 from supportable_*_operation, and store them in the correct order
4389 for future use in vect_create_vectorized_*_stmts (). */
4390 auto_vec<tree> vec_dsts (multi_step_cvt + 1);
4391 vec_dest = vect_create_destination_var (scalar_dest,
4392 (cvt_type && modifier == WIDEN)
4393 ? cvt_type : vectype_out);
4394 vec_dsts.quick_push (vec_dest);
4396 if (multi_step_cvt)
4398 for (i = interm_types.length () - 1;
4399 interm_types.iterate (i, &intermediate_type); i--)
4401 vec_dest = vect_create_destination_var (scalar_dest,
4402 intermediate_type);
4403 vec_dsts.quick_push (vec_dest);
4407 if (cvt_type)
4408 vec_dest = vect_create_destination_var (scalar_dest,
4409 modifier == WIDEN
4410 ? vectype_out : cvt_type);
4412 if (!slp_node)
4414 if (modifier == WIDEN)
4416 vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
4417 if (op_type == binary_op)
4418 vec_oprnds1.create (1);
4420 else if (modifier == NARROW)
4421 vec_oprnds0.create (
4422 2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
4424 else if (code == WIDEN_LSHIFT_EXPR)
4425 vec_oprnds1.create (slp_node->vec_stmts_size);
4427 last_oprnd = op0;
4428 prev_stmt_info = NULL;
4429 switch (modifier)
4431 case NONE:
4432 for (j = 0; j < ncopies; j++)
4434 if (j == 0)
4435 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
4436 else
4437 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
4439 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4441 /* Arguments are ready; create the new vector stmt. */
4442 if (code1 == CALL_EXPR)
4444 new_stmt = gimple_build_call (decl1, 1, vop0);
4445 new_temp = make_ssa_name (vec_dest, new_stmt);
4446 gimple_call_set_lhs (new_stmt, new_temp);
4448 else
4450 gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
4451 new_stmt = gimple_build_assign (vec_dest, code1, vop0);
4452 new_temp = make_ssa_name (vec_dest, new_stmt);
4453 gimple_assign_set_lhs (new_stmt, new_temp);
4456 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4457 if (slp_node)
4458 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4459 else
4461 if (!prev_stmt_info)
4462 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4463 else
4464 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4465 prev_stmt_info = vinfo_for_stmt (new_stmt);
4469 break;
4471 case WIDEN:
4472 /* In case the vectorization factor (VF) is bigger than the number
4473 of elements that we can fit in a vectype (nunits), we have to
4474 generate more than one vector stmt, i.e., we need to "unroll"
4475 the vector stmt by a factor of VF/nunits. */
4476 for (j = 0; j < ncopies; j++)
4478 /* Handle uses. */
4479 if (j == 0)
4481 if (slp_node)
4483 if (code == WIDEN_LSHIFT_EXPR)
4485 unsigned int k;
4487 vec_oprnd1 = op1;
4488 /* Store vec_oprnd1 for every vector stmt to be created
4489 for SLP_NODE. We check during the analysis that all
4490 the shift arguments are the same. */
4491 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
4492 vec_oprnds1.quick_push (vec_oprnd1);
4494 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4495 slp_node);
4497 else
4498 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
4499 &vec_oprnds1, slp_node);
4501 else
4503 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt);
4504 vec_oprnds0.quick_push (vec_oprnd0);
4505 if (op_type == binary_op)
4507 if (code == WIDEN_LSHIFT_EXPR)
4508 vec_oprnd1 = op1;
4509 else
4510 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt);
4511 vec_oprnds1.quick_push (vec_oprnd1);
4515 else
4517 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
4518 vec_oprnds0.truncate (0);
4519 vec_oprnds0.quick_push (vec_oprnd0);
4520 if (op_type == binary_op)
4522 if (code == WIDEN_LSHIFT_EXPR)
4523 vec_oprnd1 = op1;
4524 else
4525 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
4526 vec_oprnd1);
4527 vec_oprnds1.truncate (0);
4528 vec_oprnds1.quick_push (vec_oprnd1);
4532 /* Arguments are ready. Create the new vector stmts. */
4533 for (i = multi_step_cvt; i >= 0; i--)
4535 tree this_dest = vec_dsts[i];
4536 enum tree_code c1 = code1, c2 = code2;
4537 if (i == 0 && codecvt2 != ERROR_MARK)
4539 c1 = codecvt1;
4540 c2 = codecvt2;
4542 vect_create_vectorized_promotion_stmts (&vec_oprnds0,
4543 &vec_oprnds1,
4544 stmt, this_dest, gsi,
4545 c1, c2, decl1, decl2,
4546 op_type);
4549 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4551 if (cvt_type)
4553 if (codecvt1 == CALL_EXPR)
4555 new_stmt = gimple_build_call (decl1, 1, vop0);
4556 new_temp = make_ssa_name (vec_dest, new_stmt);
4557 gimple_call_set_lhs (new_stmt, new_temp);
4559 else
4561 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
4562 new_temp = make_ssa_name (vec_dest);
4563 new_stmt = gimple_build_assign (new_temp, codecvt1,
4564 vop0);
4567 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4569 else
4570 new_stmt = SSA_NAME_DEF_STMT (vop0);
4572 if (slp_node)
4573 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4574 else
4576 if (!prev_stmt_info)
4577 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
4578 else
4579 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4580 prev_stmt_info = vinfo_for_stmt (new_stmt);
4585 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4586 break;
4588 case NARROW:
4589 /* In case the vectorization factor (VF) is bigger than the number
4590 of elements that we can fit in a vectype (nunits), we have to
4591 generate more than one vector stmt, i.e., we need to "unroll"
4592 the vector stmt by a factor of VF/nunits. */
4593 for (j = 0; j < ncopies; j++)
4595 /* Handle uses. */
4596 if (slp_node)
4597 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4598 slp_node);
4599 else
4601 vec_oprnds0.truncate (0);
4602 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
4603 vect_pow2 (multi_step_cvt) - 1);
4606 /* Arguments are ready. Create the new vector stmts. */
4607 if (cvt_type)
4608 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4610 if (codecvt1 == CALL_EXPR)
4612 new_stmt = gimple_build_call (decl1, 1, vop0);
4613 new_temp = make_ssa_name (vec_dest, new_stmt);
4614 gimple_call_set_lhs (new_stmt, new_temp);
4616 else
4618 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
4619 new_temp = make_ssa_name (vec_dest);
4620 new_stmt = gimple_build_assign (new_temp, codecvt1,
4621 vop0);
4624 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4625 vec_oprnds0[i] = new_temp;
4628 vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
4629 stmt, vec_dsts, gsi,
4630 slp_node, code1,
4631 &prev_stmt_info);
4634 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4635 break;
4638 vec_oprnds0.release ();
4639 vec_oprnds1.release ();
4640 interm_types.release ();
4642 return true;
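/* (Editor's illustrative sketch; not part of the original file.)
   How the NARROW/NONE/WIDEN modifier used above follows from the lane
   counts, mirroring the nunits_in/nunits_out comparison in
   vectorizable_conversion.  With a hypothetical 128-bit vector: int to
   unsigned int (4 -> 4 lanes) is NONE, short to int (8 -> 4 lanes) is
   WIDEN, and int to short (4 -> 8 lanes) is NARROW.  */

enum conv_modifier_sketch { SKETCH_NARROW, SKETCH_NONE, SKETCH_WIDEN };

static enum conv_modifier_sketch
classify_conversion_sketch (int nunits_in, int nunits_out)
{
  if (nunits_in < nunits_out)
    return SKETCH_NARROW;          /* more, narrower output lanes  */
  else if (nunits_in == nunits_out)
    return SKETCH_NONE;
  else
    return SKETCH_WIDEN;           /* fewer, wider output lanes  */
}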
4646 /* Function vectorizable_assignment.
4648 Check if STMT performs an assignment (copy) that can be vectorized.
4649 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4650 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4651 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4653 static bool
4654 vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
4655 gimple **vec_stmt, slp_tree slp_node)
4657 tree vec_dest;
4658 tree scalar_dest;
4659 tree op;
4660 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4661 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4662 tree new_temp;
4663 gimple *def_stmt;
4664 enum vect_def_type dt[1] = {vect_unknown_def_type};
4665 int ndts = 1;
4666 int ncopies;
4667 int i, j;
4668 vec<tree> vec_oprnds = vNULL;
4669 tree vop;
4670 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4671 vec_info *vinfo = stmt_info->vinfo;
4672 gimple *new_stmt = NULL;
4673 stmt_vec_info prev_stmt_info = NULL;
4674 enum tree_code code;
4675 tree vectype_in;
4677 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4678 return false;
4680 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4681 && ! vec_stmt)
4682 return false;
4684 /* Is vectorizable assignment? */
4685 if (!is_gimple_assign (stmt))
4686 return false;
4688 scalar_dest = gimple_assign_lhs (stmt);
4689 if (TREE_CODE (scalar_dest) != SSA_NAME)
4690 return false;
4692 code = gimple_assign_rhs_code (stmt);
4693 if (gimple_assign_single_p (stmt)
4694 || code == PAREN_EXPR
4695 || CONVERT_EXPR_CODE_P (code))
4696 op = gimple_assign_rhs1 (stmt);
4697 else
4698 return false;
4700 if (code == VIEW_CONVERT_EXPR)
4701 op = TREE_OPERAND (op, 0);
4703 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4704 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4706 /* Multiple types in SLP are handled by creating the appropriate number of
4707 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4708 case of SLP. */
4709 if (slp_node)
4710 ncopies = 1;
4711 else
4712 ncopies = vect_get_num_copies (loop_vinfo, vectype);
4714 gcc_assert (ncopies >= 1);
4716 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[0], &vectype_in))
4718 if (dump_enabled_p ())
4719 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4720 "use not simple.\n");
4721 return false;
4724 /* We can handle NOP_EXPR conversions that do not change the number
4725 of elements or the vector size. */
4726 if ((CONVERT_EXPR_CODE_P (code)
4727 || code == VIEW_CONVERT_EXPR)
4728 && (!vectype_in
4729 || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
4730 || (GET_MODE_SIZE (TYPE_MODE (vectype))
4731 != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
4732 return false;
4734 /* We do not handle bit-precision changes. */
4735 if ((CONVERT_EXPR_CODE_P (code)
4736 || code == VIEW_CONVERT_EXPR)
4737 && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
4738 && (!type_has_mode_precision_p (TREE_TYPE (scalar_dest))
4739 || !type_has_mode_precision_p (TREE_TYPE (op)))
4740 /* But a conversion that does not change the bit-pattern is ok. */
4741 && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4742 > TYPE_PRECISION (TREE_TYPE (op)))
4743 && TYPE_UNSIGNED (TREE_TYPE (op)))
4744 /* Conversion between boolean types of different sizes is
4745 a simple assignment in case their vectypes are the same
4746 boolean vectors. */
4747 && (!VECTOR_BOOLEAN_TYPE_P (vectype)
4748 || !VECTOR_BOOLEAN_TYPE_P (vectype_in)))
4750 if (dump_enabled_p ())
4751 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4752 "type conversion to/from bit-precision "
4753 "unsupported.\n");
4754 return false;
4757 if (!vec_stmt) /* transformation not required. */
4759 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
4760 if (dump_enabled_p ())
4761 dump_printf_loc (MSG_NOTE, vect_location,
4762 "=== vectorizable_assignment ===\n");
4763 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
4764 return true;
4767 /* Transform. */
4768 if (dump_enabled_p ())
4769 dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");
4771 /* Handle def. */
4772 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4774 /* Handle use. */
4775 for (j = 0; j < ncopies; j++)
4777 /* Handle uses. */
4778 if (j == 0)
4779 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
4780 else
4781 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
4783 /* Arguments are ready. Create the new vector stmt. */
4784 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
4786 if (CONVERT_EXPR_CODE_P (code)
4787 || code == VIEW_CONVERT_EXPR)
4788 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
4789 new_stmt = gimple_build_assign (vec_dest, vop);
4790 new_temp = make_ssa_name (vec_dest, new_stmt);
4791 gimple_assign_set_lhs (new_stmt, new_temp);
4792 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4793 if (slp_node)
4794 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4797 if (slp_node)
4798 continue;
4800 if (j == 0)
4801 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4802 else
4803 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4805 prev_stmt_info = vinfo_for_stmt (new_stmt);
4808 vec_oprnds.release ();
4809 return true;
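/* (Editor's illustrative sketch; not part of the original file.)
   The whole-vector copy emitted by vectorizable_assignment, in plain C:
   a conversion that changes neither the lane count nor the vector size
   (here int lanes to unsigned int lanes) is a bit-for-bit copy of the
   vector, which is what wrapping the operand in VIEW_CONVERT_EXPR
   expresses.  The v4si_sk/v4usi_sk type names are hypothetical.  */

typedef int v4si_sk __attribute__ ((vector_size (16)));
typedef unsigned int v4usi_sk __attribute__ ((vector_size (16)));

static v4usi_sk
assignment_copy_sketch (v4si_sk x)
{
  v4usi_sk y;
  __builtin_memcpy (&y, &x, sizeof y);   /* reinterpret, no lane change  */
  return y;
}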
4813 /* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
4814 either as a shift by a scalar or by a vector. */
4816 bool
4817 vect_supportable_shift (enum tree_code code, tree scalar_type)
4820 machine_mode vec_mode;
4821 optab optab;
4822 int icode;
4823 tree vectype;
4825 vectype = get_vectype_for_scalar_type (scalar_type);
4826 if (!vectype)
4827 return false;
4829 optab = optab_for_tree_code (code, vectype, optab_scalar);
4830 if (!optab
4831 || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
4833 optab = optab_for_tree_code (code, vectype, optab_vector);
4834 if (!optab
4835 || (optab_handler (optab, TYPE_MODE (vectype))
4836 == CODE_FOR_nothing))
4837 return false;
4840 vec_mode = TYPE_MODE (vectype);
4841 icode = (int) optab_handler (optab, vec_mode);
4842 if (icode == CODE_FOR_nothing)
4843 return false;
4845 return true;
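/* (Editor's illustrative sketch; not part of the original file.)
   The two shift forms that the optab_scalar/optab_vector queries above
   distinguish, written element-wise in plain C: one scalar amount shared
   by every lane versus an independent amount per lane.  */

static void
shift_by_scalar_sketch (int a[4], int amount)
{
  for (int i = 0; i < 4; i++)
    a[i] <<= amount;               /* optab_scalar: one amount for all lanes  */
}

static void
shift_by_vector_sketch (int a[4], const int amount[4])
{
  for (int i = 0; i < 4; i++)
    a[i] <<= amount[i];            /* optab_vector: per-lane amounts  */
}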
4849 /* Function vectorizable_shift.
4851 Check if STMT performs a shift operation that can be vectorized.
4852 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4853 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4854 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4856 static bool
4857 vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
4858 gimple **vec_stmt, slp_tree slp_node)
4860 tree vec_dest;
4861 tree scalar_dest;
4862 tree op0, op1 = NULL;
4863 tree vec_oprnd1 = NULL_TREE;
4864 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4865 tree vectype;
4866 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4867 enum tree_code code;
4868 machine_mode vec_mode;
4869 tree new_temp;
4870 optab optab;
4871 int icode;
4872 machine_mode optab_op2_mode;
4873 gimple *def_stmt;
4874 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4875 int ndts = 2;
4876 gimple *new_stmt = NULL;
4877 stmt_vec_info prev_stmt_info;
4878 int nunits_in;
4879 int nunits_out;
4880 tree vectype_out;
4881 tree op1_vectype;
4882 int ncopies;
4883 int j, i;
4884 vec<tree> vec_oprnds0 = vNULL;
4885 vec<tree> vec_oprnds1 = vNULL;
4886 tree vop0, vop1;
4887 unsigned int k;
4888 bool scalar_shift_arg = true;
4889 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4890 vec_info *vinfo = stmt_info->vinfo;
4892 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4893 return false;
4895 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4896 && ! vec_stmt)
4897 return false;
4899 /* Is STMT a vectorizable binary/unary operation? */
4900 if (!is_gimple_assign (stmt))
4901 return false;
4903 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4904 return false;
4906 code = gimple_assign_rhs_code (stmt);
4908 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
4909 || code == RROTATE_EXPR))
4910 return false;
4912 scalar_dest = gimple_assign_lhs (stmt);
4913 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4914 if (!type_has_mode_precision_p (TREE_TYPE (scalar_dest)))
4916 if (dump_enabled_p ())
4917 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4918 "bit-precision shifts not supported.\n");
4919 return false;
4922 op0 = gimple_assign_rhs1 (stmt);
4923 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
4925 if (dump_enabled_p ())
4926 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4927 "use not simple.\n");
4928 return false;
4930 /* If op0 is an external or constant def use a vector type with
4931 the same size as the output vector type. */
4932 if (!vectype)
4933 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
4934 if (vec_stmt)
4935 gcc_assert (vectype);
4936 if (!vectype)
4938 if (dump_enabled_p ())
4939 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4940 "no vectype for scalar type\n");
4941 return false;
4944 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4945 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
4946 if (nunits_out != nunits_in)
4947 return false;
4949 op1 = gimple_assign_rhs2 (stmt);
4950 if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &op1_vectype))
4952 if (dump_enabled_p ())
4953 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4954 "use not simple.\n");
4955 return false;
4958 /* Multiple types in SLP are handled by creating the appropriate number of
4959 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4960 case of SLP. */
4961 if (slp_node)
4962 ncopies = 1;
4963 else
4964 ncopies = vect_get_num_copies (loop_vinfo, vectype);
4966 gcc_assert (ncopies >= 1);
4968 /* Determine whether the shift amount is a vector or a scalar. If the
4969 shift/rotate amount is a vector, use the vector/vector shift optabs. */
4971 if ((dt[1] == vect_internal_def
4972 || dt[1] == vect_induction_def)
4973 && !slp_node)
4974 scalar_shift_arg = false;
4975 else if (dt[1] == vect_constant_def
4976 || dt[1] == vect_external_def
4977 || dt[1] == vect_internal_def)
4979 /* In SLP, we need to check whether the shift count is the same
4980 for all statements; in loops, if it is a constant or invariant,
4981 it is always a scalar shift. */
4982 if (slp_node)
4984 vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
4985 gimple *slpstmt;
4987 FOR_EACH_VEC_ELT (stmts, k, slpstmt)
4988 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
4989 scalar_shift_arg = false;
4992 /* If the shift amount is computed by a pattern stmt we cannot
4993 use the scalar amount directly, so give up and use a vector
4994 shift. */
4995 if (dt[1] == vect_internal_def)
4997 gimple *def = SSA_NAME_DEF_STMT (op1);
4998 if (is_pattern_stmt_p (vinfo_for_stmt (def)))
4999 scalar_shift_arg = false;
5002 else
5004 if (dump_enabled_p ())
5005 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5006 "operand mode requires invariant argument.\n");
5007 return false;
5010 /* Vector shifted by vector. */
5011 if (!scalar_shift_arg)
5013 optab = optab_for_tree_code (code, vectype, optab_vector);
5014 if (dump_enabled_p ())
5015 dump_printf_loc (MSG_NOTE, vect_location,
5016 "vector/vector shift/rotate found.\n");
5018 if (!op1_vectype)
5019 op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
5020 if (op1_vectype == NULL_TREE
5021 || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
5023 if (dump_enabled_p ())
5024 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5025 "unusable type for last operand in"
5026 " vector/vector shift/rotate.\n");
5027 return false;
5030 /* See if the machine has a vector-shifted-by-scalar insn and, if not,
5031 then see if it has a vector-shifted-by-vector insn. */
5032 else
5034 optab = optab_for_tree_code (code, vectype, optab_scalar);
5035 if (optab
5036 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
5038 if (dump_enabled_p ())
5039 dump_printf_loc (MSG_NOTE, vect_location,
5040 "vector/scalar shift/rotate found.\n");
5042 else
5044 optab = optab_for_tree_code (code, vectype, optab_vector);
5045 if (optab
5046 && (optab_handler (optab, TYPE_MODE (vectype))
5047 != CODE_FOR_nothing))
5049 scalar_shift_arg = false;
5051 if (dump_enabled_p ())
5052 dump_printf_loc (MSG_NOTE, vect_location,
5053 "vector/vector shift/rotate found.\n");
5055 /* Unlike the other binary operators, shifts/rotates have
5056 an rhs of integer type rather than the same type as the lhs,
5057 so make sure the scalar is of the right type if we are
5058 dealing with vectors of long long/long/short/char. */
5059 if (dt[1] == vect_constant_def)
5060 op1 = fold_convert (TREE_TYPE (vectype), op1);
5061 else if (!useless_type_conversion_p (TREE_TYPE (vectype),
5062 TREE_TYPE (op1)))
5064 if (slp_node
5065 && TYPE_MODE (TREE_TYPE (vectype))
5066 != TYPE_MODE (TREE_TYPE (op1)))
5068 if (dump_enabled_p ())
5069 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5070 "unusable type for last operand in"
5071 " vector/vector shift/rotate.\n");
5072 return false;
5074 if (vec_stmt && !slp_node)
5076 op1 = fold_convert (TREE_TYPE (vectype), op1);
5077 op1 = vect_init_vector (stmt, op1,
5078 TREE_TYPE (vectype), NULL);
5085 /* Supportable by target? */
5086 if (!optab)
5088 if (dump_enabled_p ())
5089 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5090 "no optab.\n");
5091 return false;
5093 vec_mode = TYPE_MODE (vectype);
5094 icode = (int) optab_handler (optab, vec_mode);
5095 if (icode == CODE_FOR_nothing)
5097 if (dump_enabled_p ())
5098 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5099 "op not supported by target.\n");
5100 /* Check only during analysis. */
5101 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
5102 || (!vec_stmt
5103 && !vect_worthwhile_without_simd_p (vinfo, code)))
5104 return false;
5105 if (dump_enabled_p ())
5106 dump_printf_loc (MSG_NOTE, vect_location,
5107 "proceeding using word mode.\n");
5110 /* Worthwhile without SIMD support? Check only during analysis. */
5111 if (!vec_stmt
5112 && !VECTOR_MODE_P (TYPE_MODE (vectype))
5113 && !vect_worthwhile_without_simd_p (vinfo, code))
5115 if (dump_enabled_p ())
5116 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5117 "not worthwhile without SIMD support.\n");
5118 return false;
5121 if (!vec_stmt) /* transformation not required. */
5123 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
5124 if (dump_enabled_p ())
5125 dump_printf_loc (MSG_NOTE, vect_location,
5126 "=== vectorizable_shift ===\n");
5127 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
5128 return true;
5131 /* Transform. */
5133 if (dump_enabled_p ())
5134 dump_printf_loc (MSG_NOTE, vect_location,
5135 "transform binary/unary operation.\n");
5137 /* Handle def. */
5138 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5140 prev_stmt_info = NULL;
5141 for (j = 0; j < ncopies; j++)
5143 /* Handle uses. */
5144 if (j == 0)
5146 if (scalar_shift_arg)
5148 /* Vector shl and shr insn patterns can be defined with scalar
5149 operand 2 (the shift operand). In this case, use the constant or
5150 loop-invariant op1 directly, without extending it to vector mode
5151 first. */
5152 optab_op2_mode = insn_data[icode].operand[2].mode;
5153 if (!VECTOR_MODE_P (optab_op2_mode))
5155 if (dump_enabled_p ())
5156 dump_printf_loc (MSG_NOTE, vect_location,
5157 "operand 1 using scalar mode.\n");
5158 vec_oprnd1 = op1;
5159 vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
5160 vec_oprnds1.quick_push (vec_oprnd1);
5161 if (slp_node)
5163 /* Store vec_oprnd1 for every vector stmt to be created
5164 for SLP_NODE. We check during the analysis that all
5165 the shift arguments are the same.
5166 TODO: Allow different constants for different vector
5167 stmts generated for an SLP instance. */
5168 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
5169 vec_oprnds1.quick_push (vec_oprnd1);
5174 /* vec_oprnd1 is available if operand 1 should be of a scalar type
5175 (a special case for certain kinds of vector shifts); otherwise,
5176 operand 1 should be of a vector type (the usual case). */
5177 if (vec_oprnd1)
5178 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
5179 slp_node);
5180 else
5181 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
5182 slp_node);
5184 else
5185 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
5187 /* Arguments are ready. Create the new vector stmt. */
5188 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
5190 vop1 = vec_oprnds1[i];
5191 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
5192 new_temp = make_ssa_name (vec_dest, new_stmt);
5193 gimple_assign_set_lhs (new_stmt, new_temp);
5194 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5195 if (slp_node)
5196 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
5199 if (slp_node)
5200 continue;
5202 if (j == 0)
5203 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5204 else
5205 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5206 prev_stmt_info = vinfo_for_stmt (new_stmt);
5209 vec_oprnds0.release ();
5210 vec_oprnds1.release ();
5212 return true;
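/* (Editor's illustrative sketch; not part of the original file.)
   Why op1 is fold_convert-ed above for the vector/vector case: the shift
   amount must have the vector's element type, so shifting 64-bit lanes
   by an "int" count means converting the count to 64 bits first.  The
   v2di_sk type name is hypothetical.  */

typedef long long v2di_sk __attribute__ ((vector_size (16)));

static v2di_sk
shift_amount_convert_sketch (v2di_sk x, int amount)
{
  long long wide_amount = (long long) amount;   /* the fold_convert step  */
  for (int i = 0; i < 2; i++)
    x[i] <<= wide_amount;
  return x;
}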
5216 /* Function vectorizable_operation.
5218 Check if STMT performs a binary, unary or ternary operation that can
5219 be vectorized.
5220 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5221 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
5222 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5224 static bool
5225 vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
5226 gimple **vec_stmt, slp_tree slp_node)
5228 tree vec_dest;
5229 tree scalar_dest;
5230 tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
5231 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5232 tree vectype;
5233 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5234 enum tree_code code, orig_code;
5235 machine_mode vec_mode;
5236 tree new_temp;
5237 int op_type;
5238 optab optab;
5239 bool target_support_p;
5240 gimple *def_stmt;
5241 enum vect_def_type dt[3]
5242 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
5243 int ndts = 3;
5244 gimple *new_stmt = NULL;
5245 stmt_vec_info prev_stmt_info;
5246 int nunits_in;
5247 int nunits_out;
5248 tree vectype_out;
5249 int ncopies;
5250 int j, i;
5251 vec<tree> vec_oprnds0 = vNULL;
5252 vec<tree> vec_oprnds1 = vNULL;
5253 vec<tree> vec_oprnds2 = vNULL;
5254 tree vop0, vop1, vop2;
5255 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5256 vec_info *vinfo = stmt_info->vinfo;
5258 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5259 return false;
5261 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5262 && ! vec_stmt)
5263 return false;
5265 /* Is STMT a vectorizable binary/unary operation? */
5266 if (!is_gimple_assign (stmt))
5267 return false;
5269 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
5270 return false;
5272 orig_code = code = gimple_assign_rhs_code (stmt);
5274 /* For pointer addition and subtraction, we should use the normal
5275 plus and minus for the vector operation. */
5276 if (code == POINTER_PLUS_EXPR)
5277 code = PLUS_EXPR;
5278 if (code == POINTER_DIFF_EXPR)
5279 code = MINUS_EXPR;
5281 /* Support only unary, binary, and ternary operations. */
5282 op_type = TREE_CODE_LENGTH (code);
5283 if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
5285 if (dump_enabled_p ())
5286 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5287 "num. args = %d (not unary/binary/ternary op).\n",
5288 op_type);
5289 return false;
5292 scalar_dest = gimple_assign_lhs (stmt);
5293 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
5295 /* Most operations cannot handle bit-precision types without extra
5296 truncations. */
5297 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
5298 && !type_has_mode_precision_p (TREE_TYPE (scalar_dest))
5299 /* The exceptions are bitwise binary operations. */
5300 && code != BIT_IOR_EXPR
5301 && code != BIT_XOR_EXPR
5302 && code != BIT_AND_EXPR)
5304 if (dump_enabled_p ())
5305 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5306 "bit-precision arithmetic not supported.\n");
5307 return false;
5310 op0 = gimple_assign_rhs1 (stmt);
5311 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
5313 if (dump_enabled_p ())
5314 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5315 "use not simple.\n");
5316 return false;
5318 /* If op0 is an external or constant def use a vector type with
5319 the same size as the output vector type. */
5320 if (!vectype)
5322 /* For a boolean type we cannot determine the vectype from an
5323 invariant value (we don't know whether it is a vector
5324 of booleans or a vector of integers). We use the output
5325 vectype because operations on booleans don't change the
5326 type. */
5327 if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op0)))
5329 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (scalar_dest)))
5331 if (dump_enabled_p ())
5332 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5333 "not supported operation on bool value.\n");
5334 return false;
5336 vectype = vectype_out;
5338 else
5339 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
5341 if (vec_stmt)
5342 gcc_assert (vectype);
5343 if (!vectype)
5345 if (dump_enabled_p ())
5347 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5348 "no vectype for scalar type ");
5349 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
5350 TREE_TYPE (op0));
5351 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
5354 return false;
5357 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
5358 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
5359 if (nunits_out != nunits_in)
5360 return false;
5362 if (op_type == binary_op || op_type == ternary_op)
5364 op1 = gimple_assign_rhs2 (stmt);
5365 if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]))
5367 if (dump_enabled_p ())
5368 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5369 "use not simple.\n");
5370 return false;
5373 if (op_type == ternary_op)
5375 op2 = gimple_assign_rhs3 (stmt);
5376 if (!vect_is_simple_use (op2, vinfo, &def_stmt, &dt[2]))
5378 if (dump_enabled_p ())
5379 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5380 "use not simple.\n");
5381 return false;
5385 /* Multiple types in SLP are handled by creating the appropriate number of
5386 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5387 case of SLP. */
5388 if (slp_node)
5389 ncopies = 1;
5390 else
5391 ncopies = vect_get_num_copies (loop_vinfo, vectype);
5393 gcc_assert (ncopies >= 1);
5395 /* Shifts are handled in vectorizable_shift (). */
5396 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
5397 || code == RROTATE_EXPR)
5398 return false;
5400 /* Supportable by target? */
5402 vec_mode = TYPE_MODE (vectype);
5403 if (code == MULT_HIGHPART_EXPR)
5404 target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
5405 else
5407 optab = optab_for_tree_code (code, vectype, optab_default);
5408 if (!optab)
5410 if (dump_enabled_p ())
5411 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5412 "no optab.\n");
5413 return false;
5415 target_support_p = (optab_handler (optab, vec_mode)
5416 != CODE_FOR_nothing);
5419 if (!target_support_p)
5421 if (dump_enabled_p ())
5422 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5423 "op not supported by target.\n");
5424 /* Check only during analysis. */
5425 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
5426 || (!vec_stmt && !vect_worthwhile_without_simd_p (vinfo, code)))
5427 return false;
5428 if (dump_enabled_p ())
5429 dump_printf_loc (MSG_NOTE, vect_location,
5430 "proceeding using word mode.\n");
5433 /* Worthwhile without SIMD support? Check only during analysis. */
5434 if (!VECTOR_MODE_P (vec_mode)
5435 && !vec_stmt
5436 && !vect_worthwhile_without_simd_p (vinfo, code))
5438 if (dump_enabled_p ())
5439 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5440 "not worthwhile without SIMD support.\n");
5441 return false;
5444 if (!vec_stmt) /* transformation not required. */
5446 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
5447 if (dump_enabled_p ())
5448 dump_printf_loc (MSG_NOTE, vect_location,
5449 "=== vectorizable_operation ===\n");
5450 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
5451 return true;
5454 /* Transform. */
5456 if (dump_enabled_p ())
5457 dump_printf_loc (MSG_NOTE, vect_location,
5458 "transform binary/unary operation.\n");
5460 /* Handle def. */
5461 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5463 /* POINTER_DIFF_EXPR has pointer arguments which are vectorized as
5464 vectors with unsigned elements, but the result is signed. So, we
5465 need to compute the MINUS_EXPR into a vectype temporary and
5466 VIEW_CONVERT_EXPR it into the final vectype_out result. */
5467 tree vec_cvt_dest = NULL_TREE;
5468 if (orig_code == POINTER_DIFF_EXPR)
5469 vec_cvt_dest = vect_create_destination_var (scalar_dest, vectype_out);
5471 /* In case the vectorization factor (VF) is bigger than the number
5472 of elements that we can fit in a vectype (nunits), we have to generate
5473 more than one vector stmt, i.e., we need to "unroll" the
5474 vector stmt by a factor of VF/nunits. In doing so, we record a pointer
5475 from one copy of the vector stmt to the next, in the field
5476 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
5477 stages to find the correct vector defs to be used when vectorizing
5478 stmts that use the defs of the current stmt. The example below
5479 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
5480 we need to create 4 vectorized stmts):
5482 before vectorization:
5483 RELATED_STMT VEC_STMT
5484 S1: x = memref - -
5485 S2: z = x + 1 - -
5487 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
5488 there):
5489 RELATED_STMT VEC_STMT
5490 VS1_0: vx0 = memref0 VS1_1 -
5491 VS1_1: vx1 = memref1 VS1_2 -
5492 VS1_2: vx2 = memref2 VS1_3 -
5493 VS1_3: vx3 = memref3 - -
5494 S1: x = load - VS1_0
5495 S2: z = x + 1 - -
5497 step 2: vectorize stmt S2 (done here):
5498 To vectorize stmt S2 we first need to find the relevant vector
5499 def for the first operand 'x'. This is, as usual, obtained from
5500 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
5501 that defines 'x' (S1). This way we find the stmt VS1_0, and the
5502 relevant vector def 'vx0'. Having found 'vx0' we can generate
5503 the vector stmt VS2_0, and as usual, record it in the
5504 STMT_VINFO_VEC_STMT of stmt S2.
5505 When creating the second copy (VS2_1), we obtain the relevant vector
5506 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
5507 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
5508 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
5509 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
5510 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
5511 chain of stmts and pointers:
5512 RELATED_STMT VEC_STMT
5513 VS1_0: vx0 = memref0 VS1_1 -
5514 VS1_1: vx1 = memref1 VS1_2 -
5515 VS1_2: vx2 = memref2 VS1_3 -
5516 VS1_3: vx3 = memref3 - -
5517 S1: x = load - VS1_0
5518 VS2_0: vz0 = vx0 + v1 VS2_1 -
5519 VS2_1: vz1 = vx1 + v1 VS2_2 -
5520 VS2_2: vz2 = vx2 + v1 VS2_3 -
5521 VS2_3: vz3 = vx3 + v1 - -
5522 S2: z = x + 1 - VS2_0 */
5524 prev_stmt_info = NULL;
5525 for (j = 0; j < ncopies; j++)
5527 /* Handle uses. */
5528 if (j == 0)
5530 if (op_type == binary_op || op_type == ternary_op)
5531 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
5532 slp_node);
5533 else
5534 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
5535 slp_node);
5536 if (op_type == ternary_op)
5537 vect_get_vec_defs (op2, NULL_TREE, stmt, &vec_oprnds2, NULL,
5538 slp_node);
5540 else
5542 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
5543 if (op_type == ternary_op)
5545 tree vec_oprnd = vec_oprnds2.pop ();
5546 vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
5547 vec_oprnd));
5551 /* Arguments are ready. Create the new vector stmt. */
5552 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
5554 vop1 = ((op_type == binary_op || op_type == ternary_op)
5555 ? vec_oprnds1[i] : NULL_TREE);
5556 vop2 = ((op_type == ternary_op)
5557 ? vec_oprnds2[i] : NULL_TREE);
5558 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2);
5559 new_temp = make_ssa_name (vec_dest, new_stmt);
5560 gimple_assign_set_lhs (new_stmt, new_temp);
5561 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5562 if (vec_cvt_dest)
5564 new_temp = build1 (VIEW_CONVERT_EXPR, vectype_out, new_temp);
5565 new_stmt = gimple_build_assign (vec_cvt_dest, VIEW_CONVERT_EXPR,
5566 new_temp);
5567 new_temp = make_ssa_name (vec_cvt_dest, new_stmt);
5568 gimple_assign_set_lhs (new_stmt, new_temp);
5569 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5571 if (slp_node)
5572 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
5575 if (slp_node)
5576 continue;
5578 if (j == 0)
5579 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5580 else
5581 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5582 prev_stmt_info = vinfo_for_stmt (new_stmt);
5585 vec_oprnds0.release ();
5586 vec_oprnds1.release ();
5587 vec_oprnds2.release ();
5589 return true;
5592 /* A helper function to ensure data reference DR's base alignment. */
5594 static void
5595 ensure_base_align (struct data_reference *dr)
5597 if (!dr->aux)
5598 return;
5600 if (DR_VECT_AUX (dr)->base_misaligned)
5602 tree base_decl = DR_VECT_AUX (dr)->base_decl;
5604 unsigned int align_base_to = DR_TARGET_ALIGNMENT (dr) * BITS_PER_UNIT;
5606 if (decl_in_symtab_p (base_decl))
5607 symtab_node::get (base_decl)->increase_alignment (align_base_to);
5608 else
5610 SET_DECL_ALIGN (base_decl, align_base_to);
5611 DECL_USER_ALIGN (base_decl) = 1;
5613 DR_VECT_AUX (dr)->base_misaligned = false;
5618 /* Function get_group_alias_ptr_type.
5620 Return the alias type for the group starting at FIRST_STMT. */
5622 static tree
5623 get_group_alias_ptr_type (gimple *first_stmt)
5625 struct data_reference *first_dr, *next_dr;
5626 gimple *next_stmt;
5628 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5629 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first_stmt));
5630 while (next_stmt)
5632 next_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (next_stmt));
5633 if (get_alias_set (DR_REF (first_dr))
5634 != get_alias_set (DR_REF (next_dr)))
5636 if (dump_enabled_p ())
5637 dump_printf_loc (MSG_NOTE, vect_location,
5638 "conflicting alias set types.\n");
5639 return ptr_type_node;
5641 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5643 return reference_alias_ptr_type (DR_REF (first_dr));
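/* For example, if one store in the group is written through an "int *"
   and another through a "float *" (conflicting alias sets), we fall back
   to ptr_type_node, i.e. a conservative alias pointer type; otherwise the
   alias pointer type of the first reference is reused for the whole
   group.  */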
5647 /* Function vectorizable_store.
5649 Check if STMT defines a non-scalar data-ref (array/pointer/structure) that
5650 can be vectorized.
5651 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5652 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
5653 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5655 static bool
5656 vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
5657 slp_tree slp_node)
5659 tree scalar_dest;
5660 tree data_ref;
5661 tree op;
5662 tree vec_oprnd = NULL_TREE;
5663 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5664 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
5665 tree elem_type;
5666 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5667 struct loop *loop = NULL;
5668 machine_mode vec_mode;
5669 tree dummy;
5670 enum dr_alignment_support alignment_support_scheme;
5671 gimple *def_stmt;
5672 enum vect_def_type dt;
5673 stmt_vec_info prev_stmt_info = NULL;
5674 tree dataref_ptr = NULL_TREE;
5675 tree dataref_offset = NULL_TREE;
5676 gimple *ptr_incr = NULL;
5677 int ncopies;
5678 int j;
5679 gimple *next_stmt, *first_stmt;
5680 bool grouped_store;
5681 unsigned int group_size, i;
5682 vec<tree> oprnds = vNULL;
5683 vec<tree> result_chain = vNULL;
5684 bool inv_p;
5685 tree offset = NULL_TREE;
5686 vec<tree> vec_oprnds = vNULL;
5687 bool slp = (slp_node != NULL);
5688 unsigned int vec_num;
5689 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5690 vec_info *vinfo = stmt_info->vinfo;
5691 tree aggr_type;
5692 gather_scatter_info gs_info;
5693 enum vect_def_type scatter_src_dt = vect_unknown_def_type;
5694 gimple *new_stmt;
5695 int vf;
5696 vec_load_store_type vls_type;
5697 tree ref_type;
5699 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5700 return false;
5702 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5703 && ! vec_stmt)
5704 return false;
5706 /* Is vectorizable store? */
5708 if (!is_gimple_assign (stmt))
5709 return false;
5711 scalar_dest = gimple_assign_lhs (stmt);
5712 if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
5713 && is_pattern_stmt_p (stmt_info))
5714 scalar_dest = TREE_OPERAND (scalar_dest, 0);
5715 if (TREE_CODE (scalar_dest) != ARRAY_REF
5716 && TREE_CODE (scalar_dest) != BIT_FIELD_REF
5717 && TREE_CODE (scalar_dest) != INDIRECT_REF
5718 && TREE_CODE (scalar_dest) != COMPONENT_REF
5719 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
5720 && TREE_CODE (scalar_dest) != REALPART_EXPR
5721 && TREE_CODE (scalar_dest) != MEM_REF)
5722 return false;
5724 /* Cannot have hybrid store SLP -- that would mean storing to the
5725 same location twice. */
5726 gcc_assert (slp == PURE_SLP_STMT (stmt_info));
5728 gcc_assert (gimple_assign_single_p (stmt));
5730 tree vectype = STMT_VINFO_VECTYPE (stmt_info), rhs_vectype = NULL_TREE;
5731 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
5733 if (loop_vinfo)
5735 loop = LOOP_VINFO_LOOP (loop_vinfo);
5736 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
5738 else
5739 vf = 1;
5741 /* Multiple types in SLP are handled by creating the appropriate number of
5742 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5743 case of SLP. */
5744 if (slp)
5745 ncopies = 1;
5746 else
5747 ncopies = vect_get_num_copies (loop_vinfo, vectype);
5749 gcc_assert (ncopies >= 1);
5751 /* FORNOW. This restriction should be relaxed. */
5752 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
5754 if (dump_enabled_p ())
5755 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5756 "multiple types in nested loop.\n");
5757 return false;
5760 op = gimple_assign_rhs1 (stmt);
5762 /* In case this is a store of a constant, make sure
5763 native_encode_expr can handle it. */
5764 if (CONSTANT_CLASS_P (op) && native_encode_expr (op, NULL, 64) == 0)
5765 return false;
5767 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt, &rhs_vectype))
5769 if (dump_enabled_p ())
5770 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5771 "use not simple.\n");
5772 return false;
5775 if (dt == vect_constant_def || dt == vect_external_def)
5776 vls_type = VLS_STORE_INVARIANT;
5777 else
5778 vls_type = VLS_STORE;
5780 if (rhs_vectype && !useless_type_conversion_p (vectype, rhs_vectype))
5781 return false;
5783 elem_type = TREE_TYPE (vectype);
5784 vec_mode = TYPE_MODE (vectype);
5786 /* FORNOW. In some cases we can vectorize even if the data type is not
5787 supported (e.g. array initialization with 0). */
5788 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
5789 return false;
5791 if (!STMT_VINFO_DATA_REF (stmt_info))
5792 return false;
5794 vect_memory_access_type memory_access_type;
5795 if (!get_load_store_type (stmt, vectype, slp, vls_type, ncopies,
5796 &memory_access_type, &gs_info))
5797 return false;
5799 if (!vec_stmt) /* transformation not required. */
5801 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
5802 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
5803 /* The SLP costs are calculated during SLP analysis. */
5804 if (!PURE_SLP_STMT (stmt_info))
5805 vect_model_store_cost (stmt_info, ncopies, memory_access_type, dt,
5806 NULL, NULL, NULL);
5807 return true;
5809 gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
5811 /* Transform. */
5813 ensure_base_align (dr);
5815 if (memory_access_type == VMAT_GATHER_SCATTER)
5817 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, op, src;
5818 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
5819 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
5820 tree ptr, mask, var, scale, perm_mask = NULL_TREE;
5821 edge pe = loop_preheader_edge (loop);
5822 gimple_seq seq;
5823 basic_block new_bb;
5824 enum { NARROW, NONE, WIDEN } modifier;
5825 int scatter_off_nunits = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);
5827 if (nunits == (unsigned int) scatter_off_nunits)
5828 modifier = NONE;
5829 else if (nunits == (unsigned int) scatter_off_nunits / 2)
5831 modifier = WIDEN;
5833 vec_perm_builder sel (scatter_off_nunits, scatter_off_nunits, 1);
5834 for (i = 0; i < (unsigned int) scatter_off_nunits; ++i)
5835 sel.quick_push (i | nunits);
5837 vec_perm_indices indices (sel, 1, scatter_off_nunits);
5838 perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype,
5839 indices);
5840 gcc_assert (perm_mask != NULL_TREE);
5842 else if (nunits == (unsigned int) scatter_off_nunits * 2)
5844 modifier = NARROW;
5846 vec_perm_builder sel (nunits, nunits, 1);
5847 for (i = 0; i < (unsigned int) nunits; ++i)
5848 sel.quick_push (i | scatter_off_nunits);
5850 vec_perm_indices indices (sel, 2, nunits);
5851 perm_mask = vect_gen_perm_mask_checked (vectype, indices);
5852 gcc_assert (perm_mask != NULL_TREE);
5853 ncopies *= 2;
5855 else
5856 gcc_unreachable ();
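/* A concrete instance of the selectors built above (values assumed for
   illustration): with nunits == 4 and scatter_off_nunits == 8 the WIDEN
   selector is {4, 5, 6, 7, 4, 5, 6, 7}, and with nunits == 8 and
   scatter_off_nunits == 4 the NARROW selector is likewise
   {4, 5, 6, 7, 4, 5, 6, 7}; in both cases odd copies select the upper
   half of the duplicated input (the offset vector for WIDEN, the source
   data for NARROW).  */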
5858 rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
5859 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5860 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5861 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5862 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5863 scaletype = TREE_VALUE (arglist);
5865 gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
5866 && TREE_CODE (rettype) == VOID_TYPE);
5868 ptr = fold_convert (ptrtype, gs_info.base);
5869 if (!is_gimple_min_invariant (ptr))
5871 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
5872 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
5873 gcc_assert (!new_bb);
5876 /* Currently we support only unconditional scatter stores,
5877 so mask should be all ones. */
5878 mask = build_int_cst (masktype, -1);
5879 mask = vect_init_vector (stmt, mask, masktype, NULL);
5881 scale = build_int_cst (scaletype, gs_info.scale);
5883 prev_stmt_info = NULL;
5884 for (j = 0; j < ncopies; ++j)
5886 if (j == 0)
5888 src = vec_oprnd1
5889 = vect_get_vec_def_for_operand (gimple_assign_rhs1 (stmt), stmt);
5890 op = vec_oprnd0
5891 = vect_get_vec_def_for_operand (gs_info.offset, stmt);
5893 else if (modifier != NONE && (j & 1))
5895 if (modifier == WIDEN)
5897 src = vec_oprnd1
5898 = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
5899 op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
5900 stmt, gsi);
5902 else if (modifier == NARROW)
5904 src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask,
5905 stmt, gsi);
5906 op = vec_oprnd0
5907 = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt,
5908 vec_oprnd0);
5910 else
5911 gcc_unreachable ();
5913 else
5915 src = vec_oprnd1
5916 = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
5917 op = vec_oprnd0
5918 = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt,
5919 vec_oprnd0);
5922 if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
5924 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src))
5925 == TYPE_VECTOR_SUBPARTS (srctype));
5926 var = vect_get_new_ssa_name (srctype, vect_simple_var);
5927 src = build1 (VIEW_CONVERT_EXPR, srctype, src);
5928 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
5929 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5930 src = var;
5933 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
5935 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
5936 == TYPE_VECTOR_SUBPARTS (idxtype));
5937 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
5938 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
5939 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
5940 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5941 op = var;
5944 new_stmt
5945 = gimple_build_call (gs_info.decl, 5, ptr, mask, op, src, scale);
5947 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5949 if (prev_stmt_info == NULL)
5950 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5951 else
5952 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5953 prev_stmt_info = vinfo_for_stmt (new_stmt);
5955 return true;
5958 grouped_store = STMT_VINFO_GROUPED_ACCESS (stmt_info);
5959 if (grouped_store)
5961 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
5962 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5963 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
5965 GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
5967 /* FORNOW */
5968 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
5970 /* We vectorize all the stmts of the interleaving group when we
5971 reach the last stmt in the group. */
5972 if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
5973 < GROUP_SIZE (vinfo_for_stmt (first_stmt))
5974 && !slp)
5976 *vec_stmt = NULL;
5977 return true;
5980 if (slp)
5982 grouped_store = false;
5983 /* VEC_NUM is the number of vect stmts to be created for this
5984 group. */
5985 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
5986 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
5987 gcc_assert (GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt)) == first_stmt);
5988 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5989 op = gimple_assign_rhs1 (first_stmt);
5991 else
5992 /* VEC_NUM is the number of vect stmts to be created for this
5993 group. */
5994 vec_num = group_size;
5996 ref_type = get_group_alias_ptr_type (first_stmt);
5998 else
6000 first_stmt = stmt;
6001 first_dr = dr;
6002 group_size = vec_num = 1;
6003 ref_type = reference_alias_ptr_type (DR_REF (first_dr));
6006 if (dump_enabled_p ())
6007 dump_printf_loc (MSG_NOTE, vect_location,
6008 "transform store. ncopies = %d\n", ncopies);
6010 if (memory_access_type == VMAT_ELEMENTWISE
6011 || memory_access_type == VMAT_STRIDED_SLP)
6013 gimple_stmt_iterator incr_gsi;
6014 bool insert_after;
6015 gimple *incr;
6016 tree offvar;
6017 tree ivstep;
6018 tree running_off;
6019 gimple_seq stmts = NULL;
6020 tree stride_base, stride_step, alias_off;
6021 tree vec_oprnd;
6022 unsigned int g;
6024 gcc_assert (!nested_in_vect_loop_p (loop, stmt));
6026 stride_base
6027 = fold_build_pointer_plus
6028 (unshare_expr (DR_BASE_ADDRESS (first_dr)),
6029 size_binop (PLUS_EXPR,
6030 convert_to_ptrofftype (unshare_expr (DR_OFFSET (first_dr))),
6031 convert_to_ptrofftype (DR_INIT (first_dr))));
6032 stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (first_dr)));
6034 /* For a store with loop-invariant (but other than power-of-2)
6035 stride (i.e. not a grouped access) like so:
6037 for (i = 0; i < n; i += stride)
6038 array[i] = ...;
6040 we generate a new induction variable and new stores from
6041 the components of the (vectorized) rhs:
6043 for (j = 0; ; j += VF*stride)
6044 vectemp = ...;
6045 tmp1 = vectemp[0];
6046 array[j] = tmp1;
6047 tmp2 = vectemp[1];
6048 array[j + stride] = tmp2;
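/* A worked instance of the scheme above, assuming V4SI, a stride of
   3 ints and a vectorization factor of 4: each copy extracts the four
   lanes of the vectorized rhs and stores them DR_STEP == 12 bytes apart,
   while the induction variable created below advances by 4 * 12 == 48
   bytes per vector iteration.  */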
6052 unsigned nstores = nunits;
6053 unsigned lnel = 1;
6054 tree ltype = elem_type;
6055 tree lvectype = vectype;
6056 if (slp)
6058 if (group_size < nunits
6059 && nunits % group_size == 0)
6061 nstores = nunits / group_size;
6062 lnel = group_size;
6063 ltype = build_vector_type (elem_type, group_size);
6064 lvectype = vectype;
6066 /* First check if vec_extract optab doesn't support extraction
6067 of vector elts directly. */
6068 scalar_mode elmode = SCALAR_TYPE_MODE (elem_type);
6069 machine_mode vmode;
6070 if (!mode_for_vector (elmode, group_size).exists (&vmode)
6071 || !VECTOR_MODE_P (vmode)
6072 || (convert_optab_handler (vec_extract_optab,
6073 TYPE_MODE (vectype), vmode)
6074 == CODE_FOR_nothing))
6076 /* Try to avoid emitting an extract of vector elements
6077 by performing the extracts using an integer type of the
6078 same size, extracting from a vector of those and then
6079 re-interpreting it as the original vector type if
6080 supported. */
6081 unsigned lsize
6082 = group_size * GET_MODE_BITSIZE (elmode);
6083 elmode = int_mode_for_size (lsize, 0).require ();
6084 /* If we can't construct such a vector fall back to
6085 element extracts from the original vector type and
6086 element size stores. */
6087 if (mode_for_vector (elmode,
6088 nunits / group_size).exists (&vmode)
6089 && VECTOR_MODE_P (vmode)
6090 && (convert_optab_handler (vec_extract_optab,
6091 vmode, elmode)
6092 != CODE_FOR_nothing))
6094 nstores = nunits / group_size;
6095 lnel = group_size;
6096 ltype = build_nonstandard_integer_type (lsize, 1);
6097 lvectype = build_vector_type (ltype, nstores);
6099 /* Else fall back to vector extraction anyway.
6100 Fewer stores are more important than avoiding spilling
6101 of the vector we extract from. Compared to the
6102 construction case in vectorizable_load no store-forwarding
6103 issue exists here for reasonable archs. */
6106 else if (group_size >= nunits
6107 && group_size % nunits == 0)
6109 nstores = 1;
6110 lnel = nunits;
6111 ltype = vectype;
6112 lvectype = vectype;
6114 ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
6115 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
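/* A concrete instance of the integer-punning fallback above, assuming
   group_size == 2, SImode elements and a V4SI vectype: LSIZE is 64 bits,
   so LTYPE becomes a 64-bit integer type and LVECTYPE becomes V2DI; the
   vector is punned to V2DI below and each group of two ints is written
   with a single DImode store (two stores per vector instead of four),
   provided the target supports DImode extraction from V2DI.  */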
6118 ivstep = stride_step;
6119 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
6120 build_int_cst (TREE_TYPE (ivstep), vf));
6122 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
6124 create_iv (stride_base, ivstep, NULL,
6125 loop, &incr_gsi, insert_after,
6126 &offvar, NULL);
6127 incr = gsi_stmt (incr_gsi);
6128 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
6130 stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
6131 if (stmts)
6132 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
6134 prev_stmt_info = NULL;
6135 alias_off = build_int_cst (ref_type, 0);
6136 next_stmt = first_stmt;
6137 for (g = 0; g < group_size; g++)
6139 running_off = offvar;
6140 if (g)
6142 tree size = TYPE_SIZE_UNIT (ltype);
6143 tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
6144 size);
6145 tree newoff = copy_ssa_name (running_off, NULL);
6146 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6147 running_off, pos);
6148 vect_finish_stmt_generation (stmt, incr, gsi);
6149 running_off = newoff;
6151 unsigned int group_el = 0;
6152 unsigned HOST_WIDE_INT
6153 elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
6154 for (j = 0; j < ncopies; j++)
6156 /* We've set op and dt above, from gimple_assign_rhs1(stmt),
6157 and first_stmt == stmt. */
6158 if (j == 0)
6160 if (slp)
6162 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL,
6163 slp_node);
6164 vec_oprnd = vec_oprnds[0];
6166 else
6168 gcc_assert (gimple_assign_single_p (next_stmt));
6169 op = gimple_assign_rhs1 (next_stmt);
6170 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
6173 else
6175 if (slp)
6176 vec_oprnd = vec_oprnds[j];
6177 else
6179 vect_is_simple_use (vec_oprnd, vinfo, &def_stmt, &dt);
6180 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
6183 /* Pun the vector to extract from if necessary. */
6184 if (lvectype != vectype)
6186 tree tem = make_ssa_name (lvectype);
6187 gimple *pun
6188 = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
6189 lvectype, vec_oprnd));
6190 vect_finish_stmt_generation (stmt, pun, gsi);
6191 vec_oprnd = tem;
6193 for (i = 0; i < nstores; i++)
6195 tree newref, newoff;
6196 gimple *incr, *assign;
6197 tree size = TYPE_SIZE (ltype);
6198 /* Extract the i'th component. */
6199 tree pos = fold_build2 (MULT_EXPR, bitsizetype,
6200 bitsize_int (i), size);
6201 tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
6202 size, pos);
6204 elem = force_gimple_operand_gsi (gsi, elem, true,
6205 NULL_TREE, true,
6206 GSI_SAME_STMT);
6208 tree this_off = build_int_cst (TREE_TYPE (alias_off),
6209 group_el * elsz);
6210 newref = build2 (MEM_REF, ltype,
6211 running_off, this_off);
6213 /* And store it to *running_off. */
6214 assign = gimple_build_assign (newref, elem);
6215 vect_finish_stmt_generation (stmt, assign, gsi);
6217 group_el += lnel;
6218 if (! slp
6219 || group_el == group_size)
6221 newoff = copy_ssa_name (running_off, NULL);
6222 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6223 running_off, stride_step);
6224 vect_finish_stmt_generation (stmt, incr, gsi);
6226 running_off = newoff;
6227 group_el = 0;
6229 if (g == group_size - 1
6230 && !slp)
6232 if (j == 0 && i == 0)
6233 STMT_VINFO_VEC_STMT (stmt_info)
6234 = *vec_stmt = assign;
6235 else
6236 STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign;
6237 prev_stmt_info = vinfo_for_stmt (assign);
6241 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
6242 if (slp)
6243 break;
6246 vec_oprnds.release ();
6247 return true;
6250 auto_vec<tree> dr_chain (group_size);
6251 oprnds.create (group_size);
6253 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
6254 gcc_assert (alignment_support_scheme);
6255 /* Targets with store-lane instructions must not require explicit
6256 realignment. */
6257 gcc_assert (memory_access_type != VMAT_LOAD_STORE_LANES
6258 || alignment_support_scheme == dr_aligned
6259 || alignment_support_scheme == dr_unaligned_supported);
6261 if (memory_access_type == VMAT_CONTIGUOUS_DOWN
6262 || memory_access_type == VMAT_CONTIGUOUS_REVERSE)
6263 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
6265 if (memory_access_type == VMAT_LOAD_STORE_LANES)
6266 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
6267 else
6268 aggr_type = vectype;
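/* For instance, a store-lanes group of three V4SI vectors uses an
   aggregate type of 3 * 4 == 12 ints here, matching the single
   IFN_STORE_LANES call emitted further down (an st3-style instruction
   on targets that provide one); all other access types simply use
   VECTYPE itself.  */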
6270 /* In case the vectorization factor (VF) is bigger than the number
6271 of elements that we can fit in a vectype (nunits), we have to generate
6272 more than one vector stmt - i.e., we need to "unroll" the
6273 vector stmt by a factor VF/nunits. For more details see the documentation of
6274 vect_get_vec_def_for_stmt_copy. */
6276 /* In case of interleaving (non-unit grouped access):
6278 S1: &base + 2 = x2
6279 S2: &base = x0
6280 S3: &base + 1 = x1
6281 S4: &base + 3 = x3
6283 We create vectorized stores starting from base address (the access of the
6284 first stmt in the chain (S2 in the above example), when the last store stmt
6285 of the chain (S4) is reached:
6287 VS1: &base = vx2
6288 VS2: &base + vec_size*1 = vx0
6289 VS3: &base + vec_size*2 = vx1
6290 VS4: &base + vec_size*3 = vx3
6292 Then permutation statements are generated:
6294 VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
6295 VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
6298 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
6299 (the order of the data-refs in the output of vect_permute_store_chain
6300 corresponds to the order of scalar stmts in the interleaving chain - see
6301 the documentation of vect_permute_store_chain()).
6303 In case of both multiple types and interleaving, above vector stores and
6304 permutation stmts are created for every copy. The result vector stmts are
6305 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
6306 STMT_VINFO_RELATED_STMT for the next copies.
6309 prev_stmt_info = NULL;
6310 for (j = 0; j < ncopies; j++)
6313 if (j == 0)
6315 if (slp)
6317 /* Get vectorized arguments for SLP_NODE. */
6318 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
6319 NULL, slp_node);
6321 vec_oprnd = vec_oprnds[0];
6323 else
6325 /* For interleaved stores we collect vectorized defs for all the
6326 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
6327 used as an input to vect_permute_store_chain(), and OPRNDS as
6328 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
6330 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
6331 OPRNDS are of size 1. */
6332 next_stmt = first_stmt;
6333 for (i = 0; i < group_size; i++)
6335 /* Since gaps are not supported for interleaved stores,
6336 GROUP_SIZE is the exact number of stmts in the chain.
6337 Therefore, NEXT_STMT can't be NULL_TREE. In case that
6338 there is no interleaving, GROUP_SIZE is 1, and only one
6339 iteration of the loop will be executed. */
6340 gcc_assert (next_stmt
6341 && gimple_assign_single_p (next_stmt));
6342 op = gimple_assign_rhs1 (next_stmt);
6344 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
6345 dr_chain.quick_push (vec_oprnd);
6346 oprnds.quick_push (vec_oprnd);
6347 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
6351 /* We should have caught mismatched types earlier. */
6352 gcc_assert (useless_type_conversion_p (vectype,
6353 TREE_TYPE (vec_oprnd)));
6354 bool simd_lane_access_p
6355 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
6356 if (simd_lane_access_p
6357 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
6358 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
6359 && integer_zerop (DR_OFFSET (first_dr))
6360 && integer_zerop (DR_INIT (first_dr))
6361 && alias_sets_conflict_p (get_alias_set (aggr_type),
6362 get_alias_set (TREE_TYPE (ref_type))))
6364 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
6365 dataref_offset = build_int_cst (ref_type, 0);
6366 inv_p = false;
6368 else
6369 dataref_ptr
6370 = vect_create_data_ref_ptr (first_stmt, aggr_type,
6371 simd_lane_access_p ? loop : NULL,
6372 offset, &dummy, gsi, &ptr_incr,
6373 simd_lane_access_p, &inv_p);
6374 gcc_assert (bb_vinfo || !inv_p);
6376 else
6378 /* For interleaved stores we created vectorized defs for all the
6379 defs stored in OPRNDS in the previous iteration (previous copy).
6380 DR_CHAIN is then used as an input to vect_permute_store_chain(),
6381 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
6382 next copy.
6383 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
6384 OPRNDS are of size 1. */
6385 for (i = 0; i < group_size; i++)
6387 op = oprnds[i];
6388 vect_is_simple_use (op, vinfo, &def_stmt, &dt);
6389 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
6390 dr_chain[i] = vec_oprnd;
6391 oprnds[i] = vec_oprnd;
6393 if (dataref_offset)
6394 dataref_offset
6395 = int_const_binop (PLUS_EXPR, dataref_offset,
6396 TYPE_SIZE_UNIT (aggr_type));
6397 else
6398 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
6399 TYPE_SIZE_UNIT (aggr_type));
6402 if (memory_access_type == VMAT_LOAD_STORE_LANES)
6404 tree vec_array;
6406 /* Combine all the vectors into an array. */
6407 vec_array = create_vector_array (vectype, vec_num);
6408 for (i = 0; i < vec_num; i++)
6410 vec_oprnd = dr_chain[i];
6411 write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
6414 /* Emit:
6415 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
6416 data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
6417 gcall *call = gimple_build_call_internal (IFN_STORE_LANES, 1,
6418 vec_array);
6419 gimple_call_set_lhs (call, data_ref);
6420 gimple_call_set_nothrow (call, true);
6421 new_stmt = call;
6422 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6424 else
6426 new_stmt = NULL;
6427 if (grouped_store)
6429 if (j == 0)
6430 result_chain.create (group_size);
6431 /* Permute. */
6432 vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
6433 &result_chain);
6436 next_stmt = first_stmt;
6437 for (i = 0; i < vec_num; i++)
6439 unsigned align, misalign;
6441 if (i > 0)
6442 /* Bump the vector pointer. */
6443 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
6444 stmt, NULL_TREE);
6446 if (slp)
6447 vec_oprnd = vec_oprnds[i];
6448 else if (grouped_store)
6449 /* For grouped stores vectorized defs are interleaved in
6450 vect_permute_store_chain(). */
6451 vec_oprnd = result_chain[i];
6453 data_ref = fold_build2 (MEM_REF, vectype,
6454 dataref_ptr,
6455 dataref_offset
6456 ? dataref_offset
6457 : build_int_cst (ref_type, 0));
6458 align = DR_TARGET_ALIGNMENT (first_dr);
6459 if (aligned_access_p (first_dr))
6460 misalign = 0;
6461 else if (DR_MISALIGNMENT (first_dr) == -1)
6463 align = dr_alignment (vect_dr_behavior (first_dr));
6464 misalign = 0;
6465 TREE_TYPE (data_ref)
6466 = build_aligned_type (TREE_TYPE (data_ref),
6467 align * BITS_PER_UNIT);
6469 else
6471 TREE_TYPE (data_ref)
6472 = build_aligned_type (TREE_TYPE (data_ref),
6473 TYPE_ALIGN (elem_type));
6474 misalign = DR_MISALIGNMENT (first_dr);
6476 if (dataref_offset == NULL_TREE
6477 && TREE_CODE (dataref_ptr) == SSA_NAME)
6478 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
6479 misalign);
6481 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
6483 tree perm_mask = perm_mask_for_reverse (vectype);
6484 tree perm_dest
6485 = vect_create_destination_var (gimple_assign_rhs1 (stmt),
6486 vectype);
6487 tree new_temp = make_ssa_name (perm_dest);
6489 /* Generate the permute statement. */
6490 gimple *perm_stmt
6491 = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
6492 vec_oprnd, perm_mask);
6493 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
6495 perm_stmt = SSA_NAME_DEF_STMT (new_temp);
6496 vec_oprnd = new_temp;
6499 /* Arguments are ready. Create the new vector stmt. */
6500 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
6501 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6503 if (slp)
6504 continue;
6506 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
6507 if (!next_stmt)
6508 break;
6511 if (!slp)
6513 if (j == 0)
6514 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6515 else
6516 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6517 prev_stmt_info = vinfo_for_stmt (new_stmt);
6521 oprnds.release ();
6522 result_chain.release ();
6523 vec_oprnds.release ();
6525 return true;
6528 /* Given a vector type VECTYPE, turns permutation SEL into the equivalent
6529 VECTOR_CST mask. No checks are made that the target platform supports the
6530 mask, so callers may wish to test can_vec_perm_const_p separately, or use
6531 vect_gen_perm_mask_checked. */
6533 tree
6534 vect_gen_perm_mask_any (tree vectype, const vec_perm_indices &sel)
6536 tree mask_elt_type, mask_type;
6538 mask_elt_type = lang_hooks.types.type_for_mode
6539 (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))).require (), 1);
6540 mask_type = get_vectype_for_scalar_type (mask_elt_type);
6541 return vec_perm_indices_to_tree (mask_type, sel);
6544 /* Checked version of vect_gen_perm_mask_any. Asserts can_vec_perm_const_p,
6545 i.e. that the target supports the pattern _for arbitrary input vectors_. */
6547 tree
6548 vect_gen_perm_mask_checked (tree vectype, const vec_perm_indices &sel)
6550 gcc_assert (can_vec_perm_const_p (TYPE_MODE (vectype), sel));
6551 return vect_gen_perm_mask_any (vectype, sel);
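/* A typical use of the two helpers above, sketched after the callers in
   this file, is building an element-reversal mask:

     vec_perm_builder sel (nunits, nunits, 1);
     for (i = 0; i < nunits; ++i)
       sel.quick_push (nunits - 1 - i);
     vec_perm_indices indices (sel, 1, nunits);
     tree mask = vect_gen_perm_mask_checked (vectype, indices);

   The checked variant may only be used once can_vec_perm_const_p is
   known to hold for the target.  */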
6554 /* Given vector variables X and Y that were generated for the scalar
6555 STMT, generate instructions to permute the vector elements of X and Y
6556 using permutation mask MASK_VEC, insert them at *GSI and return the
6557 permuted vector variable. */
6559 static tree
6560 permute_vec_elements (tree x, tree y, tree mask_vec, gimple *stmt,
6561 gimple_stmt_iterator *gsi)
6563 tree vectype = TREE_TYPE (x);
6564 tree perm_dest, data_ref;
6565 gimple *perm_stmt;
6567 perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
6568 data_ref = make_ssa_name (perm_dest);
6570 /* Generate the permute statement. */
6571 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
6572 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
6574 return data_ref;
6577 /* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
6578 inserting them on the loop's preheader edge. Returns true if we
6579 were successful in doing so (and thus STMT can then be moved),
6580 otherwise returns false. */
6582 static bool
6583 hoist_defs_of_uses (gimple *stmt, struct loop *loop)
6585 ssa_op_iter i;
6586 tree op;
6587 bool any = false;
6589 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
6591 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
6592 if (!gimple_nop_p (def_stmt)
6593 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
6595 /* Make sure we don't need to recurse. While we could do
6596 so in simple cases, for more complex use webs
6597 we don't have an easy way to preserve stmt order to fulfil
6598 dependencies within them. */
6599 tree op2;
6600 ssa_op_iter i2;
6601 if (gimple_code (def_stmt) == GIMPLE_PHI)
6602 return false;
6603 FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
6605 gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
6606 if (!gimple_nop_p (def_stmt2)
6607 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
6608 return false;
6610 any = true;
6614 if (!any)
6615 return true;
6617 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
6619 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
6620 if (!gimple_nop_p (def_stmt)
6621 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
6623 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
6624 gsi_remove (&gsi, false);
6625 gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
6629 return true;
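/* For example, if STMT is a loop-invariant load *q_1 and q_1 is defined
   inside the loop as q_1 = p_2 + 4 with loop-invariant p_2, the
   definition of q_1 is moved to the preheader edge so that the caller
   can then hoist STMT itself.  */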
6632 /* vectorizable_load.
6634 Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
6635 can be vectorized.
6636 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
6637 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
6638 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
6640 static bool
6641 vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
6642 slp_tree slp_node, slp_instance slp_node_instance)
6644 tree scalar_dest;
6645 tree vec_dest = NULL;
6646 tree data_ref = NULL;
6647 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
6648 stmt_vec_info prev_stmt_info;
6649 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6650 struct loop *loop = NULL;
6651 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
6652 bool nested_in_vect_loop = false;
6653 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
6654 tree elem_type;
6655 tree new_temp;
6656 machine_mode mode;
6657 gimple *new_stmt = NULL;
6658 tree dummy;
6659 enum dr_alignment_support alignment_support_scheme;
6660 tree dataref_ptr = NULL_TREE;
6661 tree dataref_offset = NULL_TREE;
6662 gimple *ptr_incr = NULL;
6663 int ncopies;
6664 int i, j, group_size, group_gap_adj;
6665 tree msq = NULL_TREE, lsq;
6666 tree offset = NULL_TREE;
6667 tree byte_offset = NULL_TREE;
6668 tree realignment_token = NULL_TREE;
6669 gphi *phi = NULL;
6670 vec<tree> dr_chain = vNULL;
6671 bool grouped_load = false;
6672 gimple *first_stmt;
6673 gimple *first_stmt_for_drptr = NULL;
6674 bool inv_p;
6675 bool compute_in_loop = false;
6676 struct loop *at_loop;
6677 int vec_num;
6678 bool slp = (slp_node != NULL);
6679 bool slp_perm = false;
6680 enum tree_code code;
6681 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
6682 int vf;
6683 tree aggr_type;
6684 gather_scatter_info gs_info;
6685 vec_info *vinfo = stmt_info->vinfo;
6686 tree ref_type;
6688 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
6689 return false;
6691 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
6692 && ! vec_stmt)
6693 return false;
6695 /* Is vectorizable load? */
6696 if (!is_gimple_assign (stmt))
6697 return false;
6699 scalar_dest = gimple_assign_lhs (stmt);
6700 if (TREE_CODE (scalar_dest) != SSA_NAME)
6701 return false;
6703 code = gimple_assign_rhs_code (stmt);
6704 if (code != ARRAY_REF
6705 && code != BIT_FIELD_REF
6706 && code != INDIRECT_REF
6707 && code != COMPONENT_REF
6708 && code != IMAGPART_EXPR
6709 && code != REALPART_EXPR
6710 && code != MEM_REF
6711 && TREE_CODE_CLASS (code) != tcc_declaration)
6712 return false;
6714 if (!STMT_VINFO_DATA_REF (stmt_info))
6715 return false;
6717 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
6718 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
6720 if (loop_vinfo)
6722 loop = LOOP_VINFO_LOOP (loop_vinfo);
6723 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
6724 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
6726 else
6727 vf = 1;
6729 /* Multiple types in SLP are handled by creating the appropriate number of
6730 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
6731 case of SLP. */
6732 if (slp)
6733 ncopies = 1;
6734 else
6735 ncopies = vect_get_num_copies (loop_vinfo, vectype);
6737 gcc_assert (ncopies >= 1);
6739 /* FORNOW. This restriction should be relaxed. */
6740 if (nested_in_vect_loop && ncopies > 1)
6742 if (dump_enabled_p ())
6743 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6744 "multiple types in nested loop.\n");
6745 return false;
6748 /* Invalidate assumptions made by dependence analysis when vectorization
6749 on the unrolled body effectively re-orders stmts. */
6750 if (ncopies > 1
6751 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
6752 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6753 > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
6755 if (dump_enabled_p ())
6756 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6757 "cannot perform implicit CSE when unrolling "
6758 "with negative dependence distance\n");
6759 return false;
6762 elem_type = TREE_TYPE (vectype);
6763 mode = TYPE_MODE (vectype);
6765 /* FORNOW. In some cases we can vectorize even if the data type is not
6766 supported (e.g. data copies). */
6767 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
6769 if (dump_enabled_p ())
6770 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6771 "Aligned load, but unsupported type.\n");
6772 return false;
6775 /* Check if the load is a part of an interleaving chain. */
6776 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
6778 grouped_load = true;
6779 /* FORNOW */
6780 gcc_assert (!nested_in_vect_loop);
6781 gcc_assert (!STMT_VINFO_GATHER_SCATTER_P (stmt_info));
6783 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
6784 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
6786 if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
6787 slp_perm = true;
6789 /* Invalidate assumptions made by dependence analysis when vectorization
6790 on the unrolled body effectively re-orders stmts. */
6791 if (!PURE_SLP_STMT (stmt_info)
6792 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
6793 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6794 > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
6796 if (dump_enabled_p ())
6797 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6798 "cannot perform implicit CSE when performing "
6799 "group loads with negative dependence distance\n");
6800 return false;
6803 /* Similarly, when the stmt is a load that is both part of an SLP
6804 instance and a loop-vectorized stmt via the same-dr mechanism,
6805 we have to give up. */
6806 if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)
6807 && (STMT_SLP_TYPE (stmt_info)
6808 != STMT_SLP_TYPE (vinfo_for_stmt
6809 (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)))))
6811 if (dump_enabled_p ())
6812 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6813 "conflicting SLP types for CSEd load\n");
6814 return false;
6818 vect_memory_access_type memory_access_type;
6819 if (!get_load_store_type (stmt, vectype, slp, VLS_LOAD, ncopies,
6820 &memory_access_type, &gs_info))
6821 return false;
6823 if (!vec_stmt) /* transformation not required. */
6825 if (!slp)
6826 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
6827 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
6828 /* The SLP costs are calculated during SLP analysis. */
6829 if (!PURE_SLP_STMT (stmt_info))
6830 vect_model_load_cost (stmt_info, ncopies, memory_access_type,
6831 NULL, NULL, NULL);
6832 return true;
6835 if (!slp)
6836 gcc_assert (memory_access_type
6837 == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
6839 if (dump_enabled_p ())
6840 dump_printf_loc (MSG_NOTE, vect_location,
6841 "transform load. ncopies = %d\n", ncopies);
6843 /* Transform. */
6845 ensure_base_align (dr);
6847 if (memory_access_type == VMAT_GATHER_SCATTER)
6849 tree vec_oprnd0 = NULL_TREE, op;
6850 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
6851 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
6852 tree ptr, mask, var, scale, merge, perm_mask = NULL_TREE, prev_res = NULL_TREE;
6853 edge pe = loop_preheader_edge (loop);
6854 gimple_seq seq;
6855 basic_block new_bb;
6856 enum { NARROW, NONE, WIDEN } modifier;
6857 int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);
6859 if (nunits == gather_off_nunits)
6860 modifier = NONE;
6861 else if (nunits == gather_off_nunits / 2)
6863 modifier = WIDEN;
6865 vec_perm_builder sel (gather_off_nunits, gather_off_nunits, 1);
6866 for (i = 0; i < gather_off_nunits; ++i)
6867 sel.quick_push (i | nunits);
6869 vec_perm_indices indices (sel, 1, gather_off_nunits);
6870 perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype,
6871 indices);
6873 else if (nunits == gather_off_nunits * 2)
6875 modifier = NARROW;
6877 vec_perm_builder sel (nunits, nunits, 1);
6878 for (i = 0; i < nunits; ++i)
6879 sel.quick_push (i < gather_off_nunits
6880 ? i : i + nunits - gather_off_nunits);
6882 vec_perm_indices indices (sel, 2, nunits);
6883 perm_mask = vect_gen_perm_mask_checked (vectype, indices);
6884 ncopies *= 2;
6886 else
6887 gcc_unreachable ();
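/* A concrete instance of the selectors built above (values assumed for
   illustration): with nunits == 4 and gather_off_nunits == 8 the WIDEN
   selector is {4, 5, 6, 7, 4, 5, 6, 7}, reusing the upper half of the
   offset vector on odd copies, while with nunits == 8 and
   gather_off_nunits == 4 the NARROW selector is {0, 1, 2, 3, 8, 9, 10, 11},
   concatenating the low halves of two consecutive gather results into
   one full vector.  */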
6889 rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
6890 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6891 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6892 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6893 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6894 scaletype = TREE_VALUE (arglist);
6895 gcc_checking_assert (types_compatible_p (srctype, rettype));
6897 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6899 ptr = fold_convert (ptrtype, gs_info.base);
6900 if (!is_gimple_min_invariant (ptr))
6902 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
6903 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
6904 gcc_assert (!new_bb);
6907 /* Currently we support only unconditional gather loads,
6908 so mask should be all ones. */
6909 if (TREE_CODE (masktype) == INTEGER_TYPE)
6910 mask = build_int_cst (masktype, -1);
6911 else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
6913 mask = build_int_cst (TREE_TYPE (masktype), -1);
6914 mask = build_vector_from_val (masktype, mask);
6915 mask = vect_init_vector (stmt, mask, masktype, NULL);
6917 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
6919 REAL_VALUE_TYPE r;
6920 long tmp[6];
6921 for (j = 0; j < 6; ++j)
6922 tmp[j] = -1;
6923 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
6924 mask = build_real (TREE_TYPE (masktype), r);
6925 mask = build_vector_from_val (masktype, mask);
6926 mask = vect_init_vector (stmt, mask, masktype, NULL);
6928 else
6929 gcc_unreachable ();
6931 scale = build_int_cst (scaletype, gs_info.scale);
6933 if (TREE_CODE (TREE_TYPE (rettype)) == INTEGER_TYPE)
6934 merge = build_int_cst (TREE_TYPE (rettype), 0);
6935 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype)))
6937 REAL_VALUE_TYPE r;
6938 long tmp[6];
6939 for (j = 0; j < 6; ++j)
6940 tmp[j] = 0;
6941 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (rettype)));
6942 merge = build_real (TREE_TYPE (rettype), r);
6944 else
6945 gcc_unreachable ();
6946 merge = build_vector_from_val (rettype, merge);
6947 merge = vect_init_vector (stmt, merge, rettype, NULL);
6949 prev_stmt_info = NULL;
6950 for (j = 0; j < ncopies; ++j)
6952 if (modifier == WIDEN && (j & 1))
6953 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
6954 perm_mask, stmt, gsi);
6955 else if (j == 0)
6956 op = vec_oprnd0
6957 = vect_get_vec_def_for_operand (gs_info.offset, stmt);
6958 else
6959 op = vec_oprnd0
6960 = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt, vec_oprnd0);
6962 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
6964 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
6965 == TYPE_VECTOR_SUBPARTS (idxtype));
6966 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
6967 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
6968 new_stmt
6969 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
6970 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6971 op = var;
6974 new_stmt
6975 = gimple_build_call (gs_info.decl, 5, merge, ptr, op, mask, scale);
6977 if (!useless_type_conversion_p (vectype, rettype))
6979 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
6980 == TYPE_VECTOR_SUBPARTS (rettype));
6981 op = vect_get_new_ssa_name (rettype, vect_simple_var);
6982 gimple_call_set_lhs (new_stmt, op);
6983 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6984 var = make_ssa_name (vec_dest);
6985 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
6986 new_stmt
6987 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
6989 else
6991 var = make_ssa_name (vec_dest, new_stmt);
6992 gimple_call_set_lhs (new_stmt, var);
6995 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6997 if (modifier == NARROW)
6999 if ((j & 1) == 0)
7001 prev_res = var;
7002 continue;
7004 var = permute_vec_elements (prev_res, var,
7005 perm_mask, stmt, gsi);
7006 new_stmt = SSA_NAME_DEF_STMT (var);
7009 if (prev_stmt_info == NULL)
7010 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7011 else
7012 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7013 prev_stmt_info = vinfo_for_stmt (new_stmt);
7015 return true;
7018 if (memory_access_type == VMAT_ELEMENTWISE
7019 || memory_access_type == VMAT_STRIDED_SLP)
7021 gimple_stmt_iterator incr_gsi;
7022 bool insert_after;
7023 gimple *incr;
7024 tree offvar;
7025 tree ivstep;
7026 tree running_off;
7027 vec<constructor_elt, va_gc> *v = NULL;
7028 gimple_seq stmts = NULL;
7029 tree stride_base, stride_step, alias_off;
7031 gcc_assert (!nested_in_vect_loop);
7033 if (slp && grouped_load)
7035 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
7036 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
7037 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
7038 ref_type = get_group_alias_ptr_type (first_stmt);
7040 else
7042 first_stmt = stmt;
7043 first_dr = dr;
7044 group_size = 1;
7045 ref_type = reference_alias_ptr_type (DR_REF (first_dr));
7048 stride_base
7049 = fold_build_pointer_plus
7050 (DR_BASE_ADDRESS (first_dr),
7051 size_binop (PLUS_EXPR,
7052 convert_to_ptrofftype (DR_OFFSET (first_dr)),
7053 convert_to_ptrofftype (DR_INIT (first_dr))));
7054 stride_step = fold_convert (sizetype, DR_STEP (first_dr));
7056 /* For a load with loop-invariant (but other than power-of-2)
7057 stride (i.e. not a grouped access) like so:
7059 for (i = 0; i < n; i += stride)
7060 ... = array[i];
7062 we generate a new induction variable and new accesses to
7063 form a new vector (or vectors, depending on ncopies):
7065 for (j = 0; ; j += VF*stride)
7066 tmp1 = array[j];
7067 tmp2 = array[j + stride];
7069 vectemp = {tmp1, tmp2, ...}
7072 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
7073 build_int_cst (TREE_TYPE (stride_step), vf));
7075 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
7077 create_iv (unshare_expr (stride_base), unshare_expr (ivstep), NULL,
7078 loop, &incr_gsi, insert_after,
7079 &offvar, NULL);
7080 incr = gsi_stmt (incr_gsi);
7081 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
7083 stride_step = force_gimple_operand (unshare_expr (stride_step),
7084 &stmts, true, NULL_TREE);
7085 if (stmts)
7086 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
7088 prev_stmt_info = NULL;
7089 running_off = offvar;
7090 alias_off = build_int_cst (ref_type, 0);
7091 int nloads = nunits;
7092 int lnel = 1;
7093 tree ltype = TREE_TYPE (vectype);
7094 tree lvectype = vectype;
7095 auto_vec<tree> dr_chain;
7096 if (memory_access_type == VMAT_STRIDED_SLP)
7098 if (group_size < nunits)
7100 /* First check if vec_init optab supports construction from
7101 vector elts directly. */
7102 scalar_mode elmode = SCALAR_TYPE_MODE (TREE_TYPE (vectype));
7103 machine_mode vmode;
7104 if (mode_for_vector (elmode, group_size).exists (&vmode)
7105 && VECTOR_MODE_P (vmode)
7106 && (convert_optab_handler (vec_init_optab,
7107 TYPE_MODE (vectype), vmode)
7108 != CODE_FOR_nothing))
7110 nloads = nunits / group_size;
7111 lnel = group_size;
7112 ltype = build_vector_type (TREE_TYPE (vectype), group_size);
7114 else
7116 /* Otherwise avoid emitting a constructor of vector elements
7117 by performing the loads using an integer type of the same
7118 size, constructing a vector of those and then
7119 re-interpreting it as the original vector type.
7120 This avoids a huge runtime penalty due to the general
7121 inability to perform store forwarding from smaller stores
7122 to a larger load. */
7123 unsigned lsize
7124 = group_size * TYPE_PRECISION (TREE_TYPE (vectype));
7125 elmode = int_mode_for_size (lsize, 0).require ();
7126 /* If we can't construct such a vector fall back to
7127 element loads of the original vector type. */
7128 if (mode_for_vector (elmode,
7129 nunits / group_size).exists (&vmode)
7130 && VECTOR_MODE_P (vmode)
7131 && (convert_optab_handler (vec_init_optab, vmode, elmode)
7132 != CODE_FOR_nothing))
7134 nloads = nunits / group_size;
7135 lnel = group_size;
7136 ltype = build_nonstandard_integer_type (lsize, 1);
7137 lvectype = build_vector_type (ltype, nloads);
7141 else
7143 nloads = 1;
7144 lnel = nunits;
7145 ltype = vectype;
7147 ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
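/* A concrete instance of the fallback above, assuming group_size == 2,
   SImode elements, a V4SI vectype and a target that can construct V2DI
   from DImode pieces: LSIZE is 64 bits, LTYPE becomes a 64-bit integer
   type, LVECTYPE becomes V2DI and nloads == 2; two DImode loads feed a
   V2DI constructor which is later VIEW_CONVERT_EXPRed back to V4SI,
   sidestepping the store-forwarding penalty of building the vector from
   four separate SImode loads.  */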
7149 if (slp)
7151 /* For SLP permutation support we need to load the whole group,
7152 not only the number of vector stmts the permutation result
7153 fits in. */
7154 if (slp_perm)
7156 ncopies = (group_size * vf + nunits - 1) / nunits;
7157 dr_chain.create (ncopies);
7159 else
7160 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7162 int group_el = 0;
7163 unsigned HOST_WIDE_INT
7164 elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
7165 for (j = 0; j < ncopies; j++)
7167 if (nloads > 1)
7168 vec_alloc (v, nloads);
7169 for (i = 0; i < nloads; i++)
7171 tree this_off = build_int_cst (TREE_TYPE (alias_off),
7172 group_el * elsz);
7173 new_stmt = gimple_build_assign (make_ssa_name (ltype),
7174 build2 (MEM_REF, ltype,
7175 running_off, this_off));
7176 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7177 if (nloads > 1)
7178 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE,
7179 gimple_assign_lhs (new_stmt));
7181 group_el += lnel;
7182 if (! slp
7183 || group_el == group_size)
7185 tree newoff = copy_ssa_name (running_off);
7186 gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
7187 running_off, stride_step);
7188 vect_finish_stmt_generation (stmt, incr, gsi);
7190 running_off = newoff;
7191 group_el = 0;
7194 if (nloads > 1)
7196 tree vec_inv = build_constructor (lvectype, v);
7197 new_temp = vect_init_vector (stmt, vec_inv, lvectype, gsi);
7198 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7199 if (lvectype != vectype)
7201 new_stmt = gimple_build_assign (make_ssa_name (vectype),
7202 VIEW_CONVERT_EXPR,
7203 build1 (VIEW_CONVERT_EXPR,
7204 vectype, new_temp));
7205 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7209 if (slp)
7211 if (slp_perm)
7212 dr_chain.quick_push (gimple_assign_lhs (new_stmt));
7213 else
7214 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7216 else
7218 if (j == 0)
7219 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7220 else
7221 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7222 prev_stmt_info = vinfo_for_stmt (new_stmt);
7225 if (slp_perm)
7227 unsigned n_perms;
7228 vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
7229 slp_node_instance, false, &n_perms);
7231 return true;
7234 if (grouped_load)
7236 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
7237 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
7238 /* For SLP vectorization we directly vectorize a subchain
7239 without permutation. */
7240 if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
7241 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
7242 /* For BB vectorization always use the first stmt to base
7243 the data ref pointer on. */
7244 if (bb_vinfo)
7245 first_stmt_for_drptr = SLP_TREE_SCALAR_STMTS (slp_node)[0];
7247 /* Check if the chain of loads is already vectorized. */
7248 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
7249 /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
7250 ??? But we can only do so if there is exactly one
7251 as we have no way to get at the rest. Leave the CSE
7252 opportunity alone.
7253 ??? With the group load eventually participating
7254 in multiple different permutations (having multiple
7255 slp nodes which refer to the same group) the CSE
7256 is even wrong code. See PR56270. */
7257 && !slp)
7259 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
7260 return true;
7262 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
7263 group_gap_adj = 0;
7265 /* VEC_NUM is the number of vect stmts to be created for this group. */
7266 if (slp)
7268 grouped_load = false;
7269 /* For SLP permutation support we need to load the whole group,
7270 not only the number of vector stmts the permutation result
7271 fits in. */
7272 if (slp_perm)
7274 vec_num = (group_size * vf + nunits - 1) / nunits;
7275 group_gap_adj = vf * group_size - nunits * vec_num;
7277 else
7279 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7280 group_gap_adj
7281 = group_size - SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
7284 else
7285 vec_num = group_size;
7287 ref_type = get_group_alias_ptr_type (first_stmt);
7289 else
7291 first_stmt = stmt;
7292 first_dr = dr;
7293 group_size = vec_num = 1;
7294 group_gap_adj = 0;
7295 ref_type = reference_alias_ptr_type (DR_REF (first_dr));
7298 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
7299 gcc_assert (alignment_support_scheme);
7300 /* Targets with load-lane instructions must not require explicit
7301 realignment. */
7302 gcc_assert (memory_access_type != VMAT_LOAD_STORE_LANES
7303 || alignment_support_scheme == dr_aligned
7304 || alignment_support_scheme == dr_unaligned_supported);
7306 /* In case the vectorization factor (VF) is bigger than the number
7307 of elements that we can fit in a vectype (nunits), we have to generate
7308 more than one vector stmt - i.e., we need to "unroll" the
7309 vector stmt by a factor VF/nunits. In doing so, we record a pointer
7310 from one copy of the vector stmt to the next, in the field
7311 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
7312 stages to find the correct vector defs to be used when vectorizing
7313 stmts that use the defs of the current stmt. The example below
7314 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
7315 need to create 4 vectorized stmts):
7317 before vectorization:
7318 RELATED_STMT VEC_STMT
7319 S1: x = memref - -
7320 S2: z = x + 1 - -
7322 step 1: vectorize stmt S1:
7323 We first create the vector stmt VS1_0, and, as usual, record a
7324 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
7325 Next, we create the vector stmt VS1_1, and record a pointer to
7326 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
7327 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
7328 stmts and pointers:
7329 RELATED_STMT VEC_STMT
7330 VS1_0: vx0 = memref0 VS1_1 -
7331 VS1_1: vx1 = memref1 VS1_2 -
7332 VS1_2: vx2 = memref2 VS1_3 -
7333 VS1_3: vx3 = memref3 - -
7334 S1: x = load - VS1_0
7335 S2: z = x + 1 - -
7337 See the documentation of vect_get_vec_def_for_stmt_copy for how the
7338 information recorded in the RELATED_STMT field is used to vectorize
7339 stmt S2. */
7341 /* In case of interleaving (non-unit grouped access):
7343 S1: x2 = &base + 2
7344 S2: x0 = &base
7345 S3: x1 = &base + 1
7346 S4: x3 = &base + 3
7348 Vectorized loads are created in the order of memory accesses
7349 starting from the access of the first stmt of the chain:
7351 VS1: vx0 = &base
7352 VS2: vx1 = &base + vec_size*1
7353 VS3: vx3 = &base + vec_size*2
7354 VS4: vx4 = &base + vec_size*3
7356 Then permutation statements are generated:
7358 VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
7359 VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
7362 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
7363 (the order of the data-refs in the output of vect_permute_load_chain
7364 corresponds to the order of scalar stmts in the interleaving chain - see
7365 the documentation of vect_permute_load_chain()).
7366 The generation of permutation stmts and recording them in
7367 STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().
7369 In case of both multiple types and interleaving, the vector loads and
7370 permutation stmts above are created for every copy. The result vector
7371 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
7372 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
7374 /* If the data reference is aligned (dr_aligned) or potentially unaligned
7375 on a target that supports unaligned accesses (dr_unaligned_supported)
7376 we generate the following code:
7377 p = initial_addr;
7378 indx = 0;
7379 loop {
7380 p = p + indx * vectype_size;
7381 vec_dest = *(p);
7382 indx = indx + 1;
7385 Otherwise, the data reference is potentially unaligned on a target that
7386 does not support unaligned accesses (dr_explicit_realign_optimized) -
7387 then generate the following code, in which the data in each iteration is
7388 obtained by two vector loads, one from the previous iteration, and one
7389 from the current iteration:
7390 p1 = initial_addr;
7391 msq_init = *(floor(p1))
7392 p2 = initial_addr + VS - 1;
7393 realignment_token = call target_builtin;
7394 indx = 0;
7395 loop {
7396 p2 = p2 + indx * vectype_size
7397 lsq = *(floor(p2))
7398 vec_dest = realign_load (msq, lsq, realignment_token)
7399 indx = indx + 1;
7400 msq = lsq;
7401 } */
7403 /* If the misalignment remains the same throughout the execution of the
7404 loop, we can create the init_addr and permutation mask at the loop
7405 preheader. Otherwise, it needs to be created inside the loop.
7406 This can only occur when vectorizing memory accesses in the inner-loop
7407 nested within an outer-loop that is being vectorized. */
7409 if (nested_in_vect_loop
7410 && (DR_STEP_ALIGNMENT (dr) % GET_MODE_SIZE (TYPE_MODE (vectype))) != 0)
7412 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
7413 compute_in_loop = true;
7416 if ((alignment_support_scheme == dr_explicit_realign_optimized
7417 || alignment_support_scheme == dr_explicit_realign)
7418 && !compute_in_loop)
7420 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
7421 alignment_support_scheme, NULL_TREE,
7422 &at_loop);
7423 if (alignment_support_scheme == dr_explicit_realign_optimized)
7425 phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
7426 byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
7427 size_one_node);
7430 else
7431 at_loop = loop;
7433 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
7434 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
7436 if (memory_access_type == VMAT_LOAD_STORE_LANES)
7437 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
7438 else
7439 aggr_type = vectype;
7441 prev_stmt_info = NULL;
7442 int group_elt = 0;
7443 for (j = 0; j < ncopies; j++)
7445 /* 1. Create the vector or array pointer update chain. */
7446 if (j == 0)
7448 bool simd_lane_access_p
7449 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
7450 if (simd_lane_access_p
7451 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
7452 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
7453 && integer_zerop (DR_OFFSET (first_dr))
7454 && integer_zerop (DR_INIT (first_dr))
7455 && alias_sets_conflict_p (get_alias_set (aggr_type),
7456 get_alias_set (TREE_TYPE (ref_type)))
7457 && (alignment_support_scheme == dr_aligned
7458 || alignment_support_scheme == dr_unaligned_supported))
7460 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
7461 dataref_offset = build_int_cst (ref_type, 0);
7462 inv_p = false;
7464 else if (first_stmt_for_drptr
7465 && first_stmt != first_stmt_for_drptr)
7467 dataref_ptr
7468 = vect_create_data_ref_ptr (first_stmt_for_drptr, aggr_type,
7469 at_loop, offset, &dummy, gsi,
7470 &ptr_incr, simd_lane_access_p,
7471 &inv_p, byte_offset);
7472 /* Adjust the pointer by the difference to first_stmt. */
7473 data_reference_p ptrdr
7474 = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt_for_drptr));
7475 tree diff = fold_convert (sizetype,
7476 size_binop (MINUS_EXPR,
7477 DR_INIT (first_dr),
7478 DR_INIT (ptrdr)));
7479 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7480 stmt, diff);
7482 else
7483 dataref_ptr
7484 = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
7485 offset, &dummy, gsi, &ptr_incr,
7486 simd_lane_access_p, &inv_p,
7487 byte_offset);
7489 else if (dataref_offset)
7490 dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
7491 TYPE_SIZE_UNIT (aggr_type));
7492 else
7493 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
7494 TYPE_SIZE_UNIT (aggr_type));
7496 if (grouped_load || slp_perm)
7497 dr_chain.create (vec_num);
7499 if (memory_access_type == VMAT_LOAD_STORE_LANES)
7501 tree vec_array;
7503 vec_array = create_vector_array (vectype, vec_num);
7505 /* Emit:
7506 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
7507 data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
7508 gcall *call = gimple_build_call_internal (IFN_LOAD_LANES, 1,
7509 data_ref);
7510 gimple_call_set_lhs (call, vec_array);
7511 gimple_call_set_nothrow (call, true);
7512 new_stmt = call;
7513 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7515 /* Extract each vector into an SSA_NAME. */
7516 for (i = 0; i < vec_num; i++)
7518 new_temp = read_vector_array (stmt, gsi, scalar_dest,
7519 vec_array, i);
7520 dr_chain.quick_push (new_temp);
7523 /* Record the mapping between SSA_NAMEs and statements. */
7524 vect_record_grouped_load_vectors (stmt, dr_chain);
7526 else
7528 for (i = 0; i < vec_num; i++)
7530 if (i > 0)
7531 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7532 stmt, NULL_TREE);
7534 /* 2. Create the vector-load in the loop. */
7535 switch (alignment_support_scheme)
7537 case dr_aligned:
7538 case dr_unaligned_supported:
7540 unsigned int align, misalign;
7542 data_ref
7543 = fold_build2 (MEM_REF, vectype, dataref_ptr,
7544 dataref_offset
7545 ? dataref_offset
7546 : build_int_cst (ref_type, 0));
7547 align = DR_TARGET_ALIGNMENT (dr);
7548 if (alignment_support_scheme == dr_aligned)
7550 gcc_assert (aligned_access_p (first_dr));
7551 misalign = 0;
7553 else if (DR_MISALIGNMENT (first_dr) == -1)
7555 align = dr_alignment (vect_dr_behavior (first_dr));
7556 misalign = 0;
7557 TREE_TYPE (data_ref)
7558 = build_aligned_type (TREE_TYPE (data_ref),
7559 align * BITS_PER_UNIT);
7561 else
7563 TREE_TYPE (data_ref)
7564 = build_aligned_type (TREE_TYPE (data_ref),
7565 TYPE_ALIGN (elem_type));
7566 misalign = DR_MISALIGNMENT (first_dr);
7568 if (dataref_offset == NULL_TREE
7569 && TREE_CODE (dataref_ptr) == SSA_NAME)
7570 set_ptr_info_alignment (get_ptr_info (dataref_ptr),
7571 align, misalign);
7572 break;
7574 case dr_explicit_realign:
7576 tree ptr, bump;
7578 tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));
7580 if (compute_in_loop)
7581 msq = vect_setup_realignment (first_stmt, gsi,
7582 &realignment_token,
7583 dr_explicit_realign,
7584 dataref_ptr, NULL);
7586 if (TREE_CODE (dataref_ptr) == SSA_NAME)
7587 ptr = copy_ssa_name (dataref_ptr);
7588 else
7589 ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
7590 unsigned int align = DR_TARGET_ALIGNMENT (first_dr);
7591 new_stmt = gimple_build_assign
7592 (ptr, BIT_AND_EXPR, dataref_ptr,
7593 build_int_cst
7594 (TREE_TYPE (dataref_ptr),
7595 -(HOST_WIDE_INT) align));
7596 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7597 data_ref
7598 = build2 (MEM_REF, vectype, ptr,
7599 build_int_cst (ref_type, 0));
7600 vec_dest = vect_create_destination_var (scalar_dest,
7601 vectype);
7602 new_stmt = gimple_build_assign (vec_dest, data_ref);
7603 new_temp = make_ssa_name (vec_dest, new_stmt);
7604 gimple_assign_set_lhs (new_stmt, new_temp);
7605 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
7606 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
7607 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7608 msq = new_temp;
7610 bump = size_binop (MULT_EXPR, vs,
7611 TYPE_SIZE_UNIT (elem_type));
7612 bump = size_binop (MINUS_EXPR, bump, size_one_node);
7613 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
7614 new_stmt = gimple_build_assign
7615 (NULL_TREE, BIT_AND_EXPR, ptr,
7616 build_int_cst
7617 (TREE_TYPE (ptr), -(HOST_WIDE_INT) align));
7618 ptr = copy_ssa_name (ptr, new_stmt);
7619 gimple_assign_set_lhs (new_stmt, ptr);
7620 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7621 data_ref
7622 = build2 (MEM_REF, vectype, ptr,
7623 build_int_cst (ref_type, 0));
7624 break;
7626 case dr_explicit_realign_optimized:
7628 if (TREE_CODE (dataref_ptr) == SSA_NAME)
7629 new_temp = copy_ssa_name (dataref_ptr);
7630 else
7631 new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
7632 unsigned int align = DR_TARGET_ALIGNMENT (first_dr);
7633 new_stmt = gimple_build_assign
7634 (new_temp, BIT_AND_EXPR, dataref_ptr,
7635 build_int_cst (TREE_TYPE (dataref_ptr),
7636 -(HOST_WIDE_INT) align));
7637 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7638 data_ref
7639 = build2 (MEM_REF, vectype, new_temp,
7640 build_int_cst (ref_type, 0));
7641 break;
7643 default:
7644 gcc_unreachable ();
7646 vec_dest = vect_create_destination_var (scalar_dest, vectype);
7647 new_stmt = gimple_build_assign (vec_dest, data_ref);
7648 new_temp = make_ssa_name (vec_dest, new_stmt);
7649 gimple_assign_set_lhs (new_stmt, new_temp);
7650 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7652 /* 3. Handle explicit realignment if necessary/supported.
7653 Create in loop:
7654 vec_dest = realign_load (msq, lsq, realignment_token) */
7655 if (alignment_support_scheme == dr_explicit_realign_optimized
7656 || alignment_support_scheme == dr_explicit_realign)
7658 lsq = gimple_assign_lhs (new_stmt);
7659 if (!realignment_token)
7660 realignment_token = dataref_ptr;
7661 vec_dest = vect_create_destination_var (scalar_dest, vectype);
7662 new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
7663 msq, lsq, realignment_token);
7664 new_temp = make_ssa_name (vec_dest, new_stmt);
7665 gimple_assign_set_lhs (new_stmt, new_temp);
7666 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7668 if (alignment_support_scheme == dr_explicit_realign_optimized)
7670 gcc_assert (phi);
7671 if (i == vec_num - 1 && j == ncopies - 1)
7672 add_phi_arg (phi, lsq,
7673 loop_latch_edge (containing_loop),
7674 UNKNOWN_LOCATION);
7675 msq = lsq;
7679 /* 4. Handle invariant-load. */
7680 if (inv_p && !bb_vinfo)
7682 gcc_assert (!grouped_load);
7683 /* If we have versioned for aliasing or the loop doesn't
7684 have any data dependencies that would preclude this,
7685 then we are sure this is a loop invariant load and
7686 thus we can insert it on the preheader edge. */
7687 if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
7688 && !nested_in_vect_loop
7689 && hoist_defs_of_uses (stmt, loop))
7691 if (dump_enabled_p ())
7693 dump_printf_loc (MSG_NOTE, vect_location,
7694 "hoisting out of the vectorized "
7695 "loop: ");
7696 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7698 tree tem = copy_ssa_name (scalar_dest);
7699 gsi_insert_on_edge_immediate
7700 (loop_preheader_edge (loop),
7701 gimple_build_assign (tem,
7702 unshare_expr
7703 (gimple_assign_rhs1 (stmt))));
7704 new_temp = vect_init_vector (stmt, tem, vectype, NULL);
7705 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7706 set_vinfo_for_stmt (new_stmt,
7707 new_stmt_vec_info (new_stmt, vinfo));
7709 else
7711 gimple_stmt_iterator gsi2 = *gsi;
7712 gsi_next (&gsi2);
7713 new_temp = vect_init_vector (stmt, scalar_dest,
7714 vectype, &gsi2);
7715 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7719 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
7721 tree perm_mask = perm_mask_for_reverse (vectype);
7722 new_temp = permute_vec_elements (new_temp, new_temp,
7723 perm_mask, stmt, gsi);
7724 new_stmt = SSA_NAME_DEF_STMT (new_temp);
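/* Illustrative note (a sketch, not from the original sources): for a
   4-element vector, perm_mask_for_reverse yields the selector
   { 3, 2, 1, 0 }, so the load above is followed by a VEC_PERM_EXPR
   that reverses the element order of the loaded vector.  */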
7727 /* Collect vector loads and later create their permutation in
7728 vect_transform_grouped_load (). */
7729 if (grouped_load || slp_perm)
7730 dr_chain.quick_push (new_temp);
7732 /* Store vector loads in the corresponding SLP_NODE. */
7733 if (slp && !slp_perm)
7734 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7736 /* With an SLP permutation we load the gaps as well; without
7737 one we need to skip the gaps once we have fully loaded
7738 all the elements. group_gap_adj is GROUP_SIZE here. */
7739 group_elt += nunits;
7740 if (group_gap_adj != 0 && ! slp_perm
7741 && group_elt == group_size - group_gap_adj)
7743 wide_int bump_val = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
7744 * group_gap_adj);
7745 tree bump = wide_int_to_tree (sizetype, bump_val);
7746 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7747 stmt, bump);
7748 group_elt = 0;
7751 /* Bump the vector pointer to account for a gap or for excess
7752 elements loaded for a permuted SLP load. */
7753 if (group_gap_adj != 0 && slp_perm)
7755 wide_int bump_val = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
7756 * group_gap_adj);
7757 tree bump = wide_int_to_tree (sizetype, bump_val);
7758 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7759 stmt, bump);
7763 if (slp && !slp_perm)
7764 continue;
7766 if (slp_perm)
7768 unsigned n_perms;
7769 if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
7770 slp_node_instance, false,
7771 &n_perms))
7773 dr_chain.release ();
7774 return false;
7777 else
7779 if (grouped_load)
7781 if (memory_access_type != VMAT_LOAD_STORE_LANES)
7782 vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
7783 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
7785 else
7787 if (j == 0)
7788 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7789 else
7790 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7791 prev_stmt_info = vinfo_for_stmt (new_stmt);
7794 dr_chain.release ();
7797 return true;
7800 /* Function vect_is_simple_cond.
7802 Input:
7803 LOOP - the loop that is being vectorized.
7804 COND - Condition that is checked for simple use.
7806 Output:
7807 *COMP_VECTYPE - the vector type for the comparison.
7808 *DTS - The def types for the arguments of the comparison
7810 Returns whether a COND can be vectorized. Checks whether
7811 the condition operands are supportable using vect_is_simple_use. */
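/* Illustrative example (an assumption for exposition, not taken from the
   sources): for a condition such as x_1 > 5, the SSA operand x_1 is checked
   with vect_is_simple_use and supplies *COMP_VECTYPE via the vector type of
   its definition, while the constant 5 simply yields
   dts[1] == vect_constant_def.  */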
7813 static bool
7814 vect_is_simple_cond (tree cond, vec_info *vinfo,
7815 tree *comp_vectype, enum vect_def_type *dts,
7816 tree vectype)
7818 tree lhs, rhs;
7819 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
7821 /* Mask case. */
7822 if (TREE_CODE (cond) == SSA_NAME
7823 && VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (cond)))
7825 gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (cond);
7826 if (!vect_is_simple_use (cond, vinfo, &lhs_def_stmt,
7827 &dts[0], comp_vectype)
7828 || !*comp_vectype
7829 || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype))
7830 return false;
7831 return true;
7834 if (!COMPARISON_CLASS_P (cond))
7835 return false;
7837 lhs = TREE_OPERAND (cond, 0);
7838 rhs = TREE_OPERAND (cond, 1);
7840 if (TREE_CODE (lhs) == SSA_NAME)
7842 gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
7843 if (!vect_is_simple_use (lhs, vinfo, &lhs_def_stmt, &dts[0], &vectype1))
7844 return false;
7846 else if (TREE_CODE (lhs) == INTEGER_CST || TREE_CODE (lhs) == REAL_CST
7847 || TREE_CODE (lhs) == FIXED_CST)
7848 dts[0] = vect_constant_def;
7849 else
7850 return false;
7852 if (TREE_CODE (rhs) == SSA_NAME)
7854 gimple *rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
7855 if (!vect_is_simple_use (rhs, vinfo, &rhs_def_stmt, &dts[1], &vectype2))
7856 return false;
7858 else if (TREE_CODE (rhs) == INTEGER_CST || TREE_CODE (rhs) == REAL_CST
7859 || TREE_CODE (rhs) == FIXED_CST)
7860 dts[1] = vect_constant_def;
7861 else
7862 return false;
7864 if (vectype1 && vectype2
7865 && TYPE_VECTOR_SUBPARTS (vectype1) != TYPE_VECTOR_SUBPARTS (vectype2))
7866 return false;
7868 *comp_vectype = vectype1 ? vectype1 : vectype2;
7869 /* Invariant comparison. */
7870 if (! *comp_vectype)
7872 tree scalar_type = TREE_TYPE (lhs);
7873 /* If we can widen the comparison to match vectype do so. */
7874 if (INTEGRAL_TYPE_P (scalar_type)
7875 && tree_int_cst_lt (TYPE_SIZE (scalar_type),
7876 TYPE_SIZE (TREE_TYPE (vectype))))
7877 scalar_type = build_nonstandard_integer_type
7878 (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (vectype))),
7879 TYPE_UNSIGNED (scalar_type));
7880 *comp_vectype = get_vectype_for_scalar_type (scalar_type);
7883 return true;
7886 /* vectorizable_condition.
7888 Check if STMT is a conditional modify expression that can be vectorized.
7889 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
7890 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
7891 at GSI.
7893 When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
7894 to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
7895 the else clause if it is 2).
7897 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
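/* Illustrative sketch (not from the original sources): assuming a
   4-lane vector type, a scalar statement such as

       S: x = a < b ? c : d

   is replaced by a vector statement of the form

       VS: vx = VEC_COND_EXPR <va < vb, vc, vd>

   where va, vb, vc and vd denote the vector defs of the corresponding
   scalar operands.  */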
7899 bool
7900 vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
7901 gimple **vec_stmt, tree reduc_def, int reduc_index,
7902 slp_tree slp_node)
7904 tree scalar_dest = NULL_TREE;
7905 tree vec_dest = NULL_TREE;
7906 tree cond_expr, cond_expr0 = NULL_TREE, cond_expr1 = NULL_TREE;
7907 tree then_clause, else_clause;
7908 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7909 tree comp_vectype = NULL_TREE;
7910 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
7911 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
7912 tree vec_compare;
7913 tree new_temp;
7914 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7915 enum vect_def_type dts[4]
7916 = {vect_unknown_def_type, vect_unknown_def_type,
7917 vect_unknown_def_type, vect_unknown_def_type};
7918 int ndts = 4;
7919 int ncopies;
7920 enum tree_code code, cond_code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
7921 stmt_vec_info prev_stmt_info = NULL;
7922 int i, j;
7923 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7924 vec<tree> vec_oprnds0 = vNULL;
7925 vec<tree> vec_oprnds1 = vNULL;
7926 vec<tree> vec_oprnds2 = vNULL;
7927 vec<tree> vec_oprnds3 = vNULL;
7928 tree vec_cmp_type;
7929 bool masked = false;
7931 if (reduc_index && STMT_SLP_TYPE (stmt_info))
7932 return false;
7934 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == TREE_CODE_REDUCTION)
7936 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
7937 return false;
7939 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
7940 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
7941 && reduc_def))
7942 return false;
7944 /* FORNOW: not yet supported. */
7945 if (STMT_VINFO_LIVE_P (stmt_info))
7947 if (dump_enabled_p ())
7948 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7949 "value used after loop.\n");
7950 return false;
7954 /* Is this a vectorizable conditional operation? */
7955 if (!is_gimple_assign (stmt))
7956 return false;
7958 code = gimple_assign_rhs_code (stmt);
7960 if (code != COND_EXPR)
7961 return false;
7963 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7964 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
7966 if (slp_node)
7967 ncopies = 1;
7968 else
7969 ncopies = vect_get_num_copies (loop_vinfo, vectype);
7971 gcc_assert (ncopies >= 1);
7972 if (reduc_index && ncopies > 1)
7973 return false; /* FORNOW */
7975 cond_expr = gimple_assign_rhs1 (stmt);
7976 then_clause = gimple_assign_rhs2 (stmt);
7977 else_clause = gimple_assign_rhs3 (stmt);
7979 if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo,
7980 &comp_vectype, &dts[0], vectype)
7981 || !comp_vectype)
7982 return false;
7984 gimple *def_stmt;
7985 if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &def_stmt, &dts[2],
7986 &vectype1))
7987 return false;
7988 if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &def_stmt, &dts[3],
7989 &vectype2))
7990 return false;
7992 if (vectype1 && !useless_type_conversion_p (vectype, vectype1))
7993 return false;
7995 if (vectype2 && !useless_type_conversion_p (vectype, vectype2))
7996 return false;
7998 masked = !COMPARISON_CLASS_P (cond_expr);
7999 vec_cmp_type = build_same_sized_truth_vector_type (comp_vectype);
8001 if (vec_cmp_type == NULL_TREE)
8002 return false;
8004 cond_code = TREE_CODE (cond_expr);
8005 if (!masked)
8007 cond_expr0 = TREE_OPERAND (cond_expr, 0);
8008 cond_expr1 = TREE_OPERAND (cond_expr, 1);
8011 if (!masked && VECTOR_BOOLEAN_TYPE_P (comp_vectype))
8013 /* Boolean values may have another representation in vectors
8014 and therefore we prefer bit operations over comparison for
8015 them (which also works for scalar masks). We store opcodes
8016 to use in bitop1 and bitop2. Statement is vectorized as
8017 BITOP2 (rhs1 BITOP1 rhs2) or rhs1 BITOP2 (BITOP1 rhs2)
8018 depending on bitop1 and bitop2 arity. */
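/* For example (illustrative only): with boolean operands a and b,
   a > b is computed as a & ~b (bitop1 == BIT_NOT_EXPR applied to b,
   bitop2 == BIT_AND_EXPR), and a >= b as a | ~b.  */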
8019 switch (cond_code)
8021 case GT_EXPR:
8022 bitop1 = BIT_NOT_EXPR;
8023 bitop2 = BIT_AND_EXPR;
8024 break;
8025 case GE_EXPR:
8026 bitop1 = BIT_NOT_EXPR;
8027 bitop2 = BIT_IOR_EXPR;
8028 break;
8029 case LT_EXPR:
8030 bitop1 = BIT_NOT_EXPR;
8031 bitop2 = BIT_AND_EXPR;
8032 std::swap (cond_expr0, cond_expr1);
8033 break;
8034 case LE_EXPR:
8035 bitop1 = BIT_NOT_EXPR;
8036 bitop2 = BIT_IOR_EXPR;
8037 std::swap (cond_expr0, cond_expr1);
8038 break;
8039 case NE_EXPR:
8040 bitop1 = BIT_XOR_EXPR;
8041 break;
8042 case EQ_EXPR:
8043 bitop1 = BIT_XOR_EXPR;
8044 bitop2 = BIT_NOT_EXPR;
8045 break;
8046 default:
8047 return false;
8049 cond_code = SSA_NAME;
8052 if (!vec_stmt)
8054 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
8055 if (bitop1 != NOP_EXPR)
8057 machine_mode mode = TYPE_MODE (comp_vectype);
8058 optab optab;
8060 optab = optab_for_tree_code (bitop1, comp_vectype, optab_default);
8061 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8062 return false;
8064 if (bitop2 != NOP_EXPR)
8066 optab = optab_for_tree_code (bitop2, comp_vectype,
8067 optab_default);
8068 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8069 return false;
8072 if (expand_vec_cond_expr_p (vectype, comp_vectype,
8073 cond_code))
8075 vect_model_simple_cost (stmt_info, ncopies, dts, ndts, NULL, NULL);
8076 return true;
8078 return false;
8081 /* Transform. */
8083 if (!slp_node)
8085 vec_oprnds0.create (1);
8086 vec_oprnds1.create (1);
8087 vec_oprnds2.create (1);
8088 vec_oprnds3.create (1);
8091 /* Handle def. */
8092 scalar_dest = gimple_assign_lhs (stmt);
8093 vec_dest = vect_create_destination_var (scalar_dest, vectype);
8095 /* Handle cond expr. */
8096 for (j = 0; j < ncopies; j++)
8098 gassign *new_stmt = NULL;
8099 if (j == 0)
8101 if (slp_node)
8103 auto_vec<tree, 4> ops;
8104 auto_vec<vec<tree>, 4> vec_defs;
8106 if (masked)
8107 ops.safe_push (cond_expr);
8108 else
8110 ops.safe_push (cond_expr0);
8111 ops.safe_push (cond_expr1);
8113 ops.safe_push (then_clause);
8114 ops.safe_push (else_clause);
8115 vect_get_slp_defs (ops, slp_node, &vec_defs);
8116 vec_oprnds3 = vec_defs.pop ();
8117 vec_oprnds2 = vec_defs.pop ();
8118 if (!masked)
8119 vec_oprnds1 = vec_defs.pop ();
8120 vec_oprnds0 = vec_defs.pop ();
8122 else
8124 gimple *gtemp;
8125 if (masked)
8127 vec_cond_lhs
8128 = vect_get_vec_def_for_operand (cond_expr, stmt,
8129 comp_vectype);
8130 vect_is_simple_use (cond_expr, stmt_info->vinfo,
8131 &gtemp, &dts[0]);
8133 else
8135 vec_cond_lhs
8136 = vect_get_vec_def_for_operand (cond_expr0,
8137 stmt, comp_vectype);
8138 vect_is_simple_use (cond_expr0, loop_vinfo, &gtemp, &dts[0]);
8140 vec_cond_rhs
8141 = vect_get_vec_def_for_operand (cond_expr1,
8142 stmt, comp_vectype);
8143 vect_is_simple_use (cond_expr1, loop_vinfo, &gtemp, &dts[1]);
8145 if (reduc_index == 1)
8146 vec_then_clause = reduc_def;
8147 else
8149 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
8150 stmt);
8151 vect_is_simple_use (then_clause, loop_vinfo,
8152 &gtemp, &dts[2]);
8154 if (reduc_index == 2)
8155 vec_else_clause = reduc_def;
8156 else
8158 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
8159 stmt);
8160 vect_is_simple_use (else_clause, loop_vinfo, &gtemp, &dts[3]);
8164 else
8166 vec_cond_lhs
8167 = vect_get_vec_def_for_stmt_copy (dts[0],
8168 vec_oprnds0.pop ());
8169 if (!masked)
8170 vec_cond_rhs
8171 = vect_get_vec_def_for_stmt_copy (dts[1],
8172 vec_oprnds1.pop ());
8174 vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
8175 vec_oprnds2.pop ());
8176 vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
8177 vec_oprnds3.pop ());
8180 if (!slp_node)
8182 vec_oprnds0.quick_push (vec_cond_lhs);
8183 if (!masked)
8184 vec_oprnds1.quick_push (vec_cond_rhs);
8185 vec_oprnds2.quick_push (vec_then_clause);
8186 vec_oprnds3.quick_push (vec_else_clause);
8189 /* Arguments are ready. Create the new vector stmt. */
8190 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
8192 vec_then_clause = vec_oprnds2[i];
8193 vec_else_clause = vec_oprnds3[i];
8195 if (masked)
8196 vec_compare = vec_cond_lhs;
8197 else
8199 vec_cond_rhs = vec_oprnds1[i];
8200 if (bitop1 == NOP_EXPR)
8201 vec_compare = build2 (cond_code, vec_cmp_type,
8202 vec_cond_lhs, vec_cond_rhs);
8203 else
8205 new_temp = make_ssa_name (vec_cmp_type);
8206 if (bitop1 == BIT_NOT_EXPR)
8207 new_stmt = gimple_build_assign (new_temp, bitop1,
8208 vec_cond_rhs);
8209 else
8210 new_stmt
8211 = gimple_build_assign (new_temp, bitop1, vec_cond_lhs,
8212 vec_cond_rhs);
8213 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8214 if (bitop2 == NOP_EXPR)
8215 vec_compare = new_temp;
8216 else if (bitop2 == BIT_NOT_EXPR)
8218 /* Instead of doing ~x ? y : z do x ? z : y. */
8219 vec_compare = new_temp;
8220 std::swap (vec_then_clause, vec_else_clause);
8222 else
8224 vec_compare = make_ssa_name (vec_cmp_type);
8225 new_stmt
8226 = gimple_build_assign (vec_compare, bitop2,
8227 vec_cond_lhs, new_temp);
8228 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8232 new_temp = make_ssa_name (vec_dest);
8233 new_stmt = gimple_build_assign (new_temp, VEC_COND_EXPR,
8234 vec_compare, vec_then_clause,
8235 vec_else_clause);
8236 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8237 if (slp_node)
8238 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
8241 if (slp_node)
8242 continue;
8244 if (j == 0)
8245 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
8246 else
8247 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
8249 prev_stmt_info = vinfo_for_stmt (new_stmt);
8252 vec_oprnds0.release ();
8253 vec_oprnds1.release ();
8254 vec_oprnds2.release ();
8255 vec_oprnds3.release ();
8257 return true;
8260 /* vectorizable_comparison.
8262 Check if STMT is a comparison expression that can be vectorized.
8263 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
8264 comparison, put it in VEC_STMT, and insert it at GSI.
8266 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
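/* Illustrative sketch (not from the original sources): a scalar mask
   definition such as

       S: c = a < b

   is vectorized as

       VS: vc = va < vb

   where vc has a vector boolean (mask) type with the same number of
   elements as the operand vector type.  */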
8268 static bool
8269 vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
8270 gimple **vec_stmt, tree reduc_def,
8271 slp_tree slp_node)
8273 tree lhs, rhs1, rhs2;
8274 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8275 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
8276 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
8277 tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE;
8278 tree new_temp;
8279 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
8280 enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type};
8281 int ndts = 2;
8282 unsigned nunits;
8283 int ncopies;
8284 enum tree_code code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
8285 stmt_vec_info prev_stmt_info = NULL;
8286 int i, j;
8287 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
8288 vec<tree> vec_oprnds0 = vNULL;
8289 vec<tree> vec_oprnds1 = vNULL;
8290 gimple *def_stmt;
8291 tree mask_type;
8292 tree mask;
8294 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
8295 return false;
8297 if (!vectype || !VECTOR_BOOLEAN_TYPE_P (vectype))
8298 return false;
8300 mask_type = vectype;
8301 nunits = TYPE_VECTOR_SUBPARTS (vectype);
8303 if (slp_node)
8304 ncopies = 1;
8305 else
8306 ncopies = vect_get_num_copies (loop_vinfo, vectype);
8308 gcc_assert (ncopies >= 1);
8309 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
8310 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
8311 && reduc_def))
8312 return false;
8314 if (STMT_VINFO_LIVE_P (stmt_info))
8316 if (dump_enabled_p ())
8317 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8318 "value used after loop.\n");
8319 return false;
8322 if (!is_gimple_assign (stmt))
8323 return false;
8325 code = gimple_assign_rhs_code (stmt);
8327 if (TREE_CODE_CLASS (code) != tcc_comparison)
8328 return false;
8330 rhs1 = gimple_assign_rhs1 (stmt);
8331 rhs2 = gimple_assign_rhs2 (stmt);
8333 if (!vect_is_simple_use (rhs1, stmt_info->vinfo, &def_stmt,
8334 &dts[0], &vectype1))
8335 return false;
8337 if (!vect_is_simple_use (rhs2, stmt_info->vinfo, &def_stmt,
8338 &dts[1], &vectype2))
8339 return false;
8341 if (vectype1 && vectype2
8342 && TYPE_VECTOR_SUBPARTS (vectype1) != TYPE_VECTOR_SUBPARTS (vectype2))
8343 return false;
8345 vectype = vectype1 ? vectype1 : vectype2;
8347 /* Invariant comparison. */
8348 if (!vectype)
8350 vectype = get_vectype_for_scalar_type (TREE_TYPE (rhs1));
8351 if (TYPE_VECTOR_SUBPARTS (vectype) != nunits)
8352 return false;
8354 else if (nunits != TYPE_VECTOR_SUBPARTS (vectype))
8355 return false;
8357 /* Can't compare mask and non-mask types. */
8358 if (vectype1 && vectype2
8359 && (VECTOR_BOOLEAN_TYPE_P (vectype1) ^ VECTOR_BOOLEAN_TYPE_P (vectype2)))
8360 return false;
8362 /* Boolean values may have another representation in vectors
8363 and therefore we prefer bit operations over comparison for
8364 them (which also works for scalar masks). We store opcodes
8365 to use in bitop1 and bitop2. Statement is vectorized as
8366 BITOP2 (rhs1 BITOP1 rhs2) or
8367 rhs1 BITOP2 (BITOP1 rhs2)
8368 depending on bitop1 and bitop2 arity. */
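/* For example (illustrative only): with vector boolean operands,
   a != b is computed as a ^ b (bitop1 == BIT_XOR_EXPR) and a == b as
   ~(a ^ b) (bitop2 == BIT_NOT_EXPR applied to the intermediate
   result).  */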
8369 if (VECTOR_BOOLEAN_TYPE_P (vectype))
8371 if (code == GT_EXPR)
8373 bitop1 = BIT_NOT_EXPR;
8374 bitop2 = BIT_AND_EXPR;
8376 else if (code == GE_EXPR)
8378 bitop1 = BIT_NOT_EXPR;
8379 bitop2 = BIT_IOR_EXPR;
8381 else if (code == LT_EXPR)
8383 bitop1 = BIT_NOT_EXPR;
8384 bitop2 = BIT_AND_EXPR;
8385 std::swap (rhs1, rhs2);
8386 std::swap (dts[0], dts[1]);
8388 else if (code == LE_EXPR)
8390 bitop1 = BIT_NOT_EXPR;
8391 bitop2 = BIT_IOR_EXPR;
8392 std::swap (rhs1, rhs2);
8393 std::swap (dts[0], dts[1]);
8395 else
8397 bitop1 = BIT_XOR_EXPR;
8398 if (code == EQ_EXPR)
8399 bitop2 = BIT_NOT_EXPR;
8403 if (!vec_stmt)
8405 STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type;
8406 vect_model_simple_cost (stmt_info, ncopies * (1 + (bitop2 != NOP_EXPR)),
8407 dts, ndts, NULL, NULL);
8408 if (bitop1 == NOP_EXPR)
8409 return expand_vec_cmp_expr_p (vectype, mask_type, code);
8410 else
8412 machine_mode mode = TYPE_MODE (vectype);
8413 optab optab;
8415 optab = optab_for_tree_code (bitop1, vectype, optab_default);
8416 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8417 return false;
8419 if (bitop2 != NOP_EXPR)
8421 optab = optab_for_tree_code (bitop2, vectype, optab_default);
8422 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8423 return false;
8425 return true;
8429 /* Transform. */
8430 if (!slp_node)
8432 vec_oprnds0.create (1);
8433 vec_oprnds1.create (1);
8436 /* Handle def. */
8437 lhs = gimple_assign_lhs (stmt);
8438 mask = vect_create_destination_var (lhs, mask_type);
8440 /* Handle cmp expr. */
8441 for (j = 0; j < ncopies; j++)
8443 gassign *new_stmt = NULL;
8444 if (j == 0)
8446 if (slp_node)
8448 auto_vec<tree, 2> ops;
8449 auto_vec<vec<tree>, 2> vec_defs;
8451 ops.safe_push (rhs1);
8452 ops.safe_push (rhs2);
8453 vect_get_slp_defs (ops, slp_node, &vec_defs);
8454 vec_oprnds1 = vec_defs.pop ();
8455 vec_oprnds0 = vec_defs.pop ();
8457 else
8459 vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt, vectype);
8460 vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt, vectype);
8463 else
8465 vec_rhs1 = vect_get_vec_def_for_stmt_copy (dts[0],
8466 vec_oprnds0.pop ());
8467 vec_rhs2 = vect_get_vec_def_for_stmt_copy (dts[1],
8468 vec_oprnds1.pop ());
8471 if (!slp_node)
8473 vec_oprnds0.quick_push (vec_rhs1);
8474 vec_oprnds1.quick_push (vec_rhs2);
8477 /* Arguments are ready. Create the new vector stmt. */
8478 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_rhs1)
8480 vec_rhs2 = vec_oprnds1[i];
8482 new_temp = make_ssa_name (mask);
8483 if (bitop1 == NOP_EXPR)
8485 new_stmt = gimple_build_assign (new_temp, code,
8486 vec_rhs1, vec_rhs2);
8487 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8489 else
8491 if (bitop1 == BIT_NOT_EXPR)
8492 new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs2);
8493 else
8494 new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs1,
8495 vec_rhs2);
8496 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8497 if (bitop2 != NOP_EXPR)
8499 tree res = make_ssa_name (mask);
8500 if (bitop2 == BIT_NOT_EXPR)
8501 new_stmt = gimple_build_assign (res, bitop2, new_temp);
8502 else
8503 new_stmt = gimple_build_assign (res, bitop2, vec_rhs1,
8504 new_temp);
8505 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8508 if (slp_node)
8509 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
8512 if (slp_node)
8513 continue;
8515 if (j == 0)
8516 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
8517 else
8518 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
8520 prev_stmt_info = vinfo_for_stmt (new_stmt);
8523 vec_oprnds0.release ();
8524 vec_oprnds1.release ();
8526 return true;
8529 /* If SLP_NODE is nonnull, return true if vectorizable_live_operation
8530 can handle all live statements in the node. Otherwise return true
8531 if STMT is not live or if vectorizable_live_operation can handle it.
8532 GSI and VEC_STMT are as for vectorizable_live_operation. */
8534 static bool
8535 can_vectorize_live_stmts (gimple *stmt, gimple_stmt_iterator *gsi,
8536 slp_tree slp_node, gimple **vec_stmt)
8538 if (slp_node)
8540 gimple *slp_stmt;
8541 unsigned int i;
8542 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node), i, slp_stmt)
8544 stmt_vec_info slp_stmt_info = vinfo_for_stmt (slp_stmt);
8545 if (STMT_VINFO_LIVE_P (slp_stmt_info)
8546 && !vectorizable_live_operation (slp_stmt, gsi, slp_node, i,
8547 vec_stmt))
8548 return false;
8551 else if (STMT_VINFO_LIVE_P (vinfo_for_stmt (stmt))
8552 && !vectorizable_live_operation (stmt, gsi, slp_node, -1, vec_stmt))
8553 return false;
8555 return true;
8558 /* Make sure the statement is vectorizable. */
8560 bool
8561 vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
8562 slp_instance node_instance)
8564 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8565 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
8566 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
8567 bool ok;
8568 gimple *pattern_stmt;
8569 gimple_seq pattern_def_seq;
8571 if (dump_enabled_p ())
8573 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
8574 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
8577 if (gimple_has_volatile_ops (stmt))
8579 if (dump_enabled_p ())
8580 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8581 "not vectorized: stmt has volatile operands\n");
8583 return false;
8586 /* Skip stmts that do not need to be vectorized. In loops this is expected
8587 to include:
8588 - the COND_EXPR which is the loop exit condition
8589 - any LABEL_EXPRs in the loop
8590 - computations that are used only for array indexing or loop control.
8591 In basic blocks we only analyze statements that are a part of some SLP
8592 instance, therefore, all the statements are relevant.
8594 Pattern statement needs to be analyzed instead of the original statement
8595 if the original statement is not relevant. Otherwise, we analyze both
8596 statements. In basic blocks we are called from some SLP instance
8597 traversal; don't analyze pattern stmts instead of the original ones,
8598 since the pattern stmts will already be part of an SLP instance. */
8600 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
8601 if (!STMT_VINFO_RELEVANT_P (stmt_info)
8602 && !STMT_VINFO_LIVE_P (stmt_info))
8604 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
8605 && pattern_stmt
8606 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
8607 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
8609 /* Analyze PATTERN_STMT instead of the original stmt. */
8610 stmt = pattern_stmt;
8611 stmt_info = vinfo_for_stmt (pattern_stmt);
8612 if (dump_enabled_p ())
8614 dump_printf_loc (MSG_NOTE, vect_location,
8615 "==> examining pattern statement: ");
8616 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
8619 else
8621 if (dump_enabled_p ())
8622 dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");
8624 return true;
8627 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
8628 && node == NULL
8629 && pattern_stmt
8630 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
8631 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
8633 /* Analyze PATTERN_STMT too. */
8634 if (dump_enabled_p ())
8636 dump_printf_loc (MSG_NOTE, vect_location,
8637 "==> examining pattern statement: ");
8638 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
8641 if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node,
8642 node_instance))
8643 return false;
8646 if (is_pattern_stmt_p (stmt_info)
8647 && node == NULL
8648 && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
8650 gimple_stmt_iterator si;
8652 for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
8654 gimple *pattern_def_stmt = gsi_stmt (si);
8655 if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
8656 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
8658 /* Analyze def stmt of STMT if it's a pattern stmt. */
8659 if (dump_enabled_p ())
8661 dump_printf_loc (MSG_NOTE, vect_location,
8662 "==> examining pattern def statement: ");
8663 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
8666 if (!vect_analyze_stmt (pattern_def_stmt,
8667 need_to_vectorize, node, node_instance))
8668 return false;
8673 switch (STMT_VINFO_DEF_TYPE (stmt_info))
8675 case vect_internal_def:
8676 break;
8678 case vect_reduction_def:
8679 case vect_nested_cycle:
8680 gcc_assert (!bb_vinfo
8681 && (relevance == vect_used_in_outer
8682 || relevance == vect_used_in_outer_by_reduction
8683 || relevance == vect_used_by_reduction
8684 || relevance == vect_unused_in_scope
8685 || relevance == vect_used_only_live));
8686 break;
8688 case vect_induction_def:
8689 gcc_assert (!bb_vinfo);
8690 break;
8692 case vect_constant_def:
8693 case vect_external_def:
8694 case vect_unknown_def_type:
8695 default:
8696 gcc_unreachable ();
8699 if (STMT_VINFO_RELEVANT_P (stmt_info))
8701 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
8702 gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
8703 || (is_gimple_call (stmt)
8704 && gimple_call_lhs (stmt) == NULL_TREE));
8705 *need_to_vectorize = true;
8708 if (PURE_SLP_STMT (stmt_info) && !node)
8710 dump_printf_loc (MSG_NOTE, vect_location,
8711 "handled only by SLP analysis\n");
8712 return true;
8715 ok = true;
8716 if (!bb_vinfo
8717 && (STMT_VINFO_RELEVANT_P (stmt_info)
8718 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
8719 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
8720 || vectorizable_conversion (stmt, NULL, NULL, node)
8721 || vectorizable_shift (stmt, NULL, NULL, node)
8722 || vectorizable_operation (stmt, NULL, NULL, node)
8723 || vectorizable_assignment (stmt, NULL, NULL, node)
8724 || vectorizable_load (stmt, NULL, NULL, node, NULL)
8725 || vectorizable_call (stmt, NULL, NULL, node)
8726 || vectorizable_store (stmt, NULL, NULL, node)
8727 || vectorizable_reduction (stmt, NULL, NULL, node, node_instance)
8728 || vectorizable_induction (stmt, NULL, NULL, node)
8729 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
8730 || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
8731 else
8733 if (bb_vinfo)
8734 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
8735 || vectorizable_conversion (stmt, NULL, NULL, node)
8736 || vectorizable_shift (stmt, NULL, NULL, node)
8737 || vectorizable_operation (stmt, NULL, NULL, node)
8738 || vectorizable_assignment (stmt, NULL, NULL, node)
8739 || vectorizable_load (stmt, NULL, NULL, node, NULL)
8740 || vectorizable_call (stmt, NULL, NULL, node)
8741 || vectorizable_store (stmt, NULL, NULL, node)
8742 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
8743 || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
8746 if (!ok)
8748 if (dump_enabled_p ())
8750 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8751 "not vectorized: relevant stmt not ");
8752 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
8753 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
8756 return false;
8759 if (bb_vinfo)
8760 return true;
8762 /* Stmts that are (also) "live" (i.e. that are used out of the loop)
8763 need extra handling, except for vectorizable reductions. */
8764 if (STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
8765 && !can_vectorize_live_stmts (stmt, NULL, node, NULL))
8767 if (dump_enabled_p ())
8769 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8770 "not vectorized: live stmt not supported: ");
8771 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
8774 return false;
8777 return true;
8781 /* Function vect_transform_stmt.
8783 Create a vectorized stmt to replace STMT, and insert it at BSI. */
8785 bool
8786 vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
8787 bool *grouped_store, slp_tree slp_node,
8788 slp_instance slp_node_instance)
8790 bool is_store = false;
8791 gimple *vec_stmt = NULL;
8792 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8793 bool done;
8795 gcc_assert (slp_node || !PURE_SLP_STMT (stmt_info));
8796 gimple *old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
8798 switch (STMT_VINFO_TYPE (stmt_info))
8800 case type_demotion_vec_info_type:
8801 case type_promotion_vec_info_type:
8802 case type_conversion_vec_info_type:
8803 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
8804 gcc_assert (done);
8805 break;
8807 case induc_vec_info_type:
8808 done = vectorizable_induction (stmt, gsi, &vec_stmt, slp_node);
8809 gcc_assert (done);
8810 break;
8812 case shift_vec_info_type:
8813 done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
8814 gcc_assert (done);
8815 break;
8817 case op_vec_info_type:
8818 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
8819 gcc_assert (done);
8820 break;
8822 case assignment_vec_info_type:
8823 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
8824 gcc_assert (done);
8825 break;
8827 case load_vec_info_type:
8828 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
8829 slp_node_instance);
8830 gcc_assert (done);
8831 break;
8833 case store_vec_info_type:
8834 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
8835 gcc_assert (done);
8836 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
8838 /* In case of interleaving, the whole chain is vectorized when the
8839 last store in the chain is reached. Store stmts before the last
8840 one are skipped, and their vec_stmt_info shouldn't be freed
8841 meanwhile. */
8842 *grouped_store = true;
8843 if (STMT_VINFO_VEC_STMT (stmt_info))
8844 is_store = true;
8846 else
8847 is_store = true;
8848 break;
8850 case condition_vec_info_type:
8851 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
8852 gcc_assert (done);
8853 break;
8855 case comparison_vec_info_type:
8856 done = vectorizable_comparison (stmt, gsi, &vec_stmt, NULL, slp_node);
8857 gcc_assert (done);
8858 break;
8860 case call_vec_info_type:
8861 done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
8862 stmt = gsi_stmt (*gsi);
8863 if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
8864 is_store = true;
8865 break;
8867 case call_simd_clone_vec_info_type:
8868 done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
8869 stmt = gsi_stmt (*gsi);
8870 break;
8872 case reduc_vec_info_type:
8873 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node,
8874 slp_node_instance);
8875 gcc_assert (done);
8876 break;
8878 default:
8879 if (!STMT_VINFO_LIVE_P (stmt_info))
8881 if (dump_enabled_p ())
8882 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8883 "stmt not supported.\n");
8884 gcc_unreachable ();
8888 /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
8889 This would break hybrid SLP vectorization. */
8890 if (slp_node)
8891 gcc_assert (!vec_stmt
8892 && STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt);
8894 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
8895 is being vectorized, but outside the immediately enclosing loop. */
8896 if (vec_stmt
8897 && STMT_VINFO_LOOP_VINFO (stmt_info)
8898 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
8899 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
8900 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
8901 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
8902 || STMT_VINFO_RELEVANT (stmt_info) ==
8903 vect_used_in_outer_by_reduction))
8905 struct loop *innerloop = LOOP_VINFO_LOOP (
8906 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
8907 imm_use_iterator imm_iter;
8908 use_operand_p use_p;
8909 tree scalar_dest;
8910 gimple *exit_phi;
8912 if (dump_enabled_p ())
8913 dump_printf_loc (MSG_NOTE, vect_location,
8914 "Record the vdef for outer-loop vectorization.\n");
8916 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
8917 (to be used when vectorizing outer-loop stmts that use the DEF of
8918 STMT). */
8919 if (gimple_code (stmt) == GIMPLE_PHI)
8920 scalar_dest = PHI_RESULT (stmt);
8921 else
8922 scalar_dest = gimple_assign_lhs (stmt);
8924 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
8926 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
8928 exit_phi = USE_STMT (use_p);
8929 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
8934 /* Handle stmts whose DEF is used outside the loop-nest that is
8935 being vectorized. */
8936 if (STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
8938 done = can_vectorize_live_stmts (stmt, gsi, slp_node, &vec_stmt);
8939 gcc_assert (done);
8942 if (vec_stmt)
8943 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
8945 return is_store;
8949 /* Remove a group of stores (for SLP or interleaving), free their
8950 stmt_vec_info. */
8952 void
8953 vect_remove_stores (gimple *first_stmt)
8955 gimple *next = first_stmt;
8956 gimple *tmp;
8957 gimple_stmt_iterator next_si;
8959 while (next)
8961 stmt_vec_info stmt_info = vinfo_for_stmt (next);
8963 tmp = GROUP_NEXT_ELEMENT (stmt_info);
8964 if (is_pattern_stmt_p (stmt_info))
8965 next = STMT_VINFO_RELATED_STMT (stmt_info);
8966 /* Free the attached stmt_vec_info and remove the stmt. */
8967 next_si = gsi_for_stmt (next);
8968 unlink_stmt_vdef (next);
8969 gsi_remove (&next_si, true);
8970 release_defs (next);
8971 free_stmt_vec_info (next);
8972 next = tmp;
8977 /* Function new_stmt_vec_info.
8979 Create and initialize a new stmt_vec_info struct for STMT. */
8981 stmt_vec_info
8982 new_stmt_vec_info (gimple *stmt, vec_info *vinfo)
8984 stmt_vec_info res;
8985 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
8987 STMT_VINFO_TYPE (res) = undef_vec_info_type;
8988 STMT_VINFO_STMT (res) = stmt;
8989 res->vinfo = vinfo;
8990 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
8991 STMT_VINFO_LIVE_P (res) = false;
8992 STMT_VINFO_VECTYPE (res) = NULL;
8993 STMT_VINFO_VEC_STMT (res) = NULL;
8994 STMT_VINFO_VECTORIZABLE (res) = true;
8995 STMT_VINFO_IN_PATTERN_P (res) = false;
8996 STMT_VINFO_RELATED_STMT (res) = NULL;
8997 STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
8998 STMT_VINFO_DATA_REF (res) = NULL;
8999 STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION;
9000 STMT_VINFO_VEC_CONST_COND_REDUC_CODE (res) = ERROR_MARK;
9002 if (gimple_code (stmt) == GIMPLE_PHI
9003 && is_loop_header_bb_p (gimple_bb (stmt)))
9004 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
9005 else
9006 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
9008 STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
9009 STMT_SLP_TYPE (res) = loop_vect;
9010 STMT_VINFO_NUM_SLP_USES (res) = 0;
9012 GROUP_FIRST_ELEMENT (res) = NULL;
9013 GROUP_NEXT_ELEMENT (res) = NULL;
9014 GROUP_SIZE (res) = 0;
9015 GROUP_STORE_COUNT (res) = 0;
9016 GROUP_GAP (res) = 0;
9017 GROUP_SAME_DR_STMT (res) = NULL;
9019 return res;
9023 /* Create a hash table for stmt_vec_info. */
9025 void
9026 init_stmt_vec_info_vec (void)
9028 gcc_assert (!stmt_vec_info_vec.exists ());
9029 stmt_vec_info_vec.create (50);
9033 /* Free hash table for stmt_vec_info. */
9035 void
9036 free_stmt_vec_info_vec (void)
9038 unsigned int i;
9039 stmt_vec_info info;
9040 FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
9041 if (info != NULL)
9042 free_stmt_vec_info (STMT_VINFO_STMT (info));
9043 gcc_assert (stmt_vec_info_vec.exists ());
9044 stmt_vec_info_vec.release ();
9048 /* Free stmt vectorization related info. */
9050 void
9051 free_stmt_vec_info (gimple *stmt)
9053 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
9055 if (!stmt_info)
9056 return;
9058 /* Check if this statement has a related "pattern stmt"
9059 (introduced by the vectorizer during the pattern recognition
9060 pass). Free the pattern's stmt_vec_info and the def stmts' stmt_vec_infos
9061 too. */
9062 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
9064 stmt_vec_info patt_info
9065 = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
9066 if (patt_info)
9068 gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
9069 gimple *patt_stmt = STMT_VINFO_STMT (patt_info);
9070 gimple_set_bb (patt_stmt, NULL);
9071 tree lhs = gimple_get_lhs (patt_stmt);
9072 if (lhs && TREE_CODE (lhs) == SSA_NAME)
9073 release_ssa_name (lhs);
9074 if (seq)
9076 gimple_stmt_iterator si;
9077 for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
9079 gimple *seq_stmt = gsi_stmt (si);
9080 gimple_set_bb (seq_stmt, NULL);
9081 lhs = gimple_get_lhs (seq_stmt);
9082 if (lhs && TREE_CODE (lhs) == SSA_NAME)
9083 release_ssa_name (lhs);
9084 free_stmt_vec_info (seq_stmt);
9087 free_stmt_vec_info (patt_stmt);
9091 STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
9092 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
9093 set_vinfo_for_stmt (stmt, NULL);
9094 free (stmt_info);
9098 /* Function get_vectype_for_scalar_type_and_size.
9100 Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
9101 by the target. */
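/* Illustrative example (an assumption for exposition): with
   SCALAR_TYPE == int (4 bytes) and SIZE == 16 this returns a 16-byte
   vector of 4 ints, provided the target supports such a vector mode;
   SIZE == 0 means "use the target's preferred SIMD mode for the
   element mode".  */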
9103 static tree
9104 get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
9106 tree orig_scalar_type = scalar_type;
9107 scalar_mode inner_mode;
9108 machine_mode simd_mode;
9109 int nunits;
9110 tree vectype;
9112 if (!is_int_mode (TYPE_MODE (scalar_type), &inner_mode)
9113 && !is_float_mode (TYPE_MODE (scalar_type), &inner_mode))
9114 return NULL_TREE;
9116 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
9118 /* For vector types of elements whose mode precision doesn't
9119 match their type's precision we use an element type of mode
9120 precision. The vectorization routines will have to make sure
9121 they support the proper result truncation/extension.
9122 We also make sure to build vector types with INTEGER_TYPE
9123 component type only. */
9124 if (INTEGRAL_TYPE_P (scalar_type)
9125 && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
9126 || TREE_CODE (scalar_type) != INTEGER_TYPE))
9127 scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
9128 TYPE_UNSIGNED (scalar_type));
9130 /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
9131 When the component mode passes the above test simply use a type
9132 corresponding to that mode. The theory is that any use that
9133 would cause problems with this will disable vectorization anyway. */
9134 else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
9135 && !INTEGRAL_TYPE_P (scalar_type))
9136 scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);
9138 /* We can't build a vector type of elements with alignment bigger than
9139 their size. */
9140 else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
9141 scalar_type = lang_hooks.types.type_for_mode (inner_mode,
9142 TYPE_UNSIGNED (scalar_type));
9144 /* If we fell back to using the mode, fail if there was
9145 no scalar type for it. */
9146 if (scalar_type == NULL_TREE)
9147 return NULL_TREE;
9149 /* If no size was supplied use the mode the target prefers. Otherwise
9150 look up a vector mode of the specified size. */
9151 if (size == 0)
9152 simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
9153 else if (!mode_for_vector (inner_mode, size / nbytes).exists (&simd_mode))
9154 return NULL_TREE;
9155 nunits = GET_MODE_SIZE (simd_mode) / nbytes;
9156 /* NOTE: nunits == 1 is allowed to support single element vector types. */
9157 if (nunits < 1)
9158 return NULL_TREE;
9160 vectype = build_vector_type (scalar_type, nunits);
9162 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
9163 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
9164 return NULL_TREE;
9166 /* Re-attach the address-space qualifier if we canonicalized the scalar
9167 type. */
9168 if (TYPE_ADDR_SPACE (orig_scalar_type) != TYPE_ADDR_SPACE (vectype))
9169 return build_qualified_type
9170 (vectype, KEEP_QUAL_ADDR_SPACE (TYPE_QUALS (orig_scalar_type)));
9172 return vectype;
9175 unsigned int current_vector_size;
9177 /* Function get_vectype_for_scalar_type.
9179 Returns the vector type corresponding to SCALAR_TYPE as supported
9180 by the target. */
9182 tree
9183 get_vectype_for_scalar_type (tree scalar_type)
9185 tree vectype;
9186 vectype = get_vectype_for_scalar_type_and_size (scalar_type,
9187 current_vector_size);
9188 if (vectype
9189 && current_vector_size == 0)
9190 current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
9191 return vectype;
9194 /* Function get_mask_type_for_scalar_type.
9196 Returns the mask type corresponding to a result of comparison
9197 of vectors of the specified SCALAR_TYPE, as supported by the target. */
9199 tree
9200 get_mask_type_for_scalar_type (tree scalar_type)
9202 tree vectype = get_vectype_for_scalar_type (scalar_type);
9204 if (!vectype)
9205 return NULL;
9207 return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype),
9208 current_vector_size);
9211 /* Function get_same_sized_vectype
9213 Returns a vector type corresponding to SCALAR_TYPE of size
9214 VECTOR_TYPE if supported by the target. */
9216 tree
9217 get_same_sized_vectype (tree scalar_type, tree vector_type)
9219 if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type))
9220 return build_same_sized_truth_vector_type (vector_type);
9222 return get_vectype_for_scalar_type_and_size
9223 (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
9226 /* Function vect_is_simple_use.
9228 Input:
9229 VINFO - the vect info of the loop or basic block that is being vectorized.
9230 OPERAND - operand in the loop or bb.
9231 Output:
9232 DEF_STMT - the defining stmt in case OPERAND is an SSA_NAME.
9233 DT - the type of definition
9235 Returns whether a stmt with OPERAND can be vectorized.
9236 For loops, supportable operands are constants, loop invariants, and operands
9237 that are defined by the current iteration of the loop. Unsupportable
9238 operands are those that are defined by a previous iteration of the loop (as
9239 is the case in reduction/induction computations).
9240 For basic blocks, supportable operands are constants and bb invariants.
9241 For now, operands defined outside the basic block are not supported. */
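/* Illustrative example (an assumption for exposition): in a loop
   computing sum = sum + a[i], the use of a[i] is defined by the current
   iteration and is a supportable operand, whereas the use of sum is
   defined by a previous iteration of the loop (a reduction computation)
   as described above.  */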
9243 bool
9244 vect_is_simple_use (tree operand, vec_info *vinfo,
9245 gimple **def_stmt, enum vect_def_type *dt)
9247 *def_stmt = NULL;
9248 *dt = vect_unknown_def_type;
9250 if (dump_enabled_p ())
9252 dump_printf_loc (MSG_NOTE, vect_location,
9253 "vect_is_simple_use: operand ");
9254 dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
9255 dump_printf (MSG_NOTE, "\n");
9258 if (CONSTANT_CLASS_P (operand))
9260 *dt = vect_constant_def;
9261 return true;
9264 if (is_gimple_min_invariant (operand))
9266 *dt = vect_external_def;
9267 return true;
9270 if (TREE_CODE (operand) != SSA_NAME)
9272 if (dump_enabled_p ())
9273 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9274 "not ssa-name.\n");
9275 return false;
9278 if (SSA_NAME_IS_DEFAULT_DEF (operand))
9280 *dt = vect_external_def;
9281 return true;
9284 *def_stmt = SSA_NAME_DEF_STMT (operand);
9285 if (dump_enabled_p ())
9287 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
9288 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
9291 if (! vect_stmt_in_region_p (vinfo, *def_stmt))
9292 *dt = vect_external_def;
9293 else
9295 stmt_vec_info stmt_vinfo = vinfo_for_stmt (*def_stmt);
9296 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
9299 if (dump_enabled_p ())
9301 dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
9302 switch (*dt)
9304 case vect_uninitialized_def:
9305 dump_printf (MSG_NOTE, "uninitialized\n");
9306 break;
9307 case vect_constant_def:
9308 dump_printf (MSG_NOTE, "constant\n");
9309 break;
9310 case vect_external_def:
9311 dump_printf (MSG_NOTE, "external\n");
9312 break;
9313 case vect_internal_def:
9314 dump_printf (MSG_NOTE, "internal\n");
9315 break;
9316 case vect_induction_def:
9317 dump_printf (MSG_NOTE, "induction\n");
9318 break;
9319 case vect_reduction_def:
9320 dump_printf (MSG_NOTE, "reduction\n");
9321 break;
9322 case vect_double_reduction_def:
9323 dump_printf (MSG_NOTE, "double reduction\n");
9324 break;
9325 case vect_nested_cycle:
9326 dump_printf (MSG_NOTE, "nested cycle\n");
9327 break;
9328 case vect_unknown_def_type:
9329 dump_printf (MSG_NOTE, "unknown\n");
9330 break;
9334 if (*dt == vect_unknown_def_type)
9336 if (dump_enabled_p ())
9337 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9338 "Unsupported pattern.\n");
9339 return false;
9342 switch (gimple_code (*def_stmt))
9344 case GIMPLE_PHI:
9345 case GIMPLE_ASSIGN:
9346 case GIMPLE_CALL:
9347 break;
9348 default:
9349 if (dump_enabled_p ())
9350 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9351 "unsupported defining stmt:\n");
9352 return false;
9355 return true;
9358 /* Function vect_is_simple_use.
9360 Same as vect_is_simple_use but also determines the vector operand
9361 type of OPERAND and stores it to *VECTYPE. If the definition of
9362 OPERAND is vect_uninitialized_def, vect_constant_def or
9363 vect_external_def, *VECTYPE will be set to NULL_TREE and the caller
9364 is responsible for computing the best-suited vector type for the
9365 scalar operand. */
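/* Editorial note (not part of the original source): one plausible
   caller-side pattern, assuming locals 'op', 'vinfo', 'def_stmt' and
   'dt', is

     tree vectype;
     if (vect_is_simple_use (op, vinfo, &def_stmt, &dt, &vectype)
         && vectype == NULL_TREE)
       vectype = get_vectype_for_scalar_type (TREE_TYPE (op));

   i.e. fall back to a vector type derived from the scalar type when
   the operand turns out to be constant or external.  */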
9367 bool
9368 vect_is_simple_use (tree operand, vec_info *vinfo,
9369 gimple **def_stmt, enum vect_def_type *dt, tree *vectype)
9371 if (!vect_is_simple_use (operand, vinfo, def_stmt, dt))
9372 return false;
9374 /* Now get a vector type if the def is internal, otherwise supply
9375 NULL_TREE and leave it up to the caller to figure out a proper
9376 type for the use stmt. */
9377 if (*dt == vect_internal_def
9378 || *dt == vect_induction_def
9379 || *dt == vect_reduction_def
9380 || *dt == vect_double_reduction_def
9381 || *dt == vect_nested_cycle)
9383 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
9385 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
9386 && !STMT_VINFO_RELEVANT (stmt_info)
9387 && !STMT_VINFO_LIVE_P (stmt_info))
9388 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
9390 *vectype = STMT_VINFO_VECTYPE (stmt_info);
9391 gcc_assert (*vectype != NULL_TREE);
9393 else if (*dt == vect_uninitialized_def
9394 || *dt == vect_constant_def
9395 || *dt == vect_external_def)
9396 *vectype = NULL_TREE;
9397 else
9398 gcc_unreachable ();
9400 return true;
9404 /* Function supportable_widening_operation
9406 Check whether an operation represented by the code CODE is a
9407 widening operation that is supported by the target platform in
9408 vector form (i.e., when operating on arguments of type VECTYPE_IN
9409 producing a result of type VECTYPE_OUT).
9411 Widening operations we currently support are NOP (CONVERT), FLOAT,
9412 WIDEN_MULT and WIDEN_LSHIFT. This function checks if these operations are supported
9413 by the target platform either directly (via vector tree-codes), or via
9414 target builtins.
9416 Output:
9417 - CODE1 and CODE2 are codes of vector operations to be used when
9418 vectorizing the operation, if available.
9419 - MULTI_STEP_CVT determines the number of required intermediate steps in
9420 case of multi-step conversion (like char->short->int - in that case
9421 MULTI_STEP_CVT will be 1).
9422 - INTERM_TYPES contains the intermediate type required to perform the
9423 widening operation (short in the above example). */
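/* Editorial example (not part of the original source): widening
   char -> int with 16-byte vectors cannot usually be done in a single
   unpack step, so the conversion is performed as char -> short -> int.
   In that case this function would be expected to return true with
   MULTI_STEP_CVT == 1 and INTERM_TYPES containing the short vector
   type, assuming the target provides the unpack optabs for each
   step.  */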
9425 bool
9426 supportable_widening_operation (enum tree_code code, gimple *stmt,
9427 tree vectype_out, tree vectype_in,
9428 enum tree_code *code1, enum tree_code *code2,
9429 int *multi_step_cvt,
9430 vec<tree> *interm_types)
9432 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
9433 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
9434 struct loop *vect_loop = NULL;
9435 machine_mode vec_mode;
9436 enum insn_code icode1, icode2;
9437 optab optab1, optab2;
9438 tree vectype = vectype_in;
9439 tree wide_vectype = vectype_out;
9440 enum tree_code c1, c2;
9441 int i;
9442 tree prev_type, intermediate_type;
9443 machine_mode intermediate_mode, prev_mode;
9444 optab optab3, optab4;
9446 *multi_step_cvt = 0;
9447 if (loop_info)
9448 vect_loop = LOOP_VINFO_LOOP (loop_info);
9450 switch (code)
9452 case WIDEN_MULT_EXPR:
9453 /* The result of a vectorized widening operation usually requires
9454 two vectors (because the widened results do not fit into one vector).
9455 The generated vector results would normally be expected to be
9456 generated in the same order as in the original scalar computation,
9457 i.e. if 8 results are generated in each vector iteration, they are
9458 to be organized as follows:
9459 vect1: [res1,res2,res3,res4],
9460 vect2: [res5,res6,res7,res8].
9462 However, in the special case that the result of the widening
9463 operation is used in a reduction computation only, the order doesn't
9464 matter (because when vectorizing a reduction we change the order of
9465 the computation). Some targets can take advantage of this and
9466 generate more efficient code. For example, targets like Altivec,
9467 that support widen_mult using a sequence of {mult_even,mult_odd}
9468 generate the following vectors:
9469 vect1: [res1,res3,res5,res7],
9470 vect2: [res2,res4,res6,res8].
9472 When vectorizing outer-loops, we execute the inner-loop sequentially
9473 (each vectorized inner-loop iteration contributes to VF outer-loop
9474 iterations in parallel). We therefore don't allow changing the
9475 order of the computation in the inner-loop during outer-loop
9476 vectorization. */
9477 /* TODO: Another case in which order doesn't *really* matter is when we
9478 widen and then contract again, e.g. (short)((int)x * y >> 8).
9479 Normally, pack_trunc performs an even/odd permute, whereas the
9480 repack from an even/odd expansion would be an interleave, which
9481 would be significantly simpler for e.g. AVX2. */
9482 /* In any case, in order to avoid duplicating the code below, recurse
9483 on VEC_WIDEN_MULT_EVEN_EXPR. If it succeeds, all the return values
9484 are properly set up for the caller. If we fail, we'll continue with
9485 a VEC_WIDEN_MULT_LO/HI_EXPR check. */
9486 if (vect_loop
9487 && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
9488 && !nested_in_vect_loop_p (vect_loop, stmt)
9489 && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
9490 stmt, vectype_out, vectype_in,
9491 code1, code2, multi_step_cvt,
9492 interm_types))
9494 /* Elements in a vector with the vect_used_by_reduction property cannot
9495 be reordered if the use chain with this property does not have the
9496 same operation. One such example is s += a * b, where elements
9497 in a and b cannot be reordered. Here we check if the vector defined
9498 by STMT is only directly used in the reduction statement. */
9499 tree lhs = gimple_assign_lhs (stmt);
9500 use_operand_p dummy;
9501 gimple *use_stmt;
9502 stmt_vec_info use_stmt_info = NULL;
9503 if (single_imm_use (lhs, &dummy, &use_stmt)
9504 && (use_stmt_info = vinfo_for_stmt (use_stmt))
9505 && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
9506 return true;
9508 c1 = VEC_WIDEN_MULT_LO_EXPR;
9509 c2 = VEC_WIDEN_MULT_HI_EXPR;
9510 break;
9512 case DOT_PROD_EXPR:
9513 c1 = DOT_PROD_EXPR;
9514 c2 = DOT_PROD_EXPR;
9515 break;
9517 case SAD_EXPR:
9518 c1 = SAD_EXPR;
9519 c2 = SAD_EXPR;
9520 break;
9522 case VEC_WIDEN_MULT_EVEN_EXPR:
9523 /* Support the recursion induced just above. */
9524 c1 = VEC_WIDEN_MULT_EVEN_EXPR;
9525 c2 = VEC_WIDEN_MULT_ODD_EXPR;
9526 break;
9528 case WIDEN_LSHIFT_EXPR:
9529 c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
9530 c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
9531 break;
9533 CASE_CONVERT:
9534 c1 = VEC_UNPACK_LO_EXPR;
9535 c2 = VEC_UNPACK_HI_EXPR;
9536 break;
9538 case FLOAT_EXPR:
9539 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
9540 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
9541 break;
9543 case FIX_TRUNC_EXPR:
9544 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
9545 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
9546 computing the operation. */
9547 return false;
9549 default:
9550 gcc_unreachable ();
9553 if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
9554 std::swap (c1, c2);
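/* Editorial note (not part of the original source): the *_LO/*_HI codes
   name the two halves of the input vector; which half corresponds to
   the first group of scalar results depends on endianness, hence the
   swap above.  The even/odd variant is endian-neutral, so it is left
   alone.  */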
9556 if (code == FIX_TRUNC_EXPR)
9558 /* The signedness is determined from the output operand. */
9559 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
9560 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
9562 else
9564 optab1 = optab_for_tree_code (c1, vectype, optab_default);
9565 optab2 = optab_for_tree_code (c2, vectype, optab_default);
9568 if (!optab1 || !optab2)
9569 return false;
9571 vec_mode = TYPE_MODE (vectype);
9572 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
9573 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
9574 return false;
9576 *code1 = c1;
9577 *code2 = c2;
9579 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
9580 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
9581 /* For scalar masks we may have different boolean
9582 vector types having the same QImode. Thus we
9583 add an additional check on the number of elements. */
9584 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
9585 || (TYPE_VECTOR_SUBPARTS (vectype) / 2
9586 == TYPE_VECTOR_SUBPARTS (wide_vectype)));
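/* Editorial note (not part of the original source): e.g. with
   AVX-512-style scalar masks a 4-element and an 8-element boolean
   vector can both have QImode, so comparing modes alone would not
   distinguish them; hence the extra subparts check above.  */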
9588 /* Check if it's a multi-step conversion that can be done using intermediate
9589 types. */
9591 prev_type = vectype;
9592 prev_mode = vec_mode;
9594 if (!CONVERT_EXPR_CODE_P (code))
9595 return false;
9597 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
9598 intermediate steps in the promotion sequence. We try
9599 MAX_INTERM_CVT_STEPS steps to get to WIDE_VECTYPE, and fail if we do
9600 not. */
9601 interm_types->create (MAX_INTERM_CVT_STEPS);
9602 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
9604 intermediate_mode = insn_data[icode1].operand[0].mode;
9605 if (VECTOR_BOOLEAN_TYPE_P (prev_type))
9607 intermediate_type
9608 = build_truth_vector_type (TYPE_VECTOR_SUBPARTS (prev_type) / 2,
9609 current_vector_size);
9610 if (intermediate_mode != TYPE_MODE (intermediate_type))
9611 return false;
9613 else
9614 intermediate_type
9615 = lang_hooks.types.type_for_mode (intermediate_mode,
9616 TYPE_UNSIGNED (prev_type));
9618 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
9619 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
9621 if (!optab3 || !optab4
9622 || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
9623 || insn_data[icode1].operand[0].mode != intermediate_mode
9624 || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
9625 || insn_data[icode2].operand[0].mode != intermediate_mode
9626 || ((icode1 = optab_handler (optab3, intermediate_mode))
9627 == CODE_FOR_nothing)
9628 || ((icode2 = optab_handler (optab4, intermediate_mode))
9629 == CODE_FOR_nothing))
9630 break;
9632 interm_types->quick_push (intermediate_type);
9633 (*multi_step_cvt)++;
9635 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
9636 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
9637 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
9638 || (TYPE_VECTOR_SUBPARTS (intermediate_type) / 2
9639 == TYPE_VECTOR_SUBPARTS (wide_vectype)));
9641 prev_type = intermediate_type;
9642 prev_mode = intermediate_mode;
9645 interm_types->release ();
9646 return false;
9650 /* Function supportable_narrowing_operation
9652 Check whether an operation represented by the code CODE is a
9653 narrowing operation that is supported by the target platform in
9654 vector form (i.e., when operating on arguments of type VECTYPE_IN
9655 and producing a result of type VECTYPE_OUT).
9657 Narrowing operations we currently support are NOP (CONVERT) and
9658 FIX_TRUNC. This function checks if these operations are supported by
9659 the target platform directly via vector tree-codes.
9661 Output:
9662 - CODE1 is the code of a vector operation to be used when
9663 vectorizing the operation, if available.
9664 - MULTI_STEP_CVT determines the number of required intermediate steps in
9665 case of multi-step conversion (like int->short->char - in that case
9666 MULTI_STEP_CVT will be 1).
9667 - INTERM_TYPES contains the intermediate type required to perform the
9668 narrowing operation (short in the above example). */
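/* Editorial example (not part of the original source): narrowing
   int -> char with 16-byte vectors typically goes through short,
   i.e. int -> short -> char, using VEC_PACK_TRUNC_EXPR at each step.
   In that case this function would be expected to return true with
   MULTI_STEP_CVT == 1 and INTERM_TYPES containing the short vector
   type, provided the target implements the pack optabs involved.  */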
9670 bool
9671 supportable_narrowing_operation (enum tree_code code,
9672 tree vectype_out, tree vectype_in,
9673 enum tree_code *code1, int *multi_step_cvt,
9674 vec<tree> *interm_types)
9676 machine_mode vec_mode;
9677 enum insn_code icode1;
9678 optab optab1, interm_optab;
9679 tree vectype = vectype_in;
9680 tree narrow_vectype = vectype_out;
9681 enum tree_code c1;
9682 tree intermediate_type, prev_type;
9683 machine_mode intermediate_mode, prev_mode;
9684 int i;
9685 bool uns;
9687 *multi_step_cvt = 0;
9688 switch (code)
9690 CASE_CONVERT:
9691 c1 = VEC_PACK_TRUNC_EXPR;
9692 break;
9694 case FIX_TRUNC_EXPR:
9695 c1 = VEC_PACK_FIX_TRUNC_EXPR;
9696 break;
9698 case FLOAT_EXPR:
9699 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
9700 tree code and optabs used for computing the operation. */
9701 return false;
9703 default:
9704 gcc_unreachable ();
9707 if (code == FIX_TRUNC_EXPR)
9708 /* The signedness is determined from the output operand. */
9709 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
9710 else
9711 optab1 = optab_for_tree_code (c1, vectype, optab_default);
9713 if (!optab1)
9714 return false;
9716 vec_mode = TYPE_MODE (vectype);
9717 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
9718 return false;
9720 *code1 = c1;
9722 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
9723 /* For scalar masks we may have different boolean
9724 vector types having the same QImode. Thus we
9725 add an additional check on the number of elements. */
9726 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
9727 || (TYPE_VECTOR_SUBPARTS (vectype) * 2
9728 == TYPE_VECTOR_SUBPARTS (narrow_vectype)));
9730 /* Check if it's a multi-step conversion that can be done using intermediate
9731 types. */
9732 prev_mode = vec_mode;
9733 prev_type = vectype;
9734 if (code == FIX_TRUNC_EXPR)
9735 uns = TYPE_UNSIGNED (vectype_out);
9736 else
9737 uns = TYPE_UNSIGNED (vectype);
9739 /* For multi-step FIX_TRUNC_EXPR prefer a signed float-to-integer
9740 conversion over an unsigned one, as unsigned FIX_TRUNC_EXPR is often
9741 more costly than signed. */
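/* Editorial note (not part of the original source): when the final
   narrow type is unsigned, the chain may go through the corresponding
   signed types instead; later pack steps simply truncate, so the final
   bits are the same for values representable in the destination, and
   the usually cheaper signed conversion can be used.  The code below
   switches to the signed path only when the signed optab exists and
   its result mode matches.  */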
9742 if (code == FIX_TRUNC_EXPR && uns)
9744 enum insn_code icode2;
9746 intermediate_type
9747 = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
9748 interm_optab
9749 = optab_for_tree_code (c1, intermediate_type, optab_default);
9750 if (interm_optab != unknown_optab
9751 && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
9752 && insn_data[icode1].operand[0].mode
9753 == insn_data[icode2].operand[0].mode)
9755 uns = false;
9756 optab1 = interm_optab;
9757 icode1 = icode2;
9761 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
9762 intermediate steps in the narrowing sequence. We try
9763 MAX_INTERM_CVT_STEPS steps to get to NARROW_VECTYPE, and fail if we do not. */
9764 interm_types->create (MAX_INTERM_CVT_STEPS);
9765 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
9767 intermediate_mode = insn_data[icode1].operand[0].mode;
9768 if (VECTOR_BOOLEAN_TYPE_P (prev_type))
9770 intermediate_type
9771 = build_truth_vector_type (TYPE_VECTOR_SUBPARTS (prev_type) * 2,
9772 current_vector_size);
9773 if (intermediate_mode != TYPE_MODE (intermediate_type))
9774 return false;
9776 else
9777 intermediate_type
9778 = lang_hooks.types.type_for_mode (intermediate_mode, uns);
9779 interm_optab
9780 = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
9781 optab_default);
9782 if (!interm_optab
9783 || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
9784 || insn_data[icode1].operand[0].mode != intermediate_mode
9785 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
9786 == CODE_FOR_nothing))
9787 break;
9789 interm_types->quick_push (intermediate_type);
9790 (*multi_step_cvt)++;
9792 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
9793 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
9794 || (TYPE_VECTOR_SUBPARTS (intermediate_type) * 2
9795 == TYPE_VECTOR_SUBPARTS (narrow_vectype)));
9797 prev_mode = intermediate_mode;
9798 prev_type = intermediate_type;
9799 optab1 = interm_optab;
9802 interm_types->release ();
9803 return false;