/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003-2015 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "cgraph.h"
#include "dumpfile.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "tree-eh.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "cfgloop.h"
#include "tree-ssa-loop.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "builtins.h"

/* For lang_hooks.types.type_for_mode.  */
#include "langhooks.h"

/* Return the vectorized type for the given statement.  */

tree
stmt_vectype (struct _stmt_vec_info *stmt_info)
{
  return STMT_VINFO_VECTYPE (stmt_info);
}

/* Return TRUE iff the given statement is in an inner loop relative to
   the loop being vectorized.  */
bool
stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
{
  gimple *stmt = STMT_VINFO_STMT (stmt_info);
  basic_block bb = gimple_bb (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop* loop;

  if (!loop_vinfo)
    return false;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  return (bb->loop_father == loop->inner);
}

/* Record the cost of a statement, either by directly informing the
   target model or by saving it in a vector for later processing.
   Return a preliminary estimate of the statement's cost.  */

unsigned
record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
                  enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
                  int misalign, enum vect_cost_model_location where)
{
  if (body_cost_vec)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      stmt_info_for_cost si = { count, kind,
                                stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
                                misalign };
      body_cost_vec->safe_push (si);
      return (unsigned)
        (builtin_vectorization_cost (kind, vectype, misalign) * count);
    }
  else
    return add_stmt_cost (stmt_info->vinfo->target_cost_data,
                          count, kind, stmt_info, misalign, where);
}

/* Return a variable of type ELEM_TYPE[NELEMS].  */

static tree
create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
                         "vect_array");
}

/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT and the vector is associated
   with scalar destination SCALAR_DEST.  */

static tree
read_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
                   tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;
  gimple *new_stmt;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
                      build_int_cst (size_type_node, n),
                      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return vect_name;
}

/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT.  */

static void
write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect,
                    tree array, unsigned HOST_WIDE_INT n)
{
  tree array_ref;
  gimple *new_stmt;

  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
                      build_int_cst (size_type_node, n),
                      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
}

/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

static tree
create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
{
  tree mem_ref, alias_ptr_type;

  alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
  return mem_ref;
}

/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
                    enum vect_relevant relevant, bool live_p,
                    bool used_in_pattern)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
  gimple *pattern_stmt;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "mark relevant %d, live %d.\n", relevant, live_p);

  /* If this stmt is an original stmt in a pattern, we might need to mark its
     related pattern stmt instead of the original stmt.  However, such stmts
     may have their own uses that are not in any pattern, in such cases the
     stmt itself should be marked.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      bool found = false;
      if (!used_in_pattern)
        {
          imm_use_iterator imm_iter;
          use_operand_p use_p;
          gimple *use_stmt;
          tree lhs;
          loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
          struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

          if (is_gimple_assign (stmt))
            lhs = gimple_assign_lhs (stmt);
          else
            lhs = gimple_call_lhs (stmt);

          /* This use is out of pattern use, if LHS has other uses that are
             pattern uses, we should mark the stmt itself, and not the pattern
             stmt.  */
          if (lhs && TREE_CODE (lhs) == SSA_NAME)
            FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
              {
                if (is_gimple_debug (USE_STMT (use_p)))
                  continue;
                use_stmt = USE_STMT (use_p);

                if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
                  continue;

                if (vinfo_for_stmt (use_stmt)
                    && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
                  {
                    found = true;
                    break;
                  }
              }
        }

      if (!found)
        {
          /* This is the last stmt in a sequence that was detected as a
             pattern that can potentially be vectorized.  Don't mark the stmt
             as relevant/live because it's not going to be vectorized.
             Instead mark the pattern-stmt that replaces it.  */

          pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "last stmt in pattern. don't mark"
                             " relevant/live.\n");
          stmt_info = vinfo_for_stmt (pattern_stmt);
          gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
          save_relevant = STMT_VINFO_RELEVANT (stmt_info);
          save_live_p = STMT_VINFO_LIVE_P (stmt_info);
          stmt = pattern_stmt;
        }
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "already marked relevant/live.\n");
      return;
    }

  worklist->safe_push (stmt);
}

/* Function vect_stmt_relevant_p.

   Return true if STMT in loop that is represented by LOOP_VINFO is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - control stmts in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */

static bool
vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
                      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
         != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt)
        && !gimple_clobber_p (stmt))
      {
        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vec_stmt_relevant_p: stmt has vdefs.\n");
        *relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
        {
          basic_block bb = gimple_bb (USE_STMT (use_p));
          if (!flow_bb_inside_loop_p (loop, bb))
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location,
                                 "vec_stmt_relevant_p: used out of loop.\n");

              if (is_gimple_debug (USE_STMT (use_p)))
                continue;

              /* We expect all such uses to be in the loop exit phis
                 (because of loop closed form)   */
              gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
              gcc_assert (bb == single_exit (loop)->dest);

              *live_p = true;
            }
        }
    }

  return (*live_p || *relevant);
}

/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref. FORNOW this means that its of one of
     the following forms:
     -1- ARRAY_REF = var
     -2- var = ARRAY_REF
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))
    {
      if (is_gimple_call (stmt)
          && gimple_call_internal_p (stmt))
        switch (gimple_call_internal_fn (stmt))
          {
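          /* For IFN_MASK_STORE, argument 3 is the value being stored;
             for both IFN_MASK_LOAD and IFN_MASK_STORE, argument 2 is
             the mask.  These uses are not address computations.  */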
          case IFN_MASK_STORE:
            operand = gimple_call_arg (stmt, 3);
            if (operand == use)
              return true;
            /* FALLTHRU */
          case IFN_MASK_LOAD:
            operand = gimple_call_arg (stmt, 2);
            if (operand == use)
              return true;
            break;
          default:
            break;
          }
      return false;
    }

  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}

/*
   Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
   - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
     be performed.

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
       STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array indexing),
   which does not need to be directly vectorized, then the liveness/relevance
   of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
   skip DEF_STMT cause it had already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
   be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */

static bool
process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
             enum vect_relevant relevant, vec<gimple *> *worklist,
             bool force)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  gimple *def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: unsupported use in stmt.\n");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "reduc-stmt defining reduc-phi in the same nest.\n");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
        dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
                  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
        outer-loop-header-bb:
                d = def_stmt
        inner-loop:
                stmt # use (d)
        outer-loop-tail-bb:
                ...               */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "outer-loop def-stmt defining inner-loop stmt.\n");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
                      vect_used_in_scope : vect_unused_in_scope;
          break;

        case vect_used_in_outer_by_reduction:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_by_reduction;
          break;

        case vect_used_in_outer:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_in_scope;
          break;

        case vect_used_in_scope:
          break;

        default:
          gcc_unreachable ();
        }
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
        outer-loop-header-bb:
                ...
        inner-loop:
                d = def_stmt
        outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
                stmt # use (d)          */
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "inner-loop def-stmt defining outer-loop stmt.\n");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
            || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
                      vect_used_in_outer_by_reduction : vect_unused_in_scope;
          break;

        case vect_used_by_reduction:
          relevant = vect_used_in_outer_by_reduction;
          break;

        case vect_used_in_scope:
          relevant = vect_used_in_outer;
          break;

        default:
          gcc_unreachable ();
        }
    }

  vect_mark_relevant (worklist, def_stmt, relevant, live_p,
                      is_pattern_stmt_p (stmt_vinfo));
  return true;
}

/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.    T1 = a[T0]

   3.    j = j + 1

   Stmt 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */

bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple *stmt;
  unsigned int i;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple *phi;
  bool live_p;
  enum vect_relevant relevant, tmp_relevant;
  enum vect_def_type def_type;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_mark_stmts_to_be_vectorized ===\n");

  auto_vec<gimple *, 64> worklist;

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
            }

          if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, phi, relevant, live_p, false);
        }
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          stmt = gsi_stmt (si);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
            }

          if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, stmt, relevant, live_p, false);
        }
    }

  /* 2. Process_worklist */
  while (worklist.length () > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = worklist.pop ();
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
        }

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines it
         (DEF_STMT) as relevant/irrelevant and live/dead according to the
         liveness and relevance properties of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
      live_p = STMT_VINFO_LIVE_P (stmt_vinfo);

      /* Generally, the liveness and relevance properties of STMT are
         propagated as is to the DEF_STMTs of its USEs:
          live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
          relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)

         One exception is when STMT has been identified as defining a reduction
         variable; in this case we set the liveness/relevance as follows:
           live_p = false
           relevant = vect_used_by_reduction
         This is because we distinguish between two kinds of relevant stmts -
         those that are used by a reduction computation, and those that are
         (also) used by a regular computation.  This allows us later on to
         identify stmts that are used solely by a reduction, and therefore the
         order of the results that they produce does not have to be kept.  */

      def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
      tmp_relevant = relevant;
      switch (def_type)
        {
        case vect_reduction_def:
          switch (tmp_relevant)
            {
            case vect_unused_in_scope:
              relevant = vect_used_by_reduction;
              break;

            case vect_used_by_reduction:
              if (gimple_code (stmt) == GIMPLE_PHI)
                break;
              /* fall through */

            default:
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "unsupported use of reduction.\n");
              return false;
            }

          live_p = false;
          break;

        case vect_nested_cycle:
          if (tmp_relevant != vect_unused_in_scope
              && tmp_relevant != vect_used_in_outer_by_reduction
              && tmp_relevant != vect_used_in_outer)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "unsupported use of nested cycle.\n");

              return false;
            }

          live_p = false;
          break;

        case vect_double_reduction_def:
          if (tmp_relevant != vect_unused_in_scope
              && tmp_relevant != vect_used_by_reduction)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "unsupported use of double reduction.\n");

              return false;
            }

          live_p = false;
          break;

        default:
          break;
        }

      if (is_pattern_stmt_p (stmt_vinfo))
        {
          /* Pattern statements are not inserted into the code, so
             FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
             have to scan the RHS or function arguments instead.  */
          if (is_gimple_assign (stmt))
            {
              enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
              tree op = gimple_assign_rhs1 (stmt);
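
              /* Operand 0 of the assignment is the lhs, so start scanning
                 the uses from the first rhs operand.  */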
              i = 1;
              if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
                {
                  if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
                                    live_p, relevant, &worklist, false)
                      || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
                                       live_p, relevant, &worklist, false))
                    return false;
                  i = 2;
                }
              for (; i < gimple_num_ops (stmt); i++)
                {
                  op = gimple_op (stmt, i);
                  if (TREE_CODE (op) == SSA_NAME
                      && !process_use (stmt, op, loop_vinfo, live_p, relevant,
                                       &worklist, false))
                    return false;
                }
            }
          else if (is_gimple_call (stmt))
            {
              for (i = 0; i < gimple_call_num_args (stmt); i++)
                {
                  tree arg = gimple_call_arg (stmt, i);
                  if (!process_use (stmt, arg, loop_vinfo, live_p, relevant,
                                    &worklist, false))
                    return false;
                }
            }
        }
      else
        FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
          {
            tree op = USE_FROM_PTR (use_p);
            if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
                              &worklist, false))
              return false;
          }
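
      /* A gather/scatter offset only feeds the address computation, but it
         still needs a vector def, so process it with FORCE set.  */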
      if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
        {
          tree off;
          tree decl = vect_check_gather_scatter (stmt, loop_vinfo, NULL, &off, NULL);
          gcc_assert (decl);
          if (!process_use (stmt, off, loop_vinfo, live_p, relevant,
                            &worklist, true))
            return false;
        }
    } /* while worklist */

  return true;
}

/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
                        enum vect_def_type *dt,
                        stmt_vector_for_cost *prologue_cost_vec,
                        stmt_vector_for_cost *body_cost_vec)
{
  int i;
  int inside_cost = 0, prologue_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += record_stmt_cost (prologue_cost_vec, 1, vector_stmt,
                                         stmt_info, 0, vect_prologue);

  /* Pass the inside-of-loop statements to the target-specific cost model.  */
  inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
                                  stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_simple_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}

/* Model cost for type demotion and promotion operations.  PWR is normally
   zero for single-step promotions and demotions.  It will be one if
   two-step promotion/demotion is required, and so on.  Each additional
   step doubles the number of instructions required.  */

static void
vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
                                    enum vect_def_type *dt, int pwr)
{
  int i, tmp;
  int inside_cost = 0, prologue_cost = 0;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  void *target_cost_data;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (loop_vinfo)
    target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
  else
    target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);

  for (i = 0; i < pwr + 1; i++)
    {
      tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
        (i + 1) : i;
      inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
                                    vec_promote_demote, stmt_info, 0,
                                    vect_body);
    }

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
                                      stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_promotion_demotion_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}

/* Function vect_cost_group_size

   For grouped load or store, return the group_size only if it is the first
   load or store of a group, else return 1.  This ensures that group size is
   only returned once per group.  */

static int
vect_cost_group_size (stmt_vec_info stmt_info)
{
  gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);

  if (first_stmt == STMT_VINFO_STMT (stmt_info))
    return GROUP_SIZE (stmt_info);

  return 1;
}
902 /* Function vect_model_store_cost
904 Models cost for stores. In the case of grouped accesses, one access
905 has the overhead of the grouped access attributed to it. */
907 void
908 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
909 bool store_lanes_p, enum vect_def_type dt,
910 slp_tree slp_node,
911 stmt_vector_for_cost *prologue_cost_vec,
912 stmt_vector_for_cost *body_cost_vec)
914 int group_size;
915 unsigned int inside_cost = 0, prologue_cost = 0;
916 struct data_reference *first_dr;
917 gimple *first_stmt;
919 if (dt == vect_constant_def || dt == vect_external_def)
920 prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
921 stmt_info, 0, vect_prologue);
923 /* Grouped access? */
924 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
926 if (slp_node)
928 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
929 group_size = 1;
931 else
933 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
934 group_size = vect_cost_group_size (stmt_info);
937 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
939 /* Not a grouped access. */
940 else
942 group_size = 1;
943 first_dr = STMT_VINFO_DATA_REF (stmt_info);
946 /* We assume that the cost of a single store-lanes instruction is
947 equivalent to the cost of GROUP_SIZE separate stores. If a grouped
948 access is instead being provided by a permute-and-store operation,
949 include the cost of the permutes. */
950 if (!store_lanes_p && group_size > 1
951 && !STMT_VINFO_STRIDED_P (stmt_info))
953 /* Uses a high and low interleave or shuffle operations for each
954 needed permute. */
955 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
956 inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
957 stmt_info, 0, vect_body);
959 if (dump_enabled_p ())
960 dump_printf_loc (MSG_NOTE, vect_location,
961 "vect_model_store_cost: strided group_size = %d .\n",
962 group_size);
965 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
966 /* Costs of the stores. */
967 if (STMT_VINFO_STRIDED_P (stmt_info)
968 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
970 /* N scalar stores plus extracting the elements. */
971 inside_cost += record_stmt_cost (body_cost_vec,
972 ncopies * TYPE_VECTOR_SUBPARTS (vectype),
973 scalar_store, stmt_info, 0, vect_body);
975 else
976 vect_get_store_cost (first_dr, ncopies, &inside_cost, body_cost_vec);
978 if (STMT_VINFO_STRIDED_P (stmt_info))
979 inside_cost += record_stmt_cost (body_cost_vec,
980 ncopies * TYPE_VECTOR_SUBPARTS (vectype),
981 vec_to_scalar, stmt_info, 0, vect_body);
983 if (dump_enabled_p ())
984 dump_printf_loc (MSG_NOTE, vect_location,
985 "vect_model_store_cost: inside_cost = %d, "
986 "prologue_cost = %d .\n", inside_cost, prologue_cost);
990 /* Calculate cost of DR's memory access. */
991 void
992 vect_get_store_cost (struct data_reference *dr, int ncopies,
993 unsigned int *inside_cost,
994 stmt_vector_for_cost *body_cost_vec)
996 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
997 gimple *stmt = DR_STMT (dr);
998 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1000 switch (alignment_support_scheme)
1002 case dr_aligned:
1004 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1005 vector_store, stmt_info, 0,
1006 vect_body);
1008 if (dump_enabled_p ())
1009 dump_printf_loc (MSG_NOTE, vect_location,
1010 "vect_model_store_cost: aligned.\n");
1011 break;
1014 case dr_unaligned_supported:
1016 /* Here, we assign an additional cost for the unaligned store. */
1017 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1018 unaligned_store, stmt_info,
1019 DR_MISALIGNMENT (dr), vect_body);
1020 if (dump_enabled_p ())
1021 dump_printf_loc (MSG_NOTE, vect_location,
1022 "vect_model_store_cost: unaligned supported by "
1023 "hardware.\n");
1024 break;
1027 case dr_unaligned_unsupported:
1029 *inside_cost = VECT_MAX_COST;
1031 if (dump_enabled_p ())
1032 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1033 "vect_model_store_cost: unsupported access.\n");
1034 break;
1037 default:
1038 gcc_unreachable ();
1043 /* Function vect_model_load_cost
1045 Models cost for loads. In the case of grouped accesses, the last access
1046 has the overhead of the grouped access attributed to it. Since unaligned
1047 accesses are supported for loads, we also account for the costs of the
1048 access scheme chosen. */
1050 void
1051 vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
1052 bool load_lanes_p, slp_tree slp_node,
1053 stmt_vector_for_cost *prologue_cost_vec,
1054 stmt_vector_for_cost *body_cost_vec)
1056 int group_size;
1057 gimple *first_stmt;
1058 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
1059 unsigned int inside_cost = 0, prologue_cost = 0;
1061 /* Grouped accesses? */
1062 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
1063 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && first_stmt && !slp_node)
1065 group_size = vect_cost_group_size (stmt_info);
1066 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
1068 /* Not a grouped access. */
1069 else
1071 group_size = 1;
1072 first_dr = dr;
1075 /* We assume that the cost of a single load-lanes instruction is
1076 equivalent to the cost of GROUP_SIZE separate loads. If a grouped
1077 access is instead being provided by a load-and-permute operation,
1078 include the cost of the permutes. */
1079 if (!load_lanes_p && group_size > 1
1080 && !STMT_VINFO_STRIDED_P (stmt_info))
1082 /* Uses an even and odd extract operations or shuffle operations
1083 for each needed permute. */
1084 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
1085 inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
1086 stmt_info, 0, vect_body);
1088 if (dump_enabled_p ())
1089 dump_printf_loc (MSG_NOTE, vect_location,
1090 "vect_model_load_cost: strided group_size = %d .\n",
1091 group_size);
1094 /* The loads themselves. */
1095 if (STMT_VINFO_STRIDED_P (stmt_info)
1096 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
1098 /* N scalar loads plus gathering them into a vector. */
1099 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1100 inside_cost += record_stmt_cost (body_cost_vec,
1101 ncopies * TYPE_VECTOR_SUBPARTS (vectype),
1102 scalar_load, stmt_info, 0, vect_body);
1104 else
1105 vect_get_load_cost (first_dr, ncopies,
1106 ((!STMT_VINFO_GROUPED_ACCESS (stmt_info))
1107 || group_size > 1 || slp_node),
1108 &inside_cost, &prologue_cost,
1109 prologue_cost_vec, body_cost_vec, true);
1110 if (STMT_VINFO_STRIDED_P (stmt_info))
1111 inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
1112 stmt_info, 0, vect_body);
1114 if (dump_enabled_p ())
1115 dump_printf_loc (MSG_NOTE, vect_location,
1116 "vect_model_load_cost: inside_cost = %d, "
1117 "prologue_cost = %d .\n", inside_cost, prologue_cost);
1121 /* Calculate cost of DR's memory access. */
1122 void
1123 vect_get_load_cost (struct data_reference *dr, int ncopies,
1124 bool add_realign_cost, unsigned int *inside_cost,
1125 unsigned int *prologue_cost,
1126 stmt_vector_for_cost *prologue_cost_vec,
1127 stmt_vector_for_cost *body_cost_vec,
1128 bool record_prologue_costs)
1130 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
1131 gimple *stmt = DR_STMT (dr);
1132 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1134 switch (alignment_support_scheme)
1136 case dr_aligned:
1138 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1139 stmt_info, 0, vect_body);
1141 if (dump_enabled_p ())
1142 dump_printf_loc (MSG_NOTE, vect_location,
1143 "vect_model_load_cost: aligned.\n");
1145 break;
1147 case dr_unaligned_supported:
1149 /* Here, we assign an additional cost for the unaligned load. */
1150 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1151 unaligned_load, stmt_info,
1152 DR_MISALIGNMENT (dr), vect_body);
1154 if (dump_enabled_p ())
1155 dump_printf_loc (MSG_NOTE, vect_location,
1156 "vect_model_load_cost: unaligned supported by "
1157 "hardware.\n");
1159 break;
1161 case dr_explicit_realign:
1163 *inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
1164 vector_load, stmt_info, 0, vect_body);
1165 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1166 vec_perm, stmt_info, 0, vect_body);
1168 /* FIXME: If the misalignment remains fixed across the iterations of
1169 the containing loop, the following cost should be added to the
1170 prologue costs. */
1171 if (targetm.vectorize.builtin_mask_for_load)
1172 *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
1173 stmt_info, 0, vect_body);
1175 if (dump_enabled_p ())
1176 dump_printf_loc (MSG_NOTE, vect_location,
1177 "vect_model_load_cost: explicit realign\n");
1179 break;
1181 case dr_explicit_realign_optimized:
1183 if (dump_enabled_p ())
1184 dump_printf_loc (MSG_NOTE, vect_location,
1185 "vect_model_load_cost: unaligned software "
1186 "pipelined.\n");
1188 /* Unaligned software pipeline has a load of an address, an initial
1189 load, and possibly a mask operation to "prime" the loop. However,
1190 if this is an access in a group of loads, which provide grouped
1191 access, then the above cost should only be considered for one
1192 access in the group. Inside the loop, there is a load op
1193 and a realignment op. */
1195 if (add_realign_cost && record_prologue_costs)
1197 *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
1198 vector_stmt, stmt_info,
1199 0, vect_prologue);
1200 if (targetm.vectorize.builtin_mask_for_load)
1201 *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
1202 vector_stmt, stmt_info,
1203 0, vect_prologue);
1206 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1207 stmt_info, 0, vect_body);
1208 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
1209 stmt_info, 0, vect_body);
1211 if (dump_enabled_p ())
1212 dump_printf_loc (MSG_NOTE, vect_location,
1213 "vect_model_load_cost: explicit realign optimized"
1214 "\n");
1216 break;
1219 case dr_unaligned_unsupported:
1221 *inside_cost = VECT_MAX_COST;
1223 if (dump_enabled_p ())
1224 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1225 "vect_model_load_cost: unsupported access.\n");
1226 break;
1229 default:
1230 gcc_unreachable ();
1234 /* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
1235 the loop preheader for the vectorized stmt STMT. */
1237 static void
1238 vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
1240 if (gsi)
1241 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1242 else
1244 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1245 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1247 if (loop_vinfo)
1249 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1250 basic_block new_bb;
1251 edge pe;
1253 if (nested_in_vect_loop_p (loop, stmt))
1254 loop = loop->inner;
1256 pe = loop_preheader_edge (loop);
1257 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
1258 gcc_assert (!new_bb);
1260 else
1262 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
1263 basic_block bb;
1264 gimple_stmt_iterator gsi_bb_start;
1266 gcc_assert (bb_vinfo);
1267 bb = BB_VINFO_BB (bb_vinfo);
1268 gsi_bb_start = gsi_after_labels (bb);
1269 gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
1273 if (dump_enabled_p ())
1275 dump_printf_loc (MSG_NOTE, vect_location,
1276 "created new init_stmt: ");
1277 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
1281 /* Function vect_init_vector.
1283 Insert a new stmt (INIT_STMT) that initializes a new variable of type
1284 TYPE with the value VAL. If TYPE is a vector type and VAL does not have
1285 vector type a vector with all elements equal to VAL is created first.
1286 Place the initialization at BSI if it is not NULL. Otherwise, place the
1287 initialization at the loop preheader.
1288 Return the DEF of INIT_STMT.
1289 It will be used in the vectorization of STMT. */
1291 tree
1292 vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi)
1294 gimple *init_stmt;
1295 tree new_temp;
1297 if (TREE_CODE (type) == VECTOR_TYPE
1298 && TREE_CODE (TREE_TYPE (val)) != VECTOR_TYPE)
1300 if (!types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
1302 if (CONSTANT_CLASS_P (val))
1303 val = fold_unary (VIEW_CONVERT_EXPR, TREE_TYPE (type), val);
1304 else
1306 new_temp = make_ssa_name (TREE_TYPE (type));
1307 init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
1308 vect_init_vector_1 (stmt, init_stmt, gsi);
1309 val = new_temp;
1312 val = build_vector_from_val (type, val);
1315 new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
1316 init_stmt = gimple_build_assign (new_temp, val);
1317 vect_init_vector_1 (stmt, init_stmt, gsi);
1318 return new_temp;
1322 /* Function vect_get_vec_def_for_operand.
1324 OP is an operand in STMT. This function returns a (vector) def that will be
1325 used in the vectorized stmt for STMT.
1327 In the case that OP is an SSA_NAME which is defined in the loop, then
1328 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
1330 In case OP is an invariant or constant, a new stmt that creates a vector def
1331 needs to be introduced. */
1333 tree
1334 vect_get_vec_def_for_operand (tree op, gimple *stmt)
1336 tree vec_oprnd;
1337 gimple *vec_stmt;
1338 gimple *def_stmt;
1339 stmt_vec_info def_stmt_info = NULL;
1340 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1341 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1342 enum vect_def_type dt;
1343 bool is_simple_use;
1344 tree vector_type;
1346 if (dump_enabled_p ())
1348 dump_printf_loc (MSG_NOTE, vect_location,
1349 "vect_get_vec_def_for_operand: ");
1350 dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
1351 dump_printf (MSG_NOTE, "\n");
1354 is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt);
1355 gcc_assert (is_simple_use);
1356 if (dump_enabled_p ())
1358 int loc_printed = 0;
1359 if (def_stmt)
1361 if (loc_printed)
1362 dump_printf (MSG_NOTE, " def_stmt = ");
1363 else
1364 dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = ");
1365 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
1369 switch (dt)
1371 /* operand is a constant or a loop invariant. */
1372 case vect_constant_def:
1373 case vect_external_def:
1375 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
1376 gcc_assert (vector_type);
1377 return vect_init_vector (stmt, op, vector_type, NULL);
1380 /* operand is defined inside the loop. */
1381 case vect_internal_def:
1383 /* Get the def from the vectorized stmt. */
1384 def_stmt_info = vinfo_for_stmt (def_stmt);
1386 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1387 /* Get vectorized pattern statement. */
1388 if (!vec_stmt
1389 && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
1390 && !STMT_VINFO_RELEVANT (def_stmt_info))
1391 vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
1392 STMT_VINFO_RELATED_STMT (def_stmt_info)));
1393 gcc_assert (vec_stmt);
1394 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1395 vec_oprnd = PHI_RESULT (vec_stmt);
1396 else if (is_gimple_call (vec_stmt))
1397 vec_oprnd = gimple_call_lhs (vec_stmt);
1398 else
1399 vec_oprnd = gimple_assign_lhs (vec_stmt);
1400 return vec_oprnd;
1403 /* operand is defined by a loop header phi - reduction */
1404 case vect_reduction_def:
1405 case vect_double_reduction_def:
1406 case vect_nested_cycle:
1407 /* Code should use get_initial_def_for_reduction. */
1408 gcc_unreachable ();
1410 /* operand is defined by loop-header phi - induction. */
1411 case vect_induction_def:
1413 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1415 /* Get the def from the vectorized stmt. */
1416 def_stmt_info = vinfo_for_stmt (def_stmt);
1417 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1418 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1419 vec_oprnd = PHI_RESULT (vec_stmt);
1420 else
1421 vec_oprnd = gimple_get_lhs (vec_stmt);
1422 return vec_oprnd;
1425 default:
1426 gcc_unreachable ();
1431 /* Function vect_get_vec_def_for_stmt_copy
1433 Return a vector-def for an operand. This function is used when the
1434 vectorized stmt to be created (by the caller to this function) is a "copy"
1435 created in case the vectorized result cannot fit in one vector, and several
1436 copies of the vector-stmt are required. In this case the vector-def is
1437 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1438 of the stmt that defines VEC_OPRND.
1439 DT is the type of the vector def VEC_OPRND.
1441 Context:
1442 In case the vectorization factor (VF) is bigger than the number
1443 of elements that can fit in a vectype (nunits), we have to generate
1444 more than one vector stmt to vectorize the scalar stmt. This situation
1445 arises when there are multiple data-types operated upon in the loop; the
1446 smallest data-type determines the VF, and as a result, when vectorizing
1447 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1448 vector stmt (each computing a vector of 'nunits' results, and together
1449 computing 'VF' results in each iteration). This function is called when
1450 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1451 which VF=16 and nunits=4, so the number of copies required is 4):
1453 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1455 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1456 VS1.1: vx.1 = memref1 VS1.2
1457 VS1.2: vx.2 = memref2 VS1.3
1458 VS1.3: vx.3 = memref3
1460 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1461 VSnew.1: vz1 = vx.1 + ... VSnew.2
1462 VSnew.2: vz2 = vx.2 + ... VSnew.3
1463 VSnew.3: vz3 = vx.3 + ...
1465 The vectorization of S1 is explained in vectorizable_load.
1466 The vectorization of S2:
1467 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1468 the function 'vect_get_vec_def_for_operand' is called to
1469 get the relevant vector-def for each operand of S2. For operand x it
1470 returns the vector-def 'vx.0'.
1472 To create the remaining copies of the vector-stmt (VSnew.j), this
1473 function is called to get the relevant vector-def for each operand. It is
1474 obtained from the respective VS1.j stmt, which is recorded in the
1475 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1477 For example, to obtain the vector-def 'vx.1' in order to create the
1478 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1479 Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
1480 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1481 and return its def ('vx.1').
1482 Overall, to create the above sequence this function will be called 3 times:
1483 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1484 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1485 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1487 tree
1488 vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1490 gimple *vec_stmt_for_operand;
1491 stmt_vec_info def_stmt_info;
1493 /* Do nothing; can reuse same def. */
1494 if (dt == vect_external_def || dt == vect_constant_def )
1495 return vec_oprnd;
1497 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1498 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1499 gcc_assert (def_stmt_info);
1500 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1501 gcc_assert (vec_stmt_for_operand);
1502 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1503 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1504 else
1505 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1506 return vec_oprnd;
1510 /* Get vectorized definitions for the operands to create a copy of an original
1511 stmt. See vect_get_vec_def_for_stmt_copy () for details. */
1513 static void
1514 vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
1515 vec<tree> *vec_oprnds0,
1516 vec<tree> *vec_oprnds1)
1518 tree vec_oprnd = vec_oprnds0->pop ();
1520 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
1521 vec_oprnds0->quick_push (vec_oprnd);
1523 if (vec_oprnds1 && vec_oprnds1->length ())
1525 vec_oprnd = vec_oprnds1->pop ();
1526 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
1527 vec_oprnds1->quick_push (vec_oprnd);
1532 /* Get vectorized definitions for OP0 and OP1.
1533 REDUC_INDEX is the index of reduction operand in case of reduction,
1534 and -1 otherwise. */
1536 void
1537 vect_get_vec_defs (tree op0, tree op1, gimple *stmt,
1538 vec<tree> *vec_oprnds0,
1539 vec<tree> *vec_oprnds1,
1540 slp_tree slp_node, int reduc_index)
1542 if (slp_node)
1544 int nops = (op1 == NULL_TREE) ? 1 : 2;
1545 auto_vec<tree> ops (nops);
1546 auto_vec<vec<tree> > vec_defs (nops);
1548 ops.quick_push (op0);
1549 if (op1)
1550 ops.quick_push (op1);
1552 vect_get_slp_defs (ops, slp_node, &vec_defs, reduc_index);
1554 *vec_oprnds0 = vec_defs[0];
1555 if (op1)
1556 *vec_oprnds1 = vec_defs[1];
1558 else
1560 tree vec_oprnd;
1562 vec_oprnds0->create (1);
1563 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt);
1564 vec_oprnds0->quick_push (vec_oprnd);
1566 if (op1)
1568 vec_oprnds1->create (1);
1569 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt);
1570 vec_oprnds1->quick_push (vec_oprnd);
1576 /* Function vect_finish_stmt_generation.
1578 Insert a new stmt. */
1580 void
1581 vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt,
1582 gimple_stmt_iterator *gsi)
1584 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1585 vec_info *vinfo = stmt_info->vinfo;
1587 gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
1589 if (!gsi_end_p (*gsi)
1590 && gimple_has_mem_ops (vec_stmt))
1592 gimple *at_stmt = gsi_stmt (*gsi);
1593 tree vuse = gimple_vuse (at_stmt);
1594 if (vuse && TREE_CODE (vuse) == SSA_NAME)
1596 tree vdef = gimple_vdef (at_stmt);
1597 gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
1598 /* If we have an SSA vuse and insert a store, update virtual
1599 SSA form to avoid triggering the renamer. Do so only
1600 if we can easily see all uses - which is what almost always
1601 happens with the way vectorized stmts are inserted. */
1602 if ((vdef && TREE_CODE (vdef) == SSA_NAME)
1603 && ((is_gimple_assign (vec_stmt)
1604 && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
1605 || (is_gimple_call (vec_stmt)
1606 && !(gimple_call_flags (vec_stmt)
1607 & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
1609 tree new_vdef = copy_ssa_name (vuse, vec_stmt);
1610 gimple_set_vdef (vec_stmt, new_vdef);
1611 SET_USE (gimple_vuse_op (at_stmt), new_vdef);
1615 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1617 set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo));
1619 if (dump_enabled_p ())
1621 dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
1622 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
1625 gimple_set_location (vec_stmt, gimple_location (stmt));
1627 /* While EH edges will generally prevent vectorization, stmt might
1628 e.g. be in a must-not-throw region. Ensure newly created stmts
1629 that could throw are part of the same region. */
1630 int lp_nr = lookup_stmt_eh_lp (stmt);
1631 if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
1632 add_stmt_to_eh_lp (vec_stmt, lp_nr);
1635 /* Checks if CALL can be vectorized in type VECTYPE. Returns
1636 a function declaration if the target has a vectorized version
1637 of the function, or NULL_TREE if the function cannot be vectorized. */
1639 tree
1640 vectorizable_function (gcall *call, tree vectype_out, tree vectype_in)
1642 tree fndecl = gimple_call_fndecl (call);
1644 /* We only handle functions that do not read or clobber memory -- i.e.
1645 const or novops ones. */
1646 if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
1647 return NULL_TREE;
1649 if (!fndecl
1650 || TREE_CODE (fndecl) != FUNCTION_DECL
1651 || !DECL_BUILT_IN (fndecl))
1652 return NULL_TREE;
1654 return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
1655 vectype_in);
1659 static tree permute_vec_elements (tree, tree, tree, gimple *,
1660 gimple_stmt_iterator *);
1663 /* Function vectorizable_mask_load_store.
1665 Check if STMT performs a conditional load or store that can be vectorized.
1666 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1667 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
1668 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1670 static bool
1671 vectorizable_mask_load_store (gimple *stmt, gimple_stmt_iterator *gsi,
1672 gimple **vec_stmt, slp_tree slp_node)
1674 tree vec_dest = NULL;
1675 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1676 stmt_vec_info prev_stmt_info;
1677 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1678 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1679 bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
1680 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
1681 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1682 tree elem_type;
1683 gimple *new_stmt;
1684 tree dummy;
1685 tree dataref_ptr = NULL_TREE;
1686 gimple *ptr_incr;
1687 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
1688 int ncopies;
1689 int i, j;
1690 bool inv_p;
1691 tree gather_base = NULL_TREE, gather_off = NULL_TREE;
1692 tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
1693 int gather_scale = 1;
1694 enum vect_def_type gather_dt = vect_unknown_def_type;
1695 bool is_store;
1696 tree mask;
1697 gimple *def_stmt;
1698 enum vect_def_type dt;
1700 if (slp_node != NULL)
1701 return false;
1703 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
1704 gcc_assert (ncopies >= 1);
1706 is_store = gimple_call_internal_fn (stmt) == IFN_MASK_STORE;
1707 mask = gimple_call_arg (stmt, 2);
1708 if (TYPE_PRECISION (TREE_TYPE (mask))
1709 != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype))))
1710 return false;
1712 /* FORNOW. This restriction should be relaxed. */
1713 if (nested_in_vect_loop && ncopies > 1)
1715 if (dump_enabled_p ())
1716 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1717 "multiple types in nested loop.");
1718 return false;
1721 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1722 return false;
1724 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1725 return false;
1727 if (!STMT_VINFO_DATA_REF (stmt_info))
1728 return false;
1730 elem_type = TREE_TYPE (vectype);
1732 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1733 return false;
1735 if (STMT_VINFO_STRIDED_P (stmt_info))
1736 return false;
1738 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
1740 gimple *def_stmt;
1741 gather_decl = vect_check_gather_scatter (stmt, loop_vinfo, &gather_base,
1742 &gather_off, &gather_scale);
1743 gcc_assert (gather_decl);
1744 if (!vect_is_simple_use (gather_off, loop_vinfo, &def_stmt, &gather_dt,
1745 &gather_off_vectype))
1747 if (dump_enabled_p ())
1748 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1749 "gather index use not simple.");
1750 return false;
1753 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
1754 tree masktype
1755 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
1756 if (TREE_CODE (masktype) == INTEGER_TYPE)
1758 if (dump_enabled_p ())
1759 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1760 "masked gather with integer mask not supported.");
1761 return false;
1764 else if (tree_int_cst_compare (nested_in_vect_loop
1765 ? STMT_VINFO_DR_STEP (stmt_info)
1766 : DR_STEP (dr), size_zero_node) <= 0)
1767 return false;
1768 else if (!VECTOR_MODE_P (TYPE_MODE (vectype))
1769 || !can_vec_mask_load_store_p (TYPE_MODE (vectype), !is_store))
1770 return false;
1772 if (TREE_CODE (mask) != SSA_NAME)
1773 return false;
1775 if (!vect_is_simple_use (mask, loop_vinfo, &def_stmt, &dt))
1776 return false;
1778 if (is_store)
1780 tree rhs = gimple_call_arg (stmt, 3);
1781 if (!vect_is_simple_use (rhs, loop_vinfo, &def_stmt, &dt))
1782 return false;
1785 if (!vec_stmt) /* transformation not required. */
1787 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
1788 if (is_store)
1789 vect_model_store_cost (stmt_info, ncopies, false, dt,
1790 NULL, NULL, NULL);
1791 else
1792 vect_model_load_cost (stmt_info, ncopies, false, NULL, NULL, NULL);
1793 return true;
1796 /** Transform. **/
1798 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
1800 tree vec_oprnd0 = NULL_TREE, op;
1801 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
1802 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
1803 tree ptr, vec_mask = NULL_TREE, mask_op = NULL_TREE, var, scale;
1804 tree perm_mask = NULL_TREE, prev_res = NULL_TREE;
1805 tree mask_perm_mask = NULL_TREE;
1806 edge pe = loop_preheader_edge (loop);
1807 gimple_seq seq;
1808 basic_block new_bb;
1809 enum { NARROW, NONE, WIDEN } modifier;
1810 int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);
1812 rettype = TREE_TYPE (TREE_TYPE (gather_decl));
1813 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1814 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1815 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1816 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1817 scaletype = TREE_VALUE (arglist);
1818 gcc_checking_assert (types_compatible_p (srctype, rettype)
1819 && types_compatible_p (srctype, masktype));
1821 if (nunits == gather_off_nunits)
1822 modifier = NONE;
1823 else if (nunits == gather_off_nunits / 2)
1825 unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
1826 modifier = WIDEN;
1828 for (i = 0; i < gather_off_nunits; ++i)
1829 sel[i] = i | nunits;
1831 perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
1833 else if (nunits == gather_off_nunits * 2)
1835 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
1836 modifier = NARROW;
1838 for (i = 0; i < nunits; ++i)
1839 sel[i] = i < gather_off_nunits
1840 ? i : i + nunits - gather_off_nunits;
1842 perm_mask = vect_gen_perm_mask_checked (vectype, sel);
1843 ncopies *= 2;
1844 for (i = 0; i < nunits; ++i)
1845 sel[i] = i | gather_off_nunits;
1846 mask_perm_mask = vect_gen_perm_mask_checked (masktype, sel);
1848 else
1849 gcc_unreachable ();
1851 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
1853 ptr = fold_convert (ptrtype, gather_base);
1854 if (!is_gimple_min_invariant (ptr))
1856 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
1857 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
1858 gcc_assert (!new_bb);
1861 scale = build_int_cst (scaletype, gather_scale);
1863 prev_stmt_info = NULL;
1864 for (j = 0; j < ncopies; ++j)
1866 if (modifier == WIDEN && (j & 1))
1867 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
1868 perm_mask, stmt, gsi);
1869 else if (j == 0)
1870 op = vec_oprnd0
1871 = vect_get_vec_def_for_operand (gather_off, stmt);
1872 else
1873 op = vec_oprnd0
1874 = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);
1876 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
1878 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
1879 == TYPE_VECTOR_SUBPARTS (idxtype));
1880 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
1881 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
1882 new_stmt
1883 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
1884 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1885 op = var;
1888 if (mask_perm_mask && (j & 1))
1889 mask_op = permute_vec_elements (mask_op, mask_op,
1890 mask_perm_mask, stmt, gsi);
1891 else
1893 if (j == 0)
1894 vec_mask = vect_get_vec_def_for_operand (mask, stmt);
1895 else
1897 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
1898 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
1901 mask_op = vec_mask;
1902 if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
1904 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op))
1905 == TYPE_VECTOR_SUBPARTS (masktype));
1906 var = vect_get_new_ssa_name (masktype, vect_simple_var);
1907 mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
1908 new_stmt
1909 = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op);
1910 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1911 mask_op = var;
1915 new_stmt
1916 = gimple_build_call (gather_decl, 5, mask_op, ptr, op, mask_op,
1917 scale);
1919 if (!useless_type_conversion_p (vectype, rettype))
1921 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
1922 == TYPE_VECTOR_SUBPARTS (rettype));
1923 op = vect_get_new_ssa_name (rettype, vect_simple_var);
1924 gimple_call_set_lhs (new_stmt, op);
1925 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1926 var = make_ssa_name (vec_dest);
1927 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
1928 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
1930 else
1932 var = make_ssa_name (vec_dest, new_stmt);
1933 gimple_call_set_lhs (new_stmt, var);
1936 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1938 if (modifier == NARROW)
1940 if ((j & 1) == 0)
1942 prev_res = var;
1943 continue;
1945 var = permute_vec_elements (prev_res, var,
1946 perm_mask, stmt, gsi);
1947 new_stmt = SSA_NAME_DEF_STMT (var);
1950 if (prev_stmt_info == NULL)
1951 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1952 else
1953 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1954 prev_stmt_info = vinfo_for_stmt (new_stmt);
1957 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
1958 from the IL. */
1959 tree lhs = gimple_call_lhs (stmt);
1960 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
1961 set_vinfo_for_stmt (new_stmt, stmt_info);
1962 set_vinfo_for_stmt (stmt, NULL);
1963 STMT_VINFO_STMT (stmt_info) = new_stmt;
1964 gsi_replace (gsi, new_stmt, true);
1965 return true;
1967 else if (is_store)
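/* A masked store: the first copy creates the data reference pointer and
   the initial rhs and mask defs, later copies bump the pointer and take
   def copies; the access alignment is recorded on the pointer and one
   IFN_MASK_STORE internal call is emitted per copy.  */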
1969 tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
1970 prev_stmt_info = NULL;
1971 for (i = 0; i < ncopies; i++)
1973 unsigned align, misalign;
1975 if (i == 0)
1977 tree rhs = gimple_call_arg (stmt, 3);
1978 vec_rhs = vect_get_vec_def_for_operand (rhs, stmt);
1979 vec_mask = vect_get_vec_def_for_operand (mask, stmt);
1980 /* We should have caught mismatched types earlier. */
1981 gcc_assert (useless_type_conversion_p (vectype,
1982 TREE_TYPE (vec_rhs)));
1983 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
1984 NULL_TREE, &dummy, gsi,
1985 &ptr_incr, false, &inv_p);
1986 gcc_assert (!inv_p);
1988 else
1990 vect_is_simple_use (vec_rhs, loop_vinfo, &def_stmt, &dt);
1991 vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs);
1992 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
1993 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
1994 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
1995 TYPE_SIZE_UNIT (vectype));
1998 align = TYPE_ALIGN_UNIT (vectype);
1999 if (aligned_access_p (dr))
2000 misalign = 0;
2001 else if (DR_MISALIGNMENT (dr) == -1)
2003 align = TYPE_ALIGN_UNIT (elem_type);
2004 misalign = 0;
2006 else
2007 misalign = DR_MISALIGNMENT (dr);
2008 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2009 misalign);
2010 new_stmt
2011 = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
2012 gimple_call_arg (stmt, 1),
2013 vec_mask, vec_rhs);
2014 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2015 if (i == 0)
2016 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2017 else
2018 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2019 prev_stmt_info = vinfo_for_stmt (new_stmt);
2022 else
2024 tree vec_mask = NULL_TREE;
2025 prev_stmt_info = NULL;
2026 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
2027 for (i = 0; i < ncopies; i++)
2029 unsigned align, misalign;
2031 if (i == 0)
2033 vec_mask = vect_get_vec_def_for_operand (mask, stmt);
2034 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
2035 NULL_TREE, &dummy, gsi,
2036 &ptr_incr, false, &inv_p);
2037 gcc_assert (!inv_p);
2039 else
2041 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
2042 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2043 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2044 TYPE_SIZE_UNIT (vectype));
2047 align = TYPE_ALIGN_UNIT (vectype);
2048 if (aligned_access_p (dr))
2049 misalign = 0;
2050 else if (DR_MISALIGNMENT (dr) == -1)
2052 align = TYPE_ALIGN_UNIT (elem_type);
2053 misalign = 0;
2055 else
2056 misalign = DR_MISALIGNMENT (dr);
2057 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2058 misalign);
2059 new_stmt
2060 = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
2061 gimple_call_arg (stmt, 1),
2062 vec_mask);
2063 gimple_call_set_lhs (new_stmt, make_ssa_name (vec_dest));
2064 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2065 if (i == 0)
2066 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2067 else
2068 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2069 prev_stmt_info = vinfo_for_stmt (new_stmt);
2073 if (!is_store)
2075 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
2076 from the IL. */
2077 tree lhs = gimple_call_lhs (stmt);
2078 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
2079 set_vinfo_for_stmt (new_stmt, stmt_info);
2080 set_vinfo_for_stmt (stmt, NULL);
2081 STMT_VINFO_STMT (stmt_info) = new_stmt;
2082 gsi_replace (gsi, new_stmt, true);
2085 return true;
2089 /* Function vectorizable_call.
2091 Check if GS performs a function call that can be vectorized.
2092 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2093 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2094 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2096 static bool
2097 vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
2098 slp_tree slp_node)
2100 gcall *stmt;
2101 tree vec_dest;
2102 tree scalar_dest;
2103 tree op, type;
2104 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
2105 stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info;
2106 tree vectype_out, vectype_in;
2107 int nunits_in;
2108 int nunits_out;
2109 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2110 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2111 vec_info *vinfo = stmt_info->vinfo;
2112 tree fndecl, new_temp, rhs_type;
2113 gimple *def_stmt;
2114 enum vect_def_type dt[3]
2115 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
2116 gimple *new_stmt = NULL;
2117 int ncopies, j;
2118 vec<tree> vargs = vNULL;
2119 enum { NARROW, NONE, WIDEN } modifier;
2120 size_t i, nargs;
2121 tree lhs;
2123 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2124 return false;
2126 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2127 return false;
2129 /* Is GS a vectorizable call? */
2130 stmt = dyn_cast <gcall *> (gs);
2131 if (!stmt)
2132 return false;
2134 if (gimple_call_internal_p (stmt)
2135 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
2136 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
2137 return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
2138 slp_node);
2140 if (gimple_call_lhs (stmt) == NULL_TREE
2141 || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
2142 return false;
2144 gcc_checking_assert (!stmt_can_throw_internal (stmt));
2146 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2148 /* Process function arguments. */
2149 rhs_type = NULL_TREE;
2150 vectype_in = NULL_TREE;
2151 nargs = gimple_call_num_args (stmt);
2153 /* Bail out if the function has more than three arguments; we do not have
2154 interesting builtin functions to vectorize with more than two arguments
2155 except for fma.  A call with no arguments is not interesting either. */
2156 if (nargs == 0 || nargs > 3)
2157 return false;
2159 /* Ignore the argument of IFN_GOMP_SIMD_LANE; it is magic. */
2160 if (gimple_call_internal_p (stmt)
2161 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2163 nargs = 0;
2164 rhs_type = unsigned_type_node;
2167 for (i = 0; i < nargs; i++)
2169 tree opvectype;
2171 op = gimple_call_arg (stmt, i);
2173 /* We can only handle calls with arguments of the same type. */
2174 if (rhs_type
2175 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
2177 if (dump_enabled_p ())
2178 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2179 "argument types differ.\n");
2180 return false;
2182 if (!rhs_type)
2183 rhs_type = TREE_TYPE (op);
2185 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[i], &opvectype))
2187 if (dump_enabled_p ())
2188 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2189 "use not simple.\n");
2190 return false;
2193 if (!vectype_in)
2194 vectype_in = opvectype;
2195 else if (opvectype
2196 && opvectype != vectype_in)
2198 if (dump_enabled_p ())
2199 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2200 "argument vector types differ.\n");
2201 return false;
2204 /* If all arguments are external or constant defs, use a vector type with
2205 the same size as the output vector type. */
2206 if (!vectype_in)
2207 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
2208 if (vec_stmt)
2209 gcc_assert (vectype_in);
2210 if (!vectype_in)
2212 if (dump_enabled_p ())
2214 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2215 "no vectype for scalar type ");
2216 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
2217 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2220 return false;
2223 /* FORNOW */
2224 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2225 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
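/* The modifier describes how the call changes element width: NARROW means
   the output elements are narrower than the input elements, so each
   vectorized call consumes two input vectors per argument to fill one
   output vector; WIDEN is the opposite and is rejected in the transform
   switch below ("No current target implements this case").  */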
2226 if (nunits_in == nunits_out / 2)
2227 modifier = NARROW;
2228 else if (nunits_out == nunits_in)
2229 modifier = NONE;
2230 else if (nunits_out == nunits_in / 2)
2231 modifier = WIDEN;
2232 else
2233 return false;
2235 /* For now, we only vectorize functions if a target specific builtin
2236 is available. TODO -- in some cases, it might be profitable to
2237 insert the calls for pieces of the vector, in order to be able
2238 to vectorize other operations in the loop. */
2239 fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
2240 if (fndecl == NULL_TREE)
2242 if (gimple_call_internal_p (stmt)
2243 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE
2244 && !slp_node
2245 && loop_vinfo
2246 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2247 && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
2248 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2249 == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
2251 /* We can handle IFN_GOMP_SIMD_LANE by returning a
2252 { 0, 1, 2, ... vf - 1 } vector. */
2253 gcc_assert (nargs == 0);
2255 else
2257 if (dump_enabled_p ())
2258 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2259 "function is not vectorizable.\n");
2260 return false;
2264 gcc_assert (!gimple_vuse (stmt));
2266 if (slp_node || PURE_SLP_STMT (stmt_info))
2267 ncopies = 1;
2268 else if (modifier == NARROW)
2269 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2270 else
2271 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2273 /* Sanity check: make sure that at least one copy of the vectorized stmt
2274 needs to be generated. */
2275 gcc_assert (ncopies >= 1);
2277 if (!vec_stmt) /* transformation not required. */
2279 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2280 if (dump_enabled_p ())
2281 dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
2282 "\n");
2283 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
2284 return true;
2287 /** Transform. **/
2289 if (dump_enabled_p ())
2290 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
2292 /* Handle def. */
2293 scalar_dest = gimple_call_lhs (stmt);
2294 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2296 prev_stmt_info = NULL;
2297 switch (modifier)
2299 case NONE:
2300 for (j = 0; j < ncopies; ++j)
2302 /* Build argument list for the vectorized call. */
2303 if (j == 0)
2304 vargs.create (nargs);
2305 else
2306 vargs.truncate (0);
2308 if (slp_node)
2310 auto_vec<vec<tree> > vec_defs (nargs);
2311 vec<tree> vec_oprnds0;
2313 for (i = 0; i < nargs; i++)
2314 vargs.quick_push (gimple_call_arg (stmt, i));
2315 vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
2316 vec_oprnds0 = vec_defs[0];
2318 /* Arguments are ready. Create the new vector stmt. */
2319 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
2321 size_t k;
2322 for (k = 0; k < nargs; k++)
2324 vec<tree> vec_oprndsk = vec_defs[k];
2325 vargs[k] = vec_oprndsk[i];
2327 new_stmt = gimple_build_call_vec (fndecl, vargs);
2328 new_temp = make_ssa_name (vec_dest, new_stmt);
2329 gimple_call_set_lhs (new_stmt, new_temp);
2330 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2331 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2334 for (i = 0; i < nargs; i++)
2336 vec<tree> vec_oprndsi = vec_defs[i];
2337 vec_oprndsi.release ();
2339 continue;
2342 for (i = 0; i < nargs; i++)
2344 op = gimple_call_arg (stmt, i);
2345 if (j == 0)
2346 vec_oprnd0
2347 = vect_get_vec_def_for_operand (op, stmt);
2348 else
2350 vec_oprnd0 = gimple_call_arg (new_stmt, i);
2351 vec_oprnd0
2352 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2355 vargs.quick_push (vec_oprnd0);
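/* For IFN_GOMP_SIMD_LANE the result of copy J is simply the constant
   vector { j * nunits_out, ..., j * nunits_out + nunits_out - 1 },
   i.e. the lane numbers handled by this copy.  */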
2358 if (gimple_call_internal_p (stmt)
2359 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2361 tree *v = XALLOCAVEC (tree, nunits_out);
2362 int k;
2363 for (k = 0; k < nunits_out; ++k)
2364 v[k] = build_int_cst (unsigned_type_node, j * nunits_out + k);
2365 tree cst = build_vector (vectype_out, v);
2366 tree new_var
2367 = vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_");
2368 gimple *init_stmt = gimple_build_assign (new_var, cst);
2369 vect_init_vector_1 (stmt, init_stmt, NULL);
2370 new_temp = make_ssa_name (vec_dest);
2371 new_stmt = gimple_build_assign (new_temp, new_var);
2373 else
2375 new_stmt = gimple_build_call_vec (fndecl, vargs);
2376 new_temp = make_ssa_name (vec_dest, new_stmt);
2377 gimple_call_set_lhs (new_stmt, new_temp);
2379 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2381 if (j == 0)
2382 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2383 else
2384 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2386 prev_stmt_info = vinfo_for_stmt (new_stmt);
2389 break;
2391 case NARROW:
2392 for (j = 0; j < ncopies; ++j)
2394 /* Build argument list for the vectorized call. */
2395 if (j == 0)
2396 vargs.create (nargs * 2);
2397 else
2398 vargs.truncate (0);
2400 if (slp_node)
2402 auto_vec<vec<tree> > vec_defs (nargs);
2403 vec<tree> vec_oprnds0;
2405 for (i = 0; i < nargs; i++)
2406 vargs.quick_push (gimple_call_arg (stmt, i));
2407 vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
2408 vec_oprnds0 = vec_defs[0];
2410 /* Arguments are ready. Create the new vector stmt. */
2411 for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
2413 size_t k;
2414 vargs.truncate (0);
2415 for (k = 0; k < nargs; k++)
2417 vec<tree> vec_oprndsk = vec_defs[k];
2418 vargs.quick_push (vec_oprndsk[i]);
2419 vargs.quick_push (vec_oprndsk[i + 1]);
2421 new_stmt = gimple_build_call_vec (fndecl, vargs);
2422 new_temp = make_ssa_name (vec_dest, new_stmt);
2423 gimple_call_set_lhs (new_stmt, new_temp);
2424 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2425 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2428 for (i = 0; i < nargs; i++)
2430 vec<tree> vec_oprndsi = vec_defs[i];
2431 vec_oprndsi.release ();
2433 continue;
2436 for (i = 0; i < nargs; i++)
2438 op = gimple_call_arg (stmt, i);
2439 if (j == 0)
2441 vec_oprnd0
2442 = vect_get_vec_def_for_operand (op, stmt);
2443 vec_oprnd1
2444 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2446 else
2448 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
2449 vec_oprnd0
2450 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
2451 vec_oprnd1
2452 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2455 vargs.quick_push (vec_oprnd0);
2456 vargs.quick_push (vec_oprnd1);
2459 new_stmt = gimple_build_call_vec (fndecl, vargs);
2460 new_temp = make_ssa_name (vec_dest, new_stmt);
2461 gimple_call_set_lhs (new_stmt, new_temp);
2462 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2464 if (j == 0)
2465 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2466 else
2467 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2469 prev_stmt_info = vinfo_for_stmt (new_stmt);
2472 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2474 break;
2476 case WIDEN:
2477 /* No current target implements this case. */
2478 return false;
2481 vargs.release ();
2483 /* The call in STMT might prevent it from being removed in dce.
2484 However, we cannot remove it here, due to the way the ssa name
2485 it defines is mapped to the new definition. So just replace the
2486 rhs of the statement with something harmless. */
2488 if (slp_node)
2489 return true;
2491 type = TREE_TYPE (scalar_dest);
2492 if (is_pattern_stmt_p (stmt_info))
2493 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
2494 else
2495 lhs = gimple_call_lhs (stmt);
2497 if (gimple_call_internal_p (stmt)
2498 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2500 /* Replace uses of the lhs of the GOMP_SIMD_LANE call outside the loop
2501 with vf - 1 rather than 0, i.e. the last iteration of the
2502 vectorized loop. */
2503 imm_use_iterator iter;
2504 use_operand_p use_p;
2505 gimple *use_stmt;
2506 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2508 basic_block use_bb = gimple_bb (use_stmt);
2509 if (use_bb
2510 && !flow_bb_inside_loop_p (LOOP_VINFO_LOOP (loop_vinfo), use_bb))
2512 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2513 SET_USE (use_p, build_int_cst (TREE_TYPE (lhs),
2514 ncopies * nunits_out - 1));
2515 update_stmt (use_stmt);
2520 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
2521 set_vinfo_for_stmt (new_stmt, stmt_info);
2522 set_vinfo_for_stmt (stmt, NULL);
2523 STMT_VINFO_STMT (stmt_info) = new_stmt;
2524 gsi_replace (gsi, new_stmt, false);
2526 return true;
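/* Per-argument information collected while analysing the arguments of a
   call in vectorizable_simd_clone_call and matching it against the
   available simd clones.  */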
2530 struct simd_call_arg_info
2532 tree vectype;
2533 tree op;
2534 enum vect_def_type dt;
2535 HOST_WIDE_INT linear_step;
2536 unsigned int align;
2537 bool simd_lane_linear;
2540 /* Helper function of vectorizable_simd_clone_call. If OP, an SSA_NAME,
2541 is linear within the simd lane (but not within the whole loop), note it in
2542 *ARGINFO. */
2544 static void
2545 vect_simd_lane_linear (tree op, struct loop *loop,
2546 struct simd_call_arg_info *arginfo)
2548 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
2550 if (!is_gimple_assign (def_stmt)
2551 || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
2552 || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
2553 return;
2555 tree base = gimple_assign_rhs1 (def_stmt);
2556 HOST_WIDE_INT linear_step = 0;
2557 tree v = gimple_assign_rhs2 (def_stmt);
2558 while (TREE_CODE (v) == SSA_NAME)
2560 tree t;
2561 def_stmt = SSA_NAME_DEF_STMT (v);
2562 if (is_gimple_assign (def_stmt))
2563 switch (gimple_assign_rhs_code (def_stmt))
2565 case PLUS_EXPR:
2566 t = gimple_assign_rhs2 (def_stmt);
2567 if (linear_step || TREE_CODE (t) != INTEGER_CST)
2568 return;
2569 base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
2570 v = gimple_assign_rhs1 (def_stmt);
2571 continue;
2572 case MULT_EXPR:
2573 t = gimple_assign_rhs2 (def_stmt);
2574 if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
2575 return;
2576 linear_step = tree_to_shwi (t);
2577 v = gimple_assign_rhs1 (def_stmt);
2578 continue;
2579 CASE_CONVERT:
2580 t = gimple_assign_rhs1 (def_stmt);
2581 if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
2582 || (TYPE_PRECISION (TREE_TYPE (v))
2583 < TYPE_PRECISION (TREE_TYPE (t))))
2584 return;
2585 if (!linear_step)
2586 linear_step = 1;
2587 v = t;
2588 continue;
2589 default:
2590 return;
2592 else if (is_gimple_call (def_stmt)
2593 && gimple_call_internal_p (def_stmt)
2594 && gimple_call_internal_fn (def_stmt) == IFN_GOMP_SIMD_LANE
2595 && loop->simduid
2596 && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
2597 && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
2598 == loop->simduid))
2600 if (!linear_step)
2601 linear_step = 1;
2602 arginfo->linear_step = linear_step;
2603 arginfo->op = base;
2604 arginfo->simd_lane_linear = true;
2605 return;
2610 /* Function vectorizable_simd_clone_call.
2612 Check if STMT performs a function call that can be vectorized
2613 by calling a simd clone of the function.
2614 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2615 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2616 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2618 static bool
2619 vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
2620 gimple **vec_stmt, slp_tree slp_node)
2622 tree vec_dest;
2623 tree scalar_dest;
2624 tree op, type;
2625 tree vec_oprnd0 = NULL_TREE;
2626 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
2627 tree vectype;
2628 unsigned int nunits;
2629 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2630 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2631 vec_info *vinfo = stmt_info->vinfo;
2632 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
2633 tree fndecl, new_temp;
2634 gimple *def_stmt;
2635 gimple *new_stmt = NULL;
2636 int ncopies, j;
2637 vec<simd_call_arg_info> arginfo = vNULL;
2638 vec<tree> vargs = vNULL;
2639 size_t i, nargs;
2640 tree lhs, rtype, ratype;
2641 vec<constructor_elt, va_gc> *ret_ctor_elts;
2643 /* Is STMT a vectorizable call? */
2644 if (!is_gimple_call (stmt))
2645 return false;
2647 fndecl = gimple_call_fndecl (stmt);
2648 if (fndecl == NULL_TREE)
2649 return false;
2651 struct cgraph_node *node = cgraph_node::get (fndecl);
2652 if (node == NULL || node->simd_clones == NULL)
2653 return false;
2655 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2656 return false;
2658 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2659 return false;
2661 if (gimple_call_lhs (stmt)
2662 && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
2663 return false;
2665 gcc_checking_assert (!stmt_can_throw_internal (stmt));
2667 vectype = STMT_VINFO_VECTYPE (stmt_info);
2669 if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
2670 return false;
2672 /* FORNOW */
2673 if (slp_node || PURE_SLP_STMT (stmt_info))
2674 return false;
2676 /* Process function arguments. */
2677 nargs = gimple_call_num_args (stmt);
2679 /* Bail out if the function has zero arguments. */
2680 if (nargs == 0)
2681 return false;
2683 arginfo.create (nargs);
2685 for (i = 0; i < nargs; i++)
2687 simd_call_arg_info thisarginfo;
2688 affine_iv iv;
2690 thisarginfo.linear_step = 0;
2691 thisarginfo.align = 0;
2692 thisarginfo.op = NULL_TREE;
2693 thisarginfo.simd_lane_linear = false;
2695 op = gimple_call_arg (stmt, i);
2696 if (!vect_is_simple_use (op, vinfo, &def_stmt, &thisarginfo.dt,
2697 &thisarginfo.vectype)
2698 || thisarginfo.dt == vect_uninitialized_def)
2700 if (dump_enabled_p ())
2701 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2702 "use not simple.\n");
2703 arginfo.release ();
2704 return false;
2707 if (thisarginfo.dt == vect_constant_def
2708 || thisarginfo.dt == vect_external_def)
2709 gcc_assert (thisarginfo.vectype == NULL_TREE);
2710 else
2711 gcc_assert (thisarginfo.vectype != NULL_TREE);
2713 /* For linear arguments, the analyze phase should have saved
2714 the base and step in STMT_VINFO_SIMD_CLONE_INFO. */
2715 if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
2716 && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
2718 gcc_assert (vec_stmt);
2719 thisarginfo.linear_step
2720 = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
2721 thisarginfo.op
2722 = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
2723 thisarginfo.simd_lane_linear
2724 = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
2725 == boolean_true_node);
2726 /* If loop has been peeled for alignment, we need to adjust it. */
2727 tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
2728 tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
2729 if (n1 != n2 && !thisarginfo.simd_lane_linear)
2731 tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
2732 tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
2733 tree opt = TREE_TYPE (thisarginfo.op);
2734 bias = fold_convert (TREE_TYPE (step), bias);
2735 bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
2736 thisarginfo.op
2737 = fold_build2 (POINTER_TYPE_P (opt)
2738 ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
2739 thisarginfo.op, bias);
2742 else if (!vec_stmt
2743 && thisarginfo.dt != vect_constant_def
2744 && thisarginfo.dt != vect_external_def
2745 && loop_vinfo
2746 && TREE_CODE (op) == SSA_NAME
2747 && simple_iv (loop, loop_containing_stmt (stmt), op,
2748 &iv, false)
2749 && tree_fits_shwi_p (iv.step))
2751 thisarginfo.linear_step = tree_to_shwi (iv.step);
2752 thisarginfo.op = iv.base;
2754 else if ((thisarginfo.dt == vect_constant_def
2755 || thisarginfo.dt == vect_external_def)
2756 && POINTER_TYPE_P (TREE_TYPE (op)))
2757 thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
2758 /* Addresses of array elements indexed by GOMP_SIMD_LANE are
2759 linear too. */
2760 if (POINTER_TYPE_P (TREE_TYPE (op))
2761 && !thisarginfo.linear_step
2762 && !vec_stmt
2763 && thisarginfo.dt != vect_constant_def
2764 && thisarginfo.dt != vect_external_def
2765 && loop_vinfo
2766 && !slp_node
2767 && TREE_CODE (op) == SSA_NAME)
2768 vect_simd_lane_linear (op, loop, &thisarginfo);
2770 arginfo.quick_push (thisarginfo);
2773 unsigned int badness = 0;
2774 struct cgraph_node *bestn = NULL;
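/* Choose the simd clone to use.  Each candidate clone gets a badness
   score: a simdlen smaller than the vectorization factor and target
   reluctance increase it, while mismatched argument kinds, insufficient
   argument alignment or (for now) inbranch clones rule the clone out.
   The clone with the lowest score wins; if one was already recorded in
   STMT_VINFO_SIMD_CLONE_INFO during analysis it is simply reused.  */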
2775 if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
2776 bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
2777 else
2778 for (struct cgraph_node *n = node->simd_clones; n != NULL;
2779 n = n->simdclone->next_clone)
2781 unsigned int this_badness = 0;
2782 if (n->simdclone->simdlen
2783 > (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
2784 || n->simdclone->nargs != nargs)
2785 continue;
2786 if (n->simdclone->simdlen
2787 < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo))
2788 this_badness += (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
2789 - exact_log2 (n->simdclone->simdlen)) * 1024;
2790 if (n->simdclone->inbranch)
2791 this_badness += 2048;
2792 int target_badness = targetm.simd_clone.usable (n);
2793 if (target_badness < 0)
2794 continue;
2795 this_badness += target_badness * 512;
2796 /* FORNOW: Code to pass the mask argument still needs to be added. */
2797 if (n->simdclone->inbranch)
2798 continue;
2799 for (i = 0; i < nargs; i++)
2801 switch (n->simdclone->args[i].arg_type)
2803 case SIMD_CLONE_ARG_TYPE_VECTOR:
2804 if (!useless_type_conversion_p
2805 (n->simdclone->args[i].orig_type,
2806 TREE_TYPE (gimple_call_arg (stmt, i))))
2807 i = -1;
2808 else if (arginfo[i].dt == vect_constant_def
2809 || arginfo[i].dt == vect_external_def
2810 || arginfo[i].linear_step)
2811 this_badness += 64;
2812 break;
2813 case SIMD_CLONE_ARG_TYPE_UNIFORM:
2814 if (arginfo[i].dt != vect_constant_def
2815 && arginfo[i].dt != vect_external_def)
2816 i = -1;
2817 break;
2818 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
2819 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
2820 if (arginfo[i].dt == vect_constant_def
2821 || arginfo[i].dt == vect_external_def
2822 || (arginfo[i].linear_step
2823 != n->simdclone->args[i].linear_step))
2824 i = -1;
2825 break;
2826 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
2827 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
2828 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
2829 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
2830 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
2831 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
2832 /* FORNOW */
2833 i = -1;
2834 break;
2835 case SIMD_CLONE_ARG_TYPE_MASK:
2836 gcc_unreachable ();
2838 if (i == (size_t) -1)
2839 break;
2840 if (n->simdclone->args[i].alignment > arginfo[i].align)
2842 i = -1;
2843 break;
2845 if (arginfo[i].align)
2846 this_badness += (exact_log2 (arginfo[i].align)
2847 - exact_log2 (n->simdclone->args[i].alignment));
2849 if (i == (size_t) -1)
2850 continue;
2851 if (bestn == NULL || this_badness < badness)
2853 bestn = n;
2854 badness = this_badness;
2858 if (bestn == NULL)
2860 arginfo.release ();
2861 return false;
2864 for (i = 0; i < nargs; i++)
2865 if ((arginfo[i].dt == vect_constant_def
2866 || arginfo[i].dt == vect_external_def)
2867 && bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
2869 arginfo[i].vectype
2870 = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
2871 i)));
2872 if (arginfo[i].vectype == NULL
2873 || (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
2874 > bestn->simdclone->simdlen))
2876 arginfo.release ();
2877 return false;
2881 fndecl = bestn->decl;
2882 nunits = bestn->simdclone->simdlen;
2883 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
2885 /* If the function isn't const, only allow it in simd loops where the user
2886 has asserted that at least nunits consecutive iterations can be
2887 performed using SIMD instructions. */
2888 if ((loop == NULL || (unsigned) loop->safelen < nunits)
2889 && gimple_vuse (stmt))
2891 arginfo.release ();
2892 return false;
2895 /* Sanity check: make sure that at least one copy of the vectorized stmt
2896 needs to be generated. */
2897 gcc_assert (ncopies >= 1);
2899 if (!vec_stmt) /* transformation not required. */
2901 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
2902 for (i = 0; i < nargs; i++)
2903 if (bestn->simdclone->args[i].arg_type
2904 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
2906 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
2907 + 1);
2908 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
2909 tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
2910 ? size_type_node : TREE_TYPE (arginfo[i].op);
2911 tree ls = build_int_cst (lst, arginfo[i].linear_step);
2912 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
2913 tree sll = arginfo[i].simd_lane_linear
2914 ? boolean_true_node : boolean_false_node;
2915 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
2917 STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
2918 if (dump_enabled_p ())
2919 dump_printf_loc (MSG_NOTE, vect_location,
2920 "=== vectorizable_simd_clone_call ===\n");
2921 /* vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
2922 arginfo.release ();
2923 return true;
2926 /** Transform. **/
2928 if (dump_enabled_p ())
2929 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
2931 /* Handle def. */
2932 scalar_dest = gimple_call_lhs (stmt);
2933 vec_dest = NULL_TREE;
2934 rtype = NULL_TREE;
2935 ratype = NULL_TREE;
2936 if (scalar_dest)
2938 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2939 rtype = TREE_TYPE (TREE_TYPE (fndecl));
2940 if (TREE_CODE (rtype) == ARRAY_TYPE)
2942 ratype = rtype;
2943 rtype = TREE_TYPE (ratype);
2947 prev_stmt_info = NULL;
2948 for (j = 0; j < ncopies; ++j)
2950 /* Build argument list for the vectorized call. */
2951 if (j == 0)
2952 vargs.create (nargs);
2953 else
2954 vargs.truncate (0);
2956 for (i = 0; i < nargs; i++)
2958 unsigned int k, l, m, o;
2959 tree atype;
2960 op = gimple_call_arg (stmt, i);
2961 switch (bestn->simdclone->args[i].arg_type)
2963 case SIMD_CLONE_ARG_TYPE_VECTOR:
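/* Marshal a vector argument.  If the clone expects narrower vectors than
   the vectorized operand provides, extract the pieces with BIT_FIELD_REFs;
   if it expects wider ones, glue several vector defs together with a
   CONSTRUCTOR.  O such arguments are pushed per call.  */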
2964 atype = bestn->simdclone->args[i].vector_type;
2965 o = nunits / TYPE_VECTOR_SUBPARTS (atype);
2966 for (m = j * o; m < (j + 1) * o; m++)
2968 if (TYPE_VECTOR_SUBPARTS (atype)
2969 < TYPE_VECTOR_SUBPARTS (arginfo[i].vectype))
2971 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
2972 k = (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
2973 / TYPE_VECTOR_SUBPARTS (atype));
2974 gcc_assert ((k & (k - 1)) == 0);
2975 if (m == 0)
2976 vec_oprnd0
2977 = vect_get_vec_def_for_operand (op, stmt);
2978 else
2980 vec_oprnd0 = arginfo[i].op;
2981 if ((m & (k - 1)) == 0)
2982 vec_oprnd0
2983 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
2984 vec_oprnd0);
2986 arginfo[i].op = vec_oprnd0;
2987 vec_oprnd0
2988 = build3 (BIT_FIELD_REF, atype, vec_oprnd0,
2989 size_int (prec),
2990 bitsize_int ((m & (k - 1)) * prec));
2991 new_stmt
2992 = gimple_build_assign (make_ssa_name (atype),
2993 vec_oprnd0);
2994 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2995 vargs.safe_push (gimple_assign_lhs (new_stmt));
2997 else
2999 k = (TYPE_VECTOR_SUBPARTS (atype)
3000 / TYPE_VECTOR_SUBPARTS (arginfo[i].vectype));
3001 gcc_assert ((k & (k - 1)) == 0);
3002 vec<constructor_elt, va_gc> *ctor_elts;
3003 if (k != 1)
3004 vec_alloc (ctor_elts, k);
3005 else
3006 ctor_elts = NULL;
3007 for (l = 0; l < k; l++)
3009 if (m == 0 && l == 0)
3010 vec_oprnd0
3011 = vect_get_vec_def_for_operand (op, stmt);
3012 else
3013 vec_oprnd0
3014 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
3015 arginfo[i].op);
3016 arginfo[i].op = vec_oprnd0;
3017 if (k == 1)
3018 break;
3019 CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
3020 vec_oprnd0);
3022 if (k == 1)
3023 vargs.safe_push (vec_oprnd0);
3024 else
3026 vec_oprnd0 = build_constructor (atype, ctor_elts);
3027 new_stmt
3028 = gimple_build_assign (make_ssa_name (atype),
3029 vec_oprnd0);
3030 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3031 vargs.safe_push (gimple_assign_lhs (new_stmt));
3035 break;
3036 case SIMD_CLONE_ARG_TYPE_UNIFORM:
3037 vargs.safe_push (op);
3038 break;
3039 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
3040 if (j == 0)
3042 gimple_seq stmts;
3043 arginfo[i].op
3044 = force_gimple_operand (arginfo[i].op, &stmts, true,
3045 NULL_TREE);
3046 if (stmts != NULL)
3048 basic_block new_bb;
3049 edge pe = loop_preheader_edge (loop);
3050 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
3051 gcc_assert (!new_bb);
3053 if (arginfo[i].simd_lane_linear)
3055 vargs.safe_push (arginfo[i].op);
3056 break;
3058 tree phi_res = copy_ssa_name (op);
3059 gphi *new_phi = create_phi_node (phi_res, loop->header);
3060 set_vinfo_for_stmt (new_phi,
3061 new_stmt_vec_info (new_phi, loop_vinfo));
3062 add_phi_arg (new_phi, arginfo[i].op,
3063 loop_preheader_edge (loop), UNKNOWN_LOCATION);
3064 enum tree_code code
3065 = POINTER_TYPE_P (TREE_TYPE (op))
3066 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3067 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3068 ? sizetype : TREE_TYPE (op);
3069 widest_int cst
3070 = wi::mul (bestn->simdclone->args[i].linear_step,
3071 ncopies * nunits);
3072 tree tcst = wide_int_to_tree (type, cst);
3073 tree phi_arg = copy_ssa_name (op);
3074 new_stmt
3075 = gimple_build_assign (phi_arg, code, phi_res, tcst);
3076 gimple_stmt_iterator si = gsi_after_labels (loop->header);
3077 gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
3078 set_vinfo_for_stmt (new_stmt,
3079 new_stmt_vec_info (new_stmt, loop_vinfo));
3080 add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
3081 UNKNOWN_LOCATION);
3082 arginfo[i].op = phi_res;
3083 vargs.safe_push (phi_res);
3085 else
3087 enum tree_code code
3088 = POINTER_TYPE_P (TREE_TYPE (op))
3089 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3090 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3091 ? sizetype : TREE_TYPE (op);
3092 widest_int cst
3093 = wi::mul (bestn->simdclone->args[i].linear_step,
3094 j * nunits);
3095 tree tcst = wide_int_to_tree (type, cst);
3096 new_temp = make_ssa_name (TREE_TYPE (op));
3097 new_stmt = gimple_build_assign (new_temp, code,
3098 arginfo[i].op, tcst);
3099 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3100 vargs.safe_push (new_temp);
3102 break;
3103 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
3104 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
3105 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
3106 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
3107 default:
3108 gcc_unreachable ();
3112 new_stmt = gimple_build_call_vec (fndecl, vargs);
3113 if (vec_dest)
3115 gcc_assert (ratype || TYPE_VECTOR_SUBPARTS (rtype) == nunits);
3116 if (ratype)
3117 new_temp = create_tmp_var (ratype);
3118 else if (TYPE_VECTOR_SUBPARTS (vectype)
3119 == TYPE_VECTOR_SUBPARTS (rtype))
3120 new_temp = make_ssa_name (vec_dest, new_stmt);
3121 else
3122 new_temp = make_ssa_name (rtype, new_stmt);
3123 gimple_call_set_lhs (new_stmt, new_temp);
3125 vect_finish_stmt_generation (stmt, new_stmt, gsi);
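/* Bring the clone's return value into vectype-sized vector statements.
   If the clone returns more elements per call than vectype holds (or
   returns an array), split the result into pieces with BIT_FIELD_REFs or
   loads from the array; if it returns fewer, accumulate several call
   results in a CONSTRUCTOR before recording one vector statement.  */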
3127 if (vec_dest)
3129 if (TYPE_VECTOR_SUBPARTS (vectype) < nunits)
3131 unsigned int k, l;
3132 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
3133 k = nunits / TYPE_VECTOR_SUBPARTS (vectype);
3134 gcc_assert ((k & (k - 1)) == 0);
3135 for (l = 0; l < k; l++)
3137 tree t;
3138 if (ratype)
3140 t = build_fold_addr_expr (new_temp);
3141 t = build2 (MEM_REF, vectype, t,
3142 build_int_cst (TREE_TYPE (t),
3143 l * prec / BITS_PER_UNIT));
3145 else
3146 t = build3 (BIT_FIELD_REF, vectype, new_temp,
3147 size_int (prec), bitsize_int (l * prec));
3148 new_stmt
3149 = gimple_build_assign (make_ssa_name (vectype), t);
3150 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3151 if (j == 0 && l == 0)
3152 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3153 else
3154 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3156 prev_stmt_info = vinfo_for_stmt (new_stmt);
3159 if (ratype)
3161 tree clobber = build_constructor (ratype, NULL);
3162 TREE_THIS_VOLATILE (clobber) = 1;
3163 new_stmt = gimple_build_assign (new_temp, clobber);
3164 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3166 continue;
3168 else if (TYPE_VECTOR_SUBPARTS (vectype) > nunits)
3170 unsigned int k = (TYPE_VECTOR_SUBPARTS (vectype)
3171 / TYPE_VECTOR_SUBPARTS (rtype));
3172 gcc_assert ((k & (k - 1)) == 0);
3173 if ((j & (k - 1)) == 0)
3174 vec_alloc (ret_ctor_elts, k);
3175 if (ratype)
3177 unsigned int m, o = nunits / TYPE_VECTOR_SUBPARTS (rtype);
3178 for (m = 0; m < o; m++)
3180 tree tem = build4 (ARRAY_REF, rtype, new_temp,
3181 size_int (m), NULL_TREE, NULL_TREE);
3182 new_stmt
3183 = gimple_build_assign (make_ssa_name (rtype), tem);
3184 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3185 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
3186 gimple_assign_lhs (new_stmt));
3188 tree clobber = build_constructor (ratype, NULL);
3189 TREE_THIS_VOLATILE (clobber) = 1;
3190 new_stmt = gimple_build_assign (new_temp, clobber);
3191 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3193 else
3194 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
3195 if ((j & (k - 1)) != k - 1)
3196 continue;
3197 vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
3198 new_stmt
3199 = gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
3200 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3202 if ((unsigned) j == k - 1)
3203 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3204 else
3205 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3207 prev_stmt_info = vinfo_for_stmt (new_stmt);
3208 continue;
3210 else if (ratype)
3212 tree t = build_fold_addr_expr (new_temp);
3213 t = build2 (MEM_REF, vectype, t,
3214 build_int_cst (TREE_TYPE (t), 0));
3215 new_stmt
3216 = gimple_build_assign (make_ssa_name (vec_dest), t);
3217 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3218 tree clobber = build_constructor (ratype, NULL);
3219 TREE_THIS_VOLATILE (clobber) = 1;
3220 vect_finish_stmt_generation (stmt,
3221 gimple_build_assign (new_temp,
3222 clobber), gsi);
3226 if (j == 0)
3227 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3228 else
3229 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3231 prev_stmt_info = vinfo_for_stmt (new_stmt);
3234 vargs.release ();
3236 /* The call in STMT might prevent it from being removed in dce.
3237 However, we cannot remove it here, due to the way the ssa name
3238 it defines is mapped to the new definition. So just replace the
3239 rhs of the statement with something harmless. */
3241 if (slp_node)
3242 return true;
3244 if (scalar_dest)
3246 type = TREE_TYPE (scalar_dest);
3247 if (is_pattern_stmt_p (stmt_info))
3248 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
3249 else
3250 lhs = gimple_call_lhs (stmt);
3251 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
3253 else
3254 new_stmt = gimple_build_nop ();
3255 set_vinfo_for_stmt (new_stmt, stmt_info);
3256 set_vinfo_for_stmt (stmt, NULL);
3257 STMT_VINFO_STMT (stmt_info) = new_stmt;
3258 gsi_replace (gsi, new_stmt, true);
3259 unlink_stmt_vdef (stmt);
3261 return true;
3265 /* Function vect_gen_widened_results_half
3267 Create a vector stmt whose code, number of arguments, and result
3268 variable are CODE, OP_TYPE, and VEC_DEST, and whose arguments are
3269 VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at BSI.
3270 In the case that CODE is a CALL_EXPR, this means that a call to DECL
3271 needs to be created (DECL is a function-decl of a target-builtin).
3272 STMT is the original scalar stmt that we are vectorizing. */
3274 static gimple *
3275 vect_gen_widened_results_half (enum tree_code code,
3276 tree decl,
3277 tree vec_oprnd0, tree vec_oprnd1, int op_type,
3278 tree vec_dest, gimple_stmt_iterator *gsi,
3279 gimple *stmt)
3281 gimple *new_stmt;
3282 tree new_temp;
3284 /* Generate half of the widened result: */
3285 if (code == CALL_EXPR)
3287 /* Target specific support */
3288 if (op_type == binary_op)
3289 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
3290 else
3291 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
3292 new_temp = make_ssa_name (vec_dest, new_stmt);
3293 gimple_call_set_lhs (new_stmt, new_temp);
3295 else
3297 /* Generic support */
3298 gcc_assert (op_type == TREE_CODE_LENGTH (code));
3299 if (op_type != binary_op)
3300 vec_oprnd1 = NULL;
3301 new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
3302 new_temp = make_ssa_name (vec_dest, new_stmt);
3303 gimple_assign_set_lhs (new_stmt, new_temp);
3305 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3307 return new_stmt;
3311 /* Get vectorized definitions for loop-based vectorization. For the first
3312 operand we call vect_get_vec_def_for_operand() (with OPRND containing
3313 scalar operand), and for the rest we get a copy with
3314 vect_get_vec_def_for_stmt_copy() using the previous vector definition
3315 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
3316 The vectors are collected into VEC_OPRNDS. */
3318 static void
3319 vect_get_loop_based_defs (tree *oprnd, gimple *stmt, enum vect_def_type dt,
3320 vec<tree> *vec_oprnds, int multi_step_cvt)
3322 tree vec_oprnd;
3324 /* Get first vector operand. */
3325 /* All the vector operands except the very first one (that is the scalar oprnd)
3326 are stmt copies. */
3327 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
3328 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt);
3329 else
3330 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
3332 vec_oprnds->quick_push (vec_oprnd);
3334 /* Get second vector operand. */
3335 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
3336 vec_oprnds->quick_push (vec_oprnd);
3338 *oprnd = vec_oprnd;
3340 /* For conversion in multiple steps, continue to get operands
3341 recursively. */
3342 if (multi_step_cvt)
3343 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
3347 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
3348 For multi-step conversions store the resulting vectors and call the function
3349 recursively. */
3351 static void
3352 vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
3353 int multi_step_cvt, gimple *stmt,
3354 vec<tree> vec_dsts,
3355 gimple_stmt_iterator *gsi,
3356 slp_tree slp_node, enum tree_code code,
3357 stmt_vec_info *prev_stmt_info)
3359 unsigned int i;
3360 tree vop0, vop1, new_tmp, vec_dest;
3361 gimple *new_stmt;
3362 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3364 vec_dest = vec_dsts.pop ();
3366 for (i = 0; i < vec_oprnds->length (); i += 2)
3368 /* Create demotion operation. */
3369 vop0 = (*vec_oprnds)[i];
3370 vop1 = (*vec_oprnds)[i + 1];
3371 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
3372 new_tmp = make_ssa_name (vec_dest, new_stmt);
3373 gimple_assign_set_lhs (new_stmt, new_tmp);
3374 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3376 if (multi_step_cvt)
3377 /* Store the resulting vector for next recursive call. */
3378 (*vec_oprnds)[i/2] = new_tmp;
3379 else
3381 /* This is the last step of the conversion sequence. Store the
3382 vectors in SLP_NODE or in vector info of the scalar statement
3383 (or in STMT_VINFO_RELATED_STMT chain). */
3384 if (slp_node)
3385 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3386 else
3388 if (!*prev_stmt_info)
3389 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3390 else
3391 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
3393 *prev_stmt_info = vinfo_for_stmt (new_stmt);
3398 /* For multi-step demotion operations we first generate demotion operations
3399 from the source type to the intermediate types, and then combine the
3400 results (stored in VEC_OPRNDS) in a demotion operation to the destination
3401 type. */
3402 if (multi_step_cvt)
3404 /* At each level of recursion we have half of the operands we had at the
3405 previous level. */
3406 vec_oprnds->truncate ((i+1)/2);
3407 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
3408 stmt, vec_dsts, gsi, slp_node,
3409 VEC_PACK_TRUNC_EXPR,
3410 prev_stmt_info);
3413 vec_dsts.quick_push (vec_dest);
3417 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
3418 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
3419 the resulting vectors and call the function recursively. */
3421 static void
3422 vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
3423 vec<tree> *vec_oprnds1,
3424 gimple *stmt, tree vec_dest,
3425 gimple_stmt_iterator *gsi,
3426 enum tree_code code1,
3427 enum tree_code code2, tree decl1,
3428 tree decl2, int op_type)
3430 int i;
3431 tree vop0, vop1, new_tmp1, new_tmp2;
3432 gimple *new_stmt1, *new_stmt2;
3433 vec<tree> vec_tmp = vNULL;
3435 vec_tmp.create (vec_oprnds0->length () * 2);
3436 FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
3438 if (op_type == binary_op)
3439 vop1 = (*vec_oprnds1)[i];
3440 else
3441 vop1 = NULL_TREE;
3443 /* Generate the two halves of promotion operation. */
3444 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
3445 op_type, vec_dest, gsi, stmt);
3446 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
3447 op_type, vec_dest, gsi, stmt);
3448 if (is_gimple_call (new_stmt1))
3450 new_tmp1 = gimple_call_lhs (new_stmt1);
3451 new_tmp2 = gimple_call_lhs (new_stmt2);
3453 else
3455 new_tmp1 = gimple_assign_lhs (new_stmt1);
3456 new_tmp2 = gimple_assign_lhs (new_stmt2);
3459 /* Store the results for the next step. */
3460 vec_tmp.quick_push (new_tmp1);
3461 vec_tmp.quick_push (new_tmp2);
3464 vec_oprnds0->release ();
3465 *vec_oprnds0 = vec_tmp;
3469 /* Check if STMT performs a conversion operation, that can be vectorized.
3470 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3471 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3472 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3474 static bool
3475 vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
3476 gimple **vec_stmt, slp_tree slp_node)
3478 tree vec_dest;
3479 tree scalar_dest;
3480 tree op0, op1 = NULL_TREE;
3481 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
3482 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3483 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3484 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
3485 enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
3486 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
3487 tree new_temp;
3488 gimple *def_stmt;
3489 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
3490 gimple *new_stmt = NULL;
3491 stmt_vec_info prev_stmt_info;
3492 int nunits_in;
3493 int nunits_out;
3494 tree vectype_out, vectype_in;
3495 int ncopies, i, j;
3496 tree lhs_type, rhs_type;
3497 enum { NARROW, NONE, WIDEN } modifier;
3498 vec<tree> vec_oprnds0 = vNULL;
3499 vec<tree> vec_oprnds1 = vNULL;
3500 tree vop0;
3501 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3502 vec_info *vinfo = stmt_info->vinfo;
3503 int multi_step_cvt = 0;
3504 vec<tree> vec_dsts = vNULL;
3505 vec<tree> interm_types = vNULL;
3506 tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
3507 int op_type;
3508 machine_mode rhs_mode;
3509 unsigned short fltsz;
3511 /* Is STMT a vectorizable conversion? */
3513 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3514 return false;
3516 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3517 return false;
3519 if (!is_gimple_assign (stmt))
3520 return false;
3522 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
3523 return false;
3525 code = gimple_assign_rhs_code (stmt);
3526 if (!CONVERT_EXPR_CODE_P (code)
3527 && code != FIX_TRUNC_EXPR
3528 && code != FLOAT_EXPR
3529 && code != WIDEN_MULT_EXPR
3530 && code != WIDEN_LSHIFT_EXPR)
3531 return false;
3533 op_type = TREE_CODE_LENGTH (code);
3535 /* Check types of lhs and rhs. */
3536 scalar_dest = gimple_assign_lhs (stmt);
3537 lhs_type = TREE_TYPE (scalar_dest);
3538 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3540 op0 = gimple_assign_rhs1 (stmt);
3541 rhs_type = TREE_TYPE (op0);
3543 if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
3544 && !((INTEGRAL_TYPE_P (lhs_type)
3545 && INTEGRAL_TYPE_P (rhs_type))
3546 || (SCALAR_FLOAT_TYPE_P (lhs_type)
3547 && SCALAR_FLOAT_TYPE_P (rhs_type))))
3548 return false;
3550 if ((INTEGRAL_TYPE_P (lhs_type)
3551 && (TYPE_PRECISION (lhs_type)
3552 != GET_MODE_PRECISION (TYPE_MODE (lhs_type))))
3553 || (INTEGRAL_TYPE_P (rhs_type)
3554 && (TYPE_PRECISION (rhs_type)
3555 != GET_MODE_PRECISION (TYPE_MODE (rhs_type)))))
3557 if (dump_enabled_p ())
3558 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3559 "type conversion to/from bit-precision unsupported."
3560 "\n");
3561 return false;
3564 /* Check the operands of the operation. */
3565 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype_in))
3567 if (dump_enabled_p ())
3568 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3569 "use not simple.\n");
3570 return false;
3572 if (op_type == binary_op)
3574 bool ok;
3576 op1 = gimple_assign_rhs2 (stmt);
3577 gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
3578 /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
3579 OP1. */
3580 if (CONSTANT_CLASS_P (op0))
3581 ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &vectype_in);
3582 else
3583 ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]);
3585 if (!ok)
3587 if (dump_enabled_p ())
3588 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3589 "use not simple.\n");
3590 return false;
3594 /* If op0 is an external or constant def, use a vector type of
3595 the same size as the output vector type. */
3596 if (!vectype_in)
3597 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
3598 if (vec_stmt)
3599 gcc_assert (vectype_in);
3600 if (!vectype_in)
3602 if (dump_enabled_p ())
3604 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3605 "no vectype for scalar type ");
3606 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
3607 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3610 return false;
3613 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
3614 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
3615 if (nunits_in < nunits_out)
3616 modifier = NARROW;
3617 else if (nunits_out == nunits_in)
3618 modifier = NONE;
3619 else
3620 modifier = WIDEN;
3622 /* Multiple types in SLP are handled by creating the appropriate number of
3623 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3624 case of SLP. */
3625 if (slp_node || PURE_SLP_STMT (stmt_info))
3626 ncopies = 1;
3627 else if (modifier == NARROW)
3628 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
3629 else
3630 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
3632 /* Sanity check: make sure that at least one copy of the vectorized stmt
3633 needs to be generated. */
3634 gcc_assert (ncopies >= 1);
3636 /* Supportable by target? */
3637 switch (modifier)
3639 case NONE:
3640 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
3641 return false;
3642 if (supportable_convert_operation (code, vectype_out, vectype_in,
3643 &decl1, &code1))
3644 break;
3645 /* FALLTHRU */
3646 unsupported:
3647 if (dump_enabled_p ())
3648 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3649 "conversion not supported by target.\n");
3650 return false;
3652 case WIDEN:
3653 if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
3654 &code1, &code2, &multi_step_cvt,
3655 &interm_types))
3657 /* Binary widening operation can only be supported directly by the
3658 architecture. */
3659 gcc_assert (!(multi_step_cvt && op_type == binary_op));
3660 break;
3663 if (code != FLOAT_EXPR
3664 || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
3665 <= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
3666 goto unsupported;
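/* An int-to-float widening that is not supported directly may still be
   done in two steps: widen the integer to an intermediate integer type
   (CVT_TYPE) and convert that to the float type.  Try successively wider
   intermediate modes, up to the size of the float type.  */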
3668 rhs_mode = TYPE_MODE (rhs_type);
3669 fltsz = GET_MODE_SIZE (TYPE_MODE (lhs_type));
3670 for (rhs_mode = GET_MODE_2XWIDER_MODE (TYPE_MODE (rhs_type));
3671 rhs_mode != VOIDmode && GET_MODE_SIZE (rhs_mode) <= fltsz;
3672 rhs_mode = GET_MODE_2XWIDER_MODE (rhs_mode))
3674 cvt_type
3675 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
3676 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
3677 if (cvt_type == NULL_TREE)
3678 goto unsupported;
3680 if (GET_MODE_SIZE (rhs_mode) == fltsz)
3682 if (!supportable_convert_operation (code, vectype_out,
3683 cvt_type, &decl1, &codecvt1))
3684 goto unsupported;
3686 else if (!supportable_widening_operation (code, stmt, vectype_out,
3687 cvt_type, &codecvt1,
3688 &codecvt2, &multi_step_cvt,
3689 &interm_types))
3690 continue;
3691 else
3692 gcc_assert (multi_step_cvt == 0);
3694 if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
3695 vectype_in, &code1, &code2,
3696 &multi_step_cvt, &interm_types))
3697 break;
3700 if (rhs_mode == VOIDmode || GET_MODE_SIZE (rhs_mode) > fltsz)
3701 goto unsupported;
3703 if (GET_MODE_SIZE (rhs_mode) == fltsz)
3704 codecvt2 = ERROR_MARK;
3705 else
3707 multi_step_cvt++;
3708 interm_types.safe_push (cvt_type);
3709 cvt_type = NULL_TREE;
3711 break;
3713 case NARROW:
3714 gcc_assert (op_type == unary_op);
3715 if (supportable_narrowing_operation (code, vectype_out, vectype_in,
3716 &code1, &multi_step_cvt,
3717 &interm_types))
3718 break;
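/* A float-to-int narrowing (FIX_TRUNC_EXPR) that is not supported
   directly may still be done by converting to an integer type of the
   same width as the float (CVT_TYPE) and then narrowing that integer
   result.  */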
3720 if (code != FIX_TRUNC_EXPR
3721 || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
3722 >= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
3723 goto unsupported;
3725 rhs_mode = TYPE_MODE (rhs_type);
3726 cvt_type
3727 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
3728 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
3729 if (cvt_type == NULL_TREE)
3730 goto unsupported;
3731 if (!supportable_convert_operation (code, cvt_type, vectype_in,
3732 &decl1, &codecvt1))
3733 goto unsupported;
3734 if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
3735 &code1, &multi_step_cvt,
3736 &interm_types))
3737 break;
3738 goto unsupported;
3740 default:
3741 gcc_unreachable ();
3744 if (!vec_stmt) /* transformation not required. */
3746 if (dump_enabled_p ())
3747 dump_printf_loc (MSG_NOTE, vect_location,
3748 "=== vectorizable_conversion ===\n");
3749 if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
3751 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
3752 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
3754 else if (modifier == NARROW)
3756 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
3757 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
3759 else
3761 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
3762 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
3764 interm_types.release ();
3765 return true;
3768 /** Transform. **/
3769 if (dump_enabled_p ())
3770 dump_printf_loc (MSG_NOTE, vect_location,
3771 "transform conversion. ncopies = %d.\n", ncopies);
3773 if (op_type == binary_op)
3775 if (CONSTANT_CLASS_P (op0))
3776 op0 = fold_convert (TREE_TYPE (op1), op0);
3777 else if (CONSTANT_CLASS_P (op1))
3778 op1 = fold_convert (TREE_TYPE (op0), op1);
3781 /* In case of multi-step conversion, we first generate conversion operations
3782 to the intermediate types, and then from those types to the final one.
3783 We create vector destinations for the intermediate types (TYPES) received
3784 from supportable_*_operation, and store them in the correct order
3785 for future use in vect_create_vectorized_*_stmts (). */
3786 vec_dsts.create (multi_step_cvt + 1);
3787 vec_dest = vect_create_destination_var (scalar_dest,
3788 (cvt_type && modifier == WIDEN)
3789 ? cvt_type : vectype_out);
3790 vec_dsts.quick_push (vec_dest);
3792 if (multi_step_cvt)
3794 for (i = interm_types.length () - 1;
3795 interm_types.iterate (i, &intermediate_type); i--)
3797 vec_dest = vect_create_destination_var (scalar_dest,
3798 intermediate_type);
3799 vec_dsts.quick_push (vec_dest);
3803 if (cvt_type)
3804 vec_dest = vect_create_destination_var (scalar_dest,
3805 modifier == WIDEN
3806 ? vectype_out : cvt_type);
3808 if (!slp_node)
3810 if (modifier == WIDEN)
3812 vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
3813 if (op_type == binary_op)
3814 vec_oprnds1.create (1);
3816 else if (modifier == NARROW)
3817 vec_oprnds0.create (
3818 2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
3820 else if (code == WIDEN_LSHIFT_EXPR)
3821 vec_oprnds1.create (slp_node->vec_stmts_size);
3823 last_oprnd = op0;
3824 prev_stmt_info = NULL;
3825 switch (modifier)
3827 case NONE:
3828 for (j = 0; j < ncopies; j++)
3830 if (j == 0)
3831 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node,
3832 -1);
3833 else
3834 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
3836 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
3838 /* Arguments are ready. Create the new vector stmt. */
3839 if (code1 == CALL_EXPR)
3841 new_stmt = gimple_build_call (decl1, 1, vop0);
3842 new_temp = make_ssa_name (vec_dest, new_stmt);
3843 gimple_call_set_lhs (new_stmt, new_temp);
3845 else
3847 gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
3848 new_stmt = gimple_build_assign (vec_dest, code1, vop0);
3849 new_temp = make_ssa_name (vec_dest, new_stmt);
3850 gimple_assign_set_lhs (new_stmt, new_temp);
3853 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3854 if (slp_node)
3855 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3856 else
3858 if (!prev_stmt_info)
3859 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3860 else
3861 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3862 prev_stmt_info = vinfo_for_stmt (new_stmt);
3866 break;
3868 case WIDEN:
3869 /* In case the vectorization factor (VF) is bigger than the number
3870 of elements that we can fit in a vectype (nunits), we have to
3871 generate more than one vector stmt - i.e. - we need to "unroll"
3872 the vector stmt by a factor VF/nunits. */
3873 for (j = 0; j < ncopies; j++)
3875 /* Handle uses. */
3876 if (j == 0)
3878 if (slp_node)
3880 if (code == WIDEN_LSHIFT_EXPR)
3882 unsigned int k;
3884 vec_oprnd1 = op1;
3885 /* Store vec_oprnd1 for every vector stmt to be created
3886 for SLP_NODE. We check during the analysis that all
3887 the shift arguments are the same. */
3888 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
3889 vec_oprnds1.quick_push (vec_oprnd1);
3891 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
3892 slp_node, -1);
3894 else
3895 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
3896 &vec_oprnds1, slp_node, -1);
3898 else
3900 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt);
3901 vec_oprnds0.quick_push (vec_oprnd0);
3902 if (op_type == binary_op)
3904 if (code == WIDEN_LSHIFT_EXPR)
3905 vec_oprnd1 = op1;
3906 else
3907 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt);
3908 vec_oprnds1.quick_push (vec_oprnd1);
3912 else
3914 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
3915 vec_oprnds0.truncate (0);
3916 vec_oprnds0.quick_push (vec_oprnd0);
3917 if (op_type == binary_op)
3919 if (code == WIDEN_LSHIFT_EXPR)
3920 vec_oprnd1 = op1;
3921 else
3922 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
3923 vec_oprnd1);
3924 vec_oprnds1.truncate (0);
3925 vec_oprnds1.quick_push (vec_oprnd1);
3929 /* Arguments are ready. Create the new vector stmts. */
3930 for (i = multi_step_cvt; i >= 0; i--)
3932 tree this_dest = vec_dsts[i];
3933 enum tree_code c1 = code1, c2 = code2;
3934 if (i == 0 && codecvt2 != ERROR_MARK)
3936 c1 = codecvt1;
3937 c2 = codecvt2;
3939 vect_create_vectorized_promotion_stmts (&vec_oprnds0,
3940 &vec_oprnds1,
3941 stmt, this_dest, gsi,
3942 c1, c2, decl1, decl2,
3943 op_type);
3946 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
3948 if (cvt_type)
3950 if (codecvt1 == CALL_EXPR)
3952 new_stmt = gimple_build_call (decl1, 1, vop0);
3953 new_temp = make_ssa_name (vec_dest, new_stmt);
3954 gimple_call_set_lhs (new_stmt, new_temp);
3956 else
3958 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
3959 new_temp = make_ssa_name (vec_dest);
3960 new_stmt = gimple_build_assign (new_temp, codecvt1,
3961 vop0);
3964 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3966 else
3967 new_stmt = SSA_NAME_DEF_STMT (vop0);
3969 if (slp_node)
3970 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3971 else
3973 if (!prev_stmt_info)
3974 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3975 else
3976 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3977 prev_stmt_info = vinfo_for_stmt (new_stmt);
3982 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3983 break;
3985 case NARROW:
3986 /* In case the vectorization factor (VF) is bigger than the number
3987 of elements that we can fit in a vectype (nunits), we have to
3988 generate more than one vector stmt - i.e. - we need to "unroll"
3989 the vector stmt by a factor VF/nunits. */
3990 for (j = 0; j < ncopies; j++)
3992 /* Handle uses. */
3993 if (slp_node)
3994 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
3995 slp_node, -1);
3996 else
3998 vec_oprnds0.truncate (0);
3999 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
4000 vect_pow2 (multi_step_cvt) - 1);
4003 /* Arguments are ready. Create the new vector stmts. */
4004 if (cvt_type)
4005 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4007 if (codecvt1 == CALL_EXPR)
4009 new_stmt = gimple_build_call (decl1, 1, vop0);
4010 new_temp = make_ssa_name (vec_dest, new_stmt);
4011 gimple_call_set_lhs (new_stmt, new_temp);
4013 else
4015 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
4016 new_temp = make_ssa_name (vec_dest);
4017 new_stmt = gimple_build_assign (new_temp, codecvt1,
4018 vop0);
4021 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4022 vec_oprnds0[i] = new_temp;
4025 vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
4026 stmt, vec_dsts, gsi,
4027 slp_node, code1,
4028 &prev_stmt_info);
4031 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4032 break;
4035 vec_oprnds0.release ();
4036 vec_oprnds1.release ();
4037 vec_dsts.release ();
4038 interm_types.release ();
4040 return true;
4044 /* Function vectorizable_assignment.
4046 Check if STMT performs an assignment (copy) that can be vectorized.
4047 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4048 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4049 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4051 static bool
4052 vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
4053 gimple **vec_stmt, slp_tree slp_node)
4055 tree vec_dest;
4056 tree scalar_dest;
4057 tree op;
4058 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4059 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4060 tree new_temp;
4061 gimple *def_stmt;
4062 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4063 int ncopies;
4064 int i, j;
4065 vec<tree> vec_oprnds = vNULL;
4066 tree vop;
4067 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4068 vec_info *vinfo = stmt_info->vinfo;
4069 gimple *new_stmt = NULL;
4070 stmt_vec_info prev_stmt_info = NULL;
4071 enum tree_code code;
4072 tree vectype_in;
4074 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4075 return false;
4077 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4078 return false;
4080 /* Is vectorizable assignment? */
4081 if (!is_gimple_assign (stmt))
4082 return false;
4084 scalar_dest = gimple_assign_lhs (stmt);
4085 if (TREE_CODE (scalar_dest) != SSA_NAME)
4086 return false;
4088 code = gimple_assign_rhs_code (stmt);
4089 if (gimple_assign_single_p (stmt)
4090 || code == PAREN_EXPR
4091 || CONVERT_EXPR_CODE_P (code))
4092 op = gimple_assign_rhs1 (stmt);
4093 else
4094 return false;
4096 if (code == VIEW_CONVERT_EXPR)
4097 op = TREE_OPERAND (op, 0);
4099 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4100 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4102 /* Multiple types in SLP are handled by creating the appropriate number of
4103 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4104 case of SLP. */
4105 if (slp_node || PURE_SLP_STMT (stmt_info))
4106 ncopies = 1;
4107 else
4108 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4110 gcc_assert (ncopies >= 1);
4112 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[0], &vectype_in))
4114 if (dump_enabled_p ())
4115 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4116 "use not simple.\n");
4117 return false;
4120 /* We can handle NOP_EXPR conversions that do not change the number
4121 of elements or the vector size. */
4122 if ((CONVERT_EXPR_CODE_P (code)
4123 || code == VIEW_CONVERT_EXPR)
4124 && (!vectype_in
4125 || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
4126 || (GET_MODE_SIZE (TYPE_MODE (vectype))
4127 != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
4128 return false;
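   /* So, roughly: a sign-changing cast such as unsigned int -> int, or a
      VIEW_CONVERT_EXPR between same-sized vectors, is treated as a plain
      vector copy here, while a conversion that changes the number of
      elements or the vector size (e.g. int -> short) is left for
      vectorizable_conversion to handle.  */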
4130 /* We do not handle bit-precision changes. */
4131 if ((CONVERT_EXPR_CODE_P (code)
4132 || code == VIEW_CONVERT_EXPR)
4133 && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
4134 && ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4135 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4136 || ((TYPE_PRECISION (TREE_TYPE (op))
4137 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op))))))
4138 /* But a conversion that does not change the bit-pattern is ok. */
4139 && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4140 > TYPE_PRECISION (TREE_TYPE (op)))
4141 && TYPE_UNSIGNED (TREE_TYPE (op))))
4143 if (dump_enabled_p ())
4144 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4145 "type conversion to/from bit-precision "
4146 "unsupported.\n");
4147 return false;
4150 if (!vec_stmt) /* transformation not required. */
4152 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
4153 if (dump_enabled_p ())
4154 dump_printf_loc (MSG_NOTE, vect_location,
4155 "=== vectorizable_assignment ===\n");
4156 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4157 return true;
4160 /** Transform. **/
4161 if (dump_enabled_p ())
4162 dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");
4164 /* Handle def. */
4165 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4167 /* Handle use. */
4168 for (j = 0; j < ncopies; j++)
4170 /* Handle uses. */
4171 if (j == 0)
4172 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node, -1);
4173 else
4174 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
4176 /* Arguments are ready. Create the new vector stmt. */
4177 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
4179 if (CONVERT_EXPR_CODE_P (code)
4180 || code == VIEW_CONVERT_EXPR)
4181 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
4182 new_stmt = gimple_build_assign (vec_dest, vop);
4183 new_temp = make_ssa_name (vec_dest, new_stmt);
4184 gimple_assign_set_lhs (new_stmt, new_temp);
4185 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4186 if (slp_node)
4187 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4190 if (slp_node)
4191 continue;
4193 if (j == 0)
4194 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4195 else
4196 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4198 prev_stmt_info = vinfo_for_stmt (new_stmt);
4201 vec_oprnds.release ();
4202 return true;
4206 /* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
4207 either as shift by a scalar or by a vector. */
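/* For instance, a pattern recognizer that wants to replace a division by a
   power of two with a right shift can first ask
   vect_supportable_shift (RSHIFT_EXPR, type) and give up when neither form
   of the shift is available for that scalar type.  */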
4209 bool
4210 vect_supportable_shift (enum tree_code code, tree scalar_type)
4213 machine_mode vec_mode;
4214 optab optab;
4215 int icode;
4216 tree vectype;
4218 vectype = get_vectype_for_scalar_type (scalar_type);
4219 if (!vectype)
4220 return false;
4222 optab = optab_for_tree_code (code, vectype, optab_scalar);
4223 if (!optab
4224 || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
4226 optab = optab_for_tree_code (code, vectype, optab_vector);
4227 if (!optab
4228 || (optab_handler (optab, TYPE_MODE (vectype))
4229 == CODE_FOR_nothing))
4230 return false;
4233 vec_mode = TYPE_MODE (vectype);
4234 icode = (int) optab_handler (optab, vec_mode);
4235 if (icode == CODE_FOR_nothing)
4236 return false;
4238 return true;
4242 /* Function vectorizable_shift.
4244 Check if STMT performs a shift operation that can be vectorized.
4245 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4246 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4247 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4249 static bool
4250 vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
4251 gimple **vec_stmt, slp_tree slp_node)
4253 tree vec_dest;
4254 tree scalar_dest;
4255 tree op0, op1 = NULL;
4256 tree vec_oprnd1 = NULL_TREE;
4257 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4258 tree vectype;
4259 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4260 enum tree_code code;
4261 machine_mode vec_mode;
4262 tree new_temp;
4263 optab optab;
4264 int icode;
4265 machine_mode optab_op2_mode;
4266 gimple *def_stmt;
4267 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4268 gimple *new_stmt = NULL;
4269 stmt_vec_info prev_stmt_info;
4270 int nunits_in;
4271 int nunits_out;
4272 tree vectype_out;
4273 tree op1_vectype;
4274 int ncopies;
4275 int j, i;
4276 vec<tree> vec_oprnds0 = vNULL;
4277 vec<tree> vec_oprnds1 = vNULL;
4278 tree vop0, vop1;
4279 unsigned int k;
4280 bool scalar_shift_arg = true;
4281 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4282 vec_info *vinfo = stmt_info->vinfo;
4283 int vf;
4285 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4286 return false;
4288 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4289 return false;
4291 /* Is STMT a vectorizable binary/unary operation? */
4292 if (!is_gimple_assign (stmt))
4293 return false;
4295 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4296 return false;
4298 code = gimple_assign_rhs_code (stmt);
4300 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
4301 || code == RROTATE_EXPR))
4302 return false;
4304 scalar_dest = gimple_assign_lhs (stmt);
4305 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4306 if (TYPE_PRECISION (TREE_TYPE (scalar_dest))
4307 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4309 if (dump_enabled_p ())
4310 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4311 "bit-precision shifts not supported.\n");
4312 return false;
4315 op0 = gimple_assign_rhs1 (stmt);
4316 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
4318 if (dump_enabled_p ())
4319 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4320 "use not simple.\n");
4321 return false;
4323 /* If op0 is an external or constant def, use a vector type with
4324 the same size as the output vector type. */
4325 if (!vectype)
4326 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
4327 if (vec_stmt)
4328 gcc_assert (vectype);
4329 if (!vectype)
4331 if (dump_enabled_p ())
4332 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4333 "no vectype for scalar type\n");
4334 return false;
4337 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4338 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
4339 if (nunits_out != nunits_in)
4340 return false;
4342 op1 = gimple_assign_rhs2 (stmt);
4343 if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &op1_vectype))
4345 if (dump_enabled_p ())
4346 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4347 "use not simple.\n");
4348 return false;
4351 if (loop_vinfo)
4352 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4353 else
4354 vf = 1;
4356 /* Multiple types in SLP are handled by creating the appropriate number of
4357 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4358 case of SLP. */
4359 if (slp_node || PURE_SLP_STMT (stmt_info))
4360 ncopies = 1;
4361 else
4362 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
4364 gcc_assert (ncopies >= 1);
4366 /* Determine whether the shift amount is a vector, or scalar. If the
4367 shift/rotate amount is a vector, use the vector/vector shift optabs. */
4369 if ((dt[1] == vect_internal_def
4370 || dt[1] == vect_induction_def)
4371 && !slp_node)
4372 scalar_shift_arg = false;
4373 else if (dt[1] == vect_constant_def
4374 || dt[1] == vect_external_def
4375 || dt[1] == vect_internal_def)
4377 /* In SLP, we need to check whether the shift count is the same
4378 for all the stmts; in loops, if it is a constant or invariant,
4379 it is always a scalar shift. */
4380 if (slp_node)
4382 vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
4383 gimple *slpstmt;
4385 FOR_EACH_VEC_ELT (stmts, k, slpstmt)
4386 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
4387 scalar_shift_arg = false;
4390 else
4392 if (dump_enabled_p ())
4393 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4394 "operand mode requires invariant argument.\n");
4395 return false;
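      /* To illustrate the cases above: a loop shift like a[i] << b[i] has a
	 vect_internal_def count and takes the vector/vector path, whereas
	 a[i] << 3, or a[i] << n with loop-invariant N, keeps SCALAR_SHIFT_ARG
	 set; in SLP the shift counts must agree across the group, otherwise
	 SCALAR_SHIFT_ARG is cleared as well.  */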
4398 /* Vector shifted by vector. */
4399 if (!scalar_shift_arg)
4401 optab = optab_for_tree_code (code, vectype, optab_vector);
4402 if (dump_enabled_p ())
4403 dump_printf_loc (MSG_NOTE, vect_location,
4404 "vector/vector shift/rotate found.\n");
4406 if (!op1_vectype)
4407 op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
4408 if (op1_vectype == NULL_TREE
4409 || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
4411 if (dump_enabled_p ())
4412 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4413 "unusable type for last operand in"
4414 " vector/vector shift/rotate.\n");
4415 return false;
4418 /* See if the machine has a vector shifted by scalar insn, and if not
4419 then see if it has a vector shifted by vector insn. */
4420 else
4422 optab = optab_for_tree_code (code, vectype, optab_scalar);
4423 if (optab
4424 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
4426 if (dump_enabled_p ())
4427 dump_printf_loc (MSG_NOTE, vect_location,
4428 "vector/scalar shift/rotate found.\n");
4430 else
4432 optab = optab_for_tree_code (code, vectype, optab_vector);
4433 if (optab
4434 && (optab_handler (optab, TYPE_MODE (vectype))
4435 != CODE_FOR_nothing))
4437 scalar_shift_arg = false;
4439 if (dump_enabled_p ())
4440 dump_printf_loc (MSG_NOTE, vect_location,
4441 "vector/vector shift/rotate found.\n");
4443 /* Unlike the other binary operators, shifts/rotates have
4444 an rhs of type int rather than of the same type as the lhs,
4445 so make sure the scalar is the right type if we are
4446 dealing with vectors of long long/long/short/char. */
4447 if (dt[1] == vect_constant_def)
4448 op1 = fold_convert (TREE_TYPE (vectype), op1);
4449 else if (!useless_type_conversion_p (TREE_TYPE (vectype),
4450 TREE_TYPE (op1)))
4452 if (slp_node
4453 && TYPE_MODE (TREE_TYPE (vectype))
4454 != TYPE_MODE (TREE_TYPE (op1)))
4456 if (dump_enabled_p ())
4457 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4458 "unusable type for last operand in"
4459 " vector/vector shift/rotate.\n");
4460 return false;
4462 if (vec_stmt && !slp_node)
4464 op1 = fold_convert (TREE_TYPE (vectype), op1);
4465 op1 = vect_init_vector (stmt, op1,
4466 TREE_TYPE (vectype), NULL);
4473 /* Supportable by target? */
4474 if (!optab)
4476 if (dump_enabled_p ())
4477 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4478 "no optab.\n");
4479 return false;
4481 vec_mode = TYPE_MODE (vectype);
4482 icode = (int) optab_handler (optab, vec_mode);
4483 if (icode == CODE_FOR_nothing)
4485 if (dump_enabled_p ())
4486 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4487 "op not supported by target.\n");
4488 /* Check only during analysis. */
4489 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
4490 || (vf < vect_min_worthwhile_factor (code)
4491 && !vec_stmt))
4492 return false;
4493 if (dump_enabled_p ())
4494 dump_printf_loc (MSG_NOTE, vect_location,
4495 "proceeding using word mode.\n");
4498 /* Worthwhile without SIMD support? Check only during analysis. */
4499 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
4500 && vf < vect_min_worthwhile_factor (code)
4501 && !vec_stmt)
4503 if (dump_enabled_p ())
4504 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4505 "not worthwhile without SIMD support.\n");
4506 return false;
4509 if (!vec_stmt) /* transformation not required. */
4511 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
4512 if (dump_enabled_p ())
4513 dump_printf_loc (MSG_NOTE, vect_location,
4514 "=== vectorizable_shift ===\n");
4515 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4516 return true;
4519 /** Transform. **/
4521 if (dump_enabled_p ())
4522 dump_printf_loc (MSG_NOTE, vect_location,
4523 "transform binary/unary operation.\n");
4525 /* Handle def. */
4526 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4528 prev_stmt_info = NULL;
4529 for (j = 0; j < ncopies; j++)
4531 /* Handle uses. */
4532 if (j == 0)
4534 if (scalar_shift_arg)
4536 /* Vector shl and shr insn patterns can be defined with scalar
4537 operand 2 (shift operand). In this case, use constant or loop
4538 invariant op1 directly, without extending it to vector mode
4539 first. */
4540 optab_op2_mode = insn_data[icode].operand[2].mode;
4541 if (!VECTOR_MODE_P (optab_op2_mode))
4543 if (dump_enabled_p ())
4544 dump_printf_loc (MSG_NOTE, vect_location,
4545 "operand 1 using scalar mode.\n");
4546 vec_oprnd1 = op1;
4547 vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
4548 vec_oprnds1.quick_push (vec_oprnd1);
4549 if (slp_node)
4551 /* Store vec_oprnd1 for every vector stmt to be created
4552 for SLP_NODE. We check during the analysis that all
4553 the shift arguments are the same.
4554 TODO: Allow different constants for different vector
4555 stmts generated for an SLP instance. */
4556 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
4557 vec_oprnds1.quick_push (vec_oprnd1);
4562 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
4563 (a special case for certain kinds of vector shifts); otherwise,
4564 operand 1 should be of a vector type (the usual case). */
4565 if (vec_oprnd1)
4566 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4567 slp_node, -1);
4568 else
4569 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
4570 slp_node, -1);
4572 else
4573 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
4575 /* Arguments are ready. Create the new vector stmt. */
4576 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4578 vop1 = vec_oprnds1[i];
4579 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
4580 new_temp = make_ssa_name (vec_dest, new_stmt);
4581 gimple_assign_set_lhs (new_stmt, new_temp);
4582 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4583 if (slp_node)
4584 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4587 if (slp_node)
4588 continue;
4590 if (j == 0)
4591 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4592 else
4593 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4594 prev_stmt_info = vinfo_for_stmt (new_stmt);
4597 vec_oprnds0.release ();
4598 vec_oprnds1.release ();
4600 return true;
4604 /* Function vectorizable_operation.
4606 Check if STMT performs a binary, unary or ternary operation that can
4607 be vectorized.
4608 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4609 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4610 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4612 static bool
4613 vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
4614 gimple **vec_stmt, slp_tree slp_node)
4616 tree vec_dest;
4617 tree scalar_dest;
4618 tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
4619 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4620 tree vectype;
4621 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4622 enum tree_code code;
4623 machine_mode vec_mode;
4624 tree new_temp;
4625 int op_type;
4626 optab optab;
4627 bool target_support_p;
4628 gimple *def_stmt;
4629 enum vect_def_type dt[3]
4630 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
4631 gimple *new_stmt = NULL;
4632 stmt_vec_info prev_stmt_info;
4633 int nunits_in;
4634 int nunits_out;
4635 tree vectype_out;
4636 int ncopies;
4637 int j, i;
4638 vec<tree> vec_oprnds0 = vNULL;
4639 vec<tree> vec_oprnds1 = vNULL;
4640 vec<tree> vec_oprnds2 = vNULL;
4641 tree vop0, vop1, vop2;
4642 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4643 vec_info *vinfo = stmt_info->vinfo;
4644 int vf;
4646 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4647 return false;
4649 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4650 return false;
4652 /* Is STMT a vectorizable binary/unary operation? */
4653 if (!is_gimple_assign (stmt))
4654 return false;
4656 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4657 return false;
4659 code = gimple_assign_rhs_code (stmt);
4661 /* For pointer addition, we should use the normal plus for
4662 the vector addition. */
4663 if (code == POINTER_PLUS_EXPR)
4664 code = PLUS_EXPR;
4666 /* Support only unary, binary and ternary operations. */
4667 op_type = TREE_CODE_LENGTH (code);
4668 if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
4670 if (dump_enabled_p ())
4671 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4672 "num. args = %d (not unary/binary/ternary op).\n",
4673 op_type);
4674 return false;
4677 scalar_dest = gimple_assign_lhs (stmt);
4678 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4680 /* Most operations cannot handle bit-precision types without extra
4681 truncations. */
4682 if ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4683 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4685 /* Exceptions are bitwise binary operations. */
4685 && code != BIT_IOR_EXPR
4686 && code != BIT_XOR_EXPR
4687 && code != BIT_AND_EXPR)
4689 if (dump_enabled_p ())
4690 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4691 "bit-precision arithmetic not supported.\n");
4692 return false;
4695 op0 = gimple_assign_rhs1 (stmt);
4696 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
4698 if (dump_enabled_p ())
4699 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4700 "use not simple.\n");
4701 return false;
4703 /* If op0 is an external or constant def, use a vector type with
4704 the same size as the output vector type. */
4705 if (!vectype)
4707 /* For a boolean type we cannot determine the vectype from an
4708 invariant value (we don't know whether it is a vector
4709 of booleans or a vector of integers). We use the output
4710 vectype because operations on booleans don't change the
4711 type. */
4712 if (TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE)
4714 if (TREE_CODE (TREE_TYPE (scalar_dest)) != BOOLEAN_TYPE)
4716 if (dump_enabled_p ())
4717 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4718 "not supported operation on bool value.\n");
4719 return false;
4721 vectype = vectype_out;
4723 else
4724 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
4726 if (vec_stmt)
4727 gcc_assert (vectype);
4728 if (!vectype)
4730 if (dump_enabled_p ())
4732 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4733 "no vectype for scalar type ");
4734 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
4735 TREE_TYPE (op0));
4736 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4739 return false;
4742 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4743 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
4744 if (nunits_out != nunits_in)
4745 return false;
4747 if (op_type == binary_op || op_type == ternary_op)
4749 op1 = gimple_assign_rhs2 (stmt);
4750 if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]))
4752 if (dump_enabled_p ())
4753 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4754 "use not simple.\n");
4755 return false;
4758 if (op_type == ternary_op)
4760 op2 = gimple_assign_rhs3 (stmt);
4761 if (!vect_is_simple_use (op2, vinfo, &def_stmt, &dt[2]))
4763 if (dump_enabled_p ())
4764 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4765 "use not simple.\n");
4766 return false;
4770 if (loop_vinfo)
4771 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4772 else
4773 vf = 1;
4775 /* Multiple types in SLP are handled by creating the appropriate number of
4776 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4777 case of SLP. */
4778 if (slp_node || PURE_SLP_STMT (stmt_info))
4779 ncopies = 1;
4780 else
4781 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
4783 gcc_assert (ncopies >= 1);
4785 /* Shifts are handled in vectorizable_shift (). */
4786 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
4787 || code == RROTATE_EXPR)
4788 return false;
4790 /* Supportable by target? */
4792 vec_mode = TYPE_MODE (vectype);
4793 if (code == MULT_HIGHPART_EXPR)
4794 target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
4795 else
4797 optab = optab_for_tree_code (code, vectype, optab_default);
4798 if (!optab)
4800 if (dump_enabled_p ())
4801 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4802 "no optab.\n");
4803 return false;
4805 target_support_p = (optab_handler (optab, vec_mode)
4806 != CODE_FOR_nothing);
4809 if (!target_support_p)
4811 if (dump_enabled_p ())
4812 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4813 "op not supported by target.\n");
4814 /* Check only during analysis. */
4815 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
4816 || (!vec_stmt && vf < vect_min_worthwhile_factor (code)))
4817 return false;
4818 if (dump_enabled_p ())
4819 dump_printf_loc (MSG_NOTE, vect_location,
4820 "proceeding using word mode.\n");
4823 /* Worthwhile without SIMD support? Check only during analysis. */
4824 if (!VECTOR_MODE_P (vec_mode)
4825 && !vec_stmt
4826 && vf < vect_min_worthwhile_factor (code))
4828 if (dump_enabled_p ())
4829 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4830 "not worthwhile without SIMD support.\n");
4831 return false;
4834 if (!vec_stmt) /* transformation not required. */
4836 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
4837 if (dump_enabled_p ())
4838 dump_printf_loc (MSG_NOTE, vect_location,
4839 "=== vectorizable_operation ===\n");
4840 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4841 return true;
4844 /** Transform. **/
4846 if (dump_enabled_p ())
4847 dump_printf_loc (MSG_NOTE, vect_location,
4848 "transform binary/unary operation.\n");
4850 /* Handle def. */
4851 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4853 /* In case the vectorization factor (VF) is bigger than the number
4854 of elements that we can fit in a vectype (nunits), we have to generate
4855 more than one vector stmt - i.e. - we need to "unroll" the
4856 vector stmt by a factor VF/nunits. In doing so, we record a pointer
4857 from one copy of the vector stmt to the next, in the field
4858 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
4859 stages to find the correct vector defs to be used when vectorizing
4860 stmts that use the defs of the current stmt. The example below
4861 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
4862 we need to create 4 vectorized stmts):
4864 before vectorization:
4865 RELATED_STMT VEC_STMT
4866 S1: x = memref - -
4867 S2: z = x + 1 - -
4869 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
4870 there):
4871 RELATED_STMT VEC_STMT
4872 VS1_0: vx0 = memref0 VS1_1 -
4873 VS1_1: vx1 = memref1 VS1_2 -
4874 VS1_2: vx2 = memref2 VS1_3 -
4875 VS1_3: vx3 = memref3 - -
4876 S1: x = load - VS1_0
4877 S2: z = x + 1 - -
4879 step2: vectorize stmt S2 (done here):
4880 To vectorize stmt S2 we first need to find the relevant vector
4881 def for the first operand 'x'. This is, as usual, obtained from
4882 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
4883 that defines 'x' (S1). This way we find the stmt VS1_0, and the
4884 relevant vector def 'vx0'. Having found 'vx0' we can generate
4885 the vector stmt VS2_0, and as usual, record it in the
4886 STMT_VINFO_VEC_STMT of stmt S2.
4887 When creating the second copy (VS2_1), we obtain the relevant vector
4888 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
4889 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
4890 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
4891 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
4892 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
4893 chain of stmts and pointers:
4894 RELATED_STMT VEC_STMT
4895 VS1_0: vx0 = memref0 VS1_1 -
4896 VS1_1: vx1 = memref1 VS1_2 -
4897 VS1_2: vx2 = memref2 VS1_3 -
4898 VS1_3: vx3 = memref3 - -
4899 S1: x = load - VS1_0
4900 VS2_0: vz0 = vx0 + v1 VS2_1 -
4901 VS2_1: vz1 = vx1 + v1 VS2_2 -
4902 VS2_2: vz2 = vx2 + v1 VS2_3 -
4903 VS2_3: vz3 = vx3 + v1 - -
4904 S2: z = x + 1 - VS2_0 */
4906 prev_stmt_info = NULL;
4907 for (j = 0; j < ncopies; j++)
4909 /* Handle uses. */
4910 if (j == 0)
4912 if (op_type == binary_op || op_type == ternary_op)
4913 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
4914 slp_node, -1);
4915 else
4916 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4917 slp_node, -1);
4918 if (op_type == ternary_op)
4920 vec_oprnds2.create (1);
4921 vec_oprnds2.quick_push (vect_get_vec_def_for_operand (op2,
4922 stmt));
4925 else
4927 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
4928 if (op_type == ternary_op)
4930 tree vec_oprnd = vec_oprnds2.pop ();
4931 vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
4932 vec_oprnd));
4936 /* Arguments are ready. Create the new vector stmt. */
4937 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4939 vop1 = ((op_type == binary_op || op_type == ternary_op)
4940 ? vec_oprnds1[i] : NULL_TREE);
4941 vop2 = ((op_type == ternary_op)
4942 ? vec_oprnds2[i] : NULL_TREE);
4943 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2);
4944 new_temp = make_ssa_name (vec_dest, new_stmt);
4945 gimple_assign_set_lhs (new_stmt, new_temp);
4946 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4947 if (slp_node)
4948 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4951 if (slp_node)
4952 continue;
4954 if (j == 0)
4955 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4956 else
4957 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4958 prev_stmt_info = vinfo_for_stmt (new_stmt);
4961 vec_oprnds0.release ();
4962 vec_oprnds1.release ();
4963 vec_oprnds2.release ();
4965 return true;
4968 /* A helper function to ensure data reference DR's base alignment
4969 for STMT_INFO. */
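/* For instance, if the analysis of DR recorded a misaligned base and the
   vectype is, say, a 16-byte vector, the base declaration's alignment is
   raised to TYPE_ALIGN (vectype) - through the symbol table for decls that
   live there, directly through DECL_ALIGN otherwise - so the alignment
   assumed during analysis really holds when code is generated.  */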
4971 static void
4972 ensure_base_align (stmt_vec_info stmt_info, struct data_reference *dr)
4974 if (!dr->aux)
4975 return;
4977 if (DR_VECT_AUX (dr)->base_misaligned)
4979 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4980 tree base_decl = DR_VECT_AUX (dr)->base_decl;
4982 if (decl_in_symtab_p (base_decl))
4983 symtab_node::get (base_decl)->increase_alignment (TYPE_ALIGN (vectype));
4984 else
4986 DECL_ALIGN (base_decl) = TYPE_ALIGN (vectype);
4987 DECL_USER_ALIGN (base_decl) = 1;
4989 DR_VECT_AUX (dr)->base_misaligned = false;
4994 /* Given a vector type VECTYPE, return the VECTOR_CST mask that implements
4995 reversal of the vector elements. If that is impossible to do,
4996 return NULL_TREE. */
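/* E.g. for a 4-element vector the mask built below is {3, 2, 1, 0}; when
   the target cannot perform that permutation, callers get NULL_TREE back
   and must fall back to some other strategy.  */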
4998 static tree
4999 perm_mask_for_reverse (tree vectype)
5001 int i, nunits;
5002 unsigned char *sel;
5004 nunits = TYPE_VECTOR_SUBPARTS (vectype);
5005 sel = XALLOCAVEC (unsigned char, nunits);
5007 for (i = 0; i < nunits; ++i)
5008 sel[i] = nunits - 1 - i;
5010 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5011 return NULL_TREE;
5012 return vect_gen_perm_mask_checked (vectype, sel);
5015 /* Function vectorizable_store.
5017 /* Check if STMT defines a non-scalar data-ref (array/pointer/structure) that
5018 can be vectorized.
5019 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5020 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
5021 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5023 static bool
5024 vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
5025 slp_tree slp_node)
5027 tree scalar_dest;
5028 tree data_ref;
5029 tree op;
5030 tree vec_oprnd = NULL_TREE;
5031 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5032 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
5033 tree elem_type;
5034 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5035 struct loop *loop = NULL;
5036 machine_mode vec_mode;
5037 tree dummy;
5038 enum dr_alignment_support alignment_support_scheme;
5039 gimple *def_stmt;
5040 enum vect_def_type dt;
5041 stmt_vec_info prev_stmt_info = NULL;
5042 tree dataref_ptr = NULL_TREE;
5043 tree dataref_offset = NULL_TREE;
5044 gimple *ptr_incr = NULL;
5045 int ncopies;
5046 int j;
5047 gimple *next_stmt, *first_stmt = NULL;
5048 bool grouped_store = false;
5049 bool store_lanes_p = false;
5050 unsigned int group_size, i;
5051 vec<tree> dr_chain = vNULL;
5052 vec<tree> oprnds = vNULL;
5053 vec<tree> result_chain = vNULL;
5054 bool inv_p;
5055 bool negative = false;
5056 tree offset = NULL_TREE;
5057 vec<tree> vec_oprnds = vNULL;
5058 bool slp = (slp_node != NULL);
5059 unsigned int vec_num;
5060 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5061 vec_info *vinfo = stmt_info->vinfo;
5062 tree aggr_type;
5063 tree scatter_base = NULL_TREE, scatter_off = NULL_TREE;
5064 tree scatter_off_vectype = NULL_TREE, scatter_decl = NULL_TREE;
5065 int scatter_scale = 1;
5066 enum vect_def_type scatter_idx_dt = vect_unknown_def_type;
5067 enum vect_def_type scatter_src_dt = vect_unknown_def_type;
5068 gimple *new_stmt;
5070 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5071 return false;
5073 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
5074 return false;
5076 /* Is vectorizable store? */
5078 if (!is_gimple_assign (stmt))
5079 return false;
5081 scalar_dest = gimple_assign_lhs (stmt);
5082 if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
5083 && is_pattern_stmt_p (stmt_info))
5084 scalar_dest = TREE_OPERAND (scalar_dest, 0);
5085 if (TREE_CODE (scalar_dest) != ARRAY_REF
5086 && TREE_CODE (scalar_dest) != BIT_FIELD_REF
5087 && TREE_CODE (scalar_dest) != INDIRECT_REF
5088 && TREE_CODE (scalar_dest) != COMPONENT_REF
5089 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
5090 && TREE_CODE (scalar_dest) != REALPART_EXPR
5091 && TREE_CODE (scalar_dest) != MEM_REF)
5092 return false;
5094 gcc_assert (gimple_assign_single_p (stmt));
5096 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5097 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
5099 if (loop_vinfo)
5100 loop = LOOP_VINFO_LOOP (loop_vinfo);
5102 /* Multiple types in SLP are handled by creating the appropriate number of
5103 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5104 case of SLP. */
5105 if (slp || PURE_SLP_STMT (stmt_info))
5106 ncopies = 1;
5107 else
5108 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
5110 gcc_assert (ncopies >= 1);
5112 /* FORNOW. This restriction should be relaxed. */
5113 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
5115 if (dump_enabled_p ())
5116 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5117 "multiple types in nested loop.\n");
5118 return false;
5121 op = gimple_assign_rhs1 (stmt);
5122 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
5124 if (dump_enabled_p ())
5125 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5126 "use not simple.\n");
5127 return false;
5130 elem_type = TREE_TYPE (vectype);
5131 vec_mode = TYPE_MODE (vectype);
5133 /* FORNOW. In some cases we can vectorize even if the data type is not
5134 supported (e.g. array initialization with 0). */
5135 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
5136 return false;
5138 if (!STMT_VINFO_DATA_REF (stmt_info))
5139 return false;
5141 if (!STMT_VINFO_STRIDED_P (stmt_info))
5143 negative =
5144 tree_int_cst_compare (loop && nested_in_vect_loop_p (loop, stmt)
5145 ? STMT_VINFO_DR_STEP (stmt_info) : DR_STEP (dr),
5146 size_zero_node) < 0;
5147 if (negative && ncopies > 1)
5149 if (dump_enabled_p ())
5150 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5151 "multiple types with negative step.\n");
5152 return false;
5154 if (negative)
5156 gcc_assert (!grouped_store);
5157 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
5158 if (alignment_support_scheme != dr_aligned
5159 && alignment_support_scheme != dr_unaligned_supported)
5161 if (dump_enabled_p ())
5162 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5163 "negative step but alignment required.\n");
5164 return false;
5166 if (dt != vect_constant_def
5167 && dt != vect_external_def
5168 && !perm_mask_for_reverse (vectype))
5170 if (dump_enabled_p ())
5171 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5172 "negative step and reversing not supported.\n");
5173 return false;
5178 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
5180 grouped_store = true;
5181 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
5182 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
5183 if (!slp
5184 && !PURE_SLP_STMT (stmt_info)
5185 && !STMT_VINFO_STRIDED_P (stmt_info))
5187 if (vect_store_lanes_supported (vectype, group_size))
5188 store_lanes_p = true;
5189 else if (!vect_grouped_store_supported (vectype, group_size))
5190 return false;
5193 if (STMT_VINFO_STRIDED_P (stmt_info)
5194 && (slp || PURE_SLP_STMT (stmt_info))
5195 && (group_size > nunits
5196 || nunits % group_size != 0))
5198 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5199 "unhandled strided group store\n");
5200 return false;
5203 if (first_stmt == stmt)
5205 /* STMT is the leader of the group. Check the operands of all the
5206 stmts of the group. */
5207 next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
5208 while (next_stmt)
5210 gcc_assert (gimple_assign_single_p (next_stmt));
5211 op = gimple_assign_rhs1 (next_stmt);
5212 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
5214 if (dump_enabled_p ())
5215 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5216 "use not simple.\n");
5217 return false;
5219 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5224 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
5226 gimple *def_stmt;
5227 scatter_decl = vect_check_gather_scatter (stmt, loop_vinfo, &scatter_base,
5228 &scatter_off, &scatter_scale);
5229 gcc_assert (scatter_decl);
5230 if (!vect_is_simple_use (scatter_off, vinfo, &def_stmt, &scatter_idx_dt,
5231 &scatter_off_vectype))
5233 if (dump_enabled_p ())
5234 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5235 "scatter index use not simple.");
5236 return false;
5240 if (!vec_stmt) /* transformation not required. */
5242 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
5243 /* The SLP costs are calculated during SLP analysis. */
5244 if (!PURE_SLP_STMT (stmt_info))
5245 vect_model_store_cost (stmt_info, ncopies, store_lanes_p, dt,
5246 NULL, NULL, NULL);
5247 return true;
5250 /** Transform. **/
5252 ensure_base_align (stmt_info, dr);
5254 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
5256 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, op, src;
5257 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (scatter_decl));
5258 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
5259 tree ptr, mask, var, scale, perm_mask = NULL_TREE;
5260 edge pe = loop_preheader_edge (loop);
5261 gimple_seq seq;
5262 basic_block new_bb;
5263 enum { NARROW, NONE, WIDEN } modifier;
5264 int scatter_off_nunits = TYPE_VECTOR_SUBPARTS (scatter_off_vectype);
5266 if (nunits == (unsigned int) scatter_off_nunits)
5267 modifier = NONE;
5268 else if (nunits == (unsigned int) scatter_off_nunits / 2)
5270 unsigned char *sel = XALLOCAVEC (unsigned char, scatter_off_nunits);
5271 modifier = WIDEN;
5273 for (i = 0; i < (unsigned int) scatter_off_nunits; ++i)
5274 sel[i] = i | nunits;
5276 perm_mask = vect_gen_perm_mask_checked (scatter_off_vectype, sel);
5277 gcc_assert (perm_mask != NULL_TREE);
5279 else if (nunits == (unsigned int) scatter_off_nunits * 2)
5281 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
5282 modifier = NARROW;
5284 for (i = 0; i < (unsigned int) nunits; ++i)
5285 sel[i] = i | scatter_off_nunits;
5287 perm_mask = vect_gen_perm_mask_checked (vectype, sel);
5288 gcc_assert (perm_mask != NULL_TREE);
5289 ncopies *= 2;
5291 else
5292 gcc_unreachable ();
5294 rettype = TREE_TYPE (TREE_TYPE (scatter_decl));
5295 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5296 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5297 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5298 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5299 scaletype = TREE_VALUE (arglist);
5301 gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
5302 && TREE_CODE (rettype) == VOID_TYPE);
5304 ptr = fold_convert (ptrtype, scatter_base);
5305 if (!is_gimple_min_invariant (ptr))
5307 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
5308 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
5309 gcc_assert (!new_bb);
5312 /* Currently we support only unconditional scatter stores,
5313 so mask should be all ones. */
5314 mask = build_int_cst (masktype, -1);
5315 mask = vect_init_vector (stmt, mask, masktype, NULL);
5317 scale = build_int_cst (scaletype, scatter_scale);
5319 prev_stmt_info = NULL;
5320 for (j = 0; j < ncopies; ++j)
5322 if (j == 0)
5324 src = vec_oprnd1
5325 = vect_get_vec_def_for_operand (gimple_assign_rhs1 (stmt), stmt);
5326 op = vec_oprnd0
5327 = vect_get_vec_def_for_operand (scatter_off, stmt);
5329 else if (modifier != NONE && (j & 1))
5331 if (modifier == WIDEN)
5333 src = vec_oprnd1
5334 = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
5335 op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
5336 stmt, gsi);
5338 else if (modifier == NARROW)
5340 src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask,
5341 stmt, gsi);
5342 op = vec_oprnd0
5343 = vect_get_vec_def_for_stmt_copy (scatter_idx_dt, vec_oprnd0);
5345 else
5346 gcc_unreachable ();
5348 else
5350 src = vec_oprnd1
5351 = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
5352 op = vec_oprnd0
5353 = vect_get_vec_def_for_stmt_copy (scatter_idx_dt, vec_oprnd0);
5356 if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
5358 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src))
5359 == TYPE_VECTOR_SUBPARTS (srctype));
5360 var = vect_get_new_ssa_name (srctype, vect_simple_var);
5361 src = build1 (VIEW_CONVERT_EXPR, srctype, src);
5362 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
5363 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5364 src = var;
5367 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
5369 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
5370 == TYPE_VECTOR_SUBPARTS (idxtype));
5371 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
5372 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
5373 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
5374 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5375 op = var;
5378 new_stmt
5379 = gimple_build_call (scatter_decl, 5, ptr, mask, op, src, scale);
5381 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5383 if (prev_stmt_info == NULL)
5384 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5385 else
5386 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5387 prev_stmt_info = vinfo_for_stmt (new_stmt);
5389 return true;
5392 if (grouped_store)
5394 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5395 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
5397 GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
5399 /* FORNOW */
5400 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
5402 /* We vectorize all the stmts of the interleaving group when we
5403 reach the last stmt in the group. */
5404 if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
5405 < GROUP_SIZE (vinfo_for_stmt (first_stmt))
5406 && !slp)
5408 *vec_stmt = NULL;
5409 return true;
5412 if (slp)
5414 grouped_store = false;
5415 /* VEC_NUM is the number of vect stmts to be created for this
5416 group. */
5417 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
5418 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
5419 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5420 op = gimple_assign_rhs1 (first_stmt);
5422 else
5423 /* VEC_NUM is the number of vect stmts to be created for this
5424 group. */
5425 vec_num = group_size;
5427 else
5429 first_stmt = stmt;
5430 first_dr = dr;
5431 group_size = vec_num = 1;
5434 if (dump_enabled_p ())
5435 dump_printf_loc (MSG_NOTE, vect_location,
5436 "transform store. ncopies = %d\n", ncopies);
5438 if (STMT_VINFO_STRIDED_P (stmt_info))
5440 gimple_stmt_iterator incr_gsi;
5441 bool insert_after;
5442 gimple *incr;
5443 tree offvar;
5444 tree ivstep;
5445 tree running_off;
5446 gimple_seq stmts = NULL;
5447 tree stride_base, stride_step, alias_off;
5448 tree vec_oprnd;
5449 unsigned int g;
5451 gcc_assert (!nested_in_vect_loop_p (loop, stmt));
5453 stride_base
5454 = fold_build_pointer_plus
5455 (unshare_expr (DR_BASE_ADDRESS (first_dr)),
5456 size_binop (PLUS_EXPR,
5457 convert_to_ptrofftype (unshare_expr (DR_OFFSET (first_dr))),
5458 convert_to_ptrofftype (DR_INIT(first_dr))));
5459 stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (first_dr)));
5461 /* For a store with loop-invariant (but other than power-of-2)
5462 stride (i.e. not a grouped access) like so:
5464 for (i = 0; i < n; i += stride)
5465 array[i] = ...;
5467 we generate a new induction variable and new stores from
5468 the components of the (vectorized) rhs:
5470 for (j = 0; ; j += VF*stride)
5471 vectemp = ...;
5472 tmp1 = vectemp[0];
5473 array[j] = tmp1;
5474 tmp2 = vectemp[1];
5475 array[j + stride] = tmp2;
5479 unsigned nstores = nunits;
5480 tree ltype = elem_type;
5481 if (slp)
5483 nstores = nunits / group_size;
5484 if (group_size < nunits)
5485 ltype = build_vector_type (elem_type, group_size);
5486 else
5487 ltype = vectype;
5488 ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
5489 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
5490 group_size = 1;
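          /* A rough example (hypothetical SLP group): with nunits = 4 and a
             group size of 2, each vector holds two group instances, so LTYPE
             becomes a 2-element vector, NSTORES is 4/2 = 2, and every copy
             emits two of these small stores, each stepping the running
             offset by STRIDE_STEP.  */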
5493 ivstep = stride_step;
5494 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
5495 build_int_cst (TREE_TYPE (ivstep),
5496 ncopies * nstores));
5498 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
5500 create_iv (stride_base, ivstep, NULL,
5501 loop, &incr_gsi, insert_after,
5502 &offvar, NULL);
5503 incr = gsi_stmt (incr_gsi);
5504 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
5506 stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
5507 if (stmts)
5508 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
5510 prev_stmt_info = NULL;
5511 alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)), 0);
5512 next_stmt = first_stmt;
5513 for (g = 0; g < group_size; g++)
5515 running_off = offvar;
5516 if (g)
5518 tree size = TYPE_SIZE_UNIT (ltype);
5519 tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
5520 size);
5521 tree newoff = copy_ssa_name (running_off, NULL);
5522 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
5523 running_off, pos);
5524 vect_finish_stmt_generation (stmt, incr, gsi);
5525 running_off = newoff;
5527 for (j = 0; j < ncopies; j++)
5529 /* We've set op and dt above, from gimple_assign_rhs1(stmt),
5530 and first_stmt == stmt. */
5531 if (j == 0)
5533 if (slp)
5535 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL,
5536 slp_node, -1);
5537 vec_oprnd = vec_oprnds[0];
5539 else
5541 gcc_assert (gimple_assign_single_p (next_stmt));
5542 op = gimple_assign_rhs1 (next_stmt);
5543 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
5546 else
5548 if (slp)
5549 vec_oprnd = vec_oprnds[j];
5550 else
5552 vect_is_simple_use (vec_oprnd, vinfo, &def_stmt, &dt);
5553 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
5557 for (i = 0; i < nstores; i++)
5559 tree newref, newoff;
5560 gimple *incr, *assign;
5561 tree size = TYPE_SIZE (ltype);
5562 /* Extract the i'th component. */
5563 tree pos = fold_build2 (MULT_EXPR, bitsizetype,
5564 bitsize_int (i), size);
5565 tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
5566 size, pos);
5568 elem = force_gimple_operand_gsi (gsi, elem, true,
5569 NULL_TREE, true,
5570 GSI_SAME_STMT);
5572 newref = build2 (MEM_REF, ltype,
5573 running_off, alias_off);
5575 /* And store it to *running_off. */
5576 assign = gimple_build_assign (newref, elem);
5577 vect_finish_stmt_generation (stmt, assign, gsi);
5579 newoff = copy_ssa_name (running_off, NULL);
5580 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
5581 running_off, stride_step);
5582 vect_finish_stmt_generation (stmt, incr, gsi);
5584 running_off = newoff;
5585 if (g == group_size - 1
5586 && !slp)
5588 if (j == 0 && i == 0)
5589 STMT_VINFO_VEC_STMT (stmt_info)
5590 = *vec_stmt = assign;
5591 else
5592 STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign;
5593 prev_stmt_info = vinfo_for_stmt (assign);
5597 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5599 return true;
5602 dr_chain.create (group_size);
5603 oprnds.create (group_size);
5605 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
5606 gcc_assert (alignment_support_scheme);
5607 /* Targets with store-lane instructions must not require explicit
5608 realignment. */
5609 gcc_assert (!store_lanes_p
5610 || alignment_support_scheme == dr_aligned
5611 || alignment_support_scheme == dr_unaligned_supported);
5613 if (negative)
5614 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
5616 if (store_lanes_p)
5617 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
5618 else
5619 aggr_type = vectype;
5621 /* In case the vectorization factor (VF) is bigger than the number
5622 of elements that we can fit in a vectype (nunits), we have to generate
5623 more than one vector stmt - i.e. - we need to "unroll" the
5624 vector stmt by a factor VF/nunits. For more details see documentation in
5625 vect_get_vec_def_for_copy_stmt. */
5627 /* In case of interleaving (non-unit grouped access):
5629 S1: &base + 2 = x2
5630 S2: &base = x0
5631 S3: &base + 1 = x1
5632 S4: &base + 3 = x3
5634 We create vectorized stores starting from base address (the access of the
5635 first stmt in the chain (S2 in the above example), when the last store stmt
5636 of the chain (S4) is reached:
5638 VS1: &base = vx2
5639 VS2: &base + vec_size*1 = vx0
5640 VS3: &base + vec_size*2 = vx1
5641 VS4: &base + vec_size*3 = vx3
5643 Then permutation statements are generated:
5645 VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
5646 VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
5649 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
5650 (the order of the data-refs in the output of vect_permute_store_chain
5651 corresponds to the order of scalar stmts in the interleaving chain - see
5652 the documentation of vect_permute_store_chain()).
5654 In case of both multiple types and interleaving, above vector stores and
5655 permutation stmts are created for every copy. The result vector stmts are
5656 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
5657 STMT_VINFO_RELATED_STMT for the next copies.
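   As a small illustration (values chosen here for exposition only): with
   GROUP_SIZE == 2 and 4-element vectors vx0 = {a0,a1,a2,a3} and
   vx1 = {b0,b1,b2,b3}, vect_permute_store_chain would produce

     VEC_PERM_EXPR <vx0, vx1, {0, 4, 1, 5}> = {a0,b0,a1,b1}
     VEC_PERM_EXPR <vx0, vx1, {2, 6, 3, 7}> = {a2,b2,a3,b3}

   which are then stored to consecutive vector-sized locations.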
5660 prev_stmt_info = NULL;
5661 for (j = 0; j < ncopies; j++)
5664 if (j == 0)
5666 if (slp)
5668 /* Get vectorized arguments for SLP_NODE. */
5669 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
5670 NULL, slp_node, -1);
5672 vec_oprnd = vec_oprnds[0];
5674 else
5676 /* For interleaved stores we collect vectorized defs for all the
5677 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
5678 used as an input to vect_permute_store_chain(), and OPRNDS as
5679 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
5681 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
5682 OPRNDS are of size 1. */
5683 next_stmt = first_stmt;
5684 for (i = 0; i < group_size; i++)
5686 /* Since gaps are not supported for interleaved stores,
5687 GROUP_SIZE is the exact number of stmts in the chain.
5688 Therefore, NEXT_STMT can't be NULL_TREE. In case that
5689 there is no interleaving, GROUP_SIZE is 1, and only one
5690 iteration of the loop will be executed. */
5691 gcc_assert (next_stmt
5692 && gimple_assign_single_p (next_stmt));
5693 op = gimple_assign_rhs1 (next_stmt);
5695 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
5696 dr_chain.quick_push (vec_oprnd);
5697 oprnds.quick_push (vec_oprnd);
5698 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5702 /* We should have caught mismatched types earlier. */
5703 gcc_assert (useless_type_conversion_p (vectype,
5704 TREE_TYPE (vec_oprnd)));
5705 bool simd_lane_access_p
5706 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
5707 if (simd_lane_access_p
5708 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
5709 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
5710 && integer_zerop (DR_OFFSET (first_dr))
5711 && integer_zerop (DR_INIT (first_dr))
5712 && alias_sets_conflict_p (get_alias_set (aggr_type),
5713 get_alias_set (DR_REF (first_dr))))
5715 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
5716 dataref_offset = build_int_cst (reference_alias_ptr_type
5717 (DR_REF (first_dr)), 0);
5718 inv_p = false;
5720 else
5721 dataref_ptr
5722 = vect_create_data_ref_ptr (first_stmt, aggr_type,
5723 simd_lane_access_p ? loop : NULL,
5724 offset, &dummy, gsi, &ptr_incr,
5725 simd_lane_access_p, &inv_p);
5726 gcc_assert (bb_vinfo || !inv_p);
5728 else
5730 /* For interleaved stores we created vectorized defs for all the
5731 defs stored in OPRNDS in the previous iteration (previous copy).
5732 DR_CHAIN is then used as an input to vect_permute_store_chain(),
5733 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
5734 next copy.
5735 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
5736 OPRNDS are of size 1. */
5737 for (i = 0; i < group_size; i++)
5739 op = oprnds[i];
5740 vect_is_simple_use (op, vinfo, &def_stmt, &dt);
5741 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
5742 dr_chain[i] = vec_oprnd;
5743 oprnds[i] = vec_oprnd;
5745 if (dataref_offset)
5746 dataref_offset
5747 = int_const_binop (PLUS_EXPR, dataref_offset,
5748 TYPE_SIZE_UNIT (aggr_type));
5749 else
5750 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
5751 TYPE_SIZE_UNIT (aggr_type));
5754 if (store_lanes_p)
5756 tree vec_array;
5758 /* Combine all the vectors into an array. */
5759 vec_array = create_vector_array (vectype, vec_num);
5760 for (i = 0; i < vec_num; i++)
5762 vec_oprnd = dr_chain[i];
5763 write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
5766 /* Emit:
5767 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
5768 data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
5769 new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array);
5770 gimple_call_set_lhs (new_stmt, data_ref);
5771 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5773 else
5775 new_stmt = NULL;
5776 if (grouped_store)
5778 if (j == 0)
5779 result_chain.create (group_size);
5780 /* Permute. */
5781 vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
5782 &result_chain);
5785 next_stmt = first_stmt;
5786 for (i = 0; i < vec_num; i++)
5788 unsigned align, misalign;
5790 if (i > 0)
5791 /* Bump the vector pointer. */
5792 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
5793 stmt, NULL_TREE);
5795 if (slp)
5796 vec_oprnd = vec_oprnds[i];
5797 else if (grouped_store)
5798 /* For grouped stores vectorized defs are interleaved in
5799 vect_permute_store_chain(). */
5800 vec_oprnd = result_chain[i];
5802 data_ref = fold_build2 (MEM_REF, TREE_TYPE (vec_oprnd),
5803 dataref_ptr,
5804 dataref_offset
5805 ? dataref_offset
5806 : build_int_cst (reference_alias_ptr_type
5807 (DR_REF (first_dr)), 0));
5808 align = TYPE_ALIGN_UNIT (vectype);
5809 if (aligned_access_p (first_dr))
5810 misalign = 0;
5811 else if (DR_MISALIGNMENT (first_dr) == -1)
5813 if (DR_VECT_AUX (first_dr)->base_element_aligned)
5814 align = TYPE_ALIGN_UNIT (elem_type);
5815 else
5816 align = get_object_alignment (DR_REF (first_dr))
5817 / BITS_PER_UNIT;
5818 misalign = 0;
5819 TREE_TYPE (data_ref)
5820 = build_aligned_type (TREE_TYPE (data_ref),
5821 align * BITS_PER_UNIT);
5823 else
5825 TREE_TYPE (data_ref)
5826 = build_aligned_type (TREE_TYPE (data_ref),
5827 TYPE_ALIGN (elem_type));
5828 misalign = DR_MISALIGNMENT (first_dr);
5830 if (dataref_offset == NULL_TREE
5831 && TREE_CODE (dataref_ptr) == SSA_NAME)
5832 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
5833 misalign);
5835 if (negative
5836 && dt != vect_constant_def
5837 && dt != vect_external_def)
5839 tree perm_mask = perm_mask_for_reverse (vectype);
5840 tree perm_dest
5841 = vect_create_destination_var (gimple_assign_rhs1 (stmt),
5842 vectype);
5843 tree new_temp = make_ssa_name (perm_dest);
5845 /* Generate the permute statement. */
5846 gimple *perm_stmt
5847 = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
5848 vec_oprnd, perm_mask);
5849 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5851 perm_stmt = SSA_NAME_DEF_STMT (new_temp);
5852 vec_oprnd = new_temp;
5855 /* Arguments are ready. Create the new vector stmt. */
5856 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
5857 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5859 if (slp)
5860 continue;
5862 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5863 if (!next_stmt)
5864 break;
5867 if (!slp)
5869 if (j == 0)
5870 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5871 else
5872 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5873 prev_stmt_info = vinfo_for_stmt (new_stmt);
5877 dr_chain.release ();
5878 oprnds.release ();
5879 result_chain.release ();
5880 vec_oprnds.release ();
5882 return true;
5885 /* Given a vector type VECTYPE, turns permutation SEL into the equivalent
5886 VECTOR_CST mask. No checks are made that the target platform supports the
5887 mask, so callers may wish to test can_vec_perm_p separately, or use
5888 vect_gen_perm_mask_checked. */
5890 tree
5891 vect_gen_perm_mask_any (tree vectype, const unsigned char *sel)
5893 tree mask_elt_type, mask_type, mask_vec, *mask_elts;
5894 int i, nunits;
5896 nunits = TYPE_VECTOR_SUBPARTS (vectype);
5898 mask_elt_type = lang_hooks.types.type_for_mode
5899 (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
5900 mask_type = get_vectype_for_scalar_type (mask_elt_type);
5902 mask_elts = XALLOCAVEC (tree, nunits);
5903 for (i = nunits - 1; i >= 0; i--)
5904 mask_elts[i] = build_int_cst (mask_elt_type, sel[i]);
5905 mask_vec = build_vector (mask_type, mask_elts);
5907 return mask_vec;
5910 /* Checked version of vect_gen_perm_mask_any. Asserts can_vec_perm_p,
5911 i.e. that the target supports the pattern _for arbitrary input vectors_. */
5913 tree
5914 vect_gen_perm_mask_checked (tree vectype, const unsigned char *sel)
5916 gcc_assert (can_vec_perm_p (TYPE_MODE (vectype), false, sel));
5917 return vect_gen_perm_mask_any (vectype, sel);
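
/* Illustrative usage sketch (hypothetical caller code, not part of this
   file): to build the element-reversing mask that perm_mask_for_reverse
   uses, one could write

     unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
     for (i = 0; i < nunits; ++i)
       sel[i] = nunits - 1 - i;
     if (can_vec_perm_p (TYPE_MODE (vectype), false, sel))
       mask = vect_gen_perm_mask_checked (vectype, sel);

   vect_gen_perm_mask_any performs no such check and is only safe when the
   caller has already verified target support.  */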
5920 /* Given vector variables X and Y that were generated for the scalar
5921 STMT, generate instructions to permute the vector elements of X and Y
5922 using permutation mask MASK_VEC, insert them at *GSI and return the
5923 permuted vector variable. */
5925 static tree
5926 permute_vec_elements (tree x, tree y, tree mask_vec, gimple *stmt,
5927 gimple_stmt_iterator *gsi)
5929 tree vectype = TREE_TYPE (x);
5930 tree perm_dest, data_ref;
5931 gimple *perm_stmt;
5933 perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
5934 data_ref = make_ssa_name (perm_dest);
5936 /* Generate the permute statement. */
5937 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
5938 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5940 return data_ref;
5943 /* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
5944 inserting them on the loop's preheader edge. Returns true if we
5945 were successful in doing so (and thus STMT can then be moved),
5946 otherwise returns false. */
5948 static bool
5949 hoist_defs_of_uses (gimple *stmt, struct loop *loop)
5951 ssa_op_iter i;
5952 tree op;
5953 bool any = false;
5955 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
5957 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
5958 if (!gimple_nop_p (def_stmt)
5959 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
5961 /* Make sure we don't need to recurse. While we could do
5962 so in simple cases, when there are more complex use webs
5963 we don't have an easy way to preserve stmt order to fulfil
5964 dependencies within them. */
5965 tree op2;
5966 ssa_op_iter i2;
5967 if (gimple_code (def_stmt) == GIMPLE_PHI)
5968 return false;
5969 FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
5971 gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
5972 if (!gimple_nop_p (def_stmt2)
5973 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
5974 return false;
5976 any = true;
5980 if (!any)
5981 return true;
5983 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
5985 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
5986 if (!gimple_nop_p (def_stmt)
5987 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
5989 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
5990 gsi_remove (&gsi, false);
5991 gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
5995 return true;
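
/* For example (hypothetical GIMPLE, names invented for illustration): if
   STMT is the load  _3 = *p_2  and  p_2 = &a[n_1]  is defined inside LOOP
   but uses only values defined outside of it, the definition of p_2 is
   moved onto the preheader edge so that the caller can then hoist STMT
   itself.  A PHI definition or a deeper chain of in-loop definitions makes
   this function give up and return false.  */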
5998 /* vectorizable_load.
6000 Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
6001 can be vectorized.
6002 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
6003 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
6004 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
6006 static bool
6007 vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
6008 slp_tree slp_node, slp_instance slp_node_instance)
6010 tree scalar_dest;
6011 tree vec_dest = NULL;
6012 tree data_ref = NULL;
6013 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
6014 stmt_vec_info prev_stmt_info;
6015 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6016 struct loop *loop = NULL;
6017 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
6018 bool nested_in_vect_loop = false;
6019 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
6020 tree elem_type;
6021 tree new_temp;
6022 machine_mode mode;
6023 gimple *new_stmt = NULL;
6024 tree dummy;
6025 enum dr_alignment_support alignment_support_scheme;
6026 tree dataref_ptr = NULL_TREE;
6027 tree dataref_offset = NULL_TREE;
6028 gimple *ptr_incr = NULL;
6029 int ncopies;
6030 int i, j, group_size = -1, group_gap_adj;
6031 tree msq = NULL_TREE, lsq;
6032 tree offset = NULL_TREE;
6033 tree byte_offset = NULL_TREE;
6034 tree realignment_token = NULL_TREE;
6035 gphi *phi = NULL;
6036 vec<tree> dr_chain = vNULL;
6037 bool grouped_load = false;
6038 bool load_lanes_p = false;
6039 gimple *first_stmt;
6040 bool inv_p;
6041 bool negative = false;
6042 bool compute_in_loop = false;
6043 struct loop *at_loop;
6044 int vec_num;
6045 bool slp = (slp_node != NULL);
6046 bool slp_perm = false;
6047 enum tree_code code;
6048 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
6049 int vf;
6050 tree aggr_type;
6051 tree gather_base = NULL_TREE, gather_off = NULL_TREE;
6052 tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
6053 int gather_scale = 1;
6054 enum vect_def_type gather_dt = vect_unknown_def_type;
6055 vec_info *vinfo = stmt_info->vinfo;
6057 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
6058 return false;
6060 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
6061 return false;
6063 /* Is vectorizable load? */
6064 if (!is_gimple_assign (stmt))
6065 return false;
6067 scalar_dest = gimple_assign_lhs (stmt);
6068 if (TREE_CODE (scalar_dest) != SSA_NAME)
6069 return false;
6071 code = gimple_assign_rhs_code (stmt);
6072 if (code != ARRAY_REF
6073 && code != BIT_FIELD_REF
6074 && code != INDIRECT_REF
6075 && code != COMPONENT_REF
6076 && code != IMAGPART_EXPR
6077 && code != REALPART_EXPR
6078 && code != MEM_REF
6079 && TREE_CODE_CLASS (code) != tcc_declaration)
6080 return false;
6082 if (!STMT_VINFO_DATA_REF (stmt_info))
6083 return false;
6085 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
6086 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
6088 if (loop_vinfo)
6090 loop = LOOP_VINFO_LOOP (loop_vinfo);
6091 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
6092 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
6094 else
6095 vf = 1;
6097 /* Multiple types in SLP are handled by creating the appropriate number of
6098 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
6099 case of SLP. */
6100 if (slp || PURE_SLP_STMT (stmt_info))
6101 ncopies = 1;
6102 else
6103 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
6105 gcc_assert (ncopies >= 1);
6107 /* FORNOW. This restriction should be relaxed. */
6108 if (nested_in_vect_loop && ncopies > 1)
6110 if (dump_enabled_p ())
6111 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6112 "multiple types in nested loop.\n");
6113 return false;
6116 /* Invalidate assumptions made by dependence analysis when vectorization
6117 on the unrolled body effectively re-orders stmts. */
6118 if (ncopies > 1
6119 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
6120 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6121 > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
6123 if (dump_enabled_p ())
6124 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6125 "cannot perform implicit CSE when unrolling "
6126 "with negative dependence distance\n");
6127 return false;
6130 elem_type = TREE_TYPE (vectype);
6131 mode = TYPE_MODE (vectype);
6133 /* FORNOW. In some cases we can vectorize even if the data type is not
6134 supported (e.g. data copies). */
6135 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
6137 if (dump_enabled_p ())
6138 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6139 "Aligned load, but unsupported type.\n");
6140 return false;
6143 /* Check if the load is a part of an interleaving chain. */
6144 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
6146 grouped_load = true;
6147 /* FORNOW */
6148 gcc_assert (!nested_in_vect_loop && !STMT_VINFO_GATHER_SCATTER_P (stmt_info));
6150 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
6152 /* If this is single-element interleaving with an element distance
6153 that leaves unused vector loads around, punt - we would otherwise
6154 create very sub-optimal code (and blow up memory,
6155 see PR65518).
6156 if (first_stmt == stmt
6157 && !GROUP_NEXT_ELEMENT (stmt_info)
6158 && GROUP_SIZE (stmt_info) > TYPE_VECTOR_SUBPARTS (vectype))
6160 if (dump_enabled_p ())
6161 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6162 "single-element interleaving not supported "
6163 "for not adjacent vector loads\n");
6164 return false;
6167 if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
6168 slp_perm = true;
6170 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
6171 if (!slp
6172 && !PURE_SLP_STMT (stmt_info)
6173 && !STMT_VINFO_STRIDED_P (stmt_info))
6175 if (vect_load_lanes_supported (vectype, group_size))
6176 load_lanes_p = true;
6177 else if (!vect_grouped_load_supported (vectype, group_size))
6178 return false;
6181 /* Invalidate assumptions made by dependence analysis when vectorization
6182 on the unrolled body effectively re-orders stmts. */
6183 if (!PURE_SLP_STMT (stmt_info)
6184 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
6185 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6186 > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
6188 if (dump_enabled_p ())
6189 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6190 "cannot perform implicit CSE when performing "
6191 "group loads with negative dependence distance\n");
6192 return false;
6195 /* Similarly, when the stmt is a load that is both part of an SLP
6196 instance and a loop-vectorized stmt via the same-dr mechanism,
6197 we have to give up. */
6198 if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)
6199 && (STMT_SLP_TYPE (stmt_info)
6200 != STMT_SLP_TYPE (vinfo_for_stmt
6201 (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)))))
6203 if (dump_enabled_p ())
6204 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6205 "conflicting SLP types for CSEd load\n");
6206 return false;
6211 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
6213 gimple *def_stmt;
6214 gather_decl = vect_check_gather_scatter (stmt, loop_vinfo, &gather_base,
6215 &gather_off, &gather_scale);
6216 gcc_assert (gather_decl);
6217 if (!vect_is_simple_use (gather_off, vinfo, &def_stmt, &gather_dt,
6218 &gather_off_vectype))
6220 if (dump_enabled_p ())
6221 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6222 "gather index use not simple.\n");
6223 return false;
6226 else if (STMT_VINFO_STRIDED_P (stmt_info))
6228 if ((grouped_load
6229 && (slp || PURE_SLP_STMT (stmt_info)))
6230 && (group_size > nunits
6231 || nunits % group_size != 0))
6233 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6234 "unhandled strided group load\n");
6235 return false;
6238 else
6240 negative = tree_int_cst_compare (nested_in_vect_loop
6241 ? STMT_VINFO_DR_STEP (stmt_info)
6242 : DR_STEP (dr),
6243 size_zero_node) < 0;
6244 if (negative && ncopies > 1)
6246 if (dump_enabled_p ())
6247 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6248 "multiple types with negative step.\n");
6249 return false;
6252 if (negative)
6254 if (grouped_load)
6256 if (dump_enabled_p ())
6257 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6258 "negative step for group load not supported"
6259 "\n");
6260 return false;
6262 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
6263 if (alignment_support_scheme != dr_aligned
6264 && alignment_support_scheme != dr_unaligned_supported)
6266 if (dump_enabled_p ())
6267 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6268 "negative step but alignment required.\n");
6269 return false;
6271 if (!perm_mask_for_reverse (vectype))
6273 if (dump_enabled_p ())
6274 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6275 "negative step and reversing not supported."
6276 "\n");
6277 return false;
6282 if (!vec_stmt) /* transformation not required. */
6284 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
6285 /* The SLP costs are calculated during SLP analysis. */
6286 if (!PURE_SLP_STMT (stmt_info))
6287 vect_model_load_cost (stmt_info, ncopies, load_lanes_p,
6288 NULL, NULL, NULL);
6289 return true;
6292 if (dump_enabled_p ())
6293 dump_printf_loc (MSG_NOTE, vect_location,
6294 "transform load. ncopies = %d\n", ncopies);
6296 /** Transform. **/
6298 ensure_base_align (stmt_info, dr);
6300 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
6302 tree vec_oprnd0 = NULL_TREE, op;
6303 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
6304 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
6305 tree ptr, mask, var, scale, merge, perm_mask = NULL_TREE, prev_res = NULL_TREE;
6306 edge pe = loop_preheader_edge (loop);
6307 gimple_seq seq;
6308 basic_block new_bb;
6309 enum { NARROW, NONE, WIDEN } modifier;
6310 int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);
6312 if (nunits == gather_off_nunits)
6313 modifier = NONE;
6314 else if (nunits == gather_off_nunits / 2)
6316 unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
6317 modifier = WIDEN;
6319 for (i = 0; i < gather_off_nunits; ++i)
6320 sel[i] = i | nunits;
6322 perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
6324 else if (nunits == gather_off_nunits * 2)
6326 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
6327 modifier = NARROW;
6329 for (i = 0; i < nunits; ++i)
6330 sel[i] = i < gather_off_nunits
6331 ? i : i + nunits - gather_off_nunits;
6333 perm_mask = vect_gen_perm_mask_checked (vectype, sel);
6334 ncopies *= 2;
6336 else
6337 gcc_unreachable ();
6339 rettype = TREE_TYPE (TREE_TYPE (gather_decl));
6340 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6341 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6342 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6343 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6344 scaletype = TREE_VALUE (arglist);
6345 gcc_checking_assert (types_compatible_p (srctype, rettype));
6347 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6349 ptr = fold_convert (ptrtype, gather_base);
6350 if (!is_gimple_min_invariant (ptr))
6352 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
6353 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
6354 gcc_assert (!new_bb);
6357 /* Currently we support only unconditional gather loads,
6358 so mask should be all ones. */
6359 if (TREE_CODE (masktype) == INTEGER_TYPE)
6360 mask = build_int_cst (masktype, -1);
6361 else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
6363 mask = build_int_cst (TREE_TYPE (masktype), -1);
6364 mask = build_vector_from_val (masktype, mask);
6365 mask = vect_init_vector (stmt, mask, masktype, NULL);
6367 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
6369 REAL_VALUE_TYPE r;
6370 long tmp[6];
6371 for (j = 0; j < 6; ++j)
6372 tmp[j] = -1;
6373 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
6374 mask = build_real (TREE_TYPE (masktype), r);
6375 mask = build_vector_from_val (masktype, mask);
6376 mask = vect_init_vector (stmt, mask, masktype, NULL);
6378 else
6379 gcc_unreachable ();
6381 scale = build_int_cst (scaletype, gather_scale);
6383 if (TREE_CODE (TREE_TYPE (rettype)) == INTEGER_TYPE)
6384 merge = build_int_cst (TREE_TYPE (rettype), 0);
6385 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype)))
6387 REAL_VALUE_TYPE r;
6388 long tmp[6];
6389 for (j = 0; j < 6; ++j)
6390 tmp[j] = 0;
6391 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (rettype)));
6392 merge = build_real (TREE_TYPE (rettype), r);
6394 else
6395 gcc_unreachable ();
6396 merge = build_vector_from_val (rettype, merge);
6397 merge = vect_init_vector (stmt, merge, rettype, NULL);
6399 prev_stmt_info = NULL;
6400 for (j = 0; j < ncopies; ++j)
6402 if (modifier == WIDEN && (j & 1))
6403 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
6404 perm_mask, stmt, gsi);
6405 else if (j == 0)
6406 op = vec_oprnd0
6407 = vect_get_vec_def_for_operand (gather_off, stmt);
6408 else
6409 op = vec_oprnd0
6410 = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);
6412 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
6414 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
6415 == TYPE_VECTOR_SUBPARTS (idxtype));
6416 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
6417 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
6418 new_stmt
6419 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
6420 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6421 op = var;
6424 new_stmt
6425 = gimple_build_call (gather_decl, 5, merge, ptr, op, mask, scale);
6427 if (!useless_type_conversion_p (vectype, rettype))
6429 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
6430 == TYPE_VECTOR_SUBPARTS (rettype));
6431 op = vect_get_new_ssa_name (rettype, vect_simple_var);
6432 gimple_call_set_lhs (new_stmt, op);
6433 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6434 var = make_ssa_name (vec_dest);
6435 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
6436 new_stmt
6437 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
6439 else
6441 var = make_ssa_name (vec_dest, new_stmt);
6442 gimple_call_set_lhs (new_stmt, var);
6445 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6447 if (modifier == NARROW)
6449 if ((j & 1) == 0)
6451 prev_res = var;
6452 continue;
6454 var = permute_vec_elements (prev_res, var,
6455 perm_mask, stmt, gsi);
6456 new_stmt = SSA_NAME_DEF_STMT (var);
6459 if (prev_stmt_info == NULL)
6460 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6461 else
6462 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6463 prev_stmt_info = vinfo_for_stmt (new_stmt);
6465 return true;
6467 else if (STMT_VINFO_STRIDED_P (stmt_info))
6469 gimple_stmt_iterator incr_gsi;
6470 bool insert_after;
6471 gimple *incr;
6472 tree offvar;
6473 tree ivstep;
6474 tree running_off;
6475 vec<constructor_elt, va_gc> *v = NULL;
6476 gimple_seq stmts = NULL;
6477 tree stride_base, stride_step, alias_off;
6479 gcc_assert (!nested_in_vect_loop);
6481 if (slp && grouped_load)
6482 first_dr = STMT_VINFO_DATA_REF
6483 (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
6484 else
6485 first_dr = dr;
6487 stride_base
6488 = fold_build_pointer_plus
6489 (DR_BASE_ADDRESS (first_dr),
6490 size_binop (PLUS_EXPR,
6491 convert_to_ptrofftype (DR_OFFSET (first_dr)),
6492 convert_to_ptrofftype (DR_INIT (first_dr))));
6493 stride_step = fold_convert (sizetype, DR_STEP (first_dr));
6495 /* For a load with loop-invariant (but other than power-of-2)
6496 stride (i.e. not a grouped access) like so:
6498 for (i = 0; i < n; i += stride)
6499 ... = array[i];
6501 we generate a new induction variable and new accesses to
6502 form a new vector (or vectors, depending on ncopies):
6504 for (j = 0; ; j += VF*stride)
6505 tmp1 = array[j];
6506 tmp2 = array[j + stride];
6508 vectemp = {tmp1, tmp2, ...}
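   A concrete instance (numbers chosen only to illustrate the scheme above):
   with VF == 4 and a 4-element vectype, one vector iteration would load

     tmp1 = array[j];
     tmp2 = array[j + stride];
     tmp3 = array[j + 2*stride];
     tmp4 = array[j + 3*stride];
     vectemp = {tmp1, tmp2, tmp3, tmp4};

   and the new induction variable j advances by 4*stride per iteration.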
6511 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
6512 build_int_cst (TREE_TYPE (stride_step), vf));
6514 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
6516 create_iv (unshare_expr (stride_base), unshare_expr (ivstep), NULL,
6517 loop, &incr_gsi, insert_after,
6518 &offvar, NULL);
6519 incr = gsi_stmt (incr_gsi);
6520 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
6522 stride_step = force_gimple_operand (unshare_expr (stride_step),
6523 &stmts, true, NULL_TREE);
6524 if (stmts)
6525 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
6527 prev_stmt_info = NULL;
6528 running_off = offvar;
6529 alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)), 0);
6530 int nloads = nunits;
6531 tree ltype = TREE_TYPE (vectype);
6532 auto_vec<tree> dr_chain;
6533 if (slp)
6535 nloads = nunits / group_size;
6536 if (group_size < nunits)
6537 ltype = build_vector_type (TREE_TYPE (vectype), group_size);
6538 else
6539 ltype = vectype;
6540 ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
6541 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6542 if (slp_perm)
6543 dr_chain.create (ncopies);
6545 for (j = 0; j < ncopies; j++)
6547 tree vec_inv;
6549 if (nloads > 1)
6551 vec_alloc (v, nloads);
6552 for (i = 0; i < nloads; i++)
6554 tree newref, newoff;
6555 gimple *incr;
6556 newref = build2 (MEM_REF, ltype, running_off, alias_off);
6558 newref = force_gimple_operand_gsi (gsi, newref, true,
6559 NULL_TREE, true,
6560 GSI_SAME_STMT);
6561 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, newref);
6562 newoff = copy_ssa_name (running_off);
6563 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6564 running_off, stride_step);
6565 vect_finish_stmt_generation (stmt, incr, gsi);
6567 running_off = newoff;
6570 vec_inv = build_constructor (vectype, v);
6571 new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
6572 new_stmt = SSA_NAME_DEF_STMT (new_temp);
6574 else
6576 new_stmt = gimple_build_assign (make_ssa_name (ltype),
6577 build2 (MEM_REF, ltype,
6578 running_off, alias_off));
6579 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6581 tree newoff = copy_ssa_name (running_off);
6582 gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6583 running_off, stride_step);
6584 vect_finish_stmt_generation (stmt, incr, gsi);
6586 running_off = newoff;
6589 if (slp)
6591 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
6592 if (slp_perm)
6593 dr_chain.quick_push (gimple_assign_lhs (new_stmt));
6595 else
6597 if (j == 0)
6598 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6599 else
6600 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6601 prev_stmt_info = vinfo_for_stmt (new_stmt);
6604 if (slp_perm)
6605 vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
6606 slp_node_instance, false);
6607 return true;
6610 if (grouped_load)
6612 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
6613 if (slp
6614 && !SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()
6615 && first_stmt != SLP_TREE_SCALAR_STMTS (slp_node)[0])
6616 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
6618 /* Check if the chain of loads is already vectorized. */
6619 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
6620 /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
6621 ??? But we can only do so if there is exactly one
6622 as we have no way to get at the rest. Leave the CSE
6623 opportunity alone.
6624 ??? With the group load eventually participating
6625 in multiple different permutations (having multiple
6626 slp nodes which refer to the same group) the CSE
6627 is even wrong code. See PR56270. */
6628 && !slp)
6630 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
6631 return true;
6633 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
6634 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
6635 group_gap_adj = 0;
6637 /* VEC_NUM is the number of vect stmts to be created for this group. */
6638 if (slp)
6640 grouped_load = false;
6641 /* For SLP permutation support we need to load the whole group,
6642 not only the number of vector stmts the permutation result
6643 fits in. */
6644 if (slp_perm)
6645 vec_num = (group_size * vf + nunits - 1) / nunits;
6646 else
6647 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6648 group_gap_adj = vf * group_size - nunits * vec_num;
6650 else
6651 vec_num = group_size;
6653 else
6655 first_stmt = stmt;
6656 first_dr = dr;
6657 group_size = vec_num = 1;
6658 group_gap_adj = 0;
6661 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
6662 gcc_assert (alignment_support_scheme);
6663 /* Targets with load-lane instructions must not require explicit
6664 realignment. */
6665 gcc_assert (!load_lanes_p
6666 || alignment_support_scheme == dr_aligned
6667 || alignment_support_scheme == dr_unaligned_supported);
6669 /* In case the vectorization factor (VF) is bigger than the number
6670 of elements that we can fit in a vectype (nunits), we have to generate
6671 more than one vector stmt - i.e., we need to "unroll" the
6672 vector stmt by a factor VF/nunits. In doing so, we record a pointer
6673 from one copy of the vector stmt to the next, in the field
6674 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
6675 stages to find the correct vector defs to be used when vectorizing
6676 stmts that use the defs of the current stmt. The example below
6677 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
6678 need to create 4 vectorized stmts):
6680 before vectorization:
6681 RELATED_STMT VEC_STMT
6682 S1: x = memref - -
6683 S2: z = x + 1 - -
6685 step 1: vectorize stmt S1:
6686 We first create the vector stmt VS1_0, and, as usual, record a
6687 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
6688 Next, we create the vector stmt VS1_1, and record a pointer to
6689 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
6690 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
6691 stmts and pointers:
6692 RELATED_STMT VEC_STMT
6693 VS1_0: vx0 = memref0 VS1_1 -
6694 VS1_1: vx1 = memref1 VS1_2 -
6695 VS1_2: vx2 = memref2 VS1_3 -
6696 VS1_3: vx3 = memref3 - -
6697 S1: x = load - VS1_0
6698 S2: z = x + 1 - -
6700 See the documentation of vect_get_vec_def_for_stmt_copy for how the
6701 information we recorded in the RELATED_STMT field is used to vectorize
6702 stmt S2. */
6704 /* In case of interleaving (non-unit grouped access):
6706 S1: x2 = &base + 2
6707 S2: x0 = &base
6708 S3: x1 = &base + 1
6709 S4: x3 = &base + 3
6711 Vectorized loads are created in the order of memory accesses
6712 starting from the access of the first stmt of the chain:
6714 VS1: vx0 = &base
6715 VS2: vx1 = &base + vec_size*1
6716 VS3: vx2 = &base + vec_size*2
6717 VS4: vx3 = &base + vec_size*3
6719 Then permutation statements are generated:
6721 VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
6722 VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
6725 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
6726 (the order of the data-refs in the output of vect_permute_load_chain
6727 corresponds to the order of scalar stmts in the interleaving chain - see
6728 the documentation of vect_permute_load_chain()).
6729 The generation of permutation stmts and recording them in
6730 STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().
6732 In case of both multiple types and interleaving, the vector loads and
6733 permutation stmts above are created for every copy. The result vector
6734 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
6735 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
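
/* As a small worked case (chosen for illustration only): with GROUP_SIZE == 2
   and 4-element vectors, the two loaded vectors vx0 = {a0,b0,a1,b1} and
   vx1 = {a2,b2,a3,b3} are de-interleaved by

     VEC_PERM_EXPR <vx0, vx1, {0, 2, 4, 6}> = {a0,a1,a2,a3}
     VEC_PERM_EXPR <vx0, vx1, {1, 3, 5, 7}> = {b0,b1,b2,b3}

   matching the extract-even/extract-odd masks sketched above.  */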
6737 /* If the data reference is aligned (dr_aligned) or potentially unaligned
6738 on a target that supports unaligned accesses (dr_unaligned_supported)
6739 we generate the following code:
6740 p = initial_addr;
6741 indx = 0;
6742 loop {
6743 p = p + indx * vectype_size;
6744 vec_dest = *(p);
6745 indx = indx + 1;
6748 Otherwise, the data reference is potentially unaligned on a target that
6749 does not support unaligned accesses (dr_explicit_realign_optimized) -
6750 then generate the following code, in which the data in each iteration is
6751 obtained by two vector loads, one from the previous iteration, and one
6752 from the current iteration:
6753 p1 = initial_addr;
6754 msq_init = *(floor(p1))
6755 p2 = initial_addr + VS - 1;
6756 realignment_token = call target_builtin;
6757 indx = 0;
6758 loop {
6759 p2 = p2 + indx * vectype_size
6760 lsq = *(floor(p2))
6761 vec_dest = realign_load (msq, lsq, realignment_token)
6762 indx = indx + 1;
6763 msq = lsq;
6764 } */
6766 /* If the misalignment remains the same throughout the execution of the
6767 loop, we can create the init_addr and permutation mask at the loop
6768 preheader. Otherwise, it needs to be created inside the loop.
6769 This can only occur when vectorizing memory accesses in the inner-loop
6770 nested within an outer-loop that is being vectorized. */
6772 if (nested_in_vect_loop
6773 && (TREE_INT_CST_LOW (DR_STEP (dr))
6774 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
6776 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
6777 compute_in_loop = true;
6780 if ((alignment_support_scheme == dr_explicit_realign_optimized
6781 || alignment_support_scheme == dr_explicit_realign)
6782 && !compute_in_loop)
6784 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
6785 alignment_support_scheme, NULL_TREE,
6786 &at_loop);
6787 if (alignment_support_scheme == dr_explicit_realign_optimized)
6789 phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
6790 byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
6791 size_one_node);
6794 else
6795 at_loop = loop;
6797 if (negative)
6798 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
6800 if (load_lanes_p)
6801 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
6802 else
6803 aggr_type = vectype;
6805 prev_stmt_info = NULL;
6806 for (j = 0; j < ncopies; j++)
6808 /* 1. Create the vector or array pointer update chain. */
6809 if (j == 0)
6811 bool simd_lane_access_p
6812 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
6813 if (simd_lane_access_p
6814 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
6815 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
6816 && integer_zerop (DR_OFFSET (first_dr))
6817 && integer_zerop (DR_INIT (first_dr))
6818 && alias_sets_conflict_p (get_alias_set (aggr_type),
6819 get_alias_set (DR_REF (first_dr)))
6820 && (alignment_support_scheme == dr_aligned
6821 || alignment_support_scheme == dr_unaligned_supported))
6823 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
6824 dataref_offset = build_int_cst (reference_alias_ptr_type
6825 (DR_REF (first_dr)), 0);
6826 inv_p = false;
6828 else
6829 dataref_ptr
6830 = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
6831 offset, &dummy, gsi, &ptr_incr,
6832 simd_lane_access_p, &inv_p,
6833 byte_offset);
6835 else if (dataref_offset)
6836 dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
6837 TYPE_SIZE_UNIT (aggr_type));
6838 else
6839 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
6840 TYPE_SIZE_UNIT (aggr_type));
6842 if (grouped_load || slp_perm)
6843 dr_chain.create (vec_num);
6845 if (load_lanes_p)
6847 tree vec_array;
6849 vec_array = create_vector_array (vectype, vec_num);
6851 /* Emit:
6852 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
6853 data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
6854 new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
6855 gimple_call_set_lhs (new_stmt, vec_array);
6856 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6858 /* Extract each vector into an SSA_NAME. */
6859 for (i = 0; i < vec_num; i++)
6861 new_temp = read_vector_array (stmt, gsi, scalar_dest,
6862 vec_array, i);
6863 dr_chain.quick_push (new_temp);
6866 /* Record the mapping between SSA_NAMEs and statements. */
6867 vect_record_grouped_load_vectors (stmt, dr_chain);
6869 else
6871 for (i = 0; i < vec_num; i++)
6873 if (i > 0)
6874 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
6875 stmt, NULL_TREE);
6877 /* 2. Create the vector-load in the loop. */
6878 switch (alignment_support_scheme)
6880 case dr_aligned:
6881 case dr_unaligned_supported:
6883 unsigned int align, misalign;
6885 data_ref
6886 = fold_build2 (MEM_REF, vectype, dataref_ptr,
6887 dataref_offset
6888 ? dataref_offset
6889 : build_int_cst (reference_alias_ptr_type
6890 (DR_REF (first_dr)), 0));
6891 align = TYPE_ALIGN_UNIT (vectype);
6892 if (alignment_support_scheme == dr_aligned)
6894 gcc_assert (aligned_access_p (first_dr));
6895 misalign = 0;
6897 else if (DR_MISALIGNMENT (first_dr) == -1)
6899 if (DR_VECT_AUX (first_dr)->base_element_aligned)
6900 align = TYPE_ALIGN_UNIT (elem_type);
6901 else
6902 align = (get_object_alignment (DR_REF (first_dr))
6903 / BITS_PER_UNIT);
6904 misalign = 0;
6905 TREE_TYPE (data_ref)
6906 = build_aligned_type (TREE_TYPE (data_ref),
6907 align * BITS_PER_UNIT);
6909 else
6911 TREE_TYPE (data_ref)
6912 = build_aligned_type (TREE_TYPE (data_ref),
6913 TYPE_ALIGN (elem_type));
6914 misalign = DR_MISALIGNMENT (first_dr);
6916 if (dataref_offset == NULL_TREE
6917 && TREE_CODE (dataref_ptr) == SSA_NAME)
6918 set_ptr_info_alignment (get_ptr_info (dataref_ptr),
6919 align, misalign);
6920 break;
6922 case dr_explicit_realign:
6924 tree ptr, bump;
6926 tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));
6928 if (compute_in_loop)
6929 msq = vect_setup_realignment (first_stmt, gsi,
6930 &realignment_token,
6931 dr_explicit_realign,
6932 dataref_ptr, NULL);
6934 if (TREE_CODE (dataref_ptr) == SSA_NAME)
6935 ptr = copy_ssa_name (dataref_ptr);
6936 else
6937 ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
6938 new_stmt = gimple_build_assign
6939 (ptr, BIT_AND_EXPR, dataref_ptr,
6940 build_int_cst
6941 (TREE_TYPE (dataref_ptr),
6942 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
6943 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6944 data_ref
6945 = build2 (MEM_REF, vectype, ptr,
6946 build_int_cst (reference_alias_ptr_type
6947 (DR_REF (first_dr)), 0));
6948 vec_dest = vect_create_destination_var (scalar_dest,
6949 vectype);
6950 new_stmt = gimple_build_assign (vec_dest, data_ref);
6951 new_temp = make_ssa_name (vec_dest, new_stmt);
6952 gimple_assign_set_lhs (new_stmt, new_temp);
6953 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
6954 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
6955 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6956 msq = new_temp;
6958 bump = size_binop (MULT_EXPR, vs,
6959 TYPE_SIZE_UNIT (elem_type));
6960 bump = size_binop (MINUS_EXPR, bump, size_one_node);
6961 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
6962 new_stmt = gimple_build_assign
6963 (NULL_TREE, BIT_AND_EXPR, ptr,
6964 build_int_cst
6965 (TREE_TYPE (ptr),
6966 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
6967 ptr = copy_ssa_name (ptr, new_stmt);
6968 gimple_assign_set_lhs (new_stmt, ptr);
6969 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6970 data_ref
6971 = build2 (MEM_REF, vectype, ptr,
6972 build_int_cst (reference_alias_ptr_type
6973 (DR_REF (first_dr)), 0));
6974 break;
6976 case dr_explicit_realign_optimized:
6977 if (TREE_CODE (dataref_ptr) == SSA_NAME)
6978 new_temp = copy_ssa_name (dataref_ptr);
6979 else
6980 new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
6981 new_stmt = gimple_build_assign
6982 (new_temp, BIT_AND_EXPR, dataref_ptr,
6983 build_int_cst
6984 (TREE_TYPE (dataref_ptr),
6985 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
6986 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6987 data_ref
6988 = build2 (MEM_REF, vectype, new_temp,
6989 build_int_cst (reference_alias_ptr_type
6990 (DR_REF (first_dr)), 0));
6991 break;
6992 default:
6993 gcc_unreachable ();
6995 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6996 new_stmt = gimple_build_assign (vec_dest, data_ref);
6997 new_temp = make_ssa_name (vec_dest, new_stmt);
6998 gimple_assign_set_lhs (new_stmt, new_temp);
6999 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7001 /* 3. Handle explicit realignment if necessary/supported.
7002 Create in loop:
7003 vec_dest = realign_load (msq, lsq, realignment_token) */
7004 if (alignment_support_scheme == dr_explicit_realign_optimized
7005 || alignment_support_scheme == dr_explicit_realign)
7007 lsq = gimple_assign_lhs (new_stmt);
7008 if (!realignment_token)
7009 realignment_token = dataref_ptr;
7010 vec_dest = vect_create_destination_var (scalar_dest, vectype);
7011 new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
7012 msq, lsq, realignment_token);
7013 new_temp = make_ssa_name (vec_dest, new_stmt);
7014 gimple_assign_set_lhs (new_stmt, new_temp);
7015 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7017 if (alignment_support_scheme == dr_explicit_realign_optimized)
7019 gcc_assert (phi);
7020 if (i == vec_num - 1 && j == ncopies - 1)
7021 add_phi_arg (phi, lsq,
7022 loop_latch_edge (containing_loop),
7023 UNKNOWN_LOCATION);
7024 msq = lsq;
7028 /* 4. Handle invariant-load. */
7029 if (inv_p && !bb_vinfo)
7031 gcc_assert (!grouped_load);
7032 /* If we have versioned for aliasing or the loop doesn't
7033 have any data dependencies that would preclude this,
7034 then we are sure this is a loop invariant load and
7035 thus we can insert it on the preheader edge. */
7036 if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
7037 && !nested_in_vect_loop
7038 && hoist_defs_of_uses (stmt, loop))
7040 if (dump_enabled_p ())
7042 dump_printf_loc (MSG_NOTE, vect_location,
7043 "hoisting out of the vectorized "
7044 "loop: ");
7045 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7047 tree tem = copy_ssa_name (scalar_dest);
7048 gsi_insert_on_edge_immediate
7049 (loop_preheader_edge (loop),
7050 gimple_build_assign (tem,
7051 unshare_expr
7052 (gimple_assign_rhs1 (stmt))));
7053 new_temp = vect_init_vector (stmt, tem, vectype, NULL);
7055 else
7057 gimple_stmt_iterator gsi2 = *gsi;
7058 gsi_next (&gsi2);
7059 new_temp = vect_init_vector (stmt, scalar_dest,
7060 vectype, &gsi2);
7062 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7063 set_vinfo_for_stmt (new_stmt,
7064 new_stmt_vec_info (new_stmt, vinfo));
7067 if (negative)
7069 tree perm_mask = perm_mask_for_reverse (vectype);
7070 new_temp = permute_vec_elements (new_temp, new_temp,
7071 perm_mask, stmt, gsi);
7072 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7075 /* Collect vector loads and later create their permutation in
7076 vect_transform_grouped_load (). */
7077 if (grouped_load || slp_perm)
7078 dr_chain.quick_push (new_temp);
7080 /* Store vector loads in the corresponding SLP_NODE. */
7081 if (slp && !slp_perm)
7082 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7084 /* Bump the vector pointer to account for a gap or for excess
7085 elements loaded for a permuted SLP load. */
7086 if (group_gap_adj != 0)
7088 bool ovf;
7089 tree bump
7090 = wide_int_to_tree (sizetype,
7091 wi::smul (TYPE_SIZE_UNIT (elem_type),
7092 group_gap_adj, &ovf));
7093 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7094 stmt, bump);
7098 if (slp && !slp_perm)
7099 continue;
7101 if (slp_perm)
7103 if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
7104 slp_node_instance, false))
7106 dr_chain.release ();
7107 return false;
7110 else
7112 if (grouped_load)
7114 if (!load_lanes_p)
7115 vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
7116 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
7118 else
7120 if (j == 0)
7121 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7122 else
7123 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7124 prev_stmt_info = vinfo_for_stmt (new_stmt);
7127 dr_chain.release ();
7130 return true;
7133 /* Function vect_is_simple_cond.
7135 Input:
7136 VINFO - the vect info of the loop or basic block being vectorized.
7137 COND - Condition that is checked for simple use.
7139 Output:
7140 *COMP_VECTYPE - the vector type for the comparison.
7142 Returns whether COND can be vectorized. Checks whether
7143 the condition operands are supportable using vect_is_simple_use.
7145 static bool
7146 vect_is_simple_cond (tree cond, vec_info *vinfo, tree *comp_vectype)
7148 tree lhs, rhs;
7149 enum vect_def_type dt;
7150 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
7152 if (!COMPARISON_CLASS_P (cond))
7153 return false;
7155 lhs = TREE_OPERAND (cond, 0);
7156 rhs = TREE_OPERAND (cond, 1);
7158 if (TREE_CODE (lhs) == SSA_NAME)
7160 gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
7161 if (!vect_is_simple_use (lhs, vinfo, &lhs_def_stmt, &dt, &vectype1))
7162 return false;
7164 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
7165 && TREE_CODE (lhs) != FIXED_CST)
7166 return false;
7168 if (TREE_CODE (rhs) == SSA_NAME)
7170 gimple *rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
7171 if (!vect_is_simple_use (rhs, vinfo, &rhs_def_stmt, &dt, &vectype2))
7172 return false;
7174 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
7175 && TREE_CODE (rhs) != FIXED_CST)
7176 return false;
7178 *comp_vectype = vectype1 ? vectype1 : vectype2;
7179 return true;
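
/* For instance (an illustrative case, not an exhaustive one): a condition
   like  a_1 < b_2  where a_1 and b_2 are SSA names with simple defs, or
   where one side is an INTEGER_CST/REAL_CST/FIXED_CST, is accepted, and
   *COMP_VECTYPE is taken from whichever operand supplied a vector type.  */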
7182 /* vectorizable_condition.
7184 Check if STMT is a conditional modify expression that can be vectorized.
7185 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
7186 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
7187 at GSI.
7189 When STMT is vectorized as nested cycle, REDUC_DEF is the vector variable
7190 to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and
7191 in the else clause if it is 2).
7193 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
7195 bool
7196 vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
7197 gimple **vec_stmt, tree reduc_def, int reduc_index,
7198 slp_tree slp_node)
7200 tree scalar_dest = NULL_TREE;
7201 tree vec_dest = NULL_TREE;
7202 tree cond_expr, then_clause, else_clause;
7203 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7204 tree comp_vectype = NULL_TREE;
7205 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
7206 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
7207 tree vec_compare, vec_cond_expr;
7208 tree new_temp;
7209 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7210 enum vect_def_type dt, dts[4];
7211 int ncopies;
7212 enum tree_code code;
7213 stmt_vec_info prev_stmt_info = NULL;
7214 int i, j;
7215 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7216 vec<tree> vec_oprnds0 = vNULL;
7217 vec<tree> vec_oprnds1 = vNULL;
7218 vec<tree> vec_oprnds2 = vNULL;
7219 vec<tree> vec_oprnds3 = vNULL;
7220 tree vec_cmp_type;
7222 if (reduc_index && STMT_SLP_TYPE (stmt_info))
7223 return false;
7225 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == TREE_CODE_REDUCTION)
7227 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
7228 return false;
7230 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
7231 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
7232 && reduc_def))
7233 return false;
7235 /* FORNOW: not yet supported. */
7236 if (STMT_VINFO_LIVE_P (stmt_info))
7238 if (dump_enabled_p ())
7239 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7240 "value used after loop.\n");
7241 return false;
7245 /* Is vectorizable conditional operation? */
7246 if (!is_gimple_assign (stmt))
7247 return false;
7249 code = gimple_assign_rhs_code (stmt);
7251 if (code != COND_EXPR)
7252 return false;
7254 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7255 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
7257 if (slp_node || PURE_SLP_STMT (stmt_info))
7258 ncopies = 1;
7259 else
7260 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
7262 gcc_assert (ncopies >= 1);
7263 if (reduc_index && ncopies > 1)
7264 return false; /* FORNOW */
7266 cond_expr = gimple_assign_rhs1 (stmt);
7267 then_clause = gimple_assign_rhs2 (stmt);
7268 else_clause = gimple_assign_rhs3 (stmt);
7270 if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo, &comp_vectype)
7271 || !comp_vectype)
7272 return false;
7274 gimple *def_stmt;
7275 if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &def_stmt, &dt))
7276 return false;
7277 if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &def_stmt, &dt))
7278 return false;
7280 vec_cmp_type = build_same_sized_truth_vector_type (comp_vectype);
7281 if (vec_cmp_type == NULL_TREE)
7282 return false;
7284 if (!vec_stmt)
7286 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
7287 return expand_vec_cond_expr_p (vectype, comp_vectype);
7290 /* Transform. */
7292 if (!slp_node)
7294 vec_oprnds0.create (1);
7295 vec_oprnds1.create (1);
7296 vec_oprnds2.create (1);
7297 vec_oprnds3.create (1);
7300 /* Handle def. */
7301 scalar_dest = gimple_assign_lhs (stmt);
7302 vec_dest = vect_create_destination_var (scalar_dest, vectype);
7304 /* Handle cond expr. */
7305 for (j = 0; j < ncopies; j++)
7307 gassign *new_stmt = NULL;
7308 if (j == 0)
7310 if (slp_node)
7312 auto_vec<tree, 4> ops;
7313 auto_vec<vec<tree>, 4> vec_defs;
7315 ops.safe_push (TREE_OPERAND (cond_expr, 0));
7316 ops.safe_push (TREE_OPERAND (cond_expr, 1));
7317 ops.safe_push (then_clause);
7318 ops.safe_push (else_clause);
7319 vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
7320 vec_oprnds3 = vec_defs.pop ();
7321 vec_oprnds2 = vec_defs.pop ();
7322 vec_oprnds1 = vec_defs.pop ();
7323 vec_oprnds0 = vec_defs.pop ();
7325 ops.release ();
7326 vec_defs.release ();
7328 else
7330 gimple *gtemp;
7331 vec_cond_lhs =
7332 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0), stmt);
7333 vect_is_simple_use (TREE_OPERAND (cond_expr, 0),
7334 loop_vinfo, &gtemp, &dts[0]);
7336 vec_cond_rhs =
7337 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
7338 stmt);
7339 vect_is_simple_use (TREE_OPERAND (cond_expr, 1),
7340 loop_vinfo, &gtemp, &dts[1]);
7341 if (reduc_index == 1)
7342 vec_then_clause = reduc_def;
7343 else
7345 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
7346 stmt);
7347 vect_is_simple_use (then_clause, loop_vinfo,
7348 &gtemp, &dts[2]);
7350 if (reduc_index == 2)
7351 vec_else_clause = reduc_def;
7352 else
7354 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
7355 stmt);
7356 vect_is_simple_use (else_clause, loop_vinfo, &gtemp, &dts[3]);
7360 else
7362 vec_cond_lhs = vect_get_vec_def_for_stmt_copy (dts[0],
7363 vec_oprnds0.pop ());
7364 vec_cond_rhs = vect_get_vec_def_for_stmt_copy (dts[1],
7365 vec_oprnds1.pop ());
7366 vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
7367 vec_oprnds2.pop ());
7368 vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
7369 vec_oprnds3.pop ());
7372 if (!slp_node)
7374 vec_oprnds0.quick_push (vec_cond_lhs);
7375 vec_oprnds1.quick_push (vec_cond_rhs);
7376 vec_oprnds2.quick_push (vec_then_clause);
7377 vec_oprnds3.quick_push (vec_else_clause);
7380 /* Arguments are ready. Create the new vector stmt. */
7381 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
7383 vec_cond_rhs = vec_oprnds1[i];
7384 vec_then_clause = vec_oprnds2[i];
7385 vec_else_clause = vec_oprnds3[i];
7387 vec_compare = build2 (TREE_CODE (cond_expr), vec_cmp_type,
7388 vec_cond_lhs, vec_cond_rhs);
7389 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
7390 vec_compare, vec_then_clause, vec_else_clause);
7392 new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
7393 new_temp = make_ssa_name (vec_dest, new_stmt);
7394 gimple_assign_set_lhs (new_stmt, new_temp);
7395 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7396 if (slp_node)
7397 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7400 if (slp_node)
7401 continue;
7403 if (j == 0)
7404 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7405 else
7406 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7408 prev_stmt_info = vinfo_for_stmt (new_stmt);
7411 vec_oprnds0.release ();
7412 vec_oprnds1.release ();
7413 vec_oprnds2.release ();
7414 vec_oprnds3.release ();
7416 return true;
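
/* Illustration of the transform performed above (hypothetical GIMPLE):

     x_5 = a_1 < b_2 ? c_3 : d_4;

   becomes, for a single copy,

     vx_5 = VEC_COND_EXPR <va_1 < vb_2, vc_3, vd_4>;

   where the embedded comparison is built in VEC_CMP_TYPE and the whole
   selection in the statement's VECTYPE.  */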
7420 /* Make sure the statement is vectorizable. */
7422 bool
7423 vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node)
7425 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7426 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7427 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
7428 bool ok;
7429 tree scalar_type, vectype;
7430 gimple *pattern_stmt;
7431 gimple_seq pattern_def_seq;
7433 if (dump_enabled_p ())
7435 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
7436 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7439 if (gimple_has_volatile_ops (stmt))
7441 if (dump_enabled_p ())
7442 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7443 "not vectorized: stmt has volatile operands\n");
7445 return false;
7448 /* Skip stmts that do not need to be vectorized. In loops this is expected
7449 to include:
7450 - the COND_EXPR which is the loop exit condition
7451 - any LABEL_EXPRs in the loop
7452 - computations that are used only for array indexing or loop control.
7453 In basic blocks we only analyze statements that are a part of some SLP
7454 instance, therefore, all the statements are relevant.
7456 A pattern statement needs to be analyzed instead of the original statement
7457 if the original statement is not relevant. Otherwise, we analyze both
7458 statements. In basic blocks we are called from some SLP instance
7459 traversal; there we don't analyze pattern stmts instead, since the
7460 pattern stmts will already be part of an SLP instance. */
7462 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
7463 if (!STMT_VINFO_RELEVANT_P (stmt_info)
7464 && !STMT_VINFO_LIVE_P (stmt_info))
7466 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
7467 && pattern_stmt
7468 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
7469 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
7471 /* Analyze PATTERN_STMT instead of the original stmt. */
7472 stmt = pattern_stmt;
7473 stmt_info = vinfo_for_stmt (pattern_stmt);
7474 if (dump_enabled_p ())
7476 dump_printf_loc (MSG_NOTE, vect_location,
7477 "==> examining pattern statement: ");
7478 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7481 else
7483 if (dump_enabled_p ())
7484 dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");
7486 return true;
7489 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
7490 && node == NULL
7491 && pattern_stmt
7492 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
7493 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
7495 /* Analyze PATTERN_STMT too. */
7496 if (dump_enabled_p ())
7498 dump_printf_loc (MSG_NOTE, vect_location,
7499 "==> examining pattern statement: ");
7500 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7503 if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node))
7504 return false;
7507 if (is_pattern_stmt_p (stmt_info)
7508 && node == NULL
7509 && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
7511 gimple_stmt_iterator si;
7513 for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
7515 gimple *pattern_def_stmt = gsi_stmt (si);
7516 if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
7517 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
7519 /* Analyze def stmt of STMT if it's a pattern stmt. */
7520 if (dump_enabled_p ())
7522 dump_printf_loc (MSG_NOTE, vect_location,
7523 "==> examining pattern def statement: ");
7524 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
7527 if (!vect_analyze_stmt (pattern_def_stmt,
7528 need_to_vectorize, node))
7529 return false;
7534 switch (STMT_VINFO_DEF_TYPE (stmt_info))
7536 case vect_internal_def:
7537 break;
7539 case vect_reduction_def:
7540 case vect_nested_cycle:
7541 gcc_assert (!bb_vinfo
7542 && (relevance == vect_used_in_outer
7543 || relevance == vect_used_in_outer_by_reduction
7544 || relevance == vect_used_by_reduction
7545 || relevance == vect_unused_in_scope));
7546 break;
7548 case vect_induction_def:
7549 case vect_constant_def:
7550 case vect_external_def:
7551 case vect_unknown_def_type:
7552 default:
7553 gcc_unreachable ();
7556 if (bb_vinfo)
7558 gcc_assert (PURE_SLP_STMT (stmt_info));
7560 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
7561 if (dump_enabled_p ())
7563 dump_printf_loc (MSG_NOTE, vect_location,
7564 "get vectype for scalar type: ");
7565 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
7566 dump_printf (MSG_NOTE, "\n");
7569 vectype = get_vectype_for_scalar_type (scalar_type);
7570 if (!vectype)
7572 if (dump_enabled_p ())
7574 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7575 "not SLPed: unsupported data-type ");
7576 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
7577 scalar_type);
7578 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
7580 return false;
7583 if (dump_enabled_p ())
7585 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
7586 dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
7587 dump_printf (MSG_NOTE, "\n");
7590 STMT_VINFO_VECTYPE (stmt_info) = vectype;
7593 if (STMT_VINFO_RELEVANT_P (stmt_info))
7595 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
7596 gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
7597 || (is_gimple_call (stmt)
7598 && gimple_call_lhs (stmt) == NULL_TREE));
7599 *need_to_vectorize = true;
7602 if (PURE_SLP_STMT (stmt_info) && !node)
7604 dump_printf_loc (MSG_NOTE, vect_location,
7605 "handled only by SLP analysis\n");
7606 return true;
7609 ok = true;
7610 if (!bb_vinfo
7611 && (STMT_VINFO_RELEVANT_P (stmt_info)
7612 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
7613 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
7614 || vectorizable_conversion (stmt, NULL, NULL, node)
7615 || vectorizable_shift (stmt, NULL, NULL, node)
7616 || vectorizable_operation (stmt, NULL, NULL, node)
7617 || vectorizable_assignment (stmt, NULL, NULL, node)
7618 || vectorizable_load (stmt, NULL, NULL, node, NULL)
7619 || vectorizable_call (stmt, NULL, NULL, node)
7620 || vectorizable_store (stmt, NULL, NULL, node)
7621 || vectorizable_reduction (stmt, NULL, NULL, node)
7622 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node));
7623 else
7625 if (bb_vinfo)
7626 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
7627 || vectorizable_conversion (stmt, NULL, NULL, node)
7628 || vectorizable_shift (stmt, NULL, NULL, node)
7629 || vectorizable_operation (stmt, NULL, NULL, node)
7630 || vectorizable_assignment (stmt, NULL, NULL, node)
7631 || vectorizable_load (stmt, NULL, NULL, node, NULL)
7632 || vectorizable_call (stmt, NULL, NULL, node)
7633 || vectorizable_store (stmt, NULL, NULL, node)
7634 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node));
7637 if (!ok)
7639 if (dump_enabled_p ())
7641 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7642 "not vectorized: relevant stmt not ");
7643 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
7644 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
7647 return false;
7650 if (bb_vinfo)
7651 return true;
7653 /* Stmts that are (also) "live" (i.e. used outside of the loop)
7654 need extra handling, except for vectorizable reductions. */
7655 if (STMT_VINFO_LIVE_P (stmt_info)
7656 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
7657 ok = vectorizable_live_operation (stmt, NULL, NULL);
7659 if (!ok)
7661 if (dump_enabled_p ())
7663 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7664 "not vectorized: live stmt not ");
7665 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
7666 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
7669 return false;
7672 return true;
7676 /* Function vect_transform_stmt.
7678 Create a vectorized stmt to replace STMT, and insert it at BSI. */
7680 bool
7681 vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
7682 bool *grouped_store, slp_tree slp_node,
7683 slp_instance slp_node_instance)
7685 bool is_store = false;
7686 gimple *vec_stmt = NULL;
7687 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7688 bool done;
7690 gimple *old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
7692 switch (STMT_VINFO_TYPE (stmt_info))
7694 case type_demotion_vec_info_type:
7695 case type_promotion_vec_info_type:
7696 case type_conversion_vec_info_type:
7697 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
7698 gcc_assert (done);
7699 break;
7701 case induc_vec_info_type:
7702 gcc_assert (!slp_node);
7703 done = vectorizable_induction (stmt, gsi, &vec_stmt);
7704 gcc_assert (done);
7705 break;
7707 case shift_vec_info_type:
7708 done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
7709 gcc_assert (done);
7710 break;
7712 case op_vec_info_type:
7713 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
7714 gcc_assert (done);
7715 break;
7717 case assignment_vec_info_type:
7718 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
7719 gcc_assert (done);
7720 break;
7722 case load_vec_info_type:
7723 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
7724 slp_node_instance);
7725 gcc_assert (done);
7726 break;
7728 case store_vec_info_type:
7729 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
7730 gcc_assert (done);
7731 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
7733 /* In case of interleaving, the whole chain is vectorized when the
7734 last store in the chain is reached. Store stmts before the last
7735 one are skipped, and their vec_stmt_info shouldn't be freed
7736 meanwhile. */
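/* Illustration only: for an interleaved store group such as

       a[2*i]     = x;
       a[2*i + 1] = y;

   the stmt for a[2*i] is skipped when it is reached, and the whole chain
   is emitted once the last store of the group, a[2*i + 1], is
   transformed.  */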
7737 *grouped_store = true;
7738 if (STMT_VINFO_VEC_STMT (stmt_info))
7739 is_store = true;
7741 else
7742 is_store = true;
7743 break;
7745 case condition_vec_info_type:
7746 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
7747 gcc_assert (done);
7748 break;
7750 case call_vec_info_type:
7751 done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
7752 stmt = gsi_stmt (*gsi);
7753 if (is_gimple_call (stmt)
7754 && gimple_call_internal_p (stmt)
7755 && gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
7756 is_store = true;
7757 break;
7759 case call_simd_clone_vec_info_type:
7760 done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
7761 stmt = gsi_stmt (*gsi);
7762 break;
7764 case reduc_vec_info_type:
7765 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
7766 gcc_assert (done);
7767 break;
7769 default:
7770 if (!STMT_VINFO_LIVE_P (stmt_info))
7772 if (dump_enabled_p ())
7773 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7774 "stmt not supported.\n");
7775 gcc_unreachable ();
7779 /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
7780 This would break hybrid SLP vectorization. */
7781 if (slp_node)
7782 gcc_assert (!vec_stmt
7783 && STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt);
7785 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
7786 is being vectorized, but outside the immediately enclosing loop. */
7787 if (vec_stmt
7788 && STMT_VINFO_LOOP_VINFO (stmt_info)
7789 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
7790 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
7791 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
7792 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
7793 || STMT_VINFO_RELEVANT (stmt_info) ==
7794 vect_used_in_outer_by_reduction))
7796 struct loop *innerloop = LOOP_VINFO_LOOP (
7797 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
7798 imm_use_iterator imm_iter;
7799 use_operand_p use_p;
7800 tree scalar_dest;
7801 gimple *exit_phi;
7803 if (dump_enabled_p ())
7804 dump_printf_loc (MSG_NOTE, vect_location,
7805 "Record the vdef for outer-loop vectorization.\n");
7807 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
7808 (to be used when vectorizing outer-loop stmts that use the DEF of
7809 STMT). */
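/* Sketch of the situation handled here (illustrative only):

       for (i = 0; i < n; i++)        outer loop being vectorized
         {
           for (j = 0; j < m; j++)    inner loop
             t = ...;
           out[i] = t;                outer-loop use of the inner DEF
         }

   The loop-exit phi of the inner loop carries t to the outer loop;
   recording vec_stmt on that phi lets the vectorized out[i] statement
   find the vectorized definition.  */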
7810 if (gimple_code (stmt) == GIMPLE_PHI)
7811 scalar_dest = PHI_RESULT (stmt);
7812 else
7813 scalar_dest = gimple_assign_lhs (stmt);
7815 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
7817 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
7819 exit_phi = USE_STMT (use_p);
7820 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
7825 /* Handle stmts whose DEF is used outside the loop-nest that is
7826 being vectorized. */
7827 if (STMT_VINFO_LIVE_P (stmt_info)
7828 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
7830 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
7831 gcc_assert (done);
7834 if (vec_stmt)
7835 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
7837 return is_store;
7841 /* Remove a group of stores (for SLP or interleaving), free their
7842 stmt_vec_info. */
7844 void
7845 vect_remove_stores (gimple *first_stmt)
7847 gimple *next = first_stmt;
7848 gimple *tmp;
7849 gimple_stmt_iterator next_si;
7851 while (next)
7853 stmt_vec_info stmt_info = vinfo_for_stmt (next);
7855 tmp = GROUP_NEXT_ELEMENT (stmt_info);
7856 if (is_pattern_stmt_p (stmt_info))
7857 next = STMT_VINFO_RELATED_STMT (stmt_info);
7858 /* Free the attached stmt_vec_info and remove the stmt. */
7859 next_si = gsi_for_stmt (next);
7860 unlink_stmt_vdef (next);
7861 gsi_remove (&next_si, true);
7862 release_defs (next);
7863 free_stmt_vec_info (next);
7864 next = tmp;
7869 /* Function new_stmt_vec_info.
7871 Create and initialize a new stmt_vec_info struct for STMT. */
7873 stmt_vec_info
7874 new_stmt_vec_info (gimple *stmt, vec_info *vinfo)
7876 stmt_vec_info res;
7877 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
7879 STMT_VINFO_TYPE (res) = undef_vec_info_type;
7880 STMT_VINFO_STMT (res) = stmt;
7881 res->vinfo = vinfo;
7882 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
7883 STMT_VINFO_LIVE_P (res) = false;
7884 STMT_VINFO_VECTYPE (res) = NULL;
7885 STMT_VINFO_VEC_STMT (res) = NULL;
7886 STMT_VINFO_VECTORIZABLE (res) = true;
7887 STMT_VINFO_IN_PATTERN_P (res) = false;
7888 STMT_VINFO_RELATED_STMT (res) = NULL;
7889 STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
7890 STMT_VINFO_DATA_REF (res) = NULL;
7891 STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION;
7893 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
7894 STMT_VINFO_DR_OFFSET (res) = NULL;
7895 STMT_VINFO_DR_INIT (res) = NULL;
7896 STMT_VINFO_DR_STEP (res) = NULL;
7897 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
7899 if (gimple_code (stmt) == GIMPLE_PHI
7900 && is_loop_header_bb_p (gimple_bb (stmt)))
7901 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
7902 else
7903 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
7905 STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
7906 STMT_SLP_TYPE (res) = loop_vect;
7907 GROUP_FIRST_ELEMENT (res) = NULL;
7908 GROUP_NEXT_ELEMENT (res) = NULL;
7909 GROUP_SIZE (res) = 0;
7910 GROUP_STORE_COUNT (res) = 0;
7911 GROUP_GAP (res) = 0;
7912 GROUP_SAME_DR_STMT (res) = NULL;
7914 return res;
7918 /* Create the vector that holds the stmt_vec_info structs. */
7920 void
7921 init_stmt_vec_info_vec (void)
7923 gcc_assert (!stmt_vec_info_vec.exists ());
7924 stmt_vec_info_vec.create (50);
7928 /* Free the vector of stmt_vec_info structs. */
7930 void
7931 free_stmt_vec_info_vec (void)
7933 unsigned int i;
7934 stmt_vec_info info;
7935 FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
7936 if (info != NULL)
7937 free_stmt_vec_info (STMT_VINFO_STMT (info));
7938 gcc_assert (stmt_vec_info_vec.exists ());
7939 stmt_vec_info_vec.release ();
7943 /* Free stmt vectorization related info. */
7945 void
7946 free_stmt_vec_info (gimple *stmt)
7948 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7950 if (!stmt_info)
7951 return;
7953 /* Check if this statement has a related "pattern stmt"
7954 (introduced by the vectorizer during the pattern recognition
7955 pass). Free pattern's stmt_vec_info and def stmt's stmt_vec_info
7956 too. */
7957 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
7959 stmt_vec_info patt_info
7960 = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
7961 if (patt_info)
7963 gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
7964 gimple *patt_stmt = STMT_VINFO_STMT (patt_info);
7965 gimple_set_bb (patt_stmt, NULL);
7966 tree lhs = gimple_get_lhs (patt_stmt);
7967 if (TREE_CODE (lhs) == SSA_NAME)
7968 release_ssa_name (lhs);
7969 if (seq)
7971 gimple_stmt_iterator si;
7972 for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
7974 gimple *seq_stmt = gsi_stmt (si);
7975 gimple_set_bb (seq_stmt, NULL);
7976 lhs = gimple_get_lhs (seq_stmt);
7977 if (TREE_CODE (lhs) == SSA_NAME)
7978 release_ssa_name (lhs);
7979 free_stmt_vec_info (seq_stmt);
7982 free_stmt_vec_info (patt_stmt);
7986 STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
7987 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
7988 set_vinfo_for_stmt (stmt, NULL);
7989 free (stmt_info);
7993 /* Function get_vectype_for_scalar_type_and_size.
7995 Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
7996 by the target. */
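/* Rough sketch of the expected behaviour, assuming a target with 16-byte
   vectors: for SCALAR_TYPE == int (4 bytes) and SIZE == 16 this picks a
   4-unit integer vector mode and returns a type equivalent to

       typedef int v4si __attribute__ ((vector_size (16)));

   whereas SIZE == 0 lets targetm.vectorize.preferred_simd_mode choose
   the vector width.  */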
7998 static tree
7999 get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
8001 machine_mode inner_mode = TYPE_MODE (scalar_type);
8002 machine_mode simd_mode;
8003 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
8004 int nunits;
8005 tree vectype;
8007 if (nbytes == 0)
8008 return NULL_TREE;
8010 if (GET_MODE_CLASS (inner_mode) != MODE_INT
8011 && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
8012 return NULL_TREE;
8014 /* For vector types of elements whose mode precision doesn't
8015 match their type's precision we use an element type of mode
8016 precision. The vectorization routines will have to make sure
8017 they support the proper result truncation/extension.
8018 We also make sure to build vector types with INTEGER_TYPE
8019 component type only. */
8020 if (INTEGRAL_TYPE_P (scalar_type)
8021 && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
8022 || TREE_CODE (scalar_type) != INTEGER_TYPE))
8023 scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
8024 TYPE_UNSIGNED (scalar_type));
8026 /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
8027 When the component mode passes the above test simply use a type
8028 corresponding to that mode. The theory is that any use that
8029 would cause problems with this will disable vectorization anyway. */
8030 else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
8031 && !INTEGRAL_TYPE_P (scalar_type))
8032 scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);
8034 /* We can't build a vector type of elements with alignment bigger than
8035 their size. */
8036 else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
8037 scalar_type = lang_hooks.types.type_for_mode (inner_mode,
8038 TYPE_UNSIGNED (scalar_type));
8040 /* If we fell back to using the mode, fail if there was
8041 no scalar type for it. */
8042 if (scalar_type == NULL_TREE)
8043 return NULL_TREE;
8045 /* If no size was supplied, use the mode the target prefers. Otherwise
8046 look up a vector mode of the specified size. */
8047 if (size == 0)
8048 simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
8049 else
8050 simd_mode = mode_for_vector (inner_mode, size / nbytes);
8051 nunits = GET_MODE_SIZE (simd_mode) / nbytes;
8052 if (nunits <= 1)
8053 return NULL_TREE;
8055 vectype = build_vector_type (scalar_type, nunits);
8057 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
8058 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
8059 return NULL_TREE;
8061 return vectype;
8064 unsigned int current_vector_size;
8066 /* Function get_vectype_for_scalar_type.
8068 Returns the vector type corresponding to SCALAR_TYPE as supported
8069 by the target. */
8071 tree
8072 get_vectype_for_scalar_type (tree scalar_type)
8074 tree vectype;
8075 vectype = get_vectype_for_scalar_type_and_size (scalar_type,
8076 current_vector_size);
8077 if (vectype
8078 && current_vector_size == 0)
8079 current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
8080 return vectype;
8083 /* Function get_same_sized_vectype
8085 Returns a vector type corresponding to SCALAR_TYPE with the same
8086 size as VECTOR_TYPE, if supported by the target. */
8088 tree
8089 get_same_sized_vectype (tree scalar_type, tree vector_type)
8091 if (TREE_CODE (scalar_type) == BOOLEAN_TYPE)
8092 return build_same_sized_truth_vector_type (vector_type);
8094 return get_vectype_for_scalar_type_and_size
8095 (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
8098 /* Function vect_is_simple_use.
8100 Input:
8101 VINFO - the vect info of the loop or basic block that is being vectorized.
8102 OPERAND - operand in the loop or bb.
8103 Output:
8104 DEF_STMT - the defining stmt in case OPERAND is an SSA_NAME.
8105 DT - the type of definition
8107 Returns whether a stmt with OPERAND can be vectorized.
8108 For loops, supportable operands are constants, loop invariants, and operands
8109 that are defined by the current iteration of the loop. Unsupportable
8110 operands are those that are defined by a previous iteration of the loop (as
8111 is the case in reduction/induction computations).
8112 For basic blocks, supportable operands are constants and bb invariants.
8113 For now, operands defined outside the basic block are not supported. */
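/* For illustration only: in

       for (i = 0; i < n; i++)
         a[i] = b[i] * c + 7;

   the operand 7 yields vect_constant_def, the loop-invariant c yields
   vect_external_def, and the SSA name holding b[i] * c, defined in the
   current iteration, yields vect_internal_def.  */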
8115 bool
8116 vect_is_simple_use (tree operand, vec_info *vinfo,
8117 gimple **def_stmt, enum vect_def_type *dt)
8119 *def_stmt = NULL;
8120 *dt = vect_unknown_def_type;
8122 if (dump_enabled_p ())
8124 dump_printf_loc (MSG_NOTE, vect_location,
8125 "vect_is_simple_use: operand ");
8126 dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
8127 dump_printf (MSG_NOTE, "\n");
8130 if (CONSTANT_CLASS_P (operand))
8132 *dt = vect_constant_def;
8133 return true;
8136 if (is_gimple_min_invariant (operand))
8138 *dt = vect_external_def;
8139 return true;
8142 if (TREE_CODE (operand) != SSA_NAME)
8144 if (dump_enabled_p ())
8145 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8146 "not ssa-name.\n");
8147 return false;
8150 if (SSA_NAME_IS_DEFAULT_DEF (operand))
8152 *dt = vect_external_def;
8153 return true;
8156 *def_stmt = SSA_NAME_DEF_STMT (operand);
8157 if (dump_enabled_p ())
8159 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
8160 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
8163 if (! vect_stmt_in_region_p (vinfo, *def_stmt))
8164 *dt = vect_external_def;
8165 else
8167 stmt_vec_info stmt_vinfo = vinfo_for_stmt (*def_stmt);
8168 if (is_a <bb_vec_info> (vinfo) && !STMT_VINFO_VECTORIZABLE (stmt_vinfo))
8169 *dt = vect_external_def;
8170 else
8171 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
8174 if (dump_enabled_p ())
8176 dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
8177 switch (*dt)
8179 case vect_uninitialized_def:
8180 dump_printf (MSG_NOTE, "uninitialized\n");
8181 break;
8182 case vect_constant_def:
8183 dump_printf (MSG_NOTE, "constant\n");
8184 break;
8185 case vect_external_def:
8186 dump_printf (MSG_NOTE, "external\n");
8187 break;
8188 case vect_internal_def:
8189 dump_printf (MSG_NOTE, "internal\n");
8190 break;
8191 case vect_induction_def:
8192 dump_printf (MSG_NOTE, "induction\n");
8193 break;
8194 case vect_reduction_def:
8195 dump_printf (MSG_NOTE, "reduction\n");
8196 break;
8197 case vect_double_reduction_def:
8198 dump_printf (MSG_NOTE, "double reduction\n");
8199 break;
8200 case vect_nested_cycle:
8201 dump_printf (MSG_NOTE, "nested cycle\n");
8202 break;
8203 case vect_unknown_def_type:
8204 dump_printf (MSG_NOTE, "unknown\n");
8205 break;
8209 if (*dt == vect_unknown_def_type)
8211 if (dump_enabled_p ())
8212 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8213 "Unsupported pattern.\n");
8214 return false;
8217 switch (gimple_code (*def_stmt))
8219 case GIMPLE_PHI:
8220 case GIMPLE_ASSIGN:
8221 case GIMPLE_CALL:
8222 break;
8223 default:
8224 if (dump_enabled_p ())
8225 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8226 "unsupported defining stmt:\n");
8227 return false;
8230 return true;
8233 /* Function vect_is_simple_use.
8235 Same as vect_is_simple_use but also determines the vector operand
8236 type of OPERAND and stores it to *VECTYPE. If the definition of
8237 OPERAND is vect_uninitialized_def, vect_constant_def or
8238 vect_external_def, *VECTYPE will be set to NULL_TREE and the caller
8239 is responsible for computing the best-suited vector type for the
8240 scalar operand. */
8242 bool
8243 vect_is_simple_use (tree operand, vec_info *vinfo,
8244 gimple **def_stmt, enum vect_def_type *dt, tree *vectype)
8246 if (!vect_is_simple_use (operand, vinfo, def_stmt, dt))
8247 return false;
8249 /* Now get a vector type if the def is internal, otherwise supply
8250 NULL_TREE and leave it up to the caller to figure out a proper
8251 type for the use stmt. */
8252 if (*dt == vect_internal_def
8253 || *dt == vect_induction_def
8254 || *dt == vect_reduction_def
8255 || *dt == vect_double_reduction_def
8256 || *dt == vect_nested_cycle)
8258 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
8260 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
8261 && !STMT_VINFO_RELEVANT (stmt_info)
8262 && !STMT_VINFO_LIVE_P (stmt_info))
8263 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
8265 *vectype = STMT_VINFO_VECTYPE (stmt_info);
8266 gcc_assert (*vectype != NULL_TREE);
8268 else if (*dt == vect_uninitialized_def
8269 || *dt == vect_constant_def
8270 || *dt == vect_external_def)
8271 *vectype = NULL_TREE;
8272 else
8273 gcc_unreachable ();
8275 return true;
8279 /* Function supportable_widening_operation
8281 Check whether an operation represented by the code CODE is a
8282 widening operation that is supported by the target platform in
8283 vector form (i.e., when operating on arguments of type VECTYPE_IN
8284 producing a result of type VECTYPE_OUT).
8286 Widening operations we currently support are NOP (CONVERT), FLOAT,
8287 WIDEN_MULT, WIDEN_LSHIFT, DOT_PROD and SAD. This function checks if these operations are supported
8288 by the target platform either directly (via vector tree-codes), or via
8289 target builtins.
8291 Output:
8292 - CODE1 and CODE2 are codes of vector operations to be used when
8293 vectorizing the operation, if available.
8294 - MULTI_STEP_CVT determines the number of required intermediate steps in
8295 case of multi-step conversion (like char->short->int - in that case
8296 MULTI_STEP_CVT will be 1).
8297 - INTERM_TYPES contains the intermediate type required to perform the
8298 widening operation (short in the above example). */
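/* Rough worked example, assuming the target only provides adjacent-size
   unpacking: widening a vector of chars to ints follows the
   char->short->int chain from the comment above, so for CONVERT the
   function sets CODE1/CODE2 to VEC_UNPACK_LO_EXPR/VEC_UNPACK_HI_EXPR,
   pushes the short vector type into INTERM_TYPES and sets
   MULTI_STEP_CVT to 1.  */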
8300 bool
8301 supportable_widening_operation (enum tree_code code, gimple *stmt,
8302 tree vectype_out, tree vectype_in,
8303 enum tree_code *code1, enum tree_code *code2,
8304 int *multi_step_cvt,
8305 vec<tree> *interm_types)
8307 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8308 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
8309 struct loop *vect_loop = NULL;
8310 machine_mode vec_mode;
8311 enum insn_code icode1, icode2;
8312 optab optab1, optab2;
8313 tree vectype = vectype_in;
8314 tree wide_vectype = vectype_out;
8315 enum tree_code c1, c2;
8316 int i;
8317 tree prev_type, intermediate_type;
8318 machine_mode intermediate_mode, prev_mode;
8319 optab optab3, optab4;
8321 *multi_step_cvt = 0;
8322 if (loop_info)
8323 vect_loop = LOOP_VINFO_LOOP (loop_info);
8325 switch (code)
8327 case WIDEN_MULT_EXPR:
8328 /* The result of a vectorized widening operation usually requires
8329 two vectors (because the widened results do not fit into one vector).
8330 The generated vector results would normally be expected to be
8331 generated in the same order as in the original scalar computation,
8332 i.e. if 8 results are generated in each vector iteration, they are
8333 to be organized as follows:
8334 vect1: [res1,res2,res3,res4],
8335 vect2: [res5,res6,res7,res8].
8337 However, in the special case that the result of the widening
8338 operation is used in a reduction computation only, the order doesn't
8339 matter (because when vectorizing a reduction we change the order of
8340 the computation). Some targets can take advantage of this and
8341 generate more efficient code. For example, targets like Altivec,
8342 that support widen_mult using a sequence of {mult_even,mult_odd}
8343 generate the following vectors:
8344 vect1: [res1,res3,res5,res7],
8345 vect2: [res2,res4,res6,res8].
8347 When vectorizing outer-loops, we execute the inner-loop sequentially
8348 (each vectorized inner-loop iteration contributes to VF outer-loop
8349 iterations in parallel). We therefore don't allow changing the
8350 order of the computation in the inner-loop during outer-loop
8351 vectorization. */
8352 /* TODO: Another case in which order doesn't *really* matter is when we
8353 widen and then contract again, e.g. (short)((int)x * y >> 8).
8354 Normally, pack_trunc performs an even/odd permute, whereas the
8355 repack from an even/odd expansion would be an interleave, which
8356 would be significantly simpler for e.g. AVX2. */
8357 /* In any case, in order to avoid duplicating the code below, recurse
8358 on VEC_WIDEN_MULT_EVEN_EXPR. If it succeeds, all the return values
8359 are properly set up for the caller. If we fail, we'll continue with
8360 a VEC_WIDEN_MULT_LO/HI_EXPR check. */
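/* Illustrative reduction, mirroring the s += a * b case mentioned below
   (not taken from the sources):

       short a[N], b[N];
       int s = 0;
       for (i = 0; i < n; i++)
         s += (int) a[i] * (int) b[i];

   Only the reduction sum consumes the widened products, so an even/odd
   ordering of the results is acceptable and the
   VEC_WIDEN_MULT_EVEN/ODD_EXPR pair can be used.  */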
8361 if (vect_loop
8362 && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
8363 && !nested_in_vect_loop_p (vect_loop, stmt)
8364 && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
8365 stmt, vectype_out, vectype_in,
8366 code1, code2, multi_step_cvt,
8367 interm_types))
8369 /* Elements in a vector with the vect_used_by_reduction property cannot
8370 be reordered if the use chain with this property does not have the
8371 same operation. One such example is s += a * b, where elements
8372 in a and b cannot be reordered. Here we check if the vector defined
8373 by STMT is only directly used in the reduction statement. */
8374 tree lhs = gimple_assign_lhs (stmt);
8375 use_operand_p dummy;
8376 gimple *use_stmt;
8377 stmt_vec_info use_stmt_info = NULL;
8378 if (single_imm_use (lhs, &dummy, &use_stmt)
8379 && (use_stmt_info = vinfo_for_stmt (use_stmt))
8380 && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
8381 return true;
8383 c1 = VEC_WIDEN_MULT_LO_EXPR;
8384 c2 = VEC_WIDEN_MULT_HI_EXPR;
8385 break;
8387 case DOT_PROD_EXPR:
8388 c1 = DOT_PROD_EXPR;
8389 c2 = DOT_PROD_EXPR;
8390 break;
8392 case SAD_EXPR:
8393 c1 = SAD_EXPR;
8394 c2 = SAD_EXPR;
8395 break;
8397 case VEC_WIDEN_MULT_EVEN_EXPR:
8398 /* Support the recursion induced just above. */
8399 c1 = VEC_WIDEN_MULT_EVEN_EXPR;
8400 c2 = VEC_WIDEN_MULT_ODD_EXPR;
8401 break;
8403 case WIDEN_LSHIFT_EXPR:
8404 c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
8405 c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
8406 break;
8408 CASE_CONVERT:
8409 c1 = VEC_UNPACK_LO_EXPR;
8410 c2 = VEC_UNPACK_HI_EXPR;
8411 break;
8413 case FLOAT_EXPR:
8414 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
8415 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
8416 break;
8418 case FIX_TRUNC_EXPR:
8419 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
8420 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
8421 computing the operation. */
8422 return false;
8424 default:
8425 gcc_unreachable ();
8428 if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
8429 std::swap (c1, c2);
8431 if (code == FIX_TRUNC_EXPR)
8433 /* The signedness is determined from output operand. */
8434 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
8435 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
8437 else
8439 optab1 = optab_for_tree_code (c1, vectype, optab_default);
8440 optab2 = optab_for_tree_code (c2, vectype, optab_default);
8443 if (!optab1 || !optab2)
8444 return false;
8446 vec_mode = TYPE_MODE (vectype);
8447 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
8448 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
8449 return false;
8451 *code1 = c1;
8452 *code2 = c2;
8454 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
8455 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
8456 return true;
8458 /* Check if it's a multi-step conversion that can be done using intermediate
8459 types. */
8461 prev_type = vectype;
8462 prev_mode = vec_mode;
8464 if (!CONVERT_EXPR_CODE_P (code))
8465 return false;
8467 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
8468 intermediate steps in the promotion sequence. We try
8469 MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
8470 not. */
8471 interm_types->create (MAX_INTERM_CVT_STEPS);
8472 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
8474 intermediate_mode = insn_data[icode1].operand[0].mode;
8475 intermediate_type
8476 = lang_hooks.types.type_for_mode (intermediate_mode,
8477 TYPE_UNSIGNED (prev_type));
8478 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
8479 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
8481 if (!optab3 || !optab4
8482 || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
8483 || insn_data[icode1].operand[0].mode != intermediate_mode
8484 || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
8485 || insn_data[icode2].operand[0].mode != intermediate_mode
8486 || ((icode1 = optab_handler (optab3, intermediate_mode))
8487 == CODE_FOR_nothing)
8488 || ((icode2 = optab_handler (optab4, intermediate_mode))
8489 == CODE_FOR_nothing))
8490 break;
8492 interm_types->quick_push (intermediate_type);
8493 (*multi_step_cvt)++;
8495 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
8496 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
8497 return true;
8499 prev_type = intermediate_type;
8500 prev_mode = intermediate_mode;
8503 interm_types->release ();
8504 return false;
8508 /* Function supportable_narrowing_operation
8510 Check whether an operation represented by the code CODE is a
8511 narrowing operation that is supported by the target platform in
8512 vector form (i.e., when operating on arguments of type VECTYPE_IN
8513 and producing a result of type VECTYPE_OUT).
8515 Narrowing operations we currently support are NOP (CONVERT) and
8516 FIX_TRUNC. This function checks if these operations are supported by
8517 the target platform directly via vector tree-codes.
8519 Output:
8520 - CODE1 is the code of a vector operation to be used when
8521 vectorizing the operation, if available.
8522 - MULTI_STEP_CVT determines the number of required intermediate steps in
8523 case of multi-step conversion (like int->short->char - in that case
8524 MULTI_STEP_CVT will be 1).
8525 - INTERM_TYPES contains the intermediate type required to perform the
8526 narrowing operation (short in the above example). */
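/* Rough example, assuming only adjacent-size pack operations exist:
   narrowing int vectors to chars follows the int->short->char chain
   from the comment above, so CODE1 becomes VEC_PACK_TRUNC_EXPR, the
   short vector type is pushed into INTERM_TYPES and MULTI_STEP_CVT is
   set to 1.  */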
8528 bool
8529 supportable_narrowing_operation (enum tree_code code,
8530 tree vectype_out, tree vectype_in,
8531 enum tree_code *code1, int *multi_step_cvt,
8532 vec<tree> *interm_types)
8534 machine_mode vec_mode;
8535 enum insn_code icode1;
8536 optab optab1, interm_optab;
8537 tree vectype = vectype_in;
8538 tree narrow_vectype = vectype_out;
8539 enum tree_code c1;
8540 tree intermediate_type;
8541 machine_mode intermediate_mode, prev_mode;
8542 int i;
8543 bool uns;
8545 *multi_step_cvt = 0;
8546 switch (code)
8548 CASE_CONVERT:
8549 c1 = VEC_PACK_TRUNC_EXPR;
8550 break;
8552 case FIX_TRUNC_EXPR:
8553 c1 = VEC_PACK_FIX_TRUNC_EXPR;
8554 break;
8556 case FLOAT_EXPR:
8557 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
8558 tree code and optabs used for computing the operation. */
8559 return false;
8561 default:
8562 gcc_unreachable ();
8565 if (code == FIX_TRUNC_EXPR)
8566 /* The signedness is determined from output operand. */
8567 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
8568 else
8569 optab1 = optab_for_tree_code (c1, vectype, optab_default);
8571 if (!optab1)
8572 return false;
8574 vec_mode = TYPE_MODE (vectype);
8575 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
8576 return false;
8578 *code1 = c1;
8580 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
8581 return true;
8583 /* Check if it's a multi-step conversion that can be done using intermediate
8584 types. */
8585 prev_mode = vec_mode;
8586 if (code == FIX_TRUNC_EXPR)
8587 uns = TYPE_UNSIGNED (vectype_out);
8588 else
8589 uns = TYPE_UNSIGNED (vectype);
8591 /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
8592 conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
8593 costly than signed. */
8594 if (code == FIX_TRUNC_EXPR && uns)
8596 enum insn_code icode2;
8598 intermediate_type
8599 = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
8600 interm_optab
8601 = optab_for_tree_code (c1, intermediate_type, optab_default);
8602 if (interm_optab != unknown_optab
8603 && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
8604 && insn_data[icode1].operand[0].mode
8605 == insn_data[icode2].operand[0].mode)
8607 uns = false;
8608 optab1 = interm_optab;
8609 icode1 = icode2;
8613 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
8614 intermediate steps in the narrowing sequence. We try
8615 MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not. */
8616 interm_types->create (MAX_INTERM_CVT_STEPS);
8617 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
8619 intermediate_mode = insn_data[icode1].operand[0].mode;
8620 intermediate_type
8621 = lang_hooks.types.type_for_mode (intermediate_mode, uns);
8622 interm_optab
8623 = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
8624 optab_default);
8625 if (!interm_optab
8626 || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
8627 || insn_data[icode1].operand[0].mode != intermediate_mode
8628 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
8629 == CODE_FOR_nothing))
8630 break;
8632 interm_types->quick_push (intermediate_type);
8633 (*multi_step_cvt)++;
8635 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
8636 return true;
8638 prev_mode = intermediate_mode;
8639 optab1 = interm_optab;
8642 interm_types->release ();
8643 return false;