gcc/tree-vect-stmts.c (official-gcc.git)
1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003-2015 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
4 and Ira Rosen <irar@il.ibm.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "ssa.h"
31 #include "optabs-tree.h"
32 #include "insn-config.h"
33 #include "recog.h" /* FIXME: for insn_data */
34 #include "cgraph.h"
35 #include "dumpfile.h"
36 #include "alias.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
39 #include "tree-eh.h"
40 #include "gimplify.h"
41 #include "gimple-iterator.h"
42 #include "gimplify-me.h"
43 #include "tree-cfg.h"
44 #include "tree-ssa-loop-manip.h"
45 #include "cfgloop.h"
46 #include "tree-ssa-loop.h"
47 #include "tree-scalar-evolution.h"
48 #include "tree-vectorizer.h"
49 #include "builtins.h"
51 /* For lang_hooks.types.type_for_mode. */
52 #include "langhooks.h"
54 /* Return the vectorized type for the given statement. */
56 tree
57 stmt_vectype (struct _stmt_vec_info *stmt_info)
59 return STMT_VINFO_VECTYPE (stmt_info);
62 /* Return TRUE iff the given statement is in an inner loop relative to
63 the loop being vectorized. */
64 bool
65 stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
67 gimple *stmt = STMT_VINFO_STMT (stmt_info);
68 basic_block bb = gimple_bb (stmt);
69 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
70 struct loop* loop;
72 if (!loop_vinfo)
73 return false;
75 loop = LOOP_VINFO_LOOP (loop_vinfo);
77 return (bb->loop_father == loop->inner);
80 /* Record the cost of a statement, either by directly informing the
81 target model or by saving it in a vector for later processing.
82 Return a preliminary estimate of the statement's cost. */
84 unsigned
85 record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
86 enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
87 int misalign, enum vect_cost_model_location where)
89 if (body_cost_vec)
91 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
92 stmt_info_for_cost si = { count, kind,
93 stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
94 misalign };
95 body_cost_vec->safe_push (si);
96 return (unsigned)
97 (builtin_vectorization_cost (kind, vectype, misalign) * count);
99 else
100 return add_stmt_cost (stmt_info->vinfo->target_cost_data,
101 count, kind, stmt_info, misalign, where);
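/* A typical use, as seen later in this file, is for instance:
     inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
                                     stmt_info, 0, vect_body);
   i.e. the caller either accumulates costs in a vector for later
   processing or, when no vector is given, feeds them straight to the
   target's add_stmt_cost hook.  */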
104 /* Return a variable of type ELEM_TYPE[NELEMS]. */
106 static tree
107 create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
109 return create_tmp_var (build_array_type_nelts (elem_type, nelems),
110 "vect_array");
113 /* ARRAY is an array of vectors created by create_vector_array.
114 Return an SSA_NAME for the vector in index N. The reference
115 is part of the vectorization of STMT and the vector is associated
116 with scalar destination SCALAR_DEST. */
118 static tree
119 read_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
120 tree array, unsigned HOST_WIDE_INT n)
122 tree vect_type, vect, vect_name, array_ref;
123 gimple *new_stmt;
125 gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
126 vect_type = TREE_TYPE (TREE_TYPE (array));
127 vect = vect_create_destination_var (scalar_dest, vect_type);
128 array_ref = build4 (ARRAY_REF, vect_type, array,
129 build_int_cst (size_type_node, n),
130 NULL_TREE, NULL_TREE);
132 new_stmt = gimple_build_assign (vect, array_ref);
133 vect_name = make_ssa_name (vect, new_stmt);
134 gimple_assign_set_lhs (new_stmt, vect_name);
135 vect_finish_stmt_generation (stmt, new_stmt, gsi);
137 return vect_name;
140 /* ARRAY is an array of vectors created by create_vector_array.
141 Emit code to store SSA_NAME VECT in index N of the array.
142 The store is part of the vectorization of STMT. */
144 static void
145 write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect,
146 tree array, unsigned HOST_WIDE_INT n)
148 tree array_ref;
149 gimple *new_stmt;
151 array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
152 build_int_cst (size_type_node, n),
153 NULL_TREE, NULL_TREE);
155 new_stmt = gimple_build_assign (array_ref, vect);
156 vect_finish_stmt_generation (stmt, new_stmt, gsi);
159 /* PTR is a pointer to an array of type TYPE. Return a representation
160 of *PTR. The memory reference replaces those in FIRST_DR
161 (and its group). */
163 static tree
164 create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
166 tree mem_ref, alias_ptr_type;
168 alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
169 mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
170 /* Arrays have the same alignment as their type. */
171 set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
172 return mem_ref;
175 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
177 /* Function vect_mark_relevant.
179 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
181 static void
182 vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
183 enum vect_relevant relevant, bool live_p,
184 bool used_in_pattern)
186 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
187 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
188 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
189 gimple *pattern_stmt;
191 if (dump_enabled_p ())
192 dump_printf_loc (MSG_NOTE, vect_location,
193 "mark relevant %d, live %d.\n", relevant, live_p);
195 /* If this stmt is an original stmt in a pattern, we might need to mark its
196 related pattern stmt instead of the original stmt. However, such stmts
 197        may have their own uses that are not in any pattern; in such cases the
198 stmt itself should be marked. */
199 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
201 bool found = false;
202 if (!used_in_pattern)
204 imm_use_iterator imm_iter;
205 use_operand_p use_p;
206 gimple *use_stmt;
207 tree lhs;
208 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
209 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
211 if (is_gimple_assign (stmt))
212 lhs = gimple_assign_lhs (stmt);
213 else
214 lhs = gimple_call_lhs (stmt);
 216           /* This use is not itself part of the pattern; if LHS has other uses that are
217 pattern uses, we should mark the stmt itself, and not the pattern
218 stmt. */
219 if (lhs && TREE_CODE (lhs) == SSA_NAME)
220 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
222 if (is_gimple_debug (USE_STMT (use_p)))
223 continue;
224 use_stmt = USE_STMT (use_p);
226 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
227 continue;
229 if (vinfo_for_stmt (use_stmt)
230 && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
232 found = true;
233 break;
238 if (!found)
240 /* This is the last stmt in a sequence that was detected as a
241 pattern that can potentially be vectorized. Don't mark the stmt
242 as relevant/live because it's not going to be vectorized.
243 Instead mark the pattern-stmt that replaces it. */
245 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
247 if (dump_enabled_p ())
248 dump_printf_loc (MSG_NOTE, vect_location,
249 "last stmt in pattern. don't mark"
250 " relevant/live.\n");
251 stmt_info = vinfo_for_stmt (pattern_stmt);
252 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
253 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
254 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
255 stmt = pattern_stmt;
259 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
260 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
261 STMT_VINFO_RELEVANT (stmt_info) = relevant;
263 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
264 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
266 if (dump_enabled_p ())
267 dump_printf_loc (MSG_NOTE, vect_location,
268 "already marked relevant/live.\n");
269 return;
272 worklist->safe_push (stmt);
276 /* Function vect_stmt_relevant_p.
278 Return true if STMT in loop that is represented by LOOP_VINFO is
279 "relevant for vectorization".
281 A stmt is considered "relevant for vectorization" if:
282 - it has uses outside the loop.
283 - it has vdefs (it alters memory).
 284    - it is a control stmt in the loop (except for the exit condition).
286 CHECKME: what other side effects would the vectorizer allow? */
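/* For example, a store to memory inside the loop is relevant because it
   has a vdef, and a computation whose result is read after the loop is
   live because it has a use outside the loop.  */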
288 static bool
289 vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
290 enum vect_relevant *relevant, bool *live_p)
292 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
293 ssa_op_iter op_iter;
294 imm_use_iterator imm_iter;
295 use_operand_p use_p;
296 def_operand_p def_p;
298 *relevant = vect_unused_in_scope;
299 *live_p = false;
301 /* cond stmt other than loop exit cond. */
302 if (is_ctrl_stmt (stmt)
303 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
304 != loop_exit_ctrl_vec_info_type)
305 *relevant = vect_used_in_scope;
307 /* changing memory. */
308 if (gimple_code (stmt) != GIMPLE_PHI)
309 if (gimple_vdef (stmt)
310 && !gimple_clobber_p (stmt))
312 if (dump_enabled_p ())
313 dump_printf_loc (MSG_NOTE, vect_location,
314 "vec_stmt_relevant_p: stmt has vdefs.\n");
315 *relevant = vect_used_in_scope;
318 /* uses outside the loop. */
319 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
321 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
323 basic_block bb = gimple_bb (USE_STMT (use_p));
324 if (!flow_bb_inside_loop_p (loop, bb))
326 if (dump_enabled_p ())
327 dump_printf_loc (MSG_NOTE, vect_location,
328 "vec_stmt_relevant_p: used out of loop.\n");
330 if (is_gimple_debug (USE_STMT (use_p)))
331 continue;
333 /* We expect all such uses to be in the loop exit phis
334 (because of loop closed form) */
335 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
336 gcc_assert (bb == single_exit (loop)->dest);
338 *live_p = true;
343 return (*live_p || *relevant);
347 /* Function exist_non_indexing_operands_for_use_p
349 USE is one of the uses attached to STMT. Check if USE is
350 used in STMT for anything other than indexing an array. */
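/* E.g. in "x_1 = a[i_2]" the use of i_2 only feeds the address
   computation of the data reference; such a use does not by itself
   require the stmt defining i_2 to be vectorized.  */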
352 static bool
353 exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)
355 tree operand;
356 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
358 /* USE corresponds to some operand in STMT. If there is no data
359 reference in STMT, then any operand that corresponds to USE
360 is not indexing an array. */
361 if (!STMT_VINFO_DATA_REF (stmt_info))
362 return true;
 364   /* STMT has a data_ref. FORNOW this means that it's of one of
365 the following forms:
366 -1- ARRAY_REF = var
367 -2- var = ARRAY_REF
368 (This should have been verified in analyze_data_refs).
370 'var' in the second case corresponds to a def, not a use,
371 so USE cannot correspond to any operands that are not used
372 for array indexing.
374 Therefore, all we need to check is if STMT falls into the
375 first case, and whether var corresponds to USE. */
377 if (!gimple_assign_copy_p (stmt))
379 if (is_gimple_call (stmt)
380 && gimple_call_internal_p (stmt))
381 switch (gimple_call_internal_fn (stmt))
383 case IFN_MASK_STORE:
384 operand = gimple_call_arg (stmt, 3);
385 if (operand == use)
386 return true;
387 /* FALLTHRU */
388 case IFN_MASK_LOAD:
389 operand = gimple_call_arg (stmt, 2);
390 if (operand == use)
391 return true;
392 break;
393 default:
394 break;
396 return false;
399 if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
400 return false;
401 operand = gimple_assign_rhs1 (stmt);
402 if (TREE_CODE (operand) != SSA_NAME)
403 return false;
405 if (operand == use)
406 return true;
408 return false;
 413 /* Function process_use.
415 Inputs:
416 - a USE in STMT in a loop represented by LOOP_VINFO
417 - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
418 that defined USE. This is done by calling mark_relevant and passing it
419 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
420 - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
421 be performed.
423 Outputs:
424 Generally, LIVE_P and RELEVANT are used to define the liveness and
425 relevance info of the DEF_STMT of this USE:
426 STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
427 STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
428 Exceptions:
429 - case 1: If USE is used only for address computations (e.g. array indexing),
430 which does not need to be directly vectorized, then the liveness/relevance
431 of the respective DEF_STMT is left unchanged.
432 - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
 433    skip DEF_STMT because it has already been processed.
434 - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
435 be modified accordingly.
437 Return true if everything is as expected. Return false otherwise. */
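/* In short: the DEF_STMT of USE is looked up with vect_is_simple_use;
   defs from outside the loop need no further processing, while in-loop
   defs are marked via vect_mark_relevant, with the relevance adjusted
   when the def and the use sit in different nests of the loop (cases
   3a/3b described above).  */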
439 static bool
440 process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
441 enum vect_relevant relevant, vec<gimple *> *worklist,
442 bool force)
444 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
445 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
446 stmt_vec_info dstmt_vinfo;
447 basic_block bb, def_bb;
448 gimple *def_stmt;
449 enum vect_def_type dt;
451 /* case 1: we are only interested in uses that need to be vectorized. Uses
452 that are used for address computation are not considered relevant. */
453 if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
454 return true;
456 if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt))
458 if (dump_enabled_p ())
459 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
460 "not vectorized: unsupported use in stmt.\n");
461 return false;
464 if (!def_stmt || gimple_nop_p (def_stmt))
465 return true;
467 def_bb = gimple_bb (def_stmt);
468 if (!flow_bb_inside_loop_p (loop, def_bb))
470 if (dump_enabled_p ())
471 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
472 return true;
475 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
476 DEF_STMT must have already been processed, because this should be the
477 only way that STMT, which is a reduction-phi, was put in the worklist,
478 as there should be no other uses for DEF_STMT in the loop. So we just
479 check that everything is as expected, and we are done. */
480 dstmt_vinfo = vinfo_for_stmt (def_stmt);
481 bb = gimple_bb (stmt);
482 if (gimple_code (stmt) == GIMPLE_PHI
483 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
484 && gimple_code (def_stmt) != GIMPLE_PHI
485 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
486 && bb->loop_father == def_bb->loop_father)
488 if (dump_enabled_p ())
489 dump_printf_loc (MSG_NOTE, vect_location,
490 "reduc-stmt defining reduc-phi in the same nest.\n");
491 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
492 dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
493 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
494 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
495 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
496 return true;
499 /* case 3a: outer-loop stmt defining an inner-loop stmt:
500 outer-loop-header-bb:
501 d = def_stmt
502 inner-loop:
503 stmt # use (d)
504 outer-loop-tail-bb:
505 ... */
506 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
508 if (dump_enabled_p ())
509 dump_printf_loc (MSG_NOTE, vect_location,
510 "outer-loop def-stmt defining inner-loop stmt.\n");
512 switch (relevant)
514 case vect_unused_in_scope:
515 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
516 vect_used_in_scope : vect_unused_in_scope;
517 break;
519 case vect_used_in_outer_by_reduction:
520 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
521 relevant = vect_used_by_reduction;
522 break;
524 case vect_used_in_outer:
525 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
526 relevant = vect_used_in_scope;
527 break;
529 case vect_used_in_scope:
530 break;
532 default:
533 gcc_unreachable ();
537 /* case 3b: inner-loop stmt defining an outer-loop stmt:
538 outer-loop-header-bb:
540 inner-loop:
541 d = def_stmt
542 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
543 stmt # use (d) */
544 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
546 if (dump_enabled_p ())
547 dump_printf_loc (MSG_NOTE, vect_location,
548 "inner-loop def-stmt defining outer-loop stmt.\n");
550 switch (relevant)
552 case vect_unused_in_scope:
553 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
554 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
555 vect_used_in_outer_by_reduction : vect_unused_in_scope;
556 break;
558 case vect_used_by_reduction:
559 relevant = vect_used_in_outer_by_reduction;
560 break;
562 case vect_used_in_scope:
563 relevant = vect_used_in_outer;
564 break;
566 default:
567 gcc_unreachable ();
571 vect_mark_relevant (worklist, def_stmt, relevant, live_p,
572 is_pattern_stmt_p (stmt_vinfo));
573 return true;
577 /* Function vect_mark_stmts_to_be_vectorized.
579 Not all stmts in the loop need to be vectorized. For example:
581 for i...
582 for j...
583 1. T0 = i + j
584 2. T1 = a[T0]
586 3. j = j + 1
 588    Stmts 1 and 3 do not need to be vectorized, because loop control and
589 addressing of vectorized data-refs are handled differently.
591 This pass detects such stmts. */
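/* The implementation below is a worklist algorithm: stmts that are
   relevant on their own (see vect_stmt_relevant_p) seed the worklist,
   and relevance/liveness is then propagated backwards, via process_use,
   to the stmts that define their operands.  */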
593 bool
594 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
596 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
597 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
598 unsigned int nbbs = loop->num_nodes;
599 gimple_stmt_iterator si;
600 gimple *stmt;
601 unsigned int i;
602 stmt_vec_info stmt_vinfo;
603 basic_block bb;
604 gimple *phi;
605 bool live_p;
606 enum vect_relevant relevant, tmp_relevant;
607 enum vect_def_type def_type;
609 if (dump_enabled_p ())
610 dump_printf_loc (MSG_NOTE, vect_location,
611 "=== vect_mark_stmts_to_be_vectorized ===\n");
613 auto_vec<gimple *, 64> worklist;
615 /* 1. Init worklist. */
616 for (i = 0; i < nbbs; i++)
618 bb = bbs[i];
619 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
621 phi = gsi_stmt (si);
622 if (dump_enabled_p ())
624 dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
625 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
628 if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
629 vect_mark_relevant (&worklist, phi, relevant, live_p, false);
631 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
633 stmt = gsi_stmt (si);
634 if (dump_enabled_p ())
636 dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
637 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
640 if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
641 vect_mark_relevant (&worklist, stmt, relevant, live_p, false);
645 /* 2. Process_worklist */
646 while (worklist.length () > 0)
648 use_operand_p use_p;
649 ssa_op_iter iter;
651 stmt = worklist.pop ();
652 if (dump_enabled_p ())
654 dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
655 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
658 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
659 (DEF_STMT) as relevant/irrelevant and live/dead according to the
660 liveness and relevance properties of STMT. */
661 stmt_vinfo = vinfo_for_stmt (stmt);
662 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
663 live_p = STMT_VINFO_LIVE_P (stmt_vinfo);
665 /* Generally, the liveness and relevance properties of STMT are
666 propagated as is to the DEF_STMTs of its USEs:
667 live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
668 relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)
670 One exception is when STMT has been identified as defining a reduction
671 variable; in this case we set the liveness/relevance as follows:
672 live_p = false
673 relevant = vect_used_by_reduction
674 This is because we distinguish between two kinds of relevant stmts -
675 those that are used by a reduction computation, and those that are
676 (also) used by a regular computation. This allows us later on to
677 identify stmts that are used solely by a reduction, and therefore the
678 order of the results that they produce does not have to be kept. */
680 def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
681 tmp_relevant = relevant;
682 switch (def_type)
684 case vect_reduction_def:
685 switch (tmp_relevant)
687 case vect_unused_in_scope:
688 relevant = vect_used_by_reduction;
689 break;
691 case vect_used_by_reduction:
692 if (gimple_code (stmt) == GIMPLE_PHI)
693 break;
694 /* fall through */
696 default:
697 if (dump_enabled_p ())
698 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
699 "unsupported use of reduction.\n");
700 return false;
703 live_p = false;
704 break;
706 case vect_nested_cycle:
707 if (tmp_relevant != vect_unused_in_scope
708 && tmp_relevant != vect_used_in_outer_by_reduction
709 && tmp_relevant != vect_used_in_outer)
711 if (dump_enabled_p ())
712 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
713 "unsupported use of nested cycle.\n");
715 return false;
718 live_p = false;
719 break;
721 case vect_double_reduction_def:
722 if (tmp_relevant != vect_unused_in_scope
723 && tmp_relevant != vect_used_by_reduction)
725 if (dump_enabled_p ())
726 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
727 "unsupported use of double reduction.\n");
729 return false;
732 live_p = false;
733 break;
735 default:
736 break;
739 if (is_pattern_stmt_p (stmt_vinfo))
741 /* Pattern statements are not inserted into the code, so
742 FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
743 have to scan the RHS or function arguments instead. */
744 if (is_gimple_assign (stmt))
746 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
747 tree op = gimple_assign_rhs1 (stmt);
749 i = 1;
750 if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
752 if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
753 live_p, relevant, &worklist, false)
754 || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
755 live_p, relevant, &worklist, false))
756 return false;
757 i = 2;
759 for (; i < gimple_num_ops (stmt); i++)
761 op = gimple_op (stmt, i);
762 if (TREE_CODE (op) == SSA_NAME
763 && !process_use (stmt, op, loop_vinfo, live_p, relevant,
764 &worklist, false))
765 return false;
768 else if (is_gimple_call (stmt))
770 for (i = 0; i < gimple_call_num_args (stmt); i++)
772 tree arg = gimple_call_arg (stmt, i);
773 if (!process_use (stmt, arg, loop_vinfo, live_p, relevant,
774 &worklist, false))
775 return false;
779 else
780 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
782 tree op = USE_FROM_PTR (use_p);
783 if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
784 &worklist, false))
785 return false;
788 if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
790 tree off;
791 tree decl = vect_check_gather_scatter (stmt, loop_vinfo, NULL, &off, NULL);
792 gcc_assert (decl);
793 if (!process_use (stmt, off, loop_vinfo, live_p, relevant,
794 &worklist, true))
795 return false;
797 } /* while worklist */
799 return true;
803 /* Function vect_model_simple_cost.
805 Models cost for simple operations, i.e. those that only emit ncopies of a
806 single op. Right now, this does not account for multiple insns that could
807 be generated for the single vector op. We will handle that shortly. */
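/* Invariant or constant operands are charged a one-off prologue cost
   (the scalar has to be broadcast into a vector once), while the vector
   operation itself is charged NCOPIES vector stmts in the loop body.  */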
809 void
810 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
811 enum vect_def_type *dt,
812 stmt_vector_for_cost *prologue_cost_vec,
813 stmt_vector_for_cost *body_cost_vec)
815 int i;
816 int inside_cost = 0, prologue_cost = 0;
818 /* The SLP costs were already calculated during SLP tree build. */
819 if (PURE_SLP_STMT (stmt_info))
820 return;
 822   /* FORNOW: Assuming maximum 2 args per stmt. */
823 for (i = 0; i < 2; i++)
824 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
825 prologue_cost += record_stmt_cost (prologue_cost_vec, 1, vector_stmt,
826 stmt_info, 0, vect_prologue);
828 /* Pass the inside-of-loop statements to the target-specific cost model. */
829 inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
830 stmt_info, 0, vect_body);
832 if (dump_enabled_p ())
833 dump_printf_loc (MSG_NOTE, vect_location,
834 "vect_model_simple_cost: inside_cost = %d, "
835 "prologue_cost = %d .\n", inside_cost, prologue_cost);
839 /* Model cost for type demotion and promotion operations. PWR is normally
840 zero for single-step promotions and demotions. It will be one if
841 two-step promotion/demotion is required, and so on. Each additional
842 step doubles the number of instructions required. */
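/* For example, with PWR == 1 (a two-step promotion) the loop below
   charges vect_pow2 (1) + vect_pow2 (2) = 2 + 4 = 6 vec_promote_demote
   stmts, whereas the corresponding two-step demotion is charged
   vect_pow2 (0) + vect_pow2 (1) = 3.  */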
844 static void
845 vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
846 enum vect_def_type *dt, int pwr)
848 int i, tmp;
849 int inside_cost = 0, prologue_cost = 0;
850 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
851 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
852 void *target_cost_data;
854 /* The SLP costs were already calculated during SLP tree build. */
855 if (PURE_SLP_STMT (stmt_info))
856 return;
858 if (loop_vinfo)
859 target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
860 else
861 target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);
863 for (i = 0; i < pwr + 1; i++)
865 tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
866 (i + 1) : i;
867 inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
868 vec_promote_demote, stmt_info, 0,
869 vect_body);
 872   /* FORNOW: Assuming maximum 2 args per stmt. */
873 for (i = 0; i < 2; i++)
874 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
875 prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
876 stmt_info, 0, vect_prologue);
878 if (dump_enabled_p ())
879 dump_printf_loc (MSG_NOTE, vect_location,
880 "vect_model_promotion_demotion_cost: inside_cost = %d, "
881 "prologue_cost = %d .\n", inside_cost, prologue_cost);
884 /* Function vect_cost_group_size
886 For grouped load or store, return the group_size only if it is the first
887 load or store of a group, else return 1. This ensures that group size is
888 only returned once per group. */
890 static int
891 vect_cost_group_size (stmt_vec_info stmt_info)
893 gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
895 if (first_stmt == STMT_VINFO_STMT (stmt_info))
896 return GROUP_SIZE (stmt_info);
898 return 1;
902 /* Function vect_model_store_cost
904 Models cost for stores. In the case of grouped accesses, one access
905 has the overhead of the grouped access attributed to it. */
907 void
908 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
909 bool store_lanes_p, enum vect_def_type dt,
910 slp_tree slp_node,
911 stmt_vector_for_cost *prologue_cost_vec,
912 stmt_vector_for_cost *body_cost_vec)
914 int group_size;
915 unsigned int inside_cost = 0, prologue_cost = 0;
916 struct data_reference *first_dr;
917 gimple *first_stmt;
919 if (dt == vect_constant_def || dt == vect_external_def)
920 prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
921 stmt_info, 0, vect_prologue);
923 /* Grouped access? */
924 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
926 if (slp_node)
928 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
929 group_size = 1;
931 else
933 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
934 group_size = vect_cost_group_size (stmt_info);
937 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
939 /* Not a grouped access. */
940 else
942 group_size = 1;
943 first_dr = STMT_VINFO_DATA_REF (stmt_info);
946 /* We assume that the cost of a single store-lanes instruction is
947 equivalent to the cost of GROUP_SIZE separate stores. If a grouped
948 access is instead being provided by a permute-and-store operation,
949 include the cost of the permutes. */
950 if (!store_lanes_p && group_size > 1
951 && !STMT_VINFO_STRIDED_P (stmt_info))
 953       /* Uses high and low interleave or shuffle operations for each
954 needed permute. */
955 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
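      /* E.g. ncopies == 2 and group_size == 4 gives
	 2 * ceil_log2 (4) * 4 = 16 permute stmts.  */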
956 inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
957 stmt_info, 0, vect_body);
959 if (dump_enabled_p ())
960 dump_printf_loc (MSG_NOTE, vect_location,
961 "vect_model_store_cost: strided group_size = %d .\n",
962 group_size);
965 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
966 /* Costs of the stores. */
967 if (STMT_VINFO_STRIDED_P (stmt_info)
968 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
970 /* N scalar stores plus extracting the elements. */
971 inside_cost += record_stmt_cost (body_cost_vec,
972 ncopies * TYPE_VECTOR_SUBPARTS (vectype),
973 scalar_store, stmt_info, 0, vect_body);
975 else
976 vect_get_store_cost (first_dr, ncopies, &inside_cost, body_cost_vec);
978 if (STMT_VINFO_STRIDED_P (stmt_info))
979 inside_cost += record_stmt_cost (body_cost_vec,
980 ncopies * TYPE_VECTOR_SUBPARTS (vectype),
981 vec_to_scalar, stmt_info, 0, vect_body);
983 if (dump_enabled_p ())
984 dump_printf_loc (MSG_NOTE, vect_location,
985 "vect_model_store_cost: inside_cost = %d, "
986 "prologue_cost = %d .\n", inside_cost, prologue_cost);
990 /* Calculate cost of DR's memory access. */
991 void
992 vect_get_store_cost (struct data_reference *dr, int ncopies,
993 unsigned int *inside_cost,
994 stmt_vector_for_cost *body_cost_vec)
996 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
997 gimple *stmt = DR_STMT (dr);
998 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1000 switch (alignment_support_scheme)
1002 case dr_aligned:
1004 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1005 vector_store, stmt_info, 0,
1006 vect_body);
1008 if (dump_enabled_p ())
1009 dump_printf_loc (MSG_NOTE, vect_location,
1010 "vect_model_store_cost: aligned.\n");
1011 break;
1014 case dr_unaligned_supported:
1016 /* Here, we assign an additional cost for the unaligned store. */
1017 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1018 unaligned_store, stmt_info,
1019 DR_MISALIGNMENT (dr), vect_body);
1020 if (dump_enabled_p ())
1021 dump_printf_loc (MSG_NOTE, vect_location,
1022 "vect_model_store_cost: unaligned supported by "
1023 "hardware.\n");
1024 break;
1027 case dr_unaligned_unsupported:
1029 *inside_cost = VECT_MAX_COST;
1031 if (dump_enabled_p ())
1032 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1033 "vect_model_store_cost: unsupported access.\n");
1034 break;
1037 default:
1038 gcc_unreachable ();
1043 /* Function vect_model_load_cost
1045 Models cost for loads. In the case of grouped accesses, the last access
1046 has the overhead of the grouped access attributed to it. Since unaligned
1047 accesses are supported for loads, we also account for the costs of the
1048 access scheme chosen. */
1050 void
1051 vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
1052 bool load_lanes_p, slp_tree slp_node,
1053 stmt_vector_for_cost *prologue_cost_vec,
1054 stmt_vector_for_cost *body_cost_vec)
1056 int group_size;
1057 gimple *first_stmt;
1058 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
1059 unsigned int inside_cost = 0, prologue_cost = 0;
1061 /* Grouped accesses? */
1062 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
1063 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && first_stmt && !slp_node)
1065 group_size = vect_cost_group_size (stmt_info);
1066 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
1068 /* Not a grouped access. */
1069 else
1071 group_size = 1;
1072 first_dr = dr;
1075 /* We assume that the cost of a single load-lanes instruction is
1076 equivalent to the cost of GROUP_SIZE separate loads. If a grouped
1077 access is instead being provided by a load-and-permute operation,
1078 include the cost of the permutes. */
1079 if (!load_lanes_p && group_size > 1
1080 && !STMT_VINFO_STRIDED_P (stmt_info))
 1082       /* Uses even and odd extract operations or shuffle operations
1083 for each needed permute. */
1084 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
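      /* Same formula as for stores above, e.g. 2 copies of a group of 4
	 is modelled as 16 permute stmts.  */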
1085 inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
1086 stmt_info, 0, vect_body);
1088 if (dump_enabled_p ())
1089 dump_printf_loc (MSG_NOTE, vect_location,
1090 "vect_model_load_cost: strided group_size = %d .\n",
1091 group_size);
1094 /* The loads themselves. */
1095 if (STMT_VINFO_STRIDED_P (stmt_info)
1096 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
1098 /* N scalar loads plus gathering them into a vector. */
1099 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1100 inside_cost += record_stmt_cost (body_cost_vec,
1101 ncopies * TYPE_VECTOR_SUBPARTS (vectype),
1102 scalar_load, stmt_info, 0, vect_body);
1104 else
1105 vect_get_load_cost (first_dr, ncopies,
1106 ((!STMT_VINFO_GROUPED_ACCESS (stmt_info))
1107 || group_size > 1 || slp_node),
1108 &inside_cost, &prologue_cost,
1109 prologue_cost_vec, body_cost_vec, true);
1110 if (STMT_VINFO_STRIDED_P (stmt_info))
1111 inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
1112 stmt_info, 0, vect_body);
1114 if (dump_enabled_p ())
1115 dump_printf_loc (MSG_NOTE, vect_location,
1116 "vect_model_load_cost: inside_cost = %d, "
1117 "prologue_cost = %d .\n", inside_cost, prologue_cost);
1121 /* Calculate cost of DR's memory access. */
1122 void
1123 vect_get_load_cost (struct data_reference *dr, int ncopies,
1124 bool add_realign_cost, unsigned int *inside_cost,
1125 unsigned int *prologue_cost,
1126 stmt_vector_for_cost *prologue_cost_vec,
1127 stmt_vector_for_cost *body_cost_vec,
1128 bool record_prologue_costs)
1130 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
1131 gimple *stmt = DR_STMT (dr);
1132 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1134 switch (alignment_support_scheme)
1136 case dr_aligned:
1138 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1139 stmt_info, 0, vect_body);
1141 if (dump_enabled_p ())
1142 dump_printf_loc (MSG_NOTE, vect_location,
1143 "vect_model_load_cost: aligned.\n");
1145 break;
1147 case dr_unaligned_supported:
1149 /* Here, we assign an additional cost for the unaligned load. */
1150 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1151 unaligned_load, stmt_info,
1152 DR_MISALIGNMENT (dr), vect_body);
1154 if (dump_enabled_p ())
1155 dump_printf_loc (MSG_NOTE, vect_location,
1156 "vect_model_load_cost: unaligned supported by "
1157 "hardware.\n");
1159 break;
1161 case dr_explicit_realign:
1163 *inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
1164 vector_load, stmt_info, 0, vect_body);
1165 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1166 vec_perm, stmt_info, 0, vect_body);
1168 /* FIXME: If the misalignment remains fixed across the iterations of
1169 the containing loop, the following cost should be added to the
1170 prologue costs. */
1171 if (targetm.vectorize.builtin_mask_for_load)
1172 *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
1173 stmt_info, 0, vect_body);
1175 if (dump_enabled_p ())
1176 dump_printf_loc (MSG_NOTE, vect_location,
1177 "vect_model_load_cost: explicit realign\n");
1179 break;
1181 case dr_explicit_realign_optimized:
1183 if (dump_enabled_p ())
1184 dump_printf_loc (MSG_NOTE, vect_location,
1185 "vect_model_load_cost: unaligned software "
1186 "pipelined.\n");
1188 /* Unaligned software pipeline has a load of an address, an initial
1189 load, and possibly a mask operation to "prime" the loop. However,
1190 if this is an access in a group of loads, which provide grouped
1191 access, then the above cost should only be considered for one
1192 access in the group. Inside the loop, there is a load op
1193 and a realignment op. */
1195 if (add_realign_cost && record_prologue_costs)
1197 *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
1198 vector_stmt, stmt_info,
1199 0, vect_prologue);
1200 if (targetm.vectorize.builtin_mask_for_load)
1201 *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
1202 vector_stmt, stmt_info,
1203 0, vect_prologue);
1206 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1207 stmt_info, 0, vect_body);
1208 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
1209 stmt_info, 0, vect_body);
1211 if (dump_enabled_p ())
1212 dump_printf_loc (MSG_NOTE, vect_location,
1213 "vect_model_load_cost: explicit realign optimized"
1214 "\n");
1216 break;
1219 case dr_unaligned_unsupported:
1221 *inside_cost = VECT_MAX_COST;
1223 if (dump_enabled_p ())
1224 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1225 "vect_model_load_cost: unsupported access.\n");
1226 break;
1229 default:
1230 gcc_unreachable ();
1234 /* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
1235 the loop preheader for the vectorized stmt STMT. */
1237 static void
1238 vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
1240 if (gsi)
1241 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1242 else
1244 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1245 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1247 if (loop_vinfo)
1249 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1250 basic_block new_bb;
1251 edge pe;
1253 if (nested_in_vect_loop_p (loop, stmt))
1254 loop = loop->inner;
1256 pe = loop_preheader_edge (loop);
1257 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
1258 gcc_assert (!new_bb);
1260 else
1262 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
1263 basic_block bb;
1264 gimple_stmt_iterator gsi_bb_start;
1266 gcc_assert (bb_vinfo);
1267 bb = BB_VINFO_BB (bb_vinfo);
1268 gsi_bb_start = gsi_after_labels (bb);
1269 gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
1273 if (dump_enabled_p ())
1275 dump_printf_loc (MSG_NOTE, vect_location,
1276 "created new init_stmt: ");
1277 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
1281 /* Function vect_init_vector.
1283 Insert a new stmt (INIT_STMT) that initializes a new variable of type
1284 TYPE with the value VAL. If TYPE is a vector type and VAL does not have
 1285    vector type, a vector with all elements equal to VAL is created first.
 1286    Place the initialization at GSI if it is not NULL. Otherwise, place the
1287 initialization at the loop preheader.
1288 Return the DEF of INIT_STMT.
1289 It will be used in the vectorization of STMT. */
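/* A minimal sketch of the common case: when TYPE is, say, a four-element
   integer vector type and VAL is the scalar constant 5, the value is
   first widened with build_vector_from_val to { 5, 5, 5, 5 } and then a
   stmt of the form
     cst_1 = { 5, 5, 5, 5 };
   is emitted either at GSI or in the loop preheader.  */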
1291 tree
1292 vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi)
1294 gimple *init_stmt;
1295 tree new_temp;
1297 if (TREE_CODE (type) == VECTOR_TYPE
1298 && TREE_CODE (TREE_TYPE (val)) != VECTOR_TYPE)
1300 if (!types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
1302 if (CONSTANT_CLASS_P (val))
1303 val = fold_convert (TREE_TYPE (type), val);
1304 else
1306 new_temp = make_ssa_name (TREE_TYPE (type));
1307 init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
1308 vect_init_vector_1 (stmt, init_stmt, gsi);
1309 val = new_temp;
1312 val = build_vector_from_val (type, val);
1315 new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
1316 init_stmt = gimple_build_assign (new_temp, val);
1317 vect_init_vector_1 (stmt, init_stmt, gsi);
1318 return new_temp;
1322 /* Function vect_get_vec_def_for_operand.
1324 OP is an operand in STMT. This function returns a (vector) def that will be
1325 used in the vectorized stmt for STMT.
1327 In the case that OP is an SSA_NAME which is defined in the loop, then
1328 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
1330 In case OP is an invariant or constant, a new stmt that creates a vector def
1331 needs to be introduced. VECTYPE may be used to specify a required type for
1332 vector invariant. */
1334 tree
1335 vect_get_vec_def_for_operand (tree op, gimple *stmt, tree vectype)
1337 tree vec_oprnd;
1338 gimple *vec_stmt;
1339 gimple *def_stmt;
1340 stmt_vec_info def_stmt_info = NULL;
1341 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1342 tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
1343 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1344 enum vect_def_type dt;
1345 bool is_simple_use;
1346 tree vector_type;
1348 if (dump_enabled_p ())
1350 dump_printf_loc (MSG_NOTE, vect_location,
1351 "vect_get_vec_def_for_operand: ");
1352 dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
1353 dump_printf (MSG_NOTE, "\n");
1356 is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt);
1357 gcc_assert (is_simple_use);
1358 if (dump_enabled_p ())
1360 int loc_printed = 0;
1361 if (def_stmt)
1363 if (loc_printed)
1364 dump_printf (MSG_NOTE, " def_stmt = ");
1365 else
1366 dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = ");
1367 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
1371 switch (dt)
1373 /* operand is a constant or a loop invariant. */
1374 case vect_constant_def:
1375 case vect_external_def:
1377 if (vectype)
1378 vector_type = vectype;
1379 else if (TREE_CODE (TREE_TYPE (op)) == BOOLEAN_TYPE
1380 && VECTOR_BOOLEAN_TYPE_P (stmt_vectype))
1381 vector_type = build_same_sized_truth_vector_type (stmt_vectype);
1382 else
1383 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
1385 gcc_assert (vector_type);
1386 return vect_init_vector (stmt, op, vector_type, NULL);
1389 /* operand is defined inside the loop. */
1390 case vect_internal_def:
1392 /* Get the def from the vectorized stmt. */
1393 def_stmt_info = vinfo_for_stmt (def_stmt);
1395 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1396 /* Get vectorized pattern statement. */
1397 if (!vec_stmt
1398 && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
1399 && !STMT_VINFO_RELEVANT (def_stmt_info))
1400 vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
1401 STMT_VINFO_RELATED_STMT (def_stmt_info)));
1402 gcc_assert (vec_stmt);
1403 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1404 vec_oprnd = PHI_RESULT (vec_stmt);
1405 else if (is_gimple_call (vec_stmt))
1406 vec_oprnd = gimple_call_lhs (vec_stmt);
1407 else
1408 vec_oprnd = gimple_assign_lhs (vec_stmt);
1409 return vec_oprnd;
1412 /* operand is defined by a loop header phi - reduction */
1413 case vect_reduction_def:
1414 case vect_double_reduction_def:
1415 case vect_nested_cycle:
1416 /* Code should use get_initial_def_for_reduction. */
1417 gcc_unreachable ();
1419 /* operand is defined by loop-header phi - induction. */
1420 case vect_induction_def:
1422 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1424 /* Get the def from the vectorized stmt. */
1425 def_stmt_info = vinfo_for_stmt (def_stmt);
1426 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1427 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1428 vec_oprnd = PHI_RESULT (vec_stmt);
1429 else
1430 vec_oprnd = gimple_get_lhs (vec_stmt);
1431 return vec_oprnd;
1434 default:
1435 gcc_unreachable ();
1440 /* Function vect_get_vec_def_for_stmt_copy
1442 Return a vector-def for an operand. This function is used when the
1443 vectorized stmt to be created (by the caller to this function) is a "copy"
1444 created in case the vectorized result cannot fit in one vector, and several
1445 copies of the vector-stmt are required. In this case the vector-def is
1446 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1447 of the stmt that defines VEC_OPRND.
1448 DT is the type of the vector def VEC_OPRND.
1450 Context:
1451 In case the vectorization factor (VF) is bigger than the number
1452 of elements that can fit in a vectype (nunits), we have to generate
1453 more than one vector stmt to vectorize the scalar stmt. This situation
1454 arises when there are multiple data-types operated upon in the loop; the
1455 smallest data-type determines the VF, and as a result, when vectorizing
1456 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1457 vector stmt (each computing a vector of 'nunits' results, and together
1458 computing 'VF' results in each iteration). This function is called when
1459 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1460 which VF=16 and nunits=4, so the number of copies required is 4):
1462 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1464 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1465 VS1.1: vx.1 = memref1 VS1.2
1466 VS1.2: vx.2 = memref2 VS1.3
1467 VS1.3: vx.3 = memref3
1469 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1470 VSnew.1: vz1 = vx.1 + ... VSnew.2
1471 VSnew.2: vz2 = vx.2 + ... VSnew.3
1472 VSnew.3: vz3 = vx.3 + ...
1474 The vectorization of S1 is explained in vectorizable_load.
1475 The vectorization of S2:
1476 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1477 the function 'vect_get_vec_def_for_operand' is called to
1478 get the relevant vector-def for each operand of S2. For operand x it
1479 returns the vector-def 'vx.0'.
1481 To create the remaining copies of the vector-stmt (VSnew.j), this
1482 function is called to get the relevant vector-def for each operand. It is
1483 obtained from the respective VS1.j stmt, which is recorded in the
1484 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1486 For example, to obtain the vector-def 'vx.1' in order to create the
1487 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1488 Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
1489 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1490 and return its def ('vx.1').
1491 Overall, to create the above sequence this function will be called 3 times:
1492 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1493 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1494 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
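/* In short: given the vector def produced by copy j-1, the function below
   looks up its defining stmt, follows STMT_VINFO_RELATED_STMT to the stmt
   for copy j, and returns that stmt's lhs (or PHI result).  */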
1496 tree
1497 vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1499 gimple *vec_stmt_for_operand;
1500 stmt_vec_info def_stmt_info;
1502 /* Do nothing; can reuse same def. */
1503 if (dt == vect_external_def || dt == vect_constant_def )
1504 return vec_oprnd;
1506 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1507 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1508 gcc_assert (def_stmt_info);
1509 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1510 gcc_assert (vec_stmt_for_operand);
1511 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1512 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1513 else
1514 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1515 return vec_oprnd;
1519 /* Get vectorized definitions for the operands to create a copy of an original
1520 stmt. See vect_get_vec_def_for_stmt_copy () for details. */
1522 static void
1523 vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
1524 vec<tree> *vec_oprnds0,
1525 vec<tree> *vec_oprnds1)
1527 tree vec_oprnd = vec_oprnds0->pop ();
1529 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
1530 vec_oprnds0->quick_push (vec_oprnd);
1532 if (vec_oprnds1 && vec_oprnds1->length ())
1534 vec_oprnd = vec_oprnds1->pop ();
1535 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
1536 vec_oprnds1->quick_push (vec_oprnd);
1541 /* Get vectorized definitions for OP0 and OP1.
1542 REDUC_INDEX is the index of reduction operand in case of reduction,
1543 and -1 otherwise. */
1545 void
1546 vect_get_vec_defs (tree op0, tree op1, gimple *stmt,
1547 vec<tree> *vec_oprnds0,
1548 vec<tree> *vec_oprnds1,
1549 slp_tree slp_node, int reduc_index)
1551 if (slp_node)
1553 int nops = (op1 == NULL_TREE) ? 1 : 2;
1554 auto_vec<tree> ops (nops);
1555 auto_vec<vec<tree> > vec_defs (nops);
1557 ops.quick_push (op0);
1558 if (op1)
1559 ops.quick_push (op1);
1561 vect_get_slp_defs (ops, slp_node, &vec_defs, reduc_index);
1563 *vec_oprnds0 = vec_defs[0];
1564 if (op1)
1565 *vec_oprnds1 = vec_defs[1];
1567 else
1569 tree vec_oprnd;
1571 vec_oprnds0->create (1);
1572 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt);
1573 vec_oprnds0->quick_push (vec_oprnd);
1575 if (op1)
1577 vec_oprnds1->create (1);
1578 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt);
1579 vec_oprnds1->quick_push (vec_oprnd);
1585 /* Function vect_finish_stmt_generation.
1587 Insert a new stmt. */
1589 void
1590 vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt,
1591 gimple_stmt_iterator *gsi)
1593 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1594 vec_info *vinfo = stmt_info->vinfo;
1596 gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
1598 if (!gsi_end_p (*gsi)
1599 && gimple_has_mem_ops (vec_stmt))
1601 gimple *at_stmt = gsi_stmt (*gsi);
1602 tree vuse = gimple_vuse (at_stmt);
1603 if (vuse && TREE_CODE (vuse) == SSA_NAME)
1605 tree vdef = gimple_vdef (at_stmt);
1606 gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
1607 /* If we have an SSA vuse and insert a store, update virtual
1608 SSA form to avoid triggering the renamer. Do so only
1609 if we can easily see all uses - which is what almost always
1610 happens with the way vectorized stmts are inserted. */
1611 if ((vdef && TREE_CODE (vdef) == SSA_NAME)
1612 && ((is_gimple_assign (vec_stmt)
1613 && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
1614 || (is_gimple_call (vec_stmt)
1615 && !(gimple_call_flags (vec_stmt)
1616 & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
1618 tree new_vdef = copy_ssa_name (vuse, vec_stmt);
1619 gimple_set_vdef (vec_stmt, new_vdef);
1620 SET_USE (gimple_vuse_op (at_stmt), new_vdef);
1624 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1626 set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo));
1628 if (dump_enabled_p ())
1630 dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
1631 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
1634 gimple_set_location (vec_stmt, gimple_location (stmt));
1636 /* While EH edges will generally prevent vectorization, stmt might
1637 e.g. be in a must-not-throw region. Ensure newly created stmts
1638 that could throw are part of the same region. */
1639 int lp_nr = lookup_stmt_eh_lp (stmt);
1640 if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
1641 add_stmt_to_eh_lp (vec_stmt, lp_nr);
1644 /* Checks if CALL can be vectorized in type VECTYPE. Returns
1645 a function declaration if the target has a vectorized version
1646 of the function, or NULL_TREE if the function cannot be vectorized. */
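/* For example, when the loop body calls a const math builtin such as
   sqrt, the target hook builtin_vectorized_function may return the decl
   of a vectorized sqrt variant, which the vectorizer then calls instead;
   if the hook returns NULL_TREE the call cannot be vectorized this way.  */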
1648 tree
1649 vectorizable_function (gcall *call, tree vectype_out, tree vectype_in)
1651 tree fndecl = gimple_call_fndecl (call);
1653 /* We only handle functions that do not read or clobber memory -- i.e.
1654 const or novops ones. */
1655 if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
1656 return NULL_TREE;
1658 if (!fndecl
1659 || TREE_CODE (fndecl) != FUNCTION_DECL
1660 || !DECL_BUILT_IN (fndecl))
1661 return NULL_TREE;
1663 return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
1664 vectype_in);
1668 static tree permute_vec_elements (tree, tree, tree, gimple *,
1669 gimple_stmt_iterator *);
1672 /* Function vectorizable_mask_load_store.
1674 Check if STMT performs a conditional load or store that can be vectorized.
1675 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1676 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
1677 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
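/* Note the argument layout assumed below: the mask is argument 2 of the
   IFN_MASK_LOAD/IFN_MASK_STORE internal call, and for IFN_MASK_STORE the
   stored value is argument 3 (compare exist_non_indexing_operands_for_use_p
   above).  */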
1679 static bool
1680 vectorizable_mask_load_store (gimple *stmt, gimple_stmt_iterator *gsi,
1681 gimple **vec_stmt, slp_tree slp_node)
1683 tree vec_dest = NULL;
1684 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1685 stmt_vec_info prev_stmt_info;
1686 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1687 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1688 bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
1689 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
1690 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1691 tree rhs_vectype = NULL_TREE;
1692 tree mask_vectype;
1693 tree elem_type;
1694 gimple *new_stmt;
1695 tree dummy;
1696 tree dataref_ptr = NULL_TREE;
1697 gimple *ptr_incr;
1698 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
1699 int ncopies;
1700 int i, j;
1701 bool inv_p;
1702 tree gather_base = NULL_TREE, gather_off = NULL_TREE;
1703 tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
1704 int gather_scale = 1;
1705 enum vect_def_type gather_dt = vect_unknown_def_type;
1706 bool is_store;
1707 tree mask;
1708 gimple *def_stmt;
1709 enum vect_def_type dt;
1711 if (slp_node != NULL)
1712 return false;
1714 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
1715 gcc_assert (ncopies >= 1);
1717 is_store = gimple_call_internal_fn (stmt) == IFN_MASK_STORE;
1718 mask = gimple_call_arg (stmt, 2);
1720 if (TREE_CODE (TREE_TYPE (mask)) != BOOLEAN_TYPE)
1721 return false;
1723 /* FORNOW. This restriction should be relaxed. */
1724 if (nested_in_vect_loop && ncopies > 1)
1726 if (dump_enabled_p ())
1727 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1728 "multiple types in nested loop.");
1729 return false;
1732 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1733 return false;
1735 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1736 return false;
1738 if (!STMT_VINFO_DATA_REF (stmt_info))
1739 return false;
1741 elem_type = TREE_TYPE (vectype);
1743 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1744 return false;
1746 if (STMT_VINFO_STRIDED_P (stmt_info))
1747 return false;
1749 if (TREE_CODE (mask) != SSA_NAME)
1750 return false;
1752 if (!vect_is_simple_use (mask, loop_vinfo, &def_stmt, &dt, &mask_vectype))
1753 return false;
1755 if (!mask_vectype)
1756 mask_vectype = get_mask_type_for_scalar_type (TREE_TYPE (vectype));
1758 if (!mask_vectype)
1759 return false;
1761 if (is_store)
1763 tree rhs = gimple_call_arg (stmt, 3);
1764 if (!vect_is_simple_use (rhs, loop_vinfo, &def_stmt, &dt, &rhs_vectype))
1765 return false;
1768 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
1770 gimple *def_stmt;
1771 gather_decl = vect_check_gather_scatter (stmt, loop_vinfo, &gather_base,
1772 &gather_off, &gather_scale);
1773 gcc_assert (gather_decl);
1774 if (!vect_is_simple_use (gather_off, loop_vinfo, &def_stmt, &gather_dt,
1775 &gather_off_vectype))
1777 if (dump_enabled_p ())
1778 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1779 "gather index use not simple.");
1780 return false;
1783 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
1784 tree masktype
1785 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
1786 if (TREE_CODE (masktype) == INTEGER_TYPE)
1788 if (dump_enabled_p ())
1789 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1790 "masked gather with integer mask not supported.");
1791 return false;
1794 else if (tree_int_cst_compare (nested_in_vect_loop
1795 ? STMT_VINFO_DR_STEP (stmt_info)
1796 : DR_STEP (dr), size_zero_node) <= 0)
1797 return false;
1798 else if (!VECTOR_MODE_P (TYPE_MODE (vectype))
1799 || !can_vec_mask_load_store_p (TYPE_MODE (vectype),
1800 TYPE_MODE (mask_vectype),
1801 !is_store)
1802 || (rhs_vectype
1803 && !useless_type_conversion_p (vectype, rhs_vectype)))
1804 return false;
1806 if (!vec_stmt) /* transformation not required. */
1808 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
1809 if (is_store)
1810 vect_model_store_cost (stmt_info, ncopies, false, dt,
1811 NULL, NULL, NULL);
1812 else
1813 vect_model_load_cost (stmt_info, ncopies, false, NULL, NULL, NULL);
1814 return true;
1817 /** Transform. **/
1819 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
1821 tree vec_oprnd0 = NULL_TREE, op;
1822 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
1823 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
1824 tree ptr, vec_mask = NULL_TREE, mask_op = NULL_TREE, var, scale;
1825 tree perm_mask = NULL_TREE, prev_res = NULL_TREE;
1826 tree mask_perm_mask = NULL_TREE;
1827 edge pe = loop_preheader_edge (loop);
1828 gimple_seq seq;
1829 basic_block new_bb;
1830 enum { NARROW, NONE, WIDEN } modifier;
1831 int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);
1833 rettype = TREE_TYPE (TREE_TYPE (gather_decl));
1834 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1835 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1836 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1837 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1838 scaletype = TREE_VALUE (arglist);
1839 gcc_checking_assert (types_compatible_p (srctype, rettype)
1840 && types_compatible_p (srctype, masktype));
1842 if (nunits == gather_off_nunits)
1843 modifier = NONE;
1844 else if (nunits == gather_off_nunits / 2)
1846 unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
1847 modifier = WIDEN;
1849 for (i = 0; i < gather_off_nunits; ++i)
1850 sel[i] = i | nunits;
1852 perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
1854 else if (nunits == gather_off_nunits * 2)
1856 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
1857 modifier = NARROW;
1859 for (i = 0; i < nunits; ++i)
1860 sel[i] = i < gather_off_nunits
1861 ? i : i + nunits - gather_off_nunits;
1863 perm_mask = vect_gen_perm_mask_checked (vectype, sel);
1864 ncopies *= 2;
1865 for (i = 0; i < nunits; ++i)
1866 sel[i] = i | gather_off_nunits;
1867 mask_perm_mask = vect_gen_perm_mask_checked (masktype, sel);
1869 else
1870 gcc_unreachable ();
1872 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
1874 ptr = fold_convert (ptrtype, gather_base);
1875 if (!is_gimple_min_invariant (ptr))
1877 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
1878 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
1879 gcc_assert (!new_bb);
1882 scale = build_int_cst (scaletype, gather_scale);
1884 prev_stmt_info = NULL;
1885 for (j = 0; j < ncopies; ++j)
1887 if (modifier == WIDEN && (j & 1))
1888 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
1889 perm_mask, stmt, gsi);
1890 else if (j == 0)
1891 op = vec_oprnd0
1892 = vect_get_vec_def_for_operand (gather_off, stmt);
1893 else
1894 op = vec_oprnd0
1895 = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);
1897 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
1899 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
1900 == TYPE_VECTOR_SUBPARTS (idxtype));
1901 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
1902 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
1903 new_stmt
1904 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
1905 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1906 op = var;
1909 if (mask_perm_mask && (j & 1))
1910 mask_op = permute_vec_elements (mask_op, mask_op,
1911 mask_perm_mask, stmt, gsi);
1912 else
1914 if (j == 0)
1915 vec_mask = vect_get_vec_def_for_operand (mask, stmt);
1916 else
1918 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
1919 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
1922 mask_op = vec_mask;
1923 if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
1925 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op))
1926 == TYPE_VECTOR_SUBPARTS (masktype));
1927 var = vect_get_new_ssa_name (masktype, vect_simple_var);
1928 mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
1929 new_stmt
1930 = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op);
1931 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1932 mask_op = var;
1936 new_stmt
1937 = gimple_build_call (gather_decl, 5, mask_op, ptr, op, mask_op,
1938 scale);
1940 if (!useless_type_conversion_p (vectype, rettype))
1942 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
1943 == TYPE_VECTOR_SUBPARTS (rettype));
1944 op = vect_get_new_ssa_name (rettype, vect_simple_var);
1945 gimple_call_set_lhs (new_stmt, op);
1946 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1947 var = make_ssa_name (vec_dest);
1948 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
1949 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
1951 else
1953 var = make_ssa_name (vec_dest, new_stmt);
1954 gimple_call_set_lhs (new_stmt, var);
1957 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1959 if (modifier == NARROW)
1961 if ((j & 1) == 0)
1963 prev_res = var;
1964 continue;
1966 var = permute_vec_elements (prev_res, var,
1967 perm_mask, stmt, gsi);
1968 new_stmt = SSA_NAME_DEF_STMT (var);
1971 if (prev_stmt_info == NULL)
1972 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
1973 else
1974 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
1975 prev_stmt_info = vinfo_for_stmt (new_stmt);
1978 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
1979 from the IL. */
1980 if (STMT_VINFO_RELATED_STMT (stmt_info))
1982 stmt = STMT_VINFO_RELATED_STMT (stmt_info);
1983 stmt_info = vinfo_for_stmt (stmt);
1985 tree lhs = gimple_call_lhs (stmt);
1986 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
1987 set_vinfo_for_stmt (new_stmt, stmt_info);
1988 set_vinfo_for_stmt (stmt, NULL);
1989 STMT_VINFO_STMT (stmt_info) = new_stmt;
1990 gsi_replace (gsi, new_stmt, true);
1991 return true;
1993 else if (is_store)
1995 tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
1996 prev_stmt_info = NULL;
1997 for (i = 0; i < ncopies; i++)
1999 unsigned align, misalign;
2001 if (i == 0)
2003 tree rhs = gimple_call_arg (stmt, 3);
2004 vec_rhs = vect_get_vec_def_for_operand (rhs, stmt);
2005 vec_mask = vect_get_vec_def_for_operand (mask, stmt);
2006 /* We should have caught mismatched types earlier. */
2007 gcc_assert (useless_type_conversion_p (vectype,
2008 TREE_TYPE (vec_rhs)));
2009 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
2010 NULL_TREE, &dummy, gsi,
2011 &ptr_incr, false, &inv_p);
2012 gcc_assert (!inv_p);
2014 else
2016 vect_is_simple_use (vec_rhs, loop_vinfo, &def_stmt, &dt);
2017 vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs);
2018 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
2019 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2020 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2021 TYPE_SIZE_UNIT (vectype));
2024 align = TYPE_ALIGN_UNIT (vectype);
2025 if (aligned_access_p (dr))
2026 misalign = 0;
2027 else if (DR_MISALIGNMENT (dr) == -1)
2029 align = TYPE_ALIGN_UNIT (elem_type);
2030 misalign = 0;
2032 else
2033 misalign = DR_MISALIGNMENT (dr);
2034 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2035 misalign);
2036 new_stmt
2037 = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
2038 gimple_call_arg (stmt, 1),
2039 vec_mask, vec_rhs);
2040 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2041 if (i == 0)
2042 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2043 else
2044 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2045 prev_stmt_info = vinfo_for_stmt (new_stmt);
2048 else
2050 tree vec_mask = NULL_TREE;
2051 prev_stmt_info = NULL;
2052 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
2053 for (i = 0; i < ncopies; i++)
2055 unsigned align, misalign;
2057 if (i == 0)
2059 vec_mask = vect_get_vec_def_for_operand (mask, stmt);
2060 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
2061 NULL_TREE, &dummy, gsi,
2062 &ptr_incr, false, &inv_p);
2063 gcc_assert (!inv_p);
2065 else
2067 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
2068 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2069 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2070 TYPE_SIZE_UNIT (vectype));
2073 align = TYPE_ALIGN_UNIT (vectype);
2074 if (aligned_access_p (dr))
2075 misalign = 0;
2076 else if (DR_MISALIGNMENT (dr) == -1)
2078 align = TYPE_ALIGN_UNIT (elem_type);
2079 misalign = 0;
2081 else
2082 misalign = DR_MISALIGNMENT (dr);
2083 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2084 misalign);
2085 new_stmt
2086 = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
2087 gimple_call_arg (stmt, 1),
2088 vec_mask);
2089 gimple_call_set_lhs (new_stmt, make_ssa_name (vec_dest));
2090 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2091 if (i == 0)
2092 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2093 else
2094 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2095 prev_stmt_info = vinfo_for_stmt (new_stmt);
2099 if (!is_store)
2101 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
2102 from the IL. */
2103 if (STMT_VINFO_RELATED_STMT (stmt_info))
2105 stmt = STMT_VINFO_RELATED_STMT (stmt_info);
2106 stmt_info = vinfo_for_stmt (stmt);
2108 tree lhs = gimple_call_lhs (stmt);
2109 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
2110 set_vinfo_for_stmt (new_stmt, stmt_info);
2111 set_vinfo_for_stmt (stmt, NULL);
2112 STMT_VINFO_STMT (stmt_info) = new_stmt;
2113 gsi_replace (gsi, new_stmt, true);
2116 return true;
2120 /* Function vectorizable_call.
2122 Check if GS performs a function call that can be vectorized.
2123 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2124 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2125 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
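/* For example, a scalar statement such as

       x = sqrtf (a[i]);

   can be replaced by one call per vector to a target-provided vector
   builtin (looked up through vectorizable_function below), each call
   producing a whole vector of results.  */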
2127 static bool
2128 vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
2129 slp_tree slp_node)
2131 gcall *stmt;
2132 tree vec_dest;
2133 tree scalar_dest;
2134 tree op, type;
2135 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
2136 stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info;
2137 tree vectype_out, vectype_in;
2138 int nunits_in;
2139 int nunits_out;
2140 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2141 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2142 vec_info *vinfo = stmt_info->vinfo;
2143 tree fndecl, new_temp, rhs_type;
2144 gimple *def_stmt;
2145 enum vect_def_type dt[3]
2146 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
2147 gimple *new_stmt = NULL;
2148 int ncopies, j;
2149 vec<tree> vargs = vNULL;
2150 enum { NARROW, NONE, WIDEN } modifier;
2151 size_t i, nargs;
2152 tree lhs;
2154 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2155 return false;
2157 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2158 return false;
2160 /* Is GS a vectorizable call? */
2161 stmt = dyn_cast <gcall *> (gs);
2162 if (!stmt)
2163 return false;
2165 if (gimple_call_internal_p (stmt)
2166 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
2167 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
2168 return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
2169 slp_node);
2171 if (gimple_call_lhs (stmt) == NULL_TREE
2172 || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
2173 return false;
2175 gcc_checking_assert (!stmt_can_throw_internal (stmt));
2177 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2179 /* Process function arguments. */
2180 rhs_type = NULL_TREE;
2181 vectype_in = NULL_TREE;
2182 nargs = gimple_call_num_args (stmt);
2184 /* Bail out if the function has more than three arguments; we do not have
2185 interesting builtin functions to vectorize with more than two arguments
2186 except for fma. A call with no arguments is also rejected. */
2187 if (nargs == 0 || nargs > 3)
2188 return false;
2190 /* Ignore the argument of IFN_GOMP_SIMD_LANE; it is magic. */
2191 if (gimple_call_internal_p (stmt)
2192 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2194 nargs = 0;
2195 rhs_type = unsigned_type_node;
2198 for (i = 0; i < nargs; i++)
2200 tree opvectype;
2202 op = gimple_call_arg (stmt, i);
2204 /* We can only handle calls with arguments of the same type. */
2205 if (rhs_type
2206 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
2208 if (dump_enabled_p ())
2209 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2210 "argument types differ.\n");
2211 return false;
2213 if (!rhs_type)
2214 rhs_type = TREE_TYPE (op);
2216 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[i], &opvectype))
2218 if (dump_enabled_p ())
2219 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2220 "use not simple.\n");
2221 return false;
2224 if (!vectype_in)
2225 vectype_in = opvectype;
2226 else if (opvectype
2227 && opvectype != vectype_in)
2229 if (dump_enabled_p ())
2230 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2231 "argument vector types differ.\n");
2232 return false;
2235 /* If all arguments are external or constant defs, use a vector type with
2236 the same size as the output vector type. */
2237 if (!vectype_in)
2238 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
2239 if (vec_stmt)
2240 gcc_assert (vectype_in);
2241 if (!vectype_in)
2243 if (dump_enabled_p ())
2245 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2246 "no vectype for scalar type ");
2247 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
2248 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2251 return false;
2254 /* FORNOW */
2255 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2256 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
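/* NONE: the argument and result vectors have the same number of elements.
   NARROW: the result vector has twice as many elements as each argument
   vector, so every vectorized call consumes two vector defs per argument.
   WIDEN: the opposite case, which no target currently implements.  */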
2257 if (nunits_in == nunits_out / 2)
2258 modifier = NARROW;
2259 else if (nunits_out == nunits_in)
2260 modifier = NONE;
2261 else if (nunits_out == nunits_in / 2)
2262 modifier = WIDEN;
2263 else
2264 return false;
2266 /* For now, we only vectorize functions if a target-specific builtin
2267 is available. TODO -- in some cases, it might be profitable to
2268 insert the calls for pieces of the vector, in order to be able
2269 to vectorize other operations in the loop. */
2270 fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
2271 if (fndecl == NULL_TREE)
2273 if (gimple_call_internal_p (stmt)
2274 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE
2275 && !slp_node
2276 && loop_vinfo
2277 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2278 && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
2279 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2280 == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
2282 /* We can handle IFN_GOMP_SIMD_LANE by returning a
2283 { 0, 1, 2, ... vf - 1 } vector. */
2284 gcc_assert (nargs == 0);
2286 else
2288 if (dump_enabled_p ())
2289 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2290 "function is not vectorizable.\n");
2291 return false;
2295 gcc_assert (!gimple_vuse (stmt));
2297 if (slp_node || PURE_SLP_STMT (stmt_info))
2298 ncopies = 1;
2299 else if (modifier == NARROW)
2300 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2301 else
2302 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2304 /* Sanity check: make sure that at least one copy of the vectorized stmt
2305 needs to be generated. */
2306 gcc_assert (ncopies >= 1);
2308 if (!vec_stmt) /* transformation not required. */
2310 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2311 if (dump_enabled_p ())
2312 dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
2313 "\n");
2314 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
2315 return true;
2318 /** Transform. **/
2320 if (dump_enabled_p ())
2321 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
2323 /* Handle def. */
2324 scalar_dest = gimple_call_lhs (stmt);
2325 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2327 prev_stmt_info = NULL;
2328 switch (modifier)
2330 case NONE:
2331 for (j = 0; j < ncopies; ++j)
2333 /* Build argument list for the vectorized call. */
2334 if (j == 0)
2335 vargs.create (nargs);
2336 else
2337 vargs.truncate (0);
2339 if (slp_node)
2341 auto_vec<vec<tree> > vec_defs (nargs);
2342 vec<tree> vec_oprnds0;
2344 for (i = 0; i < nargs; i++)
2345 vargs.quick_push (gimple_call_arg (stmt, i));
2346 vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
2347 vec_oprnds0 = vec_defs[0];
2349 /* Arguments are ready. Create the new vector stmt. */
2350 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
2352 size_t k;
2353 for (k = 0; k < nargs; k++)
2355 vec<tree> vec_oprndsk = vec_defs[k];
2356 vargs[k] = vec_oprndsk[i];
2358 new_stmt = gimple_build_call_vec (fndecl, vargs);
2359 new_temp = make_ssa_name (vec_dest, new_stmt);
2360 gimple_call_set_lhs (new_stmt, new_temp);
2361 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2362 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2365 for (i = 0; i < nargs; i++)
2367 vec<tree> vec_oprndsi = vec_defs[i];
2368 vec_oprndsi.release ();
2370 continue;
2373 for (i = 0; i < nargs; i++)
2375 op = gimple_call_arg (stmt, i);
2376 if (j == 0)
2377 vec_oprnd0
2378 = vect_get_vec_def_for_operand (op, stmt);
2379 else
2381 vec_oprnd0 = gimple_call_arg (new_stmt, i);
2382 vec_oprnd0
2383 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2386 vargs.quick_push (vec_oprnd0);
2389 if (gimple_call_internal_p (stmt)
2390 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2392 tree *v = XALLOCAVEC (tree, nunits_out);
2393 int k;
2394 for (k = 0; k < nunits_out; ++k)
2395 v[k] = build_int_cst (unsigned_type_node, j * nunits_out + k);
2396 tree cst = build_vector (vectype_out, v);
2397 tree new_var
2398 = vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_");
2399 gimple *init_stmt = gimple_build_assign (new_var, cst);
2400 vect_init_vector_1 (stmt, init_stmt, NULL);
2401 new_temp = make_ssa_name (vec_dest);
2402 new_stmt = gimple_build_assign (new_temp, new_var);
2404 else
2406 new_stmt = gimple_build_call_vec (fndecl, vargs);
2407 new_temp = make_ssa_name (vec_dest, new_stmt);
2408 gimple_call_set_lhs (new_stmt, new_temp);
2410 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2412 if (j == 0)
2413 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2414 else
2415 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2417 prev_stmt_info = vinfo_for_stmt (new_stmt);
2420 break;
2422 case NARROW:
2423 for (j = 0; j < ncopies; ++j)
2425 /* Build argument list for the vectorized call. */
2426 if (j == 0)
2427 vargs.create (nargs * 2);
2428 else
2429 vargs.truncate (0);
2431 if (slp_node)
2433 auto_vec<vec<tree> > vec_defs (nargs);
2434 vec<tree> vec_oprnds0;
2436 for (i = 0; i < nargs; i++)
2437 vargs.quick_push (gimple_call_arg (stmt, i));
2438 vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
2439 vec_oprnds0 = vec_defs[0];
2441 /* Arguments are ready. Create the new vector stmt. */
2442 for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
2444 size_t k;
2445 vargs.truncate (0);
2446 for (k = 0; k < nargs; k++)
2448 vec<tree> vec_oprndsk = vec_defs[k];
2449 vargs.quick_push (vec_oprndsk[i]);
2450 vargs.quick_push (vec_oprndsk[i + 1]);
2452 new_stmt = gimple_build_call_vec (fndecl, vargs);
2453 new_temp = make_ssa_name (vec_dest, new_stmt);
2454 gimple_call_set_lhs (new_stmt, new_temp);
2455 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2456 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2459 for (i = 0; i < nargs; i++)
2461 vec<tree> vec_oprndsi = vec_defs[i];
2462 vec_oprndsi.release ();
2464 continue;
2467 for (i = 0; i < nargs; i++)
2469 op = gimple_call_arg (stmt, i);
2470 if (j == 0)
2472 vec_oprnd0
2473 = vect_get_vec_def_for_operand (op, stmt);
2474 vec_oprnd1
2475 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2477 else
2479 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
2480 vec_oprnd0
2481 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
2482 vec_oprnd1
2483 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2486 vargs.quick_push (vec_oprnd0);
2487 vargs.quick_push (vec_oprnd1);
2490 new_stmt = gimple_build_call_vec (fndecl, vargs);
2491 new_temp = make_ssa_name (vec_dest, new_stmt);
2492 gimple_call_set_lhs (new_stmt, new_temp);
2493 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2495 if (j == 0)
2496 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2497 else
2498 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2500 prev_stmt_info = vinfo_for_stmt (new_stmt);
2503 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2505 break;
2507 case WIDEN:
2508 /* No current target implements this case. */
2509 return false;
2512 vargs.release ();
2514 /* The call in STMT might prevent it from being removed in dce.
2515 We however cannot remove it here, due to the way the ssa name
2516 it defines is mapped to the new definition. So just replace
2517 rhs of the statement with something harmless. */
2519 if (slp_node)
2520 return true;
2522 type = TREE_TYPE (scalar_dest);
2523 if (is_pattern_stmt_p (stmt_info))
2524 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
2525 else
2526 lhs = gimple_call_lhs (stmt);
2528 if (gimple_call_internal_p (stmt)
2529 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2531 /* Replace uses of the lhs of GOMP_SIMD_LANE call outside the loop
2532 with vf - 1 rather than 0, that is the last iteration of the
2533 vectorized loop. */
2534 imm_use_iterator iter;
2535 use_operand_p use_p;
2536 gimple *use_stmt;
2537 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2539 basic_block use_bb = gimple_bb (use_stmt);
2540 if (use_bb
2541 && !flow_bb_inside_loop_p (LOOP_VINFO_LOOP (loop_vinfo), use_bb))
2543 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2544 SET_USE (use_p, build_int_cst (TREE_TYPE (lhs),
2545 ncopies * nunits_out - 1));
2546 update_stmt (use_stmt);
2551 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
2552 set_vinfo_for_stmt (new_stmt, stmt_info);
2553 set_vinfo_for_stmt (stmt, NULL);
2554 STMT_VINFO_STMT (stmt_info) = new_stmt;
2555 gsi_replace (gsi, new_stmt, false);
2557 return true;
2561 struct simd_call_arg_info
2563 tree vectype;
2564 tree op;
2565 enum vect_def_type dt;
2566 HOST_WIDE_INT linear_step;
2567 unsigned int align;
2568 bool simd_lane_linear;
2571 /* Helper function of vectorizable_simd_clone_call. If OP, an SSA_NAME,
2572 is linear within a simd lane (but not within the whole loop), note it in
2573 *ARGINFO. */
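/* For example, given

       _1 = GOMP_SIMD_LANE (simduid.0);
       _2 = _1 * 4;
       p_3 = &a + _2;

   P_3 advances by 4 bytes from one simd lane to the next, so *ARGINFO
   records base &a and linear_step 4.  */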
2575 static void
2576 vect_simd_lane_linear (tree op, struct loop *loop,
2577 struct simd_call_arg_info *arginfo)
2579 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
2581 if (!is_gimple_assign (def_stmt)
2582 || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
2583 || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
2584 return;
2586 tree base = gimple_assign_rhs1 (def_stmt);
2587 HOST_WIDE_INT linear_step = 0;
2588 tree v = gimple_assign_rhs2 (def_stmt);
2589 while (TREE_CODE (v) == SSA_NAME)
2591 tree t;
2592 def_stmt = SSA_NAME_DEF_STMT (v);
2593 if (is_gimple_assign (def_stmt))
2594 switch (gimple_assign_rhs_code (def_stmt))
2596 case PLUS_EXPR:
2597 t = gimple_assign_rhs2 (def_stmt);
2598 if (linear_step || TREE_CODE (t) != INTEGER_CST)
2599 return;
2600 base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
2601 v = gimple_assign_rhs1 (def_stmt);
2602 continue;
2603 case MULT_EXPR:
2604 t = gimple_assign_rhs2 (def_stmt);
2605 if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
2606 return;
2607 linear_step = tree_to_shwi (t);
2608 v = gimple_assign_rhs1 (def_stmt);
2609 continue;
2610 CASE_CONVERT:
2611 t = gimple_assign_rhs1 (def_stmt);
2612 if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
2613 || (TYPE_PRECISION (TREE_TYPE (v))
2614 < TYPE_PRECISION (TREE_TYPE (t))))
2615 return;
2616 if (!linear_step)
2617 linear_step = 1;
2618 v = t;
2619 continue;
2620 default:
2621 return;
2623 else if (is_gimple_call (def_stmt)
2624 && gimple_call_internal_p (def_stmt)
2625 && gimple_call_internal_fn (def_stmt) == IFN_GOMP_SIMD_LANE
2626 && loop->simduid
2627 && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
2628 && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
2629 == loop->simduid))
2631 if (!linear_step)
2632 linear_step = 1;
2633 arginfo->linear_step = linear_step;
2634 arginfo->op = base;
2635 arginfo->simd_lane_linear = true;
2636 return;
2641 /* Function vectorizable_simd_clone_call.
2643 Check if STMT performs a function call that can be vectorized
2644 by calling a simd clone of the function.
2645 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2646 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2647 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
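/* For example, for a function declared with

       #pragma omp declare simd
       float foo (float x);

   the compiler creates simd clones that take whole vectors of X, and a
   call foo (a[i]) in a vectorizable loop can be replaced by calls to the
   clone whose simdlen best matches the vectorization factor.  */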
2649 static bool
2650 vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
2651 gimple **vec_stmt, slp_tree slp_node)
2653 tree vec_dest;
2654 tree scalar_dest;
2655 tree op, type;
2656 tree vec_oprnd0 = NULL_TREE;
2657 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
2658 tree vectype;
2659 unsigned int nunits;
2660 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2661 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2662 vec_info *vinfo = stmt_info->vinfo;
2663 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
2664 tree fndecl, new_temp;
2665 gimple *def_stmt;
2666 gimple *new_stmt = NULL;
2667 int ncopies, j;
2668 vec<simd_call_arg_info> arginfo = vNULL;
2669 vec<tree> vargs = vNULL;
2670 size_t i, nargs;
2671 tree lhs, rtype, ratype;
2672 vec<constructor_elt, va_gc> *ret_ctor_elts;
2674 /* Is STMT a vectorizable call? */
2675 if (!is_gimple_call (stmt))
2676 return false;
2678 fndecl = gimple_call_fndecl (stmt);
2679 if (fndecl == NULL_TREE)
2680 return false;
2682 struct cgraph_node *node = cgraph_node::get (fndecl);
2683 if (node == NULL || node->simd_clones == NULL)
2684 return false;
2686 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2687 return false;
2689 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2690 return false;
2692 if (gimple_call_lhs (stmt)
2693 && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
2694 return false;
2696 gcc_checking_assert (!stmt_can_throw_internal (stmt));
2698 vectype = STMT_VINFO_VECTYPE (stmt_info);
2700 if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
2701 return false;
2703 /* FORNOW */
2704 if (slp_node || PURE_SLP_STMT (stmt_info))
2705 return false;
2707 /* Process function arguments. */
2708 nargs = gimple_call_num_args (stmt);
2710 /* Bail out if the function has zero arguments. */
2711 if (nargs == 0)
2712 return false;
2714 arginfo.create (nargs);
2716 for (i = 0; i < nargs; i++)
2718 simd_call_arg_info thisarginfo;
2719 affine_iv iv;
2721 thisarginfo.linear_step = 0;
2722 thisarginfo.align = 0;
2723 thisarginfo.op = NULL_TREE;
2724 thisarginfo.simd_lane_linear = false;
2726 op = gimple_call_arg (stmt, i);
2727 if (!vect_is_simple_use (op, vinfo, &def_stmt, &thisarginfo.dt,
2728 &thisarginfo.vectype)
2729 || thisarginfo.dt == vect_uninitialized_def)
2731 if (dump_enabled_p ())
2732 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2733 "use not simple.\n");
2734 arginfo.release ();
2735 return false;
2738 if (thisarginfo.dt == vect_constant_def
2739 || thisarginfo.dt == vect_external_def)
2740 gcc_assert (thisarginfo.vectype == NULL_TREE);
2741 else
2742 gcc_assert (thisarginfo.vectype != NULL_TREE);
2744 /* For linear arguments, the analysis phase should have saved
2745 the base and step in STMT_VINFO_SIMD_CLONE_INFO. */
2746 if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
2747 && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
2749 gcc_assert (vec_stmt);
2750 thisarginfo.linear_step
2751 = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
2752 thisarginfo.op
2753 = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
2754 thisarginfo.simd_lane_linear
2755 = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
2756 == boolean_true_node);
2757 /* If the loop has been peeled for alignment, we need to adjust it. */
2758 tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
2759 tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
2760 if (n1 != n2 && !thisarginfo.simd_lane_linear)
2762 tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
2763 tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
2764 tree opt = TREE_TYPE (thisarginfo.op);
2765 bias = fold_convert (TREE_TYPE (step), bias);
2766 bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
2767 thisarginfo.op
2768 = fold_build2 (POINTER_TYPE_P (opt)
2769 ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
2770 thisarginfo.op, bias);
2773 else if (!vec_stmt
2774 && thisarginfo.dt != vect_constant_def
2775 && thisarginfo.dt != vect_external_def
2776 && loop_vinfo
2777 && TREE_CODE (op) == SSA_NAME
2778 && simple_iv (loop, loop_containing_stmt (stmt), op,
2779 &iv, false)
2780 && tree_fits_shwi_p (iv.step))
2782 thisarginfo.linear_step = tree_to_shwi (iv.step);
2783 thisarginfo.op = iv.base;
2785 else if ((thisarginfo.dt == vect_constant_def
2786 || thisarginfo.dt == vect_external_def)
2787 && POINTER_TYPE_P (TREE_TYPE (op)))
2788 thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
2789 /* Addresses of array elements indexed by GOMP_SIMD_LANE are
2790 linear too. */
2791 if (POINTER_TYPE_P (TREE_TYPE (op))
2792 && !thisarginfo.linear_step
2793 && !vec_stmt
2794 && thisarginfo.dt != vect_constant_def
2795 && thisarginfo.dt != vect_external_def
2796 && loop_vinfo
2797 && !slp_node
2798 && TREE_CODE (op) == SSA_NAME)
2799 vect_simd_lane_linear (op, loop, &thisarginfo);
2801 arginfo.quick_push (thisarginfo);
2804 unsigned int badness = 0;
2805 struct cgraph_node *bestn = NULL;
2806 if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
2807 bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
2808 else
2809 for (struct cgraph_node *n = node->simd_clones; n != NULL;
2810 n = n->simdclone->next_clone)
2812 unsigned int this_badness = 0;
2813 if (n->simdclone->simdlen
2814 > (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
2815 || n->simdclone->nargs != nargs)
2816 continue;
2817 if (n->simdclone->simdlen
2818 < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo))
2819 this_badness += (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
2820 - exact_log2 (n->simdclone->simdlen)) * 1024;
2821 if (n->simdclone->inbranch)
2822 this_badness += 2048;
2823 int target_badness = targetm.simd_clone.usable (n);
2824 if (target_badness < 0)
2825 continue;
2826 this_badness += target_badness * 512;
2827 /* FORNOW: Have to add code to add the mask argument. */
2828 if (n->simdclone->inbranch)
2829 continue;
2830 for (i = 0; i < nargs; i++)
2832 switch (n->simdclone->args[i].arg_type)
2834 case SIMD_CLONE_ARG_TYPE_VECTOR:
2835 if (!useless_type_conversion_p
2836 (n->simdclone->args[i].orig_type,
2837 TREE_TYPE (gimple_call_arg (stmt, i))))
2838 i = -1;
2839 else if (arginfo[i].dt == vect_constant_def
2840 || arginfo[i].dt == vect_external_def
2841 || arginfo[i].linear_step)
2842 this_badness += 64;
2843 break;
2844 case SIMD_CLONE_ARG_TYPE_UNIFORM:
2845 if (arginfo[i].dt != vect_constant_def
2846 && arginfo[i].dt != vect_external_def)
2847 i = -1;
2848 break;
2849 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
2850 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
2851 if (arginfo[i].dt == vect_constant_def
2852 || arginfo[i].dt == vect_external_def
2853 || (arginfo[i].linear_step
2854 != n->simdclone->args[i].linear_step))
2855 i = -1;
2856 break;
2857 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
2858 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
2859 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
2860 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
2861 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
2862 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
2863 /* FORNOW */
2864 i = -1;
2865 break;
2866 case SIMD_CLONE_ARG_TYPE_MASK:
2867 gcc_unreachable ();
2869 if (i == (size_t) -1)
2870 break;
2871 if (n->simdclone->args[i].alignment > arginfo[i].align)
2873 i = -1;
2874 break;
2876 if (arginfo[i].align)
2877 this_badness += (exact_log2 (arginfo[i].align)
2878 - exact_log2 (n->simdclone->args[i].alignment));
2880 if (i == (size_t) -1)
2881 continue;
2882 if (bestn == NULL || this_badness < badness)
2884 bestn = n;
2885 badness = this_badness;
2889 if (bestn == NULL)
2891 arginfo.release ();
2892 return false;
2895 for (i = 0; i < nargs; i++)
2896 if ((arginfo[i].dt == vect_constant_def
2897 || arginfo[i].dt == vect_external_def)
2898 && bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
2900 arginfo[i].vectype
2901 = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
2902 i)));
2903 if (arginfo[i].vectype == NULL
2904 || (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
2905 > bestn->simdclone->simdlen))
2907 arginfo.release ();
2908 return false;
2912 fndecl = bestn->decl;
2913 nunits = bestn->simdclone->simdlen;
2914 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
2916 /* If the function isn't const, only allow it in simd loops where the user
2917 has asserted that at least nunits consecutive iterations can be
2918 performed using SIMD instructions. */
2919 if ((loop == NULL || (unsigned) loop->safelen < nunits)
2920 && gimple_vuse (stmt))
2922 arginfo.release ();
2923 return false;
2926 /* Sanity check: make sure that at least one copy of the vectorized stmt
2927 needs to be generated. */
2928 gcc_assert (ncopies >= 1);
2930 if (!vec_stmt) /* transformation not required. */
2932 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
2933 for (i = 0; i < nargs; i++)
2934 if (bestn->simdclone->args[i].arg_type
2935 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
2937 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
2938 + 1);
2939 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
2940 tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
2941 ? size_type_node : TREE_TYPE (arginfo[i].op);
2942 tree ls = build_int_cst (lst, arginfo[i].linear_step);
2943 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
2944 tree sll = arginfo[i].simd_lane_linear
2945 ? boolean_true_node : boolean_false_node;
2946 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
2948 STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
2949 if (dump_enabled_p ())
2950 dump_printf_loc (MSG_NOTE, vect_location,
2951 "=== vectorizable_simd_clone_call ===\n");
2952 /* vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
2953 arginfo.release ();
2954 return true;
2957 /** Transform. **/
2959 if (dump_enabled_p ())
2960 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
2962 /* Handle def. */
2963 scalar_dest = gimple_call_lhs (stmt);
2964 vec_dest = NULL_TREE;
2965 rtype = NULL_TREE;
2966 ratype = NULL_TREE;
2967 if (scalar_dest)
2969 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2970 rtype = TREE_TYPE (TREE_TYPE (fndecl));
2971 if (TREE_CODE (rtype) == ARRAY_TYPE)
2973 ratype = rtype;
2974 rtype = TREE_TYPE (ratype);
2978 prev_stmt_info = NULL;
2979 for (j = 0; j < ncopies; ++j)
2981 /* Build argument list for the vectorized call. */
2982 if (j == 0)
2983 vargs.create (nargs);
2984 else
2985 vargs.truncate (0);
2987 for (i = 0; i < nargs; i++)
2989 unsigned int k, l, m, o;
2990 tree atype;
2991 op = gimple_call_arg (stmt, i);
2992 switch (bestn->simdclone->args[i].arg_type)
2994 case SIMD_CLONE_ARG_TYPE_VECTOR:
2995 atype = bestn->simdclone->args[i].vector_type;
2996 o = nunits / TYPE_VECTOR_SUBPARTS (atype);
2997 for (m = j * o; m < (j + 1) * o; m++)
2999 if (TYPE_VECTOR_SUBPARTS (atype)
3000 < TYPE_VECTOR_SUBPARTS (arginfo[i].vectype))
3002 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
3003 k = (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
3004 / TYPE_VECTOR_SUBPARTS (atype));
3005 gcc_assert ((k & (k - 1)) == 0);
3006 if (m == 0)
3007 vec_oprnd0
3008 = vect_get_vec_def_for_operand (op, stmt);
3009 else
3011 vec_oprnd0 = arginfo[i].op;
3012 if ((m & (k - 1)) == 0)
3013 vec_oprnd0
3014 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
3015 vec_oprnd0);
3017 arginfo[i].op = vec_oprnd0;
3018 vec_oprnd0
3019 = build3 (BIT_FIELD_REF, atype, vec_oprnd0,
3020 size_int (prec),
3021 bitsize_int ((m & (k - 1)) * prec));
3022 new_stmt
3023 = gimple_build_assign (make_ssa_name (atype),
3024 vec_oprnd0);
3025 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3026 vargs.safe_push (gimple_assign_lhs (new_stmt));
3028 else
3030 k = (TYPE_VECTOR_SUBPARTS (atype)
3031 / TYPE_VECTOR_SUBPARTS (arginfo[i].vectype));
3032 gcc_assert ((k & (k - 1)) == 0);
3033 vec<constructor_elt, va_gc> *ctor_elts;
3034 if (k != 1)
3035 vec_alloc (ctor_elts, k);
3036 else
3037 ctor_elts = NULL;
3038 for (l = 0; l < k; l++)
3040 if (m == 0 && l == 0)
3041 vec_oprnd0
3042 = vect_get_vec_def_for_operand (op, stmt);
3043 else
3044 vec_oprnd0
3045 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
3046 arginfo[i].op);
3047 arginfo[i].op = vec_oprnd0;
3048 if (k == 1)
3049 break;
3050 CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
3051 vec_oprnd0);
3053 if (k == 1)
3054 vargs.safe_push (vec_oprnd0);
3055 else
3057 vec_oprnd0 = build_constructor (atype, ctor_elts);
3058 new_stmt
3059 = gimple_build_assign (make_ssa_name (atype),
3060 vec_oprnd0);
3061 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3062 vargs.safe_push (gimple_assign_lhs (new_stmt));
3066 break;
3067 case SIMD_CLONE_ARG_TYPE_UNIFORM:
3068 vargs.safe_push (op);
3069 break;
3070 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
3071 if (j == 0)
3073 gimple_seq stmts;
3074 arginfo[i].op
3075 = force_gimple_operand (arginfo[i].op, &stmts, true,
3076 NULL_TREE);
3077 if (stmts != NULL)
3079 basic_block new_bb;
3080 edge pe = loop_preheader_edge (loop);
3081 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
3082 gcc_assert (!new_bb);
3084 if (arginfo[i].simd_lane_linear)
3086 vargs.safe_push (arginfo[i].op);
3087 break;
3089 tree phi_res = copy_ssa_name (op);
3090 gphi *new_phi = create_phi_node (phi_res, loop->header);
3091 set_vinfo_for_stmt (new_phi,
3092 new_stmt_vec_info (new_phi, loop_vinfo));
3093 add_phi_arg (new_phi, arginfo[i].op,
3094 loop_preheader_edge (loop), UNKNOWN_LOCATION);
3095 enum tree_code code
3096 = POINTER_TYPE_P (TREE_TYPE (op))
3097 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3098 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3099 ? sizetype : TREE_TYPE (op);
3100 widest_int cst
3101 = wi::mul (bestn->simdclone->args[i].linear_step,
3102 ncopies * nunits);
3103 tree tcst = wide_int_to_tree (type, cst);
3104 tree phi_arg = copy_ssa_name (op);
3105 new_stmt
3106 = gimple_build_assign (phi_arg, code, phi_res, tcst);
3107 gimple_stmt_iterator si = gsi_after_labels (loop->header);
3108 gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
3109 set_vinfo_for_stmt (new_stmt,
3110 new_stmt_vec_info (new_stmt, loop_vinfo));
3111 add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
3112 UNKNOWN_LOCATION);
3113 arginfo[i].op = phi_res;
3114 vargs.safe_push (phi_res);
3116 else
3118 enum tree_code code
3119 = POINTER_TYPE_P (TREE_TYPE (op))
3120 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3121 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3122 ? sizetype : TREE_TYPE (op);
3123 widest_int cst
3124 = wi::mul (bestn->simdclone->args[i].linear_step,
3125 j * nunits);
3126 tree tcst = wide_int_to_tree (type, cst);
3127 new_temp = make_ssa_name (TREE_TYPE (op));
3128 new_stmt = gimple_build_assign (new_temp, code,
3129 arginfo[i].op, tcst);
3130 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3131 vargs.safe_push (new_temp);
3133 break;
3134 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
3135 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
3136 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
3137 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
3138 default:
3139 gcc_unreachable ();
3143 new_stmt = gimple_build_call_vec (fndecl, vargs);
3144 if (vec_dest)
3146 gcc_assert (ratype || TYPE_VECTOR_SUBPARTS (rtype) == nunits);
3147 if (ratype)
3148 new_temp = create_tmp_var (ratype);
3149 else if (TYPE_VECTOR_SUBPARTS (vectype)
3150 == TYPE_VECTOR_SUBPARTS (rtype))
3151 new_temp = make_ssa_name (vec_dest, new_stmt);
3152 else
3153 new_temp = make_ssa_name (rtype, new_stmt);
3154 gimple_call_set_lhs (new_stmt, new_temp);
3156 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3158 if (vec_dest)
3160 if (TYPE_VECTOR_SUBPARTS (vectype) < nunits)
3162 unsigned int k, l;
3163 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
3164 k = nunits / TYPE_VECTOR_SUBPARTS (vectype);
3165 gcc_assert ((k & (k - 1)) == 0);
3166 for (l = 0; l < k; l++)
3168 tree t;
3169 if (ratype)
3171 t = build_fold_addr_expr (new_temp);
3172 t = build2 (MEM_REF, vectype, t,
3173 build_int_cst (TREE_TYPE (t),
3174 l * prec / BITS_PER_UNIT));
3176 else
3177 t = build3 (BIT_FIELD_REF, vectype, new_temp,
3178 size_int (prec), bitsize_int (l * prec));
3179 new_stmt
3180 = gimple_build_assign (make_ssa_name (vectype), t);
3181 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3182 if (j == 0 && l == 0)
3183 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3184 else
3185 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3187 prev_stmt_info = vinfo_for_stmt (new_stmt);
3190 if (ratype)
3192 tree clobber = build_constructor (ratype, NULL);
3193 TREE_THIS_VOLATILE (clobber) = 1;
3194 new_stmt = gimple_build_assign (new_temp, clobber);
3195 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3197 continue;
3199 else if (TYPE_VECTOR_SUBPARTS (vectype) > nunits)
3201 unsigned int k = (TYPE_VECTOR_SUBPARTS (vectype)
3202 / TYPE_VECTOR_SUBPARTS (rtype));
3203 gcc_assert ((k & (k - 1)) == 0);
3204 if ((j & (k - 1)) == 0)
3205 vec_alloc (ret_ctor_elts, k);
3206 if (ratype)
3208 unsigned int m, o = nunits / TYPE_VECTOR_SUBPARTS (rtype);
3209 for (m = 0; m < o; m++)
3211 tree tem = build4 (ARRAY_REF, rtype, new_temp,
3212 size_int (m), NULL_TREE, NULL_TREE);
3213 new_stmt
3214 = gimple_build_assign (make_ssa_name (rtype), tem);
3215 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3216 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
3217 gimple_assign_lhs (new_stmt));
3219 tree clobber = build_constructor (ratype, NULL);
3220 TREE_THIS_VOLATILE (clobber) = 1;
3221 new_stmt = gimple_build_assign (new_temp, clobber);
3222 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3224 else
3225 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
3226 if ((j & (k - 1)) != k - 1)
3227 continue;
3228 vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
3229 new_stmt
3230 = gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
3231 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3233 if ((unsigned) j == k - 1)
3234 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3235 else
3236 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3238 prev_stmt_info = vinfo_for_stmt (new_stmt);
3239 continue;
3241 else if (ratype)
3243 tree t = build_fold_addr_expr (new_temp);
3244 t = build2 (MEM_REF, vectype, t,
3245 build_int_cst (TREE_TYPE (t), 0));
3246 new_stmt
3247 = gimple_build_assign (make_ssa_name (vec_dest), t);
3248 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3249 tree clobber = build_constructor (ratype, NULL);
3250 TREE_THIS_VOLATILE (clobber) = 1;
3251 vect_finish_stmt_generation (stmt,
3252 gimple_build_assign (new_temp,
3253 clobber), gsi);
3257 if (j == 0)
3258 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3259 else
3260 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3262 prev_stmt_info = vinfo_for_stmt (new_stmt);
3265 vargs.release ();
3267 /* The call in STMT might prevent it from being removed in dce.
3268 We however cannot remove it here, due to the way the ssa name
3269 it defines is mapped to the new definition. So just replace
3270 rhs of the statement with something harmless. */
3272 if (slp_node)
3273 return true;
3275 if (scalar_dest)
3277 type = TREE_TYPE (scalar_dest);
3278 if (is_pattern_stmt_p (stmt_info))
3279 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
3280 else
3281 lhs = gimple_call_lhs (stmt);
3282 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
3284 else
3285 new_stmt = gimple_build_nop ();
3286 set_vinfo_for_stmt (new_stmt, stmt_info);
3287 set_vinfo_for_stmt (stmt, NULL);
3288 STMT_VINFO_STMT (stmt_info) = new_stmt;
3289 gsi_replace (gsi, new_stmt, true);
3290 unlink_stmt_vdef (stmt);
3292 return true;
3296 /* Function vect_gen_widened_results_half
3298 Create a vector stmt whose code, number of arguments, and result
3299 variable are CODE, OP_TYPE, and VEC_DEST, and whose arguments are
3300 VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at BSI.
3301 In the case that CODE is a CALL_EXPR, this means that a call to DECL
3302 needs to be created (DECL is a function-decl of a target-builtin).
3303 STMT is the original scalar stmt that we are vectorizing. */
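/* For instance, widening eight shorts to eight ints is emitted as two
   statements, one producing the low four ints and one the high four;
   this helper generates one such half.  */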
3305 static gimple *
3306 vect_gen_widened_results_half (enum tree_code code,
3307 tree decl,
3308 tree vec_oprnd0, tree vec_oprnd1, int op_type,
3309 tree vec_dest, gimple_stmt_iterator *gsi,
3310 gimple *stmt)
3312 gimple *new_stmt;
3313 tree new_temp;
3315 /* Generate half of the widened result: */
3316 if (code == CALL_EXPR)
3318 /* Target specific support */
3319 if (op_type == binary_op)
3320 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
3321 else
3322 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
3323 new_temp = make_ssa_name (vec_dest, new_stmt);
3324 gimple_call_set_lhs (new_stmt, new_temp);
3326 else
3328 /* Generic support */
3329 gcc_assert (op_type == TREE_CODE_LENGTH (code));
3330 if (op_type != binary_op)
3331 vec_oprnd1 = NULL;
3332 new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
3333 new_temp = make_ssa_name (vec_dest, new_stmt);
3334 gimple_assign_set_lhs (new_stmt, new_temp);
3336 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3338 return new_stmt;
3342 /* Get vectorized definitions for loop-based vectorization. For the first
3343 operand we call vect_get_vec_def_for_operand() (with OPRND containing
3344 the scalar operand), and for the rest we get a copy with
3345 vect_get_vec_def_for_stmt_copy() using the previous vector definition
3346 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
3347 The vectors are collected into VEC_OPRNDS. */
3349 static void
3350 vect_get_loop_based_defs (tree *oprnd, gimple *stmt, enum vect_def_type dt,
3351 vec<tree> *vec_oprnds, int multi_step_cvt)
3353 tree vec_oprnd;
3355 /* Get first vector operand. */
3356 /* All the vector operands except the very first one (that is the scalar oprnd)
3357 are stmt copies. */
3358 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
3359 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt);
3360 else
3361 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
3363 vec_oprnds->quick_push (vec_oprnd);
3365 /* Get second vector operand. */
3366 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
3367 vec_oprnds->quick_push (vec_oprnd);
3369 *oprnd = vec_oprnd;
3371 /* For conversion in multiple steps, continue to get operands
3372 recursively. */
3373 if (multi_step_cvt)
3374 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
3378 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
3379 For multi-step conversions, store the resulting vectors and call the function
3380 recursively. */
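/* For instance, demoting ints to shorts packs each pair of int vectors
   into one short vector (e.g. with VEC_PACK_TRUNC_EXPR); a multi-step
   demotion such as int to char repeats the packing on the intermediate
   results.  */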
3382 static void
3383 vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
3384 int multi_step_cvt, gimple *stmt,
3385 vec<tree> vec_dsts,
3386 gimple_stmt_iterator *gsi,
3387 slp_tree slp_node, enum tree_code code,
3388 stmt_vec_info *prev_stmt_info)
3390 unsigned int i;
3391 tree vop0, vop1, new_tmp, vec_dest;
3392 gimple *new_stmt;
3393 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3395 vec_dest = vec_dsts.pop ();
3397 for (i = 0; i < vec_oprnds->length (); i += 2)
3399 /* Create demotion operation. */
3400 vop0 = (*vec_oprnds)[i];
3401 vop1 = (*vec_oprnds)[i + 1];
3402 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
3403 new_tmp = make_ssa_name (vec_dest, new_stmt);
3404 gimple_assign_set_lhs (new_stmt, new_tmp);
3405 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3407 if (multi_step_cvt)
3408 /* Store the resulting vector for next recursive call. */
3409 (*vec_oprnds)[i/2] = new_tmp;
3410 else
3412 /* This is the last step of the conversion sequence. Store the
3413 vectors in SLP_NODE or in the vector info of the scalar statement
3414 (or in the STMT_VINFO_RELATED_STMT chain). */
3415 if (slp_node)
3416 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3417 else
3419 if (!*prev_stmt_info)
3420 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3421 else
3422 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
3424 *prev_stmt_info = vinfo_for_stmt (new_stmt);
3429 /* For multi-step demotion operations we first generate demotion operations
3430 from the source type to the intermediate types, and then combine the
3431 results (stored in VEC_OPRNDS) in a demotion operation to the destination
3432 type. */
3433 if (multi_step_cvt)
3435 /* At each level of recursion we have half of the operands we had at the
3436 previous level. */
3437 vec_oprnds->truncate ((i+1)/2);
3438 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
3439 stmt, vec_dsts, gsi, slp_node,
3440 VEC_PACK_TRUNC_EXPR,
3441 prev_stmt_info);
3444 vec_dsts.quick_push (vec_dest);
3448 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
3449 and VEC_OPRNDS1 (for binary operations). For multi-step conversions, store
3450 the resulting vectors and call the function recursively. */
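/* For instance, promoting shorts to ints turns each short vector into two
   int vectors, built here as the "low" and "high" halves using CODE1 and
   CODE2 (or DECL1 and DECL2 when a target builtin is required).  */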
3452 static void
3453 vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
3454 vec<tree> *vec_oprnds1,
3455 gimple *stmt, tree vec_dest,
3456 gimple_stmt_iterator *gsi,
3457 enum tree_code code1,
3458 enum tree_code code2, tree decl1,
3459 tree decl2, int op_type)
3461 int i;
3462 tree vop0, vop1, new_tmp1, new_tmp2;
3463 gimple *new_stmt1, *new_stmt2;
3464 vec<tree> vec_tmp = vNULL;
3466 vec_tmp.create (vec_oprnds0->length () * 2);
3467 FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
3469 if (op_type == binary_op)
3470 vop1 = (*vec_oprnds1)[i];
3471 else
3472 vop1 = NULL_TREE;
3474 /* Generate the two halves of promotion operation. */
3475 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
3476 op_type, vec_dest, gsi, stmt);
3477 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
3478 op_type, vec_dest, gsi, stmt);
3479 if (is_gimple_call (new_stmt1))
3481 new_tmp1 = gimple_call_lhs (new_stmt1);
3482 new_tmp2 = gimple_call_lhs (new_stmt2);
3484 else
3486 new_tmp1 = gimple_assign_lhs (new_stmt1);
3487 new_tmp2 = gimple_assign_lhs (new_stmt2);
3490 /* Store the results for the next step. */
3491 vec_tmp.quick_push (new_tmp1);
3492 vec_tmp.quick_push (new_tmp2);
3495 vec_oprnds0->release ();
3496 *vec_oprnds0 = vec_tmp;
3500 /* Check if STMT performs a conversion operation, that can be vectorized.
3501 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3502 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3503 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
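/* For example, converting a vector of shorts to a vector of doubles
   typically has no single target operation; it is handled as a multi-step
   conversion (short -> int -> double), recorded in MULTI_STEP_CVT and
   INTERM_TYPES below.  */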
3505 static bool
3506 vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
3507 gimple **vec_stmt, slp_tree slp_node)
3509 tree vec_dest;
3510 tree scalar_dest;
3511 tree op0, op1 = NULL_TREE;
3512 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
3513 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3514 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3515 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
3516 enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
3517 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
3518 tree new_temp;
3519 gimple *def_stmt;
3520 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
3521 gimple *new_stmt = NULL;
3522 stmt_vec_info prev_stmt_info;
3523 int nunits_in;
3524 int nunits_out;
3525 tree vectype_out, vectype_in;
3526 int ncopies, i, j;
3527 tree lhs_type, rhs_type;
3528 enum { NARROW, NONE, WIDEN } modifier;
3529 vec<tree> vec_oprnds0 = vNULL;
3530 vec<tree> vec_oprnds1 = vNULL;
3531 tree vop0;
3532 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3533 vec_info *vinfo = stmt_info->vinfo;
3534 int multi_step_cvt = 0;
3535 vec<tree> vec_dsts = vNULL;
3536 vec<tree> interm_types = vNULL;
3537 tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
3538 int op_type;
3539 machine_mode rhs_mode;
3540 unsigned short fltsz;
3542 /* Is STMT a vectorizable conversion? */
3544 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3545 return false;
3547 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3548 return false;
3550 if (!is_gimple_assign (stmt))
3551 return false;
3553 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
3554 return false;
3556 code = gimple_assign_rhs_code (stmt);
3557 if (!CONVERT_EXPR_CODE_P (code)
3558 && code != FIX_TRUNC_EXPR
3559 && code != FLOAT_EXPR
3560 && code != WIDEN_MULT_EXPR
3561 && code != WIDEN_LSHIFT_EXPR)
3562 return false;
3564 op_type = TREE_CODE_LENGTH (code);
3566 /* Check types of lhs and rhs. */
3567 scalar_dest = gimple_assign_lhs (stmt);
3568 lhs_type = TREE_TYPE (scalar_dest);
3569 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3571 op0 = gimple_assign_rhs1 (stmt);
3572 rhs_type = TREE_TYPE (op0);
3574 if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
3575 && !((INTEGRAL_TYPE_P (lhs_type)
3576 && INTEGRAL_TYPE_P (rhs_type))
3577 || (SCALAR_FLOAT_TYPE_P (lhs_type)
3578 && SCALAR_FLOAT_TYPE_P (rhs_type))))
3579 return false;
3581 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
3582 && ((INTEGRAL_TYPE_P (lhs_type)
3583 && (TYPE_PRECISION (lhs_type)
3584 != GET_MODE_PRECISION (TYPE_MODE (lhs_type))))
3585 || (INTEGRAL_TYPE_P (rhs_type)
3586 && (TYPE_PRECISION (rhs_type)
3587 != GET_MODE_PRECISION (TYPE_MODE (rhs_type))))))
3589 if (dump_enabled_p ())
3590 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3591 "type conversion to/from bit-precision unsupported."
3592 "\n");
3593 return false;
3596 /* Check the operands of the operation. */
3597 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype_in))
3599 if (dump_enabled_p ())
3600 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3601 "use not simple.\n");
3602 return false;
3604 if (op_type == binary_op)
3606 bool ok;
3608 op1 = gimple_assign_rhs2 (stmt);
3609 gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
3610 /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
3611 OP1. */
3612 if (CONSTANT_CLASS_P (op0))
3613 ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &vectype_in);
3614 else
3615 ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]);
3617 if (!ok)
3619 if (dump_enabled_p ())
3620 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3621 "use not simple.\n");
3622 return false;
3626 /* If op0 is an external or constant def, use a vector type of
3627 the same size as the output vector type. */
3628 if (!vectype_in)
3629 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
3630 if (vec_stmt)
3631 gcc_assert (vectype_in);
3632 if (!vectype_in)
3634 if (dump_enabled_p ())
3636 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3637 "no vectype for scalar type ");
3638 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
3639 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3642 return false;
3645 if (VECTOR_BOOLEAN_TYPE_P (vectype_out)
3646 && !VECTOR_BOOLEAN_TYPE_P (vectype_in))
3648 if (dump_enabled_p ())
3650 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3651 "can't convert between boolean and non "
3652 "boolean vectors");
3653 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
3654 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3657 return false;
3660 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
3661 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
3662 if (nunits_in < nunits_out)
3663 modifier = NARROW;
3664 else if (nunits_out == nunits_in)
3665 modifier = NONE;
3666 else
3667 modifier = WIDEN;
3669 /* Multiple types in SLP are handled by creating the appropriate number of
3670 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3671 case of SLP. */
3672 if (slp_node || PURE_SLP_STMT (stmt_info))
3673 ncopies = 1;
3674 else if (modifier == NARROW)
3675 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
3676 else
3677 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
3679 /* Sanity check: make sure that at least one copy of the vectorized stmt
3680 needs to be generated. */
3681 gcc_assert (ncopies >= 1);
3683 /* Supportable by target? */
3684 switch (modifier)
3686 case NONE:
3687 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
3688 return false;
3689 if (supportable_convert_operation (code, vectype_out, vectype_in,
3690 &decl1, &code1))
3691 break;
3692 /* FALLTHRU */
3693 unsupported:
3694 if (dump_enabled_p ())
3695 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3696 "conversion not supported by target.\n");
3697 return false;
3699 case WIDEN:
3700 if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
3701 &code1, &code2, &multi_step_cvt,
3702 &interm_types))
3704 /* Binary widening operation can only be supported directly by the
3705 architecture. */
3706 gcc_assert (!(multi_step_cvt && op_type == binary_op));
3707 break;
3710 if (code != FLOAT_EXPR
3711 || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
3712 <= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
3713 goto unsupported;
3715 rhs_mode = TYPE_MODE (rhs_type);
3716 fltsz = GET_MODE_SIZE (TYPE_MODE (lhs_type));
3717 for (rhs_mode = GET_MODE_2XWIDER_MODE (TYPE_MODE (rhs_type));
3718 rhs_mode != VOIDmode && GET_MODE_SIZE (rhs_mode) <= fltsz;
3719 rhs_mode = GET_MODE_2XWIDER_MODE (rhs_mode))
3721 cvt_type
3722 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
3723 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
3724 if (cvt_type == NULL_TREE)
3725 goto unsupported;
3727 if (GET_MODE_SIZE (rhs_mode) == fltsz)
3729 if (!supportable_convert_operation (code, vectype_out,
3730 cvt_type, &decl1, &codecvt1))
3731 goto unsupported;
3733 else if (!supportable_widening_operation (code, stmt, vectype_out,
3734 cvt_type, &codecvt1,
3735 &codecvt2, &multi_step_cvt,
3736 &interm_types))
3737 continue;
3738 else
3739 gcc_assert (multi_step_cvt == 0);
3741 if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
3742 vectype_in, &code1, &code2,
3743 &multi_step_cvt, &interm_types))
3744 break;
3747 if (rhs_mode == VOIDmode || GET_MODE_SIZE (rhs_mode) > fltsz)
3748 goto unsupported;
3750 if (GET_MODE_SIZE (rhs_mode) == fltsz)
3751 codecvt2 = ERROR_MARK;
3752 else
3754 multi_step_cvt++;
3755 interm_types.safe_push (cvt_type);
3756 cvt_type = NULL_TREE;
3758 break;
3760 case NARROW:
3761 gcc_assert (op_type == unary_op);
3762 if (supportable_narrowing_operation (code, vectype_out, vectype_in,
3763 &code1, &multi_step_cvt,
3764 &interm_types))
3765 break;
3767 if (code != FIX_TRUNC_EXPR
3768 || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
3769 >= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
3770 goto unsupported;
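/* Illustrative note (not in the original source): this path handles e.g. a
   FIX_TRUNC_EXPR from "double" to "short" with no direct support: the float
   is first converted to a same-width integer (double -> long, CVT_TYPE
   below) and that integer result is then narrowed (long -> short).  */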
3772 rhs_mode = TYPE_MODE (rhs_type);
3773 cvt_type
3774 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
3775 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
3776 if (cvt_type == NULL_TREE)
3777 goto unsupported;
3778 if (!supportable_convert_operation (code, cvt_type, vectype_in,
3779 &decl1, &codecvt1))
3780 goto unsupported;
3781 if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
3782 &code1, &multi_step_cvt,
3783 &interm_types))
3784 break;
3785 goto unsupported;
3787 default:
3788 gcc_unreachable ();
3791 if (!vec_stmt) /* transformation not required. */
3793 if (dump_enabled_p ())
3794 dump_printf_loc (MSG_NOTE, vect_location,
3795 "=== vectorizable_conversion ===\n");
3796 if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
3798 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
3799 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
3801 else if (modifier == NARROW)
3803 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
3804 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
3806 else
3808 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
3809 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
3811 interm_types.release ();
3812 return true;
3815 /** Transform. **/
3816 if (dump_enabled_p ())
3817 dump_printf_loc (MSG_NOTE, vect_location,
3818 "transform conversion. ncopies = %d.\n", ncopies);
3820 if (op_type == binary_op)
3822 if (CONSTANT_CLASS_P (op0))
3823 op0 = fold_convert (TREE_TYPE (op1), op0);
3824 else if (CONSTANT_CLASS_P (op1))
3825 op1 = fold_convert (TREE_TYPE (op0), op1);
3828 /* In case of multi-step conversion, we first generate conversion operations
3829 to the intermediate types, and then from those types to the final one.
3830 We create vector destinations for the intermediate types (TYPES) received
3831 from supportable_*_operation, and store them in the correct order
3832 for future use in vect_create_vectorized_*_stmts (). */
3833 vec_dsts.create (multi_step_cvt + 1);
3834 vec_dest = vect_create_destination_var (scalar_dest,
3835 (cvt_type && modifier == WIDEN)
3836 ? cvt_type : vectype_out);
3837 vec_dsts.quick_push (vec_dest);
3839 if (multi_step_cvt)
3841 for (i = interm_types.length () - 1;
3842 interm_types.iterate (i, &intermediate_type); i--)
3844 vec_dest = vect_create_destination_var (scalar_dest,
3845 intermediate_type);
3846 vec_dsts.quick_push (vec_dest);
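/* Illustrative note (not in the original source): after the pushes above,
   VEC_DSTS holds the final destination at index 0 followed by the
   intermediate-type destinations; the transform loops below consume it from
   index MULTI_STEP_CVT down to 0, i.e. intermediate results are created
   first and the final result last.  */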
3850 if (cvt_type)
3851 vec_dest = vect_create_destination_var (scalar_dest,
3852 modifier == WIDEN
3853 ? vectype_out : cvt_type);
3855 if (!slp_node)
3857 if (modifier == WIDEN)
3859 vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
3860 if (op_type == binary_op)
3861 vec_oprnds1.create (1);
3863 else if (modifier == NARROW)
3864 vec_oprnds0.create (
3865 2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
3867 else if (code == WIDEN_LSHIFT_EXPR)
3868 vec_oprnds1.create (slp_node->vec_stmts_size);
3870 last_oprnd = op0;
3871 prev_stmt_info = NULL;
3872 switch (modifier)
3874 case NONE:
3875 for (j = 0; j < ncopies; j++)
3877 if (j == 0)
3878 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node,
3879 -1);
3880 else
3881 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
3883 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
3885 /* Arguments are ready, create the new vector stmt. */
3886 if (code1 == CALL_EXPR)
3888 new_stmt = gimple_build_call (decl1, 1, vop0);
3889 new_temp = make_ssa_name (vec_dest, new_stmt);
3890 gimple_call_set_lhs (new_stmt, new_temp);
3892 else
3894 gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
3895 new_stmt = gimple_build_assign (vec_dest, code1, vop0);
3896 new_temp = make_ssa_name (vec_dest, new_stmt);
3897 gimple_assign_set_lhs (new_stmt, new_temp);
3900 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3901 if (slp_node)
3902 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3903 else
3905 if (!prev_stmt_info)
3906 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3907 else
3908 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3909 prev_stmt_info = vinfo_for_stmt (new_stmt);
3913 break;
3915 case WIDEN:
3916 /* In case the vectorization factor (VF) is bigger than the number
3917 of elements that we can fit in a vectype (nunits), we have to
3918 generate more than one vector stmt - i.e - we need to "unroll"
3919 the vector stmt by a factor VF/nunits. */
3920 for (j = 0; j < ncopies; j++)
3922 /* Handle uses. */
3923 if (j == 0)
3925 if (slp_node)
3927 if (code == WIDEN_LSHIFT_EXPR)
3929 unsigned int k;
3931 vec_oprnd1 = op1;
3932 /* Store vec_oprnd1 for every vector stmt to be created
3933 for SLP_NODE. We check during the analysis that all
3934 the shift arguments are the same. */
3935 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
3936 vec_oprnds1.quick_push (vec_oprnd1);
3938 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
3939 slp_node, -1);
3941 else
3942 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
3943 &vec_oprnds1, slp_node, -1);
3945 else
3947 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt);
3948 vec_oprnds0.quick_push (vec_oprnd0);
3949 if (op_type == binary_op)
3951 if (code == WIDEN_LSHIFT_EXPR)
3952 vec_oprnd1 = op1;
3953 else
3954 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt);
3955 vec_oprnds1.quick_push (vec_oprnd1);
3959 else
3961 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
3962 vec_oprnds0.truncate (0);
3963 vec_oprnds0.quick_push (vec_oprnd0);
3964 if (op_type == binary_op)
3966 if (code == WIDEN_LSHIFT_EXPR)
3967 vec_oprnd1 = op1;
3968 else
3969 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
3970 vec_oprnd1);
3971 vec_oprnds1.truncate (0);
3972 vec_oprnds1.quick_push (vec_oprnd1);
3976 /* Arguments are ready. Create the new vector stmts. */
3977 for (i = multi_step_cvt; i >= 0; i--)
3979 tree this_dest = vec_dsts[i];
3980 enum tree_code c1 = code1, c2 = code2;
3981 if (i == 0 && codecvt2 != ERROR_MARK)
3983 c1 = codecvt1;
3984 c2 = codecvt2;
3986 vect_create_vectorized_promotion_stmts (&vec_oprnds0,
3987 &vec_oprnds1,
3988 stmt, this_dest, gsi,
3989 c1, c2, decl1, decl2,
3990 op_type);
3993 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
3995 if (cvt_type)
3997 if (codecvt1 == CALL_EXPR)
3999 new_stmt = gimple_build_call (decl1, 1, vop0);
4000 new_temp = make_ssa_name (vec_dest, new_stmt);
4001 gimple_call_set_lhs (new_stmt, new_temp);
4003 else
4005 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
4006 new_temp = make_ssa_name (vec_dest);
4007 new_stmt = gimple_build_assign (new_temp, codecvt1,
4008 vop0);
4011 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4013 else
4014 new_stmt = SSA_NAME_DEF_STMT (vop0);
4016 if (slp_node)
4017 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4018 else
4020 if (!prev_stmt_info)
4021 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
4022 else
4023 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4024 prev_stmt_info = vinfo_for_stmt (new_stmt);
4029 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4030 break;
4032 case NARROW:
4033 /* In case the vectorization factor (VF) is bigger than the number
4034 of elements that we can fit in a vectype (nunits), we have to
4035 generate more than one vector stmt - i.e - we need to "unroll"
4036 the vector stmt by a factor VF/nunits. */
4037 for (j = 0; j < ncopies; j++)
4039 /* Handle uses. */
4040 if (slp_node)
4041 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4042 slp_node, -1);
4043 else
4045 vec_oprnds0.truncate (0);
4046 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
4047 vect_pow2 (multi_step_cvt) - 1);
4050 /* Arguments are ready. Create the new vector stmts. */
4051 if (cvt_type)
4052 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4054 if (codecvt1 == CALL_EXPR)
4056 new_stmt = gimple_build_call (decl1, 1, vop0);
4057 new_temp = make_ssa_name (vec_dest, new_stmt);
4058 gimple_call_set_lhs (new_stmt, new_temp);
4060 else
4062 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
4063 new_temp = make_ssa_name (vec_dest);
4064 new_stmt = gimple_build_assign (new_temp, codecvt1,
4065 vop0);
4068 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4069 vec_oprnds0[i] = new_temp;
4072 vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
4073 stmt, vec_dsts, gsi,
4074 slp_node, code1,
4075 &prev_stmt_info);
4078 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4079 break;
4082 vec_oprnds0.release ();
4083 vec_oprnds1.release ();
4084 vec_dsts.release ();
4085 interm_types.release ();
4087 return true;
4091 /* Function vectorizable_assignment.
4093 Check if STMT performs an assignment (copy) that can be vectorized.
4094 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4095 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4096 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4098 static bool
4099 vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
4100 gimple **vec_stmt, slp_tree slp_node)
4102 tree vec_dest;
4103 tree scalar_dest;
4104 tree op;
4105 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4106 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4107 tree new_temp;
4108 gimple *def_stmt;
4109 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4110 int ncopies;
4111 int i, j;
4112 vec<tree> vec_oprnds = vNULL;
4113 tree vop;
4114 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4115 vec_info *vinfo = stmt_info->vinfo;
4116 gimple *new_stmt = NULL;
4117 stmt_vec_info prev_stmt_info = NULL;
4118 enum tree_code code;
4119 tree vectype_in;
4121 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4122 return false;
4124 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4125 return false;
4127 /* Is vectorizable assignment? */
4128 if (!is_gimple_assign (stmt))
4129 return false;
4131 scalar_dest = gimple_assign_lhs (stmt);
4132 if (TREE_CODE (scalar_dest) != SSA_NAME)
4133 return false;
4135 code = gimple_assign_rhs_code (stmt);
4136 if (gimple_assign_single_p (stmt)
4137 || code == PAREN_EXPR
4138 || CONVERT_EXPR_CODE_P (code))
4139 op = gimple_assign_rhs1 (stmt);
4140 else
4141 return false;
4143 if (code == VIEW_CONVERT_EXPR)
4144 op = TREE_OPERAND (op, 0);
4146 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4147 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4149 /* Multiple types in SLP are handled by creating the appropriate number of
4150 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4151 case of SLP. */
4152 if (slp_node || PURE_SLP_STMT (stmt_info))
4153 ncopies = 1;
4154 else
4155 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4157 gcc_assert (ncopies >= 1);
4159 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[0], &vectype_in))
4161 if (dump_enabled_p ())
4162 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4163 "use not simple.\n");
4164 return false;
4167 /* We can handle NOP_EXPR conversions that do not change the number
4168 of elements or the vector size. */
4169 if ((CONVERT_EXPR_CODE_P (code)
4170 || code == VIEW_CONVERT_EXPR)
4171 && (!vectype_in
4172 || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
4173 || (GET_MODE_SIZE (TYPE_MODE (vectype))
4174 != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
4175 return false;
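/* Illustrative example (not in the original source): an int -> unsigned int
   NOP_EXPR, or a VIEW_CONVERT_EXPR between int and float of the same width,
   keeps both the element count and the vector size, so it is accepted here
   and emitted below as a plain VIEW_CONVERT_EXPR assignment.  */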
4177 /* We do not handle bit-precision changes. */
4178 if ((CONVERT_EXPR_CODE_P (code)
4179 || code == VIEW_CONVERT_EXPR)
4180 && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
4181 && ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4182 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4183 || ((TYPE_PRECISION (TREE_TYPE (op))
4184 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op))))))
4185 /* But a conversion that does not change the bit-pattern is ok. */
4186 && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4187 > TYPE_PRECISION (TREE_TYPE (op)))
4188 && TYPE_UNSIGNED (TREE_TYPE (op))))
4190 if (dump_enabled_p ())
4191 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4192 "type conversion to/from bit-precision "
4193 "unsupported.\n");
4194 return false;
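/* Illustrative note (not in the original source): e.g. a conversion whose
   destination is an integral type with TYPE_PRECISION smaller than its mode
   precision (a bit-field-like type) would need an extra truncation that is
   not generated here; a widening conversion from an unsigned source is
   exempt because zero-extension does not change the bit pattern.  */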
4197 if (!vec_stmt) /* transformation not required. */
4199 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
4200 if (dump_enabled_p ())
4201 dump_printf_loc (MSG_NOTE, vect_location,
4202 "=== vectorizable_assignment ===\n");
4203 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4204 return true;
4207 /** Transform. **/
4208 if (dump_enabled_p ())
4209 dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");
4211 /* Handle def. */
4212 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4214 /* Handle use. */
4215 for (j = 0; j < ncopies; j++)
4217 /* Handle uses. */
4218 if (j == 0)
4219 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node, -1);
4220 else
4221 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
4223 /* Arguments are ready. Create the new vector stmt. */
4224 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
4226 if (CONVERT_EXPR_CODE_P (code)
4227 || code == VIEW_CONVERT_EXPR)
4228 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
4229 new_stmt = gimple_build_assign (vec_dest, vop);
4230 new_temp = make_ssa_name (vec_dest, new_stmt);
4231 gimple_assign_set_lhs (new_stmt, new_temp);
4232 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4233 if (slp_node)
4234 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4237 if (slp_node)
4238 continue;
4240 if (j == 0)
4241 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4242 else
4243 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4245 prev_stmt_info = vinfo_for_stmt (new_stmt);
4248 vec_oprnds.release ();
4249 return true;
4253 /* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
4254 either as shift by a scalar or by a vector. */
4256 bool
4257 vect_supportable_shift (enum tree_code code, tree scalar_type)
4260 machine_mode vec_mode;
4261 optab optab;
4262 int icode;
4263 tree vectype;
4265 vectype = get_vectype_for_scalar_type (scalar_type);
4266 if (!vectype)
4267 return false;
4269 optab = optab_for_tree_code (code, vectype, optab_scalar);
4270 if (!optab
4271 || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
4273 optab = optab_for_tree_code (code, vectype, optab_vector);
4274 if (!optab
4275 || (optab_handler (optab, TYPE_MODE (vectype))
4276 == CODE_FOR_nothing))
4277 return false;
4280 vec_mode = TYPE_MODE (vectype);
4281 icode = (int) optab_handler (optab, vec_mode);
4282 if (icode == CODE_FOR_nothing)
4283 return false;
4285 return true;
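/* A minimal usage sketch (illustrative only, not part of this file): a
   pattern recognizer can ask whether a shift of a given scalar type is
   vectorizable before rewriting, say, a division by a power of two into
   shift form (OPRND0 here stands for whatever operand the caller examines):

     if (vect_supportable_shift (RSHIFT_EXPR, TREE_TYPE (oprnd0)))
       ...  emit the shift-based pattern ...  */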
4289 /* Function vectorizable_shift.
4291 Check if STMT performs a shift operation that can be vectorized.
4292 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4293 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4294 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4296 static bool
4297 vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
4298 gimple **vec_stmt, slp_tree slp_node)
4300 tree vec_dest;
4301 tree scalar_dest;
4302 tree op0, op1 = NULL;
4303 tree vec_oprnd1 = NULL_TREE;
4304 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4305 tree vectype;
4306 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4307 enum tree_code code;
4308 machine_mode vec_mode;
4309 tree new_temp;
4310 optab optab;
4311 int icode;
4312 machine_mode optab_op2_mode;
4313 gimple *def_stmt;
4314 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4315 gimple *new_stmt = NULL;
4316 stmt_vec_info prev_stmt_info;
4317 int nunits_in;
4318 int nunits_out;
4319 tree vectype_out;
4320 tree op1_vectype;
4321 int ncopies;
4322 int j, i;
4323 vec<tree> vec_oprnds0 = vNULL;
4324 vec<tree> vec_oprnds1 = vNULL;
4325 tree vop0, vop1;
4326 unsigned int k;
4327 bool scalar_shift_arg = true;
4328 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4329 vec_info *vinfo = stmt_info->vinfo;
4330 int vf;
4332 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4333 return false;
4335 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4336 return false;
4338 /* Is STMT a vectorizable binary/unary operation? */
4339 if (!is_gimple_assign (stmt))
4340 return false;
4342 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4343 return false;
4345 code = gimple_assign_rhs_code (stmt);
4347 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
4348 || code == RROTATE_EXPR))
4349 return false;
4351 scalar_dest = gimple_assign_lhs (stmt);
4352 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4353 if (TYPE_PRECISION (TREE_TYPE (scalar_dest))
4354 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4356 if (dump_enabled_p ())
4357 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4358 "bit-precision shifts not supported.\n");
4359 return false;
4362 op0 = gimple_assign_rhs1 (stmt);
4363 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
4365 if (dump_enabled_p ())
4366 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4367 "use not simple.\n");
4368 return false;
4370 /* If op0 is an external or constant def use a vector type with
4371 the same size as the output vector type. */
4372 if (!vectype)
4373 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
4374 if (vec_stmt)
4375 gcc_assert (vectype);
4376 if (!vectype)
4378 if (dump_enabled_p ())
4379 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4380 "no vectype for scalar type\n");
4381 return false;
4384 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4385 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
4386 if (nunits_out != nunits_in)
4387 return false;
4389 op1 = gimple_assign_rhs2 (stmt);
4390 if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &op1_vectype))
4392 if (dump_enabled_p ())
4393 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4394 "use not simple.\n");
4395 return false;
4398 if (loop_vinfo)
4399 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4400 else
4401 vf = 1;
4403 /* Multiple types in SLP are handled by creating the appropriate number of
4404 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4405 case of SLP. */
4406 if (slp_node || PURE_SLP_STMT (stmt_info))
4407 ncopies = 1;
4408 else
4409 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
4411 gcc_assert (ncopies >= 1);
4413 /* Determine whether the shift amount is a vector, or scalar. If the
4414 shift/rotate amount is a vector, use the vector/vector shift optabs. */
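/* Illustrative example (not in the original source): in
   "a[i] = b[i] << c[i]" the shift count is an internal def, so the
   vector/vector optab is required; in "a[i] = b[i] << n" with a constant or
   loop-invariant N the count is a scalar and the vector/scalar optab can be
   used.  */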
4416 if ((dt[1] == vect_internal_def
4417 || dt[1] == vect_induction_def)
4418 && !slp_node)
4419 scalar_shift_arg = false;
4420 else if (dt[1] == vect_constant_def
4421 || dt[1] == vect_external_def
4422 || dt[1] == vect_internal_def)
4424 /* In SLP, we need to check whether the shift count is the same in
4425 all the stmts of the node; in a loop, a constant or invariant
4426 count is always a scalar shift. */
4427 if (slp_node)
4429 vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
4430 gimple *slpstmt;
4432 FOR_EACH_VEC_ELT (stmts, k, slpstmt)
4433 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
4434 scalar_shift_arg = false;
4437 else
4439 if (dump_enabled_p ())
4440 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4441 "operand mode requires invariant argument.\n");
4442 return false;
4445 /* Vector shifted by vector. */
4446 if (!scalar_shift_arg)
4448 optab = optab_for_tree_code (code, vectype, optab_vector);
4449 if (dump_enabled_p ())
4450 dump_printf_loc (MSG_NOTE, vect_location,
4451 "vector/vector shift/rotate found.\n");
4453 if (!op1_vectype)
4454 op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
4455 if (op1_vectype == NULL_TREE
4456 || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
4458 if (dump_enabled_p ())
4459 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4460 "unusable type for last operand in"
4461 " vector/vector shift/rotate.\n");
4462 return false;
4465 /* See if the machine has a vector-shifted-by-scalar insn, and if not,
4466 whether it has a vector-shifted-by-vector insn. */
4467 else
4469 optab = optab_for_tree_code (code, vectype, optab_scalar);
4470 if (optab
4471 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
4473 if (dump_enabled_p ())
4474 dump_printf_loc (MSG_NOTE, vect_location,
4475 "vector/scalar shift/rotate found.\n");
4477 else
4479 optab = optab_for_tree_code (code, vectype, optab_vector);
4480 if (optab
4481 && (optab_handler (optab, TYPE_MODE (vectype))
4482 != CODE_FOR_nothing))
4484 scalar_shift_arg = false;
4486 if (dump_enabled_p ())
4487 dump_printf_loc (MSG_NOTE, vect_location,
4488 "vector/vector shift/rotate found.\n");
4490 /* Unlike the other binary operators, shifts/rotates have
4491 the rhs being int, instead of the same type as the lhs,
4492 so make sure the scalar is the right type if we are
4493 dealing with vectors of long long/long/short/char. */
4494 if (dt[1] == vect_constant_def)
4495 op1 = fold_convert (TREE_TYPE (vectype), op1);
4496 else if (!useless_type_conversion_p (TREE_TYPE (vectype),
4497 TREE_TYPE (op1)))
4499 if (slp_node
4500 && TYPE_MODE (TREE_TYPE (vectype))
4501 != TYPE_MODE (TREE_TYPE (op1)))
4503 if (dump_enabled_p ())
4504 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4505 "unusable type for last operand in"
4506 " vector/vector shift/rotate.\n");
4507 return false;
4509 if (vec_stmt && !slp_node)
4511 op1 = fold_convert (TREE_TYPE (vectype), op1);
4512 op1 = vect_init_vector (stmt, op1,
4513 TREE_TYPE (vectype), NULL);
4520 /* Supportable by target? */
4521 if (!optab)
4523 if (dump_enabled_p ())
4524 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4525 "no optab.\n");
4526 return false;
4528 vec_mode = TYPE_MODE (vectype);
4529 icode = (int) optab_handler (optab, vec_mode);
4530 if (icode == CODE_FOR_nothing)
4532 if (dump_enabled_p ())
4533 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4534 "op not supported by target.\n");
4535 /* Check only during analysis. */
4536 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
4537 || (vf < vect_min_worthwhile_factor (code)
4538 && !vec_stmt))
4539 return false;
4540 if (dump_enabled_p ())
4541 dump_printf_loc (MSG_NOTE, vect_location,
4542 "proceeding using word mode.\n");
4545 /* Worthwhile without SIMD support? Check only during analysis. */
4546 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
4547 && vf < vect_min_worthwhile_factor (code)
4548 && !vec_stmt)
4550 if (dump_enabled_p ())
4551 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4552 "not worthwhile without SIMD support.\n");
4553 return false;
4556 if (!vec_stmt) /* transformation not required. */
4558 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
4559 if (dump_enabled_p ())
4560 dump_printf_loc (MSG_NOTE, vect_location,
4561 "=== vectorizable_shift ===\n");
4562 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4563 return true;
4566 /** Transform. **/
4568 if (dump_enabled_p ())
4569 dump_printf_loc (MSG_NOTE, vect_location,
4570 "transform binary/unary operation.\n");
4572 /* Handle def. */
4573 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4575 prev_stmt_info = NULL;
4576 for (j = 0; j < ncopies; j++)
4578 /* Handle uses. */
4579 if (j == 0)
4581 if (scalar_shift_arg)
4583 /* Vector shl and shr insn patterns can be defined with a scalar
4584 operand 2 (the shift operand). In this case, use the constant or
4585 loop-invariant op1 directly, without extending it to vector mode
4586 first. */
4587 optab_op2_mode = insn_data[icode].operand[2].mode;
4588 if (!VECTOR_MODE_P (optab_op2_mode))
4590 if (dump_enabled_p ())
4591 dump_printf_loc (MSG_NOTE, vect_location,
4592 "operand 1 using scalar mode.\n");
4593 vec_oprnd1 = op1;
4594 vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
4595 vec_oprnds1.quick_push (vec_oprnd1);
4596 if (slp_node)
4598 /* Store vec_oprnd1 for every vector stmt to be created
4599 for SLP_NODE. We check during the analysis that all
4600 the shift arguments are the same.
4601 TODO: Allow different constants for different vector
4602 stmts generated for an SLP instance. */
4603 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
4604 vec_oprnds1.quick_push (vec_oprnd1);
4609 /* vec_oprnd1 is available if operand 1 should be of a scalar type
4610 (a special case for certain kinds of vector shifts); otherwise,
4611 operand 1 should be of a vector type (the usual case). */
4612 if (vec_oprnd1)
4613 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4614 slp_node, -1);
4615 else
4616 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
4617 slp_node, -1);
4619 else
4620 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
4622 /* Arguments are ready. Create the new vector stmt. */
4623 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4625 vop1 = vec_oprnds1[i];
4626 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
4627 new_temp = make_ssa_name (vec_dest, new_stmt);
4628 gimple_assign_set_lhs (new_stmt, new_temp);
4629 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4630 if (slp_node)
4631 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4634 if (slp_node)
4635 continue;
4637 if (j == 0)
4638 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4639 else
4640 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4641 prev_stmt_info = vinfo_for_stmt (new_stmt);
4644 vec_oprnds0.release ();
4645 vec_oprnds1.release ();
4647 return true;
4651 /* Function vectorizable_operation.
4653 Check if STMT performs a binary, unary or ternary operation that can
4654 be vectorized.
4655 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4656 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4657 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4659 static bool
4660 vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
4661 gimple **vec_stmt, slp_tree slp_node)
4663 tree vec_dest;
4664 tree scalar_dest;
4665 tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
4666 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4667 tree vectype;
4668 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4669 enum tree_code code;
4670 machine_mode vec_mode;
4671 tree new_temp;
4672 int op_type;
4673 optab optab;
4674 bool target_support_p;
4675 gimple *def_stmt;
4676 enum vect_def_type dt[3]
4677 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
4678 gimple *new_stmt = NULL;
4679 stmt_vec_info prev_stmt_info;
4680 int nunits_in;
4681 int nunits_out;
4682 tree vectype_out;
4683 int ncopies;
4684 int j, i;
4685 vec<tree> vec_oprnds0 = vNULL;
4686 vec<tree> vec_oprnds1 = vNULL;
4687 vec<tree> vec_oprnds2 = vNULL;
4688 tree vop0, vop1, vop2;
4689 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4690 vec_info *vinfo = stmt_info->vinfo;
4691 int vf;
4693 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4694 return false;
4696 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4697 return false;
4699 /* Is STMT a vectorizable binary/unary operation? */
4700 if (!is_gimple_assign (stmt))
4701 return false;
4703 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4704 return false;
4706 code = gimple_assign_rhs_code (stmt);
4708 /* For pointer addition, we should use the normal plus for
4709 the vector addition. */
4710 if (code == POINTER_PLUS_EXPR)
4711 code = PLUS_EXPR;
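/* Illustrative note (not in the original source): a GIMPLE statement such as
   "q = p + i" on a pointer P is a POINTER_PLUS_EXPR; element-wise it
   vectorizes exactly like an ordinary addition, hence the remapping.  */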
4713 /* Support only unary or binary operations. */
4714 op_type = TREE_CODE_LENGTH (code);
4715 if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
4717 if (dump_enabled_p ())
4718 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4719 "num. args = %d (not unary/binary/ternary op).\n",
4720 op_type);
4721 return false;
4724 scalar_dest = gimple_assign_lhs (stmt);
4725 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4727 /* Most operations cannot handle bit-precision types without extra
4728 truncations. */
4729 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
4730 && (TYPE_PRECISION (TREE_TYPE (scalar_dest))
4731 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4732 /* Exception are bitwise binary operations. */
4733 && code != BIT_IOR_EXPR
4734 && code != BIT_XOR_EXPR
4735 && code != BIT_AND_EXPR)
4737 if (dump_enabled_p ())
4738 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4739 "bit-precision arithmetic not supported.\n");
4740 return false;
4743 op0 = gimple_assign_rhs1 (stmt);
4744 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
4746 if (dump_enabled_p ())
4747 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4748 "use not simple.\n");
4749 return false;
4751 /* If op0 is an external or constant def use a vector type with
4752 the same size as the output vector type. */
4753 if (!vectype)
4755 /* For a boolean operand we cannot determine the vectype from
4756 an invariant value (we don't know whether it should be a
4757 vector of booleans or a vector of integers). Use the output
4758 vectype, because operations on booleans don't change the
4759 type. */
4760 if (TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE)
4762 if (TREE_CODE (TREE_TYPE (scalar_dest)) != BOOLEAN_TYPE)
4764 if (dump_enabled_p ())
4765 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4766 "not supported operation on bool value.\n");
4767 return false;
4769 vectype = vectype_out;
4771 else
4772 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
4774 if (vec_stmt)
4775 gcc_assert (vectype);
4776 if (!vectype)
4778 if (dump_enabled_p ())
4780 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4781 "no vectype for scalar type ");
4782 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
4783 TREE_TYPE (op0));
4784 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4787 return false;
4790 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4791 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
4792 if (nunits_out != nunits_in)
4793 return false;
4795 if (op_type == binary_op || op_type == ternary_op)
4797 op1 = gimple_assign_rhs2 (stmt);
4798 if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]))
4800 if (dump_enabled_p ())
4801 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4802 "use not simple.\n");
4803 return false;
4806 if (op_type == ternary_op)
4808 op2 = gimple_assign_rhs3 (stmt);
4809 if (!vect_is_simple_use (op2, vinfo, &def_stmt, &dt[2]))
4811 if (dump_enabled_p ())
4812 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4813 "use not simple.\n");
4814 return false;
4818 if (loop_vinfo)
4819 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4820 else
4821 vf = 1;
4823 /* Multiple types in SLP are handled by creating the appropriate number of
4824 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4825 case of SLP. */
4826 if (slp_node || PURE_SLP_STMT (stmt_info))
4827 ncopies = 1;
4828 else
4829 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
4831 gcc_assert (ncopies >= 1);
4833 /* Shifts are handled in vectorizable_shift (). */
4834 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
4835 || code == RROTATE_EXPR)
4836 return false;
4838 /* Supportable by target? */
4840 vec_mode = TYPE_MODE (vectype);
4841 if (code == MULT_HIGHPART_EXPR)
4842 target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
4843 else
4845 optab = optab_for_tree_code (code, vectype, optab_default);
4846 if (!optab)
4848 if (dump_enabled_p ())
4849 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4850 "no optab.\n");
4851 return false;
4853 target_support_p = (optab_handler (optab, vec_mode)
4854 != CODE_FOR_nothing);
4857 if (!target_support_p)
4859 if (dump_enabled_p ())
4860 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4861 "op not supported by target.\n");
4862 /* Check only during analysis. */
4863 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
4864 || (!vec_stmt && vf < vect_min_worthwhile_factor (code)))
4865 return false;
4866 if (dump_enabled_p ())
4867 dump_printf_loc (MSG_NOTE, vect_location,
4868 "proceeding using word mode.\n");
4871 /* Worthwhile without SIMD support? Check only during analysis. */
4872 if (!VECTOR_MODE_P (vec_mode)
4873 && !vec_stmt
4874 && vf < vect_min_worthwhile_factor (code))
4876 if (dump_enabled_p ())
4877 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4878 "not worthwhile without SIMD support.\n");
4879 return false;
4882 if (!vec_stmt) /* transformation not required. */
4884 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
4885 if (dump_enabled_p ())
4886 dump_printf_loc (MSG_NOTE, vect_location,
4887 "=== vectorizable_operation ===\n");
4888 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4889 return true;
4892 /** Transform. **/
4894 if (dump_enabled_p ())
4895 dump_printf_loc (MSG_NOTE, vect_location,
4896 "transform binary/unary operation.\n");
4898 /* Handle def. */
4899 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4901 /* In case the vectorization factor (VF) is bigger than the number
4902 of elements that we can fit in a vectype (nunits), we have to generate
4903 more than one vector stmt - i.e - we need to "unroll" the
4904 vector stmt by a factor VF/nunits. In doing so, we record a pointer
4905 from one copy of the vector stmt to the next, in the field
4906 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
4907 stages to find the correct vector defs to be used when vectorizing
4908 stmts that use the defs of the current stmt. The example below
4909 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
4910 we need to create 4 vectorized stmts):
4912 before vectorization:
4913 RELATED_STMT VEC_STMT
4914 S1: x = memref - -
4915 S2: z = x + 1 - -
4917 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
4918 there):
4919 RELATED_STMT VEC_STMT
4920 VS1_0: vx0 = memref0 VS1_1 -
4921 VS1_1: vx1 = memref1 VS1_2 -
4922 VS1_2: vx2 = memref2 VS1_3 -
4923 VS1_3: vx3 = memref3 - -
4924 S1: x = load - VS1_0
4925 S2: z = x + 1 - -
4927 step2: vectorize stmt S2 (done here):
4928 To vectorize stmt S2 we first need to find the relevant vector
4929 def for the first operand 'x'. This is, as usual, obtained from
4930 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
4931 that defines 'x' (S1). This way we find the stmt VS1_0, and the
4932 relevant vector def 'vx0'. Having found 'vx0' we can generate
4933 the vector stmt VS2_0, and as usual, record it in the
4934 STMT_VINFO_VEC_STMT of stmt S2.
4935 When creating the second copy (VS2_1), we obtain the relevant vector
4936 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
4937 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
4938 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
4939 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
4940 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
4941 chain of stmts and pointers:
4942 RELATED_STMT VEC_STMT
4943 VS1_0: vx0 = memref0 VS1_1 -
4944 VS1_1: vx1 = memref1 VS1_2 -
4945 VS1_2: vx2 = memref2 VS1_3 -
4946 VS1_3: vx3 = memref3 - -
4947 S1: x = load - VS1_0
4948 VS2_0: vz0 = vx0 + v1 VS2_1 -
4949 VS2_1: vz1 = vx1 + v1 VS2_2 -
4950 VS2_2: vz2 = vx2 + v1 VS2_3 -
4951 VS2_3: vz3 = vx3 + v1 - -
4952 S2: z = x + 1 - VS2_0 */
4954 prev_stmt_info = NULL;
4955 for (j = 0; j < ncopies; j++)
4957 /* Handle uses. */
4958 if (j == 0)
4960 if (op_type == binary_op || op_type == ternary_op)
4961 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
4962 slp_node, -1);
4963 else
4964 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4965 slp_node, -1);
4966 if (op_type == ternary_op)
4968 vec_oprnds2.create (1);
4969 vec_oprnds2.quick_push (vect_get_vec_def_for_operand (op2,
4970 stmt));
4973 else
4975 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
4976 if (op_type == ternary_op)
4978 tree vec_oprnd = vec_oprnds2.pop ();
4979 vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
4980 vec_oprnd));
4984 /* Arguments are ready. Create the new vector stmt. */
4985 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4987 vop1 = ((op_type == binary_op || op_type == ternary_op)
4988 ? vec_oprnds1[i] : NULL_TREE);
4989 vop2 = ((op_type == ternary_op)
4990 ? vec_oprnds2[i] : NULL_TREE);
4991 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2);
4992 new_temp = make_ssa_name (vec_dest, new_stmt);
4993 gimple_assign_set_lhs (new_stmt, new_temp);
4994 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4995 if (slp_node)
4996 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4999 if (slp_node)
5000 continue;
5002 if (j == 0)
5003 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5004 else
5005 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5006 prev_stmt_info = vinfo_for_stmt (new_stmt);
5009 vec_oprnds0.release ();
5010 vec_oprnds1.release ();
5011 vec_oprnds2.release ();
5013 return true;
5016 /* A helper function to ensure data reference DR's base alignment
5017 for STMT_INFO. */
5019 static void
5020 ensure_base_align (stmt_vec_info stmt_info, struct data_reference *dr)
5022 if (!dr->aux)
5023 return;
5025 if (DR_VECT_AUX (dr)->base_misaligned)
5027 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5028 tree base_decl = DR_VECT_AUX (dr)->base_decl;
5030 if (decl_in_symtab_p (base_decl))
5031 symtab_node::get (base_decl)->increase_alignment (TYPE_ALIGN (vectype));
5032 else
5034 DECL_ALIGN (base_decl) = TYPE_ALIGN (vectype);
5035 DECL_USER_ALIGN (base_decl) = 1;
5037 DR_VECT_AUX (dr)->base_misaligned = false;
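/* Illustrative note (not in the original source): for example, a file-scope
   array with the default 8- or 16-byte alignment gets its DECL_ALIGN (or its
   symbol-table alignment) raised to TYPE_ALIGN of the vector type, so the
   vectorized accesses to DR can be emitted as aligned memory operations.  */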
5042 /* Given a vector type VECTYPE returns the VECTOR_CST mask that implements
5043 reversal of the vector elements. If that is impossible to do,
5044 returns NULL. */
5046 static tree
5047 perm_mask_for_reverse (tree vectype)
5049 int i, nunits;
5050 unsigned char *sel;
5052 nunits = TYPE_VECTOR_SUBPARTS (vectype);
5053 sel = XALLOCAVEC (unsigned char, nunits);
5055 for (i = 0; i < nunits; ++i)
5056 sel[i] = nunits - 1 - i;
5058 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5059 return NULL_TREE;
5060 return vect_gen_perm_mask_checked (vectype, sel);
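/* Illustrative example (not in the original source): for a 4-element vector
   type the selector built above is {3, 2, 1, 0}, so the resulting mask makes
   VEC_PERM_EXPR <v, v, {3, 2, 1, 0}> produce V in reversed element order.  */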
5063 /* Function vectorizable_store.
5065 Check if STMT defines a non scalar data-ref (array/pointer/structure) that
5066 can be vectorized.
5067 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5068 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
5069 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5071 static bool
5072 vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
5073 slp_tree slp_node)
5075 tree scalar_dest;
5076 tree data_ref;
5077 tree op;
5078 tree vec_oprnd = NULL_TREE;
5079 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5080 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
5081 tree elem_type;
5082 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5083 struct loop *loop = NULL;
5084 machine_mode vec_mode;
5085 tree dummy;
5086 enum dr_alignment_support alignment_support_scheme;
5087 gimple *def_stmt;
5088 enum vect_def_type dt;
5089 stmt_vec_info prev_stmt_info = NULL;
5090 tree dataref_ptr = NULL_TREE;
5091 tree dataref_offset = NULL_TREE;
5092 gimple *ptr_incr = NULL;
5093 int ncopies;
5094 int j;
5095 gimple *next_stmt, *first_stmt = NULL;
5096 bool grouped_store = false;
5097 bool store_lanes_p = false;
5098 unsigned int group_size, i;
5099 vec<tree> dr_chain = vNULL;
5100 vec<tree> oprnds = vNULL;
5101 vec<tree> result_chain = vNULL;
5102 bool inv_p;
5103 bool negative = false;
5104 tree offset = NULL_TREE;
5105 vec<tree> vec_oprnds = vNULL;
5106 bool slp = (slp_node != NULL);
5107 unsigned int vec_num;
5108 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5109 vec_info *vinfo = stmt_info->vinfo;
5110 tree aggr_type;
5111 tree scatter_base = NULL_TREE, scatter_off = NULL_TREE;
5112 tree scatter_off_vectype = NULL_TREE, scatter_decl = NULL_TREE;
5113 int scatter_scale = 1;
5114 enum vect_def_type scatter_idx_dt = vect_unknown_def_type;
5115 enum vect_def_type scatter_src_dt = vect_unknown_def_type;
5116 gimple *new_stmt;
5118 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5119 return false;
5121 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
5122 return false;
5124 /* Is vectorizable store? */
5126 if (!is_gimple_assign (stmt))
5127 return false;
5129 scalar_dest = gimple_assign_lhs (stmt);
5130 if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
5131 && is_pattern_stmt_p (stmt_info))
5132 scalar_dest = TREE_OPERAND (scalar_dest, 0);
5133 if (TREE_CODE (scalar_dest) != ARRAY_REF
5134 && TREE_CODE (scalar_dest) != BIT_FIELD_REF
5135 && TREE_CODE (scalar_dest) != INDIRECT_REF
5136 && TREE_CODE (scalar_dest) != COMPONENT_REF
5137 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
5138 && TREE_CODE (scalar_dest) != REALPART_EXPR
5139 && TREE_CODE (scalar_dest) != MEM_REF)
5140 return false;
5142 gcc_assert (gimple_assign_single_p (stmt));
5144 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5145 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
5147 if (loop_vinfo)
5148 loop = LOOP_VINFO_LOOP (loop_vinfo);
5150 /* Multiple types in SLP are handled by creating the appropriate number of
5151 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5152 case of SLP. */
5153 if (slp || PURE_SLP_STMT (stmt_info))
5154 ncopies = 1;
5155 else
5156 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
5158 gcc_assert (ncopies >= 1);
5160 /* FORNOW. This restriction should be relaxed. */
5161 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
5163 if (dump_enabled_p ())
5164 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5165 "multiple types in nested loop.\n");
5166 return false;
5169 op = gimple_assign_rhs1 (stmt);
5170 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
5172 if (dump_enabled_p ())
5173 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5174 "use not simple.\n");
5175 return false;
5178 elem_type = TREE_TYPE (vectype);
5179 vec_mode = TYPE_MODE (vectype);
5181 /* FORNOW. In some cases we can vectorize even if the data-type is not
5182 supported (e.g. array initialization with 0). */
5183 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
5184 return false;
5186 if (!STMT_VINFO_DATA_REF (stmt_info))
5187 return false;
5189 if (!STMT_VINFO_STRIDED_P (stmt_info))
5191 negative =
5192 tree_int_cst_compare (loop && nested_in_vect_loop_p (loop, stmt)
5193 ? STMT_VINFO_DR_STEP (stmt_info) : DR_STEP (dr),
5194 size_zero_node) < 0;
5195 if (negative && ncopies > 1)
5197 if (dump_enabled_p ())
5198 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5199 "multiple types with negative step.\n");
5200 return false;
5202 if (negative)
5204 gcc_assert (!grouped_store);
5205 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
5206 if (alignment_support_scheme != dr_aligned
5207 && alignment_support_scheme != dr_unaligned_supported)
5209 if (dump_enabled_p ())
5210 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5211 "negative step but alignment required.\n");
5212 return false;
5214 if (dt != vect_constant_def
5215 && dt != vect_external_def
5216 && !perm_mask_for_reverse (vectype))
5218 if (dump_enabled_p ())
5219 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5220 "negative step and reversing not supported.\n");
5221 return false;
5226 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
5228 grouped_store = true;
5229 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
5230 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
5231 if (!slp
5232 && !PURE_SLP_STMT (stmt_info)
5233 && !STMT_VINFO_STRIDED_P (stmt_info))
5235 if (vect_store_lanes_supported (vectype, group_size))
5236 store_lanes_p = true;
5237 else if (!vect_grouped_store_supported (vectype, group_size))
5238 return false;
5241 if (STMT_VINFO_STRIDED_P (stmt_info)
5242 && (slp || PURE_SLP_STMT (stmt_info))
5243 && (group_size > nunits
5244 || nunits % group_size != 0))
5246 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5247 "unhandled strided group store\n");
5248 return false;
5251 if (first_stmt == stmt)
5253 /* STMT is the leader of the group. Check the operands of all the
5254 stmts of the group. */
5255 next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
5256 while (next_stmt)
5258 gcc_assert (gimple_assign_single_p (next_stmt));
5259 op = gimple_assign_rhs1 (next_stmt);
5260 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
5262 if (dump_enabled_p ())
5263 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5264 "use not simple.\n");
5265 return false;
5267 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5272 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
5274 gimple *def_stmt;
5275 scatter_decl = vect_check_gather_scatter (stmt, loop_vinfo, &scatter_base,
5276 &scatter_off, &scatter_scale);
5277 gcc_assert (scatter_decl);
5278 if (!vect_is_simple_use (scatter_off, vinfo, &def_stmt, &scatter_idx_dt,
5279 &scatter_off_vectype))
5281 if (dump_enabled_p ())
5282 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5283 "scatter index use not simple.");
5284 return false;
5288 if (!vec_stmt) /* transformation not required. */
5290 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
5291 /* The SLP costs are calculated during SLP analysis. */
5292 if (!PURE_SLP_STMT (stmt_info))
5293 vect_model_store_cost (stmt_info, ncopies, store_lanes_p, dt,
5294 NULL, NULL, NULL);
5295 return true;
5298 /** Transform. **/
5300 ensure_base_align (stmt_info, dr);
5302 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
5304 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, op, src;
5305 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (scatter_decl));
5306 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
5307 tree ptr, mask, var, scale, perm_mask = NULL_TREE;
5308 edge pe = loop_preheader_edge (loop);
5309 gimple_seq seq;
5310 basic_block new_bb;
5311 enum { NARROW, NONE, WIDEN } modifier;
5312 int scatter_off_nunits = TYPE_VECTOR_SUBPARTS (scatter_off_vectype);
5314 if (nunits == (unsigned int) scatter_off_nunits)
5315 modifier = NONE;
5316 else if (nunits == (unsigned int) scatter_off_nunits / 2)
5318 unsigned char *sel = XALLOCAVEC (unsigned char, scatter_off_nunits);
5319 modifier = WIDEN;
5321 for (i = 0; i < (unsigned int) scatter_off_nunits; ++i)
5322 sel[i] = i | nunits;
5324 perm_mask = vect_gen_perm_mask_checked (scatter_off_vectype, sel);
5325 gcc_assert (perm_mask != NULL_TREE);
5327 else if (nunits == (unsigned int) scatter_off_nunits * 2)
5329 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
5330 modifier = NARROW;
5332 for (i = 0; i < (unsigned int) nunits; ++i)
5333 sel[i] = i | scatter_off_nunits;
5335 perm_mask = vect_gen_perm_mask_checked (vectype, sel);
5336 gcc_assert (perm_mask != NULL_TREE);
5337 ncopies *= 2;
5339 else
5340 gcc_unreachable ();
5342 rettype = TREE_TYPE (TREE_TYPE (scatter_decl));
5343 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5344 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5345 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5346 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5347 scaletype = TREE_VALUE (arglist);
5349 gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
5350 && TREE_CODE (rettype) == VOID_TYPE);
5352 ptr = fold_convert (ptrtype, scatter_base);
5353 if (!is_gimple_min_invariant (ptr))
5355 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
5356 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
5357 gcc_assert (!new_bb);
5360 /* Currently we support only unconditional scatter stores,
5361 so mask should be all ones. */
5362 mask = build_int_cst (masktype, -1);
5363 mask = vect_init_vector (stmt, mask, masktype, NULL);
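/* Illustrative note (not in the original source): MASKTYPE is an integer
   type here (checked above), so the all-ones constant -1 enables every lane
   of the scatter.  */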
5365 scale = build_int_cst (scaletype, scatter_scale);
5367 prev_stmt_info = NULL;
5368 for (j = 0; j < ncopies; ++j)
5370 if (j == 0)
5372 src = vec_oprnd1
5373 = vect_get_vec_def_for_operand (gimple_assign_rhs1 (stmt), stmt);
5374 op = vec_oprnd0
5375 = vect_get_vec_def_for_operand (scatter_off, stmt);
5377 else if (modifier != NONE && (j & 1))
5379 if (modifier == WIDEN)
5381 src = vec_oprnd1
5382 = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
5383 op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
5384 stmt, gsi);
5386 else if (modifier == NARROW)
5388 src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask,
5389 stmt, gsi);
5390 op = vec_oprnd0
5391 = vect_get_vec_def_for_stmt_copy (scatter_idx_dt, vec_oprnd0);
5393 else
5394 gcc_unreachable ();
5396 else
5398 src = vec_oprnd1
5399 = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
5400 op = vec_oprnd0
5401 = vect_get_vec_def_for_stmt_copy (scatter_idx_dt, vec_oprnd0);
5404 if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
5406 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src))
5407 == TYPE_VECTOR_SUBPARTS (srctype));
5408 var = vect_get_new_ssa_name (srctype, vect_simple_var);
5409 src = build1 (VIEW_CONVERT_EXPR, srctype, src);
5410 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
5411 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5412 src = var;
5415 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
5417 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
5418 == TYPE_VECTOR_SUBPARTS (idxtype));
5419 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
5420 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
5421 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
5422 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5423 op = var;
5426 new_stmt
5427 = gimple_build_call (scatter_decl, 5, ptr, mask, op, src, scale);
5429 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5431 if (prev_stmt_info == NULL)
5432 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5433 else
5434 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5435 prev_stmt_info = vinfo_for_stmt (new_stmt);
5437 return true;
5440 if (grouped_store)
5442 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5443 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
5445 GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
5447 /* FORNOW */
5448 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
5450 /* We vectorize all the stmts of the interleaving group when we
5451 reach the last stmt in the group. */
5452 if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
5453 < GROUP_SIZE (vinfo_for_stmt (first_stmt))
5454 && !slp)
5456 *vec_stmt = NULL;
5457 return true;
5460 if (slp)
5462 grouped_store = false;
5463 /* VEC_NUM is the number of vect stmts to be created for this
5464 group. */
5465 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
5466 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
5467 gcc_assert (GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt)) == first_stmt);
5468 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5469 op = gimple_assign_rhs1 (first_stmt);
5471 else
5472 /* VEC_NUM is the number of vect stmts to be created for this
5473 group. */
5474 vec_num = group_size;
5476 else
5478 first_stmt = stmt;
5479 first_dr = dr;
5480 group_size = vec_num = 1;
5483 if (dump_enabled_p ())
5484 dump_printf_loc (MSG_NOTE, vect_location,
5485 "transform store. ncopies = %d\n", ncopies);
5487 if (STMT_VINFO_STRIDED_P (stmt_info))
5489 gimple_stmt_iterator incr_gsi;
5490 bool insert_after;
5491 gimple *incr;
5492 tree offvar;
5493 tree ivstep;
5494 tree running_off;
5495 gimple_seq stmts = NULL;
5496 tree stride_base, stride_step, alias_off;
5497 tree vec_oprnd;
5498 unsigned int g;
5500 gcc_assert (!nested_in_vect_loop_p (loop, stmt));
5502 stride_base
5503 = fold_build_pointer_plus
5504 (unshare_expr (DR_BASE_ADDRESS (first_dr)),
5505 size_binop (PLUS_EXPR,
5506 convert_to_ptrofftype (unshare_expr (DR_OFFSET (first_dr))),
5507 convert_to_ptrofftype (DR_INIT (first_dr))));
5508 stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (first_dr)));
5510 /* For a store with loop-invariant (but other than power-of-2)
5511 stride (i.e. not a grouped access) like so:
5513 for (i = 0; i < n; i += stride)
5514 array[i] = ...;
5516 we generate a new induction variable and new stores from
5517 the components of the (vectorized) rhs:
5519 for (j = 0; ; j += VF*stride)
5520 vectemp = ...;
5521 tmp1 = vectemp[0];
5522 array[j] = tmp1;
5523 tmp2 = vectemp[1];
5524 array[j + stride] = tmp2;
5528 unsigned nstores = nunits;
5529 tree ltype = elem_type;
5530 if (slp)
5532 nstores = nunits / group_size;
5533 if (group_size < nunits)
5534 ltype = build_vector_type (elem_type, group_size);
5535 else
5536 ltype = vectype;
5537 ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
5538 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
5539 group_size = 1;
5542 ivstep = stride_step;
5543 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
5544 build_int_cst (TREE_TYPE (ivstep),
5545 ncopies * nstores));
5547 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
5549 create_iv (stride_base, ivstep, NULL,
5550 loop, &incr_gsi, insert_after,
5551 &offvar, NULL);
5552 incr = gsi_stmt (incr_gsi);
5553 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
5555 stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
5556 if (stmts)
5557 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
5559 prev_stmt_info = NULL;
5560 alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)), 0);
5561 next_stmt = first_stmt;
5562 for (g = 0; g < group_size; g++)
5564 running_off = offvar;
5565 if (g)
5567 tree size = TYPE_SIZE_UNIT (ltype);
5568 tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
5569 size);
5570 tree newoff = copy_ssa_name (running_off, NULL);
5571 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
5572 running_off, pos);
5573 vect_finish_stmt_generation (stmt, incr, gsi);
5574 running_off = newoff;
5576 for (j = 0; j < ncopies; j++)
5578 /* We've set op and dt above, from gimple_assign_rhs1 (stmt),
5579 and first_stmt == stmt. */
5580 if (j == 0)
5582 if (slp)
5584 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL,
5585 slp_node, -1);
5586 vec_oprnd = vec_oprnds[0];
5588 else
5590 gcc_assert (gimple_assign_single_p (next_stmt));
5591 op = gimple_assign_rhs1 (next_stmt);
5592 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
5595 else
5597 if (slp)
5598 vec_oprnd = vec_oprnds[j];
5599 else
5601 vect_is_simple_use (vec_oprnd, vinfo, &def_stmt, &dt);
5602 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
5606 for (i = 0; i < nstores; i++)
5608 tree newref, newoff;
5609 gimple *incr, *assign;
5610 tree size = TYPE_SIZE (ltype);
5611 /* Extract the i'th component. */
5612 tree pos = fold_build2 (MULT_EXPR, bitsizetype,
5613 bitsize_int (i), size);
5614 tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
5615 size, pos);
5617 elem = force_gimple_operand_gsi (gsi, elem, true,
5618 NULL_TREE, true,
5619 GSI_SAME_STMT);
5621 newref = build2 (MEM_REF, ltype,
5622 running_off, alias_off);
5624 /* And store it to *running_off. */
5625 assign = gimple_build_assign (newref, elem);
5626 vect_finish_stmt_generation (stmt, assign, gsi);
5628 newoff = copy_ssa_name (running_off, NULL);
5629 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
5630 running_off, stride_step);
5631 vect_finish_stmt_generation (stmt, incr, gsi);
5633 running_off = newoff;
5634 if (g == group_size - 1
5635 && !slp)
5637 if (j == 0 && i == 0)
5638 STMT_VINFO_VEC_STMT (stmt_info)
5639 = *vec_stmt = assign;
5640 else
5641 STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign;
5642 prev_stmt_info = vinfo_for_stmt (assign);
5646 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5648 return true;
5651 dr_chain.create (group_size);
5652 oprnds.create (group_size);
5654 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
5655 gcc_assert (alignment_support_scheme);
5656 /* Targets with store-lane instructions must not require explicit
5657 realignment. */
5658 gcc_assert (!store_lanes_p
5659 || alignment_support_scheme == dr_aligned
5660 || alignment_support_scheme == dr_unaligned_supported);
5662 if (negative)
5663 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
5665 if (store_lanes_p)
5666 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
5667 else
5668 aggr_type = vectype;
5670 /* In case the vectorization factor (VF) is bigger than the number
5671 of elements that we can fit in a vectype (nunits), we have to generate
5672 more than one vector stmt - i.e - we need to "unroll" the
5673 vector stmt by a factor VF/nunits. For more details see documentation in
5674 vect_get_vec_def_for_copy_stmt. */
5676 /* In case of interleaving (non-unit grouped access):
5678 S1: &base + 2 = x2
5679 S2: &base = x0
5680 S3: &base + 1 = x1
5681 S4: &base + 3 = x3
5683 We create vectorized stores starting from the base address (the access of
5684 the first stmt in the chain - S2 in the above example) when the last store
5685 stmt of the chain (S4) is reached:
5687 VS1: &base = vx2
5688 VS2: &base + vec_size*1 = vx0
5689 VS3: &base + vec_size*2 = vx1
5690 VS4: &base + vec_size*3 = vx3
5692 Then permutation statements are generated:
5694 VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
5695 VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
5698 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
5699 (the order of the data-refs in the output of vect_permute_store_chain
5700 corresponds to the order of scalar stmts in the interleaving chain - see
5701 the documentation of vect_permute_store_chain()).
5703 In case of both multiple types and interleaving, above vector stores and
5704 permutation stmts are created for every copy. The result vector stmts are
5705 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
5706 STMT_VINFO_RELATED_STMT for the next copies.
5709 prev_stmt_info = NULL;
5710 for (j = 0; j < ncopies; j++)
5713 if (j == 0)
5715 if (slp)
5717 /* Get vectorized arguments for SLP_NODE. */
5718 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
5719 NULL, slp_node, -1);
5721 vec_oprnd = vec_oprnds[0];
5723 else
5725 /* For interleaved stores we collect vectorized defs for all the
5726 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
5727 used as an input to vect_permute_store_chain(), and OPRNDS as
5728 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
5730 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
5731 OPRNDS are of size 1. */
5732 next_stmt = first_stmt;
5733 for (i = 0; i < group_size; i++)
5735 /* Since gaps are not supported for interleaved stores,
5736 GROUP_SIZE is the exact number of stmts in the chain.
5737 Therefore, NEXT_STMT can't be NULL_TREE. In case that
5738 there is no interleaving, GROUP_SIZE is 1, and only one
5739 iteration of the loop will be executed. */
5740 gcc_assert (next_stmt
5741 && gimple_assign_single_p (next_stmt));
5742 op = gimple_assign_rhs1 (next_stmt);
5744 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
5745 dr_chain.quick_push (vec_oprnd);
5746 oprnds.quick_push (vec_oprnd);
5747 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5751 /* We should have caught mismatched types earlier. */
5752 gcc_assert (useless_type_conversion_p (vectype,
5753 TREE_TYPE (vec_oprnd)));
5754 bool simd_lane_access_p
5755 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
5756 if (simd_lane_access_p
5757 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
5758 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
5759 && integer_zerop (DR_OFFSET (first_dr))
5760 && integer_zerop (DR_INIT (first_dr))
5761 && alias_sets_conflict_p (get_alias_set (aggr_type),
5762 get_alias_set (DR_REF (first_dr))))
5764 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
5765 dataref_offset = build_int_cst (reference_alias_ptr_type
5766 (DR_REF (first_dr)), 0);
5767 inv_p = false;
5769 else
5770 dataref_ptr
5771 = vect_create_data_ref_ptr (first_stmt, aggr_type,
5772 simd_lane_access_p ? loop : NULL,
5773 offset, &dummy, gsi, &ptr_incr,
5774 simd_lane_access_p, &inv_p);
5775 gcc_assert (bb_vinfo || !inv_p);
5777 else
5779 /* For interleaved stores we created vectorized defs for all the
5780 defs stored in OPRNDS in the previous iteration (previous copy).
5781 DR_CHAIN is then used as an input to vect_permute_store_chain(),
5782 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
5783 next copy.
5784 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
5785 OPRNDS are of size 1. */
5786 for (i = 0; i < group_size; i++)
5788 op = oprnds[i];
5789 vect_is_simple_use (op, vinfo, &def_stmt, &dt);
5790 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
5791 dr_chain[i] = vec_oprnd;
5792 oprnds[i] = vec_oprnd;
5794 if (dataref_offset)
5795 dataref_offset
5796 = int_const_binop (PLUS_EXPR, dataref_offset,
5797 TYPE_SIZE_UNIT (aggr_type));
5798 else
5799 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
5800 TYPE_SIZE_UNIT (aggr_type));
5803 if (store_lanes_p)
5805 tree vec_array;
5807 /* Combine all the vectors into an array. */
5808 vec_array = create_vector_array (vectype, vec_num);
5809 for (i = 0; i < vec_num; i++)
5811 vec_oprnd = dr_chain[i];
5812 write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
5815 /* Emit:
5816 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
5817 data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
5818 new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array);
5819 gimple_call_set_lhs (new_stmt, data_ref);
5820 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5822 else
5824 new_stmt = NULL;
5825 if (grouped_store)
5827 if (j == 0)
5828 result_chain.create (group_size);
5829 /* Permute. */
5830 vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
5831 &result_chain);
5834 next_stmt = first_stmt;
5835 for (i = 0; i < vec_num; i++)
5837 unsigned align, misalign;
5839 if (i > 0)
5840 /* Bump the vector pointer. */
5841 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
5842 stmt, NULL_TREE);
5844 if (slp)
5845 vec_oprnd = vec_oprnds[i];
5846 else if (grouped_store)
5847 /* For grouped stores vectorized defs are interleaved in
5848 vect_permute_store_chain(). */
5849 vec_oprnd = result_chain[i];
5851 data_ref = fold_build2 (MEM_REF, TREE_TYPE (vec_oprnd),
5852 dataref_ptr,
5853 dataref_offset
5854 ? dataref_offset
5855 : build_int_cst (reference_alias_ptr_type
5856 (DR_REF (first_dr)), 0));
5857 align = TYPE_ALIGN_UNIT (vectype);
5858 if (aligned_access_p (first_dr))
5859 misalign = 0;
5860 else if (DR_MISALIGNMENT (first_dr) == -1)
5862 if (DR_VECT_AUX (first_dr)->base_element_aligned)
5863 align = TYPE_ALIGN_UNIT (elem_type);
5864 else
5865 align = get_object_alignment (DR_REF (first_dr))
5866 / BITS_PER_UNIT;
5867 misalign = 0;
5868 TREE_TYPE (data_ref)
5869 = build_aligned_type (TREE_TYPE (data_ref),
5870 align * BITS_PER_UNIT);
5872 else
5874 TREE_TYPE (data_ref)
5875 = build_aligned_type (TREE_TYPE (data_ref),
5876 TYPE_ALIGN (elem_type));
5877 misalign = DR_MISALIGNMENT (first_dr);
5879 if (dataref_offset == NULL_TREE
5880 && TREE_CODE (dataref_ptr) == SSA_NAME)
5881 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
5882 misalign);
5884 if (negative
5885 && dt != vect_constant_def
5886 && dt != vect_external_def)
5888 tree perm_mask = perm_mask_for_reverse (vectype);
5889 tree perm_dest
5890 = vect_create_destination_var (gimple_assign_rhs1 (stmt),
5891 vectype);
5892 tree new_temp = make_ssa_name (perm_dest);
5894 /* Generate the permute statement. */
5895 gimple *perm_stmt
5896 = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
5897 vec_oprnd, perm_mask);
5898 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5900 perm_stmt = SSA_NAME_DEF_STMT (new_temp);
5901 vec_oprnd = new_temp;
5904 /* Arguments are ready. Create the new vector stmt. */
5905 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
5906 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5908 if (slp)
5909 continue;
5911 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5912 if (!next_stmt)
5913 break;
5916 if (!slp)
5918 if (j == 0)
5919 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5920 else
5921 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5922 prev_stmt_info = vinfo_for_stmt (new_stmt);
5926 dr_chain.release ();
5927 oprnds.release ();
5928 result_chain.release ();
5929 vec_oprnds.release ();
5931 return true;
5934 /* Given a vector type VECTYPE, turns permutation SEL into the equivalent
5935 VECTOR_CST mask. No checks are made that the target platform supports the
5936 mask, so callers may wish to test can_vec_perm_p separately, or use
5937 vect_gen_perm_mask_checked. */
5939 tree
5940 vect_gen_perm_mask_any (tree vectype, const unsigned char *sel)
5942 tree mask_elt_type, mask_type, mask_vec, *mask_elts;
5943 int i, nunits;
5945 nunits = TYPE_VECTOR_SUBPARTS (vectype);
5947 mask_elt_type = lang_hooks.types.type_for_mode
5948 (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
5949 mask_type = get_vectype_for_scalar_type (mask_elt_type);
5951 mask_elts = XALLOCAVEC (tree, nunits);
5952 for (i = nunits - 1; i >= 0; i--)
5953 mask_elts[i] = build_int_cst (mask_elt_type, sel[i]);
5954 mask_vec = build_vector (mask_type, mask_elts);
5956 return mask_vec;
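/* As an illustration (not part of the original sources): for a four-element
   integer vector type and SEL = {0, 4, 1, 5}, the function returns the
   VECTOR_CST {0, 4, 1, 5} of the matching integer mask vector type, i.e.
   the selector that interleaves the low halves of two input vectors in a
   VEC_PERM_EXPR.  */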
5959 /* Checked version of vect_gen_perm_mask_any. Asserts can_vec_perm_p,
5960 i.e. that the target supports the pattern _for arbitrary input vectors_. */
5962 tree
5963 vect_gen_perm_mask_checked (tree vectype, const unsigned char *sel)
5965 gcc_assert (can_vec_perm_p (TYPE_MODE (vectype), false, sel));
5966 return vect_gen_perm_mask_any (vectype, sel);
5969 /* Given vector variables X and Y that were generated for the scalar
5970 STMT, generate instructions to permute the vector elements of X and Y
5971 using permutation mask MASK_VEC, insert them at *GSI and return the
5972 permuted vector variable. */
5974 static tree
5975 permute_vec_elements (tree x, tree y, tree mask_vec, gimple *stmt,
5976 gimple_stmt_iterator *gsi)
5978 tree vectype = TREE_TYPE (x);
5979 tree perm_dest, data_ref;
5980 gimple *perm_stmt;
5982 perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
5983 data_ref = make_ssa_name (perm_dest);
5985 /* Generate the permute statement. */
5986 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
5987 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5989 return data_ref;
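/* Illustrative example (not from the original sources): with
   X = {x0, x1, x2, x3}, Y = {y0, y1, y2, y3} and MASK_VEC = {0, 4, 1, 5},
   the emitted statement is of the form

     tmp_N = VEC_PERM_EXPR <X, Y, {0, 4, 1, 5}>;

   so the returned SSA name holds {x0, y0, x1, y1}.  */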
5992 /* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
5993 inserting them on the loop's preheader edge. Returns true if we
5994 were successful in doing so (and thus STMT can then be moved),
5995 otherwise returns false. */
5997 static bool
5998 hoist_defs_of_uses (gimple *stmt, struct loop *loop)
6000 ssa_op_iter i;
6001 tree op;
6002 bool any = false;
6004 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
6006 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
6007 if (!gimple_nop_p (def_stmt)
6008 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
6010 /* Make sure we don't need to recurse. While we could do
6011 so in simple cases, when there are more complex use webs
6012 we don't have an easy way to preserve stmt order to fulfil
6013 dependencies within them. */
6014 tree op2;
6015 ssa_op_iter i2;
6016 if (gimple_code (def_stmt) == GIMPLE_PHI)
6017 return false;
6018 FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
6020 gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
6021 if (!gimple_nop_p (def_stmt2)
6022 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
6023 return false;
6025 any = true;
6029 if (!any)
6030 return true;
6032 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
6034 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
6035 if (!gimple_nop_p (def_stmt)
6036 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
6038 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
6039 gsi_remove (&gsi, false);
6040 gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
6044 return true;
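/* Illustrative example (assumed, not from the original sources): for an
   invariant load STMT

     loop:
       addr_2 = &a[0] + off_1;     <-- defines an SSA use of STMT
       x_3 = *addr_2;              <-- STMT

   where off_1 is defined outside the loop, the definition of addr_2 is
   moved to the loop preheader so that the load itself can afterwards be
   emitted there as well (see the invariant-load handling in
   vectorizable_load).  */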
6047 /* vectorizable_load.
6049 Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
6050 can be vectorized.
6051 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
6052 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
6053 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
6055 static bool
6056 vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
6057 slp_tree slp_node, slp_instance slp_node_instance)
6059 tree scalar_dest;
6060 tree vec_dest = NULL;
6061 tree data_ref = NULL;
6062 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
6063 stmt_vec_info prev_stmt_info;
6064 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6065 struct loop *loop = NULL;
6066 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
6067 bool nested_in_vect_loop = false;
6068 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
6069 tree elem_type;
6070 tree new_temp;
6071 machine_mode mode;
6072 gimple *new_stmt = NULL;
6073 tree dummy;
6074 enum dr_alignment_support alignment_support_scheme;
6075 tree dataref_ptr = NULL_TREE;
6076 tree dataref_offset = NULL_TREE;
6077 gimple *ptr_incr = NULL;
6078 int ncopies;
6079 int i, j, group_size = -1, group_gap_adj;
6080 tree msq = NULL_TREE, lsq;
6081 tree offset = NULL_TREE;
6082 tree byte_offset = NULL_TREE;
6083 tree realignment_token = NULL_TREE;
6084 gphi *phi = NULL;
6085 vec<tree> dr_chain = vNULL;
6086 bool grouped_load = false;
6087 bool load_lanes_p = false;
6088 gimple *first_stmt;
6089 bool inv_p;
6090 bool negative = false;
6091 bool compute_in_loop = false;
6092 struct loop *at_loop;
6093 int vec_num;
6094 bool slp = (slp_node != NULL);
6095 bool slp_perm = false;
6096 enum tree_code code;
6097 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
6098 int vf;
6099 tree aggr_type;
6100 tree gather_base = NULL_TREE, gather_off = NULL_TREE;
6101 tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
6102 int gather_scale = 1;
6103 enum vect_def_type gather_dt = vect_unknown_def_type;
6104 vec_info *vinfo = stmt_info->vinfo;
6106 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
6107 return false;
6109 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
6110 return false;
6112 /* Is vectorizable load? */
6113 if (!is_gimple_assign (stmt))
6114 return false;
6116 scalar_dest = gimple_assign_lhs (stmt);
6117 if (TREE_CODE (scalar_dest) != SSA_NAME)
6118 return false;
6120 code = gimple_assign_rhs_code (stmt);
6121 if (code != ARRAY_REF
6122 && code != BIT_FIELD_REF
6123 && code != INDIRECT_REF
6124 && code != COMPONENT_REF
6125 && code != IMAGPART_EXPR
6126 && code != REALPART_EXPR
6127 && code != MEM_REF
6128 && TREE_CODE_CLASS (code) != tcc_declaration)
6129 return false;
6131 if (!STMT_VINFO_DATA_REF (stmt_info))
6132 return false;
6134 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
6135 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
6137 if (loop_vinfo)
6139 loop = LOOP_VINFO_LOOP (loop_vinfo);
6140 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
6141 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
6143 else
6144 vf = 1;
6146 /* Multiple types in SLP are handled by creating the appropriate number of
6147 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
6148 case of SLP. */
6149 if (slp || PURE_SLP_STMT (stmt_info))
6150 ncopies = 1;
6151 else
6152 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
6154 gcc_assert (ncopies >= 1);
6156 /* FORNOW. This restriction should be relaxed. */
6157 if (nested_in_vect_loop && ncopies > 1)
6159 if (dump_enabled_p ())
6160 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6161 "multiple types in nested loop.\n");
6162 return false;
6165 /* Invalidate assumptions made by dependence analysis when vectorization
6166 on the unrolled body effectively re-orders stmts. */
6167 if (ncopies > 1
6168 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
6169 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6170 > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
6172 if (dump_enabled_p ())
6173 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6174 "cannot perform implicit CSE when unrolling "
6175 "with negative dependence distance\n");
6176 return false;
6179 elem_type = TREE_TYPE (vectype);
6180 mode = TYPE_MODE (vectype);
6182 /* FORNOW. In some cases we can vectorize even if the data type is not
6183 supported (e.g. data copies). */
6184 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
6186 if (dump_enabled_p ())
6187 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6188 "Aligned load, but unsupported type.\n");
6189 return false;
6192 /* Check if the load is a part of an interleaving chain. */
6193 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
6195 grouped_load = true;
6196 /* FORNOW */
6197 gcc_assert (!nested_in_vect_loop && !STMT_VINFO_GATHER_SCATTER_P (stmt_info));
6199 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
6201 /* If this is single-element interleaving with an element distance
6202 that leaves unused vector loads around, punt - we at least create
6203 very sub-optimal code in that case (and blow up memory,
6204 see PR65518). */
6205 if (first_stmt == stmt
6206 && !GROUP_NEXT_ELEMENT (stmt_info)
6207 && GROUP_SIZE (stmt_info) > TYPE_VECTOR_SUBPARTS (vectype))
6209 if (dump_enabled_p ())
6210 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6211 "single-element interleaving not supported "
6212 "for not adjacent vector loads\n");
6213 return false;
6216 if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
6217 slp_perm = true;
6219 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
6220 if (!slp
6221 && !PURE_SLP_STMT (stmt_info)
6222 && !STMT_VINFO_STRIDED_P (stmt_info))
6224 if (vect_load_lanes_supported (vectype, group_size))
6225 load_lanes_p = true;
6226 else if (!vect_grouped_load_supported (vectype, group_size))
6227 return false;
6230 /* Invalidate assumptions made by dependence analysis when vectorization
6231 on the unrolled body effectively re-orders stmts. */
6232 if (!PURE_SLP_STMT (stmt_info)
6233 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
6234 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6235 > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
6237 if (dump_enabled_p ())
6238 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6239 "cannot perform implicit CSE when performing "
6240 "group loads with negative dependence distance\n");
6241 return false;
6244 /* Similarly, when the stmt is a load that is both part of an SLP
6245 instance and a loop-vectorized stmt via the same-dr mechanism,
6246 we have to give up. */
6247 if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)
6248 && (STMT_SLP_TYPE (stmt_info)
6249 != STMT_SLP_TYPE (vinfo_for_stmt
6250 (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)))))
6252 if (dump_enabled_p ())
6253 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6254 "conflicting SLP types for CSEd load\n");
6255 return false;
6260 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
6262 gimple *def_stmt;
6263 gather_decl = vect_check_gather_scatter (stmt, loop_vinfo, &gather_base,
6264 &gather_off, &gather_scale);
6265 gcc_assert (gather_decl);
6266 if (!vect_is_simple_use (gather_off, vinfo, &def_stmt, &gather_dt,
6267 &gather_off_vectype))
6269 if (dump_enabled_p ())
6270 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6271 "gather index use not simple.\n");
6272 return false;
6275 else if (STMT_VINFO_STRIDED_P (stmt_info))
6277 if ((grouped_load
6278 && (slp || PURE_SLP_STMT (stmt_info)))
6279 && (group_size > nunits
6280 || nunits % group_size != 0))
6282 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6283 "unhandled strided group load\n");
6284 return false;
6287 else
6289 negative = tree_int_cst_compare (nested_in_vect_loop
6290 ? STMT_VINFO_DR_STEP (stmt_info)
6291 : DR_STEP (dr),
6292 size_zero_node) < 0;
6293 if (negative && ncopies > 1)
6295 if (dump_enabled_p ())
6296 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6297 "multiple types with negative step.\n");
6298 return false;
6301 if (negative)
6303 if (grouped_load)
6305 if (dump_enabled_p ())
6306 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6307 "negative step for group load not supported"
6308 "\n");
6309 return false;
6311 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
6312 if (alignment_support_scheme != dr_aligned
6313 && alignment_support_scheme != dr_unaligned_supported)
6315 if (dump_enabled_p ())
6316 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6317 "negative step but alignment required.\n");
6318 return false;
6320 if (!perm_mask_for_reverse (vectype))
6322 if (dump_enabled_p ())
6323 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6324 "negative step and reversing not supported."
6325 "\n");
6326 return false;
6331 if (!vec_stmt) /* transformation not required. */
6333 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
6334 /* The SLP costs are calculated during SLP analysis. */
6335 if (!PURE_SLP_STMT (stmt_info))
6336 vect_model_load_cost (stmt_info, ncopies, load_lanes_p,
6337 NULL, NULL, NULL);
6338 return true;
6341 if (dump_enabled_p ())
6342 dump_printf_loc (MSG_NOTE, vect_location,
6343 "transform load. ncopies = %d\n", ncopies);
6345 /** Transform. **/
6347 ensure_base_align (stmt_info, dr);
6349 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
6351 tree vec_oprnd0 = NULL_TREE, op;
6352 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
6353 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
6354 tree ptr, mask, var, scale, merge, perm_mask = NULL_TREE, prev_res = NULL_TREE;
6355 edge pe = loop_preheader_edge (loop);
6356 gimple_seq seq;
6357 basic_block new_bb;
6358 enum { NARROW, NONE, WIDEN } modifier;
6359 int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);
6361 if (nunits == gather_off_nunits)
6362 modifier = NONE;
6363 else if (nunits == gather_off_nunits / 2)
6365 unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
6366 modifier = WIDEN;
6368 for (i = 0; i < gather_off_nunits; ++i)
6369 sel[i] = i | nunits;
6371 perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
6373 else if (nunits == gather_off_nunits * 2)
6375 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
6376 modifier = NARROW;
6378 for (i = 0; i < nunits; ++i)
6379 sel[i] = i < gather_off_nunits
6380 ? i : i + nunits - gather_off_nunits;
6382 perm_mask = vect_gen_perm_mask_checked (vectype, sel);
6383 ncopies *= 2;
6385 else
6386 gcc_unreachable ();
6388 rettype = TREE_TYPE (TREE_TYPE (gather_decl));
6389 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6390 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6391 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6392 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6393 scaletype = TREE_VALUE (arglist);
6394 gcc_checking_assert (types_compatible_p (srctype, rettype));
6396 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6398 ptr = fold_convert (ptrtype, gather_base);
6399 if (!is_gimple_min_invariant (ptr))
6401 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
6402 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
6403 gcc_assert (!new_bb);
6406 /* Currently we support only unconditional gather loads,
6407 so mask should be all ones. */
6408 if (TREE_CODE (masktype) == INTEGER_TYPE)
6409 mask = build_int_cst (masktype, -1);
6410 else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
6412 mask = build_int_cst (TREE_TYPE (masktype), -1);
6413 mask = build_vector_from_val (masktype, mask);
6414 mask = vect_init_vector (stmt, mask, masktype, NULL);
6416 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
6418 REAL_VALUE_TYPE r;
6419 long tmp[6];
6420 for (j = 0; j < 6; ++j)
6421 tmp[j] = -1;
6422 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
6423 mask = build_real (TREE_TYPE (masktype), r);
6424 mask = build_vector_from_val (masktype, mask);
6425 mask = vect_init_vector (stmt, mask, masktype, NULL);
6427 else
6428 gcc_unreachable ();
6430 scale = build_int_cst (scaletype, gather_scale);
6432 if (TREE_CODE (TREE_TYPE (rettype)) == INTEGER_TYPE)
6433 merge = build_int_cst (TREE_TYPE (rettype), 0);
6434 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype)))
6436 REAL_VALUE_TYPE r;
6437 long tmp[6];
6438 for (j = 0; j < 6; ++j)
6439 tmp[j] = 0;
6440 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (rettype)));
6441 merge = build_real (TREE_TYPE (rettype), r);
6443 else
6444 gcc_unreachable ();
6445 merge = build_vector_from_val (rettype, merge);
6446 merge = vect_init_vector (stmt, merge, rettype, NULL);
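/* For each copy the loop below emits a call of the form

     GATHER_DECL (MERGE, PTR, IDX, MASK, SCALE)

   where MERGE supplies the value of masked-off lanes (unused here since
   MASK is all ones), PTR is the invariant base pointer, IDX the vector of
   offsets and SCALE the element scale.  On x86, for example, GATHER_DECL
   is one of the AVX2/AVX-512 gather builtins (illustrative note, not part
   of the original sources).  */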
6448 prev_stmt_info = NULL;
6449 for (j = 0; j < ncopies; ++j)
6451 if (modifier == WIDEN && (j & 1))
6452 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
6453 perm_mask, stmt, gsi);
6454 else if (j == 0)
6455 op = vec_oprnd0
6456 = vect_get_vec_def_for_operand (gather_off, stmt);
6457 else
6458 op = vec_oprnd0
6459 = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);
6461 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
6463 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
6464 == TYPE_VECTOR_SUBPARTS (idxtype));
6465 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
6466 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
6467 new_stmt
6468 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
6469 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6470 op = var;
6473 new_stmt
6474 = gimple_build_call (gather_decl, 5, merge, ptr, op, mask, scale);
6476 if (!useless_type_conversion_p (vectype, rettype))
6478 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
6479 == TYPE_VECTOR_SUBPARTS (rettype));
6480 op = vect_get_new_ssa_name (rettype, vect_simple_var);
6481 gimple_call_set_lhs (new_stmt, op);
6482 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6483 var = make_ssa_name (vec_dest);
6484 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
6485 new_stmt
6486 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
6488 else
6490 var = make_ssa_name (vec_dest, new_stmt);
6491 gimple_call_set_lhs (new_stmt, var);
6494 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6496 if (modifier == NARROW)
6498 if ((j & 1) == 0)
6500 prev_res = var;
6501 continue;
6503 var = permute_vec_elements (prev_res, var,
6504 perm_mask, stmt, gsi);
6505 new_stmt = SSA_NAME_DEF_STMT (var);
6508 if (prev_stmt_info == NULL)
6509 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6510 else
6511 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6512 prev_stmt_info = vinfo_for_stmt (new_stmt);
6514 return true;
6516 else if (STMT_VINFO_STRIDED_P (stmt_info))
6518 gimple_stmt_iterator incr_gsi;
6519 bool insert_after;
6520 gimple *incr;
6521 tree offvar;
6522 tree ivstep;
6523 tree running_off;
6524 vec<constructor_elt, va_gc> *v = NULL;
6525 gimple_seq stmts = NULL;
6526 tree stride_base, stride_step, alias_off;
6528 gcc_assert (!nested_in_vect_loop);
6530 if (slp && grouped_load)
6531 first_dr = STMT_VINFO_DATA_REF
6532 (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
6533 else
6534 first_dr = dr;
6536 stride_base
6537 = fold_build_pointer_plus
6538 (DR_BASE_ADDRESS (first_dr),
6539 size_binop (PLUS_EXPR,
6540 convert_to_ptrofftype (DR_OFFSET (first_dr)),
6541 convert_to_ptrofftype (DR_INIT (first_dr))));
6542 stride_step = fold_convert (sizetype, DR_STEP (first_dr));
6544 /* For a load with loop-invariant (but other than power-of-2)
6545 stride (i.e. not a grouped access) like so:
6547 for (i = 0; i < n; i += stride)
6548 ... = array[i];
6550 we generate a new induction variable and new accesses to
6551 form a new vector (or vectors, depending on ncopies):
6553 for (j = 0; ; j += VF*stride)
6554 tmp1 = array[j];
6555 tmp2 = array[j + stride];
6557 vectemp = {tmp1, tmp2, ...}
6560 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
6561 build_int_cst (TREE_TYPE (stride_step), vf));
6563 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
6565 create_iv (unshare_expr (stride_base), unshare_expr (ivstep), NULL,
6566 loop, &incr_gsi, insert_after,
6567 &offvar, NULL);
6568 incr = gsi_stmt (incr_gsi);
6569 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
6571 stride_step = force_gimple_operand (unshare_expr (stride_step),
6572 &stmts, true, NULL_TREE);
6573 if (stmts)
6574 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
6576 prev_stmt_info = NULL;
6577 running_off = offvar;
6578 alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)), 0);
6579 int nloads = nunits;
6580 tree ltype = TREE_TYPE (vectype);
6581 auto_vec<tree> dr_chain;
6582 if (slp)
6584 nloads = nunits / group_size;
6585 if (group_size < nunits)
6586 ltype = build_vector_type (TREE_TYPE (vectype), group_size);
6587 else
6588 ltype = vectype;
6589 ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
6590 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6591 if (slp_perm)
6592 dr_chain.create (ncopies);
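/* Illustrative note (assumed example, not from the original sources):
   with four-element vectors (nunits == 4) and a strided SLP group of
   size 2, NLOADS is 2 and LTYPE is a two-element sub-vector type; each
   vector statement is then assembled from two LTYPE loads that are
   STRIDE_STEP bytes apart via RUNNING_OFF.  */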
6594 for (j = 0; j < ncopies; j++)
6596 tree vec_inv;
6598 if (nloads > 1)
6600 vec_alloc (v, nloads);
6601 for (i = 0; i < nloads; i++)
6603 tree newref, newoff;
6604 gimple *incr;
6605 newref = build2 (MEM_REF, ltype, running_off, alias_off);
6607 newref = force_gimple_operand_gsi (gsi, newref, true,
6608 NULL_TREE, true,
6609 GSI_SAME_STMT);
6610 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, newref);
6611 newoff = copy_ssa_name (running_off);
6612 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6613 running_off, stride_step);
6614 vect_finish_stmt_generation (stmt, incr, gsi);
6616 running_off = newoff;
6619 vec_inv = build_constructor (vectype, v);
6620 new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
6621 new_stmt = SSA_NAME_DEF_STMT (new_temp);
6623 else
6625 new_stmt = gimple_build_assign (make_ssa_name (ltype),
6626 build2 (MEM_REF, ltype,
6627 running_off, alias_off));
6628 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6630 tree newoff = copy_ssa_name (running_off);
6631 gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6632 running_off, stride_step);
6633 vect_finish_stmt_generation (stmt, incr, gsi);
6635 running_off = newoff;
6638 if (slp)
6640 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
6641 if (slp_perm)
6642 dr_chain.quick_push (gimple_assign_lhs (new_stmt));
6644 else
6646 if (j == 0)
6647 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6648 else
6649 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6650 prev_stmt_info = vinfo_for_stmt (new_stmt);
6653 if (slp_perm)
6654 vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
6655 slp_node_instance, false);
6656 return true;
6659 if (grouped_load)
6661 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
6662 /* For BB vectorization we directly vectorize a subchain
6663 without permutation. */
6664 if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
6665 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
6667 /* Check if the chain of loads is already vectorized. */
6668 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
6669 /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
6670 ??? But we can only do so if there is exactly one
6671 as we have no way to get at the rest. Leave the CSE
6672 opportunity alone.
6673 ??? With the group load eventually participating
6674 in multiple different permutations (having multiple
6675 slp nodes which refer to the same group) the CSE
6676 is even wrong code. See PR56270. */
6677 && !slp)
6679 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
6680 return true;
6682 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
6683 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
6684 group_gap_adj = 0;
6686 /* VEC_NUM is the number of vect stmts to be created for this group. */
6687 if (slp)
6689 grouped_load = false;
6690 /* For SLP permutation support we need to load the whole group,
6691 not only the number of vector stmts the permutation result
6692 fits in. */
6693 if (slp_perm)
6694 vec_num = (group_size * vf + nunits - 1) / nunits;
6695 else
6696 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6697 group_gap_adj = vf * group_size - nunits * vec_num;
6699 else
6700 vec_num = group_size;
6702 else
6704 first_stmt = stmt;
6705 first_dr = dr;
6706 group_size = vec_num = 1;
6707 group_gap_adj = 0;
6710 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
6711 gcc_assert (alignment_support_scheme);
6712 /* Targets with load-lane instructions must not require explicit
6713 realignment. */
6714 gcc_assert (!load_lanes_p
6715 || alignment_support_scheme == dr_aligned
6716 || alignment_support_scheme == dr_unaligned_supported);
6718 /* In case the vectorization factor (VF) is bigger than the number
6719 of elements that we can fit in a vectype (nunits), we have to generate
6720 more than one vector stmt - i.e - we need to "unroll" the
6721 vector stmt by a factor VF/nunits. In doing so, we record a pointer
6722 from one copy of the vector stmt to the next, in the field
6723 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
6724 stages to find the correct vector defs to be used when vectorizing
6725 stmts that use the defs of the current stmt. The example below
6726 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
6727 need to create 4 vectorized stmts):
6729 before vectorization:
6730 RELATED_STMT VEC_STMT
6731 S1: x = memref - -
6732 S2: z = x + 1 - -
6734 step 1: vectorize stmt S1:
6735 We first create the vector stmt VS1_0, and, as usual, record a
6736 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
6737 Next, we create the vector stmt VS1_1, and record a pointer to
6738 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
6739 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
6740 stmts and pointers:
6741 RELATED_STMT VEC_STMT
6742 VS1_0: vx0 = memref0 VS1_1 -
6743 VS1_1: vx1 = memref1 VS1_2 -
6744 VS1_2: vx2 = memref2 VS1_3 -
6745 VS1_3: vx3 = memref3 - -
6746 S1: x = load - VS1_0
6747 S2: z = x + 1 - -
6749 See in documentation in vect_get_vec_def_for_stmt_copy for how the
6750 information we recorded in RELATED_STMT field is used to vectorize
6751 stmt S2. */
6753 /* In case of interleaving (non-unit grouped access):
6755 S1: x2 = &base + 2
6756 S2: x0 = &base
6757 S3: x1 = &base + 1
6758 S4: x3 = &base + 3
6760 Vectorized loads are created in the order of memory accesses
6761 starting from the access of the first stmt of the chain:
6763 VS1: vx0 = &base
6764 VS2: vx1 = &base + vec_size*1
6765 VS3: vx3 = &base + vec_size*2
6766 VS4: vx4 = &base + vec_size*3
6768 Then permutation statements are generated:
6770 VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
6771 VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
6774 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
6775 (the order of the data-refs in the output of vect_permute_load_chain
6776 corresponds to the order of scalar stmts in the interleaving chain - see
6777 the documentation of vect_permute_load_chain()).
6778 The generation of permutation stmts and recording them in
6779 STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().
6781 In case of both multiple types and interleaving, the vector loads and
6782 permutation stmts above are created for every copy. The result vector
6783 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
6784 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
6786 /* If the data reference is aligned (dr_aligned) or potentially unaligned
6787 on a target that supports unaligned accesses (dr_unaligned_supported)
6788 we generate the following code:
6789 p = initial_addr;
6790 indx = 0;
6791 loop {
6792 p = p + indx * vectype_size;
6793 vec_dest = *(p);
6794 indx = indx + 1;
6797 Otherwise, the data reference is potentially unaligned on a target that
6798 does not support unaligned accesses (dr_explicit_realign_optimized) -
6799 then generate the following code, in which the data in each iteration is
6800 obtained by two vector loads, one from the previous iteration, and one
6801 from the current iteration:
6802 p1 = initial_addr;
6803 msq_init = *(floor(p1))
6804 p2 = initial_addr + VS - 1;
6805 realignment_token = call target_builtin;
6806 indx = 0;
6807 loop {
6808 p2 = p2 + indx * vectype_size
6809 lsq = *(floor(p2))
6810 vec_dest = realign_load (msq, lsq, realignment_token)
6811 indx = indx + 1;
6812 msq = lsq;
6813 } */
6815 /* If the misalignment remains the same throughout the execution of the
6816 loop, we can create the init_addr and permutation mask at the loop
6817 preheader. Otherwise, it needs to be created inside the loop.
6818 This can only occur when vectorizing memory accesses in the inner-loop
6819 nested within an outer-loop that is being vectorized. */
6821 if (nested_in_vect_loop
6822 && (TREE_INT_CST_LOW (DR_STEP (dr))
6823 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
6825 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
6826 compute_in_loop = true;
6829 if ((alignment_support_scheme == dr_explicit_realign_optimized
6830 || alignment_support_scheme == dr_explicit_realign)
6831 && !compute_in_loop)
6833 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
6834 alignment_support_scheme, NULL_TREE,
6835 &at_loop);
6836 if (alignment_support_scheme == dr_explicit_realign_optimized)
6838 phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
6839 byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
6840 size_one_node);
6843 else
6844 at_loop = loop;
6846 if (negative)
6847 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
6849 if (load_lanes_p)
6850 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
6851 else
6852 aggr_type = vectype;
6854 prev_stmt_info = NULL;
6855 for (j = 0; j < ncopies; j++)
6857 /* 1. Create the vector or array pointer update chain. */
6858 if (j == 0)
6860 bool simd_lane_access_p
6861 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
6862 if (simd_lane_access_p
6863 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
6864 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
6865 && integer_zerop (DR_OFFSET (first_dr))
6866 && integer_zerop (DR_INIT (first_dr))
6867 && alias_sets_conflict_p (get_alias_set (aggr_type),
6868 get_alias_set (DR_REF (first_dr)))
6869 && (alignment_support_scheme == dr_aligned
6870 || alignment_support_scheme == dr_unaligned_supported))
6872 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
6873 dataref_offset = build_int_cst (reference_alias_ptr_type
6874 (DR_REF (first_dr)), 0);
6875 inv_p = false;
6877 else
6878 dataref_ptr
6879 = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
6880 offset, &dummy, gsi, &ptr_incr,
6881 simd_lane_access_p, &inv_p,
6882 byte_offset);
6884 else if (dataref_offset)
6885 dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
6886 TYPE_SIZE_UNIT (aggr_type));
6887 else
6888 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
6889 TYPE_SIZE_UNIT (aggr_type));
6891 if (grouped_load || slp_perm)
6892 dr_chain.create (vec_num);
6894 if (load_lanes_p)
6896 tree vec_array;
6898 vec_array = create_vector_array (vectype, vec_num);
6900 /* Emit:
6901 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
6902 data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
6903 new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
6904 gimple_call_set_lhs (new_stmt, vec_array);
6905 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6907 /* Extract each vector into an SSA_NAME. */
6908 for (i = 0; i < vec_num; i++)
6910 new_temp = read_vector_array (stmt, gsi, scalar_dest,
6911 vec_array, i);
6912 dr_chain.quick_push (new_temp);
6915 /* Record the mapping between SSA_NAMEs and statements. */
6916 vect_record_grouped_load_vectors (stmt, dr_chain);
6918 else
6920 for (i = 0; i < vec_num; i++)
6922 if (i > 0)
6923 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
6924 stmt, NULL_TREE);
6926 /* 2. Create the vector-load in the loop. */
6927 switch (alignment_support_scheme)
6929 case dr_aligned:
6930 case dr_unaligned_supported:
6932 unsigned int align, misalign;
6934 data_ref
6935 = fold_build2 (MEM_REF, vectype, dataref_ptr,
6936 dataref_offset
6937 ? dataref_offset
6938 : build_int_cst (reference_alias_ptr_type
6939 (DR_REF (first_dr)), 0));
6940 align = TYPE_ALIGN_UNIT (vectype);
6941 if (alignment_support_scheme == dr_aligned)
6943 gcc_assert (aligned_access_p (first_dr));
6944 misalign = 0;
6946 else if (DR_MISALIGNMENT (first_dr) == -1)
6948 if (DR_VECT_AUX (first_dr)->base_element_aligned)
6949 align = TYPE_ALIGN_UNIT (elem_type);
6950 else
6951 align = (get_object_alignment (DR_REF (first_dr))
6952 / BITS_PER_UNIT);
6953 misalign = 0;
6954 TREE_TYPE (data_ref)
6955 = build_aligned_type (TREE_TYPE (data_ref),
6956 align * BITS_PER_UNIT);
6958 else
6960 TREE_TYPE (data_ref)
6961 = build_aligned_type (TREE_TYPE (data_ref),
6962 TYPE_ALIGN (elem_type));
6963 misalign = DR_MISALIGNMENT (first_dr);
6965 if (dataref_offset == NULL_TREE
6966 && TREE_CODE (dataref_ptr) == SSA_NAME)
6967 set_ptr_info_alignment (get_ptr_info (dataref_ptr),
6968 align, misalign);
6969 break;
6971 case dr_explicit_realign:
6973 tree ptr, bump;
6975 tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));
6977 if (compute_in_loop)
6978 msq = vect_setup_realignment (first_stmt, gsi,
6979 &realignment_token,
6980 dr_explicit_realign,
6981 dataref_ptr, NULL);
6983 if (TREE_CODE (dataref_ptr) == SSA_NAME)
6984 ptr = copy_ssa_name (dataref_ptr);
6985 else
6986 ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
6987 new_stmt = gimple_build_assign
6988 (ptr, BIT_AND_EXPR, dataref_ptr,
6989 build_int_cst
6990 (TREE_TYPE (dataref_ptr),
6991 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
6992 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6993 data_ref
6994 = build2 (MEM_REF, vectype, ptr,
6995 build_int_cst (reference_alias_ptr_type
6996 (DR_REF (first_dr)), 0));
6997 vec_dest = vect_create_destination_var (scalar_dest,
6998 vectype);
6999 new_stmt = gimple_build_assign (vec_dest, data_ref);
7000 new_temp = make_ssa_name (vec_dest, new_stmt);
7001 gimple_assign_set_lhs (new_stmt, new_temp);
7002 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
7003 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
7004 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7005 msq = new_temp;
7007 bump = size_binop (MULT_EXPR, vs,
7008 TYPE_SIZE_UNIT (elem_type));
7009 bump = size_binop (MINUS_EXPR, bump, size_one_node);
7010 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
7011 new_stmt = gimple_build_assign
7012 (NULL_TREE, BIT_AND_EXPR, ptr,
7013 build_int_cst
7014 (TREE_TYPE (ptr),
7015 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
7016 ptr = copy_ssa_name (ptr, new_stmt);
7017 gimple_assign_set_lhs (new_stmt, ptr);
7018 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7019 data_ref
7020 = build2 (MEM_REF, vectype, ptr,
7021 build_int_cst (reference_alias_ptr_type
7022 (DR_REF (first_dr)), 0));
7023 break;
7025 case dr_explicit_realign_optimized:
7026 if (TREE_CODE (dataref_ptr) == SSA_NAME)
7027 new_temp = copy_ssa_name (dataref_ptr);
7028 else
7029 new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
7030 new_stmt = gimple_build_assign
7031 (new_temp, BIT_AND_EXPR, dataref_ptr,
7032 build_int_cst
7033 (TREE_TYPE (dataref_ptr),
7034 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
7035 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7036 data_ref
7037 = build2 (MEM_REF, vectype, new_temp,
7038 build_int_cst (reference_alias_ptr_type
7039 (DR_REF (first_dr)), 0));
7040 break;
7041 default:
7042 gcc_unreachable ();
7044 vec_dest = vect_create_destination_var (scalar_dest, vectype);
7045 new_stmt = gimple_build_assign (vec_dest, data_ref);
7046 new_temp = make_ssa_name (vec_dest, new_stmt);
7047 gimple_assign_set_lhs (new_stmt, new_temp);
7048 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7050 /* 3. Handle explicit realignment if necessary/supported.
7051 Create in loop:
7052 vec_dest = realign_load (msq, lsq, realignment_token) */
7053 if (alignment_support_scheme == dr_explicit_realign_optimized
7054 || alignment_support_scheme == dr_explicit_realign)
7056 lsq = gimple_assign_lhs (new_stmt);
7057 if (!realignment_token)
7058 realignment_token = dataref_ptr;
7059 vec_dest = vect_create_destination_var (scalar_dest, vectype);
7060 new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
7061 msq, lsq, realignment_token);
7062 new_temp = make_ssa_name (vec_dest, new_stmt);
7063 gimple_assign_set_lhs (new_stmt, new_temp);
7064 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7066 if (alignment_support_scheme == dr_explicit_realign_optimized)
7068 gcc_assert (phi);
7069 if (i == vec_num - 1 && j == ncopies - 1)
7070 add_phi_arg (phi, lsq,
7071 loop_latch_edge (containing_loop),
7072 UNKNOWN_LOCATION);
7073 msq = lsq;
7077 /* 4. Handle invariant-load. */
7078 if (inv_p && !bb_vinfo)
7080 gcc_assert (!grouped_load);
7081 /* If we have versioned for aliasing or the loop doesn't
7082 have any data dependencies that would preclude this,
7083 then we are sure this is a loop invariant load and
7084 thus we can insert it on the preheader edge. */
7085 if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
7086 && !nested_in_vect_loop
7087 && hoist_defs_of_uses (stmt, loop))
7089 if (dump_enabled_p ())
7091 dump_printf_loc (MSG_NOTE, vect_location,
7092 "hoisting out of the vectorized "
7093 "loop: ");
7094 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7096 tree tem = copy_ssa_name (scalar_dest);
7097 gsi_insert_on_edge_immediate
7098 (loop_preheader_edge (loop),
7099 gimple_build_assign (tem,
7100 unshare_expr
7101 (gimple_assign_rhs1 (stmt))));
7102 new_temp = vect_init_vector (stmt, tem, vectype, NULL);
7104 else
7106 gimple_stmt_iterator gsi2 = *gsi;
7107 gsi_next (&gsi2);
7108 new_temp = vect_init_vector (stmt, scalar_dest,
7109 vectype, &gsi2);
7111 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7112 set_vinfo_for_stmt (new_stmt,
7113 new_stmt_vec_info (new_stmt, vinfo));
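/* For a negative step the loaded vector is reversed with a VEC_PERM_EXPR
   whose selector is {nunits-1, ..., 1, 0}, e.g. {3, 2, 1, 0} for a
   four-element vector (illustrative note, not from the original sources).  */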
7116 if (negative)
7118 tree perm_mask = perm_mask_for_reverse (vectype);
7119 new_temp = permute_vec_elements (new_temp, new_temp,
7120 perm_mask, stmt, gsi);
7121 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7124 /* Collect vector loads and later create their permutation in
7125 vect_transform_grouped_load (). */
7126 if (grouped_load || slp_perm)
7127 dr_chain.quick_push (new_temp);
7129 /* Store vector loads in the corresponding SLP_NODE. */
7130 if (slp && !slp_perm)
7131 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7133 /* Bump the vector pointer to account for a gap or for excess
7134 elements loaded for a permuted SLP load. */
7135 if (group_gap_adj != 0)
7137 bool ovf;
7138 tree bump
7139 = wide_int_to_tree (sizetype,
7140 wi::smul (TYPE_SIZE_UNIT (elem_type),
7141 group_gap_adj, &ovf));
7142 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7143 stmt, bump);
7147 if (slp && !slp_perm)
7148 continue;
7150 if (slp_perm)
7152 if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
7153 slp_node_instance, false))
7155 dr_chain.release ();
7156 return false;
7159 else
7161 if (grouped_load)
7163 if (!load_lanes_p)
7164 vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
7165 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
7167 else
7169 if (j == 0)
7170 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7171 else
7172 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7173 prev_stmt_info = vinfo_for_stmt (new_stmt);
7176 dr_chain.release ();
7179 return true;
7182 /* Function vect_is_simple_cond.
7184 Input:
7185 LOOP - the loop that is being vectorized.
7186 COND - Condition that is checked for simple use.
7188 Output:
7189 *COMP_VECTYPE - the vector type for the comparison.
7191 Returns whether a COND can be vectorized. Checks whether
7192 condition operands are supportable using vect_is_simple_use. */
7194 static bool
7195 vect_is_simple_cond (tree cond, vec_info *vinfo, tree *comp_vectype)
7197 tree lhs, rhs;
7198 enum vect_def_type dt;
7199 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
7201 /* Mask case. */
7202 if (TREE_CODE (cond) == SSA_NAME
7203 && TREE_CODE (TREE_TYPE (cond)) == BOOLEAN_TYPE)
7205 gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (cond);
7206 if (!vect_is_simple_use (cond, vinfo, &lhs_def_stmt,
7207 &dt, comp_vectype)
7208 || !*comp_vectype
7209 || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype))
7210 return false;
7211 return true;
7214 if (!COMPARISON_CLASS_P (cond))
7215 return false;
7217 lhs = TREE_OPERAND (cond, 0);
7218 rhs = TREE_OPERAND (cond, 1);
7220 if (TREE_CODE (lhs) == SSA_NAME)
7222 gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
7223 if (!vect_is_simple_use (lhs, vinfo, &lhs_def_stmt, &dt, &vectype1))
7224 return false;
7226 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
7227 && TREE_CODE (lhs) != FIXED_CST)
7228 return false;
7230 if (TREE_CODE (rhs) == SSA_NAME)
7232 gimple *rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
7233 if (!vect_is_simple_use (rhs, vinfo, &rhs_def_stmt, &dt, &vectype2))
7234 return false;
7236 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
7237 && TREE_CODE (rhs) != FIXED_CST)
7238 return false;
7240 *comp_vectype = vectype1 ? vectype1 : vectype2;
7241 return true;
7244 /* vectorizable_condition.
7246 Check if STMT is a conditional modify expression that can be vectorized.
7247 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
7248 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
7249 at GSI.
7251 When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
7252 to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
7253 the else clause if it is 2).
7255 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
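/* Illustrative example (assumed, not from the original sources): the scalar
   statement

     x_5 = a_1 < b_2 ? c_3 : d_4;

   is transformed into a vector statement of the form

     vx_6 = VEC_COND_EXPR <va_1 < vb_2, vc_3, vd_4>;

   or, when the condition is already a boolean mask m_1, into

     vx_6 = VEC_COND_EXPR <vm_1, vc_3, vd_4>;  */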
7257 bool
7258 vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
7259 gimple **vec_stmt, tree reduc_def, int reduc_index,
7260 slp_tree slp_node)
7262 tree scalar_dest = NULL_TREE;
7263 tree vec_dest = NULL_TREE;
7264 tree cond_expr, then_clause, else_clause;
7265 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7266 tree comp_vectype = NULL_TREE;
7267 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
7268 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
7269 tree vec_compare, vec_cond_expr;
7270 tree new_temp;
7271 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7272 enum vect_def_type dt, dts[4];
7273 int ncopies;
7274 enum tree_code code;
7275 stmt_vec_info prev_stmt_info = NULL;
7276 int i, j;
7277 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7278 vec<tree> vec_oprnds0 = vNULL;
7279 vec<tree> vec_oprnds1 = vNULL;
7280 vec<tree> vec_oprnds2 = vNULL;
7281 vec<tree> vec_oprnds3 = vNULL;
7282 tree vec_cmp_type;
7283 bool masked = false;
7285 if (reduc_index && STMT_SLP_TYPE (stmt_info))
7286 return false;
7288 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == TREE_CODE_REDUCTION)
7290 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
7291 return false;
7293 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
7294 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
7295 && reduc_def))
7296 return false;
7298 /* FORNOW: not yet supported. */
7299 if (STMT_VINFO_LIVE_P (stmt_info))
7301 if (dump_enabled_p ())
7302 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7303 "value used after loop.\n");
7304 return false;
7308 /* Is vectorizable conditional operation? */
7309 if (!is_gimple_assign (stmt))
7310 return false;
7312 code = gimple_assign_rhs_code (stmt);
7314 if (code != COND_EXPR)
7315 return false;
7317 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7318 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
7320 if (slp_node || PURE_SLP_STMT (stmt_info))
7321 ncopies = 1;
7322 else
7323 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
7325 gcc_assert (ncopies >= 1);
7326 if (reduc_index && ncopies > 1)
7327 return false; /* FORNOW */
7329 cond_expr = gimple_assign_rhs1 (stmt);
7330 then_clause = gimple_assign_rhs2 (stmt);
7331 else_clause = gimple_assign_rhs3 (stmt);
7333 if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo, &comp_vectype)
7334 || !comp_vectype)
7335 return false;
7337 gimple *def_stmt;
7338 if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &def_stmt, &dt))
7339 return false;
7340 if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &def_stmt, &dt))
7341 return false;
7343 if (VECTOR_BOOLEAN_TYPE_P (comp_vectype))
7345 vec_cmp_type = comp_vectype;
7346 masked = true;
7348 else
7349 vec_cmp_type = build_same_sized_truth_vector_type (comp_vectype);
7350 if (vec_cmp_type == NULL_TREE)
7351 return false;
7353 if (!vec_stmt)
7355 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
7356 return expand_vec_cond_expr_p (vectype, comp_vectype);
7359 /* Transform. */
7361 if (!slp_node)
7363 vec_oprnds0.create (1);
7364 vec_oprnds1.create (1);
7365 vec_oprnds2.create (1);
7366 vec_oprnds3.create (1);
7369 /* Handle def. */
7370 scalar_dest = gimple_assign_lhs (stmt);
7371 vec_dest = vect_create_destination_var (scalar_dest, vectype);
7373 /* Handle cond expr. */
7374 for (j = 0; j < ncopies; j++)
7376 gassign *new_stmt = NULL;
7377 if (j == 0)
7379 if (slp_node)
7381 auto_vec<tree, 4> ops;
7382 auto_vec<vec<tree>, 4> vec_defs;
7384 if (masked)
7385 ops.safe_push (cond_expr);
7386 else
7388 ops.safe_push (TREE_OPERAND (cond_expr, 0));
7389 ops.safe_push (TREE_OPERAND (cond_expr, 1));
7391 ops.safe_push (then_clause);
7392 ops.safe_push (else_clause);
7393 vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
7394 vec_oprnds3 = vec_defs.pop ();
7395 vec_oprnds2 = vec_defs.pop ();
7396 if (!masked)
7397 vec_oprnds1 = vec_defs.pop ();
7398 vec_oprnds0 = vec_defs.pop ();
7400 ops.release ();
7401 vec_defs.release ();
7403 else
7405 gimple *gtemp;
7406 if (masked)
7408 vec_cond_lhs
7409 = vect_get_vec_def_for_operand (cond_expr, stmt,
7410 comp_vectype);
7411 vect_is_simple_use (cond_expr, stmt_info->vinfo,
7412 &gtemp, &dts[0]);
7414 else
7416 vec_cond_lhs =
7417 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0),
7418 stmt, comp_vectype);
7419 vect_is_simple_use (TREE_OPERAND (cond_expr, 0),
7420 loop_vinfo, &gtemp, &dts[0]);
7422 vec_cond_rhs =
7423 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
7424 stmt, comp_vectype);
7425 vect_is_simple_use (TREE_OPERAND (cond_expr, 1),
7426 loop_vinfo, &gtemp, &dts[1]);
7428 if (reduc_index == 1)
7429 vec_then_clause = reduc_def;
7430 else
7432 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
7433 stmt);
7434 vect_is_simple_use (then_clause, loop_vinfo,
7435 &gtemp, &dts[2]);
7437 if (reduc_index == 2)
7438 vec_else_clause = reduc_def;
7439 else
7441 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
7442 stmt);
7443 vect_is_simple_use (else_clause, loop_vinfo, &gtemp, &dts[3]);
7447 else
7449 vec_cond_lhs
7450 = vect_get_vec_def_for_stmt_copy (dts[0],
7451 vec_oprnds0.pop ());
7452 if (!masked)
7453 vec_cond_rhs
7454 = vect_get_vec_def_for_stmt_copy (dts[1],
7455 vec_oprnds1.pop ());
7457 vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
7458 vec_oprnds2.pop ());
7459 vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
7460 vec_oprnds3.pop ());
7463 if (!slp_node)
7465 vec_oprnds0.quick_push (vec_cond_lhs);
7466 if (!masked)
7467 vec_oprnds1.quick_push (vec_cond_rhs);
7468 vec_oprnds2.quick_push (vec_then_clause);
7469 vec_oprnds3.quick_push (vec_else_clause);
7472 /* Arguments are ready. Create the new vector stmt. */
7473 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
7475 vec_then_clause = vec_oprnds2[i];
7476 vec_else_clause = vec_oprnds3[i];
7478 if (masked)
7479 vec_compare = vec_cond_lhs;
7480 else
7482 vec_cond_rhs = vec_oprnds1[i];
7483 vec_compare = build2 (TREE_CODE (cond_expr), vec_cmp_type,
7484 vec_cond_lhs, vec_cond_rhs);
7486 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
7487 vec_compare, vec_then_clause, vec_else_clause);
7489 new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
7490 new_temp = make_ssa_name (vec_dest, new_stmt);
7491 gimple_assign_set_lhs (new_stmt, new_temp);
7492 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7493 if (slp_node)
7494 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7497 if (slp_node)
7498 continue;
7500 if (j == 0)
7501 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7502 else
7503 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7505 prev_stmt_info = vinfo_for_stmt (new_stmt);
7508 vec_oprnds0.release ();
7509 vec_oprnds1.release ();
7510 vec_oprnds2.release ();
7511 vec_oprnds3.release ();
7513 return true;
7516 /* vectorizable_comparison.
7518 Check if STMT is a comparison expression that can be vectorized.
7519 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
7520 comparison, put it in VEC_STMT, and insert it at GSI.
7522 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
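/* Illustrative example (assumed, not from the original sources): the scalar
   statement

     mask_3 = a_1 < b_2;

   where mask_3 has a boolean type is transformed into

     vmask_4 = va_1 < vb_2;

   with vmask_4 of the vector boolean (mask) type chosen for the target,
   which is what expand_vec_cmp_expr_p is queried for below.  */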
7524 bool
7525 vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
7526 gimple **vec_stmt, tree reduc_def,
7527 slp_tree slp_node)
7529 tree lhs, rhs1, rhs2;
7530 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7531 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
7532 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7533 tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE;
7534 tree new_temp;
7535 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7536 enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type};
7537 unsigned nunits;
7538 int ncopies;
7539 enum tree_code code;
7540 stmt_vec_info prev_stmt_info = NULL;
7541 int i, j;
7542 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7543 vec<tree> vec_oprnds0 = vNULL;
7544 vec<tree> vec_oprnds1 = vNULL;
7545 gimple *def_stmt;
7546 tree mask_type;
7547 tree mask;
7549 if (!VECTOR_BOOLEAN_TYPE_P (vectype))
7550 return false;
7552 mask_type = vectype;
7553 nunits = TYPE_VECTOR_SUBPARTS (vectype);
7555 if (slp_node || PURE_SLP_STMT (stmt_info))
7556 ncopies = 1;
7557 else
7558 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
7560 gcc_assert (ncopies >= 1);
7561 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
7562 return false;
7564 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
7565 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
7566 && reduc_def))
7567 return false;
7569 if (STMT_VINFO_LIVE_P (stmt_info))
7571 if (dump_enabled_p ())
7572 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7573 "value used after loop.\n");
7574 return false;
7577 if (!is_gimple_assign (stmt))
7578 return false;
7580 code = gimple_assign_rhs_code (stmt);
7582 if (TREE_CODE_CLASS (code) != tcc_comparison)
7583 return false;
7585 rhs1 = gimple_assign_rhs1 (stmt);
7586 rhs2 = gimple_assign_rhs2 (stmt);
7588 if (!vect_is_simple_use (rhs1, stmt_info->vinfo, &def_stmt,
7589 &dts[0], &vectype1))
7590 return false;
7592 if (!vect_is_simple_use (rhs2, stmt_info->vinfo, &def_stmt,
7593 &dts[1], &vectype2))
7594 return false;
7596 if (vectype1 && vectype2
7597 && TYPE_VECTOR_SUBPARTS (vectype1) != TYPE_VECTOR_SUBPARTS (vectype2))
7598 return false;
7600 vectype = vectype1 ? vectype1 : vectype2;
7602 /* Invariant comparison. */
7603 if (!vectype)
7605 vectype = build_vector_type (TREE_TYPE (rhs1), nunits);
7606 if (tree_to_shwi (TYPE_SIZE_UNIT (vectype)) != current_vector_size)
7607 return false;
7609 else if (nunits != TYPE_VECTOR_SUBPARTS (vectype))
7610 return false;
7612 if (!vec_stmt)
7614 STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type;
7615 vect_model_simple_cost (stmt_info, ncopies, dts, NULL, NULL);
7616 return expand_vec_cmp_expr_p (vectype, mask_type);
7619 /* Transform. */
7620 if (!slp_node)
7622 vec_oprnds0.create (1);
7623 vec_oprnds1.create (1);
7626 /* Handle def. */
7627 lhs = gimple_assign_lhs (stmt);
7628 mask = vect_create_destination_var (lhs, mask_type);
7630 /* Handle cmp expr. */
7631 for (j = 0; j < ncopies; j++)
7633 gassign *new_stmt = NULL;
7634 if (j == 0)
7636 if (slp_node)
7638 auto_vec<tree, 2> ops;
7639 auto_vec<vec<tree>, 2> vec_defs;
7641 ops.safe_push (rhs1);
7642 ops.safe_push (rhs2);
7643 vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
7644 vec_oprnds1 = vec_defs.pop ();
7645 vec_oprnds0 = vec_defs.pop ();
7647 else
7649 vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt, vectype);
7650 vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt, vectype);
7653 else
7655 vec_rhs1 = vect_get_vec_def_for_stmt_copy (dts[0],
7656 vec_oprnds0.pop ());
7657 vec_rhs2 = vect_get_vec_def_for_stmt_copy (dts[1],
7658 vec_oprnds1.pop ());
7661 if (!slp_node)
7663 vec_oprnds0.quick_push (vec_rhs1);
7664 vec_oprnds1.quick_push (vec_rhs2);
7667 /* Arguments are ready. Create the new vector stmt. */
7668 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_rhs1)
7670 vec_rhs2 = vec_oprnds1[i];
7672 new_temp = make_ssa_name (mask);
7673 new_stmt = gimple_build_assign (new_temp, code, vec_rhs1, vec_rhs2);
7674 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7675 if (slp_node)
7676 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7679 if (slp_node)
7680 continue;
7682 if (j == 0)
7683 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7684 else
7685 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7687 prev_stmt_info = vinfo_for_stmt (new_stmt);
7690 vec_oprnds0.release ();
7691 vec_oprnds1.release ();
7693 return true;
7696 /* Make sure the statement is vectorizable. */
7698 bool
7699 vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node)
7701 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7702 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7703 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
7704 bool ok;
7705 tree scalar_type, vectype;
7706 gimple *pattern_stmt;
7707 gimple_seq pattern_def_seq;
7709 if (dump_enabled_p ())
7711 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
7712 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7715 if (gimple_has_volatile_ops (stmt))
7717 if (dump_enabled_p ())
7718 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7719 "not vectorized: stmt has volatile operands\n");
7721 return false;
7724 /* Skip stmts that do not need to be vectorized. In loops this is expected
7725 to include:
7726 - the COND_EXPR which is the loop exit condition
7727 - any LABEL_EXPRs in the loop
7728 - computations that are used only for array indexing or loop control.
7729 In basic blocks we only analyze statements that are a part of some SLP
7730 instance; therefore, all the statements are relevant.
7732 A pattern statement needs to be analyzed instead of the original statement
7733 if the original statement is not relevant. Otherwise, we analyze both
7734 statements. In basic blocks we are called from some SLP instance
7735 traversal; there we don't analyze pattern stmts instead of the original
7736 ones, since the pattern stmts will already be part of an SLP instance. */
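/* For instance (illustrative only): in a loop like

     for (i = 0; i < n; i++)
       a[i] = b[i] + 1;

   the induction-variable increment and the loop-exit test are used only
   for loop control and array indexing, so they are not vectorized
   themselves; only the statement computing a[i] is relevant here.  */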
7738 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
7739 if (!STMT_VINFO_RELEVANT_P (stmt_info)
7740 && !STMT_VINFO_LIVE_P (stmt_info))
7742 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
7743 && pattern_stmt
7744 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
7745 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
7747 /* Analyze PATTERN_STMT instead of the original stmt. */
7748 stmt = pattern_stmt;
7749 stmt_info = vinfo_for_stmt (pattern_stmt);
7750 if (dump_enabled_p ())
7752 dump_printf_loc (MSG_NOTE, vect_location,
7753 "==> examining pattern statement: ");
7754 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7757 else
7759 if (dump_enabled_p ())
7760 dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");
7762 return true;
7765 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
7766 && node == NULL
7767 && pattern_stmt
7768 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
7769 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
7771 /* Analyze PATTERN_STMT too. */
7772 if (dump_enabled_p ())
7774 dump_printf_loc (MSG_NOTE, vect_location,
7775 "==> examining pattern statement: ");
7776 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7779 if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node))
7780 return false;
7783 if (is_pattern_stmt_p (stmt_info)
7784 && node == NULL
7785 && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
7787 gimple_stmt_iterator si;
7789 for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
7791 gimple *pattern_def_stmt = gsi_stmt (si);
7792 if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
7793 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
7795 /* Analyze def stmt of STMT if it's a pattern stmt. */
7796 if (dump_enabled_p ())
7798 dump_printf_loc (MSG_NOTE, vect_location,
7799 "==> examining pattern def statement: ");
7800 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
7803 if (!vect_analyze_stmt (pattern_def_stmt,
7804 need_to_vectorize, node))
7805 return false;
7810 switch (STMT_VINFO_DEF_TYPE (stmt_info))
7812 case vect_internal_def:
7813 break;
7815 case vect_reduction_def:
7816 case vect_nested_cycle:
7817 gcc_assert (!bb_vinfo
7818 && (relevance == vect_used_in_outer
7819 || relevance == vect_used_in_outer_by_reduction
7820 || relevance == vect_used_by_reduction
7821 || relevance == vect_unused_in_scope));
7822 break;
7824 case vect_induction_def:
7825 case vect_constant_def:
7826 case vect_external_def:
7827 case vect_unknown_def_type:
7828 default:
7829 gcc_unreachable ();
7832 if (bb_vinfo)
7834 gcc_assert (PURE_SLP_STMT (stmt_info));
7836 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
7837 if (dump_enabled_p ())
7839 dump_printf_loc (MSG_NOTE, vect_location,
7840 "get vectype for scalar type: ");
7841 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
7842 dump_printf (MSG_NOTE, "\n");
7845 vectype = get_vectype_for_scalar_type (scalar_type);
7846 if (!vectype)
7848 if (dump_enabled_p ())
7850 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7851 "not SLPed: unsupported data-type ");
7852 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
7853 scalar_type);
7854 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
7856 return false;
7859 if (dump_enabled_p ())
7861 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
7862 dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
7863 dump_printf (MSG_NOTE, "\n");
7866 STMT_VINFO_VECTYPE (stmt_info) = vectype;
7869 if (STMT_VINFO_RELEVANT_P (stmt_info))
7871 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
7872 gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
7873 || (is_gimple_call (stmt)
7874 && gimple_call_lhs (stmt) == NULL_TREE));
7875 *need_to_vectorize = true;
7878 if (PURE_SLP_STMT (stmt_info) && !node)
7880 dump_printf_loc (MSG_NOTE, vect_location,
7881 "handled only by SLP analysis\n");
7882 return true;
7885 ok = true;
7886 if (!bb_vinfo
7887 && (STMT_VINFO_RELEVANT_P (stmt_info)
7888 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
7889 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
7890 || vectorizable_conversion (stmt, NULL, NULL, node)
7891 || vectorizable_shift (stmt, NULL, NULL, node)
7892 || vectorizable_operation (stmt, NULL, NULL, node)
7893 || vectorizable_assignment (stmt, NULL, NULL, node)
7894 || vectorizable_load (stmt, NULL, NULL, node, NULL)
7895 || vectorizable_call (stmt, NULL, NULL, node)
7896 || vectorizable_store (stmt, NULL, NULL, node)
7897 || vectorizable_reduction (stmt, NULL, NULL, node)
7898 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
7899 || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
7900 else
7902 if (bb_vinfo)
7903 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
7904 || vectorizable_conversion (stmt, NULL, NULL, node)
7905 || vectorizable_shift (stmt, NULL, NULL, node)
7906 || vectorizable_operation (stmt, NULL, NULL, node)
7907 || vectorizable_assignment (stmt, NULL, NULL, node)
7908 || vectorizable_load (stmt, NULL, NULL, node, NULL)
7909 || vectorizable_call (stmt, NULL, NULL, node)
7910 || vectorizable_store (stmt, NULL, NULL, node)
7911 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
7912 || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
7915 if (!ok)
7917 if (dump_enabled_p ())
7919 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7920 "not vectorized: relevant stmt not ");
7921 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
7922 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
7925 return false;
7928 if (bb_vinfo)
7929 return true;
7931 /* Stmts that are (also) "live" (i.e., used outside of the loop)
7932 need extra handling, except for vectorizable reductions. */
7933 if (STMT_VINFO_LIVE_P (stmt_info)
7934 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
7935 ok = vectorizable_live_operation (stmt, NULL, NULL);
7937 if (!ok)
7939 if (dump_enabled_p ())
7941 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7942 "not vectorized: live stmt not ");
7943 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
7944 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
7947 return false;
7950 return true;
7954 /* Function vect_transform_stmt.
7956 Create a vectorized stmt to replace STMT, and insert it at BSI. */
7958 bool
7959 vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
7960 bool *grouped_store, slp_tree slp_node,
7961 slp_instance slp_node_instance)
7963 bool is_store = false;
7964 gimple *vec_stmt = NULL;
7965 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7966 bool done;
7968 gimple *old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
7970 switch (STMT_VINFO_TYPE (stmt_info))
7972 case type_demotion_vec_info_type:
7973 case type_promotion_vec_info_type:
7974 case type_conversion_vec_info_type:
7975 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
7976 gcc_assert (done);
7977 break;
7979 case induc_vec_info_type:
7980 gcc_assert (!slp_node);
7981 done = vectorizable_induction (stmt, gsi, &vec_stmt);
7982 gcc_assert (done);
7983 break;
7985 case shift_vec_info_type:
7986 done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
7987 gcc_assert (done);
7988 break;
7990 case op_vec_info_type:
7991 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
7992 gcc_assert (done);
7993 break;
7995 case assignment_vec_info_type:
7996 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
7997 gcc_assert (done);
7998 break;
8000 case load_vec_info_type:
8001 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
8002 slp_node_instance);
8003 gcc_assert (done);
8004 break;
8006 case store_vec_info_type:
8007 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
8008 gcc_assert (done);
8009 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
8011 /* In case of interleaving, the whole chain is vectorized when the
8012 last store in the chain is reached. Store stmts before the last
8013 one are skipped, and their vec_stmt_info shouldn't be freed
8014 meanwhile. */
8015 *grouped_store = true;
8016 if (STMT_VINFO_VEC_STMT (stmt_info))
8017 is_store = true;
8019 else
8020 is_store = true;
8021 break;
8023 case condition_vec_info_type:
8024 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
8025 gcc_assert (done);
8026 break;
8028 case comparison_vec_info_type:
8029 done = vectorizable_comparison (stmt, gsi, &vec_stmt, NULL, slp_node);
8030 gcc_assert (done);
8031 break;
8033 case call_vec_info_type:
8034 done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
8035 stmt = gsi_stmt (*gsi);
8036 if (is_gimple_call (stmt)
8037 && gimple_call_internal_p (stmt)
8038 && gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
8039 is_store = true;
8040 break;
8042 case call_simd_clone_vec_info_type:
8043 done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
8044 stmt = gsi_stmt (*gsi);
8045 break;
8047 case reduc_vec_info_type:
8048 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
8049 gcc_assert (done);
8050 break;
8052 default:
8053 if (!STMT_VINFO_LIVE_P (stmt_info))
8055 if (dump_enabled_p ())
8056 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8057 "stmt not supported.\n");
8058 gcc_unreachable ();
8062 /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
8063 This would break hybrid SLP vectorization. */
8064 if (slp_node)
8065 gcc_assert (!vec_stmt
8066 && STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt);
8068 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
8069 is being vectorized, but outside the immediately enclosing loop. */
8070 if (vec_stmt
8071 && STMT_VINFO_LOOP_VINFO (stmt_info)
8072 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
8073 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
8074 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
8075 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
8076 || STMT_VINFO_RELEVANT (stmt_info) ==
8077 vect_used_in_outer_by_reduction))
8079 struct loop *innerloop = LOOP_VINFO_LOOP (
8080 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
8081 imm_use_iterator imm_iter;
8082 use_operand_p use_p;
8083 tree scalar_dest;
8084 gimple *exit_phi;
8086 if (dump_enabled_p ())
8087 dump_printf_loc (MSG_NOTE, vect_location,
8088 "Record the vdef for outer-loop vectorization.\n");
8090 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
8091 (to be used when vectorizing outer-loop stmts that use the DEF of
8092 STMT). */
8093 if (gimple_code (stmt) == GIMPLE_PHI)
8094 scalar_dest = PHI_RESULT (stmt);
8095 else
8096 scalar_dest = gimple_assign_lhs (stmt);
8098 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
8100 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
8102 exit_phi = USE_STMT (use_p);
8103 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
8108 /* Handle stmts whose DEF is used outside the loop-nest that is
8109 being vectorized. */
8110 if (STMT_VINFO_LIVE_P (stmt_info)
8111 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
8113 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
8114 gcc_assert (done);
8117 if (vec_stmt)
8118 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
8120 return is_store;
8124 /* Remove a group of stores (for SLP or interleaving) and free their
8125 stmt_vec_info. */
8127 void
8128 vect_remove_stores (gimple *first_stmt)
8130 gimple *next = first_stmt;
8131 gimple *tmp;
8132 gimple_stmt_iterator next_si;
8134 while (next)
8136 stmt_vec_info stmt_info = vinfo_for_stmt (next);
8138 tmp = GROUP_NEXT_ELEMENT (stmt_info);
8139 if (is_pattern_stmt_p (stmt_info))
8140 next = STMT_VINFO_RELATED_STMT (stmt_info);
8141 /* Free the attached stmt_vec_info and remove the stmt. */
8142 next_si = gsi_for_stmt (next);
8143 unlink_stmt_vdef (next);
8144 gsi_remove (&next_si, true);
8145 release_defs (next);
8146 free_stmt_vec_info (next);
8147 next = tmp;
8152 /* Function new_stmt_vec_info.
8154 Create and initialize a new stmt_vec_info struct for STMT. */
8156 stmt_vec_info
8157 new_stmt_vec_info (gimple *stmt, vec_info *vinfo)
8159 stmt_vec_info res;
8160 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
8162 STMT_VINFO_TYPE (res) = undef_vec_info_type;
8163 STMT_VINFO_STMT (res) = stmt;
8164 res->vinfo = vinfo;
8165 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
8166 STMT_VINFO_LIVE_P (res) = false;
8167 STMT_VINFO_VECTYPE (res) = NULL;
8168 STMT_VINFO_VEC_STMT (res) = NULL;
8169 STMT_VINFO_VECTORIZABLE (res) = true;
8170 STMT_VINFO_IN_PATTERN_P (res) = false;
8171 STMT_VINFO_RELATED_STMT (res) = NULL;
8172 STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
8173 STMT_VINFO_DATA_REF (res) = NULL;
8174 STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION;
8176 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
8177 STMT_VINFO_DR_OFFSET (res) = NULL;
8178 STMT_VINFO_DR_INIT (res) = NULL;
8179 STMT_VINFO_DR_STEP (res) = NULL;
8180 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
8182 if (gimple_code (stmt) == GIMPLE_PHI
8183 && is_loop_header_bb_p (gimple_bb (stmt)))
8184 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
8185 else
8186 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
8188 STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
8189 STMT_SLP_TYPE (res) = loop_vect;
8190 GROUP_FIRST_ELEMENT (res) = NULL;
8191 GROUP_NEXT_ELEMENT (res) = NULL;
8192 GROUP_SIZE (res) = 0;
8193 GROUP_STORE_COUNT (res) = 0;
8194 GROUP_GAP (res) = 0;
8195 GROUP_SAME_DR_STMT (res) = NULL;
8197 return res;
8201 /* Create the vector holding stmt_vec_info entries. */
8203 void
8204 init_stmt_vec_info_vec (void)
8206 gcc_assert (!stmt_vec_info_vec.exists ());
8207 stmt_vec_info_vec.create (50);
8211 /* Free the vector of stmt_vec_info entries. */
8213 void
8214 free_stmt_vec_info_vec (void)
8216 unsigned int i;
8217 stmt_vec_info info;
8218 FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
8219 if (info != NULL)
8220 free_stmt_vec_info (STMT_VINFO_STMT (info));
8221 gcc_assert (stmt_vec_info_vec.exists ());
8222 stmt_vec_info_vec.release ();
8226 /* Free stmt vectorization related info. */
8228 void
8229 free_stmt_vec_info (gimple *stmt)
8231 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8233 if (!stmt_info)
8234 return;
8236 /* Check if this statement has a related "pattern stmt"
8237 (introduced by the vectorizer during the pattern recognition
8238 pass). Free the pattern's stmt_vec_info and its def stmts'
8239 stmt_vec_infos too. */
8240 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
8242 stmt_vec_info patt_info
8243 = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
8244 if (patt_info)
8246 gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
8247 gimple *patt_stmt = STMT_VINFO_STMT (patt_info);
8248 gimple_set_bb (patt_stmt, NULL);
8249 tree lhs = gimple_get_lhs (patt_stmt);
8250 if (lhs && TREE_CODE (lhs) == SSA_NAME)
8251 release_ssa_name (lhs);
8252 if (seq)
8254 gimple_stmt_iterator si;
8255 for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
8257 gimple *seq_stmt = gsi_stmt (si);
8258 gimple_set_bb (seq_stmt, NULL);
8259 lhs = gimple_get_lhs (seq_stmt);
8260 if (lhs && TREE_CODE (lhs) == SSA_NAME)
8261 release_ssa_name (lhs);
8262 free_stmt_vec_info (seq_stmt);
8265 free_stmt_vec_info (patt_stmt);
8269 STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
8270 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
8271 set_vinfo_for_stmt (stmt, NULL);
8272 free (stmt_info);
8276 /* Function get_vectype_for_scalar_type_and_size.
8278 Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
8279 by the target. */
8281 static tree
8282 get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
8284 machine_mode inner_mode = TYPE_MODE (scalar_type);
8285 machine_mode simd_mode;
8286 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
8287 int nunits;
8288 tree vectype;
8290 if (nbytes == 0)
8291 return NULL_TREE;
8293 if (GET_MODE_CLASS (inner_mode) != MODE_INT
8294 && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
8295 return NULL_TREE;
8297 /* For vector types of elements whose mode precision doesn't
8298 match their type's precision we use an element type of mode
8299 precision. The vectorization routines will have to make sure
8300 they support the proper result truncation/extension.
8301 We also make sure to build vector types with INTEGER_TYPE
8302 component type only. */
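/* For example (illustrative): a 3-bit unsigned bit-field has QImode but
   TYPE_PRECISION 3, so an 8-bit unsigned INTEGER_TYPE would be built and
   used as the element type instead.  */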
8303 if (INTEGRAL_TYPE_P (scalar_type)
8304 && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
8305 || TREE_CODE (scalar_type) != INTEGER_TYPE))
8306 scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
8307 TYPE_UNSIGNED (scalar_type));
8309 /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
8310 When the component mode passes the above test, simply use a type
8311 corresponding to that mode. The theory is that any use that
8312 would cause problems with this will disable vectorization anyway. */
8313 else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
8314 && !INTEGRAL_TYPE_P (scalar_type))
8315 scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);
8317 /* We can't build a vector type of elements with alignment bigger than
8318 their size. */
8319 else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
8320 scalar_type = lang_hooks.types.type_for_mode (inner_mode,
8321 TYPE_UNSIGNED (scalar_type));
8323 /* If we fell back to using the mode, fail if there was
8324 no scalar type for it. */
8325 if (scalar_type == NULL_TREE)
8326 return NULL_TREE;
8328 /* If no size was supplied, use the mode the target prefers. Otherwise
8329 look up a vector mode of the specified size. */
8330 if (size == 0)
8331 simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
8332 else
8333 simd_mode = mode_for_vector (inner_mode, size / nbytes);
8334 nunits = GET_MODE_SIZE (simd_mode) / nbytes;
8335 if (nunits <= 1)
8336 return NULL_TREE;
8338 vectype = build_vector_type (scalar_type, nunits);
8340 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
8341 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
8342 return NULL_TREE;
8344 return vectype;
8347 unsigned int current_vector_size;
8349 /* Function get_vectype_for_scalar_type.
8351 Returns the vector type corresponding to SCALAR_TYPE as supported
8352 by the target. */
8354 tree
8355 get_vectype_for_scalar_type (tree scalar_type)
8357 tree vectype;
8358 vectype = get_vectype_for_scalar_type_and_size (scalar_type,
8359 current_vector_size);
8360 if (vectype
8361 && current_vector_size == 0)
8362 current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
8363 return vectype;
8366 /* Function get_mask_type_for_scalar_type.
8368 Returns the mask type corresponding to a result of comparison
8369 of vectors of the specified SCALAR_TYPE as supported by the target. */
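/* Illustrative example (sizes hypothetical): with a current_vector_size of
   16 bytes, comparing two vectors of 'int' yields four results per vector,
   so the mask type describes a 4-element boolean vector; its exact layout
   and mode are chosen by the target via build_truth_vector_type.  */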
8371 tree
8372 get_mask_type_for_scalar_type (tree scalar_type)
8374 tree vectype = get_vectype_for_scalar_type (scalar_type);
8376 if (!vectype)
8377 return NULL;
8379 return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype),
8380 current_vector_size);
8383 /* Function get_same_sized_vectype
8385 Returns a vector type corresponding to SCALAR_TYPE with the same size
8386 as VECTOR_TYPE, if supported by the target. */
8388 tree
8389 get_same_sized_vectype (tree scalar_type, tree vector_type)
8391 if (TREE_CODE (scalar_type) == BOOLEAN_TYPE)
8392 return build_same_sized_truth_vector_type (vector_type);
8394 return get_vectype_for_scalar_type_and_size
8395 (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
8398 /* Function vect_is_simple_use.
8400 Input:
8401 VINFO - the vect info of the loop or basic block that is being vectorized.
8402 OPERAND - operand in the loop or bb.
8403 Output:
8404 DEF_STMT - the defining stmt in case OPERAND is an SSA_NAME.
8405 DT - the type of definition
8407 Returns whether a stmt with OPERAND can be vectorized.
8408 For loops, supportable operands are constants, loop invariants, and operands
8409 that are defined by the current iteration of the loop. Unsupportable
8410 operands are those that are defined by a previous iteration of the loop (as
8411 is the case in reduction/induction computations).
8412 For basic blocks, supportable operands are constants and bb invariants.
8413 For now, operands defined outside the basic block are not supported. */
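/* Rough illustration (hypothetical loop): when vectorizing

     for (i = 0; i < n; i++)
       a[i] = b[i] * c + 4;

   the load of b[i] is a vect_internal_def (defined inside the region being
   vectorized), the loop-invariant 'c' is a vect_external_def, and the
   literal 4 is a vect_constant_def.  */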
8415 bool
8416 vect_is_simple_use (tree operand, vec_info *vinfo,
8417 gimple **def_stmt, enum vect_def_type *dt)
8419 *def_stmt = NULL;
8420 *dt = vect_unknown_def_type;
8422 if (dump_enabled_p ())
8424 dump_printf_loc (MSG_NOTE, vect_location,
8425 "vect_is_simple_use: operand ");
8426 dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
8427 dump_printf (MSG_NOTE, "\n");
8430 if (CONSTANT_CLASS_P (operand))
8432 *dt = vect_constant_def;
8433 return true;
8436 if (is_gimple_min_invariant (operand))
8438 *dt = vect_external_def;
8439 return true;
8442 if (TREE_CODE (operand) != SSA_NAME)
8444 if (dump_enabled_p ())
8445 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8446 "not ssa-name.\n");
8447 return false;
8450 if (SSA_NAME_IS_DEFAULT_DEF (operand))
8452 *dt = vect_external_def;
8453 return true;
8456 *def_stmt = SSA_NAME_DEF_STMT (operand);
8457 if (dump_enabled_p ())
8459 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
8460 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
8463 if (! vect_stmt_in_region_p (vinfo, *def_stmt))
8464 *dt = vect_external_def;
8465 else
8467 stmt_vec_info stmt_vinfo = vinfo_for_stmt (*def_stmt);
8468 if (is_a <bb_vec_info> (vinfo) && !STMT_VINFO_VECTORIZABLE (stmt_vinfo))
8469 *dt = vect_external_def;
8470 else
8471 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
8474 if (dump_enabled_p ())
8476 dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
8477 switch (*dt)
8479 case vect_uninitialized_def:
8480 dump_printf (MSG_NOTE, "uninitialized\n");
8481 break;
8482 case vect_constant_def:
8483 dump_printf (MSG_NOTE, "constant\n");
8484 break;
8485 case vect_external_def:
8486 dump_printf (MSG_NOTE, "external\n");
8487 break;
8488 case vect_internal_def:
8489 dump_printf (MSG_NOTE, "internal\n");
8490 break;
8491 case vect_induction_def:
8492 dump_printf (MSG_NOTE, "induction\n");
8493 break;
8494 case vect_reduction_def:
8495 dump_printf (MSG_NOTE, "reduction\n");
8496 break;
8497 case vect_double_reduction_def:
8498 dump_printf (MSG_NOTE, "double reduction\n");
8499 break;
8500 case vect_nested_cycle:
8501 dump_printf (MSG_NOTE, "nested cycle\n");
8502 break;
8503 case vect_unknown_def_type:
8504 dump_printf (MSG_NOTE, "unknown\n");
8505 break;
8509 if (*dt == vect_unknown_def_type)
8511 if (dump_enabled_p ())
8512 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8513 "Unsupported pattern.\n");
8514 return false;
8517 switch (gimple_code (*def_stmt))
8519 case GIMPLE_PHI:
8520 case GIMPLE_ASSIGN:
8521 case GIMPLE_CALL:
8522 break;
8523 default:
8524 if (dump_enabled_p ())
8525 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8526 "unsupported defining stmt:\n");
8527 return false;
8530 return true;
8533 /* Function vect_is_simple_use.
8535 Same as vect_is_simple_use but also determines the vector operand
8536 type of OPERAND and stores it to *VECTYPE. If the definition of
8537 OPERAND is vect_uninitialized_def, vect_constant_def or
8538 vect_external_def, *VECTYPE will be set to NULL_TREE and the caller
8539 is responsible for computing the best suited vector type for the
8540 scalar operand. */
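/* A minimal usage sketch (variable names hypothetical):

     gimple *def_stmt;
     enum vect_def_type dt;
     tree op_vectype;

     if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt, &op_vectype))
       return false;
     if (op_vectype == NULL_TREE)
       op_vectype = get_vectype_for_scalar_type (TREE_TYPE (op));

   i.e. for constant/external defs the caller picks a suitable vector type
   itself, as described above.  */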
8542 bool
8543 vect_is_simple_use (tree operand, vec_info *vinfo,
8544 gimple **def_stmt, enum vect_def_type *dt, tree *vectype)
8546 if (!vect_is_simple_use (operand, vinfo, def_stmt, dt))
8547 return false;
8549 /* Now get a vector type if the def is internal, otherwise supply
8550 NULL_TREE and leave it up to the caller to figure out a proper
8551 type for the use stmt. */
8552 if (*dt == vect_internal_def
8553 || *dt == vect_induction_def
8554 || *dt == vect_reduction_def
8555 || *dt == vect_double_reduction_def
8556 || *dt == vect_nested_cycle)
8558 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
8560 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
8561 && !STMT_VINFO_RELEVANT (stmt_info)
8562 && !STMT_VINFO_LIVE_P (stmt_info))
8563 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
8565 *vectype = STMT_VINFO_VECTYPE (stmt_info);
8566 gcc_assert (*vectype != NULL_TREE);
8568 else if (*dt == vect_uninitialized_def
8569 || *dt == vect_constant_def
8570 || *dt == vect_external_def)
8571 *vectype = NULL_TREE;
8572 else
8573 gcc_unreachable ();
8575 return true;
8579 /* Function supportable_widening_operation
8581 Check whether an operation represented by the code CODE is a
8582 widening operation that is supported by the target platform in
8583 vector form (i.e., when operating on arguments of type VECTYPE_IN
8584 producing a result of type VECTYPE_OUT).
8586 Widening operations we currently support are NOP (CONVERT), FLOAT
8587 and WIDEN_MULT. This function checks if these operations are supported
8588 by the target platform either directly (via vector tree-codes), or via
8589 target builtins.
8591 Output:
8592 - CODE1 and CODE2 are codes of vector operations to be used when
8593 vectorizing the operation, if available.
8594 - MULTI_STEP_CVT determines the number of required intermediate steps in
8595 case of multi-step conversion (like char->short->int - in that case
8596 MULTI_STEP_CVT will be 1).
8597 - INTERM_TYPES contains the intermediate type required to perform the
8598 widening operation (short in the above example). */
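/* Worked example (illustrative, names hypothetical): widening a conversion
   from 'char' to 'int', as in

     void
     widen_example (int *out, const signed char *in, int n)
     {
       for (int i = 0; i < n; i++)
         out[i] = in[i];
     }

   On a target with only single-step unpacks this is done as
   char->short->int, so *MULTI_STEP_CVT would be 1 and *INTERM_TYPES would
   hold the intermediate short vector type; *CODE1/*CODE2 would be the
   VEC_UNPACK_LO/HI codes used at each step.  */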
8600 bool
8601 supportable_widening_operation (enum tree_code code, gimple *stmt,
8602 tree vectype_out, tree vectype_in,
8603 enum tree_code *code1, enum tree_code *code2,
8604 int *multi_step_cvt,
8605 vec<tree> *interm_types)
8607 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8608 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
8609 struct loop *vect_loop = NULL;
8610 machine_mode vec_mode;
8611 enum insn_code icode1, icode2;
8612 optab optab1, optab2;
8613 tree vectype = vectype_in;
8614 tree wide_vectype = vectype_out;
8615 enum tree_code c1, c2;
8616 int i;
8617 tree prev_type, intermediate_type;
8618 machine_mode intermediate_mode, prev_mode;
8619 optab optab3, optab4;
8621 *multi_step_cvt = 0;
8622 if (loop_info)
8623 vect_loop = LOOP_VINFO_LOOP (loop_info);
8625 switch (code)
8627 case WIDEN_MULT_EXPR:
8628 /* The result of a vectorized widening operation usually requires
8629 two vectors (because the widened results do not fit into one vector).
8630 The generated vector results would normally be expected to be
8631 generated in the same order as in the original scalar computation,
8632 i.e. if 8 results are generated in each vector iteration, they are
8633 to be organized as follows:
8634 vect1: [res1,res2,res3,res4],
8635 vect2: [res5,res6,res7,res8].
8637 However, in the special case that the result of the widening
8638 operation is used in a reduction computation only, the order doesn't
8639 matter (because when vectorizing a reduction we change the order of
8640 the computation). Some targets can take advantage of this and
8641 generate more efficient code. For example, targets like Altivec,
8642 that support widen_mult using a sequence of {mult_even,mult_odd}
8643 generate the following vectors:
8644 vect1: [res1,res3,res5,res7],
8645 vect2: [res2,res4,res6,res8].
8647 When vectorizing outer-loops, we execute the inner-loop sequentially
8648 (each vectorized inner-loop iteration contributes to VF outer-loop
8649 iterations in parallel). We therefore don't allow changing the
8650 order of the computation in the inner-loop during outer-loop
8651 vectorization. */
8652 /* TODO: Another case in which order doesn't *really* matter is when we
8653 widen and then contract again, e.g. (short)((int)x * y >> 8).
8654 Normally, pack_trunc performs an even/odd permute, whereas the
8655 repack from an even/odd expansion would be an interleave, which
8656 would be significantly simpler for e.g. AVX2. */
8657 /* In any case, in order to avoid duplicating the code below, recurse
8658 on VEC_WIDEN_MULT_EVEN_EXPR. If it succeeds, all the return values
8659 are properly set up for the caller. If we fail, we'll continue with
8660 a VEC_WIDEN_MULT_LO/HI_EXPR check. */
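/* Illustrative example: in a reduction such as

     int s = 0;
     for (int i = 0; i < n; i++)
       s += (int) a[i] * (int) b[i];

   only the final sum matters, so the even/odd ordering produced by
   VEC_WIDEN_MULT_EVEN/ODD_EXPR is acceptable here.  */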
8661 if (vect_loop
8662 && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
8663 && !nested_in_vect_loop_p (vect_loop, stmt)
8664 && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
8665 stmt, vectype_out, vectype_in,
8666 code1, code2, multi_step_cvt,
8667 interm_types))
8669 /* Elements in a vector with vect_used_by_reduction property cannot
8670 be reordered if the use chain with this property does not have the
8671 same operation. One such example is s += a * b, where elements
8672 in a and b cannot be reordered. Here we check if the vector defined
8673 by STMT is only directly used in the reduction statement. */
8674 tree lhs = gimple_assign_lhs (stmt);
8675 use_operand_p dummy;
8676 gimple *use_stmt;
8677 stmt_vec_info use_stmt_info = NULL;
8678 if (single_imm_use (lhs, &dummy, &use_stmt)
8679 && (use_stmt_info = vinfo_for_stmt (use_stmt))
8680 && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
8681 return true;
8683 c1 = VEC_WIDEN_MULT_LO_EXPR;
8684 c2 = VEC_WIDEN_MULT_HI_EXPR;
8685 break;
8687 case DOT_PROD_EXPR:
8688 c1 = DOT_PROD_EXPR;
8689 c2 = DOT_PROD_EXPR;
8690 break;
8692 case SAD_EXPR:
8693 c1 = SAD_EXPR;
8694 c2 = SAD_EXPR;
8695 break;
8697 case VEC_WIDEN_MULT_EVEN_EXPR:
8698 /* Support the recursion induced just above. */
8699 c1 = VEC_WIDEN_MULT_EVEN_EXPR;
8700 c2 = VEC_WIDEN_MULT_ODD_EXPR;
8701 break;
8703 case WIDEN_LSHIFT_EXPR:
8704 c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
8705 c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
8706 break;
8708 CASE_CONVERT:
8709 c1 = VEC_UNPACK_LO_EXPR;
8710 c2 = VEC_UNPACK_HI_EXPR;
8711 break;
8713 case FLOAT_EXPR:
8714 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
8715 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
8716 break;
8718 case FIX_TRUNC_EXPR:
8719 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
8720 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
8721 computing the operation. */
8722 return false;
8724 default:
8725 gcc_unreachable ();
8728 if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
8729 std::swap (c1, c2);
8731 if (code == FIX_TRUNC_EXPR)
8733 /* The signedness is determined from the output operand. */
8734 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
8735 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
8737 else
8739 optab1 = optab_for_tree_code (c1, vectype, optab_default);
8740 optab2 = optab_for_tree_code (c2, vectype, optab_default);
8743 if (!optab1 || !optab2)
8744 return false;
8746 vec_mode = TYPE_MODE (vectype);
8747 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
8748 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
8749 return false;
8751 *code1 = c1;
8752 *code2 = c2;
8754 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
8755 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
8756 return true;
8758 /* Check if it's a multi-step conversion that can be done using intermediate
8759 types. */
8761 prev_type = vectype;
8762 prev_mode = vec_mode;
8764 if (!CONVERT_EXPR_CODE_P (code))
8765 return false;
8767 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
8768 intermediate steps in the promotion sequence. We try
8769 MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
8770 not. */
8771 interm_types->create (MAX_INTERM_CVT_STEPS);
8772 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
8774 intermediate_mode = insn_data[icode1].operand[0].mode;
8775 intermediate_type
8776 = lang_hooks.types.type_for_mode (intermediate_mode,
8777 TYPE_UNSIGNED (prev_type));
8778 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
8779 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
8781 if (!optab3 || !optab4
8782 || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
8783 || insn_data[icode1].operand[0].mode != intermediate_mode
8784 || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
8785 || insn_data[icode2].operand[0].mode != intermediate_mode
8786 || ((icode1 = optab_handler (optab3, intermediate_mode))
8787 == CODE_FOR_nothing)
8788 || ((icode2 = optab_handler (optab4, intermediate_mode))
8789 == CODE_FOR_nothing))
8790 break;
8792 interm_types->quick_push (intermediate_type);
8793 (*multi_step_cvt)++;
8795 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
8796 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
8797 return true;
8799 prev_type = intermediate_type;
8800 prev_mode = intermediate_mode;
8803 interm_types->release ();
8804 return false;
8808 /* Function supportable_narrowing_operation
8810 Check whether an operation represented by the code CODE is a
8811 narrowing operation that is supported by the target platform in
8812 vector form (i.e., when operating on arguments of type VECTYPE_IN
8813 and producing a result of type VECTYPE_OUT).
8815 Narrowing operations we currently support are NOP (CONVERT) and
8816 FIX_TRUNC. This function checks if these operations are supported by
8817 the target platform directly via vector tree-codes.
8819 Output:
8820 - CODE1 is the code of a vector operation to be used when
8821 vectorizing the operation, if available.
8822 - MULTI_STEP_CVT determines the number of required intermediate steps in
8823 case of multi-step conversion (like int->short->char - in that case
8824 MULTI_STEP_CVT will be 1).
8825 - INTERM_TYPES contains the intermediate type required to perform the
8826 narrowing operation (short in the above example). */
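/* Worked example (illustrative, names hypothetical): narrowing 'int' to
   'char', as in

     void
     narrow_example (signed char *out, const int *in, int n)
     {
       for (int i = 0; i < n; i++)
         out[i] = in[i];
     }

   On a target with only single-step packs this is done as
   int->short->char, so *MULTI_STEP_CVT would be 1 and *INTERM_TYPES would
   hold the intermediate short vector type, with VEC_PACK_TRUNC_EXPR used
   at each step.  */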
8828 bool
8829 supportable_narrowing_operation (enum tree_code code,
8830 tree vectype_out, tree vectype_in,
8831 enum tree_code *code1, int *multi_step_cvt,
8832 vec<tree> *interm_types)
8834 machine_mode vec_mode;
8835 enum insn_code icode1;
8836 optab optab1, interm_optab;
8837 tree vectype = vectype_in;
8838 tree narrow_vectype = vectype_out;
8839 enum tree_code c1;
8840 tree intermediate_type;
8841 machine_mode intermediate_mode, prev_mode;
8842 int i;
8843 bool uns;
8845 *multi_step_cvt = 0;
8846 switch (code)
8848 CASE_CONVERT:
8849 c1 = VEC_PACK_TRUNC_EXPR;
8850 break;
8852 case FIX_TRUNC_EXPR:
8853 c1 = VEC_PACK_FIX_TRUNC_EXPR;
8854 break;
8856 case FLOAT_EXPR:
8857 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
8858 tree code and optabs used for computing the operation. */
8859 return false;
8861 default:
8862 gcc_unreachable ();
8865 if (code == FIX_TRUNC_EXPR)
8866 /* The signedness is determined from the output operand. */
8867 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
8868 else
8869 optab1 = optab_for_tree_code (c1, vectype, optab_default);
8871 if (!optab1)
8872 return false;
8874 vec_mode = TYPE_MODE (vectype);
8875 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
8876 return false;
8878 *code1 = c1;
8880 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
8881 return true;
8883 /* Check if it's a multi-step conversion that can be done using intermediate
8884 types. */
8885 prev_mode = vec_mode;
8886 if (code == FIX_TRUNC_EXPR)
8887 uns = TYPE_UNSIGNED (vectype_out);
8888 else
8889 uns = TYPE_UNSIGNED (vectype);
8891 /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
8892 conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
8893 costly than signed. */
8894 if (code == FIX_TRUNC_EXPR && uns)
8896 enum insn_code icode2;
8898 intermediate_type
8899 = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
8900 interm_optab
8901 = optab_for_tree_code (c1, intermediate_type, optab_default);
8902 if (interm_optab != unknown_optab
8903 && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
8904 && insn_data[icode1].operand[0].mode
8905 == insn_data[icode2].operand[0].mode)
8907 uns = false;
8908 optab1 = interm_optab;
8909 icode1 = icode2;
8913 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
8914 intermediate steps in the narrowing sequence. We try
8915 MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not. */
8916 interm_types->create (MAX_INTERM_CVT_STEPS);
8917 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
8919 intermediate_mode = insn_data[icode1].operand[0].mode;
8920 intermediate_type
8921 = lang_hooks.types.type_for_mode (intermediate_mode, uns);
8922 interm_optab
8923 = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
8924 optab_default);
8925 if (!interm_optab
8926 || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
8927 || insn_data[icode1].operand[0].mode != intermediate_mode
8928 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
8929 == CODE_FOR_nothing))
8930 break;
8932 interm_types->quick_push (intermediate_type);
8933 (*multi_step_cvt)++;
8935 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
8936 return true;
8938 prev_mode = intermediate_mode;
8939 optab1 = interm_optab;
8942 interm_types->release ();
8943 return false;