1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003-2016 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
4 and Ira Rosen <irar@il.ibm.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "ssa.h"
31 #include "optabs-tree.h"
32 #include "insn-config.h"
33 #include "recog.h" /* FIXME: for insn_data */
34 #include "cgraph.h"
35 #include "dumpfile.h"
36 #include "alias.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
39 #include "tree-eh.h"
40 #include "gimplify.h"
41 #include "gimple-iterator.h"
42 #include "gimplify-me.h"
43 #include "tree-cfg.h"
44 #include "tree-ssa-loop-manip.h"
45 #include "cfgloop.h"
46 #include "tree-ssa-loop.h"
47 #include "tree-scalar-evolution.h"
48 #include "tree-vectorizer.h"
49 #include "builtins.h"
50 #include "internal-fn.h"
52 /* For lang_hooks.types.type_for_mode. */
53 #include "langhooks.h"
55 /* Return the vectorized type for the given statement. */
57 tree
58 stmt_vectype (struct _stmt_vec_info *stmt_info)
60 return STMT_VINFO_VECTYPE (stmt_info);
63 /* Return TRUE iff the given statement is in an inner loop relative to
64 the loop being vectorized. */
65 bool
66 stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
68 gimple *stmt = STMT_VINFO_STMT (stmt_info);
69 basic_block bb = gimple_bb (stmt);
70 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
71 struct loop* loop;
73 if (!loop_vinfo)
74 return false;
76 loop = LOOP_VINFO_LOOP (loop_vinfo);
78 return (bb->loop_father == loop->inner);
81 /* Record the cost of a statement, either by directly informing the
82 target model or by saving it in a vector for later processing.
83 Return a preliminary estimate of the statement's cost. */
85 unsigned
86 record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
87 enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
88 int misalign, enum vect_cost_model_location where)
90 if (body_cost_vec)
92 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
93 stmt_info_for_cost si = { count, kind,
94 stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
95 misalign };
96 body_cost_vec->safe_push (si);
97 return (unsigned)
98 (builtin_vectorization_cost (kind, vectype, misalign) * count);
100 else
101 return add_stmt_cost (stmt_info->vinfo->target_cost_data,
102 count, kind, stmt_info, misalign, where);
105 /* Return a variable of type ELEM_TYPE[NELEMS]. */
107 static tree
108 create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
110 return create_tmp_var (build_array_type_nelts (elem_type, nelems),
111 "vect_array");
114 /* ARRAY is an array of vectors created by create_vector_array.
115 Return an SSA_NAME for the vector in index N. The reference
116 is part of the vectorization of STMT and the vector is associated
117 with scalar destination SCALAR_DEST. */
119 static tree
120 read_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
121 tree array, unsigned HOST_WIDE_INT n)
123 tree vect_type, vect, vect_name, array_ref;
124 gimple *new_stmt;
126 gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
127 vect_type = TREE_TYPE (TREE_TYPE (array));
128 vect = vect_create_destination_var (scalar_dest, vect_type);
129 array_ref = build4 (ARRAY_REF, vect_type, array,
130 build_int_cst (size_type_node, n),
131 NULL_TREE, NULL_TREE);
133 new_stmt = gimple_build_assign (vect, array_ref);
134 vect_name = make_ssa_name (vect, new_stmt);
135 gimple_assign_set_lhs (new_stmt, vect_name);
136 vect_finish_stmt_generation (stmt, new_stmt, gsi);
138 return vect_name;
141 /* ARRAY is an array of vectors created by create_vector_array.
142 Emit code to store SSA_NAME VECT in index N of the array.
143 The store is part of the vectorization of STMT. */
145 static void
146 write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect,
147 tree array, unsigned HOST_WIDE_INT n)
149 tree array_ref;
150 gimple *new_stmt;
152 array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
153 build_int_cst (size_type_node, n),
154 NULL_TREE, NULL_TREE);
156 new_stmt = gimple_build_assign (array_ref, vect);
157 vect_finish_stmt_generation (stmt, new_stmt, gsi);
160 /* PTR is a pointer to an array of type TYPE. Return a representation
161 of *PTR. The memory reference replaces those in FIRST_DR
162 (and its group). */
164 static tree
165 create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
167 tree mem_ref, alias_ptr_type;
169 alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
170 mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
171 /* Arrays have the same alignment as their type. */
172 set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
173 return mem_ref;
176 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
178 /* Function vect_mark_relevant.
180 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
182 static void
183 vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
184 enum vect_relevant relevant, bool live_p,
185 bool used_in_pattern)
187 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
188 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
189 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
190 gimple *pattern_stmt;
192 if (dump_enabled_p ())
194 dump_printf_loc (MSG_NOTE, vect_location,
195 "mark relevant %d, live %d: ", relevant, live_p);
196 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
199 /* If this stmt is an original stmt in a pattern, we might need to mark its
200 related pattern stmt instead of the original stmt. However, such stmts
201 may have their own uses that are not in any pattern; in such cases the
202 stmt itself should be marked. */
203 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
205 bool found = false;
206 if (!used_in_pattern)
208 imm_use_iterator imm_iter;
209 use_operand_p use_p;
210 gimple *use_stmt;
211 tree lhs;
212 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
213 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
215 if (is_gimple_assign (stmt))
216 lhs = gimple_assign_lhs (stmt);
217 else
218 lhs = gimple_call_lhs (stmt);
220 /* This use is outside the pattern. If LHS has other uses that are
221 pattern uses, we should mark the stmt itself, and not the pattern
222 stmt. */
223 if (lhs && TREE_CODE (lhs) == SSA_NAME)
224 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
226 if (is_gimple_debug (USE_STMT (use_p)))
227 continue;
228 use_stmt = USE_STMT (use_p);
230 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
231 continue;
233 if (vinfo_for_stmt (use_stmt)
234 && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
236 found = true;
237 break;
242 if (!found)
244 /* This is the last stmt in a sequence that was detected as a
245 pattern that can potentially be vectorized. Don't mark the stmt
246 as relevant/live because it's not going to be vectorized.
247 Instead mark the pattern-stmt that replaces it. */
249 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
251 if (dump_enabled_p ())
252 dump_printf_loc (MSG_NOTE, vect_location,
253 "last stmt in pattern. don't mark"
254 " relevant/live.\n");
255 stmt_info = vinfo_for_stmt (pattern_stmt);
256 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
257 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
258 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
259 stmt = pattern_stmt;
263 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
264 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
265 STMT_VINFO_RELEVANT (stmt_info) = relevant;
267 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
268 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
270 if (dump_enabled_p ())
271 dump_printf_loc (MSG_NOTE, vect_location,
272 "already marked relevant/live.\n");
273 return;
276 worklist->safe_push (stmt);
280 /* Function vect_stmt_relevant_p.
282 Return true if STMT in loop that is represented by LOOP_VINFO is
283 "relevant for vectorization".
285 A stmt is considered "relevant for vectorization" if:
286 - it has uses outside the loop.
287 - it has vdefs (it alters memory).
288 - it is a control stmt in the loop (other than the exit condition).
290 CHECKME: what other side effects would the vectorizer allow? */
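/* Illustrative example (not part of the original source).  In

     for (i = 0; i < n; i++)
       {
         a[i] = b[i] + 1;      <-- has a vdef (alters memory) -> relevant
         sum = sum + b[i];     <-- result used after the loop -> live
       }

   the store is marked relevant because it alters memory, and the
   reduction update is live because its value is used outside the loop
   (through the loop-closed exit phi).  */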
292 static bool
293 vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
294 enum vect_relevant *relevant, bool *live_p)
296 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
297 ssa_op_iter op_iter;
298 imm_use_iterator imm_iter;
299 use_operand_p use_p;
300 def_operand_p def_p;
302 *relevant = vect_unused_in_scope;
303 *live_p = false;
305 /* cond stmt other than loop exit cond. */
306 if (is_ctrl_stmt (stmt)
307 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
308 != loop_exit_ctrl_vec_info_type)
309 *relevant = vect_used_in_scope;
311 /* changing memory. */
312 if (gimple_code (stmt) != GIMPLE_PHI)
313 if (gimple_vdef (stmt)
314 && !gimple_clobber_p (stmt))
316 if (dump_enabled_p ())
317 dump_printf_loc (MSG_NOTE, vect_location,
318 "vec_stmt_relevant_p: stmt has vdefs.\n");
319 *relevant = vect_used_in_scope;
322 /* uses outside the loop. */
323 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
325 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
327 basic_block bb = gimple_bb (USE_STMT (use_p));
328 if (!flow_bb_inside_loop_p (loop, bb))
330 if (dump_enabled_p ())
331 dump_printf_loc (MSG_NOTE, vect_location,
332 "vec_stmt_relevant_p: used out of loop.\n");
334 if (is_gimple_debug (USE_STMT (use_p)))
335 continue;
337 /* We expect all such uses to be in the loop exit phis
338 (because of loop closed form) */
339 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
340 gcc_assert (bb == single_exit (loop)->dest);
342 *live_p = true;
347 return (*live_p || *relevant);
351 /* Function exist_non_indexing_operands_for_use_p
353 USE is one of the uses attached to STMT. Check if USE is
354 used in STMT for anything other than indexing an array. */
356 static bool
357 exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)
359 tree operand;
360 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
362 /* USE corresponds to some operand in STMT. If there is no data
363 reference in STMT, then any operand that corresponds to USE
364 is not indexing an array. */
365 if (!STMT_VINFO_DATA_REF (stmt_info))
366 return true;
368 /* STMT has a data_ref. FORNOW this means that it is in one of
369 the following forms:
370 -1- ARRAY_REF = var
371 -2- var = ARRAY_REF
372 (This should have been verified in analyze_data_refs).
374 'var' in the second case corresponds to a def, not a use,
375 so USE cannot correspond to any operands that are not used
376 for array indexing.
378 Therefore, all we need to check is if STMT falls into the
379 first case, and whether var corresponds to USE. */
381 if (!gimple_assign_copy_p (stmt))
383 if (is_gimple_call (stmt)
384 && gimple_call_internal_p (stmt))
385 switch (gimple_call_internal_fn (stmt))
387 case IFN_MASK_STORE:
388 operand = gimple_call_arg (stmt, 3);
389 if (operand == use)
390 return true;
391 /* FALLTHRU */
392 case IFN_MASK_LOAD:
393 operand = gimple_call_arg (stmt, 2);
394 if (operand == use)
395 return true;
396 break;
397 default:
398 break;
400 return false;
403 if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
404 return false;
405 operand = gimple_assign_rhs1 (stmt);
406 if (TREE_CODE (operand) != SSA_NAME)
407 return false;
409 if (operand == use)
410 return true;
412 return false;
417 /* Function process_use.
419 Inputs:
420 - a USE in STMT in a loop represented by LOOP_VINFO
421 - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
422 that defined USE. This is done by calling mark_relevant and passing it
423 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
424 - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
425 be performed.
427 Outputs:
428 Generally, LIVE_P and RELEVANT are used to define the liveness and
429 relevance info of the DEF_STMT of this USE:
430 STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
431 STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
432 Exceptions:
433 - case 1: If USE is used only for address computations (e.g. array indexing),
434 which does not need to be directly vectorized, then the liveness/relevance
435 of the respective DEF_STMT is left unchanged.
436 - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
437 skip DEF_STMT because it has already been processed.
438 - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
439 be modified accordingly.
441 Return true if everything is as expected. Return false otherwise. */
443 static bool
444 process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
445 enum vect_relevant relevant, vec<gimple *> *worklist,
446 bool force)
448 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
449 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
450 stmt_vec_info dstmt_vinfo;
451 basic_block bb, def_bb;
452 gimple *def_stmt;
453 enum vect_def_type dt;
455 /* case 1: we are only interested in uses that need to be vectorized. Uses
456 that are used for address computation are not considered relevant. */
457 if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
458 return true;
460 if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt))
462 if (dump_enabled_p ())
463 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
464 "not vectorized: unsupported use in stmt.\n");
465 return false;
468 if (!def_stmt || gimple_nop_p (def_stmt))
469 return true;
471 def_bb = gimple_bb (def_stmt);
472 if (!flow_bb_inside_loop_p (loop, def_bb))
474 if (dump_enabled_p ())
475 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
476 return true;
479 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
480 DEF_STMT must have already been processed, because this should be the
481 only way that STMT, which is a reduction-phi, was put in the worklist,
482 as there should be no other uses for DEF_STMT in the loop. So we just
483 check that everything is as expected, and we are done. */
484 dstmt_vinfo = vinfo_for_stmt (def_stmt);
485 bb = gimple_bb (stmt);
486 if (gimple_code (stmt) == GIMPLE_PHI
487 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
488 && gimple_code (def_stmt) != GIMPLE_PHI
489 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
490 && bb->loop_father == def_bb->loop_father)
492 if (dump_enabled_p ())
493 dump_printf_loc (MSG_NOTE, vect_location,
494 "reduc-stmt defining reduc-phi in the same nest.\n");
495 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
496 dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
497 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
498 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
499 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
500 return true;
503 /* case 3a: outer-loop stmt defining an inner-loop stmt:
504 outer-loop-header-bb:
505 d = def_stmt
506 inner-loop:
507 stmt # use (d)
508 outer-loop-tail-bb:
509 ... */
510 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
512 if (dump_enabled_p ())
513 dump_printf_loc (MSG_NOTE, vect_location,
514 "outer-loop def-stmt defining inner-loop stmt.\n");
516 switch (relevant)
518 case vect_unused_in_scope:
519 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
520 vect_used_in_scope : vect_unused_in_scope;
521 break;
523 case vect_used_in_outer_by_reduction:
524 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
525 relevant = vect_used_by_reduction;
526 break;
528 case vect_used_in_outer:
529 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
530 relevant = vect_used_in_scope;
531 break;
533 case vect_used_in_scope:
534 break;
536 default:
537 gcc_unreachable ();
541 /* case 3b: inner-loop stmt defining an outer-loop stmt:
542 outer-loop-header-bb:
544 inner-loop:
545 d = def_stmt
546 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
547 stmt # use (d) */
548 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
550 if (dump_enabled_p ())
551 dump_printf_loc (MSG_NOTE, vect_location,
552 "inner-loop def-stmt defining outer-loop stmt.\n");
554 switch (relevant)
556 case vect_unused_in_scope:
557 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
558 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
559 vect_used_in_outer_by_reduction : vect_unused_in_scope;
560 break;
562 case vect_used_by_reduction:
563 relevant = vect_used_in_outer_by_reduction;
564 break;
566 case vect_used_in_scope:
567 relevant = vect_used_in_outer;
568 break;
570 default:
571 gcc_unreachable ();
575 vect_mark_relevant (worklist, def_stmt, relevant, live_p,
576 is_pattern_stmt_p (stmt_vinfo));
577 return true;
581 /* Function vect_mark_stmts_to_be_vectorized.
583 Not all stmts in the loop need to be vectorized. For example:
585 for i...
586 for j...
587 1. T0 = i + j
588 2. T1 = a[T0]
590 3. j = j + 1
592 Stmts 1 and 3 do not need to be vectorized, because loop control and
593 addressing of vectorized data-refs are handled differently.
595 This pass detects such stmts. */
597 bool
598 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
600 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
601 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
602 unsigned int nbbs = loop->num_nodes;
603 gimple_stmt_iterator si;
604 gimple *stmt;
605 unsigned int i;
606 stmt_vec_info stmt_vinfo;
607 basic_block bb;
608 gimple *phi;
609 bool live_p;
610 enum vect_relevant relevant, tmp_relevant;
611 enum vect_def_type def_type;
613 if (dump_enabled_p ())
614 dump_printf_loc (MSG_NOTE, vect_location,
615 "=== vect_mark_stmts_to_be_vectorized ===\n");
617 auto_vec<gimple *, 64> worklist;
619 /* 1. Init worklist. */
620 for (i = 0; i < nbbs; i++)
622 bb = bbs[i];
623 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
625 phi = gsi_stmt (si);
626 if (dump_enabled_p ())
628 dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
629 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
632 if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
633 vect_mark_relevant (&worklist, phi, relevant, live_p, false);
635 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
637 stmt = gsi_stmt (si);
638 if (dump_enabled_p ())
640 dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
641 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
644 if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
645 vect_mark_relevant (&worklist, stmt, relevant, live_p, false);
649 /* 2. Process_worklist */
650 while (worklist.length () > 0)
652 use_operand_p use_p;
653 ssa_op_iter iter;
655 stmt = worklist.pop ();
656 if (dump_enabled_p ())
658 dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
659 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
662 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
663 (DEF_STMT) as relevant/irrelevant and live/dead according to the
664 liveness and relevance properties of STMT. */
665 stmt_vinfo = vinfo_for_stmt (stmt);
666 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
667 live_p = STMT_VINFO_LIVE_P (stmt_vinfo);
669 /* Generally, the liveness and relevance properties of STMT are
670 propagated as is to the DEF_STMTs of its USEs:
671 live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
672 relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)
674 One exception is when STMT has been identified as defining a reduction
675 variable; in this case we set the liveness/relevance as follows:
676 live_p = false
677 relevant = vect_used_by_reduction
678 This is because we distinguish between two kinds of relevant stmts -
679 those that are used by a reduction computation, and those that are
680 (also) used by a regular computation. This allows us later on to
681 identify stmts that are used solely by a reduction, and therefore the
682 order of the results that they produce does not have to be kept. */
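/* For instance (illustrative only): for the reduction  s = s + a[i]
   the load of a[i] feeds only the reduction computation, so its
   defining stmt is marked vect_used_by_reduction rather than
   vect_used_in_scope.  */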
684 def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
685 tmp_relevant = relevant;
686 switch (def_type)
688 case vect_reduction_def:
689 switch (tmp_relevant)
691 case vect_unused_in_scope:
692 relevant = vect_used_by_reduction;
693 break;
695 case vect_used_by_reduction:
696 if (gimple_code (stmt) == GIMPLE_PHI)
697 break;
698 /* fall through */
700 default:
701 if (dump_enabled_p ())
702 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
703 "unsupported use of reduction.\n");
704 return false;
707 live_p = false;
708 break;
710 case vect_nested_cycle:
711 if (tmp_relevant != vect_unused_in_scope
712 && tmp_relevant != vect_used_in_outer_by_reduction
713 && tmp_relevant != vect_used_in_outer)
715 if (dump_enabled_p ())
716 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
717 "unsupported use of nested cycle.\n");
719 return false;
722 live_p = false;
723 break;
725 case vect_double_reduction_def:
726 if (tmp_relevant != vect_unused_in_scope
727 && tmp_relevant != vect_used_by_reduction)
729 if (dump_enabled_p ())
730 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
731 "unsupported use of double reduction.\n");
733 return false;
736 live_p = false;
737 break;
739 default:
740 break;
743 if (is_pattern_stmt_p (stmt_vinfo))
745 /* Pattern statements are not inserted into the code, so
746 FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
747 have to scan the RHS or function arguments instead. */
748 if (is_gimple_assign (stmt))
750 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
751 tree op = gimple_assign_rhs1 (stmt);
753 i = 1;
754 if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
756 if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
757 live_p, relevant, &worklist, false)
758 || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
759 live_p, relevant, &worklist, false))
760 return false;
761 i = 2;
763 for (; i < gimple_num_ops (stmt); i++)
765 op = gimple_op (stmt, i);
766 if (TREE_CODE (op) == SSA_NAME
767 && !process_use (stmt, op, loop_vinfo, live_p, relevant,
768 &worklist, false))
769 return false;
772 else if (is_gimple_call (stmt))
774 for (i = 0; i < gimple_call_num_args (stmt); i++)
776 tree arg = gimple_call_arg (stmt, i);
777 if (!process_use (stmt, arg, loop_vinfo, live_p, relevant,
778 &worklist, false))
779 return false;
783 else
784 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
786 tree op = USE_FROM_PTR (use_p);
787 if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
788 &worklist, false))
789 return false;
792 if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
794 tree off;
795 tree decl = vect_check_gather_scatter (stmt, loop_vinfo, NULL, &off, NULL);
796 gcc_assert (decl);
797 if (!process_use (stmt, off, loop_vinfo, live_p, relevant,
798 &worklist, true))
799 return false;
801 } /* while worklist */
803 return true;
807 /* Function vect_model_simple_cost.
809 Models cost for simple operations, i.e. those that only emit ncopies of a
810 single op. Right now, this does not account for multiple insns that could
811 be generated for the single vector op. We will handle that shortly. */
813 void
814 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
815 enum vect_def_type *dt,
816 stmt_vector_for_cost *prologue_cost_vec,
817 stmt_vector_for_cost *body_cost_vec)
819 int i;
820 int inside_cost = 0, prologue_cost = 0;
822 /* The SLP costs were already calculated during SLP tree build. */
823 if (PURE_SLP_STMT (stmt_info))
824 return;
826 /* FORNOW: Assuming maximum 2 args per stmt. */
827 for (i = 0; i < 2; i++)
828 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
829 prologue_cost += record_stmt_cost (prologue_cost_vec, 1, vector_stmt,
830 stmt_info, 0, vect_prologue);
832 /* Pass the inside-of-loop statements to the target-specific cost model. */
833 inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
834 stmt_info, 0, vect_body);
836 if (dump_enabled_p ())
837 dump_printf_loc (MSG_NOTE, vect_location,
838 "vect_model_simple_cost: inside_cost = %d, "
839 "prologue_cost = %d .\n", inside_cost, prologue_cost);
843 /* Model cost for type demotion and promotion operations. PWR is normally
844 zero for single-step promotions and demotions. It will be one if
845 two-step promotion/demotion is required, and so on. Each additional
846 step doubles the number of instructions required. */
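/* Worked example of the cost computation below (a sketch, assuming
   vect_pow2 (N) returns 2**N):

     promotion, pwr == 0:  2**1           = 2 vec_promote_demote ops
     promotion, pwr == 1:  2**1 + 2**2    = 6 vec_promote_demote ops
     demotion,  pwr == 0:  2**0           = 1 vec_promote_demote op
     demotion,  pwr == 1:  2**0 + 2**1    = 3 vec_promote_demote ops  */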
848 static void
849 vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
850 enum vect_def_type *dt, int pwr)
852 int i, tmp;
853 int inside_cost = 0, prologue_cost = 0;
854 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
855 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
856 void *target_cost_data;
858 /* The SLP costs were already calculated during SLP tree build. */
859 if (PURE_SLP_STMT (stmt_info))
860 return;
862 if (loop_vinfo)
863 target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
864 else
865 target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);
867 for (i = 0; i < pwr + 1; i++)
869 tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
870 (i + 1) : i;
871 inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
872 vec_promote_demote, stmt_info, 0,
873 vect_body);
876 /* FORNOW: Assuming maximum 2 args per stmt. */
877 for (i = 0; i < 2; i++)
878 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
879 prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
880 stmt_info, 0, vect_prologue);
882 if (dump_enabled_p ())
883 dump_printf_loc (MSG_NOTE, vect_location,
884 "vect_model_promotion_demotion_cost: inside_cost = %d, "
885 "prologue_cost = %d .\n", inside_cost, prologue_cost);
888 /* Function vect_cost_group_size
890 For grouped load or store, return the group_size only if it is the first
891 load or store of a group, else return 1. This ensures that group size is
892 only returned once per group. */
894 static int
895 vect_cost_group_size (stmt_vec_info stmt_info)
897 gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
899 if (first_stmt == STMT_VINFO_STMT (stmt_info))
900 return GROUP_SIZE (stmt_info);
902 return 1;
906 /* Function vect_model_store_cost
908 Models cost for stores. In the case of grouped accesses, one access
909 has the overhead of the grouped access attributed to it. */
911 void
912 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
913 bool store_lanes_p, enum vect_def_type dt,
914 slp_tree slp_node,
915 stmt_vector_for_cost *prologue_cost_vec,
916 stmt_vector_for_cost *body_cost_vec)
918 int group_size;
919 unsigned int inside_cost = 0, prologue_cost = 0;
920 struct data_reference *first_dr;
921 gimple *first_stmt;
923 if (dt == vect_constant_def || dt == vect_external_def)
924 prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
925 stmt_info, 0, vect_prologue);
927 /* Grouped access? */
928 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
930 if (slp_node)
932 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
933 group_size = 1;
935 else
937 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
938 group_size = vect_cost_group_size (stmt_info);
941 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
943 /* Not a grouped access. */
944 else
946 group_size = 1;
947 first_dr = STMT_VINFO_DATA_REF (stmt_info);
950 /* We assume that the cost of a single store-lanes instruction is
951 equivalent to the cost of GROUP_SIZE separate stores. If a grouped
952 access is instead being provided by a permute-and-store operation,
953 include the cost of the permutes. */
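/* For instance (illustrative only): with group_size == 4 and
   ncopies == 1, the formula below gives
   nstmts = 1 * ceil_log2 (4) * 4 = 8 vec_perm stmts, i.e. two rounds
   of high/low interleaves over the four vectors of the group.  */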
954 if (!store_lanes_p && group_size > 1
955 && !STMT_VINFO_STRIDED_P (stmt_info))
957 /* Uses high and low interleave or shuffle operations for each
958 needed permute. */
959 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
960 inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
961 stmt_info, 0, vect_body);
963 if (dump_enabled_p ())
964 dump_printf_loc (MSG_NOTE, vect_location,
965 "vect_model_store_cost: strided group_size = %d .\n",
966 group_size);
969 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
970 /* Costs of the stores. */
971 if (STMT_VINFO_STRIDED_P (stmt_info)
972 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
974 /* N scalar stores plus extracting the elements. */
975 inside_cost += record_stmt_cost (body_cost_vec,
976 ncopies * TYPE_VECTOR_SUBPARTS (vectype),
977 scalar_store, stmt_info, 0, vect_body);
979 else
980 vect_get_store_cost (first_dr, ncopies, &inside_cost, body_cost_vec);
982 if (STMT_VINFO_STRIDED_P (stmt_info))
983 inside_cost += record_stmt_cost (body_cost_vec,
984 ncopies * TYPE_VECTOR_SUBPARTS (vectype),
985 vec_to_scalar, stmt_info, 0, vect_body);
987 if (dump_enabled_p ())
988 dump_printf_loc (MSG_NOTE, vect_location,
989 "vect_model_store_cost: inside_cost = %d, "
990 "prologue_cost = %d .\n", inside_cost, prologue_cost);
994 /* Calculate cost of DR's memory access. */
995 void
996 vect_get_store_cost (struct data_reference *dr, int ncopies,
997 unsigned int *inside_cost,
998 stmt_vector_for_cost *body_cost_vec)
1000 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
1001 gimple *stmt = DR_STMT (dr);
1002 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1004 switch (alignment_support_scheme)
1006 case dr_aligned:
1008 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1009 vector_store, stmt_info, 0,
1010 vect_body);
1012 if (dump_enabled_p ())
1013 dump_printf_loc (MSG_NOTE, vect_location,
1014 "vect_model_store_cost: aligned.\n");
1015 break;
1018 case dr_unaligned_supported:
1020 /* Here, we assign an additional cost for the unaligned store. */
1021 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1022 unaligned_store, stmt_info,
1023 DR_MISALIGNMENT (dr), vect_body);
1024 if (dump_enabled_p ())
1025 dump_printf_loc (MSG_NOTE, vect_location,
1026 "vect_model_store_cost: unaligned supported by "
1027 "hardware.\n");
1028 break;
1031 case dr_unaligned_unsupported:
1033 *inside_cost = VECT_MAX_COST;
1035 if (dump_enabled_p ())
1036 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1037 "vect_model_store_cost: unsupported access.\n");
1038 break;
1041 default:
1042 gcc_unreachable ();
1047 /* Function vect_model_load_cost
1049 Models cost for loads. In the case of grouped accesses, the last access
1050 has the overhead of the grouped access attributed to it. Since unaligned
1051 accesses are supported for loads, we also account for the costs of the
1052 access scheme chosen. */
1054 void
1055 vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
1056 bool load_lanes_p, slp_tree slp_node,
1057 stmt_vector_for_cost *prologue_cost_vec,
1058 stmt_vector_for_cost *body_cost_vec)
1060 int group_size;
1061 gimple *first_stmt;
1062 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
1063 unsigned int inside_cost = 0, prologue_cost = 0;
1065 /* Grouped accesses? */
1066 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
1067 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && first_stmt && !slp_node)
1069 group_size = vect_cost_group_size (stmt_info);
1070 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
1072 /* Not a grouped access. */
1073 else
1075 group_size = 1;
1076 first_dr = dr;
1079 /* We assume that the cost of a single load-lanes instruction is
1080 equivalent to the cost of GROUP_SIZE separate loads. If a grouped
1081 access is instead being provided by a load-and-permute operation,
1082 include the cost of the permutes. */
1083 if (!load_lanes_p && group_size > 1
1084 && !STMT_VINFO_STRIDED_P (stmt_info))
1086 /* Uses even and odd extract operations or shuffle operations
1087 for each needed permute. */
1088 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
1089 inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
1090 stmt_info, 0, vect_body);
1092 if (dump_enabled_p ())
1093 dump_printf_loc (MSG_NOTE, vect_location,
1094 "vect_model_load_cost: strided group_size = %d .\n",
1095 group_size);
1098 /* The loads themselves. */
1099 if (STMT_VINFO_STRIDED_P (stmt_info)
1100 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
1102 /* N scalar loads plus gathering them into a vector. */
1103 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1104 inside_cost += record_stmt_cost (body_cost_vec,
1105 ncopies * TYPE_VECTOR_SUBPARTS (vectype),
1106 scalar_load, stmt_info, 0, vect_body);
1108 else
1109 vect_get_load_cost (first_dr, ncopies,
1110 ((!STMT_VINFO_GROUPED_ACCESS (stmt_info))
1111 || group_size > 1 || slp_node),
1112 &inside_cost, &prologue_cost,
1113 prologue_cost_vec, body_cost_vec, true);
1114 if (STMT_VINFO_STRIDED_P (stmt_info))
1115 inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
1116 stmt_info, 0, vect_body);
1118 if (dump_enabled_p ())
1119 dump_printf_loc (MSG_NOTE, vect_location,
1120 "vect_model_load_cost: inside_cost = %d, "
1121 "prologue_cost = %d .\n", inside_cost, prologue_cost);
1125 /* Calculate cost of DR's memory access. */
1126 void
1127 vect_get_load_cost (struct data_reference *dr, int ncopies,
1128 bool add_realign_cost, unsigned int *inside_cost,
1129 unsigned int *prologue_cost,
1130 stmt_vector_for_cost *prologue_cost_vec,
1131 stmt_vector_for_cost *body_cost_vec,
1132 bool record_prologue_costs)
1134 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
1135 gimple *stmt = DR_STMT (dr);
1136 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1138 switch (alignment_support_scheme)
1140 case dr_aligned:
1142 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1143 stmt_info, 0, vect_body);
1145 if (dump_enabled_p ())
1146 dump_printf_loc (MSG_NOTE, vect_location,
1147 "vect_model_load_cost: aligned.\n");
1149 break;
1151 case dr_unaligned_supported:
1153 /* Here, we assign an additional cost for the unaligned load. */
1154 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1155 unaligned_load, stmt_info,
1156 DR_MISALIGNMENT (dr), vect_body);
1158 if (dump_enabled_p ())
1159 dump_printf_loc (MSG_NOTE, vect_location,
1160 "vect_model_load_cost: unaligned supported by "
1161 "hardware.\n");
1163 break;
1165 case dr_explicit_realign:
1167 *inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
1168 vector_load, stmt_info, 0, vect_body);
1169 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1170 vec_perm, stmt_info, 0, vect_body);
1172 /* FIXME: If the misalignment remains fixed across the iterations of
1173 the containing loop, the following cost should be added to the
1174 prologue costs. */
1175 if (targetm.vectorize.builtin_mask_for_load)
1176 *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
1177 stmt_info, 0, vect_body);
1179 if (dump_enabled_p ())
1180 dump_printf_loc (MSG_NOTE, vect_location,
1181 "vect_model_load_cost: explicit realign\n");
1183 break;
1185 case dr_explicit_realign_optimized:
1187 if (dump_enabled_p ())
1188 dump_printf_loc (MSG_NOTE, vect_location,
1189 "vect_model_load_cost: unaligned software "
1190 "pipelined.\n");
1192 /* Unaligned software pipeline has a load of an address, an initial
1193 load, and possibly a mask operation to "prime" the loop. However,
1194 if this is an access in a group of loads that provide grouped
1195 access, then the above cost should be considered for only one
1196 access in the group. Inside the loop, there is a load op
1197 and a realignment op. */
1199 if (add_realign_cost && record_prologue_costs)
1201 *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
1202 vector_stmt, stmt_info,
1203 0, vect_prologue);
1204 if (targetm.vectorize.builtin_mask_for_load)
1205 *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
1206 vector_stmt, stmt_info,
1207 0, vect_prologue);
1210 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1211 stmt_info, 0, vect_body);
1212 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
1213 stmt_info, 0, vect_body);
1215 if (dump_enabled_p ())
1216 dump_printf_loc (MSG_NOTE, vect_location,
1217 "vect_model_load_cost: explicit realign optimized"
1218 "\n");
1220 break;
1223 case dr_unaligned_unsupported:
1225 *inside_cost = VECT_MAX_COST;
1227 if (dump_enabled_p ())
1228 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1229 "vect_model_load_cost: unsupported access.\n");
1230 break;
1233 default:
1234 gcc_unreachable ();
1238 /* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
1239 the loop preheader for the vectorized stmt STMT. */
1241 static void
1242 vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
1244 if (gsi)
1245 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1246 else
1248 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1249 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1251 if (loop_vinfo)
1253 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1254 basic_block new_bb;
1255 edge pe;
1257 if (nested_in_vect_loop_p (loop, stmt))
1258 loop = loop->inner;
1260 pe = loop_preheader_edge (loop);
1261 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
1262 gcc_assert (!new_bb);
1264 else
1266 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
1267 basic_block bb;
1268 gimple_stmt_iterator gsi_bb_start;
1270 gcc_assert (bb_vinfo);
1271 bb = BB_VINFO_BB (bb_vinfo);
1272 gsi_bb_start = gsi_after_labels (bb);
1273 gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
1277 if (dump_enabled_p ())
1279 dump_printf_loc (MSG_NOTE, vect_location,
1280 "created new init_stmt: ");
1281 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
1285 /* Function vect_init_vector.
1287 Insert a new stmt (INIT_STMT) that initializes a new variable of type
1288 TYPE with the value VAL. If TYPE is a vector type and VAL does not have
1289 a vector type, a vector with all elements equal to VAL is created first.
1290 Place the initialization at GSI if it is not NULL. Otherwise, place the
1291 initialization at the loop preheader.
1292 Return the DEF of INIT_STMT.
1293 It will be used in the vectorization of STMT. */
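/* For example (illustrative only): calling vect_init_vector with a
   scalar VAL, a V4SI TYPE and a NULL GSI builds the vector constant
   { VAL, VAL, VAL, VAL }, inserts the init stmt on the loop-preheader
   edge and returns the new SSA name holding that vector.  */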
1295 tree
1296 vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi)
1298 gimple *init_stmt;
1299 tree new_temp;
1301 if (TREE_CODE (type) == VECTOR_TYPE
1302 && TREE_CODE (TREE_TYPE (val)) != VECTOR_TYPE)
1304 if (!types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
1306 /* A scalar boolean value should be transformed into an
1307 all-zeros or all-ones value before building a vector. */
1308 if (VECTOR_BOOLEAN_TYPE_P (type))
1310 tree true_val = build_all_ones_cst (TREE_TYPE (type));
1311 tree false_val = build_zero_cst (TREE_TYPE (type));
1313 if (CONSTANT_CLASS_P (val))
1314 val = integer_zerop (val) ? false_val : true_val;
1315 else
1317 new_temp = make_ssa_name (TREE_TYPE (type));
1318 init_stmt = gimple_build_assign (new_temp, COND_EXPR,
1319 val, true_val, false_val);
1320 vect_init_vector_1 (stmt, init_stmt, gsi);
1321 val = new_temp;
1324 else if (CONSTANT_CLASS_P (val))
1325 val = fold_convert (TREE_TYPE (type), val);
1326 else
1328 new_temp = make_ssa_name (TREE_TYPE (type));
1329 init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
1330 vect_init_vector_1 (stmt, init_stmt, gsi);
1331 val = new_temp;
1334 val = build_vector_from_val (type, val);
1337 new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
1338 init_stmt = gimple_build_assign (new_temp, val);
1339 vect_init_vector_1 (stmt, init_stmt, gsi);
1340 return new_temp;
1344 /* Function vect_get_vec_def_for_operand.
1346 OP is an operand in STMT. This function returns a (vector) def that will be
1347 used in the vectorized stmt for STMT.
1349 In the case that OP is an SSA_NAME which is defined in the loop, then
1350 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
1352 In case OP is an invariant or constant, a new stmt that creates a vector def
1353 needs to be introduced. VECTYPE may be used to specify a required type for
1354 vector invariant. */
1356 tree
1357 vect_get_vec_def_for_operand (tree op, gimple *stmt, tree vectype)
1359 tree vec_oprnd;
1360 gimple *vec_stmt;
1361 gimple *def_stmt;
1362 stmt_vec_info def_stmt_info = NULL;
1363 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1364 tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
1365 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1366 enum vect_def_type dt;
1367 bool is_simple_use;
1368 tree vector_type;
1370 if (dump_enabled_p ())
1372 dump_printf_loc (MSG_NOTE, vect_location,
1373 "vect_get_vec_def_for_operand: ");
1374 dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
1375 dump_printf (MSG_NOTE, "\n");
1378 is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt);
1379 gcc_assert (is_simple_use);
1380 if (dump_enabled_p ())
1382 int loc_printed = 0;
1383 if (def_stmt)
1385 if (loc_printed)
1386 dump_printf (MSG_NOTE, " def_stmt = ");
1387 else
1388 dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = ");
1389 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
1393 switch (dt)
1395 /* operand is a constant or a loop invariant. */
1396 case vect_constant_def:
1397 case vect_external_def:
1399 if (vectype)
1400 vector_type = vectype;
1401 else if (TREE_CODE (TREE_TYPE (op)) == BOOLEAN_TYPE
1402 && VECTOR_BOOLEAN_TYPE_P (stmt_vectype))
1403 vector_type = build_same_sized_truth_vector_type (stmt_vectype);
1404 else
1405 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
1407 gcc_assert (vector_type);
1408 return vect_init_vector (stmt, op, vector_type, NULL);
1411 /* operand is defined inside the loop. */
1412 case vect_internal_def:
1414 /* Get the def from the vectorized stmt. */
1415 def_stmt_info = vinfo_for_stmt (def_stmt);
1417 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1418 /* Get vectorized pattern statement. */
1419 if (!vec_stmt
1420 && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
1421 && !STMT_VINFO_RELEVANT (def_stmt_info))
1422 vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
1423 STMT_VINFO_RELATED_STMT (def_stmt_info)));
1424 gcc_assert (vec_stmt);
1425 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1426 vec_oprnd = PHI_RESULT (vec_stmt);
1427 else if (is_gimple_call (vec_stmt))
1428 vec_oprnd = gimple_call_lhs (vec_stmt);
1429 else
1430 vec_oprnd = gimple_assign_lhs (vec_stmt);
1431 return vec_oprnd;
1434 /* operand is defined by a loop header phi - reduction */
1435 case vect_reduction_def:
1436 case vect_double_reduction_def:
1437 case vect_nested_cycle:
1438 /* Code should use get_initial_def_for_reduction. */
1439 gcc_unreachable ();
1441 /* operand is defined by loop-header phi - induction. */
1442 case vect_induction_def:
1444 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1446 /* Get the def from the vectorized stmt. */
1447 def_stmt_info = vinfo_for_stmt (def_stmt);
1448 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1449 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1450 vec_oprnd = PHI_RESULT (vec_stmt);
1451 else
1452 vec_oprnd = gimple_get_lhs (vec_stmt);
1453 return vec_oprnd;
1456 default:
1457 gcc_unreachable ();
1462 /* Function vect_get_vec_def_for_stmt_copy
1464 Return a vector-def for an operand. This function is used when the
1465 vectorized stmt to be created (by the caller to this function) is a "copy"
1466 created in case the vectorized result cannot fit in one vector, and several
1467 copies of the vector-stmt are required. In this case the vector-def is
1468 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1469 of the stmt that defines VEC_OPRND.
1470 DT is the type of the vector def VEC_OPRND.
1472 Context:
1473 In case the vectorization factor (VF) is bigger than the number
1474 of elements that can fit in a vectype (nunits), we have to generate
1475 more than one vector stmt to vectorize the scalar stmt. This situation
1476 arises when there are multiple data-types operated upon in the loop; the
1477 smallest data-type determines the VF, and as a result, when vectorizing
1478 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1479 vector stmt (each computing a vector of 'nunits' results, and together
1480 computing 'VF' results in each iteration). This function is called when
1481 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1482 which VF=16 and nunits=4, so the number of copies required is 4):
1484 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1486 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1487 VS1.1: vx.1 = memref1 VS1.2
1488 VS1.2: vx.2 = memref2 VS1.3
1489 VS1.3: vx.3 = memref3
1491 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1492 VSnew.1: vz1 = vx.1 + ... VSnew.2
1493 VSnew.2: vz2 = vx.2 + ... VSnew.3
1494 VSnew.3: vz3 = vx.3 + ...
1496 The vectorization of S1 is explained in vectorizable_load.
1497 The vectorization of S2:
1498 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1499 the function 'vect_get_vec_def_for_operand' is called to
1500 get the relevant vector-def for each operand of S2. For operand x it
1501 returns the vector-def 'vx.0'.
1503 To create the remaining copies of the vector-stmt (VSnew.j), this
1504 function is called to get the relevant vector-def for each operand. It is
1505 obtained from the respective VS1.j stmt, which is recorded in the
1506 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1508 For example, to obtain the vector-def 'vx.1' in order to create the
1509 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1510 Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
1511 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1512 and return its def ('vx.1').
1513 Overall, to create the above sequence this function will be called 3 times:
1514 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1515 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1516 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1518 tree
1519 vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1521 gimple *vec_stmt_for_operand;
1522 stmt_vec_info def_stmt_info;
1524 /* Do nothing; can reuse same def. */
1525 if (dt == vect_external_def || dt == vect_constant_def )
1526 return vec_oprnd;
1528 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1529 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1530 gcc_assert (def_stmt_info);
1531 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1532 gcc_assert (vec_stmt_for_operand);
1533 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1534 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1535 else
1536 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1537 return vec_oprnd;
1541 /* Get vectorized definitions for the operands to create a copy of an original
1542 stmt. See vect_get_vec_def_for_stmt_copy () for details. */
1544 static void
1545 vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
1546 vec<tree> *vec_oprnds0,
1547 vec<tree> *vec_oprnds1)
1549 tree vec_oprnd = vec_oprnds0->pop ();
1551 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
1552 vec_oprnds0->quick_push (vec_oprnd);
1554 if (vec_oprnds1 && vec_oprnds1->length ())
1556 vec_oprnd = vec_oprnds1->pop ();
1557 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
1558 vec_oprnds1->quick_push (vec_oprnd);
1563 /* Get vectorized definitions for OP0 and OP1.
1564 REDUC_INDEX is the index of the reduction operand in case of reduction,
1565 and -1 otherwise. */
1567 void
1568 vect_get_vec_defs (tree op0, tree op1, gimple *stmt,
1569 vec<tree> *vec_oprnds0,
1570 vec<tree> *vec_oprnds1,
1571 slp_tree slp_node, int reduc_index)
1573 if (slp_node)
1575 int nops = (op1 == NULL_TREE) ? 1 : 2;
1576 auto_vec<tree> ops (nops);
1577 auto_vec<vec<tree> > vec_defs (nops);
1579 ops.quick_push (op0);
1580 if (op1)
1581 ops.quick_push (op1);
1583 vect_get_slp_defs (ops, slp_node, &vec_defs, reduc_index);
1585 *vec_oprnds0 = vec_defs[0];
1586 if (op1)
1587 *vec_oprnds1 = vec_defs[1];
1589 else
1591 tree vec_oprnd;
1593 vec_oprnds0->create (1);
1594 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt);
1595 vec_oprnds0->quick_push (vec_oprnd);
1597 if (op1)
1599 vec_oprnds1->create (1);
1600 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt);
1601 vec_oprnds1->quick_push (vec_oprnd);
1607 /* Function vect_finish_stmt_generation.
1609 Insert a new stmt. */
1611 void
1612 vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt,
1613 gimple_stmt_iterator *gsi)
1615 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1616 vec_info *vinfo = stmt_info->vinfo;
1618 gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
1620 if (!gsi_end_p (*gsi)
1621 && gimple_has_mem_ops (vec_stmt))
1623 gimple *at_stmt = gsi_stmt (*gsi);
1624 tree vuse = gimple_vuse (at_stmt);
1625 if (vuse && TREE_CODE (vuse) == SSA_NAME)
1627 tree vdef = gimple_vdef (at_stmt);
1628 gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
1629 /* If we have an SSA vuse and insert a store, update virtual
1630 SSA form to avoid triggering the renamer. Do so only
1631 if we can easily see all uses - which is what almost always
1632 happens with the way vectorized stmts are inserted. */
1633 if ((vdef && TREE_CODE (vdef) == SSA_NAME)
1634 && ((is_gimple_assign (vec_stmt)
1635 && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
1636 || (is_gimple_call (vec_stmt)
1637 && !(gimple_call_flags (vec_stmt)
1638 & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
1640 tree new_vdef = copy_ssa_name (vuse, vec_stmt);
1641 gimple_set_vdef (vec_stmt, new_vdef);
1642 SET_USE (gimple_vuse_op (at_stmt), new_vdef);
1646 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1648 set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo));
1650 if (dump_enabled_p ())
1652 dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
1653 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
1656 gimple_set_location (vec_stmt, gimple_location (stmt));
1658 /* While EH edges will generally prevent vectorization, stmt might
1659 e.g. be in a must-not-throw region. Ensure newly created stmts
1660 that could throw are part of the same region. */
1661 int lp_nr = lookup_stmt_eh_lp (stmt);
1662 if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
1663 add_stmt_to_eh_lp (vec_stmt, lp_nr);
1666 /* We want to vectorize a call to combined function CFN with function
1667 decl FNDECL, using VECTYPE_OUT as the type of the output and VECTYPE_IN
1668 as the types of all inputs. Check whether this is possible using
1669 an internal function, returning its code if so or IFN_LAST if not. */
1671 static internal_fn
1672 vectorizable_internal_function (combined_fn cfn, tree fndecl,
1673 tree vectype_out, tree vectype_in)
1675 internal_fn ifn;
1676 if (internal_fn_p (cfn))
1677 ifn = as_internal_fn (cfn);
1678 else
1679 ifn = associated_internal_fn (fndecl);
1680 if (ifn != IFN_LAST && direct_internal_fn_p (ifn))
1682 const direct_internal_fn_info &info = direct_internal_fn (ifn);
1683 if (info.vectorizable)
1685 tree type0 = (info.type0 < 0 ? vectype_out : vectype_in);
1686 tree type1 = (info.type1 < 0 ? vectype_out : vectype_in);
1687 if (direct_internal_fn_supported_p (ifn, tree_pair (type0, type1),
1688 OPTIMIZE_FOR_SPEED))
1689 return ifn;
1692 return IFN_LAST;
1696 static tree permute_vec_elements (tree, tree, tree, gimple *,
1697 gimple_stmt_iterator *);
1700 /* Function vectorizable_mask_load_store.
1702 Check if STMT performs a conditional load or store that can be vectorized.
1703 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1704 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
1705 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1707 static bool
1708 vectorizable_mask_load_store (gimple *stmt, gimple_stmt_iterator *gsi,
1709 gimple **vec_stmt, slp_tree slp_node)
1711 tree vec_dest = NULL;
1712 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1713 stmt_vec_info prev_stmt_info;
1714 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1715 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1716 bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
1717 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
1718 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1719 tree rhs_vectype = NULL_TREE;
1720 tree mask_vectype;
1721 tree elem_type;
1722 gimple *new_stmt;
1723 tree dummy;
1724 tree dataref_ptr = NULL_TREE;
1725 gimple *ptr_incr;
1726 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
1727 int ncopies;
1728 int i, j;
1729 bool inv_p;
1730 tree gather_base = NULL_TREE, gather_off = NULL_TREE;
1731 tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
1732 int gather_scale = 1;
1733 enum vect_def_type gather_dt = vect_unknown_def_type;
1734 bool is_store;
1735 tree mask;
1736 gimple *def_stmt;
1737 enum vect_def_type dt;
1739 if (slp_node != NULL)
1740 return false;
1742 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
1743 gcc_assert (ncopies >= 1);
1745 is_store = gimple_call_internal_fn (stmt) == IFN_MASK_STORE;
1746 mask = gimple_call_arg (stmt, 2);
1748 if (TREE_CODE (TREE_TYPE (mask)) != BOOLEAN_TYPE)
1749 return false;
1751 /* FORNOW. This restriction should be relaxed. */
1752 if (nested_in_vect_loop && ncopies > 1)
1754 if (dump_enabled_p ())
1755 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1756 "multiple types in nested loop.");
1757 return false;
1760 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1761 return false;
1763 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
1764 && ! vec_stmt)
1765 return false;
1767 if (!STMT_VINFO_DATA_REF (stmt_info))
1768 return false;
1770 elem_type = TREE_TYPE (vectype);
1772 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1773 return false;
1775 if (STMT_VINFO_STRIDED_P (stmt_info))
1776 return false;
1778 if (TREE_CODE (mask) != SSA_NAME)
1779 return false;
1781 if (!vect_is_simple_use (mask, loop_vinfo, &def_stmt, &dt, &mask_vectype))
1782 return false;
1784 if (!mask_vectype)
1785 mask_vectype = get_mask_type_for_scalar_type (TREE_TYPE (vectype));
1787 if (!mask_vectype || !VECTOR_BOOLEAN_TYPE_P (mask_vectype))
1788 return false;
1790 if (is_store)
1792 tree rhs = gimple_call_arg (stmt, 3);
1793 if (!vect_is_simple_use (rhs, loop_vinfo, &def_stmt, &dt, &rhs_vectype))
1794 return false;
1797 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
1799 gimple *def_stmt;
1800 gather_decl = vect_check_gather_scatter (stmt, loop_vinfo, &gather_base,
1801 &gather_off, &gather_scale);
1802 gcc_assert (gather_decl);
1803 if (!vect_is_simple_use (gather_off, loop_vinfo, &def_stmt, &gather_dt,
1804 &gather_off_vectype))
1806 if (dump_enabled_p ())
1807 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1808 "gather index use not simple.");
1809 return false;
1812 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
1813 tree masktype
1814 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
1815 if (TREE_CODE (masktype) == INTEGER_TYPE)
1817 if (dump_enabled_p ())
1818 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1819 "masked gather with integer mask not supported.");
1820 return false;
1823 else if (tree_int_cst_compare (nested_in_vect_loop
1824 ? STMT_VINFO_DR_STEP (stmt_info)
1825 : DR_STEP (dr), size_zero_node) <= 0)
1826 return false;
1827 else if (!VECTOR_MODE_P (TYPE_MODE (vectype))
1828 || !can_vec_mask_load_store_p (TYPE_MODE (vectype),
1829 TYPE_MODE (mask_vectype),
1830 !is_store)
1831 || (rhs_vectype
1832 && !useless_type_conversion_p (vectype, rhs_vectype)))
1833 return false;
1835 if (!vec_stmt) /* transformation not required. */
1837 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
1838 if (is_store)
1839 vect_model_store_cost (stmt_info, ncopies, false, dt,
1840 NULL, NULL, NULL);
1841 else
1842 vect_model_load_cost (stmt_info, ncopies, false, NULL, NULL, NULL);
1843 return true;
1846 /** Transform. **/
1848 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
1850 tree vec_oprnd0 = NULL_TREE, op;
1851 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
1852 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
1853 tree ptr, vec_mask = NULL_TREE, mask_op = NULL_TREE, var, scale;
1854 tree perm_mask = NULL_TREE, prev_res = NULL_TREE;
1855 tree mask_perm_mask = NULL_TREE;
1856 edge pe = loop_preheader_edge (loop);
1857 gimple_seq seq;
1858 basic_block new_bb;
1859 enum { NARROW, NONE, WIDEN } modifier;
1860 int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);
1862 rettype = TREE_TYPE (TREE_TYPE (gather_decl));
1863 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1864 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1865 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1866 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1867 scaletype = TREE_VALUE (arglist);
1868 gcc_checking_assert (types_compatible_p (srctype, rettype)
1869 && types_compatible_p (srctype, masktype));
1871 if (nunits == gather_off_nunits)
1872 modifier = NONE;
1873 else if (nunits == gather_off_nunits / 2)
1875 unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
1876 modifier = WIDEN;
1878 for (i = 0; i < gather_off_nunits; ++i)
1879 sel[i] = i | nunits;
1881 perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
1883 else if (nunits == gather_off_nunits * 2)
1885 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
1886 modifier = NARROW;
1888 for (i = 0; i < nunits; ++i)
1889 sel[i] = i < gather_off_nunits
1890 ? i : i + nunits - gather_off_nunits;
1892 perm_mask = vect_gen_perm_mask_checked (vectype, sel);
1893 ncopies *= 2;
1894 for (i = 0; i < nunits; ++i)
1895 sel[i] = i | gather_off_nunits;
1896 mask_perm_mask = vect_gen_perm_mask_checked (masktype, sel);
1898 else
1899 gcc_unreachable ();
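/* Roughly: with WIDEN the offset vector has twice as many elements as the
   data vector, so every other copy reuses the previous offset vector
   permuted so that its upper half is consumed; with NARROW each data
   vector needs two gathers, hence NCOPIES is doubled and pairs of
   half-width results are later combined using PERM_MASK.  */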
1901 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
1903 ptr = fold_convert (ptrtype, gather_base);
1904 if (!is_gimple_min_invariant (ptr))
1906 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
1907 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
1908 gcc_assert (!new_bb);
1911 scale = build_int_cst (scaletype, gather_scale);
1913 prev_stmt_info = NULL;
1914 for (j = 0; j < ncopies; ++j)
1916 if (modifier == WIDEN && (j & 1))
1917 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
1918 perm_mask, stmt, gsi);
1919 else if (j == 0)
1920 op = vec_oprnd0
1921 = vect_get_vec_def_for_operand (gather_off, stmt);
1922 else
1923 op = vec_oprnd0
1924 = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);
1926 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
1928 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
1929 == TYPE_VECTOR_SUBPARTS (idxtype));
1930 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
1931 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
1932 new_stmt
1933 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
1934 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1935 op = var;
1938 if (mask_perm_mask && (j & 1))
1939 mask_op = permute_vec_elements (mask_op, mask_op,
1940 mask_perm_mask, stmt, gsi);
1941 else
1943 if (j == 0)
1944 vec_mask = vect_get_vec_def_for_operand (mask, stmt);
1945 else
1947 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
1948 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
1951 mask_op = vec_mask;
1952 if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
1954 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op))
1955 == TYPE_VECTOR_SUBPARTS (masktype));
1956 var = vect_get_new_ssa_name (masktype, vect_simple_var);
1957 mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
1958 new_stmt
1959 = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op);
1960 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1961 mask_op = var;
1965 new_stmt
1966 = gimple_build_call (gather_decl, 5, mask_op, ptr, op, mask_op,
1967 scale);
1969 if (!useless_type_conversion_p (vectype, rettype))
1971 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
1972 == TYPE_VECTOR_SUBPARTS (rettype));
1973 op = vect_get_new_ssa_name (rettype, vect_simple_var);
1974 gimple_call_set_lhs (new_stmt, op);
1975 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1976 var = make_ssa_name (vec_dest);
1977 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
1978 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
1980 else
1982 var = make_ssa_name (vec_dest, new_stmt);
1983 gimple_call_set_lhs (new_stmt, var);
1986 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1988 if (modifier == NARROW)
1990 if ((j & 1) == 0)
1992 prev_res = var;
1993 continue;
1995 var = permute_vec_elements (prev_res, var,
1996 perm_mask, stmt, gsi);
1997 new_stmt = SSA_NAME_DEF_STMT (var);
2000 if (prev_stmt_info == NULL)
2001 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2002 else
2003 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2004 prev_stmt_info = vinfo_for_stmt (new_stmt);
2007 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
2008 from the IL. */
2009 if (STMT_VINFO_RELATED_STMT (stmt_info))
2011 stmt = STMT_VINFO_RELATED_STMT (stmt_info);
2012 stmt_info = vinfo_for_stmt (stmt);
2014 tree lhs = gimple_call_lhs (stmt);
2015 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
2016 set_vinfo_for_stmt (new_stmt, stmt_info);
2017 set_vinfo_for_stmt (stmt, NULL);
2018 STMT_VINFO_STMT (stmt_info) = new_stmt;
2019 gsi_replace (gsi, new_stmt, true);
2020 return true;
2022 else if (is_store)
2024 tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
2025 prev_stmt_info = NULL;
2026 LOOP_VINFO_HAS_MASK_STORE (loop_vinfo) = true;
2027 for (i = 0; i < ncopies; i++)
2029 unsigned align, misalign;
2031 if (i == 0)
2033 tree rhs = gimple_call_arg (stmt, 3);
2034 vec_rhs = vect_get_vec_def_for_operand (rhs, stmt);
2035 vec_mask = vect_get_vec_def_for_operand (mask, stmt);
2036 /* We should have caught mismatched types earlier. */
2037 gcc_assert (useless_type_conversion_p (vectype,
2038 TREE_TYPE (vec_rhs)));
2039 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
2040 NULL_TREE, &dummy, gsi,
2041 &ptr_incr, false, &inv_p);
2042 gcc_assert (!inv_p);
2044 else
2046 vect_is_simple_use (vec_rhs, loop_vinfo, &def_stmt, &dt);
2047 vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs);
2048 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
2049 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2050 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2051 TYPE_SIZE_UNIT (vectype));
2054 align = TYPE_ALIGN_UNIT (vectype);
2055 if (aligned_access_p (dr))
2056 misalign = 0;
2057 else if (DR_MISALIGNMENT (dr) == -1)
2059 align = TYPE_ALIGN_UNIT (elem_type);
2060 misalign = 0;
2062 else
2063 misalign = DR_MISALIGNMENT (dr);
2064 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2065 misalign);
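/* The second argument of the IFN_MASK_STORE call (and of IFN_MASK_LOAD
   below) encodes the alignment that may be assumed for the data
   reference: MISALIGN & -MISALIGN isolates the lowest set bit of the
   misalignment, i.e. the largest power of two still known to divide the
   address.  */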
2066 tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)),
2067 misalign ? misalign & -misalign : align);
2068 new_stmt
2069 = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
2070 ptr, vec_mask, vec_rhs);
2071 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2072 if (i == 0)
2073 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2074 else
2075 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2076 prev_stmt_info = vinfo_for_stmt (new_stmt);
2079 else
2081 tree vec_mask = NULL_TREE;
2082 prev_stmt_info = NULL;
2083 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
2084 for (i = 0; i < ncopies; i++)
2086 unsigned align, misalign;
2088 if (i == 0)
2090 vec_mask = vect_get_vec_def_for_operand (mask, stmt);
2091 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
2092 NULL_TREE, &dummy, gsi,
2093 &ptr_incr, false, &inv_p);
2094 gcc_assert (!inv_p);
2096 else
2098 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
2099 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2100 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2101 TYPE_SIZE_UNIT (vectype));
2104 align = TYPE_ALIGN_UNIT (vectype);
2105 if (aligned_access_p (dr))
2106 misalign = 0;
2107 else if (DR_MISALIGNMENT (dr) == -1)
2109 align = TYPE_ALIGN_UNIT (elem_type);
2110 misalign = 0;
2112 else
2113 misalign = DR_MISALIGNMENT (dr);
2114 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2115 misalign);
2116 tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)),
2117 misalign ? misalign & -misalign : align);
2118 new_stmt
2119 = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
2120 ptr, vec_mask);
2121 gimple_call_set_lhs (new_stmt, make_ssa_name (vec_dest));
2122 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2123 if (i == 0)
2124 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2125 else
2126 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2127 prev_stmt_info = vinfo_for_stmt (new_stmt);
2131 if (!is_store)
2133 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
2134 from the IL. */
2135 if (STMT_VINFO_RELATED_STMT (stmt_info))
2137 stmt = STMT_VINFO_RELATED_STMT (stmt_info);
2138 stmt_info = vinfo_for_stmt (stmt);
2140 tree lhs = gimple_call_lhs (stmt);
2141 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
2142 set_vinfo_for_stmt (new_stmt, stmt_info);
2143 set_vinfo_for_stmt (stmt, NULL);
2144 STMT_VINFO_STMT (stmt_info) = new_stmt;
2145 gsi_replace (gsi, new_stmt, true);
2148 return true;
2151 /* Return true if vector types VECTYPE_IN and VECTYPE_OUT have
2152 integer elements and if we can narrow VECTYPE_IN to VECTYPE_OUT
2153 in a single step. On success, store the binary pack code in
2154 *CONVERT_CODE. */
2156 static bool
2157 simple_integer_narrowing (tree vectype_out, tree vectype_in,
2158 tree_code *convert_code)
2160 if (!INTEGRAL_TYPE_P (TREE_TYPE (vectype_out))
2161 || !INTEGRAL_TYPE_P (TREE_TYPE (vectype_in)))
2162 return false;
2164 tree_code code;
2165 int multi_step_cvt = 0;
2166 auto_vec <tree, 8> interm_types;
2167 if (!supportable_narrowing_operation (NOP_EXPR, vectype_out, vectype_in,
2168 &code, &multi_step_cvt,
2169 &interm_types)
2170 || multi_step_cvt)
2171 return false;
2173 *convert_code = code;
2174 return true;
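/* For example (a sketch, assuming a target with the necessary pack
   instruction): a call computed on vectors of int whose result is stored
   in vectors of short can be narrowed in one step with a VEC_PACK_TRUNC
   style operation; multi-step narrowings are rejected above.  */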
2177 /* Function vectorizable_call.
2179 Check if GS performs a function call that can be vectorized.
2180 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2181 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2182 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2184 static bool
2185 vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
2186 slp_tree slp_node)
2188 gcall *stmt;
2189 tree vec_dest;
2190 tree scalar_dest;
2191 tree op, type;
2192 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
2193 stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info;
2194 tree vectype_out, vectype_in;
2195 int nunits_in;
2196 int nunits_out;
2197 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2198 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2199 vec_info *vinfo = stmt_info->vinfo;
2200 tree fndecl, new_temp, rhs_type;
2201 gimple *def_stmt;
2202 enum vect_def_type dt[3]
2203 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
2204 gimple *new_stmt = NULL;
2205 int ncopies, j;
2206 vec<tree> vargs = vNULL;
2207 enum { NARROW, NONE, WIDEN } modifier;
2208 size_t i, nargs;
2209 tree lhs;
2211 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2212 return false;
2214 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
2215 && ! vec_stmt)
2216 return false;
2218 /* Is GS a vectorizable call? */
2219 stmt = dyn_cast <gcall *> (gs);
2220 if (!stmt)
2221 return false;
2223 if (gimple_call_internal_p (stmt)
2224 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
2225 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
2226 return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
2227 slp_node);
2229 if (gimple_call_lhs (stmt) == NULL_TREE
2230 || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
2231 return false;
2233 gcc_checking_assert (!stmt_can_throw_internal (stmt));
2235 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2237 /* Process function arguments. */
2238 rhs_type = NULL_TREE;
2239 vectype_in = NULL_TREE;
2240 nargs = gimple_call_num_args (stmt);
2242 /* Bail out if the function has more than three arguments; we do not have
2243 interesting builtin functions to vectorize with more than two arguments,
2244 except for fma. Zero arguments is not good either. */
2245 if (nargs == 0 || nargs > 3)
2246 return false;
2248 /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic. */
2249 if (gimple_call_internal_p (stmt)
2250 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2252 nargs = 0;
2253 rhs_type = unsigned_type_node;
2256 for (i = 0; i < nargs; i++)
2258 tree opvectype;
2260 op = gimple_call_arg (stmt, i);
2262 /* We can only handle calls with arguments of the same type. */
2263 if (rhs_type
2264 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
2266 if (dump_enabled_p ())
2267 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2268 "argument types differ.\n");
2269 return false;
2271 if (!rhs_type)
2272 rhs_type = TREE_TYPE (op);
2274 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[i], &opvectype))
2276 if (dump_enabled_p ())
2277 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2278 "use not simple.\n");
2279 return false;
2282 if (!vectype_in)
2283 vectype_in = opvectype;
2284 else if (opvectype
2285 && opvectype != vectype_in)
2287 if (dump_enabled_p ())
2288 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2289 "argument vector types differ.\n");
2290 return false;
2293 /* If all arguments are external or constant defs, use a vector type with
2294 the same size as the output vector type. */
2295 if (!vectype_in)
2296 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
2297 if (vec_stmt)
2298 gcc_assert (vectype_in);
2299 if (!vectype_in)
2301 if (dump_enabled_p ())
2303 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2304 "no vectype for scalar type ");
2305 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
2306 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2309 return false;
2312 /* FORNOW */
2313 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2314 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2315 if (nunits_in == nunits_out / 2)
2316 modifier = NARROW;
2317 else if (nunits_out == nunits_in)
2318 modifier = NONE;
2319 else if (nunits_out == nunits_in / 2)
2320 modifier = WIDEN;
2321 else
2322 return false;
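/* E.g. (for illustration) a call taking 4 x int and producing 8 x short
   per output vector is a NARROW case (two wide results are packed into
   one), while 8 x short in and 4 x int out is a WIDEN case; any
   element-count ratio other than 1:1 or 2:1 is rejected here.  */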
2324 /* We only handle functions that do not read or clobber memory. */
2325 if (gimple_vuse (stmt))
2327 if (dump_enabled_p ())
2328 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2329 "function reads from or writes to memory.\n");
2330 return false;
2333 /* For now, we only vectorize functions if a target specific builtin
2334 is available. TODO -- in some cases, it might be profitable to
2335 insert the calls for pieces of the vector, in order to be able
2336 to vectorize other operations in the loop. */
2337 fndecl = NULL_TREE;
2338 internal_fn ifn = IFN_LAST;
2339 combined_fn cfn = gimple_call_combined_fn (stmt);
2340 tree callee = gimple_call_fndecl (stmt);
2342 /* First try using an internal function. */
2343 tree_code convert_code = ERROR_MARK;
2344 if (cfn != CFN_LAST
2345 && (modifier == NONE
2346 || (modifier == NARROW
2347 && simple_integer_narrowing (vectype_out, vectype_in,
2348 &convert_code))))
2349 ifn = vectorizable_internal_function (cfn, callee, vectype_out,
2350 vectype_in);
2352 /* If that fails, try asking for a target-specific built-in function. */
2353 if (ifn == IFN_LAST)
2355 if (cfn != CFN_LAST)
2356 fndecl = targetm.vectorize.builtin_vectorized_function
2357 (cfn, vectype_out, vectype_in);
2358 else
2359 fndecl = targetm.vectorize.builtin_md_vectorized_function
2360 (callee, vectype_out, vectype_in);
2363 if (ifn == IFN_LAST && !fndecl)
2365 if (cfn == CFN_GOMP_SIMD_LANE
2366 && !slp_node
2367 && loop_vinfo
2368 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2369 && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
2370 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2371 == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
2373 /* We can handle IFN_GOMP_SIMD_LANE by returning a
2374 { 0, 1, 2, ... vf - 1 } vector. */
2375 gcc_assert (nargs == 0);
2377 else
2379 if (dump_enabled_p ())
2380 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2381 "function is not vectorizable.\n");
2382 return false;
2386 if (slp_node || PURE_SLP_STMT (stmt_info))
2387 ncopies = 1;
2388 else if (modifier == NARROW && ifn == IFN_LAST)
2389 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2390 else
2391 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2393 /* Sanity check: make sure that at least one copy of the vectorized stmt
2394 needs to be generated. */
2395 gcc_assert (ncopies >= 1);
2397 if (!vec_stmt) /* transformation not required. */
2399 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2400 if (dump_enabled_p ())
2401 dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
2402 "\n");
2403 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
2404 if (ifn != IFN_LAST && modifier == NARROW && !slp_node)
2405 add_stmt_cost (stmt_info->vinfo->target_cost_data, ncopies / 2,
2406 vec_promote_demote, stmt_info, 0, vect_body);
2408 return true;
2411 /** Transform. **/
2413 if (dump_enabled_p ())
2414 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
2416 /* Handle def. */
2417 scalar_dest = gimple_call_lhs (stmt);
2418 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2420 prev_stmt_info = NULL;
2421 if (modifier == NONE || ifn != IFN_LAST)
2423 tree prev_res = NULL_TREE;
2424 for (j = 0; j < ncopies; ++j)
2426 /* Build argument list for the vectorized call. */
2427 if (j == 0)
2428 vargs.create (nargs);
2429 else
2430 vargs.truncate (0);
2432 if (slp_node)
2434 auto_vec<vec<tree> > vec_defs (nargs);
2435 vec<tree> vec_oprnds0;
2437 for (i = 0; i < nargs; i++)
2438 vargs.quick_push (gimple_call_arg (stmt, i));
2439 vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
2440 vec_oprnds0 = vec_defs[0];
2442 /* Arguments are ready. Create the new vector stmt. */
2443 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
2445 size_t k;
2446 for (k = 0; k < nargs; k++)
2448 vec<tree> vec_oprndsk = vec_defs[k];
2449 vargs[k] = vec_oprndsk[i];
2451 if (modifier == NARROW)
2453 tree half_res = make_ssa_name (vectype_in);
2454 new_stmt = gimple_build_call_internal_vec (ifn, vargs);
2455 gimple_call_set_lhs (new_stmt, half_res);
2456 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2457 if ((i & 1) == 0)
2459 prev_res = half_res;
2460 continue;
2462 new_temp = make_ssa_name (vec_dest);
2463 new_stmt = gimple_build_assign (new_temp, convert_code,
2464 prev_res, half_res);
2466 else
2468 if (ifn != IFN_LAST)
2469 new_stmt = gimple_build_call_internal_vec (ifn, vargs);
2470 else
2471 new_stmt = gimple_build_call_vec (fndecl, vargs);
2472 new_temp = make_ssa_name (vec_dest, new_stmt);
2473 gimple_call_set_lhs (new_stmt, new_temp);
2475 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2476 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2479 for (i = 0; i < nargs; i++)
2481 vec<tree> vec_oprndsi = vec_defs[i];
2482 vec_oprndsi.release ();
2484 continue;
2487 for (i = 0; i < nargs; i++)
2489 op = gimple_call_arg (stmt, i);
2490 if (j == 0)
2491 vec_oprnd0
2492 = vect_get_vec_def_for_operand (op, stmt);
2493 else
2495 vec_oprnd0 = gimple_call_arg (new_stmt, i);
2496 vec_oprnd0
2497 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2500 vargs.quick_push (vec_oprnd0);
2503 if (gimple_call_internal_p (stmt)
2504 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2506 tree *v = XALLOCAVEC (tree, nunits_out);
2507 int k;
2508 for (k = 0; k < nunits_out; ++k)
2509 v[k] = build_int_cst (unsigned_type_node, j * nunits_out + k);
2510 tree cst = build_vector (vectype_out, v);
2511 tree new_var
2512 = vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_");
2513 gimple *init_stmt = gimple_build_assign (new_var, cst);
2514 vect_init_vector_1 (stmt, init_stmt, NULL);
2515 new_temp = make_ssa_name (vec_dest);
2516 new_stmt = gimple_build_assign (new_temp, new_var);
2518 else if (modifier == NARROW)
2520 tree half_res = make_ssa_name (vectype_in);
2521 new_stmt = gimple_build_call_internal_vec (ifn, vargs);
2522 gimple_call_set_lhs (new_stmt, half_res);
2523 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2524 if ((j & 1) == 0)
2526 prev_res = half_res;
2527 continue;
2529 new_temp = make_ssa_name (vec_dest);
2530 new_stmt = gimple_build_assign (new_temp, convert_code,
2531 prev_res, half_res);
2533 else
2535 if (ifn != IFN_LAST)
2536 new_stmt = gimple_build_call_internal_vec (ifn, vargs);
2537 else
2538 new_stmt = gimple_build_call_vec (fndecl, vargs);
2539 new_temp = make_ssa_name (vec_dest, new_stmt);
2540 gimple_call_set_lhs (new_stmt, new_temp);
2542 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2544 if (j == (modifier == NARROW ? 1 : 0))
2545 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2546 else
2547 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2549 prev_stmt_info = vinfo_for_stmt (new_stmt);
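/* Note that when MODIFIER is NARROW (with an internal function, handled
   in the loop above) every even iteration only computes the first half of
   a result and continues, so the first complete vector stmt is recorded
   at j == 1 rather than j == 0.  */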
2552 else if (modifier == NARROW)
2554 for (j = 0; j < ncopies; ++j)
2556 /* Build argument list for the vectorized call. */
2557 if (j == 0)
2558 vargs.create (nargs * 2);
2559 else
2560 vargs.truncate (0);
2562 if (slp_node)
2564 auto_vec<vec<tree> > vec_defs (nargs);
2565 vec<tree> vec_oprnds0;
2567 for (i = 0; i < nargs; i++)
2568 vargs.quick_push (gimple_call_arg (stmt, i));
2569 vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
2570 vec_oprnds0 = vec_defs[0];
2572 /* Arguments are ready. Create the new vector stmt. */
2573 for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
2575 size_t k;
2576 vargs.truncate (0);
2577 for (k = 0; k < nargs; k++)
2579 vec<tree> vec_oprndsk = vec_defs[k];
2580 vargs.quick_push (vec_oprndsk[i]);
2581 vargs.quick_push (vec_oprndsk[i + 1]);
2583 if (ifn != IFN_LAST)
2584 new_stmt = gimple_build_call_internal_vec (ifn, vargs);
2585 else
2586 new_stmt = gimple_build_call_vec (fndecl, vargs);
2587 new_temp = make_ssa_name (vec_dest, new_stmt);
2588 gimple_call_set_lhs (new_stmt, new_temp);
2589 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2590 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2593 for (i = 0; i < nargs; i++)
2595 vec<tree> vec_oprndsi = vec_defs[i];
2596 vec_oprndsi.release ();
2598 continue;
2601 for (i = 0; i < nargs; i++)
2603 op = gimple_call_arg (stmt, i);
2604 if (j == 0)
2606 vec_oprnd0
2607 = vect_get_vec_def_for_operand (op, stmt);
2608 vec_oprnd1
2609 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2611 else
2613 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
2614 vec_oprnd0
2615 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
2616 vec_oprnd1
2617 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2620 vargs.quick_push (vec_oprnd0);
2621 vargs.quick_push (vec_oprnd1);
2624 new_stmt = gimple_build_call_vec (fndecl, vargs);
2625 new_temp = make_ssa_name (vec_dest, new_stmt);
2626 gimple_call_set_lhs (new_stmt, new_temp);
2627 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2629 if (j == 0)
2630 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2631 else
2632 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2634 prev_stmt_info = vinfo_for_stmt (new_stmt);
2637 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2639 else
2640 /* No current target implements this case. */
2641 return false;
2643 vargs.release ();
2645 /* The call in STMT might prevent it from being removed in dce.
2646 However, we cannot remove it here, due to the way the ssa name
2647 it defines is mapped to the new definition. So just replace the
2648 rhs of the statement with something harmless. */
2650 if (slp_node)
2651 return true;
2653 type = TREE_TYPE (scalar_dest);
2654 if (is_pattern_stmt_p (stmt_info))
2655 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
2656 else
2657 lhs = gimple_call_lhs (stmt);
2659 if (gimple_call_internal_p (stmt)
2660 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2662 /* Replace uses of the lhs of the GOMP_SIMD_LANE call outside the loop
2663 with vf - 1 rather than 0, i.e. the last iteration of the
2664 vectorized loop. */
2665 imm_use_iterator iter;
2666 use_operand_p use_p;
2667 gimple *use_stmt;
2668 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2670 basic_block use_bb = gimple_bb (use_stmt);
2671 if (use_bb
2672 && !flow_bb_inside_loop_p (LOOP_VINFO_LOOP (loop_vinfo), use_bb))
2674 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2675 SET_USE (use_p, build_int_cst (TREE_TYPE (lhs),
2676 ncopies * nunits_out - 1));
2677 update_stmt (use_stmt);
2682 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
2683 set_vinfo_for_stmt (new_stmt, stmt_info);
2684 set_vinfo_for_stmt (stmt, NULL);
2685 STMT_VINFO_STMT (stmt_info) = new_stmt;
2686 gsi_replace (gsi, new_stmt, false);
2688 return true;
2692 struct simd_call_arg_info
2694 tree vectype;
2695 tree op;
2696 enum vect_def_type dt;
2697 HOST_WIDE_INT linear_step;
2698 unsigned int align;
2699 bool simd_lane_linear;
2702 /* Helper function of vectorizable_simd_clone_call. If OP, an SSA_NAME,
2703 is linear within simd lane (but not within whole loop), note it in
2704 *ARGINFO. */
2706 static void
2707 vect_simd_lane_linear (tree op, struct loop *loop,
2708 struct simd_call_arg_info *arginfo)
2710 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
2712 if (!is_gimple_assign (def_stmt)
2713 || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
2714 || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
2715 return;
2717 tree base = gimple_assign_rhs1 (def_stmt);
2718 HOST_WIDE_INT linear_step = 0;
2719 tree v = gimple_assign_rhs2 (def_stmt);
2720 while (TREE_CODE (v) == SSA_NAME)
2722 tree t;
2723 def_stmt = SSA_NAME_DEF_STMT (v);
2724 if (is_gimple_assign (def_stmt))
2725 switch (gimple_assign_rhs_code (def_stmt))
2727 case PLUS_EXPR:
2728 t = gimple_assign_rhs2 (def_stmt);
2729 if (linear_step || TREE_CODE (t) != INTEGER_CST)
2730 return;
2731 base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
2732 v = gimple_assign_rhs1 (def_stmt);
2733 continue;
2734 case MULT_EXPR:
2735 t = gimple_assign_rhs2 (def_stmt);
2736 if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
2737 return;
2738 linear_step = tree_to_shwi (t);
2739 v = gimple_assign_rhs1 (def_stmt);
2740 continue;
2741 CASE_CONVERT:
2742 t = gimple_assign_rhs1 (def_stmt);
2743 if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
2744 || (TYPE_PRECISION (TREE_TYPE (v))
2745 < TYPE_PRECISION (TREE_TYPE (t))))
2746 return;
2747 if (!linear_step)
2748 linear_step = 1;
2749 v = t;
2750 continue;
2751 default:
2752 return;
2754 else if (is_gimple_call (def_stmt)
2755 && gimple_call_internal_p (def_stmt)
2756 && gimple_call_internal_fn (def_stmt) == IFN_GOMP_SIMD_LANE
2757 && loop->simduid
2758 && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
2759 && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
2760 == loop->simduid))
2762 if (!linear_step)
2763 linear_step = 1;
2764 arginfo->linear_step = linear_step;
2765 arginfo->op = base;
2766 arginfo->simd_lane_linear = true;
2767 return;
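/* A sketch of what the walk above recognizes (conversions omitted,
   hypothetical SSA names):
     _1 = GOMP_SIMD_LANE (simduid.0);
     _2 = _1 * 4;
     p_3 = &array + _2;
   records base &array and linear_step 4 for p_3, i.e. p_3 is linear
   within a simd lane even though it is not an IV of the whole loop.  */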
2772 /* Function vectorizable_simd_clone_call.
2774 Check if STMT performs a function call that can be vectorized
2775 by calling a simd clone of the function.
2776 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2777 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2778 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2780 static bool
2781 vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
2782 gimple **vec_stmt, slp_tree slp_node)
2784 tree vec_dest;
2785 tree scalar_dest;
2786 tree op, type;
2787 tree vec_oprnd0 = NULL_TREE;
2788 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
2789 tree vectype;
2790 unsigned int nunits;
2791 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2792 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2793 vec_info *vinfo = stmt_info->vinfo;
2794 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
2795 tree fndecl, new_temp;
2796 gimple *def_stmt;
2797 gimple *new_stmt = NULL;
2798 int ncopies, j;
2799 vec<simd_call_arg_info> arginfo = vNULL;
2800 vec<tree> vargs = vNULL;
2801 size_t i, nargs;
2802 tree lhs, rtype, ratype;
2803 vec<constructor_elt, va_gc> *ret_ctor_elts;
2805 /* Is STMT a vectorizable call? */
2806 if (!is_gimple_call (stmt))
2807 return false;
2809 fndecl = gimple_call_fndecl (stmt);
2810 if (fndecl == NULL_TREE)
2811 return false;
2813 struct cgraph_node *node = cgraph_node::get (fndecl);
2814 if (node == NULL || node->simd_clones == NULL)
2815 return false;
2817 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2818 return false;
2820 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
2821 && ! vec_stmt)
2822 return false;
2824 if (gimple_call_lhs (stmt)
2825 && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
2826 return false;
2828 gcc_checking_assert (!stmt_can_throw_internal (stmt));
2830 vectype = STMT_VINFO_VECTYPE (stmt_info);
2832 if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
2833 return false;
2835 /* FORNOW */
2836 if (slp_node || PURE_SLP_STMT (stmt_info))
2837 return false;
2839 /* Process function arguments. */
2840 nargs = gimple_call_num_args (stmt);
2842 /* Bail out if the function has zero arguments. */
2843 if (nargs == 0)
2844 return false;
2846 arginfo.create (nargs);
2848 for (i = 0; i < nargs; i++)
2850 simd_call_arg_info thisarginfo;
2851 affine_iv iv;
2853 thisarginfo.linear_step = 0;
2854 thisarginfo.align = 0;
2855 thisarginfo.op = NULL_TREE;
2856 thisarginfo.simd_lane_linear = false;
2858 op = gimple_call_arg (stmt, i);
2859 if (!vect_is_simple_use (op, vinfo, &def_stmt, &thisarginfo.dt,
2860 &thisarginfo.vectype)
2861 || thisarginfo.dt == vect_uninitialized_def)
2863 if (dump_enabled_p ())
2864 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2865 "use not simple.\n");
2866 arginfo.release ();
2867 return false;
2870 if (thisarginfo.dt == vect_constant_def
2871 || thisarginfo.dt == vect_external_def)
2872 gcc_assert (thisarginfo.vectype == NULL_TREE);
2873 else
2874 gcc_assert (thisarginfo.vectype != NULL_TREE);
2876 /* For linear arguments, the analysis phase should have saved
2877 the base and step in STMT_VINFO_SIMD_CLONE_INFO. */
2878 if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
2879 && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
2881 gcc_assert (vec_stmt);
2882 thisarginfo.linear_step
2883 = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
2884 thisarginfo.op
2885 = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
2886 thisarginfo.simd_lane_linear
2887 = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
2888 == boolean_true_node);
2889 /* If the loop has been peeled for alignment, we need to adjust it. */
2890 tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
2891 tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
2892 if (n1 != n2 && !thisarginfo.simd_lane_linear)
2894 tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
2895 tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
2896 tree opt = TREE_TYPE (thisarginfo.op);
2897 bias = fold_convert (TREE_TYPE (step), bias);
2898 bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
2899 thisarginfo.op
2900 = fold_build2 (POINTER_TYPE_P (opt)
2901 ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
2902 thisarginfo.op, bias);
2905 else if (!vec_stmt
2906 && thisarginfo.dt != vect_constant_def
2907 && thisarginfo.dt != vect_external_def
2908 && loop_vinfo
2909 && TREE_CODE (op) == SSA_NAME
2910 && simple_iv (loop, loop_containing_stmt (stmt), op,
2911 &iv, false)
2912 && tree_fits_shwi_p (iv.step))
2914 thisarginfo.linear_step = tree_to_shwi (iv.step);
2915 thisarginfo.op = iv.base;
2917 else if ((thisarginfo.dt == vect_constant_def
2918 || thisarginfo.dt == vect_external_def)
2919 && POINTER_TYPE_P (TREE_TYPE (op)))
2920 thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
2921 /* Addresses of array elements indexed by GOMP_SIMD_LANE are
2922 linear too. */
2923 if (POINTER_TYPE_P (TREE_TYPE (op))
2924 && !thisarginfo.linear_step
2925 && !vec_stmt
2926 && thisarginfo.dt != vect_constant_def
2927 && thisarginfo.dt != vect_external_def
2928 && loop_vinfo
2929 && !slp_node
2930 && TREE_CODE (op) == SSA_NAME)
2931 vect_simd_lane_linear (op, loop, &thisarginfo);
2933 arginfo.quick_push (thisarginfo);
2936 unsigned int badness = 0;
2937 struct cgraph_node *bestn = NULL;
2938 if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
2939 bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
2940 else
2941 for (struct cgraph_node *n = node->simd_clones; n != NULL;
2942 n = n->simdclone->next_clone)
2944 unsigned int this_badness = 0;
2945 if (n->simdclone->simdlen
2946 > (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
2947 || n->simdclone->nargs != nargs)
2948 continue;
2949 if (n->simdclone->simdlen
2950 < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo))
2951 this_badness += (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
2952 - exact_log2 (n->simdclone->simdlen)) * 1024;
2953 if (n->simdclone->inbranch)
2954 this_badness += 2048;
2955 int target_badness = targetm.simd_clone.usable (n);
2956 if (target_badness < 0)
2957 continue;
2958 this_badness += target_badness * 512;
2959 /* FORNOW: Have to add code to add the mask argument. */
2960 if (n->simdclone->inbranch)
2961 continue;
2962 for (i = 0; i < nargs; i++)
2964 switch (n->simdclone->args[i].arg_type)
2966 case SIMD_CLONE_ARG_TYPE_VECTOR:
2967 if (!useless_type_conversion_p
2968 (n->simdclone->args[i].orig_type,
2969 TREE_TYPE (gimple_call_arg (stmt, i))))
2970 i = -1;
2971 else if (arginfo[i].dt == vect_constant_def
2972 || arginfo[i].dt == vect_external_def
2973 || arginfo[i].linear_step)
2974 this_badness += 64;
2975 break;
2976 case SIMD_CLONE_ARG_TYPE_UNIFORM:
2977 if (arginfo[i].dt != vect_constant_def
2978 && arginfo[i].dt != vect_external_def)
2979 i = -1;
2980 break;
2981 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
2982 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
2983 if (arginfo[i].dt == vect_constant_def
2984 || arginfo[i].dt == vect_external_def
2985 || (arginfo[i].linear_step
2986 != n->simdclone->args[i].linear_step))
2987 i = -1;
2988 break;
2989 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
2990 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
2991 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
2992 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
2993 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
2994 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
2995 /* FORNOW */
2996 i = -1;
2997 break;
2998 case SIMD_CLONE_ARG_TYPE_MASK:
2999 gcc_unreachable ();
3001 if (i == (size_t) -1)
3002 break;
3003 if (n->simdclone->args[i].alignment > arginfo[i].align)
3005 i = -1;
3006 break;
3008 if (arginfo[i].align)
3009 this_badness += (exact_log2 (arginfo[i].align)
3010 - exact_log2 (n->simdclone->args[i].alignment));
3012 if (i == (size_t) -1)
3013 continue;
3014 if (bestn == NULL || this_badness < badness)
3016 bestn = n;
3017 badness = this_badness;
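/* To summarize the scoring above: a clone whose simdlen is below the
   vectorization factor costs 1024 per halving, an inbranch clone 2048,
   target-reported unsuitability 512 per unit, and a vector argument that
   is really uniform or linear 64, plus minor alignment adjustments; the
   candidate with the lowest total badness is chosen.  */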
3021 if (bestn == NULL)
3023 arginfo.release ();
3024 return false;
3027 for (i = 0; i < nargs; i++)
3028 if ((arginfo[i].dt == vect_constant_def
3029 || arginfo[i].dt == vect_external_def)
3030 && bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
3032 arginfo[i].vectype
3033 = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
3034 i)));
3035 if (arginfo[i].vectype == NULL
3036 || (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
3037 > bestn->simdclone->simdlen))
3039 arginfo.release ();
3040 return false;
3044 fndecl = bestn->decl;
3045 nunits = bestn->simdclone->simdlen;
3046 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
3048 /* If the function isn't const, only allow it in simd loops where the user
3049 has asserted that at least nunits consecutive iterations can be
3050 performed using SIMD instructions. */
3051 if ((loop == NULL || (unsigned) loop->safelen < nunits)
3052 && gimple_vuse (stmt))
3054 arginfo.release ();
3055 return false;
3058 /* Sanity check: make sure that at least one copy of the vectorized stmt
3059 needs to be generated. */
3060 gcc_assert (ncopies >= 1);
3062 if (!vec_stmt) /* transformation not required. */
3064 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
3065 for (i = 0; i < nargs; i++)
3066 if (bestn->simdclone->args[i].arg_type
3067 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
3069 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
3070 + 1);
3071 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
3072 tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
3073 ? size_type_node : TREE_TYPE (arginfo[i].op);
3074 tree ls = build_int_cst (lst, arginfo[i].linear_step);
3075 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
3076 tree sll = arginfo[i].simd_lane_linear
3077 ? boolean_true_node : boolean_false_node;
3078 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
3080 STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
3081 if (dump_enabled_p ())
3082 dump_printf_loc (MSG_NOTE, vect_location,
3083 "=== vectorizable_simd_clone_call ===\n");
3084 /* vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
3085 arginfo.release ();
3086 return true;
3089 /** Transform. **/
3091 if (dump_enabled_p ())
3092 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
3094 /* Handle def. */
3095 scalar_dest = gimple_call_lhs (stmt);
3096 vec_dest = NULL_TREE;
3097 rtype = NULL_TREE;
3098 ratype = NULL_TREE;
3099 if (scalar_dest)
3101 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3102 rtype = TREE_TYPE (TREE_TYPE (fndecl));
3103 if (TREE_CODE (rtype) == ARRAY_TYPE)
3105 ratype = rtype;
3106 rtype = TREE_TYPE (ratype);
3110 prev_stmt_info = NULL;
3111 for (j = 0; j < ncopies; ++j)
3113 /* Build argument list for the vectorized call. */
3114 if (j == 0)
3115 vargs.create (nargs);
3116 else
3117 vargs.truncate (0);
3119 for (i = 0; i < nargs; i++)
3121 unsigned int k, l, m, o;
3122 tree atype;
3123 op = gimple_call_arg (stmt, i);
3124 switch (bestn->simdclone->args[i].arg_type)
3126 case SIMD_CLONE_ARG_TYPE_VECTOR:
3127 atype = bestn->simdclone->args[i].vector_type;
3128 o = nunits / TYPE_VECTOR_SUBPARTS (atype);
3129 for (m = j * o; m < (j + 1) * o; m++)
3131 if (TYPE_VECTOR_SUBPARTS (atype)
3132 < TYPE_VECTOR_SUBPARTS (arginfo[i].vectype))
3134 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
3135 k = (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
3136 / TYPE_VECTOR_SUBPARTS (atype));
3137 gcc_assert ((k & (k - 1)) == 0);
3138 if (m == 0)
3139 vec_oprnd0
3140 = vect_get_vec_def_for_operand (op, stmt);
3141 else
3143 vec_oprnd0 = arginfo[i].op;
3144 if ((m & (k - 1)) == 0)
3145 vec_oprnd0
3146 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
3147 vec_oprnd0);
3149 arginfo[i].op = vec_oprnd0;
3150 vec_oprnd0
3151 = build3 (BIT_FIELD_REF, atype, vec_oprnd0,
3152 size_int (prec),
3153 bitsize_int ((m & (k - 1)) * prec));
3154 new_stmt
3155 = gimple_build_assign (make_ssa_name (atype),
3156 vec_oprnd0);
3157 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3158 vargs.safe_push (gimple_assign_lhs (new_stmt));
3160 else
3162 k = (TYPE_VECTOR_SUBPARTS (atype)
3163 / TYPE_VECTOR_SUBPARTS (arginfo[i].vectype));
3164 gcc_assert ((k & (k - 1)) == 0);
3165 vec<constructor_elt, va_gc> *ctor_elts;
3166 if (k != 1)
3167 vec_alloc (ctor_elts, k);
3168 else
3169 ctor_elts = NULL;
3170 for (l = 0; l < k; l++)
3172 if (m == 0 && l == 0)
3173 vec_oprnd0
3174 = vect_get_vec_def_for_operand (op, stmt);
3175 else
3176 vec_oprnd0
3177 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
3178 arginfo[i].op);
3179 arginfo[i].op = vec_oprnd0;
3180 if (k == 1)
3181 break;
3182 CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
3183 vec_oprnd0);
3185 if (k == 1)
3186 vargs.safe_push (vec_oprnd0);
3187 else
3189 vec_oprnd0 = build_constructor (atype, ctor_elts);
3190 new_stmt
3191 = gimple_build_assign (make_ssa_name (atype),
3192 vec_oprnd0);
3193 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3194 vargs.safe_push (gimple_assign_lhs (new_stmt));
3198 break;
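/* To recap the vector-argument case above: when the clone expects a
   narrower vector than the vectorized operand, the operand is split with
   BIT_FIELD_REFs; when it expects a wider one, consecutive vector defs
   are glued together with a CONSTRUCTOR.  */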
3199 case SIMD_CLONE_ARG_TYPE_UNIFORM:
3200 vargs.safe_push (op);
3201 break;
3202 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
3203 if (j == 0)
3205 gimple_seq stmts;
3206 arginfo[i].op
3207 = force_gimple_operand (arginfo[i].op, &stmts, true,
3208 NULL_TREE);
3209 if (stmts != NULL)
3211 basic_block new_bb;
3212 edge pe = loop_preheader_edge (loop);
3213 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
3214 gcc_assert (!new_bb);
3216 if (arginfo[i].simd_lane_linear)
3218 vargs.safe_push (arginfo[i].op);
3219 break;
3221 tree phi_res = copy_ssa_name (op);
3222 gphi *new_phi = create_phi_node (phi_res, loop->header);
3223 set_vinfo_for_stmt (new_phi,
3224 new_stmt_vec_info (new_phi, loop_vinfo));
3225 add_phi_arg (new_phi, arginfo[i].op,
3226 loop_preheader_edge (loop), UNKNOWN_LOCATION);
3227 enum tree_code code
3228 = POINTER_TYPE_P (TREE_TYPE (op))
3229 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3230 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3231 ? sizetype : TREE_TYPE (op);
3232 widest_int cst
3233 = wi::mul (bestn->simdclone->args[i].linear_step,
3234 ncopies * nunits);
3235 tree tcst = wide_int_to_tree (type, cst);
3236 tree phi_arg = copy_ssa_name (op);
3237 new_stmt
3238 = gimple_build_assign (phi_arg, code, phi_res, tcst);
3239 gimple_stmt_iterator si = gsi_after_labels (loop->header);
3240 gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
3241 set_vinfo_for_stmt (new_stmt,
3242 new_stmt_vec_info (new_stmt, loop_vinfo));
3243 add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
3244 UNKNOWN_LOCATION);
3245 arginfo[i].op = phi_res;
3246 vargs.safe_push (phi_res);
3248 else
3250 enum tree_code code
3251 = POINTER_TYPE_P (TREE_TYPE (op))
3252 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3253 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3254 ? sizetype : TREE_TYPE (op);
3255 widest_int cst
3256 = wi::mul (bestn->simdclone->args[i].linear_step,
3257 j * nunits);
3258 tree tcst = wide_int_to_tree (type, cst);
3259 new_temp = make_ssa_name (TREE_TYPE (op));
3260 new_stmt = gimple_build_assign (new_temp, code,
3261 arginfo[i].op, tcst);
3262 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3263 vargs.safe_push (new_temp);
3265 break;
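/* I.e. for a constant-step linear argument (unless it is linear only
   within a simd lane, in which case the base is passed directly) the
   first copy materializes an IV: a PHI in the loop header starting at the
   argument's base and stepping by linear_step * ncopies * nunits per
   vectorized-loop iteration, while later copies just add
   linear_step * j * nunits to that IV.  */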
3266 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
3267 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
3268 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
3269 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
3270 default:
3271 gcc_unreachable ();
3275 new_stmt = gimple_build_call_vec (fndecl, vargs);
3276 if (vec_dest)
3278 gcc_assert (ratype || TYPE_VECTOR_SUBPARTS (rtype) == nunits);
3279 if (ratype)
3280 new_temp = create_tmp_var (ratype);
3281 else if (TYPE_VECTOR_SUBPARTS (vectype)
3282 == TYPE_VECTOR_SUBPARTS (rtype))
3283 new_temp = make_ssa_name (vec_dest, new_stmt);
3284 else
3285 new_temp = make_ssa_name (rtype, new_stmt);
3286 gimple_call_set_lhs (new_stmt, new_temp);
3288 vect_finish_stmt_generation (stmt, new_stmt, gsi);
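/* What follows reshapes the clone's return value: if the clone's simdlen
   exceeds the element count of VECTYPE, one call result is split into
   several vector stmts; if it is smaller, consecutive call results are
   accumulated into a CONSTRUCTOR; an array return type (RATYPE) is read
   back via MEM_REF/ARRAY_REF and the temporary is then clobbered.  */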
3290 if (vec_dest)
3292 if (TYPE_VECTOR_SUBPARTS (vectype) < nunits)
3294 unsigned int k, l;
3295 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
3296 k = nunits / TYPE_VECTOR_SUBPARTS (vectype);
3297 gcc_assert ((k & (k - 1)) == 0);
3298 for (l = 0; l < k; l++)
3300 tree t;
3301 if (ratype)
3303 t = build_fold_addr_expr (new_temp);
3304 t = build2 (MEM_REF, vectype, t,
3305 build_int_cst (TREE_TYPE (t),
3306 l * prec / BITS_PER_UNIT));
3308 else
3309 t = build3 (BIT_FIELD_REF, vectype, new_temp,
3310 size_int (prec), bitsize_int (l * prec));
3311 new_stmt
3312 = gimple_build_assign (make_ssa_name (vectype), t);
3313 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3314 if (j == 0 && l == 0)
3315 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3316 else
3317 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3319 prev_stmt_info = vinfo_for_stmt (new_stmt);
3322 if (ratype)
3324 tree clobber = build_constructor (ratype, NULL);
3325 TREE_THIS_VOLATILE (clobber) = 1;
3326 new_stmt = gimple_build_assign (new_temp, clobber);
3327 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3329 continue;
3331 else if (TYPE_VECTOR_SUBPARTS (vectype) > nunits)
3333 unsigned int k = (TYPE_VECTOR_SUBPARTS (vectype)
3334 / TYPE_VECTOR_SUBPARTS (rtype));
3335 gcc_assert ((k & (k - 1)) == 0);
3336 if ((j & (k - 1)) == 0)
3337 vec_alloc (ret_ctor_elts, k);
3338 if (ratype)
3340 unsigned int m, o = nunits / TYPE_VECTOR_SUBPARTS (rtype);
3341 for (m = 0; m < o; m++)
3343 tree tem = build4 (ARRAY_REF, rtype, new_temp,
3344 size_int (m), NULL_TREE, NULL_TREE);
3345 new_stmt
3346 = gimple_build_assign (make_ssa_name (rtype), tem);
3347 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3348 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
3349 gimple_assign_lhs (new_stmt));
3351 tree clobber = build_constructor (ratype, NULL);
3352 TREE_THIS_VOLATILE (clobber) = 1;
3353 new_stmt = gimple_build_assign (new_temp, clobber);
3354 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3356 else
3357 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
3358 if ((j & (k - 1)) != k - 1)
3359 continue;
3360 vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
3361 new_stmt
3362 = gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
3363 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3365 if ((unsigned) j == k - 1)
3366 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3367 else
3368 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3370 prev_stmt_info = vinfo_for_stmt (new_stmt);
3371 continue;
3373 else if (ratype)
3375 tree t = build_fold_addr_expr (new_temp);
3376 t = build2 (MEM_REF, vectype, t,
3377 build_int_cst (TREE_TYPE (t), 0));
3378 new_stmt
3379 = gimple_build_assign (make_ssa_name (vec_dest), t);
3380 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3381 tree clobber = build_constructor (ratype, NULL);
3382 TREE_THIS_VOLATILE (clobber) = 1;
3383 vect_finish_stmt_generation (stmt,
3384 gimple_build_assign (new_temp,
3385 clobber), gsi);
3389 if (j == 0)
3390 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3391 else
3392 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3394 prev_stmt_info = vinfo_for_stmt (new_stmt);
3397 vargs.release ();
3399 /* The call in STMT might prevent it from being removed in dce.
3400 However, we cannot remove it here, due to the way the ssa name
3401 it defines is mapped to the new definition. So just replace the
3402 rhs of the statement with something harmless. */
3404 if (slp_node)
3405 return true;
3407 if (scalar_dest)
3409 type = TREE_TYPE (scalar_dest);
3410 if (is_pattern_stmt_p (stmt_info))
3411 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
3412 else
3413 lhs = gimple_call_lhs (stmt);
3414 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
3416 else
3417 new_stmt = gimple_build_nop ();
3418 set_vinfo_for_stmt (new_stmt, stmt_info);
3419 set_vinfo_for_stmt (stmt, NULL);
3420 STMT_VINFO_STMT (stmt_info) = new_stmt;
3421 gsi_replace (gsi, new_stmt, true);
3422 unlink_stmt_vdef (stmt);
3424 return true;
3428 /* Function vect_gen_widened_results_half
3430 Create a vector stmt whose code is CODE, whose operand count is given by
3431 OP_TYPE and whose result variable is VEC_DEST; its arguments are
3432 VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at BSI.
3433 In the case that CODE is a CALL_EXPR, this means that a call to DECL
3434 needs to be created (DECL is a function-decl of a target-builtin).
3435 STMT is the original scalar stmt that we are vectorizing. */
3437 static gimple *
3438 vect_gen_widened_results_half (enum tree_code code,
3439 tree decl,
3440 tree vec_oprnd0, tree vec_oprnd1, int op_type,
3441 tree vec_dest, gimple_stmt_iterator *gsi,
3442 gimple *stmt)
3444 gimple *new_stmt;
3445 tree new_temp;
3447 /* Generate half of the widened result: */
3448 if (code == CALL_EXPR)
3450 /* Target specific support */
3451 if (op_type == binary_op)
3452 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
3453 else
3454 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
3455 new_temp = make_ssa_name (vec_dest, new_stmt);
3456 gimple_call_set_lhs (new_stmt, new_temp);
3458 else
3460 /* Generic support */
3461 gcc_assert (op_type == TREE_CODE_LENGTH (code));
3462 if (op_type != binary_op)
3463 vec_oprnd1 = NULL;
3464 new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
3465 new_temp = make_ssa_name (vec_dest, new_stmt);
3466 gimple_assign_set_lhs (new_stmt, new_temp);
3468 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3470 return new_stmt;
3474 /* Get vectorized definitions for loop-based vectorization. For the first
3475 operand we call vect_get_vec_def_for_operand() (with OPRND containing
3476 scalar operand), and for the rest we get a copy with
3477 vect_get_vec_def_for_stmt_copy() using the previous vector definition
3478 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
3479 The vectors are collected into VEC_OPRNDS. */
3481 static void
3482 vect_get_loop_based_defs (tree *oprnd, gimple *stmt, enum vect_def_type dt,
3483 vec<tree> *vec_oprnds, int multi_step_cvt)
3485 tree vec_oprnd;
3487 /* Get first vector operand. */
3488 /* All the vector operands except the very first one (which is the scalar operand)
3489 are stmt copies. */
3490 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
3491 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt);
3492 else
3493 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
3495 vec_oprnds->quick_push (vec_oprnd);
3497 /* Get second vector operand. */
3498 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
3499 vec_oprnds->quick_push (vec_oprnd);
3501 *oprnd = vec_oprnd;
3503 /* For conversion in multiple steps, continue to get operands
3504 recursively. */
3505 if (multi_step_cvt)
3506 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
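/* For instance, a two-step demotion (MULTI_STEP_CVT == 1) collects four
   vector defs here: the first from the scalar operand, the rest as
   successive stmt copies; the demotion code below then consumes them in
   pairs.  */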
3510 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
3511 For multi-step conversions store the resulting vectors and call the function
3512 recursively. */
3514 static void
3515 vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
3516 int multi_step_cvt, gimple *stmt,
3517 vec<tree> vec_dsts,
3518 gimple_stmt_iterator *gsi,
3519 slp_tree slp_node, enum tree_code code,
3520 stmt_vec_info *prev_stmt_info)
3522 unsigned int i;
3523 tree vop0, vop1, new_tmp, vec_dest;
3524 gimple *new_stmt;
3525 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3527 vec_dest = vec_dsts.pop ();
3529 for (i = 0; i < vec_oprnds->length (); i += 2)
3531 /* Create demotion operation. */
3532 vop0 = (*vec_oprnds)[i];
3533 vop1 = (*vec_oprnds)[i + 1];
3534 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
3535 new_tmp = make_ssa_name (vec_dest, new_stmt);
3536 gimple_assign_set_lhs (new_stmt, new_tmp);
3537 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3539 if (multi_step_cvt)
3540 /* Store the resulting vector for next recursive call. */
3541 (*vec_oprnds)[i/2] = new_tmp;
3542 else
3544 /* This is the last step of the conversion sequence. Store the
3545 vectors in SLP_NODE or in vector info of the scalar statement
3546 (or in STMT_VINFO_RELATED_STMT chain). */
3547 if (slp_node)
3548 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3549 else
3551 if (!*prev_stmt_info)
3552 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3553 else
3554 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
3556 *prev_stmt_info = vinfo_for_stmt (new_stmt);
3561 /* For multi-step demotion operations we first generate demotion operations
3562 from the source type to the intermediate types, and then combine the
3563 results (stored in VEC_OPRNDS) in a demotion operation to the destination
3564 type. */
3565 if (multi_step_cvt)
3567 /* At each level of recursion we have half of the operands we had at the
3568 previous level. */
3569 vec_oprnds->truncate ((i+1)/2);
3570 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
3571 stmt, vec_dsts, gsi, slp_node,
3572 VEC_PACK_TRUNC_EXPR,
3573 prev_stmt_info);
3576 vec_dsts.quick_push (vec_dest);
3580 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
3581 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
3582 the resulting vectors and call the function recursively. */
3584 static void
3585 vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
3586 vec<tree> *vec_oprnds1,
3587 gimple *stmt, tree vec_dest,
3588 gimple_stmt_iterator *gsi,
3589 enum tree_code code1,
3590 enum tree_code code2, tree decl1,
3591 tree decl2, int op_type)
3593 int i;
3594 tree vop0, vop1, new_tmp1, new_tmp2;
3595 gimple *new_stmt1, *new_stmt2;
3596 vec<tree> vec_tmp = vNULL;
3598 vec_tmp.create (vec_oprnds0->length () * 2);
3599 FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
3601 if (op_type == binary_op)
3602 vop1 = (*vec_oprnds1)[i];
3603 else
3604 vop1 = NULL_TREE;
3606 /* Generate the two halves of promotion operation. */
3607 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
3608 op_type, vec_dest, gsi, stmt);
3609 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
3610 op_type, vec_dest, gsi, stmt);
3611 if (is_gimple_call (new_stmt1))
3613 new_tmp1 = gimple_call_lhs (new_stmt1);
3614 new_tmp2 = gimple_call_lhs (new_stmt2);
3616 else
3618 new_tmp1 = gimple_assign_lhs (new_stmt1);
3619 new_tmp2 = gimple_assign_lhs (new_stmt2);
3622 /* Store the results for the next step. */
3623 vec_tmp.quick_push (new_tmp1);
3624 vec_tmp.quick_push (new_tmp2);
3627 vec_oprnds0->release ();
3628 *vec_oprnds0 = vec_tmp;
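/* Note the vector count doubles on each promotion step: every input
   vector yields a low-part and a high-part result, and both are pushed
   into VEC_OPRNDS0 for the next (wider) step.  */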
3632 /* Check if STMT performs a conversion operation that can be vectorized.
3633 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3634 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3635 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3637 static bool
3638 vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
3639 gimple **vec_stmt, slp_tree slp_node)
3641 tree vec_dest;
3642 tree scalar_dest;
3643 tree op0, op1 = NULL_TREE;
3644 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
3645 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3646 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3647 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
3648 enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
3649 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
3650 tree new_temp;
3651 gimple *def_stmt;
3652 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
3653 gimple *new_stmt = NULL;
3654 stmt_vec_info prev_stmt_info;
3655 int nunits_in;
3656 int nunits_out;
3657 tree vectype_out, vectype_in;
3658 int ncopies, i, j;
3659 tree lhs_type, rhs_type;
3660 enum { NARROW, NONE, WIDEN } modifier;
3661 vec<tree> vec_oprnds0 = vNULL;
3662 vec<tree> vec_oprnds1 = vNULL;
3663 tree vop0;
3664 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3665 vec_info *vinfo = stmt_info->vinfo;
3666 int multi_step_cvt = 0;
3667 vec<tree> vec_dsts = vNULL;
3668 vec<tree> interm_types = vNULL;
3669 tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
3670 int op_type;
3671 machine_mode rhs_mode;
3672 unsigned short fltsz;
3674 /* Is STMT a vectorizable conversion? */
3676 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3677 return false;
3679 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
3680 && ! vec_stmt)
3681 return false;
3683 if (!is_gimple_assign (stmt))
3684 return false;
3686 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
3687 return false;
3689 code = gimple_assign_rhs_code (stmt);
3690 if (!CONVERT_EXPR_CODE_P (code)
3691 && code != FIX_TRUNC_EXPR
3692 && code != FLOAT_EXPR
3693 && code != WIDEN_MULT_EXPR
3694 && code != WIDEN_LSHIFT_EXPR)
3695 return false;
3697 op_type = TREE_CODE_LENGTH (code);
3699 /* Check types of lhs and rhs. */
3700 scalar_dest = gimple_assign_lhs (stmt);
3701 lhs_type = TREE_TYPE (scalar_dest);
3702 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3704 op0 = gimple_assign_rhs1 (stmt);
3705 rhs_type = TREE_TYPE (op0);
3707 if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
3708 && !((INTEGRAL_TYPE_P (lhs_type)
3709 && INTEGRAL_TYPE_P (rhs_type))
3710 || (SCALAR_FLOAT_TYPE_P (lhs_type)
3711 && SCALAR_FLOAT_TYPE_P (rhs_type))))
3712 return false;
3714 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
3715 && ((INTEGRAL_TYPE_P (lhs_type)
3716 && (TYPE_PRECISION (lhs_type)
3717 != GET_MODE_PRECISION (TYPE_MODE (lhs_type))))
3718 || (INTEGRAL_TYPE_P (rhs_type)
3719 && (TYPE_PRECISION (rhs_type)
3720 != GET_MODE_PRECISION (TYPE_MODE (rhs_type))))))
3722 if (dump_enabled_p ())
3723 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3724 "type conversion to/from bit-precision unsupported."
3725 "\n");
3726 return false;
3729 /* Check the operands of the operation. */
3730 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype_in))
3732 if (dump_enabled_p ())
3733 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3734 "use not simple.\n");
3735 return false;
3737 if (op_type == binary_op)
3739 bool ok;
3741 op1 = gimple_assign_rhs2 (stmt);
3742 gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
3743 /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
3744 OP1. */
3745 if (CONSTANT_CLASS_P (op0))
3746 ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &vectype_in);
3747 else
3748 ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]);
3750 if (!ok)
3752 if (dump_enabled_p ())
3753 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3754 "use not simple.\n");
3755 return false;
3759 /* If op0 is an external or constant def, use a vector type of
3760 the same size as the output vector type. */
3761 if (!vectype_in)
3762 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
3763 if (vec_stmt)
3764 gcc_assert (vectype_in);
3765 if (!vectype_in)
3767 if (dump_enabled_p ())
3769 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3770 "no vectype for scalar type ");
3771 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
3772 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3775 return false;
3778 if (VECTOR_BOOLEAN_TYPE_P (vectype_out)
3779 && !VECTOR_BOOLEAN_TYPE_P (vectype_in))
3781 if (dump_enabled_p ())
3783 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3784 "can't convert between boolean and non "
3785 "boolean vectors");
3786 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
3787 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3790 return false;
3793 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
3794 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
3795 if (nunits_in < nunits_out)
3796 modifier = NARROW;
3797 else if (nunits_out == nunits_in)
3798 modifier = NONE;
3799 else
3800 modifier = WIDEN;
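   /* Illustrative examples, assuming 128-bit vector types:
        short -> int:   vectype_in V8HI (8 units), vectype_out V4SI (4)
                        => nunits_in > nunits_out  => WIDEN  (promotion)
        int -> short:   vectype_in V4SI (4 units),  vectype_out V8HI (8)
                        => nunits_in < nunits_out  => NARROW (demotion)
        int -> float:   vectype_in V4SI (4 units),  vectype_out V4SF (4)
                        => nunits_in == nunits_out => NONE.  */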
3802 /* Multiple types in SLP are handled by creating the appropriate number of
3803 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3804 case of SLP. */
3805 if (slp_node || PURE_SLP_STMT (stmt_info))
3806 ncopies = 1;
3807 else if (modifier == NARROW)
3808 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
3809 else
3810 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
3812 /* Sanity check: make sure that at least one copy of the vectorized stmt
3813 needs to be generated. */
3814 gcc_assert (ncopies >= 1);
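   /* Worked instance: with a vectorization factor of 8 and the
      short -> int example above (WIDEN, nunits_in = 8), NCOPIES is
      8 / 8 = 1; with VF = 16 it would be 2, i.e. two vector stmts per
      scalar conversion.  For the int -> short case (NARROW) the divisor
      is nunits_out instead.  */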
3816 /* Supportable by target? */
3817 switch (modifier)
3819 case NONE:
3820 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
3821 return false;
3822 if (supportable_convert_operation (code, vectype_out, vectype_in,
3823 &decl1, &code1))
3824 break;
3825 /* FALLTHRU */
3826 unsupported:
3827 if (dump_enabled_p ())
3828 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3829 "conversion not supported by target.\n");
3830 return false;
3832 case WIDEN:
3833 if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
3834 &code1, &code2, &multi_step_cvt,
3835 &interm_types))
3837 /* A binary widening operation can only be supported directly by the
3838 architecture. */
3839 gcc_assert (!(multi_step_cvt && op_type == binary_op));
3840 break;
3843 if (code != FLOAT_EXPR
3844 || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
3845 <= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
3846 goto unsupported;
3848 rhs_mode = TYPE_MODE (rhs_type);
3849 fltsz = GET_MODE_SIZE (TYPE_MODE (lhs_type));
3850 for (rhs_mode = GET_MODE_2XWIDER_MODE (TYPE_MODE (rhs_type));
3851 rhs_mode != VOIDmode && GET_MODE_SIZE (rhs_mode) <= fltsz;
3852 rhs_mode = GET_MODE_2XWIDER_MODE (rhs_mode))
3854 cvt_type
3855 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
3856 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
3857 if (cvt_type == NULL_TREE)
3858 goto unsupported;
3860 if (GET_MODE_SIZE (rhs_mode) == fltsz)
3862 if (!supportable_convert_operation (code, vectype_out,
3863 cvt_type, &decl1, &codecvt1))
3864 goto unsupported;
3866 else if (!supportable_widening_operation (code, stmt, vectype_out,
3867 cvt_type, &codecvt1,
3868 &codecvt2, &multi_step_cvt,
3869 &interm_types))
3870 continue;
3871 else
3872 gcc_assert (multi_step_cvt == 0);
3874 if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
3875 vectype_in, &code1, &code2,
3876 &multi_step_cvt, &interm_types))
3877 break;
3880 if (rhs_mode == VOIDmode || GET_MODE_SIZE (rhs_mode) > fltsz)
3881 goto unsupported;
3883 if (GET_MODE_SIZE (rhs_mode) == fltsz)
3884 codecvt2 = ERROR_MARK;
3885 else
3887 multi_step_cvt++;
3888 interm_types.safe_push (cvt_type);
3889 cvt_type = NULL_TREE;
3891 break;
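      /* Illustration of the multi-step WIDEN case above: for a
         short -> double FLOAT conversion there is usually no single
         widening FLOAT_EXPR, so the loop above searches for an
         intermediate integer mode (typically SImode), giving
           short --(widening NOP_EXPR, code1/code2)--> int
                 --(FLOAT_EXPR, codecvt1/codecvt2)--> double
         where CODECVT2 is ERROR_MARK if that last step is a simple,
         non-widening conversion.  */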
3893 case NARROW:
3894 gcc_assert (op_type == unary_op);
3895 if (supportable_narrowing_operation (code, vectype_out, vectype_in,
3896 &code1, &multi_step_cvt,
3897 &interm_types))
3898 break;
3900 if (code != FIX_TRUNC_EXPR
3901 || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
3902 >= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
3903 goto unsupported;
3905 rhs_mode = TYPE_MODE (rhs_type);
3906 cvt_type
3907 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
3908 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
3909 if (cvt_type == NULL_TREE)
3910 goto unsupported;
3911 if (!supportable_convert_operation (code, cvt_type, vectype_in,
3912 &decl1, &codecvt1))
3913 goto unsupported;
3914 if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
3915 &code1, &multi_step_cvt,
3916 &interm_types))
3917 break;
3918 goto unsupported;
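      /* Illustration of the multi-step NARROW case above: for a
         double -> short FIX_TRUNC conversion the target may lack a direct
         narrowing conversion, so the code first truncates to an integer
         type of the same width as the source (double -> 64-bit int,
         CODECVT1) and then narrows that result to short (CODE1, possibly
         in several steps recorded in INTERM_TYPES).  */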
3920 default:
3921 gcc_unreachable ();
3924 if (!vec_stmt) /* transformation not required. */
3926 if (dump_enabled_p ())
3927 dump_printf_loc (MSG_NOTE, vect_location,
3928 "=== vectorizable_conversion ===\n");
3929 if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
3931 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
3932 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
3934 else if (modifier == NARROW)
3936 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
3937 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
3939 else
3941 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
3942 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
3944 interm_types.release ();
3945 return true;
3948 /** Transform. **/
3949 if (dump_enabled_p ())
3950 dump_printf_loc (MSG_NOTE, vect_location,
3951 "transform conversion. ncopies = %d.\n", ncopies);
3953 if (op_type == binary_op)
3955 if (CONSTANT_CLASS_P (op0))
3956 op0 = fold_convert (TREE_TYPE (op1), op0);
3957 else if (CONSTANT_CLASS_P (op1))
3958 op1 = fold_convert (TREE_TYPE (op0), op1);
3961 /* In case of multi-step conversion, we first generate conversion operations
3962 to the intermediate types, and then from those types to the final one.
3963 We create vector destinations for the intermediate types (TYPES) received
3964 from supportable_*_operation, and store them in the correct order
3965 for future use in vect_create_vectorized_*_stmts (). */
3966 vec_dsts.create (multi_step_cvt + 1);
3967 vec_dest = vect_create_destination_var (scalar_dest,
3968 (cvt_type && modifier == WIDEN)
3969 ? cvt_type : vectype_out);
3970 vec_dsts.quick_push (vec_dest);
3972 if (multi_step_cvt)
3974 for (i = interm_types.length () - 1;
3975 interm_types.iterate (i, &intermediate_type); i--)
3977 vec_dest = vect_create_destination_var (scalar_dest,
3978 intermediate_type);
3979 vec_dsts.quick_push (vec_dest);
3983 if (cvt_type)
3984 vec_dest = vect_create_destination_var (scalar_dest,
3985 modifier == WIDEN
3986 ? vectype_out : cvt_type);
3988 if (!slp_node)
3990 if (modifier == WIDEN)
3992 vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
3993 if (op_type == binary_op)
3994 vec_oprnds1.create (1);
3996 else if (modifier == NARROW)
3997 vec_oprnds0.create (
3998 2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
4000 else if (code == WIDEN_LSHIFT_EXPR)
4001 vec_oprnds1.create (slp_node->vec_stmts_size);
4003 last_oprnd = op0;
4004 prev_stmt_info = NULL;
4005 switch (modifier)
4007 case NONE:
4008 for (j = 0; j < ncopies; j++)
4010 if (j == 0)
4011 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node,
4012 -1);
4013 else
4014 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
4016 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4018 /* Arguments are ready. Create the new vector stmt. */
4019 if (code1 == CALL_EXPR)
4021 new_stmt = gimple_build_call (decl1, 1, vop0);
4022 new_temp = make_ssa_name (vec_dest, new_stmt);
4023 gimple_call_set_lhs (new_stmt, new_temp);
4025 else
4027 gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
4028 new_stmt = gimple_build_assign (vec_dest, code1, vop0);
4029 new_temp = make_ssa_name (vec_dest, new_stmt);
4030 gimple_assign_set_lhs (new_stmt, new_temp);
4033 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4034 if (slp_node)
4035 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4036 else
4038 if (!prev_stmt_info)
4039 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4040 else
4041 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4042 prev_stmt_info = vinfo_for_stmt (new_stmt);
4046 break;
4048 case WIDEN:
4049 /* In case the vectorization factor (VF) is bigger than the number
4050 of elements that we can fit in a vectype (nunits), we have to
4051 generate more than one vector stmt - i.e., we need to "unroll"
4052 the vector stmt by a factor VF/nunits. */
4053 for (j = 0; j < ncopies; j++)
4055 /* Handle uses. */
4056 if (j == 0)
4058 if (slp_node)
4060 if (code == WIDEN_LSHIFT_EXPR)
4062 unsigned int k;
4064 vec_oprnd1 = op1;
4065 /* Store vec_oprnd1 for every vector stmt to be created
4066 for SLP_NODE. We check during the analysis that all
4067 the shift arguments are the same. */
4068 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
4069 vec_oprnds1.quick_push (vec_oprnd1);
4071 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4072 slp_node, -1);
4074 else
4075 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
4076 &vec_oprnds1, slp_node, -1);
4078 else
4080 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt);
4081 vec_oprnds0.quick_push (vec_oprnd0);
4082 if (op_type == binary_op)
4084 if (code == WIDEN_LSHIFT_EXPR)
4085 vec_oprnd1 = op1;
4086 else
4087 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt);
4088 vec_oprnds1.quick_push (vec_oprnd1);
4092 else
4094 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
4095 vec_oprnds0.truncate (0);
4096 vec_oprnds0.quick_push (vec_oprnd0);
4097 if (op_type == binary_op)
4099 if (code == WIDEN_LSHIFT_EXPR)
4100 vec_oprnd1 = op1;
4101 else
4102 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
4103 vec_oprnd1);
4104 vec_oprnds1.truncate (0);
4105 vec_oprnds1.quick_push (vec_oprnd1);
4109 /* Arguments are ready. Create the new vector stmts. */
4110 for (i = multi_step_cvt; i >= 0; i--)
4112 tree this_dest = vec_dsts[i];
4113 enum tree_code c1 = code1, c2 = code2;
4114 if (i == 0 && codecvt2 != ERROR_MARK)
4116 c1 = codecvt1;
4117 c2 = codecvt2;
4119 vect_create_vectorized_promotion_stmts (&vec_oprnds0,
4120 &vec_oprnds1,
4121 stmt, this_dest, gsi,
4122 c1, c2, decl1, decl2,
4123 op_type);
4126 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4128 if (cvt_type)
4130 if (codecvt1 == CALL_EXPR)
4132 new_stmt = gimple_build_call (decl1, 1, vop0);
4133 new_temp = make_ssa_name (vec_dest, new_stmt);
4134 gimple_call_set_lhs (new_stmt, new_temp);
4136 else
4138 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
4139 new_temp = make_ssa_name (vec_dest);
4140 new_stmt = gimple_build_assign (new_temp, codecvt1,
4141 vop0);
4144 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4146 else
4147 new_stmt = SSA_NAME_DEF_STMT (vop0);
4149 if (slp_node)
4150 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4151 else
4153 if (!prev_stmt_info)
4154 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
4155 else
4156 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4157 prev_stmt_info = vinfo_for_stmt (new_stmt);
4162 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4163 break;
4165 case NARROW:
4166 /* In case the vectorization factor (VF) is bigger than the number
4167 of elements that we can fit in a vectype (nunits), we have to
4168 generate more than one vector stmt - i.e., we need to "unroll"
4169 the vector stmt by a factor VF/nunits. */
4170 for (j = 0; j < ncopies; j++)
4172 /* Handle uses. */
4173 if (slp_node)
4174 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4175 slp_node, -1);
4176 else
4178 vec_oprnds0.truncate (0);
4179 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
4180 vect_pow2 (multi_step_cvt) - 1);
4183 /* Arguments are ready. Create the new vector stmts. */
4184 if (cvt_type)
4185 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4187 if (codecvt1 == CALL_EXPR)
4189 new_stmt = gimple_build_call (decl1, 1, vop0);
4190 new_temp = make_ssa_name (vec_dest, new_stmt);
4191 gimple_call_set_lhs (new_stmt, new_temp);
4193 else
4195 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
4196 new_temp = make_ssa_name (vec_dest);
4197 new_stmt = gimple_build_assign (new_temp, codecvt1,
4198 vop0);
4201 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4202 vec_oprnds0[i] = new_temp;
4205 vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
4206 stmt, vec_dsts, gsi,
4207 slp_node, code1,
4208 &prev_stmt_info);
4211 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4212 break;
4215 vec_oprnds0.release ();
4216 vec_oprnds1.release ();
4217 vec_dsts.release ();
4218 interm_types.release ();
4220 return true;
4224 /* Function vectorizable_assignment.
4226 Check if STMT performs an assignment (copy) that can be vectorized.
4227 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4228 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4229 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4231 static bool
4232 vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
4233 gimple **vec_stmt, slp_tree slp_node)
4235 tree vec_dest;
4236 tree scalar_dest;
4237 tree op;
4238 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4239 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4240 tree new_temp;
4241 gimple *def_stmt;
4242 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4243 int ncopies;
4244 int i, j;
4245 vec<tree> vec_oprnds = vNULL;
4246 tree vop;
4247 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4248 vec_info *vinfo = stmt_info->vinfo;
4249 gimple *new_stmt = NULL;
4250 stmt_vec_info prev_stmt_info = NULL;
4251 enum tree_code code;
4252 tree vectype_in;
4254 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4255 return false;
4257 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4258 && ! vec_stmt)
4259 return false;
4261 /* Is vectorizable assignment? */
4262 if (!is_gimple_assign (stmt))
4263 return false;
4265 scalar_dest = gimple_assign_lhs (stmt);
4266 if (TREE_CODE (scalar_dest) != SSA_NAME)
4267 return false;
4269 code = gimple_assign_rhs_code (stmt);
4270 if (gimple_assign_single_p (stmt)
4271 || code == PAREN_EXPR
4272 || CONVERT_EXPR_CODE_P (code))
4273 op = gimple_assign_rhs1 (stmt);
4274 else
4275 return false;
4277 if (code == VIEW_CONVERT_EXPR)
4278 op = TREE_OPERAND (op, 0);
4280 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4281 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4283 /* Multiple types in SLP are handled by creating the appropriate number of
4284 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4285 case of SLP. */
4286 if (slp_node || PURE_SLP_STMT (stmt_info))
4287 ncopies = 1;
4288 else
4289 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4291 gcc_assert (ncopies >= 1);
4293 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[0], &vectype_in))
4295 if (dump_enabled_p ())
4296 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4297 "use not simple.\n");
4298 return false;
4301 /* We can handle NOP_EXPR conversions that do not change the number
4302 of elements or the vector size. */
4303 if ((CONVERT_EXPR_CODE_P (code)
4304 || code == VIEW_CONVERT_EXPR)
4305 && (!vectype_in
4306 || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
4307 || (GET_MODE_SIZE (TYPE_MODE (vectype))
4308 != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
4309 return false;
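   /* For illustration: a same-width change of signedness such as
      int -> unsigned int is accepted here (same number of units and the
      same vector mode size) and is emitted below as a plain copy through
      a VIEW_CONVERT_EXPR, whereas int -> short changes the number of
      units and is left to vectorizable_conversion.  */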
4311 /* We do not handle bit-precision changes. */
4312 if ((CONVERT_EXPR_CODE_P (code)
4313 || code == VIEW_CONVERT_EXPR)
4314 && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
4315 && ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4316 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4317 || ((TYPE_PRECISION (TREE_TYPE (op))
4318 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op))))))
4319 /* But a conversion that does not change the bit-pattern is ok. */
4320 && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4321 > TYPE_PRECISION (TREE_TYPE (op)))
4322 && TYPE_UNSIGNED (TREE_TYPE (op)))
4323 /* Conversion between boolean types of different sizes is
4324 a simple assignment in case their vectypes are the same
4325 boolean vectors. */
4326 && (!VECTOR_BOOLEAN_TYPE_P (vectype)
4327 || !VECTOR_BOOLEAN_TYPE_P (vectype_in)))
4329 if (dump_enabled_p ())
4330 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4331 "type conversion to/from bit-precision "
4332 "unsupported.\n");
4333 return false;
4336 if (!vec_stmt) /* transformation not required. */
4338 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
4339 if (dump_enabled_p ())
4340 dump_printf_loc (MSG_NOTE, vect_location,
4341 "=== vectorizable_assignment ===\n");
4342 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4343 return true;
4346 /** Transform. **/
4347 if (dump_enabled_p ())
4348 dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");
4350 /* Handle def. */
4351 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4353 /* Handle use. */
4354 for (j = 0; j < ncopies; j++)
4356 /* Handle uses. */
4357 if (j == 0)
4358 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node, -1);
4359 else
4360 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
4362 /* Arguments are ready. Create the new vector stmt. */
4363 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
4365 if (CONVERT_EXPR_CODE_P (code)
4366 || code == VIEW_CONVERT_EXPR)
4367 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
4368 new_stmt = gimple_build_assign (vec_dest, vop);
4369 new_temp = make_ssa_name (vec_dest, new_stmt);
4370 gimple_assign_set_lhs (new_stmt, new_temp);
4371 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4372 if (slp_node)
4373 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4376 if (slp_node)
4377 continue;
4379 if (j == 0)
4380 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4381 else
4382 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4384 prev_stmt_info = vinfo_for_stmt (new_stmt);
4387 vec_oprnds.release ();
4388 return true;
4392 /* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
4393 either as shift by a scalar or by a vector. */
4395 bool
4396 vect_supportable_shift (enum tree_code code, tree scalar_type)
4399 machine_mode vec_mode;
4400 optab optab;
4401 int icode;
4402 tree vectype;
4404 vectype = get_vectype_for_scalar_type (scalar_type);
4405 if (!vectype)
4406 return false;
4408 optab = optab_for_tree_code (code, vectype, optab_scalar);
4409 if (!optab
4410 || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
4412 optab = optab_for_tree_code (code, vectype, optab_vector);
4413 if (!optab
4414 || (optab_handler (optab, TYPE_MODE (vectype))
4415 == CODE_FOR_nothing))
4416 return false;
4419 vec_mode = TYPE_MODE (vectype);
4420 icode = (int) optab_handler (optab, vec_mode);
4421 if (icode == CODE_FOR_nothing)
4422 return false;
4424 return true;
4428 /* Function vectorizable_shift.
4430 Check if STMT performs a shift operation that can be vectorized.
4431 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4432 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4433 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4435 static bool
4436 vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
4437 gimple **vec_stmt, slp_tree slp_node)
4439 tree vec_dest;
4440 tree scalar_dest;
4441 tree op0, op1 = NULL;
4442 tree vec_oprnd1 = NULL_TREE;
4443 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4444 tree vectype;
4445 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4446 enum tree_code code;
4447 machine_mode vec_mode;
4448 tree new_temp;
4449 optab optab;
4450 int icode;
4451 machine_mode optab_op2_mode;
4452 gimple *def_stmt;
4453 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4454 gimple *new_stmt = NULL;
4455 stmt_vec_info prev_stmt_info;
4456 int nunits_in;
4457 int nunits_out;
4458 tree vectype_out;
4459 tree op1_vectype;
4460 int ncopies;
4461 int j, i;
4462 vec<tree> vec_oprnds0 = vNULL;
4463 vec<tree> vec_oprnds1 = vNULL;
4464 tree vop0, vop1;
4465 unsigned int k;
4466 bool scalar_shift_arg = true;
4467 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4468 vec_info *vinfo = stmt_info->vinfo;
4469 int vf;
4471 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4472 return false;
4474 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4475 && ! vec_stmt)
4476 return false;
4478 /* Is STMT a vectorizable binary/unary operation? */
4479 if (!is_gimple_assign (stmt))
4480 return false;
4482 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4483 return false;
4485 code = gimple_assign_rhs_code (stmt);
4487 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
4488 || code == RROTATE_EXPR))
4489 return false;
4491 scalar_dest = gimple_assign_lhs (stmt);
4492 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4493 if (TYPE_PRECISION (TREE_TYPE (scalar_dest))
4494 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4496 if (dump_enabled_p ())
4497 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4498 "bit-precision shifts not supported.\n");
4499 return false;
4502 op0 = gimple_assign_rhs1 (stmt);
4503 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
4505 if (dump_enabled_p ())
4506 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4507 "use not simple.\n");
4508 return false;
4510 /* If op0 is an external or constant def, use a vector type with
4511 the same size as the output vector type. */
4512 if (!vectype)
4513 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
4514 if (vec_stmt)
4515 gcc_assert (vectype);
4516 if (!vectype)
4518 if (dump_enabled_p ())
4519 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4520 "no vectype for scalar type\n");
4521 return false;
4524 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4525 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
4526 if (nunits_out != nunits_in)
4527 return false;
4529 op1 = gimple_assign_rhs2 (stmt);
4530 if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &op1_vectype))
4532 if (dump_enabled_p ())
4533 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4534 "use not simple.\n");
4535 return false;
4538 if (loop_vinfo)
4539 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4540 else
4541 vf = 1;
4543 /* Multiple types in SLP are handled by creating the appropriate number of
4544 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4545 case of SLP. */
4546 if (slp_node || PURE_SLP_STMT (stmt_info))
4547 ncopies = 1;
4548 else
4549 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
4551 gcc_assert (ncopies >= 1);
4553 /* Determine whether the shift amount is a vector or a scalar. If the
4554 shift/rotate amount is a vector, use the vector/vector shift optabs. */
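   /* For example, in  x[i] = y[i] << 3  the shift amount is a
      loop-invariant scalar and the vector/scalar optab applies, while in
      x[i] = y[i] << z[i]  every element has its own count and the
      vector/vector optab is required.  */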
4556 if ((dt[1] == vect_internal_def
4557 || dt[1] == vect_induction_def)
4558 && !slp_node)
4559 scalar_shift_arg = false;
4560 else if (dt[1] == vect_constant_def
4561 || dt[1] == vect_external_def
4562 || dt[1] == vect_internal_def)
4564 /* In SLP, we need to check whether the shift count is the same for
4565 all the stmts; in loops, if it is a constant or invariant, it is
4566 always a scalar shift. */
4567 if (slp_node)
4569 vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
4570 gimple *slpstmt;
4572 FOR_EACH_VEC_ELT (stmts, k, slpstmt)
4573 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
4574 scalar_shift_arg = false;
4577 else
4579 if (dump_enabled_p ())
4580 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4581 "operand mode requires invariant argument.\n");
4582 return false;
4585 /* Vector shifted by vector. */
4586 if (!scalar_shift_arg)
4588 optab = optab_for_tree_code (code, vectype, optab_vector);
4589 if (dump_enabled_p ())
4590 dump_printf_loc (MSG_NOTE, vect_location,
4591 "vector/vector shift/rotate found.\n");
4593 if (!op1_vectype)
4594 op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
4595 if (op1_vectype == NULL_TREE
4596 || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
4598 if (dump_enabled_p ())
4599 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4600 "unusable type for last operand in"
4601 " vector/vector shift/rotate.\n");
4602 return false;
4605 /* See if the machine has a vector-shifted-by-scalar insn, and if not,
4606 see whether it has a vector-shifted-by-vector insn. */
4607 else
4609 optab = optab_for_tree_code (code, vectype, optab_scalar);
4610 if (optab
4611 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
4613 if (dump_enabled_p ())
4614 dump_printf_loc (MSG_NOTE, vect_location,
4615 "vector/scalar shift/rotate found.\n");
4617 else
4619 optab = optab_for_tree_code (code, vectype, optab_vector);
4620 if (optab
4621 && (optab_handler (optab, TYPE_MODE (vectype))
4622 != CODE_FOR_nothing))
4624 scalar_shift_arg = false;
4626 if (dump_enabled_p ())
4627 dump_printf_loc (MSG_NOTE, vect_location,
4628 "vector/vector shift/rotate found.\n");
4630 /* Unlike the other binary operators, shifts/rotates have
4631 an rhs of type int rather than the same type as the lhs,
4632 so make sure the scalar is the right type if we are
4633 dealing with vectors of long long/long/short/char. */
4634 if (dt[1] == vect_constant_def)
4635 op1 = fold_convert (TREE_TYPE (vectype), op1);
4636 else if (!useless_type_conversion_p (TREE_TYPE (vectype),
4637 TREE_TYPE (op1)))
4639 if (slp_node
4640 && TYPE_MODE (TREE_TYPE (vectype))
4641 != TYPE_MODE (TREE_TYPE (op1)))
4643 if (dump_enabled_p ())
4644 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4645 "unusable type for last operand in"
4646 " vector/vector shift/rotate.\n");
4647 return false;
4649 if (vec_stmt && !slp_node)
4651 op1 = fold_convert (TREE_TYPE (vectype), op1);
4652 op1 = vect_init_vector (stmt, op1,
4653 TREE_TYPE (vectype), NULL);
4660 /* Supportable by target? */
4661 if (!optab)
4663 if (dump_enabled_p ())
4664 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4665 "no optab.\n");
4666 return false;
4668 vec_mode = TYPE_MODE (vectype);
4669 icode = (int) optab_handler (optab, vec_mode);
4670 if (icode == CODE_FOR_nothing)
4672 if (dump_enabled_p ())
4673 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4674 "op not supported by target.\n");
4675 /* Check only during analysis. */
4676 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
4677 || (vf < vect_min_worthwhile_factor (code)
4678 && !vec_stmt))
4679 return false;
4680 if (dump_enabled_p ())
4681 dump_printf_loc (MSG_NOTE, vect_location,
4682 "proceeding using word mode.\n");
4685 /* Worthwhile without SIMD support? Check only during analysis. */
4686 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
4687 && vf < vect_min_worthwhile_factor (code)
4688 && !vec_stmt)
4690 if (dump_enabled_p ())
4691 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4692 "not worthwhile without SIMD support.\n");
4693 return false;
4696 if (!vec_stmt) /* transformation not required. */
4698 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
4699 if (dump_enabled_p ())
4700 dump_printf_loc (MSG_NOTE, vect_location,
4701 "=== vectorizable_shift ===\n");
4702 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4703 return true;
4706 /** Transform. **/
4708 if (dump_enabled_p ())
4709 dump_printf_loc (MSG_NOTE, vect_location,
4710 "transform binary/unary operation.\n");
4712 /* Handle def. */
4713 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4715 prev_stmt_info = NULL;
4716 for (j = 0; j < ncopies; j++)
4718 /* Handle uses. */
4719 if (j == 0)
4721 if (scalar_shift_arg)
4723 /* Vector shl and shr insn patterns can be defined with scalar
4724 operand 2 (shift operand). In this case, use constant or loop
4725 invariant op1 directly, without extending it to vector mode
4726 first. */
4727 optab_op2_mode = insn_data[icode].operand[2].mode;
4728 if (!VECTOR_MODE_P (optab_op2_mode))
4730 if (dump_enabled_p ())
4731 dump_printf_loc (MSG_NOTE, vect_location,
4732 "operand 1 using scalar mode.\n");
4733 vec_oprnd1 = op1;
4734 vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
4735 vec_oprnds1.quick_push (vec_oprnd1);
4736 if (slp_node)
4738 /* Store vec_oprnd1 for every vector stmt to be created
4739 for SLP_NODE. We check during the analysis that all
4740 the shift arguments are the same.
4741 TODO: Allow different constants for different vector
4742 stmts generated for an SLP instance. */
4743 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
4744 vec_oprnds1.quick_push (vec_oprnd1);
4749 /* vec_oprnd1 is available if operand 1 should be of a scalar type
4750 (a special case for certain kinds of vector shifts); otherwise,
4751 operand 1 should be of a vector type (the usual case). */
4752 if (vec_oprnd1)
4753 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4754 slp_node, -1);
4755 else
4756 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
4757 slp_node, -1);
4759 else
4760 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
4762 /* Arguments are ready. Create the new vector stmt. */
4763 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4765 vop1 = vec_oprnds1[i];
4766 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
4767 new_temp = make_ssa_name (vec_dest, new_stmt);
4768 gimple_assign_set_lhs (new_stmt, new_temp);
4769 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4770 if (slp_node)
4771 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4774 if (slp_node)
4775 continue;
4777 if (j == 0)
4778 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4779 else
4780 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4781 prev_stmt_info = vinfo_for_stmt (new_stmt);
4784 vec_oprnds0.release ();
4785 vec_oprnds1.release ();
4787 return true;
4791 /* Function vectorizable_operation.
4793 Check if STMT performs a binary, unary or ternary operation that can
4794 be vectorized.
4795 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4796 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4797 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4799 static bool
4800 vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
4801 gimple **vec_stmt, slp_tree slp_node)
4803 tree vec_dest;
4804 tree scalar_dest;
4805 tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
4806 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4807 tree vectype;
4808 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4809 enum tree_code code;
4810 machine_mode vec_mode;
4811 tree new_temp;
4812 int op_type;
4813 optab optab;
4814 bool target_support_p;
4815 gimple *def_stmt;
4816 enum vect_def_type dt[3]
4817 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
4818 gimple *new_stmt = NULL;
4819 stmt_vec_info prev_stmt_info;
4820 int nunits_in;
4821 int nunits_out;
4822 tree vectype_out;
4823 int ncopies;
4824 int j, i;
4825 vec<tree> vec_oprnds0 = vNULL;
4826 vec<tree> vec_oprnds1 = vNULL;
4827 vec<tree> vec_oprnds2 = vNULL;
4828 tree vop0, vop1, vop2;
4829 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4830 vec_info *vinfo = stmt_info->vinfo;
4831 int vf;
4833 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4834 return false;
4836 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4837 && ! vec_stmt)
4838 return false;
4840 /* Is STMT a vectorizable binary/unary operation? */
4841 if (!is_gimple_assign (stmt))
4842 return false;
4844 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4845 return false;
4847 code = gimple_assign_rhs_code (stmt);
4849 /* For pointer addition, we should use the normal plus for
4850 the vector addition. */
4851 if (code == POINTER_PLUS_EXPR)
4852 code = PLUS_EXPR;
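   /* E.g. a pointer induction like  q = p + i  is then vectorized as an
      ordinary element-wise addition on pointer-sized integers.  */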
4854 /* Support only unary, binary or ternary operations. */
4855 op_type = TREE_CODE_LENGTH (code);
4856 if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
4858 if (dump_enabled_p ())
4859 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4860 "num. args = %d (not unary/binary/ternary op).\n",
4861 op_type);
4862 return false;
4865 scalar_dest = gimple_assign_lhs (stmt);
4866 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4868 /* Most operations cannot handle bit-precision types without extra
4869 truncations. */
4870 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
4871 && (TYPE_PRECISION (TREE_TYPE (scalar_dest))
4872 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4873 /* Exceptions are the bitwise binary operations. */
4874 && code != BIT_IOR_EXPR
4875 && code != BIT_XOR_EXPR
4876 && code != BIT_AND_EXPR)
4878 if (dump_enabled_p ())
4879 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4880 "bit-precision arithmetic not supported.\n");
4881 return false;
4884 op0 = gimple_assign_rhs1 (stmt);
4885 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
4887 if (dump_enabled_p ())
4888 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4889 "use not simple.\n");
4890 return false;
4892 /* If op0 is an external or constant def use a vector type with
4893 the same size as the output vector type. */
4894 if (!vectype)
4896 /* For a boolean type we cannot determine the vectype from an
4897 invariant value (we don't know whether it is a vector
4898 of booleans or a vector of integers). We use the output
4899 vectype because operations on booleans don't change the
4900 type. */
4901 if (TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE)
4903 if (TREE_CODE (TREE_TYPE (scalar_dest)) != BOOLEAN_TYPE)
4905 if (dump_enabled_p ())
4906 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4907 "not supported operation on bool value.\n");
4908 return false;
4910 vectype = vectype_out;
4912 else
4913 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
4915 if (vec_stmt)
4916 gcc_assert (vectype);
4917 if (!vectype)
4919 if (dump_enabled_p ())
4921 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4922 "no vectype for scalar type ");
4923 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
4924 TREE_TYPE (op0));
4925 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4928 return false;
4931 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4932 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
4933 if (nunits_out != nunits_in)
4934 return false;
4936 if (op_type == binary_op || op_type == ternary_op)
4938 op1 = gimple_assign_rhs2 (stmt);
4939 if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]))
4941 if (dump_enabled_p ())
4942 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4943 "use not simple.\n");
4944 return false;
4947 if (op_type == ternary_op)
4949 op2 = gimple_assign_rhs3 (stmt);
4950 if (!vect_is_simple_use (op2, vinfo, &def_stmt, &dt[2]))
4952 if (dump_enabled_p ())
4953 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4954 "use not simple.\n");
4955 return false;
4959 if (loop_vinfo)
4960 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4961 else
4962 vf = 1;
4964 /* Multiple types in SLP are handled by creating the appropriate number of
4965 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4966 case of SLP. */
4967 if (slp_node || PURE_SLP_STMT (stmt_info))
4968 ncopies = 1;
4969 else
4970 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
4972 gcc_assert (ncopies >= 1);
4974 /* Shifts are handled in vectorizable_shift (). */
4975 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
4976 || code == RROTATE_EXPR)
4977 return false;
4979 /* Supportable by target? */
4981 vec_mode = TYPE_MODE (vectype);
4982 if (code == MULT_HIGHPART_EXPR)
4983 target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
4984 else
4986 optab = optab_for_tree_code (code, vectype, optab_default);
4987 if (!optab)
4989 if (dump_enabled_p ())
4990 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4991 "no optab.\n");
4992 return false;
4994 target_support_p = (optab_handler (optab, vec_mode)
4995 != CODE_FOR_nothing);
4998 if (!target_support_p)
5000 if (dump_enabled_p ())
5001 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5002 "op not supported by target.\n");
5003 /* Check only during analysis. */
5004 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
5005 || (!vec_stmt && vf < vect_min_worthwhile_factor (code)))
5006 return false;
5007 if (dump_enabled_p ())
5008 dump_printf_loc (MSG_NOTE, vect_location,
5009 "proceeding using word mode.\n");
5012 /* Worthwhile without SIMD support? Check only during analysis. */
5013 if (!VECTOR_MODE_P (vec_mode)
5014 && !vec_stmt
5015 && vf < vect_min_worthwhile_factor (code))
5017 if (dump_enabled_p ())
5018 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5019 "not worthwhile without SIMD support.\n");
5020 return false;
5023 if (!vec_stmt) /* transformation not required. */
5025 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
5026 if (dump_enabled_p ())
5027 dump_printf_loc (MSG_NOTE, vect_location,
5028 "=== vectorizable_operation ===\n");
5029 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
5030 return true;
5033 /** Transform. **/
5035 if (dump_enabled_p ())
5036 dump_printf_loc (MSG_NOTE, vect_location,
5037 "transform binary/unary operation.\n");
5039 /* Handle def. */
5040 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5042 /* In case the vectorization factor (VF) is bigger than the number
5043 of elements that we can fit in a vectype (nunits), we have to generate
5044 more than one vector stmt - i.e., we need to "unroll" the
5045 vector stmt by a factor VF/nunits. In doing so, we record a pointer
5046 from one copy of the vector stmt to the next, in the field
5047 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
5048 stages to find the correct vector defs to be used when vectorizing
5049 stmts that use the defs of the current stmt. The example below
5050 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
5051 we need to create 4 vectorized stmts):
5053 before vectorization:
5054 RELATED_STMT VEC_STMT
5055 S1: x = memref - -
5056 S2: z = x + 1 - -
5058 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
5059 there):
5060 RELATED_STMT VEC_STMT
5061 VS1_0: vx0 = memref0 VS1_1 -
5062 VS1_1: vx1 = memref1 VS1_2 -
5063 VS1_2: vx2 = memref2 VS1_3 -
5064 VS1_3: vx3 = memref3 - -
5065 S1: x = load - VS1_0
5066 S2: z = x + 1 - -
5068 step 2: vectorize stmt S2 (done here):
5069 To vectorize stmt S2 we first need to find the relevant vector
5070 def for the first operand 'x'. This is, as usual, obtained from
5071 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
5072 that defines 'x' (S1). This way we find the stmt VS1_0, and the
5073 relevant vector def 'vx0'. Having found 'vx0' we can generate
5074 the vector stmt VS2_0, and as usual, record it in the
5075 STMT_VINFO_VEC_STMT of stmt S2.
5076 When creating the second copy (VS2_1), we obtain the relevant vector
5077 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
5078 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
5079 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
5080 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
5081 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
5082 chain of stmts and pointers:
5083 RELATED_STMT VEC_STMT
5084 VS1_0: vx0 = memref0 VS1_1 -
5085 VS1_1: vx1 = memref1 VS1_2 -
5086 VS1_2: vx2 = memref2 VS1_3 -
5087 VS1_3: vx3 = memref3 - -
5088 S1: x = load - VS1_0
5089 VS2_0: vz0 = vx0 + v1 VS2_1 -
5090 VS2_1: vz1 = vx1 + v1 VS2_2 -
5091 VS2_2: vz2 = vx2 + v1 VS2_3 -
5092 VS2_3: vz3 = vx3 + v1 - -
5093 S2: z = x + 1 - VS2_0 */
5095 prev_stmt_info = NULL;
5096 for (j = 0; j < ncopies; j++)
5098 /* Handle uses. */
5099 if (j == 0)
5101 if (op_type == binary_op || op_type == ternary_op)
5102 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
5103 slp_node, -1);
5104 else
5105 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
5106 slp_node, -1);
5107 if (op_type == ternary_op)
5109 vec_oprnds2.create (1);
5110 vec_oprnds2.quick_push (vect_get_vec_def_for_operand (op2,
5111 stmt));
5114 else
5116 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
5117 if (op_type == ternary_op)
5119 tree vec_oprnd = vec_oprnds2.pop ();
5120 vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
5121 vec_oprnd));
5125 /* Arguments are ready. Create the new vector stmt. */
5126 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
5128 vop1 = ((op_type == binary_op || op_type == ternary_op)
5129 ? vec_oprnds1[i] : NULL_TREE);
5130 vop2 = ((op_type == ternary_op)
5131 ? vec_oprnds2[i] : NULL_TREE);
5132 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2);
5133 new_temp = make_ssa_name (vec_dest, new_stmt);
5134 gimple_assign_set_lhs (new_stmt, new_temp);
5135 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5136 if (slp_node)
5137 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
5140 if (slp_node)
5141 continue;
5143 if (j == 0)
5144 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5145 else
5146 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5147 prev_stmt_info = vinfo_for_stmt (new_stmt);
5150 vec_oprnds0.release ();
5151 vec_oprnds1.release ();
5152 vec_oprnds2.release ();
5154 return true;
5157 /* A helper function to ensure data reference DR's base alignment
5158 for STMT_INFO. */
5160 static void
5161 ensure_base_align (stmt_vec_info stmt_info, struct data_reference *dr)
5163 if (!dr->aux)
5164 return;
5166 if (DR_VECT_AUX (dr)->base_misaligned)
5168 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5169 tree base_decl = DR_VECT_AUX (dr)->base_decl;
5171 if (decl_in_symtab_p (base_decl))
5172 symtab_node::get (base_decl)->increase_alignment (TYPE_ALIGN (vectype));
5173 else
5175 DECL_ALIGN (base_decl) = TYPE_ALIGN (vectype);
5176 DECL_USER_ALIGN (base_decl) = 1;
5178 DR_VECT_AUX (dr)->base_misaligned = false;
5183 /* Given a vector type VECTYPE, return the VECTOR_CST mask that implements
5184 reversal of the vector elements. If that is impossible to do,
5185 return NULL_TREE. */
5187 static tree
5188 perm_mask_for_reverse (tree vectype)
5190 int i, nunits;
5191 unsigned char *sel;
5193 nunits = TYPE_VECTOR_SUBPARTS (vectype);
5194 sel = XALLOCAVEC (unsigned char, nunits);
5196 for (i = 0; i < nunits; ++i)
5197 sel[i] = nunits - 1 - i;
5199 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5200 return NULL_TREE;
5201 return vect_gen_perm_mask_checked (vectype, sel);
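   /* For example, for V4SI the selector built above is { 3, 2, 1, 0 };
      can_vec_perm_p checks whether the target can perform that
      whole-vector reversal, which vectorizable_store below relies on
      when a store has a negative step.  */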
5204 /* Function vectorizable_store.
5206 Check if STMT defines a non-scalar data-ref (array/pointer/structure) that
5207 can be vectorized.
5208 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5209 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
5210 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5212 static bool
5213 vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
5214 slp_tree slp_node)
5216 tree scalar_dest;
5217 tree data_ref;
5218 tree op;
5219 tree vec_oprnd = NULL_TREE;
5220 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5221 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
5222 tree elem_type;
5223 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5224 struct loop *loop = NULL;
5225 machine_mode vec_mode;
5226 tree dummy;
5227 enum dr_alignment_support alignment_support_scheme;
5228 gimple *def_stmt;
5229 enum vect_def_type dt;
5230 stmt_vec_info prev_stmt_info = NULL;
5231 tree dataref_ptr = NULL_TREE;
5232 tree dataref_offset = NULL_TREE;
5233 gimple *ptr_incr = NULL;
5234 int ncopies;
5235 int j;
5236 gimple *next_stmt, *first_stmt = NULL;
5237 bool grouped_store = false;
5238 bool store_lanes_p = false;
5239 unsigned int group_size, i;
5240 vec<tree> dr_chain = vNULL;
5241 vec<tree> oprnds = vNULL;
5242 vec<tree> result_chain = vNULL;
5243 bool inv_p;
5244 bool negative = false;
5245 tree offset = NULL_TREE;
5246 vec<tree> vec_oprnds = vNULL;
5247 bool slp = (slp_node != NULL);
5248 unsigned int vec_num;
5249 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5250 vec_info *vinfo = stmt_info->vinfo;
5251 tree aggr_type;
5252 tree scatter_base = NULL_TREE, scatter_off = NULL_TREE;
5253 tree scatter_off_vectype = NULL_TREE, scatter_decl = NULL_TREE;
5254 int scatter_scale = 1;
5255 enum vect_def_type scatter_idx_dt = vect_unknown_def_type;
5256 enum vect_def_type scatter_src_dt = vect_unknown_def_type;
5257 gimple *new_stmt;
5259 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5260 return false;
5262 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5263 && ! vec_stmt)
5264 return false;
5266 /* Is vectorizable store? */
5268 if (!is_gimple_assign (stmt))
5269 return false;
5271 scalar_dest = gimple_assign_lhs (stmt);
5272 if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
5273 && is_pattern_stmt_p (stmt_info))
5274 scalar_dest = TREE_OPERAND (scalar_dest, 0);
5275 if (TREE_CODE (scalar_dest) != ARRAY_REF
5276 && TREE_CODE (scalar_dest) != BIT_FIELD_REF
5277 && TREE_CODE (scalar_dest) != INDIRECT_REF
5278 && TREE_CODE (scalar_dest) != COMPONENT_REF
5279 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
5280 && TREE_CODE (scalar_dest) != REALPART_EXPR
5281 && TREE_CODE (scalar_dest) != MEM_REF)
5282 return false;
5284 gcc_assert (gimple_assign_single_p (stmt));
5286 tree vectype = STMT_VINFO_VECTYPE (stmt_info), rhs_vectype = NULL_TREE;
5287 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
5289 if (loop_vinfo)
5290 loop = LOOP_VINFO_LOOP (loop_vinfo);
5292 /* Multiple types in SLP are handled by creating the appropriate number of
5293 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5294 case of SLP. */
5295 if (slp || PURE_SLP_STMT (stmt_info))
5296 ncopies = 1;
5297 else
5298 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
5300 gcc_assert (ncopies >= 1);
5302 /* FORNOW. This restriction should be relaxed. */
5303 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
5305 if (dump_enabled_p ())
5306 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5307 "multiple types in nested loop.\n");
5308 return false;
5311 op = gimple_assign_rhs1 (stmt);
5313 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt, &rhs_vectype))
5315 if (dump_enabled_p ())
5316 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5317 "use not simple.\n");
5318 return false;
5321 if (rhs_vectype && !useless_type_conversion_p (vectype, rhs_vectype))
5322 return false;
5324 elem_type = TREE_TYPE (vectype);
5325 vec_mode = TYPE_MODE (vectype);
5327 /* FORNOW. In some cases we can vectorize even if the data-type is not
5328 supported (e.g. array initialization with 0). */
5329 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
5330 return false;
5332 if (!STMT_VINFO_DATA_REF (stmt_info))
5333 return false;
5335 if (!STMT_VINFO_STRIDED_P (stmt_info))
5337 negative =
5338 tree_int_cst_compare (loop && nested_in_vect_loop_p (loop, stmt)
5339 ? STMT_VINFO_DR_STEP (stmt_info) : DR_STEP (dr),
5340 size_zero_node) < 0;
5341 if (negative && ncopies > 1)
5343 if (dump_enabled_p ())
5344 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5345 "multiple types with negative step.\n");
5346 return false;
5348 if (negative)
5350 gcc_assert (!grouped_store);
5351 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
5352 if (alignment_support_scheme != dr_aligned
5353 && alignment_support_scheme != dr_unaligned_supported)
5355 if (dump_enabled_p ())
5356 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5357 "negative step but alignment required.\n");
5358 return false;
5360 if (dt != vect_constant_def
5361 && dt != vect_external_def
5362 && !perm_mask_for_reverse (vectype))
5364 if (dump_enabled_p ())
5365 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5366 "negative step and reversing not supported.\n");
5367 return false;
5372 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
5374 grouped_store = true;
5375 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
5376 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
5377 if (!slp
5378 && !PURE_SLP_STMT (stmt_info)
5379 && !STMT_VINFO_STRIDED_P (stmt_info))
5381 if (vect_store_lanes_supported (vectype, group_size))
5382 store_lanes_p = true;
5383 else if (!vect_grouped_store_supported (vectype, group_size))
5384 return false;
5387 if (STMT_VINFO_STRIDED_P (stmt_info)
5388 && (slp || PURE_SLP_STMT (stmt_info))
5389 && (group_size > nunits
5390 || nunits % group_size != 0))
5392 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5393 "unhandled strided group store\n");
5394 return false;
5397 if (first_stmt == stmt)
5399 /* STMT is the leader of the group. Check the operands of all the
5400 stmts of the group. */
5401 next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
5402 while (next_stmt)
5404 gcc_assert (gimple_assign_single_p (next_stmt));
5405 op = gimple_assign_rhs1 (next_stmt);
5406 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
5408 if (dump_enabled_p ())
5409 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5410 "use not simple.\n");
5411 return false;
5413 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5418 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
5420 gimple *def_stmt;
5421 scatter_decl = vect_check_gather_scatter (stmt, loop_vinfo, &scatter_base,
5422 &scatter_off, &scatter_scale);
5423 gcc_assert (scatter_decl);
5424 if (!vect_is_simple_use (scatter_off, vinfo, &def_stmt, &scatter_idx_dt,
5425 &scatter_off_vectype))
5427 if (dump_enabled_p ())
5428 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5429 "scatter index use not simple.");
5430 return false;
5434 if (!vec_stmt) /* transformation not required. */
5436 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
5437 /* The SLP costs are calculated during SLP analysis. */
5438 if (!PURE_SLP_STMT (stmt_info))
5439 vect_model_store_cost (stmt_info, ncopies, store_lanes_p, dt,
5440 NULL, NULL, NULL);
5441 return true;
5444 /** Transform. **/
5446 ensure_base_align (stmt_info, dr);
5448 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
5450 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, op, src;
5451 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (scatter_decl));
5452 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
5453 tree ptr, mask, var, scale, perm_mask = NULL_TREE;
5454 edge pe = loop_preheader_edge (loop);
5455 gimple_seq seq;
5456 basic_block new_bb;
5457 enum { NARROW, NONE, WIDEN } modifier;
5458 int scatter_off_nunits = TYPE_VECTOR_SUBPARTS (scatter_off_vectype);
5460 if (nunits == (unsigned int) scatter_off_nunits)
5461 modifier = NONE;
5462 else if (nunits == (unsigned int) scatter_off_nunits / 2)
5464 unsigned char *sel = XALLOCAVEC (unsigned char, scatter_off_nunits);
5465 modifier = WIDEN;
5467 for (i = 0; i < (unsigned int) scatter_off_nunits; ++i)
5468 sel[i] = i | nunits;
5470 perm_mask = vect_gen_perm_mask_checked (scatter_off_vectype, sel);
5471 gcc_assert (perm_mask != NULL_TREE);
5473 else if (nunits == (unsigned int) scatter_off_nunits * 2)
5475 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
5476 modifier = NARROW;
5478 for (i = 0; i < (unsigned int) nunits; ++i)
5479 sel[i] = i | scatter_off_nunits;
5481 perm_mask = vect_gen_perm_mask_checked (vectype, sel);
5482 gcc_assert (perm_mask != NULL_TREE);
5483 ncopies *= 2;
5485 else
5486 gcc_unreachable ();
5488 rettype = TREE_TYPE (TREE_TYPE (scatter_decl));
5489 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5490 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5491 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5492 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5493 scaletype = TREE_VALUE (arglist);
5495 gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
5496 && TREE_CODE (rettype) == VOID_TYPE);
5498 ptr = fold_convert (ptrtype, scatter_base);
5499 if (!is_gimple_min_invariant (ptr))
5501 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
5502 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
5503 gcc_assert (!new_bb);
5506 /* Currently we support only unconditional scatter stores,
5507 so mask should be all ones. */
5508 mask = build_int_cst (masktype, -1);
5509 mask = vect_init_vector (stmt, mask, masktype, NULL);
5511 scale = build_int_cst (scaletype, scatter_scale);
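      /* Conceptually, the scatter call built below stores, for each
         element I of the source vector,
           *(PTR + OP[I] * SCALE) = SRC[I]
         with MASK selecting the active elements (all ones here, since
         only unconditional scatter stores are supported).  */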
5513 prev_stmt_info = NULL;
5514 for (j = 0; j < ncopies; ++j)
5516 if (j == 0)
5518 src = vec_oprnd1
5519 = vect_get_vec_def_for_operand (gimple_assign_rhs1 (stmt), stmt);
5520 op = vec_oprnd0
5521 = vect_get_vec_def_for_operand (scatter_off, stmt);
5523 else if (modifier != NONE && (j & 1))
5525 if (modifier == WIDEN)
5527 src = vec_oprnd1
5528 = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
5529 op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
5530 stmt, gsi);
5532 else if (modifier == NARROW)
5534 src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask,
5535 stmt, gsi);
5536 op = vec_oprnd0
5537 = vect_get_vec_def_for_stmt_copy (scatter_idx_dt, vec_oprnd0);
5539 else
5540 gcc_unreachable ();
5542 else
5544 src = vec_oprnd1
5545 = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
5546 op = vec_oprnd0
5547 = vect_get_vec_def_for_stmt_copy (scatter_idx_dt, vec_oprnd0);
5550 if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
5552 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src))
5553 == TYPE_VECTOR_SUBPARTS (srctype));
5554 var = vect_get_new_ssa_name (srctype, vect_simple_var);
5555 src = build1 (VIEW_CONVERT_EXPR, srctype, src);
5556 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
5557 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5558 src = var;
5561 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
5563 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
5564 == TYPE_VECTOR_SUBPARTS (idxtype));
5565 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
5566 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
5567 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
5568 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5569 op = var;
5572 new_stmt
5573 = gimple_build_call (scatter_decl, 5, ptr, mask, op, src, scale);
5575 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5577 if (prev_stmt_info == NULL)
5578 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5579 else
5580 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5581 prev_stmt_info = vinfo_for_stmt (new_stmt);
5583 return true;
5586 if (grouped_store)
5588 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5589 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
5591 GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
5593 /* FORNOW */
5594 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
5596 /* We vectorize all the stmts of the interleaving group when we
5597 reach the last stmt in the group. */
5598 if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
5599 < GROUP_SIZE (vinfo_for_stmt (first_stmt))
5600 && !slp)
5602 *vec_stmt = NULL;
5603 return true;
5606 if (slp)
5608 grouped_store = false;
5609 /* VEC_NUM is the number of vect stmts to be created for this
5610 group. */
5611 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
5612 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
5613 gcc_assert (GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt)) == first_stmt);
5614 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5615 op = gimple_assign_rhs1 (first_stmt);
5617 else
5618 /* VEC_NUM is the number of vect stmts to be created for this
5619 group. */
5620 vec_num = group_size;
5622 else
5624 first_stmt = stmt;
5625 first_dr = dr;
5626 group_size = vec_num = 1;
5629 if (dump_enabled_p ())
5630 dump_printf_loc (MSG_NOTE, vect_location,
5631 "transform store. ncopies = %d\n", ncopies);
5633 if (STMT_VINFO_STRIDED_P (stmt_info))
5635 gimple_stmt_iterator incr_gsi;
5636 bool insert_after;
5637 gimple *incr;
5638 tree offvar;
5639 tree ivstep;
5640 tree running_off;
5641 gimple_seq stmts = NULL;
5642 tree stride_base, stride_step, alias_off;
5643 tree vec_oprnd;
5644 unsigned int g;
5646 gcc_assert (!nested_in_vect_loop_p (loop, stmt));
5648 stride_base
5649 = fold_build_pointer_plus
5650 (unshare_expr (DR_BASE_ADDRESS (first_dr)),
5651 size_binop (PLUS_EXPR,
5652 convert_to_ptrofftype (unshare_expr (DR_OFFSET (first_dr))),
5653                                      convert_to_ptrofftype (DR_INIT (first_dr))));
5654 stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (first_dr)));
5656       /* For a store with loop-invariant (but not power-of-2)
5657 stride (i.e. not a grouped access) like so:
5659 for (i = 0; i < n; i += stride)
5660 array[i] = ...;
5662 we generate a new induction variable and new stores from
5663 the components of the (vectorized) rhs:
5665 for (j = 0; ; j += VF*stride)
5666 vectemp = ...;
5667 tmp1 = vectemp[0];
5668 array[j] = tmp1;
5669 tmp2 = vectemp[1];
5670 array[j + stride] = tmp2;
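      /* An illustrative sketch of the above, assuming a V4SI vectype
         (nunits == 4), ncopies == 1 and a scalar stride of 5:

           for (i = 0; i < n; i += 5)
             array[i] = ...;

         becomes, conceptually,

           for (j = 0; ; j += 4 * 5)
             {
               vectemp = ...;                   (the vectorized rhs)
               array[j]      = vectemp[0];
               array[j + 5]  = vectemp[1];
               array[j + 10] = vectemp[2];
               array[j + 15] = vectemp[3];
             }

         with RUNNING_OFF bumped by STRIDE_STEP (5 * sizeof (elem)) after
         each scalar store below.  */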
5674 unsigned nstores = nunits;
5675 tree ltype = elem_type;
5676 if (slp)
5678 nstores = nunits / group_size;
5679 if (group_size < nunits)
5680 ltype = build_vector_type (elem_type, group_size);
5681 else
5682 ltype = vectype;
5683 ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
5684 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
5685 group_size = 1;
5688 ivstep = stride_step;
5689 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
5690 build_int_cst (TREE_TYPE (ivstep),
5691 ncopies * nstores));
5693 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
5695 create_iv (stride_base, ivstep, NULL,
5696 loop, &incr_gsi, insert_after,
5697 &offvar, NULL);
5698 incr = gsi_stmt (incr_gsi);
5699 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
5701 stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
5702 if (stmts)
5703 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
5705 prev_stmt_info = NULL;
5706 alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)), 0);
5707 next_stmt = first_stmt;
5708 for (g = 0; g < group_size; g++)
5710 running_off = offvar;
5711 if (g)
5713 tree size = TYPE_SIZE_UNIT (ltype);
5714 tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
5715 size);
5716 tree newoff = copy_ssa_name (running_off, NULL);
5717 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
5718 running_off, pos);
5719 vect_finish_stmt_generation (stmt, incr, gsi);
5720 running_off = newoff;
5722 for (j = 0; j < ncopies; j++)
5724 /* We've set op and dt above, from gimple_assign_rhs1(stmt),
5725 and first_stmt == stmt. */
5726 if (j == 0)
5728 if (slp)
5730 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL,
5731 slp_node, -1);
5732 vec_oprnd = vec_oprnds[0];
5734 else
5736 gcc_assert (gimple_assign_single_p (next_stmt));
5737 op = gimple_assign_rhs1 (next_stmt);
5738 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
5741 else
5743 if (slp)
5744 vec_oprnd = vec_oprnds[j];
5745 else
5747 vect_is_simple_use (vec_oprnd, vinfo, &def_stmt, &dt);
5748 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
5752 for (i = 0; i < nstores; i++)
5754 tree newref, newoff;
5755 gimple *incr, *assign;
5756 tree size = TYPE_SIZE (ltype);
5757 /* Extract the i'th component. */
5758 tree pos = fold_build2 (MULT_EXPR, bitsizetype,
5759 bitsize_int (i), size);
5760 tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
5761 size, pos);
5763 elem = force_gimple_operand_gsi (gsi, elem, true,
5764 NULL_TREE, true,
5765 GSI_SAME_STMT);
5767 newref = build2 (MEM_REF, ltype,
5768 running_off, alias_off);
5770 /* And store it to *running_off. */
5771 assign = gimple_build_assign (newref, elem);
5772 vect_finish_stmt_generation (stmt, assign, gsi);
5774 newoff = copy_ssa_name (running_off, NULL);
5775 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
5776 running_off, stride_step);
5777 vect_finish_stmt_generation (stmt, incr, gsi);
5779 running_off = newoff;
5780 if (g == group_size - 1
5781 && !slp)
5783 if (j == 0 && i == 0)
5784 STMT_VINFO_VEC_STMT (stmt_info)
5785 = *vec_stmt = assign;
5786 else
5787 STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign;
5788 prev_stmt_info = vinfo_for_stmt (assign);
5792 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5794 return true;
5797 dr_chain.create (group_size);
5798 oprnds.create (group_size);
5800 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
5801 gcc_assert (alignment_support_scheme);
5802 /* Targets with store-lane instructions must not require explicit
5803 realignment. */
5804 gcc_assert (!store_lanes_p
5805 || alignment_support_scheme == dr_aligned
5806 || alignment_support_scheme == dr_unaligned_supported);
5808 if (negative)
5809 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
5811 if (store_lanes_p)
5812 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
5813 else
5814 aggr_type = vectype;
5816 /* In case the vectorization factor (VF) is bigger than the number
5817 of elements that we can fit in a vectype (nunits), we have to generate
5818      more than one vector stmt - i.e. we need to "unroll" the
5819 vector stmt by a factor VF/nunits. For more details see documentation in
5820 vect_get_vec_def_for_copy_stmt. */
5822 /* In case of interleaving (non-unit grouped access):
5824 S1: &base + 2 = x2
5825 S2: &base = x0
5826 S3: &base + 1 = x1
5827 S4: &base + 3 = x3
5829      We create vectorized stores starting from the base address (the access of
5830      the first stmt in the chain, S2 in the above example) when the last store
5831      stmt of the chain (S4) is reached:
5833 VS1: &base = vx2
5834 VS2: &base + vec_size*1 = vx0
5835 VS3: &base + vec_size*2 = vx1
5836 VS4: &base + vec_size*3 = vx3
5838 Then permutation statements are generated:
5840 VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
5841 VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
5844 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
5845 (the order of the data-refs in the output of vect_permute_store_chain
5846 corresponds to the order of scalar stmts in the interleaving chain - see
5847 the documentation of vect_permute_store_chain()).
5849 In case of both multiple types and interleaving, above vector stores and
5850 permutation stmts are created for every copy. The result vector stmts are
5851 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
5852 STMT_VINFO_RELATED_STMT for the next copies.
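   /* An illustrative sketch of the permutation step, assuming a group
      of two stores and V4SI vectors: for vx0 = {a0,a1,a2,a3} and
      vx1 = {b0,b1,b2,b3}, vect_permute_store_chain produces

        vx2 = VEC_PERM_EXPR <vx0, vx1, { 0, 4, 1, 5 }>   i.e. {a0,b0,a1,b1}
        vx3 = VEC_PERM_EXPR <vx0, vx1, { 2, 6, 3, 7 }>   i.e. {a2,b2,a3,b3}

      so the two result vectors can be stored back to back and every
      element lands at its original interleaved position.  */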
5855 prev_stmt_info = NULL;
5856 for (j = 0; j < ncopies; j++)
5859 if (j == 0)
5861 if (slp)
5863 /* Get vectorized arguments for SLP_NODE. */
5864 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
5865 NULL, slp_node, -1);
5867 vec_oprnd = vec_oprnds[0];
5869 else
5871 /* For interleaved stores we collect vectorized defs for all the
5872 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
5873 used as an input to vect_permute_store_chain(), and OPRNDS as
5874 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
5876 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
5877 OPRNDS are of size 1. */
5878 next_stmt = first_stmt;
5879 for (i = 0; i < group_size; i++)
5881 /* Since gaps are not supported for interleaved stores,
5882 GROUP_SIZE is the exact number of stmts in the chain.
5883              Therefore, NEXT_STMT can't be NULL_TREE.  If there is
5884              no interleaving, GROUP_SIZE is 1, and only one
5885 iteration of the loop will be executed. */
5886 gcc_assert (next_stmt
5887 && gimple_assign_single_p (next_stmt));
5888 op = gimple_assign_rhs1 (next_stmt);
5890 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
5891 dr_chain.quick_push (vec_oprnd);
5892 oprnds.quick_push (vec_oprnd);
5893 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5897          /* We should have caught mismatched types earlier.  */
5898 gcc_assert (useless_type_conversion_p (vectype,
5899 TREE_TYPE (vec_oprnd)));
5900 bool simd_lane_access_p
5901 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
5902 if (simd_lane_access_p
5903 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
5904 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
5905 && integer_zerop (DR_OFFSET (first_dr))
5906 && integer_zerop (DR_INIT (first_dr))
5907 && alias_sets_conflict_p (get_alias_set (aggr_type),
5908 get_alias_set (DR_REF (first_dr))))
5910 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
5911 dataref_offset = build_int_cst (reference_alias_ptr_type
5912 (DR_REF (first_dr)), 0);
5913 inv_p = false;
5915 else
5916 dataref_ptr
5917 = vect_create_data_ref_ptr (first_stmt, aggr_type,
5918 simd_lane_access_p ? loop : NULL,
5919 offset, &dummy, gsi, &ptr_incr,
5920 simd_lane_access_p, &inv_p);
5921 gcc_assert (bb_vinfo || !inv_p);
5923 else
5925 /* For interleaved stores we created vectorized defs for all the
5926 defs stored in OPRNDS in the previous iteration (previous copy).
5927 DR_CHAIN is then used as an input to vect_permute_store_chain(),
5928 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
5929 next copy.
5930 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
5931 OPRNDS are of size 1. */
5932 for (i = 0; i < group_size; i++)
5934 op = oprnds[i];
5935 vect_is_simple_use (op, vinfo, &def_stmt, &dt);
5936 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
5937 dr_chain[i] = vec_oprnd;
5938 oprnds[i] = vec_oprnd;
5940 if (dataref_offset)
5941 dataref_offset
5942 = int_const_binop (PLUS_EXPR, dataref_offset,
5943 TYPE_SIZE_UNIT (aggr_type));
5944 else
5945 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
5946 TYPE_SIZE_UNIT (aggr_type));
5949 if (store_lanes_p)
5951 tree vec_array;
5953 /* Combine all the vectors into an array. */
5954 vec_array = create_vector_array (vectype, vec_num);
5955 for (i = 0; i < vec_num; i++)
5957 vec_oprnd = dr_chain[i];
5958 write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
5961 /* Emit:
5962 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
5963 data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
5964 new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array);
5965 gimple_call_set_lhs (new_stmt, data_ref);
5966 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5968 else
5970 new_stmt = NULL;
5971 if (grouped_store)
5973 if (j == 0)
5974 result_chain.create (group_size);
5975 /* Permute. */
5976 vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
5977 &result_chain);
5980 next_stmt = first_stmt;
5981 for (i = 0; i < vec_num; i++)
5983 unsigned align, misalign;
5985 if (i > 0)
5986 /* Bump the vector pointer. */
5987 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
5988 stmt, NULL_TREE);
5990 if (slp)
5991 vec_oprnd = vec_oprnds[i];
5992 else if (grouped_store)
5993 /* For grouped stores vectorized defs are interleaved in
5994 vect_permute_store_chain(). */
5995 vec_oprnd = result_chain[i];
5997 data_ref = fold_build2 (MEM_REF, TREE_TYPE (vec_oprnd),
5998 dataref_ptr,
5999 dataref_offset
6000 ? dataref_offset
6001 : build_int_cst (reference_alias_ptr_type
6002 (DR_REF (first_dr)), 0));
6003 align = TYPE_ALIGN_UNIT (vectype);
6004 if (aligned_access_p (first_dr))
6005 misalign = 0;
6006 else if (DR_MISALIGNMENT (first_dr) == -1)
6008 if (DR_VECT_AUX (first_dr)->base_element_aligned)
6009 align = TYPE_ALIGN_UNIT (elem_type);
6010 else
6011 align = get_object_alignment (DR_REF (first_dr))
6012 / BITS_PER_UNIT;
6013 misalign = 0;
6014 TREE_TYPE (data_ref)
6015 = build_aligned_type (TREE_TYPE (data_ref),
6016 align * BITS_PER_UNIT);
6018 else
6020 TREE_TYPE (data_ref)
6021 = build_aligned_type (TREE_TYPE (data_ref),
6022 TYPE_ALIGN (elem_type));
6023 misalign = DR_MISALIGNMENT (first_dr);
6025 if (dataref_offset == NULL_TREE
6026 && TREE_CODE (dataref_ptr) == SSA_NAME)
6027 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
6028 misalign);
6030 if (negative
6031 && dt != vect_constant_def
6032 && dt != vect_external_def)
6034 tree perm_mask = perm_mask_for_reverse (vectype);
6035 tree perm_dest
6036 = vect_create_destination_var (gimple_assign_rhs1 (stmt),
6037 vectype);
6038 tree new_temp = make_ssa_name (perm_dest);
6040 /* Generate the permute statement. */
6041 gimple *perm_stmt
6042 = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
6043 vec_oprnd, perm_mask);
6044 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
6046 perm_stmt = SSA_NAME_DEF_STMT (new_temp);
6047 vec_oprnd = new_temp;
6050 /* Arguments are ready. Create the new vector stmt. */
6051 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
6052 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6054 if (slp)
6055 continue;
6057 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
6058 if (!next_stmt)
6059 break;
6062 if (!slp)
6064 if (j == 0)
6065 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6066 else
6067 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6068 prev_stmt_info = vinfo_for_stmt (new_stmt);
6072 dr_chain.release ();
6073 oprnds.release ();
6074 result_chain.release ();
6075 vec_oprnds.release ();
6077 return true;
6080 /* Given a vector type VECTYPE, turns permutation SEL into the equivalent
6081 VECTOR_CST mask. No checks are made that the target platform supports the
6082 mask, so callers may wish to test can_vec_perm_p separately, or use
6083 vect_gen_perm_mask_checked. */
6085 tree
6086 vect_gen_perm_mask_any (tree vectype, const unsigned char *sel)
6088 tree mask_elt_type, mask_type, mask_vec, *mask_elts;
6089 int i, nunits;
6091 nunits = TYPE_VECTOR_SUBPARTS (vectype);
6093 mask_elt_type = lang_hooks.types.type_for_mode
6094 (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
6095 mask_type = get_vectype_for_scalar_type (mask_elt_type);
6097 mask_elts = XALLOCAVEC (tree, nunits);
6098 for (i = nunits - 1; i >= 0; i--)
6099 mask_elts[i] = build_int_cst (mask_elt_type, sel[i]);
6100 mask_vec = build_vector (mask_type, mask_elts);
6102 return mask_vec;
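/* As a usage sketch (assuming the target accepts the pattern), the
   reversal mask that perm_mask_for_reverse asks for on a four-element
   vector could be built like so:

     unsigned char sel[4] = { 3, 2, 1, 0 };
     tree mask = vect_gen_perm_mask_checked (vectype, sel);

   When target support is not yet known, vect_gen_perm_mask_any must be
   paired with an explicit can_vec_perm_p check instead.  */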
6105 /* Checked version of vect_gen_perm_mask_any. Asserts can_vec_perm_p,
6106 i.e. that the target supports the pattern _for arbitrary input vectors_. */
6108 tree
6109 vect_gen_perm_mask_checked (tree vectype, const unsigned char *sel)
6111 gcc_assert (can_vec_perm_p (TYPE_MODE (vectype), false, sel));
6112 return vect_gen_perm_mask_any (vectype, sel);
6115 /* Given vector variables X and Y that were generated for the scalar
6116 STMT, generate instructions to permute the vector elements of X and Y
6117 using permutation mask MASK_VEC, insert them at *GSI and return the
6118 permuted vector variable. */
6120 static tree
6121 permute_vec_elements (tree x, tree y, tree mask_vec, gimple *stmt,
6122 gimple_stmt_iterator *gsi)
6124 tree vectype = TREE_TYPE (x);
6125 tree perm_dest, data_ref;
6126 gimple *perm_stmt;
6128 perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
6129 data_ref = make_ssa_name (perm_dest);
6131 /* Generate the permute statement. */
6132 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
6133 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
6135 return data_ref;
6138 /* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
6139    inserting them on the loop's preheader edge.  Returns true if we
6140    were successful in doing so (and thus STMT can then be moved),
6141 otherwise returns false. */
6143 static bool
6144 hoist_defs_of_uses (gimple *stmt, struct loop *loop)
6146 ssa_op_iter i;
6147 tree op;
6148 bool any = false;
6150 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
6152 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
6153 if (!gimple_nop_p (def_stmt)
6154 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
6156 /* Make sure we don't need to recurse. While we could do
6157             so in simple cases, when there are more complex use webs
6158 we don't have an easy way to preserve stmt order to fulfil
6159 dependencies within them. */
6160 tree op2;
6161 ssa_op_iter i2;
6162 if (gimple_code (def_stmt) == GIMPLE_PHI)
6163 return false;
6164 FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
6166 gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
6167 if (!gimple_nop_p (def_stmt2)
6168 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
6169 return false;
6171 any = true;
6175 if (!any)
6176 return true;
6178 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
6180 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
6181 if (!gimple_nop_p (def_stmt)
6182 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
6184 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
6185 gsi_remove (&gsi, false);
6186 gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
6190 return true;
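/* As an illustration (with hypothetical GIMPLE names): for a loop body

     off_1 = idx_2 * 4;                       (idx_2 defined before the loop)
     x_3 = MEM[invariant_base + off_1];       (STMT, an invariant load)

   hoist_defs_of_uses moves the definition of off_1 onto the preheader
   edge, since its own operands are defined outside the loop; the
   invariant-load handling in vectorizable_load below can then hoist
   STMT itself.  */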
6193 /* vectorizable_load.
6195    Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
6196    can be vectorized.
6196 can be vectorized.
6197 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
6198    stmt to replace it, put it in VEC_STMT, and insert it at GSI.
6199 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
6201 static bool
6202 vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
6203 slp_tree slp_node, slp_instance slp_node_instance)
6205 tree scalar_dest;
6206 tree vec_dest = NULL;
6207 tree data_ref = NULL;
6208 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
6209 stmt_vec_info prev_stmt_info;
6210 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6211 struct loop *loop = NULL;
6212 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
6213 bool nested_in_vect_loop = false;
6214 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
6215 tree elem_type;
6216 tree new_temp;
6217 machine_mode mode;
6218 gimple *new_stmt = NULL;
6219 tree dummy;
6220 enum dr_alignment_support alignment_support_scheme;
6221 tree dataref_ptr = NULL_TREE;
6222 tree dataref_offset = NULL_TREE;
6223 gimple *ptr_incr = NULL;
6224 int ncopies;
6225 int i, j, group_size = -1, group_gap_adj;
6226 tree msq = NULL_TREE, lsq;
6227 tree offset = NULL_TREE;
6228 tree byte_offset = NULL_TREE;
6229 tree realignment_token = NULL_TREE;
6230 gphi *phi = NULL;
6231 vec<tree> dr_chain = vNULL;
6232 bool grouped_load = false;
6233 bool load_lanes_p = false;
6234 gimple *first_stmt;
6235 gimple *first_stmt_for_drptr = NULL;
6236 bool inv_p;
6237 bool negative = false;
6238 bool compute_in_loop = false;
6239 struct loop *at_loop;
6240 int vec_num;
6241 bool slp = (slp_node != NULL);
6242 bool slp_perm = false;
6243 enum tree_code code;
6244 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
6245 int vf;
6246 tree aggr_type;
6247 tree gather_base = NULL_TREE, gather_off = NULL_TREE;
6248 tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
6249 int gather_scale = 1;
6250 enum vect_def_type gather_dt = vect_unknown_def_type;
6251 vec_info *vinfo = stmt_info->vinfo;
6253 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
6254 return false;
6256 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
6257 && ! vec_stmt)
6258 return false;
6260 /* Is vectorizable load? */
6261 if (!is_gimple_assign (stmt))
6262 return false;
6264 scalar_dest = gimple_assign_lhs (stmt);
6265 if (TREE_CODE (scalar_dest) != SSA_NAME)
6266 return false;
6268 code = gimple_assign_rhs_code (stmt);
6269 if (code != ARRAY_REF
6270 && code != BIT_FIELD_REF
6271 && code != INDIRECT_REF
6272 && code != COMPONENT_REF
6273 && code != IMAGPART_EXPR
6274 && code != REALPART_EXPR
6275 && code != MEM_REF
6276 && TREE_CODE_CLASS (code) != tcc_declaration)
6277 return false;
6279 if (!STMT_VINFO_DATA_REF (stmt_info))
6280 return false;
6282 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
6283 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
6285 if (loop_vinfo)
6287 loop = LOOP_VINFO_LOOP (loop_vinfo);
6288 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
6289 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
6291 else
6292 vf = 1;
6294 /* Multiple types in SLP are handled by creating the appropriate number of
6295 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
6296 case of SLP. */
6297 if (slp || PURE_SLP_STMT (stmt_info))
6298 ncopies = 1;
6299 else
6300 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
6302 gcc_assert (ncopies >= 1);
6304 /* FORNOW. This restriction should be relaxed. */
6305 if (nested_in_vect_loop && ncopies > 1)
6307 if (dump_enabled_p ())
6308 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6309 "multiple types in nested loop.\n");
6310 return false;
6313 /* Invalidate assumptions made by dependence analysis when vectorization
6314 on the unrolled body effectively re-orders stmts. */
6315 if (ncopies > 1
6316 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
6317 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6318 > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
6320 if (dump_enabled_p ())
6321 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6322 "cannot perform implicit CSE when unrolling "
6323 "with negative dependence distance\n");
6324 return false;
6327 elem_type = TREE_TYPE (vectype);
6328 mode = TYPE_MODE (vectype);
6330 /* FORNOW. In some cases can vectorize even if data-type not supported
6331 (e.g. - data copies). */
6332 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
6334 if (dump_enabled_p ())
6335 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6336 "Aligned load, but unsupported type.\n");
6337 return false;
6340 /* Check if the load is a part of an interleaving chain. */
6341 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
6343 grouped_load = true;
6344 /* FORNOW */
6345 gcc_assert (!nested_in_vect_loop && !STMT_VINFO_GATHER_SCATTER_P (stmt_info));
6347 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
6349 /* If this is single-element interleaving with an element distance
6350          that leaves unused vector loads around, punt - we at least create
6351 very sub-optimal code in that case (and blow up memory,
6352 see PR65518). */
6353 bool force_peeling = false;
6354 if (first_stmt == stmt
6355 && !GROUP_NEXT_ELEMENT (stmt_info))
6357 if (GROUP_SIZE (stmt_info) > TYPE_VECTOR_SUBPARTS (vectype))
6359 if (dump_enabled_p ())
6360 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6361 "single-element interleaving not supported "
6362 "for not adjacent vector loads\n");
6363 return false;
6366 /* Single-element interleaving requires peeling for gaps. */
6367 force_peeling = true;
6370       /* If there is a gap at the end of the group or the group size cannot
6371 be made a multiple of the vector element count then we access excess
6372 elements in the last iteration and thus need to peel that off. */
6373 if (loop_vinfo
6374 && ! STMT_VINFO_STRIDED_P (stmt_info)
6375 && (force_peeling
6376 || GROUP_GAP (vinfo_for_stmt (first_stmt)) != 0
6377 || (!slp && vf % GROUP_SIZE (vinfo_for_stmt (first_stmt)) != 0)))
6379 if (dump_enabled_p ())
6380 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6381 "Data access with gaps requires scalar "
6382 "epilogue loop\n");
6383 if (loop->inner)
6385 if (dump_enabled_p ())
6386 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6387 "Peeling for outer loop is not supported\n");
6388 return false;
6391 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
6394 if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
6395 slp_perm = true;
6397 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
6398 if (!slp
6399 && !PURE_SLP_STMT (stmt_info)
6400 && !STMT_VINFO_STRIDED_P (stmt_info))
6402 if (vect_load_lanes_supported (vectype, group_size))
6403 load_lanes_p = true;
6404 else if (!vect_grouped_load_supported (vectype, group_size))
6405 return false;
6408 /* Invalidate assumptions made by dependence analysis when vectorization
6409 on the unrolled body effectively re-orders stmts. */
6410 if (!PURE_SLP_STMT (stmt_info)
6411 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
6412 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6413 > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
6415 if (dump_enabled_p ())
6416 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6417 "cannot perform implicit CSE when performing "
6418 "group loads with negative dependence distance\n");
6419 return false;
6422 /* Similarly when the stmt is a load that is both part of a SLP
6423 instance and a loop vectorized stmt via the same-dr mechanism
6424 we have to give up. */
6425 if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)
6426 && (STMT_SLP_TYPE (stmt_info)
6427 != STMT_SLP_TYPE (vinfo_for_stmt
6428 (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)))))
6430 if (dump_enabled_p ())
6431 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6432 "conflicting SLP types for CSEd load\n");
6433 return false;
6438 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
6440 gimple *def_stmt;
6441 gather_decl = vect_check_gather_scatter (stmt, loop_vinfo, &gather_base,
6442 &gather_off, &gather_scale);
6443 gcc_assert (gather_decl);
6444 if (!vect_is_simple_use (gather_off, vinfo, &def_stmt, &gather_dt,
6445 &gather_off_vectype))
6447 if (dump_enabled_p ())
6448 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6449 "gather index use not simple.\n");
6450 return false;
6453 else if (STMT_VINFO_STRIDED_P (stmt_info))
6455 if ((grouped_load
6456 && (slp || PURE_SLP_STMT (stmt_info)))
6457 && (group_size > nunits
6458 || nunits % group_size != 0))
6460 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6461 "unhandled strided group load\n");
6462 return false;
6465 else
6467 negative = tree_int_cst_compare (nested_in_vect_loop
6468 ? STMT_VINFO_DR_STEP (stmt_info)
6469 : DR_STEP (dr),
6470 size_zero_node) < 0;
6471 if (negative && ncopies > 1)
6473 if (dump_enabled_p ())
6474 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6475 "multiple types with negative step.\n");
6476 return false;
6479 if (negative)
6481 if (grouped_load)
6483 if (dump_enabled_p ())
6484 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6485 "negative step for group load not supported"
6486 "\n");
6487 return false;
6489 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
6490 if (alignment_support_scheme != dr_aligned
6491 && alignment_support_scheme != dr_unaligned_supported)
6493 if (dump_enabled_p ())
6494 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6495 "negative step but alignment required.\n");
6496 return false;
6498 if (!perm_mask_for_reverse (vectype))
6500 if (dump_enabled_p ())
6501 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6502 "negative step and reversing not supported."
6503 "\n");
6504 return false;
6509 if (!vec_stmt) /* transformation not required. */
6511 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
6512 /* The SLP costs are calculated during SLP analysis. */
6513 if (!PURE_SLP_STMT (stmt_info))
6514 vect_model_load_cost (stmt_info, ncopies, load_lanes_p,
6515 NULL, NULL, NULL);
6516 return true;
6519 if (dump_enabled_p ())
6520 dump_printf_loc (MSG_NOTE, vect_location,
6521 "transform load. ncopies = %d\n", ncopies);
6523 /** Transform. **/
6525 ensure_base_align (stmt_info, dr);
6527 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
6529 tree vec_oprnd0 = NULL_TREE, op;
6530 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
6531 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
6532 tree ptr, mask, var, scale, merge, perm_mask = NULL_TREE, prev_res = NULL_TREE;
6533 edge pe = loop_preheader_edge (loop);
6534 gimple_seq seq;
6535 basic_block new_bb;
6536 enum { NARROW, NONE, WIDEN } modifier;
6537 int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);
6539 if (nunits == gather_off_nunits)
6540 modifier = NONE;
6541 else if (nunits == gather_off_nunits / 2)
6543 unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
6544 modifier = WIDEN;
6546 for (i = 0; i < gather_off_nunits; ++i)
6547 sel[i] = i | nunits;
6549 perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
6551 else if (nunits == gather_off_nunits * 2)
6553 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
6554 modifier = NARROW;
6556 for (i = 0; i < nunits; ++i)
6557 sel[i] = i < gather_off_nunits
6558 ? i : i + nunits - gather_off_nunits;
6560 perm_mask = vect_gen_perm_mask_checked (vectype, sel);
6561 ncopies *= 2;
6563 else
6564 gcc_unreachable ();
6566 rettype = TREE_TYPE (TREE_TYPE (gather_decl));
6567 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6568 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6569 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6570 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6571 scaletype = TREE_VALUE (arglist);
6572 gcc_checking_assert (types_compatible_p (srctype, rettype));
6574 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6576 ptr = fold_convert (ptrtype, gather_base);
6577 if (!is_gimple_min_invariant (ptr))
6579 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
6580 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
6581 gcc_assert (!new_bb);
6584 /* Currently we support only unconditional gather loads,
6585 so mask should be all ones. */
6586 if (TREE_CODE (masktype) == INTEGER_TYPE)
6587 mask = build_int_cst (masktype, -1);
6588 else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
6590 mask = build_int_cst (TREE_TYPE (masktype), -1);
6591 mask = build_vector_from_val (masktype, mask);
6592 mask = vect_init_vector (stmt, mask, masktype, NULL);
6594 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
6596 REAL_VALUE_TYPE r;
6597 long tmp[6];
6598 for (j = 0; j < 6; ++j)
6599 tmp[j] = -1;
6600 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
6601 mask = build_real (TREE_TYPE (masktype), r);
6602 mask = build_vector_from_val (masktype, mask);
6603 mask = vect_init_vector (stmt, mask, masktype, NULL);
6605 else
6606 gcc_unreachable ();
6608 scale = build_int_cst (scaletype, gather_scale);
6610 if (TREE_CODE (TREE_TYPE (rettype)) == INTEGER_TYPE)
6611 merge = build_int_cst (TREE_TYPE (rettype), 0);
6612 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype)))
6614 REAL_VALUE_TYPE r;
6615 long tmp[6];
6616 for (j = 0; j < 6; ++j)
6617 tmp[j] = 0;
6618 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (rettype)));
6619 merge = build_real (TREE_TYPE (rettype), r);
6621 else
6622 gcc_unreachable ();
6623 merge = build_vector_from_val (rettype, merge);
6624 merge = vect_init_vector (stmt, merge, rettype, NULL);
6626 prev_stmt_info = NULL;
6627 for (j = 0; j < ncopies; ++j)
6629 if (modifier == WIDEN && (j & 1))
6630 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
6631 perm_mask, stmt, gsi);
6632 else if (j == 0)
6633 op = vec_oprnd0
6634 = vect_get_vec_def_for_operand (gather_off, stmt);
6635 else
6636 op = vec_oprnd0
6637 = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);
6639 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
6641 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
6642 == TYPE_VECTOR_SUBPARTS (idxtype));
6643 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
6644 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
6645 new_stmt
6646 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
6647 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6648 op = var;
6651 new_stmt
6652 = gimple_build_call (gather_decl, 5, merge, ptr, op, mask, scale);
6654 if (!useless_type_conversion_p (vectype, rettype))
6656 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
6657 == TYPE_VECTOR_SUBPARTS (rettype));
6658 op = vect_get_new_ssa_name (rettype, vect_simple_var);
6659 gimple_call_set_lhs (new_stmt, op);
6660 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6661 var = make_ssa_name (vec_dest);
6662 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
6663 new_stmt
6664 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
6666 else
6668 var = make_ssa_name (vec_dest, new_stmt);
6669 gimple_call_set_lhs (new_stmt, var);
6672 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6674 if (modifier == NARROW)
6676 if ((j & 1) == 0)
6678 prev_res = var;
6679 continue;
6681 var = permute_vec_elements (prev_res, var,
6682 perm_mask, stmt, gsi);
6683 new_stmt = SSA_NAME_DEF_STMT (var);
6686 if (prev_stmt_info == NULL)
6687 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6688 else
6689 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6690 prev_stmt_info = vinfo_for_stmt (new_stmt);
6692 return true;
6694 else if (STMT_VINFO_STRIDED_P (stmt_info))
6696 gimple_stmt_iterator incr_gsi;
6697 bool insert_after;
6698 gimple *incr;
6699 tree offvar;
6700 tree ivstep;
6701 tree running_off;
6702 vec<constructor_elt, va_gc> *v = NULL;
6703 gimple_seq stmts = NULL;
6704 tree stride_base, stride_step, alias_off;
6706 gcc_assert (!nested_in_vect_loop);
6708 if (slp && grouped_load)
6709 first_dr = STMT_VINFO_DATA_REF
6710 (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
6711 else
6712 first_dr = dr;
6714 stride_base
6715 = fold_build_pointer_plus
6716 (DR_BASE_ADDRESS (first_dr),
6717 size_binop (PLUS_EXPR,
6718 convert_to_ptrofftype (DR_OFFSET (first_dr)),
6719 convert_to_ptrofftype (DR_INIT (first_dr))));
6720 stride_step = fold_convert (sizetype, DR_STEP (first_dr));
6722       /* For a load with loop-invariant (but not power-of-2)
6723 stride (i.e. not a grouped access) like so:
6725 for (i = 0; i < n; i += stride)
6726 ... = array[i];
6728 we generate a new induction variable and new accesses to
6729 form a new vector (or vectors, depending on ncopies):
6731 for (j = 0; ; j += VF*stride)
6732 tmp1 = array[j];
6733 tmp2 = array[j + stride];
6735 vectemp = {tmp1, tmp2, ...}
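      /* An illustrative sketch of the above, assuming a V4SI vectype and
         a scalar stride of 3:

           for (i = 0; i < n; i += 3)
             ... = array[i];

         becomes, conceptually,

           for (j = 0; ; j += VF * 3)
             {
               tmp1 = array[j];
               tmp2 = array[j + 3];
               tmp3 = array[j + 6];
               tmp4 = array[j + 9];
               vectemp = { tmp1, tmp2, tmp3, tmp4 };
             }

         where each scalar load below bumps RUNNING_OFF by STRIDE_STEP
         (3 * sizeof (elem)) and the four loads feed one CONSTRUCTOR.  */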
6738 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
6739 build_int_cst (TREE_TYPE (stride_step), vf));
6741 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
6743 create_iv (unshare_expr (stride_base), unshare_expr (ivstep), NULL,
6744 loop, &incr_gsi, insert_after,
6745 &offvar, NULL);
6746 incr = gsi_stmt (incr_gsi);
6747 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
6749 stride_step = force_gimple_operand (unshare_expr (stride_step),
6750 &stmts, true, NULL_TREE);
6751 if (stmts)
6752 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
6754 prev_stmt_info = NULL;
6755 running_off = offvar;
6756 alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)), 0);
6757 int nloads = nunits;
6758 tree ltype = TREE_TYPE (vectype);
6759 auto_vec<tree> dr_chain;
6760 if (slp)
6762 nloads = nunits / group_size;
6763 if (group_size < nunits)
6764 ltype = build_vector_type (TREE_TYPE (vectype), group_size);
6765 else
6766 ltype = vectype;
6767 ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
6768 /* For SLP permutation support we need to load the whole group,
6769 not only the number of vector stmts the permutation result
6770 fits in. */
6771 if (slp_perm)
6773 ncopies = (group_size * vf + nunits - 1) / nunits;
6774 dr_chain.create (ncopies);
6776 else
6777 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6779 for (j = 0; j < ncopies; j++)
6781 tree vec_inv;
6783 if (nloads > 1)
6785 vec_alloc (v, nloads);
6786 for (i = 0; i < nloads; i++)
6788 tree newref, newoff;
6789 gimple *incr;
6790 newref = build2 (MEM_REF, ltype, running_off, alias_off);
6792 newref = force_gimple_operand_gsi (gsi, newref, true,
6793 NULL_TREE, true,
6794 GSI_SAME_STMT);
6795 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, newref);
6796 newoff = copy_ssa_name (running_off);
6797 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6798 running_off, stride_step);
6799 vect_finish_stmt_generation (stmt, incr, gsi);
6801 running_off = newoff;
6804 vec_inv = build_constructor (vectype, v);
6805 new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
6806 new_stmt = SSA_NAME_DEF_STMT (new_temp);
6808 else
6810 new_stmt = gimple_build_assign (make_ssa_name (ltype),
6811 build2 (MEM_REF, ltype,
6812 running_off, alias_off));
6813 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6815 tree newoff = copy_ssa_name (running_off);
6816 gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6817 running_off, stride_step);
6818 vect_finish_stmt_generation (stmt, incr, gsi);
6820 running_off = newoff;
6823 if (slp)
6825 if (slp_perm)
6826 dr_chain.quick_push (gimple_assign_lhs (new_stmt));
6827 else
6828 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
6830 else
6832 if (j == 0)
6833 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6834 else
6835 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6836 prev_stmt_info = vinfo_for_stmt (new_stmt);
6839 if (slp_perm)
6840 vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
6841 slp_node_instance, false);
6842 return true;
6845 if (grouped_load)
6847 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
6848 /* For SLP vectorization we directly vectorize a subchain
6849 without permutation. */
6850 if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
6851 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
6852 /* For BB vectorization always use the first stmt to base
6853 the data ref pointer on. */
6854 if (bb_vinfo)
6855 first_stmt_for_drptr = SLP_TREE_SCALAR_STMTS (slp_node)[0];
6857 /* Check if the chain of loads is already vectorized. */
6858 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
6859 /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
6860 ??? But we can only do so if there is exactly one
6861 as we have no way to get at the rest. Leave the CSE
6862 opportunity alone.
6863 ??? With the group load eventually participating
6864 in multiple different permutations (having multiple
6865 slp nodes which refer to the same group) the CSE
6866 is even wrong code. See PR56270. */
6867 && !slp)
6869 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
6870 return true;
6872 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
6873 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
6874 group_gap_adj = 0;
6876 /* VEC_NUM is the number of vect stmts to be created for this group. */
6877 if (slp)
6879 grouped_load = false;
6880 /* For SLP permutation support we need to load the whole group,
6881 not only the number of vector stmts the permutation result
6882 fits in. */
6883 if (slp_perm)
6884 vec_num = (group_size * vf + nunits - 1) / nunits;
6885 else
6886 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6887 group_gap_adj = vf * group_size - nunits * vec_num;
6889 else
6890 vec_num = group_size;
6892 else
6894 first_stmt = stmt;
6895 first_dr = dr;
6896 group_size = vec_num = 1;
6897 group_gap_adj = 0;
6900 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
6901 gcc_assert (alignment_support_scheme);
6902 /* Targets with load-lane instructions must not require explicit
6903 realignment. */
6904 gcc_assert (!load_lanes_p
6905 || alignment_support_scheme == dr_aligned
6906 || alignment_support_scheme == dr_unaligned_supported);
6908 /* In case the vectorization factor (VF) is bigger than the number
6909 of elements that we can fit in a vectype (nunits), we have to generate
6910      more than one vector stmt - i.e. we need to "unroll" the
6911 vector stmt by a factor VF/nunits. In doing so, we record a pointer
6912 from one copy of the vector stmt to the next, in the field
6913 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
6914 stages to find the correct vector defs to be used when vectorizing
6915 stmts that use the defs of the current stmt. The example below
6916 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
6917 need to create 4 vectorized stmts):
6919 before vectorization:
6920 RELATED_STMT VEC_STMT
6921 S1: x = memref - -
6922 S2: z = x + 1 - -
6924 step 1: vectorize stmt S1:
6925 We first create the vector stmt VS1_0, and, as usual, record a
6926 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
6927 Next, we create the vector stmt VS1_1, and record a pointer to
6928 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
6929 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
6930 stmts and pointers:
6931 RELATED_STMT VEC_STMT
6932 VS1_0: vx0 = memref0 VS1_1 -
6933 VS1_1: vx1 = memref1 VS1_2 -
6934 VS1_2: vx2 = memref2 VS1_3 -
6935 VS1_3: vx3 = memref3 - -
6936 S1: x = load - VS1_0
6937 S2: z = x + 1 - -
6939 See in documentation in vect_get_vec_def_for_stmt_copy for how the
6940 information we recorded in RELATED_STMT field is used to vectorize
6941 stmt S2. */
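  /* Sketch of the consumer side (using the macros from above): when S2
     is vectorized, its first copy picks up VS1_0 and every further copy
     follows the chain, roughly

       gimple *vs = STMT_VINFO_VEC_STMT (vinfo_for_stmt (S1));    (VS1_0)
       vs = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (vs));        (VS1_1)

     which is how vect_get_vec_def_for_stmt_copy finds the def for each
     successive copy.  */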
6943 /* In case of interleaving (non-unit grouped access):
6945 S1: x2 = &base + 2
6946 S2: x0 = &base
6947 S3: x1 = &base + 1
6948 S4: x3 = &base + 3
6950 Vectorized loads are created in the order of memory accesses
6951 starting from the access of the first stmt of the chain:
6953 VS1: vx0 = &base
6954 VS2: vx1 = &base + vec_size*1
6955 VS3: vx3 = &base + vec_size*2
6956 VS4: vx4 = &base + vec_size*3
6958 Then permutation statements are generated:
6960 VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
6961 VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
6964 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
6965 (the order of the data-refs in the output of vect_permute_load_chain
6966 corresponds to the order of scalar stmts in the interleaving chain - see
6967 the documentation of vect_permute_load_chain()).
6968 The generation of permutation stmts and recording them in
6969 STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().
6971 In case of both multiple types and interleaving, the vector loads and
6972 permutation stmts above are created for every copy. The result vector
6973 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
6974 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
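  /* An illustrative sketch of the permutations, assuming two scalars
     interleaved in memory and V4SI vectors: the two loads give
     vx0 = {a0,b0,a1,b1} and vx1 = {a2,b2,a3,b3}, and the extract-even /
     extract-odd masks above become, for nunits == 4,

       vx5 = VEC_PERM_EXPR <vx0, vx1, { 0, 2, 4, 6 }>   i.e. {a0,a1,a2,a3}
       vx6 = VEC_PERM_EXPR <vx0, vx1, { 1, 3, 5, 7 }>   i.e. {b0,b1,b2,b3}  */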
6976 /* If the data reference is aligned (dr_aligned) or potentially unaligned
6977 on a target that supports unaligned accesses (dr_unaligned_supported)
6978 we generate the following code:
6979 p = initial_addr;
6980 indx = 0;
6981 loop {
6982 p = p + indx * vectype_size;
6983 vec_dest = *(p);
6984 indx = indx + 1;
6987 Otherwise, the data reference is potentially unaligned on a target that
6988 does not support unaligned accesses (dr_explicit_realign_optimized) -
6989 then generate the following code, in which the data in each iteration is
6990 obtained by two vector loads, one from the previous iteration, and one
6991 from the current iteration:
6992 p1 = initial_addr;
6993 msq_init = *(floor(p1))
6994 p2 = initial_addr + VS - 1;
6995 realignment_token = call target_builtin;
6996 indx = 0;
6997 loop {
6998 p2 = p2 + indx * vectype_size
6999 lsq = *(floor(p2))
7000 vec_dest = realign_load (msq, lsq, realignment_token)
7001 indx = indx + 1;
7002 msq = lsq;
7003 } */
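  /* A worked instance of the realignment scheme, assuming 16-byte vectors
     and an access misaligned by 4 bytes (initial_addr = base + 4):

       msq = *(base);                          (bytes 0..15, preheader load)
       lsq = *(base + 16);                     (bytes 16..31, in-loop load)
       vec_dest = realign_load (msq, lsq, realignment_token);   (bytes 4..19)

     and on the next iteration the old lsq becomes the new msq, so the
     steady state needs only one aligned load per iteration.  */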
7005 /* If the misalignment remains the same throughout the execution of the
7006 loop, we can create the init_addr and permutation mask at the loop
7007 preheader. Otherwise, it needs to be created inside the loop.
7008 This can only occur when vectorizing memory accesses in the inner-loop
7009 nested within an outer-loop that is being vectorized. */
7011 if (nested_in_vect_loop
7012 && (TREE_INT_CST_LOW (DR_STEP (dr))
7013 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
7015 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
7016 compute_in_loop = true;
7019 if ((alignment_support_scheme == dr_explicit_realign_optimized
7020 || alignment_support_scheme == dr_explicit_realign)
7021 && !compute_in_loop)
7023 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
7024 alignment_support_scheme, NULL_TREE,
7025 &at_loop);
7026 if (alignment_support_scheme == dr_explicit_realign_optimized)
7028 phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
7029 byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
7030 size_one_node);
7033 else
7034 at_loop = loop;
7036 if (negative)
7037 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
7039 if (load_lanes_p)
7040 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
7041 else
7042 aggr_type = vectype;
7044 prev_stmt_info = NULL;
7045 for (j = 0; j < ncopies; j++)
7047 /* 1. Create the vector or array pointer update chain. */
7048 if (j == 0)
7050 bool simd_lane_access_p
7051 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
7052 if (simd_lane_access_p
7053 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
7054 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
7055 && integer_zerop (DR_OFFSET (first_dr))
7056 && integer_zerop (DR_INIT (first_dr))
7057 && alias_sets_conflict_p (get_alias_set (aggr_type),
7058 get_alias_set (DR_REF (first_dr)))
7059 && (alignment_support_scheme == dr_aligned
7060 || alignment_support_scheme == dr_unaligned_supported))
7062 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
7063 dataref_offset = build_int_cst (reference_alias_ptr_type
7064 (DR_REF (first_dr)), 0);
7065 inv_p = false;
7067 else if (first_stmt_for_drptr
7068 && first_stmt != first_stmt_for_drptr)
7070 dataref_ptr
7071 = vect_create_data_ref_ptr (first_stmt_for_drptr, aggr_type,
7072 at_loop, offset, &dummy, gsi,
7073 &ptr_incr, simd_lane_access_p,
7074 &inv_p, byte_offset);
7075 /* Adjust the pointer by the difference to first_stmt. */
7076 data_reference_p ptrdr
7077 = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt_for_drptr));
7078 tree diff = fold_convert (sizetype,
7079 size_binop (MINUS_EXPR,
7080 DR_INIT (first_dr),
7081 DR_INIT (ptrdr)));
7082 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7083 stmt, diff);
7085 else
7086 dataref_ptr
7087 = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
7088 offset, &dummy, gsi, &ptr_incr,
7089 simd_lane_access_p, &inv_p,
7090 byte_offset);
7092 else if (dataref_offset)
7093 dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
7094 TYPE_SIZE_UNIT (aggr_type));
7095 else
7096 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
7097 TYPE_SIZE_UNIT (aggr_type));
7099 if (grouped_load || slp_perm)
7100 dr_chain.create (vec_num);
7102 if (load_lanes_p)
7104 tree vec_array;
7106 vec_array = create_vector_array (vectype, vec_num);
7108 /* Emit:
7109 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
7110 data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
7111 new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
7112 gimple_call_set_lhs (new_stmt, vec_array);
7113 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7115 /* Extract each vector into an SSA_NAME. */
7116 for (i = 0; i < vec_num; i++)
7118 new_temp = read_vector_array (stmt, gsi, scalar_dest,
7119 vec_array, i);
7120 dr_chain.quick_push (new_temp);
7123 /* Record the mapping between SSA_NAMEs and statements. */
7124 vect_record_grouped_load_vectors (stmt, dr_chain);
7126 else
7128 for (i = 0; i < vec_num; i++)
7130 if (i > 0)
7131 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7132 stmt, NULL_TREE);
7134 /* 2. Create the vector-load in the loop. */
7135 switch (alignment_support_scheme)
7137 case dr_aligned:
7138 case dr_unaligned_supported:
7140 unsigned int align, misalign;
7142 data_ref
7143 = fold_build2 (MEM_REF, vectype, dataref_ptr,
7144 dataref_offset
7145 ? dataref_offset
7146 : build_int_cst (reference_alias_ptr_type
7147 (DR_REF (first_dr)), 0));
7148 align = TYPE_ALIGN_UNIT (vectype);
7149 if (alignment_support_scheme == dr_aligned)
7151 gcc_assert (aligned_access_p (first_dr));
7152 misalign = 0;
7154 else if (DR_MISALIGNMENT (first_dr) == -1)
7156 if (DR_VECT_AUX (first_dr)->base_element_aligned)
7157 align = TYPE_ALIGN_UNIT (elem_type);
7158 else
7159 align = (get_object_alignment (DR_REF (first_dr))
7160 / BITS_PER_UNIT);
7161 misalign = 0;
7162 TREE_TYPE (data_ref)
7163 = build_aligned_type (TREE_TYPE (data_ref),
7164 align * BITS_PER_UNIT);
7166 else
7168 TREE_TYPE (data_ref)
7169 = build_aligned_type (TREE_TYPE (data_ref),
7170 TYPE_ALIGN (elem_type));
7171 misalign = DR_MISALIGNMENT (first_dr);
7173 if (dataref_offset == NULL_TREE
7174 && TREE_CODE (dataref_ptr) == SSA_NAME)
7175 set_ptr_info_alignment (get_ptr_info (dataref_ptr),
7176 align, misalign);
7177 break;
7179 case dr_explicit_realign:
7181 tree ptr, bump;
7183 tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));
7185 if (compute_in_loop)
7186 msq = vect_setup_realignment (first_stmt, gsi,
7187 &realignment_token,
7188 dr_explicit_realign,
7189 dataref_ptr, NULL);
7191 if (TREE_CODE (dataref_ptr) == SSA_NAME)
7192 ptr = copy_ssa_name (dataref_ptr);
7193 else
7194 ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
7195 new_stmt = gimple_build_assign
7196 (ptr, BIT_AND_EXPR, dataref_ptr,
7197 build_int_cst
7198 (TREE_TYPE (dataref_ptr),
7199 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
7200 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7201 data_ref
7202 = build2 (MEM_REF, vectype, ptr,
7203 build_int_cst (reference_alias_ptr_type
7204 (DR_REF (first_dr)), 0));
7205 vec_dest = vect_create_destination_var (scalar_dest,
7206 vectype);
7207 new_stmt = gimple_build_assign (vec_dest, data_ref);
7208 new_temp = make_ssa_name (vec_dest, new_stmt);
7209 gimple_assign_set_lhs (new_stmt, new_temp);
7210 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
7211 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
7212 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7213 msq = new_temp;
7215 bump = size_binop (MULT_EXPR, vs,
7216 TYPE_SIZE_UNIT (elem_type));
7217 bump = size_binop (MINUS_EXPR, bump, size_one_node);
7218 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
7219 new_stmt = gimple_build_assign
7220 (NULL_TREE, BIT_AND_EXPR, ptr,
7221 build_int_cst
7222 (TREE_TYPE (ptr),
7223 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
7224 ptr = copy_ssa_name (ptr, new_stmt);
7225 gimple_assign_set_lhs (new_stmt, ptr);
7226 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7227 data_ref
7228 = build2 (MEM_REF, vectype, ptr,
7229 build_int_cst (reference_alias_ptr_type
7230 (DR_REF (first_dr)), 0));
7231 break;
7233 case dr_explicit_realign_optimized:
7234 if (TREE_CODE (dataref_ptr) == SSA_NAME)
7235 new_temp = copy_ssa_name (dataref_ptr);
7236 else
7237 new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
7238 new_stmt = gimple_build_assign
7239 (new_temp, BIT_AND_EXPR, dataref_ptr,
7240 build_int_cst
7241 (TREE_TYPE (dataref_ptr),
7242 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
7243 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7244 data_ref
7245 = build2 (MEM_REF, vectype, new_temp,
7246 build_int_cst (reference_alias_ptr_type
7247 (DR_REF (first_dr)), 0));
7248 break;
7249 default:
7250 gcc_unreachable ();
7252 vec_dest = vect_create_destination_var (scalar_dest, vectype);
7253 new_stmt = gimple_build_assign (vec_dest, data_ref);
7254 new_temp = make_ssa_name (vec_dest, new_stmt);
7255 gimple_assign_set_lhs (new_stmt, new_temp);
7256 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7258 /* 3. Handle explicit realignment if necessary/supported.
7259 Create in loop:
7260 vec_dest = realign_load (msq, lsq, realignment_token) */
7261 if (alignment_support_scheme == dr_explicit_realign_optimized
7262 || alignment_support_scheme == dr_explicit_realign)
7264 lsq = gimple_assign_lhs (new_stmt);
7265 if (!realignment_token)
7266 realignment_token = dataref_ptr;
7267 vec_dest = vect_create_destination_var (scalar_dest, vectype);
7268 new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
7269 msq, lsq, realignment_token);
7270 new_temp = make_ssa_name (vec_dest, new_stmt);
7271 gimple_assign_set_lhs (new_stmt, new_temp);
7272 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7274 if (alignment_support_scheme == dr_explicit_realign_optimized)
7276 gcc_assert (phi);
7277 if (i == vec_num - 1 && j == ncopies - 1)
7278 add_phi_arg (phi, lsq,
7279 loop_latch_edge (containing_loop),
7280 UNKNOWN_LOCATION);
7281 msq = lsq;
7285 /* 4. Handle invariant-load. */
7286 if (inv_p && !bb_vinfo)
7288 gcc_assert (!grouped_load);
7289 /* If we have versioned for aliasing or the loop doesn't
7290 have any data dependencies that would preclude this,
7291 then we are sure this is a loop invariant load and
7292 thus we can insert it on the preheader edge. */
7293 if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
7294 && !nested_in_vect_loop
7295 && hoist_defs_of_uses (stmt, loop))
7297 if (dump_enabled_p ())
7299 dump_printf_loc (MSG_NOTE, vect_location,
7300 "hoisting out of the vectorized "
7301 "loop: ");
7302 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7304 tree tem = copy_ssa_name (scalar_dest);
7305 gsi_insert_on_edge_immediate
7306 (loop_preheader_edge (loop),
7307 gimple_build_assign (tem,
7308 unshare_expr
7309 (gimple_assign_rhs1 (stmt))));
7310 new_temp = vect_init_vector (stmt, tem, vectype, NULL);
7311 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7312 set_vinfo_for_stmt (new_stmt,
7313 new_stmt_vec_info (new_stmt, vinfo));
7315 else
7317 gimple_stmt_iterator gsi2 = *gsi;
7318 gsi_next (&gsi2);
7319 new_temp = vect_init_vector (stmt, scalar_dest,
7320 vectype, &gsi2);
7321 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7325 if (negative)
7327 tree perm_mask = perm_mask_for_reverse (vectype);
7328 new_temp = permute_vec_elements (new_temp, new_temp,
7329 perm_mask, stmt, gsi);
7330 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7333 /* Collect vector loads and later create their permutation in
7334 vect_transform_grouped_load (). */
7335 if (grouped_load || slp_perm)
7336 dr_chain.quick_push (new_temp);
7338 /* Store vector loads in the corresponding SLP_NODE. */
7339 if (slp && !slp_perm)
7340 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7342 /* Bump the vector pointer to account for a gap or for excess
7343 elements loaded for a permuted SLP load. */
7344 if (group_gap_adj != 0)
7346 bool ovf;
7347 tree bump
7348 = wide_int_to_tree (sizetype,
7349 wi::smul (TYPE_SIZE_UNIT (elem_type),
7350 group_gap_adj, &ovf));
7351 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7352 stmt, bump);
7356 if (slp && !slp_perm)
7357 continue;
7359 if (slp_perm)
7361 if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
7362 slp_node_instance, false))
7364 dr_chain.release ();
7365 return false;
7368 else
7370 if (grouped_load)
7372 if (!load_lanes_p)
7373 vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
7374 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
7376 else
7378 if (j == 0)
7379 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7380 else
7381 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7382 prev_stmt_info = vinfo_for_stmt (new_stmt);
7385 dr_chain.release ();
7388 return true;
7391 /* Function vect_is_simple_cond.
7393 Input:
7394    VINFO - vectorization info for the loop or basic block being vectorized.
7395 COND - Condition that is checked for simple use.
7397 Output:
7398 *COMP_VECTYPE - the vector type for the comparison.
7400 Returns whether a COND can be vectorized. Checks whether
7401    condition operands are supportable using vect_is_simple_use.  */
7403 static bool
7404 vect_is_simple_cond (tree cond, vec_info *vinfo, tree *comp_vectype)
7406 tree lhs, rhs;
7407 enum vect_def_type dt;
7408 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
7410 /* Mask case. */
7411 if (TREE_CODE (cond) == SSA_NAME
7412 && TREE_CODE (TREE_TYPE (cond)) == BOOLEAN_TYPE)
7414 gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (cond);
7415 if (!vect_is_simple_use (cond, vinfo, &lhs_def_stmt,
7416 &dt, comp_vectype)
7417 || !*comp_vectype
7418 || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype))
7419 return false;
7420 return true;
7423 if (!COMPARISON_CLASS_P (cond))
7424 return false;
7426 lhs = TREE_OPERAND (cond, 0);
7427 rhs = TREE_OPERAND (cond, 1);
7429 if (TREE_CODE (lhs) == SSA_NAME)
7431 gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
7432 if (!vect_is_simple_use (lhs, vinfo, &lhs_def_stmt, &dt, &vectype1))
7433 return false;
7435 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
7436 && TREE_CODE (lhs) != FIXED_CST)
7437 return false;
7439 if (TREE_CODE (rhs) == SSA_NAME)
7441 gimple *rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
7442 if (!vect_is_simple_use (rhs, vinfo, &rhs_def_stmt, &dt, &vectype2))
7443 return false;
7445 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
7446 && TREE_CODE (rhs) != FIXED_CST)
7447 return false;
7449 if (vectype1 && vectype2
7450 && TYPE_VECTOR_SUBPARTS (vectype1) != TYPE_VECTOR_SUBPARTS (vectype2))
7451 return false;
7453 *comp_vectype = vectype1 ? vectype1 : vectype2;
7454 return true;
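/* For illustration, conditions accepted above look, in GIMPLE terms,
   roughly like

     x_6 = a_1 < b_2 ? c_3 : d_4;        (embedded comparison)
     x_7 = mask_5 ? c_3 : d_4;           (boolean SSA name as condition)

   where the comparison operands must be SSA names or constants and, if
   both are SSA names, their vector types must have the same number of
   subparts (the names a_1, b_2, ... are only hypothetical examples).  */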
7457 /* vectorizable_condition.
7459    Check if STMT is a conditional modify expression that can be vectorized.
7460 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
7461 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
7462 at GSI.
7464    When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
7465    to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
7466    the else clause if it is 2).
7468 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
7470 bool
7471 vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
7472 gimple **vec_stmt, tree reduc_def, int reduc_index,
7473 slp_tree slp_node)
7475 tree scalar_dest = NULL_TREE;
7476 tree vec_dest = NULL_TREE;
7477 tree cond_expr, then_clause, else_clause;
7478 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7479 tree comp_vectype = NULL_TREE;
7480 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
7481 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
7482 tree vec_compare;
7483 tree new_temp;
7484 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7485 enum vect_def_type dt, dts[4];
7486 int ncopies;
7487 enum tree_code code;
7488 stmt_vec_info prev_stmt_info = NULL;
7489 int i, j;
7490 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7491 vec<tree> vec_oprnds0 = vNULL;
7492 vec<tree> vec_oprnds1 = vNULL;
7493 vec<tree> vec_oprnds2 = vNULL;
7494 vec<tree> vec_oprnds3 = vNULL;
7495 tree vec_cmp_type;
7496 bool masked = false;
7498 if (reduc_index && STMT_SLP_TYPE (stmt_info))
7499 return false;
7501 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == TREE_CODE_REDUCTION)
7503 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
7504 return false;
7506 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
7507 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
7508 && reduc_def))
7509 return false;
7511 /* FORNOW: not yet supported. */
7512 if (STMT_VINFO_LIVE_P (stmt_info))
7514 if (dump_enabled_p ())
7515 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7516 "value used after loop.\n");
7517 return false;
7521 /* Is this a vectorizable conditional operation? */
7522 if (!is_gimple_assign (stmt))
7523 return false;
7525 code = gimple_assign_rhs_code (stmt);
7527 if (code != COND_EXPR)
7528 return false;
7530 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7531 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
7532 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
7534 if (slp_node || PURE_SLP_STMT (stmt_info))
7535 ncopies = 1;
7536 else
7537 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
7539 gcc_assert (ncopies >= 1);
7540 if (reduc_index && ncopies > 1)
7541 return false; /* FORNOW */
7543 cond_expr = gimple_assign_rhs1 (stmt);
7544 then_clause = gimple_assign_rhs2 (stmt);
7545 else_clause = gimple_assign_rhs3 (stmt);
7547 if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo, &comp_vectype)
7548 || !comp_vectype)
7549 return false;
7551 gimple *def_stmt;
7552 if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &def_stmt, &dt,
7553 &vectype1))
7554 return false;
7555 if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &def_stmt, &dt,
7556 &vectype2))
7557 return false;
7559 if (vectype1 && !useless_type_conversion_p (vectype, vectype1))
7560 return false;
7562 if (vectype2 && !useless_type_conversion_p (vectype, vectype2))
7563 return false;
7565 masked = !COMPARISON_CLASS_P (cond_expr);
7566 vec_cmp_type = build_same_sized_truth_vector_type (comp_vectype);
7568 if (vec_cmp_type == NULL_TREE)
7569 return false;
7571 if (!vec_stmt)
7573 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
7574 return expand_vec_cond_expr_p (vectype, comp_vectype);
7577 /* Transform. */
7579 if (!slp_node)
7581 vec_oprnds0.create (1);
7582 vec_oprnds1.create (1);
7583 vec_oprnds2.create (1);
7584 vec_oprnds3.create (1);
7587 /* Handle def. */
7588 scalar_dest = gimple_assign_lhs (stmt);
7589 vec_dest = vect_create_destination_var (scalar_dest, vectype);
7591 /* Handle cond expr. */
7592 for (j = 0; j < ncopies; j++)
7594 gassign *new_stmt = NULL;
7595 if (j == 0)
7597 if (slp_node)
7599 auto_vec<tree, 4> ops;
7600 auto_vec<vec<tree>, 4> vec_defs;
7602 if (masked)
7603 ops.safe_push (cond_expr);
7604 else
7606 ops.safe_push (TREE_OPERAND (cond_expr, 0));
7607 ops.safe_push (TREE_OPERAND (cond_expr, 1));
7609 ops.safe_push (then_clause);
7610 ops.safe_push (else_clause);
7611 vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
7612 vec_oprnds3 = vec_defs.pop ();
7613 vec_oprnds2 = vec_defs.pop ();
7614 if (!masked)
7615 vec_oprnds1 = vec_defs.pop ();
7616 vec_oprnds0 = vec_defs.pop ();
7618 ops.release ();
7619 vec_defs.release ();
7621 else
7623 gimple *gtemp;
7624 if (masked)
7626 vec_cond_lhs
7627 = vect_get_vec_def_for_operand (cond_expr, stmt,
7628 comp_vectype);
7629 vect_is_simple_use (cond_expr, stmt_info->vinfo,
7630 &gtemp, &dts[0]);
7632 else
7634 vec_cond_lhs =
7635 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0),
7636 stmt, comp_vectype);
7637 vect_is_simple_use (TREE_OPERAND (cond_expr, 0),
7638 loop_vinfo, &gtemp, &dts[0]);
7640 vec_cond_rhs =
7641 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
7642 stmt, comp_vectype);
7643 vect_is_simple_use (TREE_OPERAND (cond_expr, 1),
7644 loop_vinfo, &gtemp, &dts[1]);
7646 if (reduc_index == 1)
7647 vec_then_clause = reduc_def;
7648 else
7650 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
7651 stmt);
7652 vect_is_simple_use (then_clause, loop_vinfo,
7653 &gtemp, &dts[2]);
7655 if (reduc_index == 2)
7656 vec_else_clause = reduc_def;
7657 else
7659 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
7660 stmt);
7661 vect_is_simple_use (else_clause, loop_vinfo, &gtemp, &dts[3]);
7665 else
7667 vec_cond_lhs
7668 = vect_get_vec_def_for_stmt_copy (dts[0],
7669 vec_oprnds0.pop ());
7670 if (!masked)
7671 vec_cond_rhs
7672 = vect_get_vec_def_for_stmt_copy (dts[1],
7673 vec_oprnds1.pop ());
7675 vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
7676 vec_oprnds2.pop ());
7677 vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
7678 vec_oprnds3.pop ());
7681 if (!slp_node)
7683 vec_oprnds0.quick_push (vec_cond_lhs);
7684 if (!masked)
7685 vec_oprnds1.quick_push (vec_cond_rhs);
7686 vec_oprnds2.quick_push (vec_then_clause);
7687 vec_oprnds3.quick_push (vec_else_clause);
7690 /* Arguments are ready. Create the new vector stmt. */
7691 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
7693 vec_then_clause = vec_oprnds2[i];
7694 vec_else_clause = vec_oprnds3[i];
7696 if (masked)
7697 vec_compare = vec_cond_lhs;
7698 else
7700 vec_cond_rhs = vec_oprnds1[i];
7701 vec_compare = build2 (TREE_CODE (cond_expr), vec_cmp_type,
7702 vec_cond_lhs, vec_cond_rhs);
7704 new_temp = make_ssa_name (vec_dest);
7705 new_stmt = gimple_build_assign (new_temp, VEC_COND_EXPR,
7706 vec_compare, vec_then_clause,
7707 vec_else_clause);
7708 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7709 if (slp_node)
7710 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7713 if (slp_node)
7714 continue;
7716 if (j == 0)
7717 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7718 else
7719 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7721 prev_stmt_info = vinfo_for_stmt (new_stmt);
7724 vec_oprnds0.release ();
7725 vec_oprnds1.release ();
7726 vec_oprnds2.release ();
7727 vec_oprnds3.release ();
7729 return true;
7732 /* vectorizable_comparison.
7734 Check if STMT is a comparison expression that can be vectorized.
7735 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
7736 comparison, put it in VEC_STMT, and insert it at GSI.
7738 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
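/* Sketch of the transformation (hypothetical names; a suitable vector
   boolean type is assumed to exist on the target): a scalar comparison

       mask_3 = a_1 < b_2;

   becomes a vector comparison producing a mask,

       vect_mask_3 = vect_a_1 < vect_b_2;

   where vect_mask_3 has the statement's vector boolean type (the
   MASK_TYPE used below).  */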
7740 bool
7741 vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
7742 gimple **vec_stmt, tree reduc_def,
7743 slp_tree slp_node)
7745 tree lhs, rhs1, rhs2;
7746 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7747 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
7748 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7749 tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE;
7750 tree new_temp;
7751 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7752 enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type};
7753 unsigned nunits;
7754 int ncopies;
7755 enum tree_code code;
7756 stmt_vec_info prev_stmt_info = NULL;
7757 int i, j;
7758 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7759 vec<tree> vec_oprnds0 = vNULL;
7760 vec<tree> vec_oprnds1 = vNULL;
7761 gimple *def_stmt;
7762 tree mask_type;
7763 tree mask;
7765 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
7766 return false;
7768 if (!vectype || !VECTOR_BOOLEAN_TYPE_P (vectype))
7769 return false;
7771 mask_type = vectype;
7772 nunits = TYPE_VECTOR_SUBPARTS (vectype);
7774 if (slp_node || PURE_SLP_STMT (stmt_info))
7775 ncopies = 1;
7776 else
7777 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
7779 gcc_assert (ncopies >= 1);
7780 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
7781 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
7782 && reduc_def))
7783 return false;
7785 if (STMT_VINFO_LIVE_P (stmt_info))
7787 if (dump_enabled_p ())
7788 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7789 "value used after loop.\n");
7790 return false;
7793 if (!is_gimple_assign (stmt))
7794 return false;
7796 code = gimple_assign_rhs_code (stmt);
7798 if (TREE_CODE_CLASS (code) != tcc_comparison)
7799 return false;
7801 rhs1 = gimple_assign_rhs1 (stmt);
7802 rhs2 = gimple_assign_rhs2 (stmt);
7804 if (!vect_is_simple_use (rhs1, stmt_info->vinfo, &def_stmt,
7805 &dts[0], &vectype1))
7806 return false;
7808 if (!vect_is_simple_use (rhs2, stmt_info->vinfo, &def_stmt,
7809 &dts[1], &vectype2))
7810 return false;
7812 if (vectype1 && vectype2
7813 && TYPE_VECTOR_SUBPARTS (vectype1) != TYPE_VECTOR_SUBPARTS (vectype2))
7814 return false;
7816 vectype = vectype1 ? vectype1 : vectype2;
7818 /* Invariant comparison. */
7819 if (!vectype)
7821 vectype = build_vector_type (TREE_TYPE (rhs1), nunits);
7822 if (tree_to_shwi (TYPE_SIZE_UNIT (vectype)) != current_vector_size)
7823 return false;
7825 else if (nunits != TYPE_VECTOR_SUBPARTS (vectype))
7826 return false;
7828 if (!vec_stmt)
7830 STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type;
7831 vect_model_simple_cost (stmt_info, ncopies, dts, NULL, NULL);
7832 return expand_vec_cmp_expr_p (vectype, mask_type);
7835 /* Transform. */
7836 if (!slp_node)
7838 vec_oprnds0.create (1);
7839 vec_oprnds1.create (1);
7842 /* Handle def. */
7843 lhs = gimple_assign_lhs (stmt);
7844 mask = vect_create_destination_var (lhs, mask_type);
7846 /* Handle cmp expr. */
7847 for (j = 0; j < ncopies; j++)
7849 gassign *new_stmt = NULL;
7850 if (j == 0)
7852 if (slp_node)
7854 auto_vec<tree, 2> ops;
7855 auto_vec<vec<tree>, 2> vec_defs;
7857 ops.safe_push (rhs1);
7858 ops.safe_push (rhs2);
7859 vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
7860 vec_oprnds1 = vec_defs.pop ();
7861 vec_oprnds0 = vec_defs.pop ();
7863 else
7865 vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt, vectype);
7866 vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt, vectype);
7869 else
7871 vec_rhs1 = vect_get_vec_def_for_stmt_copy (dts[0],
7872 vec_oprnds0.pop ());
7873 vec_rhs2 = vect_get_vec_def_for_stmt_copy (dts[1],
7874 vec_oprnds1.pop ());
7877 if (!slp_node)
7879 vec_oprnds0.quick_push (vec_rhs1);
7880 vec_oprnds1.quick_push (vec_rhs2);
7883 /* Arguments are ready. Create the new vector stmt. */
7884 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_rhs1)
7886 vec_rhs2 = vec_oprnds1[i];
7888 new_temp = make_ssa_name (mask);
7889 new_stmt = gimple_build_assign (new_temp, code, vec_rhs1, vec_rhs2);
7890 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7891 if (slp_node)
7892 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7895 if (slp_node)
7896 continue;
7898 if (j == 0)
7899 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7900 else
7901 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7903 prev_stmt_info = vinfo_for_stmt (new_stmt);
7906 vec_oprnds0.release ();
7907 vec_oprnds1.release ();
7909 return true;
7912 /* Make sure the statement is vectorizable. */
7914 bool
7915 vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node)
7917 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7918 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7919 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
7920 bool ok;
7921 tree scalar_type, vectype;
7922 gimple *pattern_stmt;
7923 gimple_seq pattern_def_seq;
7925 if (dump_enabled_p ())
7927 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
7928 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7931 if (gimple_has_volatile_ops (stmt))
7933 if (dump_enabled_p ())
7934 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7935 "not vectorized: stmt has volatile operands\n");
7937 return false;
7940 /* Skip stmts that do not need to be vectorized. In loops this is expected
7941 to include:
7942 - the COND_EXPR which is the loop exit condition
7943 - any LABEL_EXPRs in the loop
7944 - computations that are used only for array indexing or loop control.
7945 In basic blocks we only analyze statements that are a part of some SLP
7946 instance; therefore, all the statements are relevant.
7948 A pattern statement needs to be analyzed instead of the original statement
7949 if the original statement is not relevant. Otherwise, we analyze both
7950 statements. In basic blocks we are called from some SLP instance
7951 traversal, so don't analyze pattern stmts instead; the pattern stmts
7952 will already be part of an SLP instance. */
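/* (Hypothetical example of such a pattern: for

       t_3 = (int) a_1 * (int) b_2;

   with char operands, the pattern recognizer may have attached a
   WIDEN_MULT_EXPR pattern statement, and it is that statement which is
   analyzed here when the original one is not relevant on its own.)  */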
7954 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
7955 if (!STMT_VINFO_RELEVANT_P (stmt_info)
7956 && !STMT_VINFO_LIVE_P (stmt_info))
7958 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
7959 && pattern_stmt
7960 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
7961 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
7963 /* Analyze PATTERN_STMT instead of the original stmt. */
7964 stmt = pattern_stmt;
7965 stmt_info = vinfo_for_stmt (pattern_stmt);
7966 if (dump_enabled_p ())
7968 dump_printf_loc (MSG_NOTE, vect_location,
7969 "==> examining pattern statement: ");
7970 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7973 else
7975 if (dump_enabled_p ())
7976 dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");
7978 return true;
7981 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
7982 && node == NULL
7983 && pattern_stmt
7984 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
7985 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
7987 /* Analyze PATTERN_STMT too. */
7988 if (dump_enabled_p ())
7990 dump_printf_loc (MSG_NOTE, vect_location,
7991 "==> examining pattern statement: ");
7992 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7995 if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node))
7996 return false;
7999 if (is_pattern_stmt_p (stmt_info)
8000 && node == NULL
8001 && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
8003 gimple_stmt_iterator si;
8005 for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
8007 gimple *pattern_def_stmt = gsi_stmt (si);
8008 if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
8009 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
8011 /* Analyze def stmt of STMT if it's a pattern stmt. */
8012 if (dump_enabled_p ())
8014 dump_printf_loc (MSG_NOTE, vect_location,
8015 "==> examining pattern def statement: ");
8016 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
8019 if (!vect_analyze_stmt (pattern_def_stmt,
8020 need_to_vectorize, node))
8021 return false;
8026 switch (STMT_VINFO_DEF_TYPE (stmt_info))
8028 case vect_internal_def:
8029 break;
8031 case vect_reduction_def:
8032 case vect_nested_cycle:
8033 gcc_assert (!bb_vinfo
8034 && (relevance == vect_used_in_outer
8035 || relevance == vect_used_in_outer_by_reduction
8036 || relevance == vect_used_by_reduction
8037 || relevance == vect_unused_in_scope));
8038 break;
8040 case vect_induction_def:
8041 case vect_constant_def:
8042 case vect_external_def:
8043 case vect_unknown_def_type:
8044 default:
8045 gcc_unreachable ();
8048 if (bb_vinfo)
8050 gcc_assert (PURE_SLP_STMT (stmt_info));
8052 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
8053 if (dump_enabled_p ())
8055 dump_printf_loc (MSG_NOTE, vect_location,
8056 "get vectype for scalar type: ");
8057 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
8058 dump_printf (MSG_NOTE, "\n");
8061 vectype = get_vectype_for_scalar_type (scalar_type);
8062 if (!vectype)
8064 if (dump_enabled_p ())
8066 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8067 "not SLPed: unsupported data-type ");
8068 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
8069 scalar_type);
8070 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
8072 return false;
8075 if (dump_enabled_p ())
8077 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
8078 dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
8079 dump_printf (MSG_NOTE, "\n");
8082 STMT_VINFO_VECTYPE (stmt_info) = vectype;
8085 if (STMT_VINFO_RELEVANT_P (stmt_info))
8087 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
8088 gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
8089 || (is_gimple_call (stmt)
8090 && gimple_call_lhs (stmt) == NULL_TREE));
8091 *need_to_vectorize = true;
8094 if (PURE_SLP_STMT (stmt_info) && !node)
8096 dump_printf_loc (MSG_NOTE, vect_location,
8097 "handled only by SLP analysis\n");
8098 return true;
8101 ok = true;
8102 if (!bb_vinfo
8103 && (STMT_VINFO_RELEVANT_P (stmt_info)
8104 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
8105 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
8106 || vectorizable_conversion (stmt, NULL, NULL, node)
8107 || vectorizable_shift (stmt, NULL, NULL, node)
8108 || vectorizable_operation (stmt, NULL, NULL, node)
8109 || vectorizable_assignment (stmt, NULL, NULL, node)
8110 || vectorizable_load (stmt, NULL, NULL, node, NULL)
8111 || vectorizable_call (stmt, NULL, NULL, node)
8112 || vectorizable_store (stmt, NULL, NULL, node)
8113 || vectorizable_reduction (stmt, NULL, NULL, node)
8114 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
8115 || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
8116 else
8118 if (bb_vinfo)
8119 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
8120 || vectorizable_conversion (stmt, NULL, NULL, node)
8121 || vectorizable_shift (stmt, NULL, NULL, node)
8122 || vectorizable_operation (stmt, NULL, NULL, node)
8123 || vectorizable_assignment (stmt, NULL, NULL, node)
8124 || vectorizable_load (stmt, NULL, NULL, node, NULL)
8125 || vectorizable_call (stmt, NULL, NULL, node)
8126 || vectorizable_store (stmt, NULL, NULL, node)
8127 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
8128 || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
8131 if (!ok)
8133 if (dump_enabled_p ())
8135 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8136 "not vectorized: relevant stmt not ");
8137 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
8138 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
8141 return false;
8144 if (bb_vinfo)
8145 return true;
8147 /* Stmts that are (also) "live" (i.e., that are used out of the loop)
8148 need extra handling, except for vectorizable reductions. */
8149 if (STMT_VINFO_LIVE_P (stmt_info)
8150 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
8151 ok = vectorizable_live_operation (stmt, NULL, NULL);
8153 if (!ok)
8155 if (dump_enabled_p ())
8157 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8158 "not vectorized: live stmt not ");
8159 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
8160 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
8163 return false;
8166 return true;
8170 /* Function vect_transform_stmt.
8172 Create a vectorized stmt to replace STMT, and insert it at BSI. */
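/* Roughly speaking, the kind recorded by vect_analyze_stmt in
   STMT_VINFO_TYPE (e.g. load_vec_info_type or condition_vec_info_type)
   selects the matching vectorizable_* routine below, which is now
   called with a non-NULL VEC_STMT so that it emits the vector code
   instead of only checking that the operation is supported.  */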
8174 bool
8175 vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
8176 bool *grouped_store, slp_tree slp_node,
8177 slp_instance slp_node_instance)
8179 bool is_store = false;
8180 gimple *vec_stmt = NULL;
8181 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8182 bool done;
8184 gimple *old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
8186 switch (STMT_VINFO_TYPE (stmt_info))
8188 case type_demotion_vec_info_type:
8189 case type_promotion_vec_info_type:
8190 case type_conversion_vec_info_type:
8191 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
8192 gcc_assert (done);
8193 break;
8195 case induc_vec_info_type:
8196 gcc_assert (!slp_node);
8197 done = vectorizable_induction (stmt, gsi, &vec_stmt);
8198 gcc_assert (done);
8199 break;
8201 case shift_vec_info_type:
8202 done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
8203 gcc_assert (done);
8204 break;
8206 case op_vec_info_type:
8207 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
8208 gcc_assert (done);
8209 break;
8211 case assignment_vec_info_type:
8212 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
8213 gcc_assert (done);
8214 break;
8216 case load_vec_info_type:
8217 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
8218 slp_node_instance);
8219 gcc_assert (done);
8220 break;
8222 case store_vec_info_type:
8223 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
8224 gcc_assert (done);
8225 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
8227 /* In case of interleaving, the whole chain is vectorized when the
8228 last store in the chain is reached. Store stmts before the last
8229 one are skipped, and their vec_stmt_info shouldn't be freed
8230 meanwhile. */
8231 *grouped_store = true;
8232 if (STMT_VINFO_VEC_STMT (stmt_info))
8233 is_store = true;
8235 else
8236 is_store = true;
8237 break;
8239 case condition_vec_info_type:
8240 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
8241 gcc_assert (done);
8242 break;
8244 case comparison_vec_info_type:
8245 done = vectorizable_comparison (stmt, gsi, &vec_stmt, NULL, slp_node);
8246 gcc_assert (done);
8247 break;
8249 case call_vec_info_type:
8250 done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
8251 stmt = gsi_stmt (*gsi);
8252 if (is_gimple_call (stmt)
8253 && gimple_call_internal_p (stmt)
8254 && gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
8255 is_store = true;
8256 break;
8258 case call_simd_clone_vec_info_type:
8259 done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
8260 stmt = gsi_stmt (*gsi);
8261 break;
8263 case reduc_vec_info_type:
8264 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
8265 gcc_assert (done);
8266 break;
8268 default:
8269 if (!STMT_VINFO_LIVE_P (stmt_info))
8271 if (dump_enabled_p ())
8272 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8273 "stmt not supported.\n");
8274 gcc_unreachable ();
8278 /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
8279 This would break hybrid SLP vectorization. */
8280 if (slp_node)
8281 gcc_assert (!vec_stmt
8282 && STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt);
8284 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
8285 is being vectorized, but outside the immediately enclosing loop. */
8286 if (vec_stmt
8287 && STMT_VINFO_LOOP_VINFO (stmt_info)
8288 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
8289 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
8290 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
8291 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
8292 || STMT_VINFO_RELEVANT (stmt_info) ==
8293 vect_used_in_outer_by_reduction))
8295 struct loop *innerloop = LOOP_VINFO_LOOP (
8296 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
8297 imm_use_iterator imm_iter;
8298 use_operand_p use_p;
8299 tree scalar_dest;
8300 gimple *exit_phi;
8302 if (dump_enabled_p ())
8303 dump_printf_loc (MSG_NOTE, vect_location,
8304 "Record the vdef for outer-loop vectorization.\n");
8306 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
8307 (to be used when vectorizing outer-loop stmts that use the DEF of
8308 STMT). */
8309 if (gimple_code (stmt) == GIMPLE_PHI)
8310 scalar_dest = PHI_RESULT (stmt);
8311 else
8312 scalar_dest = gimple_assign_lhs (stmt);
8314 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
8316 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
8318 exit_phi = USE_STMT (use_p);
8319 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
8324 /* Handle stmts whose DEF is used outside the loop-nest that is
8325 being vectorized. */
8326 if (STMT_VINFO_LIVE_P (stmt_info)
8327 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
8329 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
8330 gcc_assert (done);
8333 if (vec_stmt)
8334 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
8336 return is_store;
8340 /* Remove a group of stores (for SLP or interleaving), free their
8341 stmt_vec_info. */
8343 void
8344 vect_remove_stores (gimple *first_stmt)
8346 gimple *next = first_stmt;
8347 gimple *tmp;
8348 gimple_stmt_iterator next_si;
8350 while (next)
8352 stmt_vec_info stmt_info = vinfo_for_stmt (next);
8354 tmp = GROUP_NEXT_ELEMENT (stmt_info);
8355 if (is_pattern_stmt_p (stmt_info))
8356 next = STMT_VINFO_RELATED_STMT (stmt_info);
8357 /* Free the attached stmt_vec_info and remove the stmt. */
8358 next_si = gsi_for_stmt (next);
8359 unlink_stmt_vdef (next);
8360 gsi_remove (&next_si, true);
8361 release_defs (next);
8362 free_stmt_vec_info (next);
8363 next = tmp;
8368 /* Function new_stmt_vec_info.
8370 Create and initialize a new stmt_vec_info struct for STMT. */
8372 stmt_vec_info
8373 new_stmt_vec_info (gimple *stmt, vec_info *vinfo)
8375 stmt_vec_info res;
8376 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
8378 STMT_VINFO_TYPE (res) = undef_vec_info_type;
8379 STMT_VINFO_STMT (res) = stmt;
8380 res->vinfo = vinfo;
8381 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
8382 STMT_VINFO_LIVE_P (res) = false;
8383 STMT_VINFO_VECTYPE (res) = NULL;
8384 STMT_VINFO_VEC_STMT (res) = NULL;
8385 STMT_VINFO_VECTORIZABLE (res) = true;
8386 STMT_VINFO_IN_PATTERN_P (res) = false;
8387 STMT_VINFO_RELATED_STMT (res) = NULL;
8388 STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
8389 STMT_VINFO_DATA_REF (res) = NULL;
8390 STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION;
8392 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
8393 STMT_VINFO_DR_OFFSET (res) = NULL;
8394 STMT_VINFO_DR_INIT (res) = NULL;
8395 STMT_VINFO_DR_STEP (res) = NULL;
8396 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
8398 if (gimple_code (stmt) == GIMPLE_PHI
8399 && is_loop_header_bb_p (gimple_bb (stmt)))
8400 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
8401 else
8402 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
8404 STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
8405 STMT_SLP_TYPE (res) = loop_vect;
8406 STMT_VINFO_NUM_SLP_USES (res) = 0;
8408 GROUP_FIRST_ELEMENT (res) = NULL;
8409 GROUP_NEXT_ELEMENT (res) = NULL;
8410 GROUP_SIZE (res) = 0;
8411 GROUP_STORE_COUNT (res) = 0;
8412 GROUP_GAP (res) = 0;
8413 GROUP_SAME_DR_STMT (res) = NULL;
8415 return res;
8419 /* Create the vector holding stmt_vec_info entries. */
8421 void
8422 init_stmt_vec_info_vec (void)
8424 gcc_assert (!stmt_vec_info_vec.exists ());
8425 stmt_vec_info_vec.create (50);
8429 /* Free the vector of stmt_vec_info entries. */
8431 void
8432 free_stmt_vec_info_vec (void)
8434 unsigned int i;
8435 stmt_vec_info info;
8436 FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
8437 if (info != NULL)
8438 free_stmt_vec_info (STMT_VINFO_STMT (info));
8439 gcc_assert (stmt_vec_info_vec.exists ());
8440 stmt_vec_info_vec.release ();
8444 /* Free stmt vectorization related info. */
8446 void
8447 free_stmt_vec_info (gimple *stmt)
8449 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8451 if (!stmt_info)
8452 return;
8454 /* Check if this statement has a related "pattern stmt"
8455 (introduced by the vectorizer during the pattern recognition
8456 pass). Free pattern's stmt_vec_info and def stmt's stmt_vec_info
8457 too. */
8458 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
8460 stmt_vec_info patt_info
8461 = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
8462 if (patt_info)
8464 gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
8465 gimple *patt_stmt = STMT_VINFO_STMT (patt_info);
8466 gimple_set_bb (patt_stmt, NULL);
8467 tree lhs = gimple_get_lhs (patt_stmt);
8468 if (lhs && TREE_CODE (lhs) == SSA_NAME)
8469 release_ssa_name (lhs);
8470 if (seq)
8472 gimple_stmt_iterator si;
8473 for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
8475 gimple *seq_stmt = gsi_stmt (si);
8476 gimple_set_bb (seq_stmt, NULL);
8477 lhs = gimple_get_lhs (seq_stmt);
8478 if (lhs && TREE_CODE (lhs) == SSA_NAME)
8479 release_ssa_name (lhs);
8480 free_stmt_vec_info (seq_stmt);
8483 free_stmt_vec_info (patt_stmt);
8487 STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
8488 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
8489 set_vinfo_for_stmt (stmt, NULL);
8490 free (stmt_info);
8494 /* Function get_vectype_for_scalar_type_and_size.
8496 Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
8497 by the target. */
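/* For example (assuming a target whose preferred SImode SIMD mode is
   V4SImode), SCALAR_TYPE == int with SIZE == 16 gives the 16-byte
   vector type "vector(4) int", while SIZE == 0 lets the target choose
   its preferred vector size.  */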
8499 static tree
8500 get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
8502 machine_mode inner_mode = TYPE_MODE (scalar_type);
8503 machine_mode simd_mode;
8504 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
8505 int nunits;
8506 tree vectype;
8508 if (nbytes == 0)
8509 return NULL_TREE;
8511 if (GET_MODE_CLASS (inner_mode) != MODE_INT
8512 && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
8513 return NULL_TREE;
8515 /* For vector types of elements whose mode precision doesn't
8516 match their type's precision we use an element type of mode
8517 precision. The vectorization routines will have to make sure
8518 they support the proper result truncation/extension.
8519 We also make sure to build vector types with INTEGER_TYPE
8520 component type only. */
8521 if (INTEGRAL_TYPE_P (scalar_type)
8522 && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
8523 || TREE_CODE (scalar_type) != INTEGER_TYPE))
8524 scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
8525 TYPE_UNSIGNED (scalar_type));
8527 /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
8528 When the component mode passes the above test, simply use a type
8529 corresponding to that mode. The theory is that any use that
8530 would cause problems with this will disable vectorization anyway. */
8531 else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
8532 && !INTEGRAL_TYPE_P (scalar_type))
8533 scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);
8535 /* We can't build a vector type of elements with alignment bigger than
8536 their size. */
8537 else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
8538 scalar_type = lang_hooks.types.type_for_mode (inner_mode,
8539 TYPE_UNSIGNED (scalar_type));
8541 /* If we fell back to using the mode, fail if there was
8542 no scalar type for it. */
8543 if (scalar_type == NULL_TREE)
8544 return NULL_TREE;
8546 /* If no size was supplied, use the mode the target prefers. Otherwise
8547 look up a vector mode of the specified size. */
8548 if (size == 0)
8549 simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
8550 else
8551 simd_mode = mode_for_vector (inner_mode, size / nbytes);
8552 nunits = GET_MODE_SIZE (simd_mode) / nbytes;
8553 if (nunits <= 1)
8554 return NULL_TREE;
8556 vectype = build_vector_type (scalar_type, nunits);
8558 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
8559 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
8560 return NULL_TREE;
8562 return vectype;
8565 unsigned int current_vector_size;
8567 /* Function get_vectype_for_scalar_type.
8569 Returns the vector type corresponding to SCALAR_TYPE as supported
8570 by the target. */
8572 tree
8573 get_vectype_for_scalar_type (tree scalar_type)
8575 tree vectype;
8576 vectype = get_vectype_for_scalar_type_and_size (scalar_type,
8577 current_vector_size);
8578 if (vectype
8579 && current_vector_size == 0)
8580 current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
8581 return vectype;
8584 /* Function get_mask_type_for_scalar_type.
8586 Returns the mask type corresponding to a result of comparison
8587 of vectors of the specified SCALAR_TYPE, as supported by the target. */
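/* E.g. (illustrative only), with current_vector_size == 16 and
   SCALAR_TYPE == int this returns a 4-element vector boolean type,
   the kind of mask produced by comparing two V4SI vectors.  */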
8589 tree
8590 get_mask_type_for_scalar_type (tree scalar_type)
8592 tree vectype = get_vectype_for_scalar_type (scalar_type);
8594 if (!vectype)
8595 return NULL;
8597 return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype),
8598 current_vector_size);
8601 /* Function get_same_sized_vectype
8603 Returns a vector type corresponding to SCALAR_TYPE of size
8604 VECTOR_TYPE if supported by the target. */
8606 tree
8607 get_same_sized_vectype (tree scalar_type, tree vector_type)
8609 if (TREE_CODE (scalar_type) == BOOLEAN_TYPE)
8610 return build_same_sized_truth_vector_type (vector_type);
8612 return get_vectype_for_scalar_type_and_size
8613 (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
8616 /* Function vect_is_simple_use.
8618 Input:
8619 VINFO - the vect info of the loop or basic block that is being vectorized.
8620 OPERAND - operand in the loop or bb.
8621 Output:
8622 DEF_STMT - the defining stmt in case OPERAND is an SSA_NAME.
8623 DT - the type of definition
8625 Returns whether a stmt with OPERAND can be vectorized.
8626 For loops, supportable operands are constants, loop invariants, and operands
8627 that are defined by the current iteration of the loop. Unsupportable
8628 operands are those that are defined by a previous iteration of the loop (as
8629 is the case in reduction/induction computations).
8630 For basic blocks, supportable operands are constants and bb invariants.
8631 For now, operands defined outside the basic block are not supported. */
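/* Purely illustrative example: in

       for (i = 0; i < n; i++)
         a[i] = b[i] * x + 1;

   the operand "x", defined before the loop, is classified as
   vect_external_def, the constant 1 as vect_constant_def, and the SSA
   name holding b[i] * x, defined inside the loop, as
   vect_internal_def.  */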
8633 bool
8634 vect_is_simple_use (tree operand, vec_info *vinfo,
8635 gimple **def_stmt, enum vect_def_type *dt)
8637 *def_stmt = NULL;
8638 *dt = vect_unknown_def_type;
8640 if (dump_enabled_p ())
8642 dump_printf_loc (MSG_NOTE, vect_location,
8643 "vect_is_simple_use: operand ");
8644 dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
8645 dump_printf (MSG_NOTE, "\n");
8648 if (CONSTANT_CLASS_P (operand))
8650 *dt = vect_constant_def;
8651 return true;
8654 if (is_gimple_min_invariant (operand))
8656 *dt = vect_external_def;
8657 return true;
8660 if (TREE_CODE (operand) != SSA_NAME)
8662 if (dump_enabled_p ())
8663 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8664 "not ssa-name.\n");
8665 return false;
8668 if (SSA_NAME_IS_DEFAULT_DEF (operand))
8670 *dt = vect_external_def;
8671 return true;
8674 *def_stmt = SSA_NAME_DEF_STMT (operand);
8675 if (dump_enabled_p ())
8677 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
8678 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
8681 if (! vect_stmt_in_region_p (vinfo, *def_stmt))
8682 *dt = vect_external_def;
8683 else
8685 stmt_vec_info stmt_vinfo = vinfo_for_stmt (*def_stmt);
8686 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
8689 if (dump_enabled_p ())
8691 dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
8692 switch (*dt)
8694 case vect_uninitialized_def:
8695 dump_printf (MSG_NOTE, "uninitialized\n");
8696 break;
8697 case vect_constant_def:
8698 dump_printf (MSG_NOTE, "constant\n");
8699 break;
8700 case vect_external_def:
8701 dump_printf (MSG_NOTE, "external\n");
8702 break;
8703 case vect_internal_def:
8704 dump_printf (MSG_NOTE, "internal\n");
8705 break;
8706 case vect_induction_def:
8707 dump_printf (MSG_NOTE, "induction\n");
8708 break;
8709 case vect_reduction_def:
8710 dump_printf (MSG_NOTE, "reduction\n");
8711 break;
8712 case vect_double_reduction_def:
8713 dump_printf (MSG_NOTE, "double reduction\n");
8714 break;
8715 case vect_nested_cycle:
8716 dump_printf (MSG_NOTE, "nested cycle\n");
8717 break;
8718 case vect_unknown_def_type:
8719 dump_printf (MSG_NOTE, "unknown\n");
8720 break;
8724 if (*dt == vect_unknown_def_type)
8726 if (dump_enabled_p ())
8727 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8728 "Unsupported pattern.\n");
8729 return false;
8732 switch (gimple_code (*def_stmt))
8734 case GIMPLE_PHI:
8735 case GIMPLE_ASSIGN:
8736 case GIMPLE_CALL:
8737 break;
8738 default:
8739 if (dump_enabled_p ())
8740 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8741 "unsupported defining stmt:\n");
8742 return false;
8745 return true;
8748 /* Function vect_is_simple_use.
8750 Same as vect_is_simple_use but also determines the vector operand
8751 type of OPERAND and stores it to *VECTYPE. If the definition of
8752 OPERAND is vect_uninitialized_def, vect_constant_def or
8753 vect_external_def, *VECTYPE will be set to NULL_TREE and the caller
8754 is responsible for computing the best suited vector type for the
8755 scalar operand. */
8757 bool
8758 vect_is_simple_use (tree operand, vec_info *vinfo,
8759 gimple **def_stmt, enum vect_def_type *dt, tree *vectype)
8761 if (!vect_is_simple_use (operand, vinfo, def_stmt, dt))
8762 return false;
8764 /* Now get a vector type if the def is internal, otherwise supply
8765 NULL_TREE and leave it up to the caller to figure out a proper
8766 type for the use stmt. */
8767 if (*dt == vect_internal_def
8768 || *dt == vect_induction_def
8769 || *dt == vect_reduction_def
8770 || *dt == vect_double_reduction_def
8771 || *dt == vect_nested_cycle)
8773 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
8775 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
8776 && !STMT_VINFO_RELEVANT (stmt_info)
8777 && !STMT_VINFO_LIVE_P (stmt_info))
8778 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
8780 *vectype = STMT_VINFO_VECTYPE (stmt_info);
8781 gcc_assert (*vectype != NULL_TREE);
8783 else if (*dt == vect_uninitialized_def
8784 || *dt == vect_constant_def
8785 || *dt == vect_external_def)
8786 *vectype = NULL_TREE;
8787 else
8788 gcc_unreachable ();
8790 return true;
8794 /* Function supportable_widening_operation
8796 Check whether an operation represented by the code CODE is a
8797 widening operation that is supported by the target platform in
8798 vector form (i.e., when operating on arguments of type VECTYPE_IN
8799 producing a result of type VECTYPE_OUT).
8801 Widening operations we currently support are NOP (CONVERT), FLOAT
8802 and WIDEN_MULT. This function checks if these operations are supported
8803 by the target platform either directly (via vector tree-codes), or via
8804 target builtins.
8806 Output:
8807 - CODE1 and CODE2 are codes of vector operations to be used when
8808 vectorizing the operation, if available.
8809 - MULTI_STEP_CVT determines the number of required intermediate steps in
8810 case of multi-step conversion (like char->short->int - in that case
8811 MULTI_STEP_CVT will be 1).
8812 - INTERM_TYPES contains the intermediate type required to perform the
8813 widening operation (short in the above example). */
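/* As a concrete sketch (assuming 128-bit vectors), widening V16QI
   input elements to V8HI results takes two vector operations per input
   vector, VEC_UNPACK_LO_EXPR and VEC_UNPACK_HI_EXPR, each producing a
   V8HI from one half of the V16QI.  A char->int conversion is the
   multi-step case: it goes through the short element type, so
   MULTI_STEP_CVT is 1 and INTERM_TYPES holds the single intermediate
   (short-element) vector type.  */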
8815 bool
8816 supportable_widening_operation (enum tree_code code, gimple *stmt,
8817 tree vectype_out, tree vectype_in,
8818 enum tree_code *code1, enum tree_code *code2,
8819 int *multi_step_cvt,
8820 vec<tree> *interm_types)
8822 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8823 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
8824 struct loop *vect_loop = NULL;
8825 machine_mode vec_mode;
8826 enum insn_code icode1, icode2;
8827 optab optab1, optab2;
8828 tree vectype = vectype_in;
8829 tree wide_vectype = vectype_out;
8830 enum tree_code c1, c2;
8831 int i;
8832 tree prev_type, intermediate_type;
8833 machine_mode intermediate_mode, prev_mode;
8834 optab optab3, optab4;
8836 *multi_step_cvt = 0;
8837 if (loop_info)
8838 vect_loop = LOOP_VINFO_LOOP (loop_info);
8840 switch (code)
8842 case WIDEN_MULT_EXPR:
8843 /* The result of a vectorized widening operation usually requires
8844 two vectors (because the widened results do not fit into one vector).
8845 The generated vector results would normally be expected to be
8846 generated in the same order as in the original scalar computation,
8847 i.e. if 8 results are generated in each vector iteration, they are
8848 to be organized as follows:
8849 vect1: [res1,res2,res3,res4],
8850 vect2: [res5,res6,res7,res8].
8852 However, in the special case that the result of the widening
8853 operation is used in a reduction computation only, the order doesn't
8854 matter (because when vectorizing a reduction we change the order of
8855 the computation). Some targets can take advantage of this and
8856 generate more efficient code. For example, targets like Altivec,
8857 that support widen_mult using a sequence of {mult_even,mult_odd}
8858 generate the following vectors:
8859 vect1: [res1,res3,res5,res7],
8860 vect2: [res2,res4,res6,res8].
8862 When vectorizing outer-loops, we execute the inner-loop sequentially
8863 (each vectorized inner-loop iteration contributes to VF outer-loop
8864 iterations in parallel). We therefore don't allow changing the
8865 order of the computation in the inner-loop during outer-loop
8866 vectorization. */
8867 /* TODO: Another case in which order doesn't *really* matter is when we
8868 widen and then contract again, e.g. (short)((int)x * y >> 8).
8869 Normally, pack_trunc performs an even/odd permute, whereas the
8870 repack from an even/odd expansion would be an interleave, which
8871 would be significantly simpler for e.g. AVX2. */
8872 /* In any case, in order to avoid duplicating the code below, recurse
8873 on VEC_WIDEN_MULT_EVEN_EXPR. If it succeeds, all the return values
8874 are properly set up for the caller. If we fail, we'll continue with
8875 a VEC_WIDEN_MULT_LO/HI_EXPR check. */
8876 if (vect_loop
8877 && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
8878 && !nested_in_vect_loop_p (vect_loop, stmt)
8879 && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
8880 stmt, vectype_out, vectype_in,
8881 code1, code2, multi_step_cvt,
8882 interm_types))
8884 /* Elements in a vector with vect_used_by_reduction property cannot
8885 be reordered if the use chain with this property does not have the
8886 same operation. One such example is s += a * b, where elements
8887 in a and b cannot be reordered. Here we check if the vector defined
8888 by STMT is only directly used in the reduction statement. */
8889 tree lhs = gimple_assign_lhs (stmt);
8890 use_operand_p dummy;
8891 gimple *use_stmt;
8892 stmt_vec_info use_stmt_info = NULL;
8893 if (single_imm_use (lhs, &dummy, &use_stmt)
8894 && (use_stmt_info = vinfo_for_stmt (use_stmt))
8895 && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
8896 return true;
8898 c1 = VEC_WIDEN_MULT_LO_EXPR;
8899 c2 = VEC_WIDEN_MULT_HI_EXPR;
8900 break;
8902 case DOT_PROD_EXPR:
8903 c1 = DOT_PROD_EXPR;
8904 c2 = DOT_PROD_EXPR;
8905 break;
8907 case SAD_EXPR:
8908 c1 = SAD_EXPR;
8909 c2 = SAD_EXPR;
8910 break;
8912 case VEC_WIDEN_MULT_EVEN_EXPR:
8913 /* Support the recursion induced just above. */
8914 c1 = VEC_WIDEN_MULT_EVEN_EXPR;
8915 c2 = VEC_WIDEN_MULT_ODD_EXPR;
8916 break;
8918 case WIDEN_LSHIFT_EXPR:
8919 c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
8920 c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
8921 break;
8923 CASE_CONVERT:
8924 c1 = VEC_UNPACK_LO_EXPR;
8925 c2 = VEC_UNPACK_HI_EXPR;
8926 break;
8928 case FLOAT_EXPR:
8929 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
8930 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
8931 break;
8933 case FIX_TRUNC_EXPR:
8934 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
8935 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
8936 computing the operation. */
8937 return false;
8939 default:
8940 gcc_unreachable ();
8943 if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
8944 std::swap (c1, c2);
8946 if (code == FIX_TRUNC_EXPR)
8948 /* The signedness is determined from the output operand. */
8949 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
8950 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
8952 else
8954 optab1 = optab_for_tree_code (c1, vectype, optab_default);
8955 optab2 = optab_for_tree_code (c2, vectype, optab_default);
8958 if (!optab1 || !optab2)
8959 return false;
8961 vec_mode = TYPE_MODE (vectype);
8962 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
8963 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
8964 return false;
8966 *code1 = c1;
8967 *code2 = c2;
8969 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
8970 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
8971 return true;
8973 /* Check if it's a multi-step conversion that can be done using intermediate
8974 types. */
8976 prev_type = vectype;
8977 prev_mode = vec_mode;
8979 if (!CONVERT_EXPR_CODE_P (code))
8980 return false;
8982 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
8983 intermediate steps in the promotion sequence. We try
8984 MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
8985 not. */
8986 interm_types->create (MAX_INTERM_CVT_STEPS);
8987 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
8989 intermediate_mode = insn_data[icode1].operand[0].mode;
8990 intermediate_type
8991 = lang_hooks.types.type_for_mode (intermediate_mode,
8992 TYPE_UNSIGNED (prev_type));
8993 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
8994 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
8996 if (!optab3 || !optab4
8997 || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
8998 || insn_data[icode1].operand[0].mode != intermediate_mode
8999 || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
9000 || insn_data[icode2].operand[0].mode != intermediate_mode
9001 || ((icode1 = optab_handler (optab3, intermediate_mode))
9002 == CODE_FOR_nothing)
9003 || ((icode2 = optab_handler (optab4, intermediate_mode))
9004 == CODE_FOR_nothing))
9005 break;
9007 interm_types->quick_push (intermediate_type);
9008 (*multi_step_cvt)++;
9010 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
9011 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
9012 return true;
9014 prev_type = intermediate_type;
9015 prev_mode = intermediate_mode;
9018 interm_types->release ();
9019 return false;
9023 /* Function supportable_narrowing_operation
9025 Check whether an operation represented by the code CODE is a
9026 narrowing operation that is supported by the target platform in
9027 vector form (i.e., when operating on arguments of type VECTYPE_IN
9028 and producing a result of type VECTYPE_OUT).
9030 Narrowing operations we currently support are NOP (CONVERT) and
9031 FIX_TRUNC. This function checks if these operations are supported by
9032 the target platform directly via vector tree-codes.
9034 Output:
9035 - CODE1 is the code of a vector operation to be used when
9036 vectorizing the operation, if available.
9037 - MULTI_STEP_CVT determines the number of required intermediate steps in
9038 case of multi-step conversion (like int->short->char - in that case
9039 MULTI_STEP_CVT will be 1).
9040 - INTERM_TYPES contains the intermediate type required to perform the
9041 narrowing operation (short in the above example). */
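/* Illustrative sketch only (128-bit vectors assumed): narrowing two
   V8HI input vectors into one V16QI result takes a single
   VEC_PACK_TRUNC_EXPR, while an int->char conversion is multi-step:
   pairs of V4SI vectors are first packed to V8HI, and pairs of those
   to V16QI, so MULTI_STEP_CVT is 1 and INTERM_TYPES holds one
   intermediate (short-element) vector type.  */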
9043 bool
9044 supportable_narrowing_operation (enum tree_code code,
9045 tree vectype_out, tree vectype_in,
9046 enum tree_code *code1, int *multi_step_cvt,
9047 vec<tree> *interm_types)
9049 machine_mode vec_mode;
9050 enum insn_code icode1;
9051 optab optab1, interm_optab;
9052 tree vectype = vectype_in;
9053 tree narrow_vectype = vectype_out;
9054 enum tree_code c1;
9055 tree intermediate_type;
9056 machine_mode intermediate_mode, prev_mode;
9057 int i;
9058 bool uns;
9060 *multi_step_cvt = 0;
9061 switch (code)
9063 CASE_CONVERT:
9064 c1 = VEC_PACK_TRUNC_EXPR;
9065 break;
9067 case FIX_TRUNC_EXPR:
9068 c1 = VEC_PACK_FIX_TRUNC_EXPR;
9069 break;
9071 case FLOAT_EXPR:
9072 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
9073 tree code and optabs used for computing the operation. */
9074 return false;
9076 default:
9077 gcc_unreachable ();
9080 if (code == FIX_TRUNC_EXPR)
9081 /* The signedness is determined from the output operand. */
9082 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
9083 else
9084 optab1 = optab_for_tree_code (c1, vectype, optab_default);
9086 if (!optab1)
9087 return false;
9089 vec_mode = TYPE_MODE (vectype);
9090 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
9091 return false;
9093 *code1 = c1;
9095 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
9096 return true;
9098 /* Check if it's a multi-step conversion that can be done using intermediate
9099 types. */
9100 prev_mode = vec_mode;
9101 if (code == FIX_TRUNC_EXPR)
9102 uns = TYPE_UNSIGNED (vectype_out);
9103 else
9104 uns = TYPE_UNSIGNED (vectype);
9106 /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
9107 conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
9108 costly than signed. */
9109 if (code == FIX_TRUNC_EXPR && uns)
9111 enum insn_code icode2;
9113 intermediate_type
9114 = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
9115 interm_optab
9116 = optab_for_tree_code (c1, intermediate_type, optab_default);
9117 if (interm_optab != unknown_optab
9118 && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
9119 && insn_data[icode1].operand[0].mode
9120 == insn_data[icode2].operand[0].mode)
9122 uns = false;
9123 optab1 = interm_optab;
9124 icode1 = icode2;
9128 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
9129 intermediate steps in the narrowing sequence. We try
9130 MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not. */
9131 interm_types->create (MAX_INTERM_CVT_STEPS);
9132 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
9134 intermediate_mode = insn_data[icode1].operand[0].mode;
9135 intermediate_type
9136 = lang_hooks.types.type_for_mode (intermediate_mode, uns);
9137 interm_optab
9138 = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
9139 optab_default);
9140 if (!interm_optab
9141 || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
9142 || insn_data[icode1].operand[0].mode != intermediate_mode
9143 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
9144 == CODE_FOR_nothing))
9145 break;
9147 interm_types->quick_push (intermediate_type);
9148 (*multi_step_cvt)++;
9150 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
9151 return true;
9153 prev_mode = intermediate_mode;
9154 optab1 = interm_optab;
9157 interm_types->release ();
9158 return false;