/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003-2015 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "dumpfile.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "rtl.h"
#include "ssa.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "target.h"
#include "gimple-pretty-print.h"
#include "internal-fn.h"
#include "tree-eh.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "cfgloop.h"
#include "tree-ssa-loop.h"
#include "tree-scalar-evolution.h"
#include "flags.h"
#include "insn-config.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "emit-rtl.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "insn-codes.h"
#include "optabs.h"
#include "diagnostic-core.h"
#include "tree-vectorizer.h"
#include "cgraph.h"
#include "builtins.h"

/* For lang_hooks.types.type_for_mode.  */
#include "langhooks.h"

/* Return the vectorized type for the given statement.  */

tree
stmt_vectype (struct _stmt_vec_info *stmt_info)
{
  return STMT_VINFO_VECTYPE (stmt_info);
}

/* Return TRUE iff the given statement is in an inner loop relative to
   the loop being vectorized.  */
bool
stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
{
  gimple stmt = STMT_VINFO_STMT (stmt_info);
  basic_block bb = gimple_bb (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop* loop;

  if (!loop_vinfo)
    return false;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  return (bb->loop_father == loop->inner);
}

/* Record the cost of a statement, either by directly informing the
   target model or by saving it in a vector for later processing.
   Return a preliminary estimate of the statement's cost.  */

unsigned
record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
		  enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
		  int misalign, enum vect_cost_model_location where)
{
  if (body_cost_vec)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      add_stmt_info_to_vec (body_cost_vec, count, kind,
			    stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
			    misalign);
      return (unsigned)
	(builtin_vectorization_cost (kind, vectype, misalign) * count);
    }
  else
    {
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
      bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
      void *target_cost_data;

      if (loop_vinfo)
	target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
      else
	target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);

      return add_stmt_cost (target_cost_data, count, kind, stmt_info,
			    misalign, where);
    }
}
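
/* A minimal usage sketch (cost values are illustrative; real values come
   from the target's builtin_vectorization_cost hook): recording two copies
   of a vector statement during analysis,

       unsigned est = record_stmt_cost (&body_cost_vec, 2, vector_stmt,
					stmt_info, 0, vect_body);

   pushes a single entry into BODY_COST_VEC and, assuming the hook returns
   1 for vector_stmt, yields the preliminary estimate 1 * 2 = 2.  The
   authoritative cost is computed later, when the vector is replayed
   through add_stmt_cost.  */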

/* Return a variable of type ELEM_TYPE[NELEMS].  */

static tree
create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
			 "vect_array");
}

/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT and the vector is associated
   with scalar destination SCALAR_DEST.  */

static tree
read_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
		   tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;
  gimple new_stmt;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return vect_name;
}

/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT.  */

static void
write_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree vect,
		    tree array, unsigned HOST_WIDE_INT n)
{
  tree array_ref;
  gimple new_stmt;

  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
}

/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

static tree
create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
{
  tree mem_ref, alias_ptr_type;

  alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
  return mem_ref;
}

/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (vec<gimple> *worklist, gimple stmt,
		    enum vect_relevant relevant, bool live_p,
		    bool used_in_pattern)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
  gimple pattern_stmt;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "mark relevant %d, live %d.\n", relevant, live_p);

  /* If this stmt is an original stmt in a pattern, we might need to mark its
     related pattern stmt instead of the original stmt.  However, such stmts
     may have their own uses that are not in any pattern, in such cases the
     stmt itself should be marked.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      bool found = false;
      if (!used_in_pattern)
	{
	  imm_use_iterator imm_iter;
	  use_operand_p use_p;
	  gimple use_stmt;
	  tree lhs;
	  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
	  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

	  if (is_gimple_assign (stmt))
	    lhs = gimple_assign_lhs (stmt);
	  else
	    lhs = gimple_call_lhs (stmt);

	  /* This use is not itself in a pattern; if LHS has other uses that
	     are pattern uses, we should mark the stmt itself, and not the
	     pattern stmt.  */
	  if (lhs && TREE_CODE (lhs) == SSA_NAME)
	    FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
	      {
		if (is_gimple_debug (USE_STMT (use_p)))
		  continue;
		use_stmt = USE_STMT (use_p);

		if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
		  continue;

		if (vinfo_for_stmt (use_stmt)
		    && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
		  {
		    found = true;
		    break;
		  }
	      }
	}

      if (!found)
	{
	  /* This is the last stmt in a sequence that was detected as a
	     pattern that can potentially be vectorized.  Don't mark the stmt
	     as relevant/live because it's not going to be vectorized.
	     Instead mark the pattern-stmt that replaces it.  */

	  pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "last stmt in pattern. don't mark"
			     " relevant/live.\n");
	  stmt_info = vinfo_for_stmt (pattern_stmt);
	  gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
	  save_relevant = STMT_VINFO_RELEVANT (stmt_info);
	  save_live_p = STMT_VINFO_LIVE_P (stmt_info);
	  stmt = pattern_stmt;
	}
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "already marked relevant/live.\n");
      return;
    }

  worklist->safe_push (stmt);
}

/* Function vect_stmt_relevant_p.

   Return true if STMT in loop that is represented by LOOP_VINFO is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - control stmts in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */

static bool
vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
		      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
	 != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt)
	&& !gimple_clobber_p (stmt))
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vec_stmt_relevant_p: stmt has vdefs.\n");
	*relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
	{
	  basic_block bb = gimple_bb (USE_STMT (use_p));
	  if (!flow_bb_inside_loop_p (loop, bb))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "vec_stmt_relevant_p: used out of loop.\n");

	      if (is_gimple_debug (USE_STMT (use_p)))
		continue;

	      /* We expect all such uses to be in the loop exit phis
		 (because of loop closed form)   */
	      gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
	      gcc_assert (bb == single_exit (loop)->dest);

	      *live_p = true;
	    }
	}
    }

  return (*live_p || *relevant);
}
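
/* As an illustration of the criteria above, in

       for (i = 0; i < n; i++)
	 {
	   a[i] = b[i] + x;   <-- has a vdef (alters memory)
	   t = c[i] * 2;      <-- t is used after the loop
	 }
       ... = t;

   the store to a[i] sets *RELEVANT, and the out-of-loop use of t sets
   *LIVE_P through the loop-closed-SSA exit phi.  */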

/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref.  FORNOW this means that it's of one of
     the following forms:
     -1- ARRAY_REF = var
     -2- var = ARRAY_REF
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))
    {
      if (is_gimple_call (stmt)
	  && gimple_call_internal_p (stmt))
	switch (gimple_call_internal_fn (stmt))
	  {
	  case IFN_MASK_STORE:
	    operand = gimple_call_arg (stmt, 3);
	    if (operand == use)
	      return true;
	    /* FALLTHRU */
	  case IFN_MASK_LOAD:
	    operand = gimple_call_arg (stmt, 2);
	    if (operand == use)
	      return true;
	    break;
	  default:
	    break;
	  }
      return false;
    }

  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}
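
/* For example, given the data-ref stmt "a[i_5] = x_3": a USE of x_3 makes
   this function return true (x_3 is the stored value, not an index), while
   a USE of i_5 occurs only inside the ARRAY_REF and returns false, so the
   stmt defining i_5 need not be vectorized on account of this use.  */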

/*
   Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
   - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
     be performed.

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
       STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array
     indexing), which does not need to be directly vectorized, then the
     liveness/relevance of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
     skip DEF_STMT because it has already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant"
     will be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */

static bool
process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
	     enum vect_relevant relevant, vec<gimple> *worklist,
	     bool force)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, stmt, loop_vinfo, NULL, &def_stmt, &def, &dt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: unsupported use in stmt.\n");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "reduc-stmt defining reduc-phi in the same nest.\n");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
	dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
		  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
	outer-loop-header-bb:
		d = def_stmt
	inner-loop:
		stmt # use (d)
	outer-loop-tail-bb:
		...		  */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "outer-loop def-stmt defining inner-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
		      vect_used_in_scope : vect_unused_in_scope;
	  break;

	case vect_used_in_outer_by_reduction:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_by_reduction;
	  break;

	case vect_used_in_outer:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_in_scope;
	  break;

	case vect_used_in_scope:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
	outer-loop-header-bb:
		...
	inner-loop:
		d = def_stmt
	outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
		stmt # use (d)		*/
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "inner-loop def-stmt defining outer-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
	    || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
		      vect_used_in_outer_by_reduction : vect_unused_in_scope;
	  break;

	case vect_used_by_reduction:
	  relevant = vect_used_in_outer_by_reduction;
	  break;

	case vect_used_in_scope:
	  relevant = vect_used_in_outer;
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  vect_mark_relevant (worklist, def_stmt, relevant, live_p,
		      is_pattern_stmt_p (stmt_vinfo));
  return true;
}

/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.	 T1 = a[T0]

   3.    j = j + 1

   Stmts 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */

bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple stmt;
  unsigned int i;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple phi;
  bool live_p;
  enum vect_relevant relevant, tmp_relevant;
  enum vect_def_type def_type;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "=== vect_mark_stmts_to_be_vectorized ===\n");

  auto_vec<gimple, 64> worklist;

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  phi = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	    }

	  if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, phi, relevant, live_p, false);
	}
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  stmt = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	    }

	  if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, stmt, relevant, live_p, false);
	}
    }

  /* 2. Process_worklist */
  while (worklist.length () > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = worklist.pop ();
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	}

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines
	 it (DEF_STMT) as relevant/irrelevant and live/dead according to the
	 liveness and relevance properties of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
      live_p = STMT_VINFO_LIVE_P (stmt_vinfo);

      /* Generally, the liveness and relevance properties of STMT are
	 propagated as is to the DEF_STMTs of its USEs:
	   live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
	   relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)

	 One exception is when STMT has been identified as defining a
	 reduction variable; in this case we set the liveness/relevance
	 as follows:
	   live_p = false
	   relevant = vect_used_by_reduction
	 This is because we distinguish between two kinds of relevant stmts -
	 those that are used by a reduction computation, and those that are
	 (also) used by a regular computation.  This allows us later on to
	 identify stmts that are used solely by a reduction, and therefore the
	 order of the results that they produce does not have to be kept.  */

      def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
      tmp_relevant = relevant;
      switch (def_type)
	{
	case vect_reduction_def:
	  switch (tmp_relevant)
	    {
	    case vect_unused_in_scope:
	      relevant = vect_used_by_reduction;
	      break;

	    case vect_used_by_reduction:
	      if (gimple_code (stmt) == GIMPLE_PHI)
		break;
	      /* fall through */

	    default:
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of reduction.\n");
	      return false;
	    }

	  live_p = false;
	  break;

	case vect_nested_cycle:
	  if (tmp_relevant != vect_unused_in_scope
	      && tmp_relevant != vect_used_in_outer_by_reduction
	      && tmp_relevant != vect_used_in_outer)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of nested cycle.\n");

	      return false;
	    }

	  live_p = false;
	  break;

	case vect_double_reduction_def:
	  if (tmp_relevant != vect_unused_in_scope
	      && tmp_relevant != vect_used_by_reduction)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of double reduction.\n");

	      return false;
	    }

	  live_p = false;
	  break;

	default:
	  break;
	}

      if (is_pattern_stmt_p (stmt_vinfo))
	{
	  /* Pattern statements are not inserted into the code, so
	     FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
	     have to scan the RHS or function arguments instead.  */
	  if (is_gimple_assign (stmt))
	    {
	      enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
	      tree op = gimple_assign_rhs1 (stmt);

	      i = 1;
	      if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
		{
		  if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
				    live_p, relevant, &worklist, false)
		      || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
				       live_p, relevant, &worklist, false))
		    return false;
		  i = 2;
		}
	      for (; i < gimple_num_ops (stmt); i++)
		{
		  op = gimple_op (stmt, i);
		  if (TREE_CODE (op) == SSA_NAME
		      && !process_use (stmt, op, loop_vinfo, live_p, relevant,
				       &worklist, false))
		    return false;
		}
	    }
	  else if (is_gimple_call (stmt))
	    {
	      for (i = 0; i < gimple_call_num_args (stmt); i++)
		{
		  tree arg = gimple_call_arg (stmt, i);
		  if (!process_use (stmt, arg, loop_vinfo, live_p, relevant,
				    &worklist, false))
		    return false;
		}
	    }
	}
      else
	FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
	  {
	    tree op = USE_FROM_PTR (use_p);
	    if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
			      &worklist, false))
	      return false;
	  }

      if (STMT_VINFO_GATHER_P (stmt_vinfo))
	{
	  tree off;
	  tree decl = vect_check_gather (stmt, loop_vinfo, NULL, &off, NULL);
	  gcc_assert (decl);
	  if (!process_use (stmt, off, loop_vinfo, live_p, relevant,
			    &worklist, true))
	    return false;
	}
    } /* while worklist */

  return true;
}

/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
			enum vect_def_type *dt,
			stmt_vector_for_cost *prologue_cost_vec,
			stmt_vector_for_cost *body_cost_vec)
{
  int i;
  int inside_cost = 0, prologue_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += record_stmt_cost (prologue_cost_vec, 1, vector_stmt,
					 stmt_info, 0, vect_prologue);

  /* Pass the inside-of-loop statements to the target-specific cost model.  */
  inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
				  stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_simple_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}

/* Model cost for type demotion and promotion operations.  PWR is normally
   zero for single-step promotions and demotions.  It will be one if
   two-step promotion/demotion is required, and so on.  Each additional
   step doubles the number of instructions required.  */

static void
vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
				    enum vect_def_type *dt, int pwr)
{
  int i, tmp;
  int inside_cost = 0, prologue_cost = 0;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  void *target_cost_data;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (loop_vinfo)
    target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
  else
    target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);

  for (i = 0; i < pwr + 1; i++)
    {
      tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
	(i + 1) : i;
      inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
				    vec_promote_demote, stmt_info, 0,
				    vect_body);
    }

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
				      stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_promotion_demotion_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}

/* Function vect_cost_group_size

   For grouped load or store, return the group_size only if it is the first
   load or store of a group, else return 1.  This ensures that group size is
   only returned once per group.  */

static int
vect_cost_group_size (stmt_vec_info stmt_info)
{
  gimple first_stmt = GROUP_FIRST_ELEMENT (stmt_info);

  if (first_stmt == STMT_VINFO_STMT (stmt_info))
    return GROUP_SIZE (stmt_info);

  return 1;
}

/* Function vect_model_store_cost

   Models cost for stores.  In the case of grouped accesses, one access
   has the overhead of the grouped access attributed to it.  */

void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
		       bool store_lanes_p, enum vect_def_type dt,
		       slp_tree slp_node,
		       stmt_vector_for_cost *prologue_cost_vec,
		       stmt_vector_for_cost *body_cost_vec)
{
  int group_size;
  unsigned int inside_cost = 0, prologue_cost = 0;
  struct data_reference *first_dr;
  gimple first_stmt;

  if (dt == vect_constant_def || dt == vect_external_def)
    prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
				       stmt_info, 0, vect_prologue);

  /* Grouped access?  */
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (slp_node)
	{
	  first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
	  group_size = 1;
	}
      else
	{
	  first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
	  group_size = vect_cost_group_size (stmt_info);
	}

      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a grouped access.  */
  else
    {
      group_size = 1;
      first_dr = STMT_VINFO_DATA_REF (stmt_info);
    }

  /* We assume that the cost of a single store-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate stores.  If a grouped
     access is instead being provided by a permute-and-store operation,
     include the cost of the permutes.  */
  if (!store_lanes_p && group_size > 1
      && !STMT_VINFO_STRIDED_P (stmt_info))
    {
      /* Uses high and low interleave or shuffle operations for each
	 needed permute.  */
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_store_cost: strided group_size = %d .\n",
			 group_size);
    }

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  /* Costs of the stores.  */
  if (STMT_VINFO_STRIDED_P (stmt_info)
      && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      /* N scalar stores plus extracting the elements.  */
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				       scalar_store, stmt_info, 0, vect_body);
    }
  else
    vect_get_store_cost (first_dr, ncopies, &inside_cost, body_cost_vec);

  if (STMT_VINFO_STRIDED_P (stmt_info))
    inside_cost += record_stmt_cost (body_cost_vec,
				     ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				     vec_to_scalar, stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_store_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}

/* Calculate cost of DR's memory access.  */
void
vect_get_store_cost (struct data_reference *dr, int ncopies,
		     unsigned int *inside_cost,
		     stmt_vector_for_cost *body_cost_vec)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vector_store, stmt_info, 0,
					  vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: aligned.\n");
	break;
      }

    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned store.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_store, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: unaligned supported by "
			   "hardware.\n");
	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_store_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}

/* Function vect_model_load_cost

   Models cost for loads.  In the case of grouped accesses, the last access
   has the overhead of the grouped access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
		      bool load_lanes_p, slp_tree slp_node,
		      stmt_vector_for_cost *prologue_cost_vec,
		      stmt_vector_for_cost *body_cost_vec)
{
  int group_size;
  gimple first_stmt;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
  unsigned int inside_cost = 0, prologue_cost = 0;

  /* Grouped accesses?  */
  first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && first_stmt && !slp_node)
    {
      group_size = vect_cost_group_size (stmt_info);
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a grouped access.  */
  else
    {
      group_size = 1;
      first_dr = dr;
    }

  /* We assume that the cost of a single load-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate loads.  If a grouped
     access is instead being provided by a load-and-permute operation,
     include the cost of the permutes.  */
  if (!load_lanes_p && group_size > 1
      && !STMT_VINFO_STRIDED_P (stmt_info))
    {
      /* Uses even and odd extract operations or shuffle operations
	 for each needed permute.  */
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_load_cost: strided group_size = %d .\n",
			 group_size);
    }

  /* The loads themselves.  */
  if (STMT_VINFO_STRIDED_P (stmt_info)
      && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      /* N scalar loads plus gathering them into a vector.  */
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				       scalar_load, stmt_info, 0, vect_body);
    }
  else
    vect_get_load_cost (first_dr, ncopies,
			((!STMT_VINFO_GROUPED_ACCESS (stmt_info))
			 || group_size > 1 || slp_node),
			&inside_cost, &prologue_cost,
			prologue_cost_vec, body_cost_vec, true);
  if (STMT_VINFO_STRIDED_P (stmt_info))
    inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
				     stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_load_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}

/* Calculate cost of DR's memory access.  */
void
vect_get_load_cost (struct data_reference *dr, int ncopies,
		    bool add_realign_cost, unsigned int *inside_cost,
		    unsigned int *prologue_cost,
		    stmt_vector_for_cost *prologue_cost_vec,
		    stmt_vector_for_cost *body_cost_vec,
		    bool record_prologue_costs)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: aligned.\n");

	break;
      }
    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned load.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_load, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned supported by "
			   "hardware.\n");

	break;
      }
    case dr_explicit_realign:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
					  vector_load, stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vec_perm, stmt_info, 0, vect_body);

	/* FIXME: If the misalignment remains fixed across the iterations of
	   the containing loop, the following cost should be added to the
	   prologue costs.  */
	if (targetm.vectorize.builtin_mask_for_load)
	  *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
					    stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign\n");

	break;
      }
    case dr_explicit_realign_optimized:
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned software "
			   "pipelined.\n");

	/* Unaligned software pipeline has a load of an address, an initial
	   load, and possibly a mask operation to "prime" the loop.  However,
	   if this is an access in a group of loads, which provide grouped
	   access, then the above cost should only be considered for one
	   access in the group.  Inside the loop, there is a load op
	   and a realignment op.  */

	if (add_realign_cost && record_prologue_costs)
	  {
	    *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
						vector_stmt, stmt_info,
						0, vect_prologue);
	    if (targetm.vectorize.builtin_mask_for_load)
	      *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
						  vector_stmt, stmt_info,
						  0, vect_prologue);
	  }

	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign optimized"
			   "\n");

	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_load_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}

/* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
   the loop preheader for the vectorized stmt STMT.  */

static void
vect_init_vector_1 (gimple stmt, gimple new_stmt, gimple_stmt_iterator *gsi)
{
  if (gsi)
    vect_finish_stmt_generation (stmt, new_stmt, gsi);
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
	{
	  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
	  basic_block new_bb;
	  edge pe;

	  if (nested_in_vect_loop_p (loop, stmt))
	    loop = loop->inner;

	  pe = loop_preheader_edge (loop);
	  new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
	  gcc_assert (!new_bb);
	}
      else
	{
	  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
	  basic_block bb;
	  gimple_stmt_iterator gsi_bb_start;

	  gcc_assert (bb_vinfo);
	  bb = BB_VINFO_BB (bb_vinfo);
	  gsi_bb_start = gsi_after_labels (bb);
	  gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
	}
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "created new init_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
    }
}

/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new variable of type
   TYPE with the value VAL.  If TYPE is a vector type and VAL does not have
   vector type a vector with all elements equal to VAL is created first.
   Place the initialization at BSI if it is not NULL.  Otherwise, place the
   initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */

tree
vect_init_vector (gimple stmt, tree val, tree type, gimple_stmt_iterator *gsi)
{
  tree new_var;
  gimple init_stmt;
  tree vec_oprnd;
  tree new_temp;

  if (TREE_CODE (type) == VECTOR_TYPE
      && TREE_CODE (TREE_TYPE (val)) != VECTOR_TYPE)
    {
      if (!types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
	{
	  if (CONSTANT_CLASS_P (val))
	    val = fold_unary (VIEW_CONVERT_EXPR, TREE_TYPE (type), val);
	  else
	    {
	      new_temp = make_ssa_name (TREE_TYPE (type));
	      init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
	      vect_init_vector_1 (stmt, init_stmt, gsi);
	      val = new_temp;
	    }
	}
      val = build_vector_from_val (type, val);
    }

  new_var = vect_get_new_vect_var (type, vect_simple_var, "cst_");
  init_stmt = gimple_build_assign (new_var, val);
  new_temp = make_ssa_name (new_var, init_stmt);
  gimple_assign_set_lhs (init_stmt, new_temp);
  vect_init_vector_1 (stmt, init_stmt, gsi);
  vec_oprnd = gimple_assign_lhs (init_stmt);
  return vec_oprnd;
}
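
/* For example (v4si_type here standing for any four-element integer vector
   type node), the call

       vect_init_vector (stmt, build_int_cst (integer_type_node, 5),
			 v4si_type, NULL);

   broadcasts the constant into "cst_ = {5, 5, 5, 5}" and, because GSI is
   NULL, vect_init_vector_1 places the init stmt on the loop preheader
   edge; the new SSA name of cst_ is returned.  */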

/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT.  This function returns a (vector) def that will
   be used in the vectorized stmt for STMT.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector
   def needs to be introduced.  */

tree
vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
{
  tree vec_oprnd;
  gimple vec_stmt;
  gimple def_stmt;
  stmt_vec_info def_stmt_info = NULL;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  unsigned int nunits;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  tree def;
  enum vect_def_type dt;
  bool is_simple_use;
  tree vector_type;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_get_vec_def_for_operand: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
      dump_printf (MSG_NOTE, "\n");
    }

  is_simple_use = vect_is_simple_use (op, stmt, loop_vinfo, NULL,
				      &def_stmt, &def, &dt);
  gcc_assert (is_simple_use);
  if (dump_enabled_p ())
    {
      int loc_printed = 0;
      if (def)
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "def = ");
	  loc_printed = 1;
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, def);
	  dump_printf (MSG_NOTE, "\n");
	}
      if (def_stmt)
	{
	  if (loc_printed)
	    dump_printf (MSG_NOTE, "  def_stmt = ");
	  else
	    dump_printf_loc (MSG_NOTE, vect_location, "  def_stmt = ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
	}
    }

  switch (dt)
    {
    /* Case 1: operand is a constant.  */
    case vect_constant_def:
      {
	vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
	gcc_assert (vector_type);
	nunits = TYPE_VECTOR_SUBPARTS (vector_type);

	if (scalar_def)
	  *scalar_def = op;

	/* Create 'vect_cst_ = {cst,cst,...,cst}'  */
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "Create vector_cst. nunits = %d\n", nunits);

	return vect_init_vector (stmt, op, vector_type, NULL);
      }

    /* Case 2: operand is defined outside the loop - loop invariant.  */
    case vect_external_def:
      {
	vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
	gcc_assert (vector_type);

	if (scalar_def)
	  *scalar_def = def;

	/* Create 'vec_inv = {inv,inv,..,inv}'  */
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location, "Create vector_inv.\n");

	return vect_init_vector (stmt, def, vector_type, NULL);
      }

    /* Case 3: operand is defined inside the loop.  */
    case vect_internal_def:
      {
	if (scalar_def)
	  *scalar_def = NULL/* FIXME tuples: def_stmt*/;

	/* Get the def from the vectorized stmt.  */
	def_stmt_info = vinfo_for_stmt (def_stmt);

	vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	/* Get vectorized pattern statement.  */
	if (!vec_stmt
	    && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
	    && !STMT_VINFO_RELEVANT (def_stmt_info))
	  vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
		       STMT_VINFO_RELATED_STMT (def_stmt_info)));
	gcc_assert (vec_stmt);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else if (is_gimple_call (vec_stmt))
	  vec_oprnd = gimple_call_lhs (vec_stmt);
	else
	  vec_oprnd = gimple_assign_lhs (vec_stmt);
	return vec_oprnd;
      }

    /* Case 4: operand is defined by a loop header phi - reduction  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
      {
	struct loop *loop;

	gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
	loop = (gimple_bb (def_stmt))->loop_father;

	/* Get the def before the loop  */
	op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
	return get_initial_def_for_reduction (stmt, op, scalar_def);
      }

    /* Case 5: operand is defined by loop-header phi - induction.  */
    case vect_induction_def:
      {
	gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);

	/* Get the def from the vectorized stmt.  */
	def_stmt_info = vinfo_for_stmt (def_stmt);
	vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else
	  vec_oprnd = gimple_get_lhs (vec_stmt);
	return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}

/* Function vect_get_vec_def_for_stmt_copy

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT
   field of the stmt that defines VEC_OPRND.
   DT is the type of the vector def VEC_OPRND.

   Context:
        In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of
   the vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:         vectorized into:        STMT_VINFO_RELATED_STMT

   S1: x = load         VS1.0:  vx.0 = memref0      VS1.1
                        VS1.1:  vx.1 = memref1      VS1.2
                        VS1.2:  vx.2 = memref2      VS1.3
                        VS1.3:  vx.3 = memref3

   S2: z = x + ...      VSnew.0:  vz0 = vx.0 + ...  VSnew.1
                        VSnew.1:  vz1 = vx.1 + ...  VSnew.2
                        VSnew.2:  vz2 = vx.2 + ...  VSnew.3
                        VSnew.3:  vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
        To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

        To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

        For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
        vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
        vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
        vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2);  */

tree
vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
{
  gimple vec_stmt_for_operand;
  stmt_vec_info def_stmt_info;

  /* Do nothing; can reuse same def.  */
  if (dt == vect_external_def || dt == vect_constant_def )
    return vec_oprnd;

  vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
  def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
  gcc_assert (def_stmt_info);
  vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (vec_stmt_for_operand);
  vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
    vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
  else
    vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  return vec_oprnd;
}

/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */

static void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
				 vec<tree> *vec_oprnds0,
				 vec<tree> *vec_oprnds1)
{
  tree vec_oprnd = vec_oprnds0->pop ();

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  vec_oprnds0->quick_push (vec_oprnd);

  if (vec_oprnds1 && vec_oprnds1->length ())
    {
      vec_oprnd = vec_oprnds1->pop ();
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      vec_oprnds1->quick_push (vec_oprnd);
    }
}

/* Get vectorized definitions for OP0 and OP1.
   REDUC_INDEX is the index of reduction operand in case of reduction,
   and -1 otherwise.  */

void
vect_get_vec_defs (tree op0, tree op1, gimple stmt,
		   vec<tree> *vec_oprnds0,
		   vec<tree> *vec_oprnds1,
		   slp_tree slp_node, int reduc_index)
{
  if (slp_node)
    {
      int nops = (op1 == NULL_TREE) ? 1 : 2;
      auto_vec<tree> ops (nops);
      auto_vec<vec<tree> > vec_defs (nops);

      ops.quick_push (op0);
      if (op1)
	ops.quick_push (op1);

      vect_get_slp_defs (ops, slp_node, &vec_defs, reduc_index);

      *vec_oprnds0 = vec_defs[0];
      if (op1)
	*vec_oprnds1 = vec_defs[1];
    }
  else
    {
      tree vec_oprnd;

      vec_oprnds0->create (1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
      vec_oprnds0->quick_push (vec_oprnd);

      if (op1)
	{
	  vec_oprnds1->create (1);
	  vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
	  vec_oprnds1->quick_push (vec_oprnd);
	}
    }
}

/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

void
vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
			     gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);

  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  if (!gsi_end_p (*gsi)
      && gimple_has_mem_ops (vec_stmt))
    {
      gimple at_stmt = gsi_stmt (*gsi);
      tree vuse = gimple_vuse (at_stmt);
      if (vuse && TREE_CODE (vuse) == SSA_NAME)
	{
	  tree vdef = gimple_vdef (at_stmt);
	  gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
	  /* If we have an SSA vuse and insert a store, update virtual
	     SSA form to avoid triggering the renamer.  Do so only
	     if we can easily see all uses - which is what almost always
	     happens with the way vectorized stmts are inserted.  */
	  if ((vdef && TREE_CODE (vdef) == SSA_NAME)
	      && ((is_gimple_assign (vec_stmt)
		   && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
		  || (is_gimple_call (vec_stmt)
		      && !(gimple_call_flags (vec_stmt)
			   & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
	    {
	      tree new_vdef = copy_ssa_name (vuse, vec_stmt);
	      gimple_set_vdef (vec_stmt, new_vdef);
	      SET_USE (gimple_vuse_op (at_stmt), new_vdef);
	    }
	}
    }
  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
						   bb_vinfo));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
    }

  gimple_set_location (vec_stmt, gimple_location (stmt));

  /* While EH edges will generally prevent vectorization, stmt might
     e.g. be in a must-not-throw region.  Ensure newly created stmts
     that could throw are part of the same region.  */
  int lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
    add_stmt_to_eh_lp (vec_stmt, lp_nr);
}

/* Checks if CALL can be vectorized in type VECTYPE.  Returns
   a function declaration if the target has a vectorized version
   of the function, or NULL_TREE if the function cannot be vectorized.  */

tree
vectorizable_function (gcall *call, tree vectype_out, tree vectype_in)
{
  tree fndecl = gimple_call_fndecl (call);

  /* We only handle functions that do not read or clobber memory -- i.e.
     const or novops ones.  */
  if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
    return NULL_TREE;

  if (!fndecl
      || TREE_CODE (fndecl) != FUNCTION_DECL
      || !DECL_BUILT_IN (fndecl))
    return NULL_TREE;

  return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
							vectype_in);
}


static tree permute_vec_elements (tree, tree, tree, gimple,
				  gimple_stmt_iterator *);
1742 /* Function vectorizable_mask_load_store.
1744 Check if STMT performs a conditional load or store that can be vectorized.
1745 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1746 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
1747 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1749 static bool
1750 vectorizable_mask_load_store (gimple stmt, gimple_stmt_iterator *gsi,
1751 gimple *vec_stmt, slp_tree slp_node)
1753 tree vec_dest = NULL;
1754 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1755 stmt_vec_info prev_stmt_info;
1756 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1757 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1758 bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
1759 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
1760 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1761 tree elem_type;
1762 gimple new_stmt;
1763 tree dummy;
1764 tree dataref_ptr = NULL_TREE;
1765 gimple ptr_incr;
1766 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
1767 int ncopies;
1768 int i, j;
1769 bool inv_p;
1770 tree gather_base = NULL_TREE, gather_off = NULL_TREE;
1771 tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
1772 int gather_scale = 1;
1773 enum vect_def_type gather_dt = vect_unknown_def_type;
1774 bool is_store;
1775 tree mask;
1776 gimple def_stmt;
1777 tree def;
1778 enum vect_def_type dt;
1780 if (slp_node != NULL)
1781 return false;
1783 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
1784 gcc_assert (ncopies >= 1);
1786 is_store = gimple_call_internal_fn (stmt) == IFN_MASK_STORE;
1787 mask = gimple_call_arg (stmt, 2);
1788 if (TYPE_PRECISION (TREE_TYPE (mask))
1789 != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype))))
1790 return false;
1792 /* FORNOW. This restriction should be relaxed. */
1793 if (nested_in_vect_loop && ncopies > 1)
1795 if (dump_enabled_p ())
1796 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1797 "multiple types in nested loop.");
1798 return false;
1801 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1802 return false;
1804 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1805 return false;
1807 if (!STMT_VINFO_DATA_REF (stmt_info))
1808 return false;
1810 elem_type = TREE_TYPE (vectype);
1812 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1813 return false;
1815 if (STMT_VINFO_STRIDED_P (stmt_info))
1816 return false;
1818 if (STMT_VINFO_GATHER_P (stmt_info))
1820 gimple def_stmt;
1821 tree def;
1822 gather_decl = vect_check_gather (stmt, loop_vinfo, &gather_base,
1823 &gather_off, &gather_scale);
1824 gcc_assert (gather_decl);
1825 if (!vect_is_simple_use_1 (gather_off, NULL, loop_vinfo, NULL,
1826 &def_stmt, &def, &gather_dt,
1827 &gather_off_vectype))
1829 if (dump_enabled_p ())
1830 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1831 "gather index use not simple.");
1832 return false;
1835 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
1836 tree masktype
1837 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
1838 if (TREE_CODE (masktype) == INTEGER_TYPE)
1840 if (dump_enabled_p ())
1841 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1842 "masked gather with integer mask not supported.");
1843 return false;
1846 else if (tree_int_cst_compare (nested_in_vect_loop
1847 ? STMT_VINFO_DR_STEP (stmt_info)
1848 : DR_STEP (dr), size_zero_node) <= 0)
1849 return false;
1850 else if (!VECTOR_MODE_P (TYPE_MODE (vectype))
1851 || !can_vec_mask_load_store_p (TYPE_MODE (vectype), !is_store))
1852 return false;
1854 if (TREE_CODE (mask) != SSA_NAME)
1855 return false;
1857 if (!vect_is_simple_use (mask, stmt, loop_vinfo, NULL,
1858 &def_stmt, &def, &dt))
1859 return false;
1861 if (is_store)
1863 tree rhs = gimple_call_arg (stmt, 3);
1864 if (!vect_is_simple_use (rhs, stmt, loop_vinfo, NULL,
1865 &def_stmt, &def, &dt))
1866 return false;
1869 if (!vec_stmt) /* transformation not required. */
1871 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
1872 if (is_store)
1873 vect_model_store_cost (stmt_info, ncopies, false, dt,
1874 NULL, NULL, NULL);
1875 else
1876 vect_model_load_cost (stmt_info, ncopies, false, NULL, NULL, NULL);
1877 return true;
1880 /** Transform. **/
1882 if (STMT_VINFO_GATHER_P (stmt_info))
1884 tree vec_oprnd0 = NULL_TREE, op;
1885 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
1886 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
1887 tree ptr, vec_mask = NULL_TREE, mask_op = NULL_TREE, var, scale;
1888 tree perm_mask = NULL_TREE, prev_res = NULL_TREE;
1889 tree mask_perm_mask = NULL_TREE;
1890 edge pe = loop_preheader_edge (loop);
1891 gimple_seq seq;
1892 basic_block new_bb;
1893 enum { NARROW, NONE, WIDEN } modifier;
1894 int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);
1896 rettype = TREE_TYPE (TREE_TYPE (gather_decl));
1897 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1898 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1899 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1900 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1901 scaletype = TREE_VALUE (arglist);
1902 gcc_checking_assert (types_compatible_p (srctype, rettype)
1903 && types_compatible_p (srctype, masktype));
1905 if (nunits == gather_off_nunits)
1906 modifier = NONE;
1907 else if (nunits == gather_off_nunits / 2)
1909 unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
1910 modifier = WIDEN;
1912 for (i = 0; i < gather_off_nunits; ++i)
1913 sel[i] = i | nunits;
1915 perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
1917 else if (nunits == gather_off_nunits * 2)
1919 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
1920 modifier = NARROW;
1922 for (i = 0; i < nunits; ++i)
1923 sel[i] = i < gather_off_nunits
1924 ? i : i + nunits - gather_off_nunits;
1926 perm_mask = vect_gen_perm_mask_checked (vectype, sel);
1927 ncopies *= 2;
1928 for (i = 0; i < nunits; ++i)
1929 sel[i] = i | gather_off_nunits;
1930 mask_perm_mask = vect_gen_perm_mask_checked (masktype, sel);
1932 else
1933 gcc_unreachable ();
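/* Worked example of the permutation masks built above (vector
   widths illustrative): for a V8SF load gathered with V4DI offsets,
   nunits == 8 and gather_off_nunits == 4, so modifier == NARROW and
   ncopies is doubled.  perm_mask == { 0, 1, 2, 3, 8, 9, 10, 11 }
   then glues each pair of 4-element gather results back into one
   V8SF vector, and mask_perm_mask == { 4, 5, 6, 7, 4, 5, 6, 7 }
   moves the upper half of the 8-element mask into the lanes read by
   the odd copies.  */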
1935 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
1937 ptr = fold_convert (ptrtype, gather_base);
1938 if (!is_gimple_min_invariant (ptr))
1940 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
1941 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
1942 gcc_assert (!new_bb);
1945 scale = build_int_cst (scaletype, gather_scale);
1947 prev_stmt_info = NULL;
1948 for (j = 0; j < ncopies; ++j)
1950 if (modifier == WIDEN && (j & 1))
1951 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
1952 perm_mask, stmt, gsi);
1953 else if (j == 0)
1954 op = vec_oprnd0
1955 = vect_get_vec_def_for_operand (gather_off, stmt, NULL);
1956 else
1957 op = vec_oprnd0
1958 = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);
1960 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
1962 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
1963 == TYPE_VECTOR_SUBPARTS (idxtype));
1964 var = vect_get_new_vect_var (idxtype, vect_simple_var, NULL);
1965 var = make_ssa_name (var);
1966 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
1967 new_stmt
1968 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
1969 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1970 op = var;
1973 if (mask_perm_mask && (j & 1))
1974 mask_op = permute_vec_elements (mask_op, mask_op,
1975 mask_perm_mask, stmt, gsi);
1976 else
1978 if (j == 0)
1979 vec_mask = vect_get_vec_def_for_operand (mask, stmt, NULL);
1980 else
1982 vect_is_simple_use (vec_mask, NULL, loop_vinfo, NULL,
1983 &def_stmt, &def, &dt);
1984 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
1987 mask_op = vec_mask;
1988 if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
1990 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op))
1991 == TYPE_VECTOR_SUBPARTS (masktype));
1992 var = vect_get_new_vect_var (masktype, vect_simple_var,
1993 NULL);
1994 var = make_ssa_name (var);
1995 mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
1996 new_stmt
1997 = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op);
1998 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1999 mask_op = var;
2003 new_stmt
2004 = gimple_build_call (gather_decl, 5, mask_op, ptr, op, mask_op,
2005 scale);
2007 if (!useless_type_conversion_p (vectype, rettype))
2009 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
2010 == TYPE_VECTOR_SUBPARTS (rettype));
2011 var = vect_get_new_vect_var (rettype, vect_simple_var, NULL);
2012 op = make_ssa_name (var, new_stmt);
2013 gimple_call_set_lhs (new_stmt, op);
2014 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2015 var = make_ssa_name (vec_dest);
2016 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
2017 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
2019 else
2021 var = make_ssa_name (vec_dest, new_stmt);
2022 gimple_call_set_lhs (new_stmt, var);
2025 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2027 if (modifier == NARROW)
2029 if ((j & 1) == 0)
2031 prev_res = var;
2032 continue;
2034 var = permute_vec_elements (prev_res, var,
2035 perm_mask, stmt, gsi);
2036 new_stmt = SSA_NAME_DEF_STMT (var);
2039 if (prev_stmt_info == NULL)
2040 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2041 else
2042 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2043 prev_stmt_info = vinfo_for_stmt (new_stmt);
2046 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
2047 from the IL. */
2048 tree lhs = gimple_call_lhs (stmt);
2049 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
2050 set_vinfo_for_stmt (new_stmt, stmt_info);
2051 set_vinfo_for_stmt (stmt, NULL);
2052 STMT_VINFO_STMT (stmt_info) = new_stmt;
2053 gsi_replace (gsi, new_stmt, true);
2054 return true;
2056 else if (is_store)
2058 tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
2059 prev_stmt_info = NULL;
2060 for (i = 0; i < ncopies; i++)
2062 unsigned align, misalign;
2064 if (i == 0)
2066 tree rhs = gimple_call_arg (stmt, 3);
2067 vec_rhs = vect_get_vec_def_for_operand (rhs, stmt, NULL);
2068 vec_mask = vect_get_vec_def_for_operand (mask, stmt, NULL);
2069 /* We should have caught mismatched types earlier. */
2070 gcc_assert (useless_type_conversion_p (vectype,
2071 TREE_TYPE (vec_rhs)));
2072 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
2073 NULL_TREE, &dummy, gsi,
2074 &ptr_incr, false, &inv_p);
2075 gcc_assert (!inv_p);
2077 else
2079 vect_is_simple_use (vec_rhs, NULL, loop_vinfo, NULL, &def_stmt,
2080 &def, &dt);
2081 vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs);
2082 vect_is_simple_use (vec_mask, NULL, loop_vinfo, NULL, &def_stmt,
2083 &def, &dt);
2084 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2085 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2086 TYPE_SIZE_UNIT (vectype));
2089 align = TYPE_ALIGN_UNIT (vectype);
2090 if (aligned_access_p (dr))
2091 misalign = 0;
2092 else if (DR_MISALIGNMENT (dr) == -1)
2094 align = TYPE_ALIGN_UNIT (elem_type);
2095 misalign = 0;
2097 else
2098 misalign = DR_MISALIGNMENT (dr);
2099 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2100 misalign);
2101 new_stmt
2102 = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
2103 gimple_call_arg (stmt, 1),
2104 vec_mask, vec_rhs);
2105 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2106 if (i == 0)
2107 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2108 else
2109 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2110 prev_stmt_info = vinfo_for_stmt (new_stmt);
2113 else
2115 tree vec_mask = NULL_TREE;
2116 prev_stmt_info = NULL;
2117 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
2118 for (i = 0; i < ncopies; i++)
2120 unsigned align, misalign;
2122 if (i == 0)
2124 vec_mask = vect_get_vec_def_for_operand (mask, stmt, NULL);
2125 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
2126 NULL_TREE, &dummy, gsi,
2127 &ptr_incr, false, &inv_p);
2128 gcc_assert (!inv_p);
2130 else
2132 vect_is_simple_use (vec_mask, NULL, loop_vinfo, NULL, &def_stmt,
2133 &def, &dt);
2134 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2135 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2136 TYPE_SIZE_UNIT (vectype));
2139 align = TYPE_ALIGN_UNIT (vectype);
2140 if (aligned_access_p (dr))
2141 misalign = 0;
2142 else if (DR_MISALIGNMENT (dr) == -1)
2144 align = TYPE_ALIGN_UNIT (elem_type);
2145 misalign = 0;
2147 else
2148 misalign = DR_MISALIGNMENT (dr);
2149 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2150 misalign);
2151 new_stmt
2152 = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
2153 gimple_call_arg (stmt, 1),
2154 vec_mask);
2155 gimple_call_set_lhs (new_stmt, make_ssa_name (vec_dest));
2156 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2157 if (i == 0)
2158 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2159 else
2160 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2161 prev_stmt_info = vinfo_for_stmt (new_stmt);
2165 if (!is_store)
2167 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
2168 from the IL. */
2169 tree lhs = gimple_call_lhs (stmt);
2170 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
2171 set_vinfo_for_stmt (new_stmt, stmt_info);
2172 set_vinfo_for_stmt (stmt, NULL);
2173 STMT_VINFO_STMT (stmt_info) = new_stmt;
2174 gsi_replace (gsi, new_stmt, true);
2177 return true;
2181 /* Function vectorizable_call.
2183 Check if GS performs a function call that can be vectorized.
2184 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2185 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
2186 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
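/* Illustrative example (whether this fires is entirely
   target-dependent): for a loop statement like

       y_3 = sqrtf (x_2);

   with a V4SF vectype, vectorizable_function below may return a
   vector sqrt builtin decl, and the scalar call is then replaced by
   a single vector call such as vect_y_4 = VSQRTPS (vect_x_5), where
   VSQRTPS stands for whatever decl the target provides (name
   hypothetical).  */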
2188 static bool
2189 vectorizable_call (gimple gs, gimple_stmt_iterator *gsi, gimple *vec_stmt,
2190 slp_tree slp_node)
2192 gcall *stmt;
2193 tree vec_dest;
2194 tree scalar_dest;
2195 tree op, type;
2196 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
2197 stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info;
2198 tree vectype_out, vectype_in;
2199 int nunits_in;
2200 int nunits_out;
2201 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2202 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2203 tree fndecl, new_temp, def, rhs_type;
2204 gimple def_stmt;
2205 enum vect_def_type dt[3]
2206 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
2207 gimple new_stmt = NULL;
2208 int ncopies, j;
2209 vec<tree> vargs = vNULL;
2210 enum { NARROW, NONE, WIDEN } modifier;
2211 size_t i, nargs;
2212 tree lhs;
2214 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2215 return false;
2217 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2218 return false;
2220 /* Is GS a vectorizable call? */
2221 stmt = dyn_cast <gcall *> (gs);
2222 if (!stmt)
2223 return false;
2225 if (gimple_call_internal_p (stmt)
2226 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
2227 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
2228 return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
2229 slp_node);
2231 if (gimple_call_lhs (stmt) == NULL_TREE
2232 || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
2233 return false;
2235 gcc_checking_assert (!stmt_can_throw_internal (stmt));
2237 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2239 /* Process function arguments. */
2240 rhs_type = NULL_TREE;
2241 vectype_in = NULL_TREE;
2242 nargs = gimple_call_num_args (stmt);
2244 /* Bail out if the function has more than three arguments; we do not have
2245 interesting builtin functions to vectorize with more than two arguments
2246 except for fma. A call with no arguments is not vectorizable either. */
2247 if (nargs == 0 || nargs > 3)
2248 return false;
2250 /* Ignore the argument of IFN_GOMP_SIMD_LANE; it is magic. */
2251 if (gimple_call_internal_p (stmt)
2252 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2254 nargs = 0;
2255 rhs_type = unsigned_type_node;
2258 for (i = 0; i < nargs; i++)
2260 tree opvectype;
2262 op = gimple_call_arg (stmt, i);
2264 /* We can only handle calls with arguments of the same type. */
2265 if (rhs_type
2266 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
2268 if (dump_enabled_p ())
2269 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2270 "argument types differ.\n");
2271 return false;
2273 if (!rhs_type)
2274 rhs_type = TREE_TYPE (op);
2276 if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
2277 &def_stmt, &def, &dt[i], &opvectype))
2279 if (dump_enabled_p ())
2280 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2281 "use not simple.\n");
2282 return false;
2285 if (!vectype_in)
2286 vectype_in = opvectype;
2287 else if (opvectype
2288 && opvectype != vectype_in)
2290 if (dump_enabled_p ())
2291 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2292 "argument vector types differ.\n");
2293 return false;
2296 /* If all arguments are external or constant defs, use a vector type with
2297 the same size as the output vector type. */
2298 if (!vectype_in)
2299 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
2300 if (vec_stmt)
2301 gcc_assert (vectype_in);
2302 if (!vectype_in)
2304 if (dump_enabled_p ())
2306 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2307 "no vectype for scalar type ");
2308 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
2309 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2312 return false;
2315 /* FORNOW */
2316 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2317 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2318 if (nunits_in == nunits_out / 2)
2319 modifier = NARROW;
2320 else if (nunits_out == nunits_in)
2321 modifier = NONE;
2322 else if (nunits_out == nunits_in / 2)
2323 modifier = WIDEN;
2324 else
2325 return false;
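/* Example of the three modifiers (vector widths illustrative): a
   V4SF -> V4SF call is NONE; a call consuming V2DF vectors and
   producing 4-element integer vectors has nunits_in == nunits_out / 2,
   i.e. NARROW, so two input vectors feed each output vector; WIDEN
   (nunits_out == nunits_in / 2) is recognized here but rejected
   below, as no current target implements it.  */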
2327 /* For now, we only vectorize functions if a target specific builtin
2328 is available. TODO -- in some cases, it might be profitable to
2329 insert the calls for pieces of the vector, in order to be able
2330 to vectorize other operations in the loop. */
2331 fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
2332 if (fndecl == NULL_TREE)
2334 if (gimple_call_internal_p (stmt)
2335 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE
2336 && !slp_node
2337 && loop_vinfo
2338 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2339 && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
2340 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2341 == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
2343 /* We can handle IFN_GOMP_SIMD_LANE by returning a
2344 { 0, 1, 2, ... vf - 1 } vector. */
2345 gcc_assert (nargs == 0);
2347 else
2349 if (dump_enabled_p ())
2350 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2351 "function is not vectorizable.\n");
2352 return false;
2356 gcc_assert (!gimple_vuse (stmt));
2358 if (slp_node || PURE_SLP_STMT (stmt_info))
2359 ncopies = 1;
2360 else if (modifier == NARROW)
2361 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2362 else
2363 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2365 /* Sanity check: make sure that at least one copy of the vectorized stmt
2366 needs to be generated. */
2367 gcc_assert (ncopies >= 1);
2369 if (!vec_stmt) /* transformation not required. */
2371 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2372 if (dump_enabled_p ())
2373 dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
2374 "\n");
2375 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
2376 return true;
2379 /** Transform. **/
2381 if (dump_enabled_p ())
2382 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
2384 /* Handle def. */
2385 scalar_dest = gimple_call_lhs (stmt);
2386 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2388 prev_stmt_info = NULL;
2389 switch (modifier)
2391 case NONE:
2392 for (j = 0; j < ncopies; ++j)
2394 /* Build argument list for the vectorized call. */
2395 if (j == 0)
2396 vargs.create (nargs);
2397 else
2398 vargs.truncate (0);
2400 if (slp_node)
2402 auto_vec<vec<tree> > vec_defs (nargs);
2403 vec<tree> vec_oprnds0;
2405 for (i = 0; i < nargs; i++)
2406 vargs.quick_push (gimple_call_arg (stmt, i));
2407 vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
2408 vec_oprnds0 = vec_defs[0];
2410 /* Arguments are ready. Create the new vector stmt. */
2411 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
2413 size_t k;
2414 for (k = 0; k < nargs; k++)
2416 vec<tree> vec_oprndsk = vec_defs[k];
2417 vargs[k] = vec_oprndsk[i];
2419 new_stmt = gimple_build_call_vec (fndecl, vargs);
2420 new_temp = make_ssa_name (vec_dest, new_stmt);
2421 gimple_call_set_lhs (new_stmt, new_temp);
2422 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2423 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2426 for (i = 0; i < nargs; i++)
2428 vec<tree> vec_oprndsi = vec_defs[i];
2429 vec_oprndsi.release ();
2431 continue;
2434 for (i = 0; i < nargs; i++)
2436 op = gimple_call_arg (stmt, i);
2437 if (j == 0)
2438 vec_oprnd0
2439 = vect_get_vec_def_for_operand (op, stmt, NULL);
2440 else
2442 vec_oprnd0 = gimple_call_arg (new_stmt, i);
2443 vec_oprnd0
2444 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2447 vargs.quick_push (vec_oprnd0);
2450 if (gimple_call_internal_p (stmt)
2451 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2453 tree *v = XALLOCAVEC (tree, nunits_out);
2454 int k;
2455 for (k = 0; k < nunits_out; ++k)
2456 v[k] = build_int_cst (unsigned_type_node, j * nunits_out + k);
2457 tree cst = build_vector (vectype_out, v);
2458 tree new_var
2459 = vect_get_new_vect_var (vectype_out, vect_simple_var, "cst_");
2460 gimple init_stmt = gimple_build_assign (new_var, cst);
2461 new_temp = make_ssa_name (new_var, init_stmt);
2462 gimple_assign_set_lhs (init_stmt, new_temp);
2463 vect_init_vector_1 (stmt, init_stmt, NULL);
2464 new_temp = make_ssa_name (vec_dest);
2465 new_stmt = gimple_build_assign (new_temp,
2466 gimple_assign_lhs (init_stmt));
2468 else
2470 new_stmt = gimple_build_call_vec (fndecl, vargs);
2471 new_temp = make_ssa_name (vec_dest, new_stmt);
2472 gimple_call_set_lhs (new_stmt, new_temp);
2474 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2476 if (j == 0)
2477 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2478 else
2479 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2481 prev_stmt_info = vinfo_for_stmt (new_stmt);
2484 break;
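/* Note that for IFN_GOMP_SIMD_LANE the NONE case above materializes,
   for copy j, the constant vector { j * nunits_out, ...,
   (j + 1) * nunits_out - 1 }; e.g. with nunits_out == 4 and
   ncopies == 2 the two copies are { 0, 1, 2, 3 } and
   { 4, 5, 6, 7 }.  */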
2486 case NARROW:
2487 for (j = 0; j < ncopies; ++j)
2489 /* Build argument list for the vectorized call. */
2490 if (j == 0)
2491 vargs.create (nargs * 2);
2492 else
2493 vargs.truncate (0);
2495 if (slp_node)
2497 auto_vec<vec<tree> > vec_defs (nargs);
2498 vec<tree> vec_oprnds0;
2500 for (i = 0; i < nargs; i++)
2501 vargs.quick_push (gimple_call_arg (stmt, i));
2502 vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
2503 vec_oprnds0 = vec_defs[0];
2505 /* Arguments are ready. Create the new vector stmt. */
2506 for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
2508 size_t k;
2509 vargs.truncate (0);
2510 for (k = 0; k < nargs; k++)
2512 vec<tree> vec_oprndsk = vec_defs[k];
2513 vargs.quick_push (vec_oprndsk[i]);
2514 vargs.quick_push (vec_oprndsk[i + 1]);
2516 new_stmt = gimple_build_call_vec (fndecl, vargs);
2517 new_temp = make_ssa_name (vec_dest, new_stmt);
2518 gimple_call_set_lhs (new_stmt, new_temp);
2519 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2520 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2523 for (i = 0; i < nargs; i++)
2525 vec<tree> vec_oprndsi = vec_defs[i];
2526 vec_oprndsi.release ();
2528 continue;
2531 for (i = 0; i < nargs; i++)
2533 op = gimple_call_arg (stmt, i);
2534 if (j == 0)
2536 vec_oprnd0
2537 = vect_get_vec_def_for_operand (op, stmt, NULL);
2538 vec_oprnd1
2539 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2541 else
2543 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
2544 vec_oprnd0
2545 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
2546 vec_oprnd1
2547 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2550 vargs.quick_push (vec_oprnd0);
2551 vargs.quick_push (vec_oprnd1);
2554 new_stmt = gimple_build_call_vec (fndecl, vargs);
2555 new_temp = make_ssa_name (vec_dest, new_stmt);
2556 gimple_call_set_lhs (new_stmt, new_temp);
2557 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2559 if (j == 0)
2560 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2561 else
2562 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2564 prev_stmt_info = vinfo_for_stmt (new_stmt);
2567 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2569 break;
2571 case WIDEN:
2572 /* No current target implements this case. */
2573 return false;
2576 vargs.release ();
2578 /* The call in STMT might prevent it from being removed by DCE.
2579 We however cannot remove it here, due to the way the ssa name
2580 it defines is mapped to the new definition. So just replace
2581 the rhs of the statement with something harmless. */
2583 if (slp_node)
2584 return true;
2586 type = TREE_TYPE (scalar_dest);
2587 if (is_pattern_stmt_p (stmt_info))
2588 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
2589 else
2590 lhs = gimple_call_lhs (stmt);
2592 if (gimple_call_internal_p (stmt)
2593 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2595 /* Replace uses of the lhs of the GOMP_SIMD_LANE call outside the loop
2596 with vf - 1 rather than 0, i.e. the last iteration of the
2597 vectorized loop. */
2598 imm_use_iterator iter;
2599 use_operand_p use_p;
2600 gimple use_stmt;
2601 FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
2603 basic_block use_bb = gimple_bb (use_stmt);
2604 if (use_bb
2605 && !flow_bb_inside_loop_p (LOOP_VINFO_LOOP (loop_vinfo), use_bb))
2607 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2608 SET_USE (use_p, build_int_cst (TREE_TYPE (lhs),
2609 ncopies * nunits_out - 1));
2610 update_stmt (use_stmt);
2615 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
2616 set_vinfo_for_stmt (new_stmt, stmt_info);
2617 set_vinfo_for_stmt (stmt, NULL);
2618 STMT_VINFO_STMT (stmt_info) = new_stmt;
2619 gsi_replace (gsi, new_stmt, false);
2621 return true;
2625 struct simd_call_arg_info
2627 tree vectype; /* Vector type of the argument's definition, if any. */
2628 tree op; /* The argument, or the base of a linear one. */
2629 enum vect_def_type dt; /* Kind of definition the argument has. */
2630 HOST_WIDE_INT linear_step; /* Step, if the argument is linear in the loop. */
2631 unsigned int align; /* Known alignment of a pointer argument, in bytes. */
2632 bool simd_lane_linear; /* Linear within a simd lane only, not the whole loop. */
2635 /* Helper function of vectorizable_simd_clone_call. If OP, an SSA_NAME,
2636 is linear within a simd lane (but not within the whole loop), note it
2637 in *ARGINFO. */
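/* Example of a recognized chain (SSA names illustrative):

       _1 = GOMP_SIMD_LANE (simduid.0_2(D));
       _3 = (sizetype) _1;
       _4 = _3 * 4;
       p_5 = base_6 + _4;

   starting from p_5 the walk below reaches the GOMP_SIMD_LANE call
   and records linear_step == 4 and op == base_6 (with any constant
   addends met on the way folded into the base).  */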
2639 static void
2640 vect_simd_lane_linear (tree op, struct loop *loop,
2641 struct simd_call_arg_info *arginfo)
2643 gimple def_stmt = SSA_NAME_DEF_STMT (op);
2645 if (!is_gimple_assign (def_stmt)
2646 || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
2647 || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
2648 return;
2650 tree base = gimple_assign_rhs1 (def_stmt);
2651 HOST_WIDE_INT linear_step = 0;
2652 tree v = gimple_assign_rhs2 (def_stmt);
2653 while (TREE_CODE (v) == SSA_NAME)
2655 tree t;
2656 def_stmt = SSA_NAME_DEF_STMT (v);
2657 if (is_gimple_assign (def_stmt))
2658 switch (gimple_assign_rhs_code (def_stmt))
2660 case PLUS_EXPR:
2661 t = gimple_assign_rhs2 (def_stmt);
2662 if (linear_step || TREE_CODE (t) != INTEGER_CST)
2663 return;
2664 base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
2665 v = gimple_assign_rhs1 (def_stmt);
2666 continue;
2667 case MULT_EXPR:
2668 t = gimple_assign_rhs2 (def_stmt);
2669 if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
2670 return;
2671 linear_step = tree_to_shwi (t);
2672 v = gimple_assign_rhs1 (def_stmt);
2673 continue;
2674 CASE_CONVERT:
2675 t = gimple_assign_rhs1 (def_stmt);
2676 if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
2677 || (TYPE_PRECISION (TREE_TYPE (v))
2678 < TYPE_PRECISION (TREE_TYPE (t))))
2679 return;
2680 if (!linear_step)
2681 linear_step = 1;
2682 v = t;
2683 continue;
2684 default:
2685 return;
2687 else if (is_gimple_call (def_stmt)
2688 && gimple_call_internal_p (def_stmt)
2689 && gimple_call_internal_fn (def_stmt) == IFN_GOMP_SIMD_LANE
2690 && loop->simduid
2691 && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
2692 && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
2693 == loop->simduid))
2695 if (!linear_step)
2696 linear_step = 1;
2697 arginfo->linear_step = linear_step;
2698 arginfo->op = base;
2699 arginfo->simd_lane_linear = true;
2700 return;
2705 /* Function vectorizable_simd_clone_call.
2707 Check if STMT performs a function call that can be vectorized
2708 by calling a simd clone of the function.
2709 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2710 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
2711 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2713 static bool
2714 vectorizable_simd_clone_call (gimple stmt, gimple_stmt_iterator *gsi,
2715 gimple *vec_stmt, slp_tree slp_node)
2717 tree vec_dest;
2718 tree scalar_dest;
2719 tree op, type;
2720 tree vec_oprnd0 = NULL_TREE;
2721 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
2722 tree vectype;
2723 unsigned int nunits;
2724 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2725 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2726 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
2727 tree fndecl, new_temp, def;
2728 gimple def_stmt;
2729 gimple new_stmt = NULL;
2730 int ncopies, j;
2731 vec<simd_call_arg_info> arginfo = vNULL;
2732 vec<tree> vargs = vNULL;
2733 size_t i, nargs;
2734 tree lhs, rtype, ratype;
2735 vec<constructor_elt, va_gc> *ret_ctor_elts;
2737 /* Is STMT a vectorizable call? */
2738 if (!is_gimple_call (stmt))
2739 return false;
2741 fndecl = gimple_call_fndecl (stmt);
2742 if (fndecl == NULL_TREE)
2743 return false;
2745 struct cgraph_node *node = cgraph_node::get (fndecl);
2746 if (node == NULL || node->simd_clones == NULL)
2747 return false;
2749 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2750 return false;
2752 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2753 return false;
2755 if (gimple_call_lhs (stmt)
2756 && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
2757 return false;
2759 gcc_checking_assert (!stmt_can_throw_internal (stmt));
2761 vectype = STMT_VINFO_VECTYPE (stmt_info);
2763 if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
2764 return false;
2766 /* FORNOW */
2767 if (slp_node || PURE_SLP_STMT (stmt_info))
2768 return false;
2770 /* Process function arguments. */
2771 nargs = gimple_call_num_args (stmt);
2773 /* Bail out if the function has zero arguments. */
2774 if (nargs == 0)
2775 return false;
2777 arginfo.create (nargs);
2779 for (i = 0; i < nargs; i++)
2781 simd_call_arg_info thisarginfo;
2782 affine_iv iv;
2784 thisarginfo.linear_step = 0;
2785 thisarginfo.align = 0;
2786 thisarginfo.op = NULL_TREE;
2787 thisarginfo.simd_lane_linear = false;
2789 op = gimple_call_arg (stmt, i);
2790 if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
2791 &def_stmt, &def, &thisarginfo.dt,
2792 &thisarginfo.vectype)
2793 || thisarginfo.dt == vect_uninitialized_def)
2795 if (dump_enabled_p ())
2796 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2797 "use not simple.\n");
2798 arginfo.release ();
2799 return false;
2802 if (thisarginfo.dt == vect_constant_def
2803 || thisarginfo.dt == vect_external_def)
2804 gcc_assert (thisarginfo.vectype == NULL_TREE);
2805 else
2806 gcc_assert (thisarginfo.vectype != NULL_TREE);
2808 /* For linear arguments, the analyze phase should have saved
2809 the base and step in STMT_VINFO_SIMD_CLONE_INFO. */
2810 if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
2811 && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
2813 gcc_assert (vec_stmt);
2814 thisarginfo.linear_step
2815 = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
2816 thisarginfo.op
2817 = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
2818 thisarginfo.simd_lane_linear
2819 = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
2820 == boolean_true_node);
2821 /* If the loop has been peeled for alignment, we need to adjust it. */
2822 tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
2823 tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
2824 if (n1 != n2 && !thisarginfo.simd_lane_linear)
2826 tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
2827 tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
2828 tree opt = TREE_TYPE (thisarginfo.op);
2829 bias = fold_convert (TREE_TYPE (step), bias);
2830 bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
2831 thisarginfo.op
2832 = fold_build2 (POINTER_TYPE_P (opt)
2833 ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
2834 thisarginfo.op, bias);
2837 else if (!vec_stmt
2838 && thisarginfo.dt != vect_constant_def
2839 && thisarginfo.dt != vect_external_def
2840 && loop_vinfo
2841 && TREE_CODE (op) == SSA_NAME
2842 && simple_iv (loop, loop_containing_stmt (stmt), op,
2843 &iv, false)
2844 && tree_fits_shwi_p (iv.step))
2846 thisarginfo.linear_step = tree_to_shwi (iv.step);
2847 thisarginfo.op = iv.base;
2849 else if ((thisarginfo.dt == vect_constant_def
2850 || thisarginfo.dt == vect_external_def)
2851 && POINTER_TYPE_P (TREE_TYPE (op)))
2852 thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
2853 /* Addresses of array elements indexed by GOMP_SIMD_LANE are
2854 linear too. */
2855 if (POINTER_TYPE_P (TREE_TYPE (op))
2856 && !thisarginfo.linear_step
2857 && !vec_stmt
2858 && thisarginfo.dt != vect_constant_def
2859 && thisarginfo.dt != vect_external_def
2860 && loop_vinfo
2861 && !slp_node
2862 && TREE_CODE (op) == SSA_NAME)
2863 vect_simd_lane_linear (op, loop, &thisarginfo);
2865 arginfo.quick_push (thisarginfo);
2868 unsigned int badness = 0;
2869 struct cgraph_node *bestn = NULL;
2870 if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
2871 bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
2872 else
2873 for (struct cgraph_node *n = node->simd_clones; n != NULL;
2874 n = n->simdclone->next_clone)
2876 unsigned int this_badness = 0;
2877 if (n->simdclone->simdlen
2878 > (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
2879 || n->simdclone->nargs != nargs)
2880 continue;
2881 if (n->simdclone->simdlen
2882 < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo))
2883 this_badness += (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
2884 - exact_log2 (n->simdclone->simdlen)) * 1024;
2885 if (n->simdclone->inbranch)
2886 this_badness += 2048;
2887 int target_badness = targetm.simd_clone.usable (n);
2888 if (target_badness < 0)
2889 continue;
2890 this_badness += target_badness * 512;
2891 /* FORNOW: Have to add code to add the mask argument. */
2892 if (n->simdclone->inbranch)
2893 continue;
2894 for (i = 0; i < nargs; i++)
2896 switch (n->simdclone->args[i].arg_type)
2898 case SIMD_CLONE_ARG_TYPE_VECTOR:
2899 if (!useless_type_conversion_p
2900 (n->simdclone->args[i].orig_type,
2901 TREE_TYPE (gimple_call_arg (stmt, i))))
2902 i = -1;
2903 else if (arginfo[i].dt == vect_constant_def
2904 || arginfo[i].dt == vect_external_def
2905 || arginfo[i].linear_step)
2906 this_badness += 64;
2907 break;
2908 case SIMD_CLONE_ARG_TYPE_UNIFORM:
2909 if (arginfo[i].dt != vect_constant_def
2910 && arginfo[i].dt != vect_external_def)
2911 i = -1;
2912 break;
2913 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
2914 if (arginfo[i].dt == vect_constant_def
2915 || arginfo[i].dt == vect_external_def
2916 || (arginfo[i].linear_step
2917 != n->simdclone->args[i].linear_step))
2918 i = -1;
2919 break;
2920 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
2921 /* FORNOW */
2922 i = -1;
2923 break;
2924 case SIMD_CLONE_ARG_TYPE_MASK:
2925 gcc_unreachable ();
2927 if (i == (size_t) -1)
2928 break;
2929 if (n->simdclone->args[i].alignment > arginfo[i].align)
2931 i = -1;
2932 break;
2934 if (arginfo[i].align)
2935 this_badness += (exact_log2 (arginfo[i].align)
2936 - exact_log2 (n->simdclone->args[i].alignment));
2938 if (i == (size_t) -1)
2939 continue;
2940 if (bestn == NULL || this_badness < badness)
2942 bestn = n;
2943 badness = this_badness;
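/* Worked example of the scoring above (numbers illustrative): with
   a vectorization factor of 8, a usable notinbranch clone of
   simdlen 8 scores 0 and beats a simdlen-4 clone, which pays
   (log2 (8) - log2 (4)) * 1024 == 1024; inbranch clones would add
   2048 but are skipped entirely for now.  */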
2947 if (bestn == NULL)
2949 arginfo.release ();
2950 return false;
2953 for (i = 0; i < nargs; i++)
2954 if ((arginfo[i].dt == vect_constant_def
2955 || arginfo[i].dt == vect_external_def)
2956 && bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
2958 arginfo[i].vectype
2959 = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
2960 i)));
2961 if (arginfo[i].vectype == NULL
2962 || (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
2963 > bestn->simdclone->simdlen))
2965 arginfo.release ();
2966 return false;
2970 fndecl = bestn->decl;
2971 nunits = bestn->simdclone->simdlen;
2972 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
2974 /* If the function isn't const, only allow it in simd loops where the
2975 user has asserted that at least nunits consecutive iterations can be
2976 performed using SIMD instructions. */
2977 if ((loop == NULL || (unsigned) loop->safelen < nunits)
2978 && gimple_vuse (stmt))
2980 arginfo.release ();
2981 return false;
2984 /* Sanity check: make sure that at least one copy of the vectorized stmt
2985 needs to be generated. */
2986 gcc_assert (ncopies >= 1);
2988 if (!vec_stmt) /* transformation not required. */
2990 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
2991 for (i = 0; i < nargs; i++)
2992 if (bestn->simdclone->args[i].arg_type
2993 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
2995 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
2996 + 1);
2997 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
2998 tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
2999 ? size_type_node : TREE_TYPE (arginfo[i].op);
3000 tree ls = build_int_cst (lst, arginfo[i].linear_step);
3001 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
3002 tree sll = arginfo[i].simd_lane_linear
3003 ? boolean_true_node : boolean_false_node;
3004 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
3006 STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
3007 if (dump_enabled_p ())
3008 dump_printf_loc (MSG_NOTE, vect_location,
3009 "=== vectorizable_simd_clone_call ===\n");
3010 /* vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
3011 arginfo.release ();
3012 return true;
3015 /** Transform. **/
3017 if (dump_enabled_p ())
3018 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
3020 /* Handle def. */
3021 scalar_dest = gimple_call_lhs (stmt);
3022 vec_dest = NULL_TREE;
3023 rtype = NULL_TREE;
3024 ratype = NULL_TREE;
3025 if (scalar_dest)
3027 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3028 rtype = TREE_TYPE (TREE_TYPE (fndecl));
3029 if (TREE_CODE (rtype) == ARRAY_TYPE)
3031 ratype = rtype;
3032 rtype = TREE_TYPE (ratype);
3036 prev_stmt_info = NULL;
3037 for (j = 0; j < ncopies; ++j)
3039 /* Build argument list for the vectorized call. */
3040 if (j == 0)
3041 vargs.create (nargs);
3042 else
3043 vargs.truncate (0);
3045 for (i = 0; i < nargs; i++)
3047 unsigned int k, l, m, o;
3048 tree atype;
3049 op = gimple_call_arg (stmt, i);
3050 switch (bestn->simdclone->args[i].arg_type)
3052 case SIMD_CLONE_ARG_TYPE_VECTOR:
3053 atype = bestn->simdclone->args[i].vector_type;
3054 o = nunits / TYPE_VECTOR_SUBPARTS (atype);
3055 for (m = j * o; m < (j + 1) * o; m++)
3057 if (TYPE_VECTOR_SUBPARTS (atype)
3058 < TYPE_VECTOR_SUBPARTS (arginfo[i].vectype))
3060 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
3061 k = (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
3062 / TYPE_VECTOR_SUBPARTS (atype));
3063 gcc_assert ((k & (k - 1)) == 0);
3064 if (m == 0)
3065 vec_oprnd0
3066 = vect_get_vec_def_for_operand (op, stmt, NULL);
3067 else
3069 vec_oprnd0 = arginfo[i].op;
3070 if ((m & (k - 1)) == 0)
3071 vec_oprnd0
3072 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
3073 vec_oprnd0);
3075 arginfo[i].op = vec_oprnd0;
3076 vec_oprnd0
3077 = build3 (BIT_FIELD_REF, atype, vec_oprnd0,
3078 size_int (prec),
3079 bitsize_int ((m & (k - 1)) * prec));
3080 new_stmt
3081 = gimple_build_assign (make_ssa_name (atype),
3082 vec_oprnd0);
3083 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3084 vargs.safe_push (gimple_assign_lhs (new_stmt));
3086 else
3088 k = (TYPE_VECTOR_SUBPARTS (atype)
3089 / TYPE_VECTOR_SUBPARTS (arginfo[i].vectype));
3090 gcc_assert ((k & (k - 1)) == 0);
3091 vec<constructor_elt, va_gc> *ctor_elts;
3092 if (k != 1)
3093 vec_alloc (ctor_elts, k);
3094 else
3095 ctor_elts = NULL;
3096 for (l = 0; l < k; l++)
3098 if (m == 0 && l == 0)
3099 vec_oprnd0
3100 = vect_get_vec_def_for_operand (op, stmt, NULL);
3101 else
3102 vec_oprnd0
3103 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
3104 arginfo[i].op);
3105 arginfo[i].op = vec_oprnd0;
3106 if (k == 1)
3107 break;
3108 CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
3109 vec_oprnd0);
3111 if (k == 1)
3112 vargs.safe_push (vec_oprnd0);
3113 else
3115 vec_oprnd0 = build_constructor (atype, ctor_elts);
3116 new_stmt
3117 = gimple_build_assign (make_ssa_name (atype),
3118 vec_oprnd0);
3119 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3120 vargs.safe_push (gimple_assign_lhs (new_stmt));
3124 break;
3125 case SIMD_CLONE_ARG_TYPE_UNIFORM:
3126 vargs.safe_push (op);
3127 break;
3128 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
3129 if (j == 0)
3131 gimple_seq stmts;
3132 arginfo[i].op
3133 = force_gimple_operand (arginfo[i].op, &stmts, true,
3134 NULL_TREE);
3135 if (stmts != NULL)
3137 basic_block new_bb;
3138 edge pe = loop_preheader_edge (loop);
3139 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
3140 gcc_assert (!new_bb);
3142 if (arginfo[i].simd_lane_linear)
3144 vargs.safe_push (arginfo[i].op);
3145 break;
3147 tree phi_res = copy_ssa_name (op);
3148 gphi *new_phi = create_phi_node (phi_res, loop->header);
3149 set_vinfo_for_stmt (new_phi,
3150 new_stmt_vec_info (new_phi, loop_vinfo,
3151 NULL));
3152 add_phi_arg (new_phi, arginfo[i].op,
3153 loop_preheader_edge (loop), UNKNOWN_LOCATION);
3154 enum tree_code code
3155 = POINTER_TYPE_P (TREE_TYPE (op))
3156 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3157 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3158 ? sizetype : TREE_TYPE (op);
3159 widest_int cst
3160 = wi::mul (bestn->simdclone->args[i].linear_step,
3161 ncopies * nunits);
3162 tree tcst = wide_int_to_tree (type, cst);
3163 tree phi_arg = copy_ssa_name (op);
3164 new_stmt
3165 = gimple_build_assign (phi_arg, code, phi_res, tcst);
3166 gimple_stmt_iterator si = gsi_after_labels (loop->header);
3167 gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
3168 set_vinfo_for_stmt (new_stmt,
3169 new_stmt_vec_info (new_stmt, loop_vinfo,
3170 NULL));
3171 add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
3172 UNKNOWN_LOCATION);
3173 arginfo[i].op = phi_res;
3174 vargs.safe_push (phi_res);
3176 else
3178 enum tree_code code
3179 = POINTER_TYPE_P (TREE_TYPE (op))
3180 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3181 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3182 ? sizetype : TREE_TYPE (op);
3183 widest_int cst
3184 = wi::mul (bestn->simdclone->args[i].linear_step,
3185 j * nunits);
3186 tree tcst = wide_int_to_tree (type, cst);
3187 new_temp = make_ssa_name (TREE_TYPE (op));
3188 new_stmt = gimple_build_assign (new_temp, code,
3189 arginfo[i].op, tcst);
3190 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3191 vargs.safe_push (new_temp);
3193 break;
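/* For a linear argument the j == 0 branch above thus builds IL of
   the shape (names illustrative)

       # op_1 = PHI <base_2 (preheader), op_next_3 (latch)>
       ...
       op_next_3 = op_1 + step * ncopies * nunits;

   and the j > 0 branch offsets from the PHI result by
   step * j * nunits for each further copy.  */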
3194 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
3195 default:
3196 gcc_unreachable ();
3200 new_stmt = gimple_build_call_vec (fndecl, vargs);
3201 if (vec_dest)
3203 gcc_assert (ratype || TYPE_VECTOR_SUBPARTS (rtype) == nunits);
3204 if (ratype)
3205 new_temp = create_tmp_var (ratype);
3206 else if (TYPE_VECTOR_SUBPARTS (vectype)
3207 == TYPE_VECTOR_SUBPARTS (rtype))
3208 new_temp = make_ssa_name (vec_dest, new_stmt);
3209 else
3210 new_temp = make_ssa_name (rtype, new_stmt);
3211 gimple_call_set_lhs (new_stmt, new_temp);
3213 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3215 if (vec_dest)
3217 if (TYPE_VECTOR_SUBPARTS (vectype) < nunits)
3219 unsigned int k, l;
3220 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
3221 k = nunits / TYPE_VECTOR_SUBPARTS (vectype);
3222 gcc_assert ((k & (k - 1)) == 0);
3223 for (l = 0; l < k; l++)
3225 tree t;
3226 if (ratype)
3228 t = build_fold_addr_expr (new_temp);
3229 t = build2 (MEM_REF, vectype, t,
3230 build_int_cst (TREE_TYPE (t),
3231 l * prec / BITS_PER_UNIT));
3233 else
3234 t = build3 (BIT_FIELD_REF, vectype, new_temp,
3235 size_int (prec), bitsize_int (l * prec));
3236 new_stmt
3237 = gimple_build_assign (make_ssa_name (vectype), t);
3238 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3239 if (j == 0 && l == 0)
3240 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3241 else
3242 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3244 prev_stmt_info = vinfo_for_stmt (new_stmt);
3247 if (ratype)
3249 tree clobber = build_constructor (ratype, NULL);
3250 TREE_THIS_VOLATILE (clobber) = 1;
3251 new_stmt = gimple_build_assign (new_temp, clobber);
3252 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3254 continue;
3256 else if (TYPE_VECTOR_SUBPARTS (vectype) > nunits)
3258 unsigned int k = (TYPE_VECTOR_SUBPARTS (vectype)
3259 / TYPE_VECTOR_SUBPARTS (rtype));
3260 gcc_assert ((k & (k - 1)) == 0);
3261 if ((j & (k - 1)) == 0)
3262 vec_alloc (ret_ctor_elts, k);
3263 if (ratype)
3265 unsigned int m, o = nunits / TYPE_VECTOR_SUBPARTS (rtype);
3266 for (m = 0; m < o; m++)
3268 tree tem = build4 (ARRAY_REF, rtype, new_temp,
3269 size_int (m), NULL_TREE, NULL_TREE);
3270 new_stmt
3271 = gimple_build_assign (make_ssa_name (rtype), tem);
3272 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3273 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
3274 gimple_assign_lhs (new_stmt));
3276 tree clobber = build_constructor (ratype, NULL);
3277 TREE_THIS_VOLATILE (clobber) = 1;
3278 new_stmt = gimple_build_assign (new_temp, clobber);
3279 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3281 else
3282 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
3283 if ((j & (k - 1)) != k - 1)
3284 continue;
3285 vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
3286 new_stmt
3287 = gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
3288 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3290 if ((unsigned) j == k - 1)
3291 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3292 else
3293 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3295 prev_stmt_info = vinfo_for_stmt (new_stmt);
3296 continue;
3298 else if (ratype)
3300 tree t = build_fold_addr_expr (new_temp);
3301 t = build2 (MEM_REF, vectype, t,
3302 build_int_cst (TREE_TYPE (t), 0));
3303 new_stmt
3304 = gimple_build_assign (make_ssa_name (vec_dest), t);
3305 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3306 tree clobber = build_constructor (ratype, NULL);
3307 TREE_THIS_VOLATILE (clobber) = 1;
3308 vect_finish_stmt_generation (stmt,
3309 gimple_build_assign (new_temp,
3310 clobber), gsi);
3314 if (j == 0)
3315 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3316 else
3317 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3319 prev_stmt_info = vinfo_for_stmt (new_stmt);
3322 vargs.release ();
3324 /* The call in STMT might prevent it from being removed by DCE.
3325 We however cannot remove it here, due to the way the ssa name
3326 it defines is mapped to the new definition. So just replace
3327 the rhs of the statement with something harmless. */
3329 if (slp_node)
3330 return true;
3332 if (scalar_dest)
3334 type = TREE_TYPE (scalar_dest);
3335 if (is_pattern_stmt_p (stmt_info))
3336 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
3337 else
3338 lhs = gimple_call_lhs (stmt);
3339 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
3341 else
3342 new_stmt = gimple_build_nop ();
3343 set_vinfo_for_stmt (new_stmt, stmt_info);
3344 set_vinfo_for_stmt (stmt, NULL);
3345 STMT_VINFO_STMT (stmt_info) = new_stmt;
3346 gsi_replace (gsi, new_stmt, true);
3347 unlink_stmt_vdef (stmt);
3349 return true;
3353 /* Function vect_gen_widened_results_half
3355 Create a vector stmt whose code, operand type, and result variable
3356 are CODE, OP_TYPE, and VEC_DEST, and whose arguments are VEC_OPRND0
3357 and VEC_OPRND1. The new vector stmt is to be inserted at GSI.
3358 In case CODE is a CALL_EXPR, a call to DECL needs to be created
3359 instead (DECL is a function-decl of a target builtin).
3360 STMT is the original scalar stmt that we are vectorizing. */
3362 static gimple
3363 vect_gen_widened_results_half (enum tree_code code,
3364 tree decl,
3365 tree vec_oprnd0, tree vec_oprnd1, int op_type,
3366 tree vec_dest, gimple_stmt_iterator *gsi,
3367 gimple stmt)
3369 gimple new_stmt;
3370 tree new_temp;
3372 /* Generate half of the widened result: */
3373 if (code == CALL_EXPR)
3375 /* Target specific support */
3376 if (op_type == binary_op)
3377 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
3378 else
3379 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
3380 new_temp = make_ssa_name (vec_dest, new_stmt);
3381 gimple_call_set_lhs (new_stmt, new_temp);
3383 else
3385 /* Generic support */
3386 gcc_assert (op_type == TREE_CODE_LENGTH (code));
3387 if (op_type != binary_op)
3388 vec_oprnd1 = NULL;
3389 new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
3390 new_temp = make_ssa_name (vec_dest, new_stmt);
3391 gimple_assign_set_lhs (new_stmt, new_temp);
3393 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3395 return new_stmt;
3399 /* Get vectorized definitions for loop-based vectorization. For the first
3400 operand we call vect_get_vec_def_for_operand() (with OPRND containing
3401 the scalar operand), and for the rest we get a copy with
3402 vect_get_vec_def_for_stmt_copy() using the previous vector definition
3403 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
3404 The vectors are collected into VEC_OPRNDS. */
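/* E.g. with MULTI_STEP_CVT == 1 this collects four defs in
   VEC_OPRNDS: the def created for the scalar operand, its stmt
   copy, and two more copies from the recursive call; in general
   2 * (MULTI_STEP_CVT + 1) defs.  */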
3406 static void
3407 vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
3408 vec<tree> *vec_oprnds, int multi_step_cvt)
3410 tree vec_oprnd;
3412 /* Get first vector operand. */
3413 /* All the vector operands except the very first one (the one created
3414 from the scalar operand) are stmt copies. */
3415 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
3416 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
3417 else
3418 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
3420 vec_oprnds->quick_push (vec_oprnd);
3422 /* Get second vector operand. */
3423 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
3424 vec_oprnds->quick_push (vec_oprnd);
3426 *oprnd = vec_oprnd;
3428 /* For conversion in multiple steps, continue to get operands
3429 recursively. */
3430 if (multi_step_cvt)
3431 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
3435 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
3436 For multi-step conversions store the resulting vectors and call the function
3437 recursively. */
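/* Example of a two-step demotion (widths illustrative): narrowing
   int to char with four V4SI operands first packs adjacent pairs
   into two V8HI vectors, then recurses once more with
   VEC_PACK_TRUNC_EXPR to pack those into the final V16QI vector.  */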
3439 static void
3440 vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
3441 int multi_step_cvt, gimple stmt,
3442 vec<tree> vec_dsts,
3443 gimple_stmt_iterator *gsi,
3444 slp_tree slp_node, enum tree_code code,
3445 stmt_vec_info *prev_stmt_info)
3447 unsigned int i;
3448 tree vop0, vop1, new_tmp, vec_dest;
3449 gimple new_stmt;
3450 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3452 vec_dest = vec_dsts.pop ();
3454 for (i = 0; i < vec_oprnds->length (); i += 2)
3456 /* Create demotion operation. */
3457 vop0 = (*vec_oprnds)[i];
3458 vop1 = (*vec_oprnds)[i + 1];
3459 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
3460 new_tmp = make_ssa_name (vec_dest, new_stmt);
3461 gimple_assign_set_lhs (new_stmt, new_tmp);
3462 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3464 if (multi_step_cvt)
3465 /* Store the resulting vector for next recursive call. */
3466 (*vec_oprnds)[i/2] = new_tmp;
3467 else
3469 /* This is the last step of the conversion sequence. Store the
3470 vectors in SLP_NODE or in vector info of the scalar statement
3471 (or in STMT_VINFO_RELATED_STMT chain). */
3472 if (slp_node)
3473 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3474 else
3476 if (!*prev_stmt_info)
3477 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3478 else
3479 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
3481 *prev_stmt_info = vinfo_for_stmt (new_stmt);
3486 /* For multi-step demotion operations we first generate demotion operations
3487 from the source type to the intermediate types, and then combine the
3488 results (stored in VEC_OPRNDS) with a further demotion operation to the
3489 destination type. */
3490 if (multi_step_cvt)
3492 /* At each level of recursion we have half of the operands we had at the
3493 previous level. */
3494 vec_oprnds->truncate ((i+1)/2);
3495 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
3496 stmt, vec_dsts, gsi, slp_node,
3497 VEC_PACK_TRUNC_EXPR,
3498 prev_stmt_info);
3501 vec_dsts.quick_push (vec_dest);
3505 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
3506 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
3507 the resulting vectors and call the function recursively. */
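/* Example (widths illustrative): promoting V8HI operands to V4SI
   results emits a lo half and a hi half per input vector, e.g. via
   VEC_UNPACK_LO_EXPR / VEC_UNPACK_HI_EXPR or two target builtin
   calls, so VEC_OPRNDS0 doubles in length on each step.  */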
3509 static void
3510 vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
3511 vec<tree> *vec_oprnds1,
3512 gimple stmt, tree vec_dest,
3513 gimple_stmt_iterator *gsi,
3514 enum tree_code code1,
3515 enum tree_code code2, tree decl1,
3516 tree decl2, int op_type)
3518 int i;
3519 tree vop0, vop1, new_tmp1, new_tmp2;
3520 gimple new_stmt1, new_stmt2;
3521 vec<tree> vec_tmp = vNULL;
3523 vec_tmp.create (vec_oprnds0->length () * 2);
3524 FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
3526 if (op_type == binary_op)
3527 vop1 = (*vec_oprnds1)[i];
3528 else
3529 vop1 = NULL_TREE;
3531 /* Generate the two halves of the promotion operation. */
3532 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
3533 op_type, vec_dest, gsi, stmt);
3534 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
3535 op_type, vec_dest, gsi, stmt);
3536 if (is_gimple_call (new_stmt1))
3538 new_tmp1 = gimple_call_lhs (new_stmt1);
3539 new_tmp2 = gimple_call_lhs (new_stmt2);
3541 else
3543 new_tmp1 = gimple_assign_lhs (new_stmt1);
3544 new_tmp2 = gimple_assign_lhs (new_stmt2);
3547 /* Store the results for the next step. */
3548 vec_tmp.quick_push (new_tmp1);
3549 vec_tmp.quick_push (new_tmp2);
3552 vec_oprnds0->release ();
3553 *vec_oprnds0 = vec_tmp;
3557 /* Check if STMT performs a conversion operation that can be vectorized.
3558 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3559 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3560 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3562 static bool
3563 vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
3564 gimple *vec_stmt, slp_tree slp_node)
3566 tree vec_dest;
3567 tree scalar_dest;
3568 tree op0, op1 = NULL_TREE;
3569 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
3570 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3571 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3572 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
3573 enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
3574 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
3575 tree new_temp;
3576 tree def;
3577 gimple def_stmt;
3578 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
3579 gimple new_stmt = NULL;
3580 stmt_vec_info prev_stmt_info;
3581 int nunits_in;
3582 int nunits_out;
3583 tree vectype_out, vectype_in;
3584 int ncopies, i, j;
3585 tree lhs_type, rhs_type;
3586 enum { NARROW, NONE, WIDEN } modifier;
3587 vec<tree> vec_oprnds0 = vNULL;
3588 vec<tree> vec_oprnds1 = vNULL;
3589 tree vop0;
3590 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3591 int multi_step_cvt = 0;
3592 vec<tree> vec_dsts = vNULL;
3593 vec<tree> interm_types = vNULL;
3594 tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
3595 int op_type;
3596 machine_mode rhs_mode;
3597 unsigned short fltsz;
3599 /* Is STMT a vectorizable conversion? */
3601 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3602 return false;
3604 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3605 return false;
3607 if (!is_gimple_assign (stmt))
3608 return false;
3610 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
3611 return false;
3613 code = gimple_assign_rhs_code (stmt);
3614 if (!CONVERT_EXPR_CODE_P (code)
3615 && code != FIX_TRUNC_EXPR
3616 && code != FLOAT_EXPR
3617 && code != WIDEN_MULT_EXPR
3618 && code != WIDEN_LSHIFT_EXPR)
3619 return false;
3621 op_type = TREE_CODE_LENGTH (code);
3623 /* Check types of lhs and rhs. */
3624 scalar_dest = gimple_assign_lhs (stmt);
3625 lhs_type = TREE_TYPE (scalar_dest);
3626 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3628 op0 = gimple_assign_rhs1 (stmt);
3629 rhs_type = TREE_TYPE (op0);
3631 if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
3632 && !((INTEGRAL_TYPE_P (lhs_type)
3633 && INTEGRAL_TYPE_P (rhs_type))
3634 || (SCALAR_FLOAT_TYPE_P (lhs_type)
3635 && SCALAR_FLOAT_TYPE_P (rhs_type))))
3636 return false;
3638 if ((INTEGRAL_TYPE_P (lhs_type)
3639 && (TYPE_PRECISION (lhs_type)
3640 != GET_MODE_PRECISION (TYPE_MODE (lhs_type))))
3641 || (INTEGRAL_TYPE_P (rhs_type)
3642 && (TYPE_PRECISION (rhs_type)
3643 != GET_MODE_PRECISION (TYPE_MODE (rhs_type)))))
3645 if (dump_enabled_p ())
3646 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3647 "type conversion to/from bit-precision unsupported."
3648 "\n");
3649 return false;
3652 /* Check the operands of the operation. */
3653 if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
3654 &def_stmt, &def, &dt[0], &vectype_in))
3656 if (dump_enabled_p ())
3657 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3658 "use not simple.\n");
3659 return false;
3661 if (op_type == binary_op)
3663 bool ok;
3665 op1 = gimple_assign_rhs2 (stmt);
3666 gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
3667 /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
3668 OP1. */
3669 if (CONSTANT_CLASS_P (op0))
3670 ok = vect_is_simple_use_1 (op1, stmt, loop_vinfo, bb_vinfo,
3671 &def_stmt, &def, &dt[1], &vectype_in);
3672 else
3673 ok = vect_is_simple_use (op1, stmt, loop_vinfo, bb_vinfo, &def_stmt,
3674 &def, &dt[1]);
3676 if (!ok)
3678 if (dump_enabled_p ())
3679 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3680 "use not simple.\n");
3681 return false;
3685 /* If op0 is an external or constant def, use a vector type of
3686 the same size as the output vector type. */
3687 if (!vectype_in)
3688 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
3689 if (vec_stmt)
3690 gcc_assert (vectype_in);
3691 if (!vectype_in)
3693 if (dump_enabled_p ())
3695 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3696 "no vectype for scalar type ");
3697 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
3698 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3701 return false;
3704 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
3705 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
3706 if (nunits_in < nunits_out)
3707 modifier = NARROW;
3708 else if (nunits_out == nunits_in)
3709 modifier = NONE;
3710 else
3711 modifier = WIDEN;
3713 /* Multiple types in SLP are handled by creating the appropriate number of
3714 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3715 case of SLP. */
3716 if (slp_node || PURE_SLP_STMT (stmt_info))
3717 ncopies = 1;
3718 else if (modifier == NARROW)
3719 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
3720 else
3721 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
3723 /* Sanity check: make sure that at least one copy of the vectorized stmt
3724 needs to be generated. */
3725 gcc_assert (ncopies >= 1);
3727 /* Supportable by target? */
3728 switch (modifier)
3730 case NONE:
3731 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
3732 return false;
3733 if (supportable_convert_operation (code, vectype_out, vectype_in,
3734 &decl1, &code1))
3735 break;
3736 /* FALLTHRU */
3737 unsupported:
3738 if (dump_enabled_p ())
3739 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3740 "conversion not supported by target.\n");
3741 return false;
3743 case WIDEN:
3744 if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
3745 &code1, &code2, &multi_step_cvt,
3746 &interm_types))
3748 /* A binary widening operation can only be supported directly by the
3749 architecture. */
3750 gcc_assert (!(multi_step_cvt && op_type == binary_op));
3751 break;
3754 if (code != FLOAT_EXPR
3755 || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
3756 <= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
3757 goto unsupported;
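/* Try a multi-step FLOAT_EXPR through successively wider integer
   modes, e.g. (double)short_var may become a NOP_EXPR widening
   short -> int followed by a FLOAT_EXPR int -> double.  */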
3759 rhs_mode = TYPE_MODE (rhs_type);
3760 fltsz = GET_MODE_SIZE (TYPE_MODE (lhs_type));
3761 for (rhs_mode = GET_MODE_2XWIDER_MODE (TYPE_MODE (rhs_type));
3762 rhs_mode != VOIDmode && GET_MODE_SIZE (rhs_mode) <= fltsz;
3763 rhs_mode = GET_MODE_2XWIDER_MODE (rhs_mode))
3765 cvt_type
3766 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
3767 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
3768 if (cvt_type == NULL_TREE)
3769 goto unsupported;
3771 if (GET_MODE_SIZE (rhs_mode) == fltsz)
3773 if (!supportable_convert_operation (code, vectype_out,
3774 cvt_type, &decl1, &codecvt1))
3775 goto unsupported;
3777 else if (!supportable_widening_operation (code, stmt, vectype_out,
3778 cvt_type, &codecvt1,
3779 &codecvt2, &multi_step_cvt,
3780 &interm_types))
3781 continue;
3782 else
3783 gcc_assert (multi_step_cvt == 0);
3785 if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
3786 vectype_in, &code1, &code2,
3787 &multi_step_cvt, &interm_types))
3788 break;
3791 if (rhs_mode == VOIDmode || GET_MODE_SIZE (rhs_mode) > fltsz)
3792 goto unsupported;
3794 if (GET_MODE_SIZE (rhs_mode) == fltsz)
3795 codecvt2 = ERROR_MARK;
3796 else
3798 multi_step_cvt++;
3799 interm_types.safe_push (cvt_type);
3800 cvt_type = NULL_TREE;
3802 break;
3804 case NARROW:
3805 gcc_assert (op_type == unary_op);
3806 if (supportable_narrowing_operation (code, vectype_out, vectype_in,
3807 &code1, &multi_step_cvt,
3808 &interm_types))
3809 break;
3811 if (code != FIX_TRUNC_EXPR
3812 || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
3813 >= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
3814 goto unsupported;
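/* Try a FIX_TRUNC_EXPR to a same-width integer type followed by an
   integer narrowing, e.g. (short)double_var may become a
   FIX_TRUNC_EXPR double -> long followed by a NOP_EXPR long -> short.  */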
3816 rhs_mode = TYPE_MODE (rhs_type);
3817 cvt_type
3818 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
3819 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
3820 if (cvt_type == NULL_TREE)
3821 goto unsupported;
3822 if (!supportable_convert_operation (code, cvt_type, vectype_in,
3823 &decl1, &codecvt1))
3824 goto unsupported;
3825 if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
3826 &code1, &multi_step_cvt,
3827 &interm_types))
3828 break;
3829 goto unsupported;
3831 default:
3832 gcc_unreachable ();
3835 if (!vec_stmt) /* transformation not required. */
3837 if (dump_enabled_p ())
3838 dump_printf_loc (MSG_NOTE, vect_location,
3839 "=== vectorizable_conversion ===\n");
3840 if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
3842 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
3843 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
3845 else if (modifier == NARROW)
3847 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
3848 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
3850 else
3852 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
3853 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
3855 interm_types.release ();
3856 return true;
3859 /** Transform. **/
3860 if (dump_enabled_p ())
3861 dump_printf_loc (MSG_NOTE, vect_location,
3862 "transform conversion. ncopies = %d.\n", ncopies);
3864 if (op_type == binary_op)
3866 if (CONSTANT_CLASS_P (op0))
3867 op0 = fold_convert (TREE_TYPE (op1), op0);
3868 else if (CONSTANT_CLASS_P (op1))
3869 op1 = fold_convert (TREE_TYPE (op0), op1);
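/* Folding the constant into the other operand's type ensures that both
   vector operands of the widening stmt get the same vectype.  */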
3872 /* In case of multi-step conversion, we first generate conversion operations
3873 to the intermediate types, and then from those types to the final one.
3874 We create vector destinations for the intermediate types (TYPES) received
3875 from supportable_*_operation, and store them in the correct order
3876 for future use in vect_create_vectorized_*_stmts (). */
3877 vec_dsts.create (multi_step_cvt + 1);
3878 vec_dest = vect_create_destination_var (scalar_dest,
3879 (cvt_type && modifier == WIDEN)
3880 ? cvt_type : vectype_out);
3881 vec_dsts.quick_push (vec_dest);
3883 if (multi_step_cvt)
3885 for (i = interm_types.length () - 1;
3886 interm_types.iterate (i, &intermediate_type); i--)
3888 vec_dest = vect_create_destination_var (scalar_dest,
3889 intermediate_type);
3890 vec_dsts.quick_push (vec_dest);
3894 if (cvt_type)
3895 vec_dest = vect_create_destination_var (scalar_dest,
3896 modifier == WIDEN
3897 ? vectype_out : cvt_type);
3899 if (!slp_node)
3901 if (modifier == WIDEN)
3903 vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
3904 if (op_type == binary_op)
3905 vec_oprnds1.create (1);
3907 else if (modifier == NARROW)
3908 vec_oprnds0.create (
3909 2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
3911 else if (code == WIDEN_LSHIFT_EXPR)
3912 vec_oprnds1.create (slp_node->vec_stmts_size);
3914 last_oprnd = op0;
3915 prev_stmt_info = NULL;
3916 switch (modifier)
3918 case NONE:
3919 for (j = 0; j < ncopies; j++)
3921 if (j == 0)
3922 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node,
3923 -1);
3924 else
3925 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
3927 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
3929 /* Arguments are ready.  Create the new vector stmt. */
3930 if (code1 == CALL_EXPR)
3932 new_stmt = gimple_build_call (decl1, 1, vop0);
3933 new_temp = make_ssa_name (vec_dest, new_stmt);
3934 gimple_call_set_lhs (new_stmt, new_temp);
3936 else
3938 gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
3939 new_stmt = gimple_build_assign (vec_dest, code1, vop0);
3940 new_temp = make_ssa_name (vec_dest, new_stmt);
3941 gimple_assign_set_lhs (new_stmt, new_temp);
3944 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3945 if (slp_node)
3946 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3947 else
3949 if (!prev_stmt_info)
3950 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3951 else
3952 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3953 prev_stmt_info = vinfo_for_stmt (new_stmt);
3957 break;
3959 case WIDEN:
3960 /* In case the vectorization factor (VF) is bigger than the number
3961 of elements that we can fit in a vectype (nunits), we have to
3962 generate more than one vector stmt - i.e., we need to "unroll"
3963 the vector stmt by a factor VF/nunits. */
3964 for (j = 0; j < ncopies; j++)
3966 /* Handle uses. */
3967 if (j == 0)
3969 if (slp_node)
3971 if (code == WIDEN_LSHIFT_EXPR)
3973 unsigned int k;
3975 vec_oprnd1 = op1;
3976 /* Store vec_oprnd1 for every vector stmt to be created
3977 for SLP_NODE. We check during the analysis that all
3978 the shift arguments are the same. */
3979 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
3980 vec_oprnds1.quick_push (vec_oprnd1);
3982 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
3983 slp_node, -1);
3985 else
3986 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
3987 &vec_oprnds1, slp_node, -1);
3989 else
3991 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
3992 vec_oprnds0.quick_push (vec_oprnd0);
3993 if (op_type == binary_op)
3995 if (code == WIDEN_LSHIFT_EXPR)
3996 vec_oprnd1 = op1;
3997 else
3998 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt,
3999 NULL);
4000 vec_oprnds1.quick_push (vec_oprnd1);
4004 else
4006 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
4007 vec_oprnds0.truncate (0);
4008 vec_oprnds0.quick_push (vec_oprnd0);
4009 if (op_type == binary_op)
4011 if (code == WIDEN_LSHIFT_EXPR)
4012 vec_oprnd1 = op1;
4013 else
4014 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
4015 vec_oprnd1);
4016 vec_oprnds1.truncate (0);
4017 vec_oprnds1.quick_push (vec_oprnd1);
4021 /* Arguments are ready. Create the new vector stmts. */
4022 for (i = multi_step_cvt; i >= 0; i--)
4024 tree this_dest = vec_dsts[i];
4025 enum tree_code c1 = code1, c2 = code2;
4026 if (i == 0 && codecvt2 != ERROR_MARK)
4028 c1 = codecvt1;
4029 c2 = codecvt2;
4031 vect_create_vectorized_promotion_stmts (&vec_oprnds0,
4032 &vec_oprnds1,
4033 stmt, this_dest, gsi,
4034 c1, c2, decl1, decl2,
4035 op_type);
4038 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4040 if (cvt_type)
4042 if (codecvt1 == CALL_EXPR)
4044 new_stmt = gimple_build_call (decl1, 1, vop0);
4045 new_temp = make_ssa_name (vec_dest, new_stmt);
4046 gimple_call_set_lhs (new_stmt, new_temp);
4048 else
4050 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
4051 new_temp = make_ssa_name (vec_dest);
4052 new_stmt = gimple_build_assign (new_temp, codecvt1,
4053 vop0);
4056 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4058 else
4059 new_stmt = SSA_NAME_DEF_STMT (vop0);
4061 if (slp_node)
4062 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4063 else
4065 if (!prev_stmt_info)
4066 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
4067 else
4068 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4069 prev_stmt_info = vinfo_for_stmt (new_stmt);
4074 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4075 break;
4077 case NARROW:
4078 /* In case the vectorization factor (VF) is bigger than the number
4079 of elements that we can fit in a vectype (nunits), we have to
4080 generate more than one vector stmt - i.e., we need to "unroll"
4081 the vector stmt by a factor VF/nunits. */
4082 for (j = 0; j < ncopies; j++)
4084 /* Handle uses. */
4085 if (slp_node)
4086 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4087 slp_node, -1);
4088 else
4090 vec_oprnds0.truncate (0);
4091 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
4092 vect_pow2 (multi_step_cvt) - 1);
4095 /* Arguments are ready. Create the new vector stmts. */
4096 if (cvt_type)
4097 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4099 if (codecvt1 == CALL_EXPR)
4101 new_stmt = gimple_build_call (decl1, 1, vop0);
4102 new_temp = make_ssa_name (vec_dest, new_stmt);
4103 gimple_call_set_lhs (new_stmt, new_temp);
4105 else
4107 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
4108 new_temp = make_ssa_name (vec_dest);
4109 new_stmt = gimple_build_assign (new_temp, codecvt1,
4110 vop0);
4113 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4114 vec_oprnds0[i] = new_temp;
4117 vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
4118 stmt, vec_dsts, gsi,
4119 slp_node, code1,
4120 &prev_stmt_info);
4123 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4124 break;
4127 vec_oprnds0.release ();
4128 vec_oprnds1.release ();
4129 vec_dsts.release ();
4130 interm_types.release ();
4132 return true;
4136 /* Function vectorizable_assignment.
4138 Check if STMT performs an assignment (copy) that can be vectorized.
4139 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4140 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4141 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4143 static bool
4144 vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
4145 gimple *vec_stmt, slp_tree slp_node)
4147 tree vec_dest;
4148 tree scalar_dest;
4149 tree op;
4150 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4151 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4152 tree new_temp;
4153 tree def;
4154 gimple def_stmt;
4155 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4156 int ncopies;
4157 int i, j;
4158 vec<tree> vec_oprnds = vNULL;
4159 tree vop;
4160 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4161 gimple new_stmt = NULL;
4162 stmt_vec_info prev_stmt_info = NULL;
4163 enum tree_code code;
4164 tree vectype_in;
4166 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4167 return false;
4169 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4170 return false;
4172 /* Is vectorizable assignment? */
4173 if (!is_gimple_assign (stmt))
4174 return false;
4176 scalar_dest = gimple_assign_lhs (stmt);
4177 if (TREE_CODE (scalar_dest) != SSA_NAME)
4178 return false;
4180 code = gimple_assign_rhs_code (stmt);
4181 if (gimple_assign_single_p (stmt)
4182 || code == PAREN_EXPR
4183 || CONVERT_EXPR_CODE_P (code))
4184 op = gimple_assign_rhs1 (stmt);
4185 else
4186 return false;
4188 if (code == VIEW_CONVERT_EXPR)
4189 op = TREE_OPERAND (op, 0);
4191 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4192 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4194 /* Multiple types in SLP are handled by creating the appropriate number of
4195 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4196 case of SLP. */
4197 if (slp_node || PURE_SLP_STMT (stmt_info))
4198 ncopies = 1;
4199 else
4200 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4202 gcc_assert (ncopies >= 1);
4204 if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
4205 &def_stmt, &def, &dt[0], &vectype_in))
4207 if (dump_enabled_p ())
4208 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4209 "use not simple.\n");
4210 return false;
4213 /* We can handle NOP_EXPR conversions that do not change the number
4214 of elements or the vector size. */
4215 if ((CONVERT_EXPR_CODE_P (code)
4216 || code == VIEW_CONVERT_EXPR)
4217 && (!vectype_in
4218 || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
4219 || (GET_MODE_SIZE (TYPE_MODE (vectype))
4220 != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
4221 return false;
4223 /* We do not handle bit-precision changes. */
4224 if ((CONVERT_EXPR_CODE_P (code)
4225 || code == VIEW_CONVERT_EXPR)
4226 && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
4227 && ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4228 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4229 || ((TYPE_PRECISION (TREE_TYPE (op))
4230 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op))))))
4231 /* But a conversion that does not change the bit-pattern is ok. */
4232 && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4233 > TYPE_PRECISION (TREE_TYPE (op)))
4234 && TYPE_UNSIGNED (TREE_TYPE (op))))
4236 if (dump_enabled_p ())
4237 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4238 "type conversion to/from bit-precision "
4239 "unsupported.\n");
4240 return false;
4243 if (!vec_stmt) /* transformation not required. */
4245 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
4246 if (dump_enabled_p ())
4247 dump_printf_loc (MSG_NOTE, vect_location,
4248 "=== vectorizable_assignment ===\n");
4249 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4250 return true;
4253 /** Transform. **/
4254 if (dump_enabled_p ())
4255 dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");
4257 /* Handle def. */
4258 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4260 /* Handle use. */
4261 for (j = 0; j < ncopies; j++)
4263 /* Handle uses. */
4264 if (j == 0)
4265 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node, -1);
4266 else
4267 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
4269 /* Arguments are ready.  Create the new vector stmt. */
4270 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
4272 if (CONVERT_EXPR_CODE_P (code)
4273 || code == VIEW_CONVERT_EXPR)
4274 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
4275 new_stmt = gimple_build_assign (vec_dest, vop);
4276 new_temp = make_ssa_name (vec_dest, new_stmt);
4277 gimple_assign_set_lhs (new_stmt, new_temp);
4278 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4279 if (slp_node)
4280 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4283 if (slp_node)
4284 continue;
4286 if (j == 0)
4287 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4288 else
4289 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4291 prev_stmt_info = vinfo_for_stmt (new_stmt);
4294 vec_oprnds.release ();
4295 return true;
4299 /* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
4300 either as a shift by a scalar or by a vector. */
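/* (Non-static so that e.g. the pattern recognizer can check whether a
   shift it is about to synthesize is vectorizable.)  */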
4302 bool
4303 vect_supportable_shift (enum tree_code code, tree scalar_type)
4306 machine_mode vec_mode;
4307 optab optab;
4308 int icode;
4309 tree vectype;
4311 vectype = get_vectype_for_scalar_type (scalar_type);
4312 if (!vectype)
4313 return false;
4315 optab = optab_for_tree_code (code, vectype, optab_scalar);
4316 if (!optab
4317 || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
4319 optab = optab_for_tree_code (code, vectype, optab_vector);
4320 if (!optab
4321 || (optab_handler (optab, TYPE_MODE (vectype))
4322 == CODE_FOR_nothing))
4323 return false;
4326 vec_mode = TYPE_MODE (vectype);
4327 icode = (int) optab_handler (optab, vec_mode);
4328 if (icode == CODE_FOR_nothing)
4329 return false;
4331 return true;
4335 /* Function vectorizable_shift.
4337 Check if STMT performs a shift operation that can be vectorized.
4338 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4339 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4340 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4342 static bool
4343 vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
4344 gimple *vec_stmt, slp_tree slp_node)
4346 tree vec_dest;
4347 tree scalar_dest;
4348 tree op0, op1 = NULL;
4349 tree vec_oprnd1 = NULL_TREE;
4350 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4351 tree vectype;
4352 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4353 enum tree_code code;
4354 machine_mode vec_mode;
4355 tree new_temp;
4356 optab optab;
4357 int icode;
4358 machine_mode optab_op2_mode;
4359 tree def;
4360 gimple def_stmt;
4361 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4362 gimple new_stmt = NULL;
4363 stmt_vec_info prev_stmt_info;
4364 int nunits_in;
4365 int nunits_out;
4366 tree vectype_out;
4367 tree op1_vectype;
4368 int ncopies;
4369 int j, i;
4370 vec<tree> vec_oprnds0 = vNULL;
4371 vec<tree> vec_oprnds1 = vNULL;
4372 tree vop0, vop1;
4373 unsigned int k;
4374 bool scalar_shift_arg = true;
4375 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4376 int vf;
4378 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4379 return false;
4381 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4382 return false;
4384 /* Is STMT a vectorizable binary/unary operation? */
4385 if (!is_gimple_assign (stmt))
4386 return false;
4388 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4389 return false;
4391 code = gimple_assign_rhs_code (stmt);
4393 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
4394 || code == RROTATE_EXPR))
4395 return false;
4397 scalar_dest = gimple_assign_lhs (stmt);
4398 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4399 if (TYPE_PRECISION (TREE_TYPE (scalar_dest))
4400 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4402 if (dump_enabled_p ())
4403 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4404 "bit-precision shifts not supported.\n");
4405 return false;
4408 op0 = gimple_assign_rhs1 (stmt);
4409 if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
4410 &def_stmt, &def, &dt[0], &vectype))
4412 if (dump_enabled_p ())
4413 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4414 "use not simple.\n");
4415 return false;
4417 /* If op0 is an external or constant def, use a vector type with
4418 the same size as the output vector type. */
4419 if (!vectype)
4420 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
4421 if (vec_stmt)
4422 gcc_assert (vectype);
4423 if (!vectype)
4425 if (dump_enabled_p ())
4426 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4427 "no vectype for scalar type\n");
4428 return false;
4431 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4432 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
4433 if (nunits_out != nunits_in)
4434 return false;
4436 op1 = gimple_assign_rhs2 (stmt);
4437 if (!vect_is_simple_use_1 (op1, stmt, loop_vinfo, bb_vinfo, &def_stmt,
4438 &def, &dt[1], &op1_vectype))
4440 if (dump_enabled_p ())
4441 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4442 "use not simple.\n");
4443 return false;
4446 if (loop_vinfo)
4447 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4448 else
4449 vf = 1;
4451 /* Multiple types in SLP are handled by creating the appropriate number of
4452 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4453 case of SLP. */
4454 if (slp_node || PURE_SLP_STMT (stmt_info))
4455 ncopies = 1;
4456 else
4457 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
4459 gcc_assert (ncopies >= 1);
4461 /* Determine whether the shift amount is a vector or a scalar.  If the
4462 shift/rotate amount is a vector, use the vector/vector shift optabs. */
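/* E.g. in "x << i" with a loop-variant i the shift count is vectorized
   as well, while "x << 3" can keep the count scalar and use the
   vector/scalar shift optab.  */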
4464 if ((dt[1] == vect_internal_def
4465 || dt[1] == vect_induction_def)
4466 && !slp_node)
4467 scalar_shift_arg = false;
4468 else if (dt[1] == vect_constant_def
4469 || dt[1] == vect_external_def
4470 || dt[1] == vect_internal_def)
4472 /* In SLP we need to check whether the shift count is the same
4473 in all the stmts; in a loop, a constant or invariant count is
4474 always a scalar shift. */
4475 if (slp_node)
4477 vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
4478 gimple slpstmt;
4480 FOR_EACH_VEC_ELT (stmts, k, slpstmt)
4481 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
4482 scalar_shift_arg = false;
4485 else
4487 if (dump_enabled_p ())
4488 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4489 "operand mode requires invariant argument.\n");
4490 return false;
4493 /* Vector shifted by vector. */
4494 if (!scalar_shift_arg)
4496 optab = optab_for_tree_code (code, vectype, optab_vector);
4497 if (dump_enabled_p ())
4498 dump_printf_loc (MSG_NOTE, vect_location,
4499 "vector/vector shift/rotate found.\n");
4501 if (!op1_vectype)
4502 op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
4503 if (op1_vectype == NULL_TREE
4504 || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
4506 if (dump_enabled_p ())
4507 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4508 "unusable type for last operand in"
4509 " vector/vector shift/rotate.\n");
4510 return false;
4513 /* See if the machine has a vector shifted by scalar insn and if not
4514 then see if it has a vector shifted by vector insn. */
4515 else
4517 optab = optab_for_tree_code (code, vectype, optab_scalar);
4518 if (optab
4519 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
4521 if (dump_enabled_p ())
4522 dump_printf_loc (MSG_NOTE, vect_location,
4523 "vector/scalar shift/rotate found.\n");
4525 else
4527 optab = optab_for_tree_code (code, vectype, optab_vector);
4528 if (optab
4529 && (optab_handler (optab, TYPE_MODE (vectype))
4530 != CODE_FOR_nothing))
4532 scalar_shift_arg = false;
4534 if (dump_enabled_p ())
4535 dump_printf_loc (MSG_NOTE, vect_location,
4536 "vector/vector shift/rotate found.\n");
4538 /* Unlike the other binary operators, shifts/rotates have
4539 an int rhs instead of one of the same type as the lhs,
4540 so make sure the scalar is the right type if we are
4541 dealing with vectors of long long/long/short/char. */
4542 if (dt[1] == vect_constant_def)
4543 op1 = fold_convert (TREE_TYPE (vectype), op1);
4544 else if (!useless_type_conversion_p (TREE_TYPE (vectype),
4545 TREE_TYPE (op1)))
4547 if (slp_node
4548 && TYPE_MODE (TREE_TYPE (vectype))
4549 != TYPE_MODE (TREE_TYPE (op1)))
4551 if (dump_enabled_p ())
4552 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4553 "unusable type for last operand in"
4554 " vector/vector shift/rotate.\n");
4555 return false;
4557 if (vec_stmt && !slp_node)
4559 op1 = fold_convert (TREE_TYPE (vectype), op1);
4560 op1 = vect_init_vector (stmt, op1,
4561 TREE_TYPE (vectype), NULL);
4568 /* Supportable by target? */
4569 if (!optab)
4571 if (dump_enabled_p ())
4572 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4573 "no optab.\n");
4574 return false;
4576 vec_mode = TYPE_MODE (vectype);
4577 icode = (int) optab_handler (optab, vec_mode);
4578 if (icode == CODE_FOR_nothing)
4580 if (dump_enabled_p ())
4581 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4582 "op not supported by target.\n");
4583 /* Check only during analysis. */
4584 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
4585 || (vf < vect_min_worthwhile_factor (code)
4586 && !vec_stmt))
4587 return false;
4588 if (dump_enabled_p ())
4589 dump_printf_loc (MSG_NOTE, vect_location,
4590 "proceeding using word mode.\n");
4593 /* Worthwhile without SIMD support? Check only during analysis. */
4594 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
4595 && vf < vect_min_worthwhile_factor (code)
4596 && !vec_stmt)
4598 if (dump_enabled_p ())
4599 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4600 "not worthwhile without SIMD support.\n");
4601 return false;
4604 if (!vec_stmt) /* transformation not required. */
4606 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
4607 if (dump_enabled_p ())
4608 dump_printf_loc (MSG_NOTE, vect_location,
4609 "=== vectorizable_shift ===\n");
4610 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4611 return true;
4614 /** Transform. **/
4616 if (dump_enabled_p ())
4617 dump_printf_loc (MSG_NOTE, vect_location,
4618 "transform binary/unary operation.\n");
4620 /* Handle def. */
4621 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4623 prev_stmt_info = NULL;
4624 for (j = 0; j < ncopies; j++)
4626 /* Handle uses. */
4627 if (j == 0)
4629 if (scalar_shift_arg)
4631 /* Vector shl and shr insn patterns can be defined with scalar
4632 operand 2 (shift operand). In this case, use constant or loop
4633 invariant op1 directly, without extending it to vector mode
4634 first. */
4635 optab_op2_mode = insn_data[icode].operand[2].mode;
4636 if (!VECTOR_MODE_P (optab_op2_mode))
4638 if (dump_enabled_p ())
4639 dump_printf_loc (MSG_NOTE, vect_location,
4640 "operand 1 using scalar mode.\n");
4641 vec_oprnd1 = op1;
4642 vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
4643 vec_oprnds1.quick_push (vec_oprnd1);
4644 if (slp_node)
4646 /* Store vec_oprnd1 for every vector stmt to be created
4647 for SLP_NODE. We check during the analysis that all
4648 the shift arguments are the same.
4649 TODO: Allow different constants for different vector
4650 stmts generated for an SLP instance. */
4651 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
4652 vec_oprnds1.quick_push (vec_oprnd1);
4657 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
4658 (a special case for certain kinds of vector shifts); otherwise,
4659 operand 1 should be of a vector type (the usual case). */
4660 if (vec_oprnd1)
4661 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4662 slp_node, -1);
4663 else
4664 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
4665 slp_node, -1);
4667 else
4668 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
4670 /* Arguments are ready. Create the new vector stmt. */
4671 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4673 vop1 = vec_oprnds1[i];
4674 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
4675 new_temp = make_ssa_name (vec_dest, new_stmt);
4676 gimple_assign_set_lhs (new_stmt, new_temp);
4677 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4678 if (slp_node)
4679 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4682 if (slp_node)
4683 continue;
4685 if (j == 0)
4686 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4687 else
4688 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4689 prev_stmt_info = vinfo_for_stmt (new_stmt);
4692 vec_oprnds0.release ();
4693 vec_oprnds1.release ();
4695 return true;
4699 /* Function vectorizable_operation.
4701 Check if STMT performs a binary, unary or ternary operation that can
4702 be vectorized.
4703 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4704 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4705 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4707 static bool
4708 vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
4709 gimple *vec_stmt, slp_tree slp_node)
4711 tree vec_dest;
4712 tree scalar_dest;
4713 tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
4714 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4715 tree vectype;
4716 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4717 enum tree_code code;
4718 machine_mode vec_mode;
4719 tree new_temp;
4720 int op_type;
4721 optab optab;
4722 bool target_support_p;
4723 tree def;
4724 gimple def_stmt;
4725 enum vect_def_type dt[3]
4726 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
4727 gimple new_stmt = NULL;
4728 stmt_vec_info prev_stmt_info;
4729 int nunits_in;
4730 int nunits_out;
4731 tree vectype_out;
4732 int ncopies;
4733 int j, i;
4734 vec<tree> vec_oprnds0 = vNULL;
4735 vec<tree> vec_oprnds1 = vNULL;
4736 vec<tree> vec_oprnds2 = vNULL;
4737 tree vop0, vop1, vop2;
4738 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4739 int vf;
4741 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4742 return false;
4744 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4745 return false;
4747 /* Is STMT a vectorizable binary/unary operation? */
4748 if (!is_gimple_assign (stmt))
4749 return false;
4751 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4752 return false;
4754 code = gimple_assign_rhs_code (stmt);
4756 /* For pointer addition, we should use the normal plus for
4757 the vector addition. */
4758 if (code == POINTER_PLUS_EXPR)
4759 code = PLUS_EXPR;
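/* E.g. a POINTER_PLUS_EXPR "q_1 = p_2 + 4" is vectorized as an
   ordinary element-wise PLUS_EXPR on pointer-sized elements.  */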
4761 /* Support only unary, binary, or ternary operations. */
4762 op_type = TREE_CODE_LENGTH (code);
4763 if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
4765 if (dump_enabled_p ())
4766 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4767 "num. args = %d (not unary/binary/ternary op).\n",
4768 op_type);
4769 return false;
4772 scalar_dest = gimple_assign_lhs (stmt);
4773 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4775 /* Most operations cannot handle bit-precision types without extra
4776 truncations. */
4777 if ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4778 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4779 /* The exceptions are the bitwise binary operations. */
4780 && code != BIT_IOR_EXPR
4781 && code != BIT_XOR_EXPR
4782 && code != BIT_AND_EXPR)
4784 if (dump_enabled_p ())
4785 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4786 "bit-precision arithmetic not supported.\n");
4787 return false;
4790 op0 = gimple_assign_rhs1 (stmt);
4791 if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
4792 &def_stmt, &def, &dt[0], &vectype))
4794 if (dump_enabled_p ())
4795 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4796 "use not simple.\n");
4797 return false;
4799 /* If op0 is an external or constant def, use a vector type with
4800 the same size as the output vector type. */
4801 if (!vectype)
4802 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
4803 if (vec_stmt)
4804 gcc_assert (vectype);
4805 if (!vectype)
4807 if (dump_enabled_p ())
4809 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4810 "no vectype for scalar type ");
4811 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
4812 TREE_TYPE (op0));
4813 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4816 return false;
4819 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4820 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
4821 if (nunits_out != nunits_in)
4822 return false;
4824 if (op_type == binary_op || op_type == ternary_op)
4826 op1 = gimple_assign_rhs2 (stmt);
4827 if (!vect_is_simple_use (op1, stmt, loop_vinfo, bb_vinfo, &def_stmt,
4828 &def, &dt[1]))
4830 if (dump_enabled_p ())
4831 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4832 "use not simple.\n");
4833 return false;
4836 if (op_type == ternary_op)
4838 op2 = gimple_assign_rhs3 (stmt);
4839 if (!vect_is_simple_use (op2, stmt, loop_vinfo, bb_vinfo, &def_stmt,
4840 &def, &dt[2]))
4842 if (dump_enabled_p ())
4843 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4844 "use not simple.\n");
4845 return false;
4849 if (loop_vinfo)
4850 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4851 else
4852 vf = 1;
4854 /* Multiple types in SLP are handled by creating the appropriate number of
4855 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4856 case of SLP. */
4857 if (slp_node || PURE_SLP_STMT (stmt_info))
4858 ncopies = 1;
4859 else
4860 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
4862 gcc_assert (ncopies >= 1);
4864 /* Shifts are handled in vectorizable_shift (). */
4865 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
4866 || code == RROTATE_EXPR)
4867 return false;
4869 /* Supportable by target? */
4871 vec_mode = TYPE_MODE (vectype);
4872 if (code == MULT_HIGHPART_EXPR)
4873 target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
4874 else
4876 optab = optab_for_tree_code (code, vectype, optab_default);
4877 if (!optab)
4879 if (dump_enabled_p ())
4880 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4881 "no optab.\n");
4882 return false;
4884 target_support_p = (optab_handler (optab, vec_mode)
4885 != CODE_FOR_nothing);
4888 if (!target_support_p)
4890 if (dump_enabled_p ())
4891 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4892 "op not supported by target.\n");
4893 /* Check only during analysis. */
4894 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
4895 || (!vec_stmt && vf < vect_min_worthwhile_factor (code)))
4896 return false;
4897 if (dump_enabled_p ())
4898 dump_printf_loc (MSG_NOTE, vect_location,
4899 "proceeding using word mode.\n");
4902 /* Worthwhile without SIMD support? Check only during analysis. */
4903 if (!VECTOR_MODE_P (vec_mode)
4904 && !vec_stmt
4905 && vf < vect_min_worthwhile_factor (code))
4907 if (dump_enabled_p ())
4908 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4909 "not worthwhile without SIMD support.\n");
4910 return false;
4913 if (!vec_stmt) /* transformation not required. */
4915 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
4916 if (dump_enabled_p ())
4917 dump_printf_loc (MSG_NOTE, vect_location,
4918 "=== vectorizable_operation ===\n");
4919 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4920 return true;
4923 /** Transform. **/
4925 if (dump_enabled_p ())
4926 dump_printf_loc (MSG_NOTE, vect_location,
4927 "transform binary/unary operation.\n");
4929 /* Handle def. */
4930 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4932 /* In case the vectorization factor (VF) is bigger than the number
4933 of elements that we can fit in a vectype (nunits), we have to generate
4934 more than one vector stmt - i.e., we need to "unroll" the
4935 vector stmt by a factor VF/nunits. In doing so, we record a pointer
4936 from one copy of the vector stmt to the next, in the field
4937 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
4938 stages to find the correct vector defs to be used when vectorizing
4939 stmts that use the defs of the current stmt. The example below
4940 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
4941 we need to create 4 vectorized stmts):
4943 before vectorization:
4944 RELATED_STMT VEC_STMT
4945 S1: x = memref - -
4946 S2: z = x + 1 - -
4948 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
4949 there):
4950 RELATED_STMT VEC_STMT
4951 VS1_0: vx0 = memref0 VS1_1 -
4952 VS1_1: vx1 = memref1 VS1_2 -
4953 VS1_2: vx2 = memref2 VS1_3 -
4954 VS1_3: vx3 = memref3 - -
4955 S1: x = load - VS1_0
4956 S2: z = x + 1 - -
4958 step2: vectorize stmt S2 (done here):
4959 To vectorize stmt S2 we first need to find the relevant vector
4960 def for the first operand 'x'. This is, as usual, obtained from
4961 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
4962 that defines 'x' (S1). This way we find the stmt VS1_0, and the
4963 relevant vector def 'vx0'. Having found 'vx0' we can generate
4964 the vector stmt VS2_0, and as usual, record it in the
4965 STMT_VINFO_VEC_STMT of stmt S2.
4966 When creating the second copy (VS2_1), we obtain the relevant vector
4967 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
4968 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
4969 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
4970 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
4971 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
4972 chain of stmts and pointers:
4973 RELATED_STMT VEC_STMT
4974 VS1_0: vx0 = memref0 VS1_1 -
4975 VS1_1: vx1 = memref1 VS1_2 -
4976 VS1_2: vx2 = memref2 VS1_3 -
4977 VS1_3: vx3 = memref3 - -
4978 S1: x = load - VS1_0
4979 VS2_0: vz0 = vx0 + v1 VS2_1 -
4980 VS2_1: vz1 = vx1 + v1 VS2_2 -
4981 VS2_2: vz2 = vx2 + v1 VS2_3 -
4982 VS2_3: vz3 = vx3 + v1 - -
4983 S2: z = x + 1 - VS2_0 */
4985 prev_stmt_info = NULL;
4986 for (j = 0; j < ncopies; j++)
4988 /* Handle uses. */
4989 if (j == 0)
4991 if (op_type == binary_op || op_type == ternary_op)
4992 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
4993 slp_node, -1);
4994 else
4995 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4996 slp_node, -1);
4997 if (op_type == ternary_op)
4999 vec_oprnds2.create (1);
5000 vec_oprnds2.quick_push (vect_get_vec_def_for_operand (op2,
5001 stmt,
5002 NULL));
5005 else
5007 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
5008 if (op_type == ternary_op)
5010 tree vec_oprnd = vec_oprnds2.pop ();
5011 vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
5012 vec_oprnd));
5016 /* Arguments are ready. Create the new vector stmt. */
5017 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
5019 vop1 = ((op_type == binary_op || op_type == ternary_op)
5020 ? vec_oprnds1[i] : NULL_TREE);
5021 vop2 = ((op_type == ternary_op)
5022 ? vec_oprnds2[i] : NULL_TREE);
5023 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2);
5024 new_temp = make_ssa_name (vec_dest, new_stmt);
5025 gimple_assign_set_lhs (new_stmt, new_temp);
5026 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5027 if (slp_node)
5028 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
5031 if (slp_node)
5032 continue;
5034 if (j == 0)
5035 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5036 else
5037 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5038 prev_stmt_info = vinfo_for_stmt (new_stmt);
5041 vec_oprnds0.release ();
5042 vec_oprnds1.release ();
5043 vec_oprnds2.release ();
5045 return true;
5048 /* A helper function to ensure data reference DR's base alignment
5049 for STMT_INFO. */
5051 static void
5052 ensure_base_align (stmt_vec_info stmt_info, struct data_reference *dr)
5054 if (!dr->aux)
5055 return;
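/* The vectorizer may have assumed a larger alignment for the base
   decl than it currently has; if so, raise the decl's alignment to
   the vector alignment, e.g. from 4 to 16 bytes for V4SI accesses.  */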
5057 if (DR_VECT_AUX (dr)->base_misaligned)
5059 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5060 tree base_decl = DR_VECT_AUX (dr)->base_decl;
5062 if (decl_in_symtab_p (base_decl))
5063 symtab_node::get (base_decl)->increase_alignment (TYPE_ALIGN (vectype));
5064 else
5066 DECL_ALIGN (base_decl) = TYPE_ALIGN (vectype);
5067 DECL_USER_ALIGN (base_decl) = 1;
5069 DR_VECT_AUX (dr)->base_misaligned = false;
5074 /* Given a vector type VECTYPE, returns the VECTOR_CST mask that implements
5075 reversal of the vector elements. If that is impossible to do,
5076 returns NULL. */
5078 static tree
5079 perm_mask_for_reverse (tree vectype)
5081 int i, nunits;
5082 unsigned char *sel;
5084 nunits = TYPE_VECTOR_SUBPARTS (vectype);
5085 sel = XALLOCAVEC (unsigned char, nunits);
5087 for (i = 0; i < nunits; ++i)
5088 sel[i] = nunits - 1 - i;
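/* E.g. for V4SI this builds the element-reversing selector {3, 2, 1, 0}.  */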
5090 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5091 return NULL_TREE;
5092 return vect_gen_perm_mask_checked (vectype, sel);
5095 /* Function vectorizable_store.
5097 Check if STMT defines a non-scalar data-ref (array/pointer/structure) that
5098 can be vectorized.
5099 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5100 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
5101 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5103 static bool
5104 vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
5105 slp_tree slp_node)
5107 tree scalar_dest;
5108 tree data_ref;
5109 tree op;
5110 tree vec_oprnd = NULL_TREE;
5111 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5112 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
5113 tree elem_type;
5114 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5115 struct loop *loop = NULL;
5116 machine_mode vec_mode;
5117 tree dummy;
5118 enum dr_alignment_support alignment_support_scheme;
5119 tree def;
5120 gimple def_stmt;
5121 enum vect_def_type dt;
5122 stmt_vec_info prev_stmt_info = NULL;
5123 tree dataref_ptr = NULL_TREE;
5124 tree dataref_offset = NULL_TREE;
5125 gimple ptr_incr = NULL;
5126 int ncopies;
5127 int j;
5128 gimple next_stmt, first_stmt = NULL;
5129 bool grouped_store = false;
5130 bool store_lanes_p = false;
5131 unsigned int group_size, i;
5132 vec<tree> dr_chain = vNULL;
5133 vec<tree> oprnds = vNULL;
5134 vec<tree> result_chain = vNULL;
5135 bool inv_p;
5136 bool negative = false;
5137 tree offset = NULL_TREE;
5138 vec<tree> vec_oprnds = vNULL;
5139 bool slp = (slp_node != NULL);
5140 unsigned int vec_num;
5141 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5142 tree aggr_type;
5144 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5145 return false;
5147 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
5148 return false;
5150 /* Is vectorizable store? */
5152 if (!is_gimple_assign (stmt))
5153 return false;
5155 scalar_dest = gimple_assign_lhs (stmt);
5156 if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
5157 && is_pattern_stmt_p (stmt_info))
5158 scalar_dest = TREE_OPERAND (scalar_dest, 0);
5159 if (TREE_CODE (scalar_dest) != ARRAY_REF
5160 && TREE_CODE (scalar_dest) != BIT_FIELD_REF
5161 && TREE_CODE (scalar_dest) != INDIRECT_REF
5162 && TREE_CODE (scalar_dest) != COMPONENT_REF
5163 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
5164 && TREE_CODE (scalar_dest) != REALPART_EXPR
5165 && TREE_CODE (scalar_dest) != MEM_REF)
5166 return false;
5168 gcc_assert (gimple_assign_single_p (stmt));
5170 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5171 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
5173 if (loop_vinfo)
5174 loop = LOOP_VINFO_LOOP (loop_vinfo);
5176 /* Multiple types in SLP are handled by creating the appropriate number of
5177 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5178 case of SLP. */
5179 if (slp || PURE_SLP_STMT (stmt_info))
5180 ncopies = 1;
5181 else
5182 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
5184 gcc_assert (ncopies >= 1);
5186 /* FORNOW. This restriction should be relaxed. */
5187 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
5189 if (dump_enabled_p ())
5190 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5191 "multiple types in nested loop.\n");
5192 return false;
5195 op = gimple_assign_rhs1 (stmt);
5196 if (!vect_is_simple_use (op, stmt, loop_vinfo, bb_vinfo, &def_stmt,
5197 &def, &dt))
5199 if (dump_enabled_p ())
5200 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5201 "use not simple.\n");
5202 return false;
5205 elem_type = TREE_TYPE (vectype);
5206 vec_mode = TYPE_MODE (vectype);
5208 /* FORNOW. In some cases we can vectorize even if the data-type is not
5209 supported (e.g. array initialization with 0). */
5210 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
5211 return false;
5213 if (!STMT_VINFO_DATA_REF (stmt_info))
5214 return false;
5216 if (!STMT_VINFO_STRIDED_P (stmt_info))
5218 negative =
5219 tree_int_cst_compare (loop && nested_in_vect_loop_p (loop, stmt)
5220 ? STMT_VINFO_DR_STEP (stmt_info) : DR_STEP (dr),
5221 size_zero_node) < 0;
5222 if (negative && ncopies > 1)
5224 if (dump_enabled_p ())
5225 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5226 "multiple types with negative step.\n");
5227 return false;
5229 if (negative)
5231 gcc_assert (!grouped_store);
5232 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
5233 if (alignment_support_scheme != dr_aligned
5234 && alignment_support_scheme != dr_unaligned_supported)
5236 if (dump_enabled_p ())
5237 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5238 "negative step but alignment required.\n");
5239 return false;
5241 if (dt != vect_constant_def
5242 && dt != vect_external_def
5243 && !perm_mask_for_reverse (vectype))
5245 if (dump_enabled_p ())
5246 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5247 "negative step and reversing not supported.\n");
5248 return false;
5253 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
5255 grouped_store = true;
5256 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
5257 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
5258 if (!slp
5259 && !PURE_SLP_STMT (stmt_info)
5260 && !STMT_VINFO_STRIDED_P (stmt_info))
5262 if (vect_store_lanes_supported (vectype, group_size))
5263 store_lanes_p = true;
5264 else if (!vect_grouped_store_supported (vectype, group_size))
5265 return false;
5268 if (STMT_VINFO_STRIDED_P (stmt_info)
5269 && (slp || PURE_SLP_STMT (stmt_info))
5270 && (group_size > nunits
5271 || nunits % group_size != 0))
5273 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5274 "unhandled strided group store\n");
5275 return false;
5278 if (first_stmt == stmt)
5280 /* STMT is the leader of the group. Check the operands of all the
5281 stmts of the group. */
5282 next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
5283 while (next_stmt)
5285 gcc_assert (gimple_assign_single_p (next_stmt));
5286 op = gimple_assign_rhs1 (next_stmt);
5287 if (!vect_is_simple_use (op, next_stmt, loop_vinfo, bb_vinfo,
5288 &def_stmt, &def, &dt))
5290 if (dump_enabled_p ())
5291 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5292 "use not simple.\n");
5293 return false;
5295 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5300 if (!vec_stmt) /* transformation not required. */
5302 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
5303 /* The SLP costs are calculated during SLP analysis. */
5304 if (!PURE_SLP_STMT (stmt_info))
5305 vect_model_store_cost (stmt_info, ncopies, store_lanes_p, dt,
5306 NULL, NULL, NULL);
5307 return true;
5310 /** Transform. **/
5312 ensure_base_align (stmt_info, dr);
5314 if (grouped_store)
5316 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5317 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
5319 GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
5321 /* FORNOW */
5322 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
5324 /* We vectorize all the stmts of the interleaving group when we
5325 reach the last stmt in the group. */
5326 if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
5327 < GROUP_SIZE (vinfo_for_stmt (first_stmt))
5328 && !slp)
5330 *vec_stmt = NULL;
5331 return true;
5334 if (slp)
5336 grouped_store = false;
5337 /* VEC_NUM is the number of vect stmts to be created for this
5338 group. */
5339 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
5340 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
5341 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5342 op = gimple_assign_rhs1 (first_stmt);
5344 else
5345 /* VEC_NUM is the number of vect stmts to be created for this
5346 group. */
5347 vec_num = group_size;
5349 else
5351 first_stmt = stmt;
5352 first_dr = dr;
5353 group_size = vec_num = 1;
5356 if (dump_enabled_p ())
5357 dump_printf_loc (MSG_NOTE, vect_location,
5358 "transform store. ncopies = %d\n", ncopies);
5360 if (STMT_VINFO_STRIDED_P (stmt_info))
5362 gimple_stmt_iterator incr_gsi;
5363 bool insert_after;
5364 gimple incr;
5365 tree offvar;
5366 tree ivstep;
5367 tree running_off;
5368 gimple_seq stmts = NULL;
5369 tree stride_base, stride_step, alias_off;
5370 tree vec_oprnd;
5371 unsigned int g;
5373 gcc_assert (!nested_in_vect_loop_p (loop, stmt));
5375 stride_base
5376 = fold_build_pointer_plus
5377 (unshare_expr (DR_BASE_ADDRESS (first_dr)),
5378 size_binop (PLUS_EXPR,
5379 convert_to_ptrofftype (unshare_expr (DR_OFFSET (first_dr))),
5380 convert_to_ptrofftype (DR_INIT(first_dr))));
5381 stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (first_dr)));
5383 /* For a store with loop-invariant (but other than power-of-2)
5384 stride (i.e. not a grouped access) like so:
5386 for (i = 0; i < n; i += stride)
5387 array[i] = ...;
5389 we generate a new induction variable and new stores from
5390 the components of the (vectorized) rhs:
5392 for (j = 0; ; j += VF*stride)
5393 vectemp = ...;
5394 tmp1 = vectemp[0];
5395 array[j] = tmp1;
5396 tmp2 = vectemp[1];
5397 array[j + stride] = tmp2;
5401 unsigned nstores = nunits;
5402 tree ltype = elem_type;
5403 if (slp)
5405 nstores = nunits / group_size;
5406 if (group_size < nunits)
5407 ltype = build_vector_type (elem_type, group_size);
5408 else
5409 ltype = vectype;
5410 ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
5411 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
5412 group_size = 1;
5415 ivstep = stride_step;
5416 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
5417 build_int_cst (TREE_TYPE (ivstep),
5418 ncopies * nstores));
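/* The IV advances by the bytes stored per vectorized iteration:
   ncopies * nstores stores, each stride_step bytes apart.  */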
5420 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
5422 create_iv (stride_base, ivstep, NULL,
5423 loop, &incr_gsi, insert_after,
5424 &offvar, NULL);
5425 incr = gsi_stmt (incr_gsi);
5426 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
5428 stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
5429 if (stmts)
5430 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
5432 prev_stmt_info = NULL;
5433 alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)), 0);
5434 next_stmt = first_stmt;
5435 for (g = 0; g < group_size; g++)
5437 running_off = offvar;
5438 if (g)
5440 tree size = TYPE_SIZE_UNIT (ltype);
5441 tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
5442 size);
5443 tree newoff = copy_ssa_name (running_off, NULL);
5444 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
5445 running_off, pos);
5446 vect_finish_stmt_generation (stmt, incr, gsi);
5447 running_off = newoff;
5449 for (j = 0; j < ncopies; j++)
5451 /* We've set op and dt above, from gimple_assign_rhs1(stmt),
5452 and first_stmt == stmt. */
5453 if (j == 0)
5455 if (slp)
5457 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL,
5458 slp_node, -1);
5459 vec_oprnd = vec_oprnds[0];
5461 else
5463 gcc_assert (gimple_assign_single_p (next_stmt));
5464 op = gimple_assign_rhs1 (next_stmt);
5465 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
5466 NULL);
5469 else
5471 if (slp)
5472 vec_oprnd = vec_oprnds[j];
5473 else
5475 vect_is_simple_use (vec_oprnd, NULL, loop_vinfo,
5476 bb_vinfo, &def_stmt, &def, &dt);
5477 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
5481 for (i = 0; i < nstores; i++)
5483 tree newref, newoff;
5484 gimple incr, assign;
5485 tree size = TYPE_SIZE (ltype);
5486 /* Extract the i'th component. */
5487 tree pos = fold_build2 (MULT_EXPR, bitsizetype,
5488 bitsize_int (i), size);
5489 tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
5490 size, pos);
5492 elem = force_gimple_operand_gsi (gsi, elem, true,
5493 NULL_TREE, true,
5494 GSI_SAME_STMT);
5496 newref = build2 (MEM_REF, ltype,
5497 running_off, alias_off);
5499 /* And store it to *running_off. */
5500 assign = gimple_build_assign (newref, elem);
5501 vect_finish_stmt_generation (stmt, assign, gsi);
5503 newoff = copy_ssa_name (running_off, NULL);
5504 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
5505 running_off, stride_step);
5506 vect_finish_stmt_generation (stmt, incr, gsi);
5508 running_off = newoff;
5509 if (g == group_size - 1
5510 && !slp)
5512 if (j == 0 && i == 0)
5513 STMT_VINFO_VEC_STMT (stmt_info)
5514 = *vec_stmt = assign;
5515 else
5516 STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign;
5517 prev_stmt_info = vinfo_for_stmt (assign);
5521 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5523 return true;
5526 dr_chain.create (group_size);
5527 oprnds.create (group_size);
5529 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
5530 gcc_assert (alignment_support_scheme);
5531 /* Targets with store-lane instructions must not require explicit
5532 realignment. */
5533 gcc_assert (!store_lanes_p
5534 || alignment_support_scheme == dr_aligned
5535 || alignment_support_scheme == dr_unaligned_supported);
5537 if (negative)
5538 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
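/* Bias the data-ref pointer back by nunits - 1 elements so a single
   vector access covers the range; the stored vector itself is
   reversed with a VEC_PERM_EXPR further below.  */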
5540 if (store_lanes_p)
5541 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
5542 else
5543 aggr_type = vectype;
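/* With store-lanes the whole group is written by one IFN_STORE_LANES
   call (e.g. NEON's vstN), so the aggregate is an array holding all
   vec_num * nunits elements.  */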
5545 /* In case the vectorization factor (VF) is bigger than the number
5546 of elements that we can fit in a vectype (nunits), we have to generate
5547 more than one vector stmt - i.e., we need to "unroll" the
5548 vector stmt by a factor VF/nunits. For more details see documentation in
5549 vect_get_vec_def_for_stmt_copy. */
5551 /* In case of interleaving (non-unit grouped access):
5553 S1: &base + 2 = x2
5554 S2: &base = x0
5555 S3: &base + 1 = x1
5556 S4: &base + 3 = x3
5558 We create vectorized stores starting from base address (the access of the
5559 first stmt in the chain (S2 in the above example), when the last store stmt
5560 of the chain (S4) is reached:
5562 VS1: &base = vx2
5563 VS2: &base + vec_size*1 = vx0
5564 VS3: &base + vec_size*2 = vx1
5565 VS4: &base + vec_size*3 = vx3
5567 Then permutation statements are generated:
5569 VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
5570 VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
5573 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
5574 (the order of the data-refs in the output of vect_permute_store_chain
5575 corresponds to the order of scalar stmts in the interleaving chain - see
5576 the documentation of vect_permute_store_chain()).
5578 In case of both multiple types and interleaving, the above vector stores and
5579 permutation stmts are created for every copy. The result vector stmts are
5580 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
5581 STMT_VINFO_RELATED_STMT for the next copies.
5584 prev_stmt_info = NULL;
5585 for (j = 0; j < ncopies; j++)
5587 gimple new_stmt;
5589 if (j == 0)
5591 if (slp)
5593 /* Get vectorized arguments for SLP_NODE. */
5594 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
5595 NULL, slp_node, -1);
5597 vec_oprnd = vec_oprnds[0];
5599 else
5601 /* For interleaved stores we collect vectorized defs for all the
5602 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
5603 used as an input to vect_permute_store_chain(), and OPRNDS as
5604 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
5606 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
5607 OPRNDS are of size 1. */
5608 next_stmt = first_stmt;
5609 for (i = 0; i < group_size; i++)
5611 /* Since gaps are not supported for interleaved stores,
5612 GROUP_SIZE is the exact number of stmts in the chain.
5613 Therefore, NEXT_STMT can't be NULL_TREE. In case that
5614 there is no interleaving, GROUP_SIZE is 1, and only one
5615 iteration of the loop will be executed. */
5616 gcc_assert (next_stmt
5617 && gimple_assign_single_p (next_stmt));
5618 op = gimple_assign_rhs1 (next_stmt);
5620 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
5621 NULL);
5622 dr_chain.quick_push (vec_oprnd);
5623 oprnds.quick_push (vec_oprnd);
5624 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5628 /* We should have caught mismatched types earlier. */
5629 gcc_assert (useless_type_conversion_p (vectype,
5630 TREE_TYPE (vec_oprnd)));
5631 bool simd_lane_access_p
5632 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
5633 if (simd_lane_access_p
5634 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
5635 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
5636 && integer_zerop (DR_OFFSET (first_dr))
5637 && integer_zerop (DR_INIT (first_dr))
5638 && alias_sets_conflict_p (get_alias_set (aggr_type),
5639 get_alias_set (DR_REF (first_dr))))
5641 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
5642 dataref_offset = build_int_cst (reference_alias_ptr_type
5643 (DR_REF (first_dr)), 0);
5644 inv_p = false;
5646 else
5647 dataref_ptr
5648 = vect_create_data_ref_ptr (first_stmt, aggr_type,
5649 simd_lane_access_p ? loop : NULL,
5650 offset, &dummy, gsi, &ptr_incr,
5651 simd_lane_access_p, &inv_p);
5652 gcc_assert (bb_vinfo || !inv_p);
5654 else
5656 /* For interleaved stores we created vectorized defs for all the
5657 defs stored in OPRNDS in the previous iteration (previous copy).
5658 DR_CHAIN is then used as an input to vect_permute_store_chain(),
5659 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
5660 next copy.
5661 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
5662 OPRNDS are of size 1. */
5663 for (i = 0; i < group_size; i++)
5665 op = oprnds[i];
5666 vect_is_simple_use (op, NULL, loop_vinfo, bb_vinfo, &def_stmt,
5667 &def, &dt);
5668 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
5669 dr_chain[i] = vec_oprnd;
5670 oprnds[i] = vec_oprnd;
5672 if (dataref_offset)
5673 dataref_offset
5674 = int_const_binop (PLUS_EXPR, dataref_offset,
5675 TYPE_SIZE_UNIT (aggr_type));
5676 else
5677 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
5678 TYPE_SIZE_UNIT (aggr_type));
5681 if (store_lanes_p)
5683 tree vec_array;
5685 /* Combine all the vectors into an array. */
5686 vec_array = create_vector_array (vectype, vec_num);
5687 for (i = 0; i < vec_num; i++)
5689 vec_oprnd = dr_chain[i];
5690 write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
5693 /* Emit:
5694 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
5695 data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
5696 new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array);
5697 gimple_call_set_lhs (new_stmt, data_ref);
5698 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5700 else
5702 new_stmt = NULL;
5703 if (grouped_store)
5705 if (j == 0)
5706 result_chain.create (group_size);
5707 /* Permute. */
5708 vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
5709 &result_chain);
5712 next_stmt = first_stmt;
5713 for (i = 0; i < vec_num; i++)
5715 unsigned align, misalign;
5717 if (i > 0)
5718 /* Bump the vector pointer. */
5719 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
5720 stmt, NULL_TREE);
5722 if (slp)
5723 vec_oprnd = vec_oprnds[i];
5724 else if (grouped_store)
5725 /* For grouped stores vectorized defs are interleaved in
5726 vect_permute_store_chain(). */
5727 vec_oprnd = result_chain[i];
5729 data_ref = fold_build2 (MEM_REF, TREE_TYPE (vec_oprnd),
5730 dataref_ptr,
5731 dataref_offset
5732 ? dataref_offset
5733 : build_int_cst (reference_alias_ptr_type
5734 (DR_REF (first_dr)), 0));
5735 align = TYPE_ALIGN_UNIT (vectype);
5736 if (aligned_access_p (first_dr))
5737 misalign = 0;
5738 else if (DR_MISALIGNMENT (first_dr) == -1)
5740 if (DR_VECT_AUX (first_dr)->base_element_aligned)
5741 align = TYPE_ALIGN_UNIT (elem_type);
5742 else
5743 align = get_object_alignment (DR_REF (first_dr))
5744 / BITS_PER_UNIT;
5745 misalign = 0;
5746 TREE_TYPE (data_ref)
5747 = build_aligned_type (TREE_TYPE (data_ref),
5748 align * BITS_PER_UNIT);
5750 else
5752 TREE_TYPE (data_ref)
5753 = build_aligned_type (TREE_TYPE (data_ref),
5754 TYPE_ALIGN (elem_type));
5755 misalign = DR_MISALIGNMENT (first_dr);
5757 if (dataref_offset == NULL_TREE
5758 && TREE_CODE (dataref_ptr) == SSA_NAME)
5759 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
5760 misalign);
5762 if (negative
5763 && dt != vect_constant_def
5764 && dt != vect_external_def)
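/* A negative-step store writes the elements in reverse order, so
   reverse the vector with a permutation before storing.  */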
5766 tree perm_mask = perm_mask_for_reverse (vectype);
5767 tree perm_dest
5768 = vect_create_destination_var (gimple_assign_rhs1 (stmt),
5769 vectype);
5770 tree new_temp = make_ssa_name (perm_dest);
5772 /* Generate the permute statement. */
5773 gimple perm_stmt
5774 = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
5775 vec_oprnd, perm_mask);
5776 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5778 perm_stmt = SSA_NAME_DEF_STMT (new_temp);
5779 vec_oprnd = new_temp;
5782 /* Arguments are ready. Create the new vector stmt. */
5783 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
5784 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5786 if (slp)
5787 continue;
5789 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5790 if (!next_stmt)
5791 break;
5794 if (!slp)
5796 if (j == 0)
5797 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5798 else
5799 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5800 prev_stmt_info = vinfo_for_stmt (new_stmt);
5804 dr_chain.release ();
5805 oprnds.release ();
5806 result_chain.release ();
5807 vec_oprnds.release ();
5809 return true;
5812 /* Given a vector type VECTYPE, turns permutation SEL into the equivalent
5813 VECTOR_CST mask. No checks are made that the target platform supports the
5814 mask, so callers may wish to test can_vec_perm_p separately, or use
5815 vect_gen_perm_mask_checked. */
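/* E.g. with a four-element VECTYPE, SEL = { 3, 2, 1, 0 } yields the
   mask that reverses the element order (illustrative values; mask
   elements index into the concatenation of the two VEC_PERM_EXPR
   operands).  */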
5817 tree
5818 vect_gen_perm_mask_any (tree vectype, const unsigned char *sel)
5820 tree mask_elt_type, mask_type, mask_vec, *mask_elts;
5821 int i, nunits;
5823 nunits = TYPE_VECTOR_SUBPARTS (vectype);
5825 mask_elt_type = lang_hooks.types.type_for_mode
5826 (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
5827 mask_type = get_vectype_for_scalar_type (mask_elt_type);
5829 mask_elts = XALLOCAVEC (tree, nunits);
5830 for (i = nunits - 1; i >= 0; i--)
5831 mask_elts[i] = build_int_cst (mask_elt_type, sel[i]);
5832 mask_vec = build_vector (mask_type, mask_elts);
5834 return mask_vec;
5837 /* Checked version of vect_gen_perm_mask_any. Asserts can_vec_perm_p,
5838 i.e. that the target supports the pattern _for arbitrary input vectors_. */
5840 tree
5841 vect_gen_perm_mask_checked (tree vectype, const unsigned char *sel)
5843 gcc_assert (can_vec_perm_p (TYPE_MODE (vectype), false, sel));
5844 return vect_gen_perm_mask_any (vectype, sel);
5847 /* Given vector variables X and Y that were generated for the scalar
5848 STMT, generate instructions to permute the vector elements of X and Y
5849 using permutation mask MASK_VEC, insert them at *GSI and return the
5850 permuted vector variable. */
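/* The mask elements select from the concatenation of X and Y, so for
   instance the { 0, 2, ..., i*2 } and { 1, 3, ..., i*2+1 } masks used
   for interleaved loads extract the even and odd elements
   respectively.  */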
5852 static tree
5853 permute_vec_elements (tree x, tree y, tree mask_vec, gimple stmt,
5854 gimple_stmt_iterator *gsi)
5856 tree vectype = TREE_TYPE (x);
5857 tree perm_dest, data_ref;
5858 gimple perm_stmt;
5860 perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
5861 data_ref = make_ssa_name (perm_dest);
5863 /* Generate the permute statement. */
5864 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
5865 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5867 return data_ref;
5870 /* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
5871 inserting them on the loop's preheader edge. Returns true if we
5872 were successful in doing so (and thus STMT can then be moved),
5873 otherwise returns false. */
5875 static bool
5876 hoist_defs_of_uses (gimple stmt, struct loop *loop)
5878 ssa_op_iter i;
5879 tree op;
5880 bool any = false;
5882 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
5884 gimple def_stmt = SSA_NAME_DEF_STMT (op);
5885 if (!gimple_nop_p (def_stmt)
5886 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
5888 /* Make sure we don't need to recurse. While we could do
5889 so in simple cases, for more complex use webs
5890 we don't have an easy way to preserve stmt order to fulfil
5891 dependencies within them. */
5892 tree op2;
5893 ssa_op_iter i2;
5894 if (gimple_code (def_stmt) == GIMPLE_PHI)
5895 return false;
5896 FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
5898 gimple def_stmt2 = SSA_NAME_DEF_STMT (op2);
5899 if (!gimple_nop_p (def_stmt2)
5900 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
5901 return false;
5903 any = true;
5907 if (!any)
5908 return true;
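/* Every def is movable; detach each in-loop def stmt and reinsert it
   on the preheader edge.  */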
5910 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
5912 gimple def_stmt = SSA_NAME_DEF_STMT (op);
5913 if (!gimple_nop_p (def_stmt)
5914 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
5916 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
5917 gsi_remove (&gsi, false);
5918 gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
5922 return true;
5925 /* vectorizable_load.
5927 Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
5928 can be vectorized.
5929 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5930 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
5931 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5933 static bool
5934 vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
5935 slp_tree slp_node, slp_instance slp_node_instance)
5937 tree scalar_dest;
5938 tree vec_dest = NULL;
5939 tree data_ref = NULL;
5940 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5941 stmt_vec_info prev_stmt_info;
5942 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5943 struct loop *loop = NULL;
5944 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
5945 bool nested_in_vect_loop = false;
5946 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
5947 tree elem_type;
5948 tree new_temp;
5949 machine_mode mode;
5950 gimple new_stmt = NULL;
5951 tree dummy;
5952 enum dr_alignment_support alignment_support_scheme;
5953 tree dataref_ptr = NULL_TREE;
5954 tree dataref_offset = NULL_TREE;
5955 gimple ptr_incr = NULL;
5956 int ncopies;
5957 int i, j, group_size = -1, group_gap_adj;
5958 tree msq = NULL_TREE, lsq;
5959 tree offset = NULL_TREE;
5960 tree byte_offset = NULL_TREE;
5961 tree realignment_token = NULL_TREE;
5962 gphi *phi = NULL;
5963 vec<tree> dr_chain = vNULL;
5964 bool grouped_load = false;
5965 bool load_lanes_p = false;
5966 gimple first_stmt;
5967 bool inv_p;
5968 bool negative = false;
5969 bool compute_in_loop = false;
5970 struct loop *at_loop;
5971 int vec_num;
5972 bool slp = (slp_node != NULL);
5973 bool slp_perm = false;
5974 enum tree_code code;
5975 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5976 int vf;
5977 tree aggr_type;
5978 tree gather_base = NULL_TREE, gather_off = NULL_TREE;
5979 tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
5980 int gather_scale = 1;
5981 enum vect_def_type gather_dt = vect_unknown_def_type;
5983 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5984 return false;
5986 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
5987 return false;
5989 /* Is vectorizable load? */
5990 if (!is_gimple_assign (stmt))
5991 return false;
5993 scalar_dest = gimple_assign_lhs (stmt);
5994 if (TREE_CODE (scalar_dest) != SSA_NAME)
5995 return false;
5997 code = gimple_assign_rhs_code (stmt);
5998 if (code != ARRAY_REF
5999 && code != BIT_FIELD_REF
6000 && code != INDIRECT_REF
6001 && code != COMPONENT_REF
6002 && code != IMAGPART_EXPR
6003 && code != REALPART_EXPR
6004 && code != MEM_REF
6005 && TREE_CODE_CLASS (code) != tcc_declaration)
6006 return false;
6008 if (!STMT_VINFO_DATA_REF (stmt_info))
6009 return false;
6011 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
6012 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
6014 if (loop_vinfo)
6016 loop = LOOP_VINFO_LOOP (loop_vinfo);
6017 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
6018 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
6020 else
6021 vf = 1;
6023 /* Multiple types in SLP are handled by creating the appropriate number of
6024 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
6025 case of SLP. */
6026 if (slp || PURE_SLP_STMT (stmt_info))
6027 ncopies = 1;
6028 else
6029 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
6031 gcc_assert (ncopies >= 1);
6033 /* FORNOW. This restriction should be relaxed. */
6034 if (nested_in_vect_loop && ncopies > 1)
6036 if (dump_enabled_p ())
6037 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6038 "multiple types in nested loop.\n");
6039 return false;
6042 /* Invalidate assumptions made by dependence analysis when vectorization
6043 on the unrolled body effectively re-orders stmts. */
6044 if (ncopies > 1
6045 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
6046 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6047 > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
6049 if (dump_enabled_p ())
6050 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6051 "cannot perform implicit CSE when unrolling "
6052 "with negative dependence distance\n");
6053 return false;
6056 elem_type = TREE_TYPE (vectype);
6057 mode = TYPE_MODE (vectype);
6059 /* FORNOW. In some cases we can vectorize even if the data-type is not
6060 supported (e.g. - data copies). */
6061 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
6063 if (dump_enabled_p ())
6064 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6065 "Aligned load, but unsupported type.\n");
6066 return false;
6069 /* Check if the load is a part of an interleaving chain. */
6070 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
6072 grouped_load = true;
6073 /* FORNOW */
6074 gcc_assert (! nested_in_vect_loop && !STMT_VINFO_GATHER_P (stmt_info));
6076 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
6078 /* If this is single-element interleaving with an element distance
6079 that leaves unused vector loads around, punt - we at least create
6080 very sub-optimal code in that case (and blow up memory,
6081 see PR65518). */
6082 if (first_stmt == stmt
6083 && !GROUP_NEXT_ELEMENT (stmt_info)
6084 && GROUP_SIZE (stmt_info) > TYPE_VECTOR_SUBPARTS (vectype))
6086 if (dump_enabled_p ())
6087 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6088 "single-element interleaving not supported "
6089 "for not adjacent vector loads\n");
6090 return false;
6093 if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
6094 slp_perm = true;
6096 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
6097 if (!slp
6098 && !PURE_SLP_STMT (stmt_info)
6099 && !STMT_VINFO_STRIDED_P (stmt_info))
6101 if (vect_load_lanes_supported (vectype, group_size))
6102 load_lanes_p = true;
6103 else if (!vect_grouped_load_supported (vectype, group_size))
6104 return false;
6107 /* Invalidate assumptions made by dependence analysis when vectorization
6108 on the unrolled body effectively re-orders stmts. */
6109 if (!PURE_SLP_STMT (stmt_info)
6110 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
6111 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6112 > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
6114 if (dump_enabled_p ())
6115 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6116 "cannot perform implicit CSE when performing "
6117 "group loads with negative dependence distance\n");
6118 return false;
6121 /* Similarly when the stmt is a load that is both part of a SLP
6122 instance and a loop vectorized stmt via the same-dr mechanism
6123 we have to give up. */
6124 if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)
6125 && (STMT_SLP_TYPE (stmt_info)
6126 != STMT_SLP_TYPE (vinfo_for_stmt
6127 (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)))))
6129 if (dump_enabled_p ())
6130 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6131 "conflicting SLP types for CSEd load\n");
6132 return false;
6137 if (STMT_VINFO_GATHER_P (stmt_info))
6139 gimple def_stmt;
6140 tree def;
6141 gather_decl = vect_check_gather (stmt, loop_vinfo, &gather_base,
6142 &gather_off, &gather_scale);
6143 gcc_assert (gather_decl);
6144 if (!vect_is_simple_use_1 (gather_off, NULL, loop_vinfo, bb_vinfo,
6145 &def_stmt, &def, &gather_dt,
6146 &gather_off_vectype))
6148 if (dump_enabled_p ())
6149 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6150 "gather index use not simple.\n");
6151 return false;
6154 else if (STMT_VINFO_STRIDED_P (stmt_info))
6156 if ((grouped_load
6157 && (slp || PURE_SLP_STMT (stmt_info)))
6158 && (group_size > nunits
6159 || nunits % group_size != 0))
6161 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6162 "unhandled strided group load\n");
6163 return false;
6166 else
6168 negative = tree_int_cst_compare (nested_in_vect_loop
6169 ? STMT_VINFO_DR_STEP (stmt_info)
6170 : DR_STEP (dr),
6171 size_zero_node) < 0;
6172 if (negative && ncopies > 1)
6174 if (dump_enabled_p ())
6175 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6176 "multiple types with negative step.\n");
6177 return false;
6180 if (negative)
6182 if (grouped_load)
6184 if (dump_enabled_p ())
6185 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6186 "negative step for group load not supported"
6187 "\n");
6188 return false;
6190 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
6191 if (alignment_support_scheme != dr_aligned
6192 && alignment_support_scheme != dr_unaligned_supported)
6194 if (dump_enabled_p ())
6195 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6196 "negative step but alignment required.\n");
6197 return false;
6199 if (!perm_mask_for_reverse (vectype))
6201 if (dump_enabled_p ())
6202 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6203 "negative step and reversing not supported."
6204 "\n");
6205 return false;
6210 if (!vec_stmt) /* transformation not required. */
6212 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
6213 /* The SLP costs are calculated during SLP analysis. */
6214 if (!PURE_SLP_STMT (stmt_info))
6215 vect_model_load_cost (stmt_info, ncopies, load_lanes_p,
6216 NULL, NULL, NULL);
6217 return true;
6220 if (dump_enabled_p ())
6221 dump_printf_loc (MSG_NOTE, vect_location,
6222 "transform load. ncopies = %d\n", ncopies);
6224 /** Transform. **/
6226 ensure_base_align (stmt_info, dr);
6228 if (STMT_VINFO_GATHER_P (stmt_info))
6230 tree vec_oprnd0 = NULL_TREE, op;
6231 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
6232 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
6233 tree ptr, mask, var, scale, merge, perm_mask = NULL_TREE, prev_res = NULL_TREE;
6234 edge pe = loop_preheader_edge (loop);
6235 gimple_seq seq;
6236 basic_block new_bb;
6237 enum { NARROW, NONE, WIDEN } modifier;
6238 int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);
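/* The number of offsets the gather consumes per call may differ from
   NUNITS by a factor of two: with twice as many offsets (WIDEN), odd
   copies permute the upper half of the offset vector into place; with
   half as many (NARROW), pairs of gather results are merged by a
   final permutation.  */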
6240 if (nunits == gather_off_nunits)
6241 modifier = NONE;
6242 else if (nunits == gather_off_nunits / 2)
6244 unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
6245 modifier = WIDEN;
6247 for (i = 0; i < gather_off_nunits; ++i)
6248 sel[i] = i | nunits;
6250 perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
6252 else if (nunits == gather_off_nunits * 2)
6254 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
6255 modifier = NARROW;
6257 for (i = 0; i < nunits; ++i)
6258 sel[i] = i < gather_off_nunits
6259 ? i : i + nunits - gather_off_nunits;
6261 perm_mask = vect_gen_perm_mask_checked (vectype, sel);
6262 ncopies *= 2;
6264 else
6265 gcc_unreachable ();
6267 rettype = TREE_TYPE (TREE_TYPE (gather_decl));
6268 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6269 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6270 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6271 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6272 scaletype = TREE_VALUE (arglist);
6273 gcc_checking_assert (types_compatible_p (srctype, rettype));
6275 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6277 ptr = fold_convert (ptrtype, gather_base);
6278 if (!is_gimple_min_invariant (ptr))
6280 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
6281 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
6282 gcc_assert (!new_bb);
6285 /* Currently we support only unconditional gather loads,
6286 so the mask should be all ones. */
6287 if (TREE_CODE (masktype) == INTEGER_TYPE)
6288 mask = build_int_cst (masktype, -1);
6289 else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
6291 mask = build_int_cst (TREE_TYPE (masktype), -1);
6292 mask = build_vector_from_val (masktype, mask);
6293 mask = vect_init_vector (stmt, mask, masktype, NULL);
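/* For a floating-point mask type, synthesize the all-ones bit
   pattern via real_from_target.  */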
6295 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
6297 REAL_VALUE_TYPE r;
6298 long tmp[6];
6299 for (j = 0; j < 6; ++j)
6300 tmp[j] = -1;
6301 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
6302 mask = build_real (TREE_TYPE (masktype), r);
6303 mask = build_vector_from_val (masktype, mask);
6304 mask = vect_init_vector (stmt, mask, masktype, NULL);
6306 else
6307 gcc_unreachable ();
6309 scale = build_int_cst (scaletype, gather_scale);
6311 if (TREE_CODE (TREE_TYPE (rettype)) == INTEGER_TYPE)
6312 merge = build_int_cst (TREE_TYPE (rettype), 0);
6313 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype)))
6315 REAL_VALUE_TYPE r;
6316 long tmp[6];
6317 for (j = 0; j < 6; ++j)
6318 tmp[j] = 0;
6319 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (rettype)));
6320 merge = build_real (TREE_TYPE (rettype), r);
6322 else
6323 gcc_unreachable ();
6324 merge = build_vector_from_val (rettype, merge);
6325 merge = vect_init_vector (stmt, merge, rettype, NULL);
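/* Generate the gathers: each call is
   DEST = GATHER (MERGE, PTR, OFFSETS, MASK, SCALE), with MERGE
   supplying the value for (here never taken) masked-off lanes.  */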
6327 prev_stmt_info = NULL;
6328 for (j = 0; j < ncopies; ++j)
6330 if (modifier == WIDEN && (j & 1))
6331 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
6332 perm_mask, stmt, gsi);
6333 else if (j == 0)
6334 op = vec_oprnd0
6335 = vect_get_vec_def_for_operand (gather_off, stmt, NULL);
6336 else
6337 op = vec_oprnd0
6338 = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);
6340 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
6342 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
6343 == TYPE_VECTOR_SUBPARTS (idxtype));
6344 var = vect_get_new_vect_var (idxtype, vect_simple_var, NULL);
6345 var = make_ssa_name (var);
6346 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
6347 new_stmt
6348 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
6349 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6350 op = var;
6353 new_stmt
6354 = gimple_build_call (gather_decl, 5, merge, ptr, op, mask, scale);
6356 if (!useless_type_conversion_p (vectype, rettype))
6358 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
6359 == TYPE_VECTOR_SUBPARTS (rettype));
6360 var = vect_get_new_vect_var (rettype, vect_simple_var, NULL);
6361 op = make_ssa_name (var, new_stmt);
6362 gimple_call_set_lhs (new_stmt, op);
6363 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6364 var = make_ssa_name (vec_dest);
6365 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
6366 new_stmt
6367 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
6369 else
6371 var = make_ssa_name (vec_dest, new_stmt);
6372 gimple_call_set_lhs (new_stmt, var);
6375 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6377 if (modifier == NARROW)
6379 if ((j & 1) == 0)
6381 prev_res = var;
6382 continue;
6384 var = permute_vec_elements (prev_res, var,
6385 perm_mask, stmt, gsi);
6386 new_stmt = SSA_NAME_DEF_STMT (var);
6389 if (prev_stmt_info == NULL)
6390 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6391 else
6392 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6393 prev_stmt_info = vinfo_for_stmt (new_stmt);
6395 return true;
6397 else if (STMT_VINFO_STRIDED_P (stmt_info))
6399 gimple_stmt_iterator incr_gsi;
6400 bool insert_after;
6401 gimple incr;
6402 tree offvar;
6403 tree ivstep;
6404 tree running_off;
6405 vec<constructor_elt, va_gc> *v = NULL;
6406 gimple_seq stmts = NULL;
6407 tree stride_base, stride_step, alias_off;
6409 gcc_assert (!nested_in_vect_loop);
6411 if (slp && grouped_load)
6412 first_dr = STMT_VINFO_DATA_REF
6413 (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
6414 else
6415 first_dr = dr;
6417 stride_base
6418 = fold_build_pointer_plus
6419 (DR_BASE_ADDRESS (first_dr),
6420 size_binop (PLUS_EXPR,
6421 convert_to_ptrofftype (DR_OFFSET (first_dr)),
6422 convert_to_ptrofftype (DR_INIT (first_dr))));
6423 stride_step = fold_convert (sizetype, DR_STEP (first_dr));
6425 /* For a load with loop-invariant (but other than power-of-2)
6426 stride (i.e. not a grouped access) like so:
6428 for (i = 0; i < n; i += stride)
6429 ... = array[i];
6431 we generate a new induction variable and new accesses to
6432 form a new vector (or vectors, depending on ncopies):
6434 for (j = 0; ; j += VF*stride)
6435 tmp1 = array[j];
6436 tmp2 = array[j + stride];
6438 vectemp = {tmp1, tmp2, ...}
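   E.g. with a four-element vector and VF == 4 this emits, per
   iteration, loads at j, j + stride, j + 2*stride and j + 3*stride
   (an illustrative instantiation of the scheme above).  */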
6441 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
6442 build_int_cst (TREE_TYPE (stride_step), vf));
6444 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
6446 create_iv (unshare_expr (stride_base), unshare_expr (ivstep), NULL,
6447 loop, &incr_gsi, insert_after,
6448 &offvar, NULL);
6449 incr = gsi_stmt (incr_gsi);
6450 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
6452 stride_step = force_gimple_operand (unshare_expr (stride_step),
6453 &stmts, true, NULL_TREE);
6454 if (stmts)
6455 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
6457 prev_stmt_info = NULL;
6458 running_off = offvar;
6459 alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)), 0);
6460 int nloads = nunits;
6461 tree ltype = TREE_TYPE (vectype);
6462 auto_vec<tree> dr_chain;
6463 if (slp)
6465 nloads = nunits / group_size;
6466 if (group_size < nunits)
6467 ltype = build_vector_type (TREE_TYPE (vectype), group_size);
6468 else
6469 ltype = vectype;
6470 ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
6471 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6472 if (slp_perm)
6473 dr_chain.create (ncopies);
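/* Now emit the loads: each vector stmt is either one load of LTYPE or
   a CONSTRUCTOR of NLOADS narrower loads, with RUNNING_OFF bumped by
   STRIDE_STEP after every access.  */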
6475 for (j = 0; j < ncopies; j++)
6477 tree vec_inv;
6479 if (nloads > 1)
6481 vec_alloc (v, nloads);
6482 for (i = 0; i < nloads; i++)
6484 tree newref, newoff;
6485 gimple incr;
6486 newref = build2 (MEM_REF, ltype, running_off, alias_off);
6488 newref = force_gimple_operand_gsi (gsi, newref, true,
6489 NULL_TREE, true,
6490 GSI_SAME_STMT);
6491 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, newref);
6492 newoff = copy_ssa_name (running_off);
6493 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6494 running_off, stride_step);
6495 vect_finish_stmt_generation (stmt, incr, gsi);
6497 running_off = newoff;
6500 vec_inv = build_constructor (vectype, v);
6501 new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
6502 new_stmt = SSA_NAME_DEF_STMT (new_temp);
6504 else
6506 new_stmt = gimple_build_assign (make_ssa_name (ltype),
6507 build2 (MEM_REF, ltype,
6508 running_off, alias_off));
6509 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6511 tree newoff = copy_ssa_name (running_off);
6512 gimple incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6513 running_off, stride_step);
6514 vect_finish_stmt_generation (stmt, incr, gsi);
6516 running_off = newoff;
6519 if (slp)
6521 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
6522 if (slp_perm)
6523 dr_chain.quick_push (gimple_assign_lhs (new_stmt));
6525 else
6527 if (j == 0)
6528 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6529 else
6530 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6531 prev_stmt_info = vinfo_for_stmt (new_stmt);
6534 if (slp_perm)
6535 vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
6536 slp_node_instance, false);
6537 return true;
6540 if (grouped_load)
6542 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
6543 if (slp
6544 && !SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()
6545 && first_stmt != SLP_TREE_SCALAR_STMTS (slp_node)[0])
6546 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
6548 /* Check if the chain of loads is already vectorized. */
6549 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
6550 /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
6551 ??? But we can only do so if there is exactly one
6552 as we have no way to get at the rest. Leave the CSE
6553 opportunity alone.
6554 ??? With the group load eventually participating
6555 in multiple different permutations (having multiple
6556 slp nodes which refer to the same group) the CSE
6557 even produces wrong code. See PR56270. */
6558 && !slp)
6560 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
6561 return true;
6563 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
6564 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
6565 group_gap_adj = 0;
6567 /* VEC_NUM is the number of vect stmts to be created for this group. */
6568 if (slp)
6570 grouped_load = false;
6571 /* For SLP permutation support we need to load the whole group,
6572 not only the number of vector stmts the permutation result
6573 fits in. */
6574 if (slp_perm)
6575 vec_num = (group_size * vf + nunits - 1) / nunits;
6576 else
6577 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6578 group_gap_adj = vf * group_size - nunits * vec_num;
6580 else
6581 vec_num = group_size;
6583 else
6585 first_stmt = stmt;
6586 first_dr = dr;
6587 group_size = vec_num = 1;
6588 group_gap_adj = 0;
6591 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
6592 gcc_assert (alignment_support_scheme);
6593 /* Targets with load-lane instructions must not require explicit
6594 realignment. */
6595 gcc_assert (!load_lanes_p
6596 || alignment_support_scheme == dr_aligned
6597 || alignment_support_scheme == dr_unaligned_supported);
6599 /* In case the vectorization factor (VF) is bigger than the number
6600 of elements that we can fit in a vectype (nunits), we have to generate
6601 more than one vector stmt - i.e., we need to "unroll" the
6602 vector stmt by a factor VF/nunits. In doing so, we record a pointer
6603 from one copy of the vector stmt to the next, in the field
6604 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
6605 stages to find the correct vector defs to be used when vectorizing
6606 stmts that use the defs of the current stmt. The example below
6607 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
6608 need to create 4 vectorized stmts):
6610 before vectorization:
6611 RELATED_STMT VEC_STMT
6612 S1: x = memref - -
6613 S2: z = x + 1 - -
6615 step 1: vectorize stmt S1:
6616 We first create the vector stmt VS1_0, and, as usual, record a
6617 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
6618 Next, we create the vector stmt VS1_1, and record a pointer to
6619 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
6620 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
6621 stmts and pointers:
6622 RELATED_STMT VEC_STMT
6623 VS1_0: vx0 = memref0 VS1_1 -
6624 VS1_1: vx1 = memref1 VS1_2 -
6625 VS1_2: vx2 = memref2 VS1_3 -
6626 VS1_3: vx3 = memref3 - -
6627 S1: x = load - VS1_0
6628 S2: z = x + 1 - -
6630 See the documentation of vect_get_vec_def_for_stmt_copy for how the
6631 information we recorded in the RELATED_STMT field is used to vectorize
6632 stmt S2. */
6634 /* In case of interleaving (non-unit grouped access):
6636 S1: x2 = &base + 2
6637 S2: x0 = &base
6638 S3: x1 = &base + 1
6639 S4: x3 = &base + 3
6641 Vectorized loads are created in the order of memory accesses
6642 starting from the access of the first stmt of the chain:
6644 VS1: vx0 = &base
6645 VS2: vx1 = &base + vec_size*1
6646 VS3: vx3 = &base + vec_size*2
6647 VS4: vx4 = &base + vec_size*3
6649 Then permutation statements are generated:
6651 VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
6652 VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
6655 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
6656 (the order of the data-refs in the output of vect_permute_load_chain
6657 corresponds to the order of scalar stmts in the interleaving chain - see
6658 the documentation of vect_permute_load_chain()).
6659 Generating the permutation stmts and recording them in
6660 STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().
6662 In case of both multiple types and interleaving, the vector loads and
6663 permutation stmts above are created for every copy. The result vector
6664 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
6665 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
6667 /* If the data reference is aligned (dr_aligned) or potentially unaligned
6668 on a target that supports unaligned accesses (dr_unaligned_supported)
6669 we generate the following code:
6670 p = initial_addr;
6671 indx = 0;
6672 loop {
6673 p = p + indx * vectype_size;
6674 vec_dest = *(p);
6675 indx = indx + 1;
6678 Otherwise, the data reference is potentially unaligned on a target that
6679 does not support unaligned accesses (dr_explicit_realign_optimized) -
6680 then generate the following code, in which the data in each iteration is
6681 obtained by two vector loads, one from the previous iteration, and one
6682 from the current iteration:
6683 p1 = initial_addr;
6684 msq_init = *(floor(p1))
6685 p2 = initial_addr + VS - 1;
6686 realignment_token = call target_builtin;
6687 indx = 0;
6688 loop {
6689 p2 = p2 + indx * vectype_size
6690 lsq = *(floor(p2))
6691 vec_dest = realign_load (msq, lsq, realignment_token)
6692 indx = indx + 1;
6693 msq = lsq;
6694 } */
6696 /* If the misalignment remains the same throughout the execution of the
6697 loop, we can create the init_addr and permutation mask at the loop
6698 preheader. Otherwise, it needs to be created inside the loop.
6699 This can only occur when vectorizing memory accesses in the inner-loop
6700 nested within an outer-loop that is being vectorized. */
6702 if (nested_in_vect_loop
6703 && (TREE_INT_CST_LOW (DR_STEP (dr))
6704 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
6706 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
6707 compute_in_loop = true;
6710 if ((alignment_support_scheme == dr_explicit_realign_optimized
6711 || alignment_support_scheme == dr_explicit_realign)
6712 && !compute_in_loop)
6714 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
6715 alignment_support_scheme, NULL_TREE,
6716 &at_loop);
6717 if (alignment_support_scheme == dr_explicit_realign_optimized)
6719 phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
6720 byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
6721 size_one_node);
6724 else
6725 at_loop = loop;
6727 if (negative)
6728 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
6730 if (load_lanes_p)
6731 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
6732 else
6733 aggr_type = vectype;
6735 prev_stmt_info = NULL;
6736 for (j = 0; j < ncopies; j++)
6738 /* 1. Create the vector or array pointer update chain. */
6739 if (j == 0)
6741 bool simd_lane_access_p
6742 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
6743 if (simd_lane_access_p
6744 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
6745 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
6746 && integer_zerop (DR_OFFSET (first_dr))
6747 && integer_zerop (DR_INIT (first_dr))
6748 && alias_sets_conflict_p (get_alias_set (aggr_type),
6749 get_alias_set (DR_REF (first_dr)))
6750 && (alignment_support_scheme == dr_aligned
6751 || alignment_support_scheme == dr_unaligned_supported))
6753 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
6754 dataref_offset = build_int_cst (reference_alias_ptr_type
6755 (DR_REF (first_dr)), 0);
6756 inv_p = false;
6758 else
6759 dataref_ptr
6760 = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
6761 offset, &dummy, gsi, &ptr_incr,
6762 simd_lane_access_p, &inv_p,
6763 byte_offset);
6765 else if (dataref_offset)
6766 dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
6767 TYPE_SIZE_UNIT (aggr_type));
6768 else
6769 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
6770 TYPE_SIZE_UNIT (aggr_type));
6772 if (grouped_load || slp_perm)
6773 dr_chain.create (vec_num);
6775 if (load_lanes_p)
6777 tree vec_array;
6779 vec_array = create_vector_array (vectype, vec_num);
6781 /* Emit:
6782 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
6783 data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
6784 new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
6785 gimple_call_set_lhs (new_stmt, vec_array);
6786 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6788 /* Extract each vector into an SSA_NAME. */
6789 for (i = 0; i < vec_num; i++)
6791 new_temp = read_vector_array (stmt, gsi, scalar_dest,
6792 vec_array, i);
6793 dr_chain.quick_push (new_temp);
6796 /* Record the mapping between SSA_NAMEs and statements. */
6797 vect_record_grouped_load_vectors (stmt, dr_chain);
6799 else
6801 for (i = 0; i < vec_num; i++)
6803 if (i > 0)
6804 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
6805 stmt, NULL_TREE);
6807 /* 2. Create the vector-load in the loop. */
6808 switch (alignment_support_scheme)
6810 case dr_aligned:
6811 case dr_unaligned_supported:
6813 unsigned int align, misalign;
6815 data_ref
6816 = fold_build2 (MEM_REF, vectype, dataref_ptr,
6817 dataref_offset
6818 ? dataref_offset
6819 : build_int_cst (reference_alias_ptr_type
6820 (DR_REF (first_dr)), 0));
6821 align = TYPE_ALIGN_UNIT (vectype);
6822 if (alignment_support_scheme == dr_aligned)
6824 gcc_assert (aligned_access_p (first_dr));
6825 misalign = 0;
6827 else if (DR_MISALIGNMENT (first_dr) == -1)
6829 if (DR_VECT_AUX (first_dr)->base_element_aligned)
6830 align = TYPE_ALIGN_UNIT (elem_type);
6831 else
6832 align = (get_object_alignment (DR_REF (first_dr))
6833 / BITS_PER_UNIT);
6834 misalign = 0;
6835 TREE_TYPE (data_ref)
6836 = build_aligned_type (TREE_TYPE (data_ref),
6837 align * BITS_PER_UNIT);
6839 else
6841 TREE_TYPE (data_ref)
6842 = build_aligned_type (TREE_TYPE (data_ref),
6843 TYPE_ALIGN (elem_type));
6844 misalign = DR_MISALIGNMENT (first_dr);
6846 if (dataref_offset == NULL_TREE
6847 && TREE_CODE (dataref_ptr) == SSA_NAME)
6848 set_ptr_info_alignment (get_ptr_info (dataref_ptr),
6849 align, misalign);
6850 break;
6852 case dr_explicit_realign:
6854 tree ptr, bump;
6856 tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));
6858 if (compute_in_loop)
6859 msq = vect_setup_realignment (first_stmt, gsi,
6860 &realignment_token,
6861 dr_explicit_realign,
6862 dataref_ptr, NULL);
6864 if (TREE_CODE (dataref_ptr) == SSA_NAME)
6865 ptr = copy_ssa_name (dataref_ptr);
6866 else
6867 ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
6868 new_stmt = gimple_build_assign
6869 (ptr, BIT_AND_EXPR, dataref_ptr,
6870 build_int_cst
6871 (TREE_TYPE (dataref_ptr),
6872 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
6873 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6874 data_ref
6875 = build2 (MEM_REF, vectype, ptr,
6876 build_int_cst (reference_alias_ptr_type
6877 (DR_REF (first_dr)), 0));
6878 vec_dest = vect_create_destination_var (scalar_dest,
6879 vectype);
6880 new_stmt = gimple_build_assign (vec_dest, data_ref);
6881 new_temp = make_ssa_name (vec_dest, new_stmt);
6882 gimple_assign_set_lhs (new_stmt, new_temp);
6883 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
6884 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
6885 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6886 msq = new_temp;
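/* Compute the floor-aligned address of the last byte of the
   access; the load from it supplies LSQ.  */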
6888 bump = size_binop (MULT_EXPR, vs,
6889 TYPE_SIZE_UNIT (elem_type));
6890 bump = size_binop (MINUS_EXPR, bump, size_one_node);
6891 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
6892 new_stmt = gimple_build_assign
6893 (NULL_TREE, BIT_AND_EXPR, ptr,
6894 build_int_cst
6895 (TREE_TYPE (ptr),
6896 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
6897 ptr = copy_ssa_name (ptr, new_stmt);
6898 gimple_assign_set_lhs (new_stmt, ptr);
6899 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6900 data_ref
6901 = build2 (MEM_REF, vectype, ptr,
6902 build_int_cst (reference_alias_ptr_type
6903 (DR_REF (first_dr)), 0));
6904 break;
6906 case dr_explicit_realign_optimized:
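/* MSQ was computed in the preheader; here we only build the
   floor-aligned address whose load (emitted below) supplies LSQ.  */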
6907 if (TREE_CODE (dataref_ptr) == SSA_NAME)
6908 new_temp = copy_ssa_name (dataref_ptr);
6909 else
6910 new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
6911 new_stmt = gimple_build_assign
6912 (new_temp, BIT_AND_EXPR, dataref_ptr,
6913 build_int_cst
6914 (TREE_TYPE (dataref_ptr),
6915 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
6916 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6917 data_ref
6918 = build2 (MEM_REF, vectype, new_temp,
6919 build_int_cst (reference_alias_ptr_type
6920 (DR_REF (first_dr)), 0));
6921 break;
6922 default:
6923 gcc_unreachable ();
6925 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6926 new_stmt = gimple_build_assign (vec_dest, data_ref);
6927 new_temp = make_ssa_name (vec_dest, new_stmt);
6928 gimple_assign_set_lhs (new_stmt, new_temp);
6929 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6931 /* 3. Handle explicit realignment if necessary/supported.
6932 Create in loop:
6933 vec_dest = realign_load (msq, lsq, realignment_token) */
6934 if (alignment_support_scheme == dr_explicit_realign_optimized
6935 || alignment_support_scheme == dr_explicit_realign)
6937 lsq = gimple_assign_lhs (new_stmt);
6938 if (!realignment_token)
6939 realignment_token = dataref_ptr;
6940 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6941 new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
6942 msq, lsq, realignment_token);
6943 new_temp = make_ssa_name (vec_dest, new_stmt);
6944 gimple_assign_set_lhs (new_stmt, new_temp);
6945 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6947 if (alignment_support_scheme == dr_explicit_realign_optimized)
6949 gcc_assert (phi);
6950 if (i == vec_num - 1 && j == ncopies - 1)
6951 add_phi_arg (phi, lsq,
6952 loop_latch_edge (containing_loop),
6953 UNKNOWN_LOCATION);
6954 msq = lsq;
6958 /* 4. Handle invariant-load. */
6959 if (inv_p && !bb_vinfo)
6961 gcc_assert (!grouped_load);
6962 /* If we have versioned for aliasing or the loop doesn't
6963 have any data dependencies that would preclude this,
6964 then we are sure this is a loop invariant load and
6965 thus we can insert it on the preheader edge. */
6966 if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
6967 && !nested_in_vect_loop
6968 && hoist_defs_of_uses (stmt, loop))
6970 if (dump_enabled_p ())
6972 dump_printf_loc (MSG_NOTE, vect_location,
6973 "hoisting out of the vectorized "
6974 "loop: ");
6975 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
6977 tree tem = copy_ssa_name (scalar_dest);
6978 gsi_insert_on_edge_immediate
6979 (loop_preheader_edge (loop),
6980 gimple_build_assign (tem,
6981 unshare_expr
6982 (gimple_assign_rhs1 (stmt))));
6983 new_temp = vect_init_vector (stmt, tem, vectype, NULL);
6985 else
6987 gimple_stmt_iterator gsi2 = *gsi;
6988 gsi_next (&gsi2);
6989 new_temp = vect_init_vector (stmt, scalar_dest,
6990 vectype, &gsi2);
6992 new_stmt = SSA_NAME_DEF_STMT (new_temp);
6993 set_vinfo_for_stmt (new_stmt,
6994 new_stmt_vec_info (new_stmt, loop_vinfo,
6995 bb_vinfo));
6998 if (negative)
7000 tree perm_mask = perm_mask_for_reverse (vectype);
7001 new_temp = permute_vec_elements (new_temp, new_temp,
7002 perm_mask, stmt, gsi);
7003 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7006 /* Collect vector loads and later create their permutation in
7007 vect_transform_grouped_load (). */
7008 if (grouped_load || slp_perm)
7009 dr_chain.quick_push (new_temp);
7011 /* Store vector loads in the corresponding SLP_NODE. */
7012 if (slp && !slp_perm)
7013 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7015 /* Bump the vector pointer to account for a gap or for excess
7016 elements loaded for a permuted SLP load. */
7017 if (group_gap_adj != 0)
7019 bool ovf;
7020 tree bump
7021 = wide_int_to_tree (sizetype,
7022 wi::smul (TYPE_SIZE_UNIT (elem_type),
7023 group_gap_adj, &ovf));
7024 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7025 stmt, bump);
7029 if (slp && !slp_perm)
7030 continue;
7032 if (slp_perm)
7034 if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
7035 slp_node_instance, false))
7037 dr_chain.release ();
7038 return false;
7041 else
7043 if (grouped_load)
7045 if (!load_lanes_p)
7046 vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
7047 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
7049 else
7051 if (j == 0)
7052 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7053 else
7054 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7055 prev_stmt_info = vinfo_for_stmt (new_stmt);
7058 dr_chain.release ();
7061 return true;
7064 /* Function vect_is_simple_cond.
7066 Input:
7067 LOOP - the loop that is being vectorized.
7068 COND - Condition that is checked for simple use.
7070 Output:
7071 *COMP_VECTYPE - the vector type for the comparison.
7073 Returns whether a COND can be vectorized. Checks whether
7074 condition operands are supportable using vect_is_simple_use. */
7076 static bool
7077 vect_is_simple_cond (tree cond, gimple stmt, loop_vec_info loop_vinfo,
7078 bb_vec_info bb_vinfo, tree *comp_vectype)
7080 tree lhs, rhs;
7081 tree def;
7082 enum vect_def_type dt;
7083 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
7085 if (!COMPARISON_CLASS_P (cond))
7086 return false;
7088 lhs = TREE_OPERAND (cond, 0);
7089 rhs = TREE_OPERAND (cond, 1);
7091 if (TREE_CODE (lhs) == SSA_NAME)
7093 gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
7094 if (!vect_is_simple_use_1 (lhs, stmt, loop_vinfo, bb_vinfo,
7095 &lhs_def_stmt, &def, &dt, &vectype1))
7096 return false;
7098 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
7099 && TREE_CODE (lhs) != FIXED_CST)
7100 return false;
7102 if (TREE_CODE (rhs) == SSA_NAME)
7104 gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
7105 if (!vect_is_simple_use_1 (rhs, stmt, loop_vinfo, bb_vinfo,
7106 &rhs_def_stmt, &def, &dt, &vectype2))
7107 return false;
7109 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
7110 && TREE_CODE (rhs) != FIXED_CST)
7111 return false;
7113 *comp_vectype = vectype1 ? vectype1 : vectype2;
7114 return true;
7117 /* vectorizable_condition.
7119 Check if STMT is a conditional modify expression that can be vectorized.
7120 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
7121 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
7122 at GSI.
7124 When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
7125 to be used at REDUC_INDEX (in then clause if REDUC_INDEX is 1, and in
7126 else clause if it is 2).
7128 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
7130 bool
7131 vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
7132 gimple *vec_stmt, tree reduc_def, int reduc_index,
7133 slp_tree slp_node)
7135 tree scalar_dest = NULL_TREE;
7136 tree vec_dest = NULL_TREE;
7137 tree cond_expr, then_clause, else_clause;
7138 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7139 tree comp_vectype = NULL_TREE;
7140 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
7141 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
7142 tree vec_compare, vec_cond_expr;
7143 tree new_temp;
7144 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7145 tree def;
7146 enum vect_def_type dt, dts[4];
7147 int ncopies;
7148 enum tree_code code;
7149 stmt_vec_info prev_stmt_info = NULL;
7150 int i, j;
7151 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7152 vec<tree> vec_oprnds0 = vNULL;
7153 vec<tree> vec_oprnds1 = vNULL;
7154 vec<tree> vec_oprnds2 = vNULL;
7155 vec<tree> vec_oprnds3 = vNULL;
7156 tree vec_cmp_type;
7158 if (reduc_index && STMT_SLP_TYPE (stmt_info))
7159 return false;
7161 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
7162 return false;
7164 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
7165 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
7166 && reduc_def))
7167 return false;
7169 /* FORNOW: not yet supported. */
7170 if (STMT_VINFO_LIVE_P (stmt_info))
7172 if (dump_enabled_p ())
7173 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7174 "value used after loop.\n");
7175 return false;
7178 /* Is vectorizable conditional operation? */
7179 if (!is_gimple_assign (stmt))
7180 return false;
7182 code = gimple_assign_rhs_code (stmt);
7184 if (code != COND_EXPR)
7185 return false;
7187 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7188 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
7190 if (slp_node || PURE_SLP_STMT (stmt_info))
7191 ncopies = 1;
7192 else
7193 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
7195 gcc_assert (ncopies >= 1);
7196 if (reduc_index && ncopies > 1)
7197 return false; /* FORNOW */
7199 cond_expr = gimple_assign_rhs1 (stmt);
7200 then_clause = gimple_assign_rhs2 (stmt);
7201 else_clause = gimple_assign_rhs3 (stmt);
7203 if (!vect_is_simple_cond (cond_expr, stmt, loop_vinfo, bb_vinfo,
7204 &comp_vectype)
7205 || !comp_vectype)
7206 return false;
7208 if (TREE_CODE (then_clause) == SSA_NAME)
7210 gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
7211 if (!vect_is_simple_use (then_clause, stmt, loop_vinfo, bb_vinfo,
7212 &then_def_stmt, &def, &dt))
7213 return false;
7215 else if (TREE_CODE (then_clause) != INTEGER_CST
7216 && TREE_CODE (then_clause) != REAL_CST
7217 && TREE_CODE (then_clause) != FIXED_CST)
7218 return false;
7220 if (TREE_CODE (else_clause) == SSA_NAME)
7222 gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
7223 if (!vect_is_simple_use (else_clause, stmt, loop_vinfo, bb_vinfo,
7224 &else_def_stmt, &def, &dt))
7225 return false;
7227 else if (TREE_CODE (else_clause) != INTEGER_CST
7228 && TREE_CODE (else_clause) != REAL_CST
7229 && TREE_CODE (else_clause) != FIXED_CST)
7230 return false;
7232 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype)));
7233 /* The result of a vector comparison should be of signed integer type. */
7234 tree cmp_type = build_nonstandard_integer_type (prec, 0);
7235 vec_cmp_type = get_same_sized_vectype (cmp_type, vectype);
7236 if (vec_cmp_type == NULL_TREE)
7237 return false;
7239 if (!vec_stmt)
7241 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
7242 return expand_vec_cond_expr_p (vectype, comp_vectype);
7245 /* Transform. */
7247 if (!slp_node)
7249 vec_oprnds0.create (1);
7250 vec_oprnds1.create (1);
7251 vec_oprnds2.create (1);
7252 vec_oprnds3.create (1);
7255 /* Handle def. */
7256 scalar_dest = gimple_assign_lhs (stmt);
7257 vec_dest = vect_create_destination_var (scalar_dest, vectype);
7259 /* Handle cond expr. */
7260 for (j = 0; j < ncopies; j++)
7262 gassign *new_stmt = NULL;
7263 if (j == 0)
7265 if (slp_node)
7267 auto_vec<tree, 4> ops;
7268 auto_vec<vec<tree>, 4> vec_defs;
7270 ops.safe_push (TREE_OPERAND (cond_expr, 0));
7271 ops.safe_push (TREE_OPERAND (cond_expr, 1));
7272 ops.safe_push (then_clause);
7273 ops.safe_push (else_clause);
7274 vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
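/* The defs come back in the order the operands were pushed,
   so pop them off back to front.  */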
7275 vec_oprnds3 = vec_defs.pop ();
7276 vec_oprnds2 = vec_defs.pop ();
7277 vec_oprnds1 = vec_defs.pop ();
7278 vec_oprnds0 = vec_defs.pop ();
7280 ops.release ();
7281 vec_defs.release ();
7283 else
7285 gimple gtemp;
7286 vec_cond_lhs =
7287 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0),
7288 stmt, NULL);
7289 vect_is_simple_use (TREE_OPERAND (cond_expr, 0), stmt,
7290 loop_vinfo, NULL, &gtemp, &def, &dts[0]);
7292 vec_cond_rhs =
7293 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
7294 stmt, NULL);
7295 vect_is_simple_use (TREE_OPERAND (cond_expr, 1), stmt,
7296 loop_vinfo, NULL, &gtemp, &def, &dts[1]);
7297 if (reduc_index == 1)
7298 vec_then_clause = reduc_def;
7299 else
7301 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
7302 stmt, NULL);
7303 vect_is_simple_use (then_clause, stmt, loop_vinfo,
7304 NULL, &gtemp, &def, &dts[2]);
7306 if (reduc_index == 2)
7307 vec_else_clause = reduc_def;
7308 else
7310 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
7311 stmt, NULL);
7312 vect_is_simple_use (else_clause, stmt, loop_vinfo,
7313 NULL, &gtemp, &def, &dts[3]);
7317 else
7319 vec_cond_lhs = vect_get_vec_def_for_stmt_copy (dts[0],
7320 vec_oprnds0.pop ());
7321 vec_cond_rhs = vect_get_vec_def_for_stmt_copy (dts[1],
7322 vec_oprnds1.pop ());
7323 vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
7324 vec_oprnds2.pop ());
7325 vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
7326 vec_oprnds3.pop ());
7329 if (!slp_node)
7331 vec_oprnds0.quick_push (vec_cond_lhs);
7332 vec_oprnds1.quick_push (vec_cond_rhs);
7333 vec_oprnds2.quick_push (vec_then_clause);
7334 vec_oprnds3.quick_push (vec_else_clause);
7337 /* Arguments are ready. Create the new vector stmt. */
7338 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
7340 vec_cond_rhs = vec_oprnds1[i];
7341 vec_then_clause = vec_oprnds2[i];
7342 vec_else_clause = vec_oprnds3[i];
7344 vec_compare = build2 (TREE_CODE (cond_expr), vec_cmp_type,
7345 vec_cond_lhs, vec_cond_rhs);
7346 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
7347 vec_compare, vec_then_clause, vec_else_clause);
7349 new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
7350 new_temp = make_ssa_name (vec_dest, new_stmt);
7351 gimple_assign_set_lhs (new_stmt, new_temp);
7352 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7353 if (slp_node)
7354 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7357 if (slp_node)
7358 continue;
7360 if (j == 0)
7361 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7362 else
7363 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7365 prev_stmt_info = vinfo_for_stmt (new_stmt);
7368 vec_oprnds0.release ();
7369 vec_oprnds1.release ();
7370 vec_oprnds2.release ();
7371 vec_oprnds3.release ();
7373 return true;
7377 /* Make sure the statement is vectorizable. */
7379 bool
7380 vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
7382 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7383 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7384 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
7385 bool ok;
7386 tree scalar_type, vectype;
7387 gimple pattern_stmt;
7388 gimple_seq pattern_def_seq;
7390 if (dump_enabled_p ())
7392 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
7393 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7396 if (gimple_has_volatile_ops (stmt))
7398 if (dump_enabled_p ())
7399 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7400 "not vectorized: stmt has volatile operands\n");
7402 return false;
7405 /* Skip stmts that do not need to be vectorized. In loops this is expected
7406 to include:
7407 - the COND_EXPR which is the loop exit condition
7408 - any LABEL_EXPRs in the loop
7409 - computations that are used only for array indexing or loop control.
7410 In basic blocks we only analyze statements that are a part of some SLP
7411 instance, therefore, all the statements are relevant.
7413 The pattern statement needs to be analyzed instead of the original
7414 statement if the original statement is not relevant. Otherwise, we
7415 analyze both statements. In basic blocks we are called from some SLP
7416 instance traversal; there we don't analyze pattern stmts separately,
7417 as the pattern stmts will already be part of the SLP instance. */
7419 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
7420 if (!STMT_VINFO_RELEVANT_P (stmt_info)
7421 && !STMT_VINFO_LIVE_P (stmt_info))
7423 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
7424 && pattern_stmt
7425 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
7426 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
7428 /* Analyze PATTERN_STMT instead of the original stmt. */
7429 stmt = pattern_stmt;
7430 stmt_info = vinfo_for_stmt (pattern_stmt);
7431 if (dump_enabled_p ())
7433 dump_printf_loc (MSG_NOTE, vect_location,
7434 "==> examining pattern statement: ");
7435 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7438 else
7440 if (dump_enabled_p ())
7441 dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");
7443 return true;
7446 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
7447 && node == NULL
7448 && pattern_stmt
7449 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
7450 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
7452 /* Analyze PATTERN_STMT too. */
7453 if (dump_enabled_p ())
7455 dump_printf_loc (MSG_NOTE, vect_location,
7456 "==> examining pattern statement: ");
7457 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7460 if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node))
7461 return false;
7464 if (is_pattern_stmt_p (stmt_info)
7465 && node == NULL
7466 && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
7468 gimple_stmt_iterator si;
7470 for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
7472 gimple pattern_def_stmt = gsi_stmt (si);
7473 if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
7474 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
7476 /* Analyze def stmt of STMT if it's a pattern stmt. */
7477 if (dump_enabled_p ())
7479 dump_printf_loc (MSG_NOTE, vect_location,
7480 "==> examining pattern def statement: ");
7481 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
7484 if (!vect_analyze_stmt (pattern_def_stmt,
7485 need_to_vectorize, node))
7486 return false;
7491 switch (STMT_VINFO_DEF_TYPE (stmt_info))
7493 case vect_internal_def:
7494 break;
7496 case vect_reduction_def:
7497 case vect_nested_cycle:
7498 gcc_assert (!bb_vinfo
7499 && (relevance == vect_used_in_outer
7500 || relevance == vect_used_in_outer_by_reduction
7501 || relevance == vect_used_by_reduction
7502 || relevance == vect_unused_in_scope));
7503 break;
7505 case vect_induction_def:
7506 case vect_constant_def:
7507 case vect_external_def:
7508 case vect_unknown_def_type:
7509 default:
7510 gcc_unreachable ();
7513 if (bb_vinfo)
7515 gcc_assert (PURE_SLP_STMT (stmt_info));
7517 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
7518 if (dump_enabled_p ())
7520 dump_printf_loc (MSG_NOTE, vect_location,
7521 "get vectype for scalar type: ");
7522 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
7523 dump_printf (MSG_NOTE, "\n");
7526 vectype = get_vectype_for_scalar_type (scalar_type);
7527 if (!vectype)
7529 if (dump_enabled_p ())
7531 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7532 "not SLPed: unsupported data-type ");
7533 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
7534 scalar_type);
7535 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
7537 return false;
7540 if (dump_enabled_p ())
7542 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
7543 dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
7544 dump_printf (MSG_NOTE, "\n");
7547 STMT_VINFO_VECTYPE (stmt_info) = vectype;
7550 if (STMT_VINFO_RELEVANT_P (stmt_info))
7552 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
7553 gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
7554 || (is_gimple_call (stmt)
7555 && gimple_call_lhs (stmt) == NULL_TREE));
7556 *need_to_vectorize = true;
7559 if (PURE_SLP_STMT (stmt_info) && !node)
7561 dump_printf_loc (MSG_NOTE, vect_location,
7562 "handled only by SLP analysis\n");
7563 return true;
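/* Dispatch to the kind-specific analysis routines. Each vectorizable_*
   routine, called here with NULL in place of a statement iterator and
   output vector stmt (i.e. in analysis-only mode), checks whether STMT
   is a statement of its kind that the target can vectorize, and records
   the chosen STMT_VINFO_TYPE on success. */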
7566 ok = true;
7567 if (!bb_vinfo
7568 && (STMT_VINFO_RELEVANT_P (stmt_info)
7569 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
7570 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
7571 || vectorizable_conversion (stmt, NULL, NULL, node)
7572 || vectorizable_shift (stmt, NULL, NULL, node)
7573 || vectorizable_operation (stmt, NULL, NULL, node)
7574 || vectorizable_assignment (stmt, NULL, NULL, node)
7575 || vectorizable_load (stmt, NULL, NULL, node, NULL)
7576 || vectorizable_call (stmt, NULL, NULL, node)
7577 || vectorizable_store (stmt, NULL, NULL, node)
7578 || vectorizable_reduction (stmt, NULL, NULL, node)
7579 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node));
7580 else
7582 if (bb_vinfo)
7583 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
7584 || vectorizable_conversion (stmt, NULL, NULL, node)
7585 || vectorizable_shift (stmt, NULL, NULL, node)
7586 || vectorizable_operation (stmt, NULL, NULL, node)
7587 || vectorizable_assignment (stmt, NULL, NULL, node)
7588 || vectorizable_load (stmt, NULL, NULL, node, NULL)
7589 || vectorizable_call (stmt, NULL, NULL, node)
7590 || vectorizable_store (stmt, NULL, NULL, node)
7591 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node));
7594 if (!ok)
7596 if (dump_enabled_p ())
7598 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7599 "not vectorized: relevant stmt not ");
7600 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
7601 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
7604 return false;
7607 if (bb_vinfo)
7608 return true;
7610 /* Stmts that are (also) "live" (i.e., that are used outside the loop)
7611 need extra handling, except for vectorizable reductions. */
7612 if (STMT_VINFO_LIVE_P (stmt_info)
7613 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
7614 ok = vectorizable_live_operation (stmt, NULL, NULL);
7616 if (!ok)
7618 if (dump_enabled_p ())
7620 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7621 "not vectorized: live stmt not ");
7622 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
7623 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
7626 return false;
7629 return true;
7633 /* Function vect_transform_stmt.
7635 Create a vectorized stmt to replace STMT, and insert it at GSI. */
7637 bool
7638 vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
7639 bool *grouped_store, slp_tree slp_node,
7640 slp_instance slp_node_instance)
7642 bool is_store = false;
7643 gimple vec_stmt = NULL;
7644 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7645 bool done;
7647 gimple old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
7649 switch (STMT_VINFO_TYPE (stmt_info))
7651 case type_demotion_vec_info_type:
7652 case type_promotion_vec_info_type:
7653 case type_conversion_vec_info_type:
7654 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
7655 gcc_assert (done);
7656 break;
7658 case induc_vec_info_type:
7659 gcc_assert (!slp_node);
7660 done = vectorizable_induction (stmt, gsi, &vec_stmt);
7661 gcc_assert (done);
7662 break;
7664 case shift_vec_info_type:
7665 done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
7666 gcc_assert (done);
7667 break;
7669 case op_vec_info_type:
7670 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
7671 gcc_assert (done);
7672 break;
7674 case assignment_vec_info_type:
7675 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
7676 gcc_assert (done);
7677 break;
7679 case load_vec_info_type:
7680 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
7681 slp_node_instance);
7682 gcc_assert (done);
7683 break;
7685 case store_vec_info_type:
7686 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
7687 gcc_assert (done);
7688 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
7690 /* In case of interleaving, the whole chain is vectorized when the
7691 last store in the chain is reached. Store stmts before the last
7692 one are skipped, and their vec_stmt_info shouldn't be freed
7693 meanwhile. */
7694 *grouped_store = true;
7695 if (STMT_VINFO_VEC_STMT (stmt_info))
7696 is_store = true;
7698 else
7699 is_store = true;
7700 break;
7702 case condition_vec_info_type:
7703 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
7704 gcc_assert (done);
7705 break;
7707 case call_vec_info_type:
7708 done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
7709 stmt = gsi_stmt (*gsi);
7710 if (is_gimple_call (stmt)
7711 && gimple_call_internal_p (stmt)
7712 && gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
7713 is_store = true;
7714 break;
7716 case call_simd_clone_vec_info_type:
7717 done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
7718 stmt = gsi_stmt (*gsi);
7719 break;
7721 case reduc_vec_info_type:
7722 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
7723 gcc_assert (done);
7724 break;
7726 default:
7727 if (!STMT_VINFO_LIVE_P (stmt_info))
7729 if (dump_enabled_p ())
7730 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7731 "stmt not supported.\n");
7732 gcc_unreachable ();
7736 /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
7737 This would break hybrid SLP vectorization. */
7738 if (slp_node)
7739 gcc_assert (!vec_stmt
7740 && STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt);
7742 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
7743 is being vectorized, but outside the immediately enclosing loop. */
7744 if (vec_stmt
7745 && STMT_VINFO_LOOP_VINFO (stmt_info)
7746 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
7747 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
7748 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
7749 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
7750 || STMT_VINFO_RELEVANT (stmt_info) ==
7751 vect_used_in_outer_by_reduction))
7753 struct loop *innerloop = LOOP_VINFO_LOOP (
7754 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
7755 imm_use_iterator imm_iter;
7756 use_operand_p use_p;
7757 tree scalar_dest;
7758 gimple exit_phi;
7760 if (dump_enabled_p ())
7761 dump_printf_loc (MSG_NOTE, vect_location,
7762 "Record the vdef for outer-loop vectorization.\n");
7764 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
7765 (to be used when vectorizing outer-loop stmts that use the DEF of
7766 STMT). */
7767 if (gimple_code (stmt) == GIMPLE_PHI)
7768 scalar_dest = PHI_RESULT (stmt);
7769 else
7770 scalar_dest = gimple_assign_lhs (stmt);
7772 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
7774 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
7776 exit_phi = USE_STMT (use_p);
7777 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
7782 /* Handle stmts whose DEF is used outside the loop-nest that is
7783 being vectorized. */
7784 if (STMT_VINFO_LIVE_P (stmt_info)
7785 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
7787 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
7788 gcc_assert (done);
7791 if (vec_stmt)
7792 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
7794 return is_store;
7798 /* Remove a group of stores (for SLP or interleaving), free their
7799 stmt_vec_info. */
7801 void
7802 vect_remove_stores (gimple first_stmt)
7804 gimple next = first_stmt;
7805 gimple tmp;
7806 gimple_stmt_iterator next_si;
7808 while (next)
7810 stmt_vec_info stmt_info = vinfo_for_stmt (next);
7812 tmp = GROUP_NEXT_ELEMENT (stmt_info);
7813 if (is_pattern_stmt_p (stmt_info))
7814 next = STMT_VINFO_RELATED_STMT (stmt_info);
7815 /* Free the attached stmt_vec_info and remove the stmt. */
7816 next_si = gsi_for_stmt (next);
7817 unlink_stmt_vdef (next);
7818 gsi_remove (&next_si, true);
7819 release_defs (next);
7820 free_stmt_vec_info (next);
7821 next = tmp;
7826 /* Function new_stmt_vec_info.
7828 Create and initialize a new stmt_vec_info struct for STMT. */
7830 stmt_vec_info
7831 new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
7832 bb_vec_info bb_vinfo)
7834 stmt_vec_info res;
7835 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
7837 STMT_VINFO_TYPE (res) = undef_vec_info_type;
7838 STMT_VINFO_STMT (res) = stmt;
7839 STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
7840 STMT_VINFO_BB_VINFO (res) = bb_vinfo;
7841 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
7842 STMT_VINFO_LIVE_P (res) = false;
7843 STMT_VINFO_VECTYPE (res) = NULL;
7844 STMT_VINFO_VEC_STMT (res) = NULL;
7845 STMT_VINFO_VECTORIZABLE (res) = true;
7846 STMT_VINFO_IN_PATTERN_P (res) = false;
7847 STMT_VINFO_RELATED_STMT (res) = NULL;
7848 STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
7849 STMT_VINFO_DATA_REF (res) = NULL;
7851 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
7852 STMT_VINFO_DR_OFFSET (res) = NULL;
7853 STMT_VINFO_DR_INIT (res) = NULL;
7854 STMT_VINFO_DR_STEP (res) = NULL;
7855 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
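/* A loop-header PHI may later turn out to be an induction or a
   reduction, which the analysis of scalar cycles determines, so its
   def type starts out as unknown; everything else starts out as a
   plain internal def. */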
7857 if (gimple_code (stmt) == GIMPLE_PHI
7858 && is_loop_header_bb_p (gimple_bb (stmt)))
7859 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
7860 else
7861 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
7863 STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
7864 STMT_SLP_TYPE (res) = loop_vect;
7865 GROUP_FIRST_ELEMENT (res) = NULL;
7866 GROUP_NEXT_ELEMENT (res) = NULL;
7867 GROUP_SIZE (res) = 0;
7868 GROUP_STORE_COUNT (res) = 0;
7869 GROUP_GAP (res) = 0;
7870 GROUP_SAME_DR_STMT (res) = NULL;
7872 return res;
7876 /* Create the vector in which stmt_vec_info structs are stored. */
7878 void
7879 init_stmt_vec_info_vec (void)
7881 gcc_assert (!stmt_vec_info_vec.exists ());
7882 stmt_vec_info_vec.create (50);
7886 /* Free the vector of stmt_vec_info structs. */
7888 void
7889 free_stmt_vec_info_vec (void)
7891 unsigned int i;
7892 vec_void_p info;
7893 FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
7894 if (info != NULL)
7895 free_stmt_vec_info (STMT_VINFO_STMT ((stmt_vec_info) info));
7896 gcc_assert (stmt_vec_info_vec.exists ());
7897 stmt_vec_info_vec.release ();
7901 /* Free stmt vectorization related info. */
7903 void
7904 free_stmt_vec_info (gimple stmt)
7906 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7908 if (!stmt_info)
7909 return;
7911 /* Check if this statement has a related "pattern stmt"
7912 (introduced by the vectorizer during the pattern recognition
7913 pass). Free the pattern's stmt_vec_info and the stmt_vec_infos
7914 of its pattern def stmts too. */
7915 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
7917 stmt_vec_info patt_info
7918 = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
7919 if (patt_info)
7921 gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
7922 gimple patt_stmt = STMT_VINFO_STMT (patt_info);
7923 gimple_set_bb (patt_stmt, NULL);
7924 tree lhs = gimple_get_lhs (patt_stmt);
7925 if (lhs && TREE_CODE (lhs) == SSA_NAME)
7926 release_ssa_name (lhs);
7927 if (seq)
7929 gimple_stmt_iterator si;
7930 for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
7932 gimple seq_stmt = gsi_stmt (si);
7933 gimple_set_bb (seq_stmt, NULL);
7934 lhs = gimple_get_lhs (seq_stmt);
7935 if (lhs && TREE_CODE (lhs) == SSA_NAME)
7936 release_ssa_name (lhs);
7937 free_stmt_vec_info (seq_stmt);
7940 free_stmt_vec_info (patt_stmt);
7944 STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
7945 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
7946 set_vinfo_for_stmt (stmt, NULL);
7947 free (stmt_info);
7951 /* Function get_vectype_for_scalar_type_and_size.
7953 Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
7954 by the target. */
7956 static tree
7957 get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
7959 machine_mode inner_mode = TYPE_MODE (scalar_type);
7960 machine_mode simd_mode;
7961 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
7962 int nunits;
7963 tree vectype;
7965 if (nbytes == 0)
7966 return NULL_TREE;
7968 if (GET_MODE_CLASS (inner_mode) != MODE_INT
7969 && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
7970 return NULL_TREE;
7972 /* For vector types of elements whose mode precision doesn't
7973 match their type's precision we use an element type of mode
7974 precision. The vectorization routines will have to make sure
7975 they support the proper result truncation/extension.
7976 We also make sure to build vector types with INTEGER_TYPE
7977 component type only. */
7978 if (INTEGRAL_TYPE_P (scalar_type)
7979 && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
7980 || TREE_CODE (scalar_type) != INTEGER_TYPE))
7981 scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
7982 TYPE_UNSIGNED (scalar_type));
7984 /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
7985 When the component mode passes the above test, simply use a type
7986 corresponding to that mode. The theory is that any use that
7987 would cause problems with this will disable vectorization anyway. */
7988 else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
7989 && !INTEGRAL_TYPE_P (scalar_type))
7990 scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);
7992 /* We can't build a vector type of elements with alignment bigger than
7993 their size. */
7994 else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
7995 scalar_type = lang_hooks.types.type_for_mode (inner_mode,
7996 TYPE_UNSIGNED (scalar_type));
7998 /* If we fell back to using the mode, fail if there was
7999 no scalar type for it. */
8000 if (scalar_type == NULL_TREE)
8001 return NULL_TREE;
8003 /* If no size was supplied, use the mode the target prefers. Otherwise
8004 look up a vector mode of the specified size. */
8005 if (size == 0)
8006 simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
8007 else
8008 simd_mode = mode_for_vector (inner_mode, size / nbytes);
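/* For example, with a 4-byte SImode element and SIZE == 16 this asks
   for a 4-element vector mode, and nunits below becomes 4, assuming
   the target provides such a mode. */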
8009 nunits = GET_MODE_SIZE (simd_mode) / nbytes;
8010 if (nunits <= 1)
8011 return NULL_TREE;
8013 vectype = build_vector_type (scalar_type, nunits);
8015 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
8016 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
8017 return NULL_TREE;
8019 return vectype;
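/* Size in bytes of the vectors being created; zero until the first
   successful call to get_vectype_for_scalar_type below latches it
   (from the target's preferred SIMD mode). */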
8022 unsigned int current_vector_size;
8024 /* Function get_vectype_for_scalar_type.
8026 Returns the vector type corresponding to SCALAR_TYPE as supported
8027 by the target. */
8029 tree
8030 get_vectype_for_scalar_type (tree scalar_type)
8032 tree vectype;
8033 vectype = get_vectype_for_scalar_type_and_size (scalar_type,
8034 current_vector_size);
8035 if (vectype
8036 && current_vector_size == 0)
8037 current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
8038 return vectype;
8041 /* Function get_same_sized_vectype
8043 Returns a vector type corresponding to SCALAR_TYPE with the same
8044 size as VECTOR_TYPE, if supported by the target. */
8046 tree
8047 get_same_sized_vectype (tree scalar_type, tree vector_type)
8049 return get_vectype_for_scalar_type_and_size
8050 (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
8053 /* Function vect_is_simple_use.
8055 Input:
8056 LOOP_VINFO - the vect info of the loop that is being vectorized.
8057 BB_VINFO - the vect info of the basic block that is being vectorized.
8058 OPERAND - operand of STMT in the loop or bb.
8059 DEF - the defining stmt in case OPERAND is an SSA_NAME.
8061 Returns whether a stmt with OPERAND can be vectorized.
8062 For loops, supportable operands are constants, loop invariants, and operands
8063 that are defined by the current iteration of the loop. Unsupportable
8064 operands are those that are defined by a previous iteration of the loop (as
8065 is the case in reduction/induction computations).
8066 For basic blocks, supportable operands are constants and bb invariants.
8067 For now, operands defined outside the basic block are not supported. */
8069 bool
8070 vect_is_simple_use (tree operand, gimple stmt, loop_vec_info loop_vinfo,
8071 bb_vec_info bb_vinfo, gimple *def_stmt,
8072 tree *def, enum vect_def_type *dt)
8074 *def_stmt = NULL;
8075 *def = NULL_TREE;
8076 *dt = vect_unknown_def_type;
8078 if (dump_enabled_p ())
8080 dump_printf_loc (MSG_NOTE, vect_location,
8081 "vect_is_simple_use: operand ");
8082 dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
8083 dump_printf (MSG_NOTE, "\n");
8086 if (CONSTANT_CLASS_P (operand))
8088 *dt = vect_constant_def;
8089 return true;
8092 if (is_gimple_min_invariant (operand))
8094 *def = operand;
8095 *dt = vect_external_def;
8096 return true;
8099 if (TREE_CODE (operand) != SSA_NAME)
8101 if (dump_enabled_p ())
8102 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8103 "not ssa-name.\n");
8104 return false;
8107 if (SSA_NAME_IS_DEFAULT_DEF (operand))
8109 *def = operand;
8110 *dt = vect_external_def;
8111 return true;
8114 *def_stmt = SSA_NAME_DEF_STMT (operand);
8115 if (dump_enabled_p ())
8117 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
8118 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
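/* A def outside the region being vectorized (outside the loop, or,
   for basic-block vectorization, outside the block or defined by a
   PHI) is treated as external, i.e. invariant in the region. */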
8121 basic_block bb = gimple_bb (*def_stmt);
8122 if ((loop_vinfo && !flow_bb_inside_loop_p (LOOP_VINFO_LOOP (loop_vinfo), bb))
8123 || (bb_vinfo
8124 && (bb != BB_VINFO_BB (bb_vinfo)
8125 || gimple_code (*def_stmt) == GIMPLE_PHI)))
8126 *dt = vect_external_def;
8127 else
8129 stmt_vec_info stmt_vinfo = vinfo_for_stmt (*def_stmt);
8130 if (bb_vinfo && !STMT_VINFO_VECTORIZABLE (stmt_vinfo))
8131 *dt = vect_external_def;
8132 else
8133 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
8136 if (dump_enabled_p ())
8138 dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
8139 switch (*dt)
8141 case vect_uninitialized_def:
8142 dump_printf (MSG_NOTE, "uninitialized\n");
8143 break;
8144 case vect_constant_def:
8145 dump_printf (MSG_NOTE, "constant\n");
8146 break;
8147 case vect_external_def:
8148 dump_printf (MSG_NOTE, "external\n");
8149 break;
8150 case vect_internal_def:
8151 dump_printf (MSG_NOTE, "internal\n");
8152 break;
8153 case vect_induction_def:
8154 dump_printf (MSG_NOTE, "induction\n");
8155 break;
8156 case vect_reduction_def:
8157 dump_printf (MSG_NOTE, "reduction\n");
8158 break;
8159 case vect_double_reduction_def:
8160 dump_printf (MSG_NOTE, "double reduction\n");
8161 break;
8162 case vect_nested_cycle:
8163 dump_printf (MSG_NOTE, "nested cycle\n");
8164 break;
8165 case vect_unknown_def_type:
8166 dump_printf (MSG_NOTE, "unknown\n");
8167 break;
8171 if (*dt == vect_unknown_def_type
8172 || (stmt
8173 && *dt == vect_double_reduction_def
8174 && gimple_code (stmt) != GIMPLE_PHI))
8176 if (dump_enabled_p ())
8177 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8178 "Unsupported pattern.\n");
8179 return false;
8182 switch (gimple_code (*def_stmt))
8184 case GIMPLE_PHI:
8185 *def = gimple_phi_result (*def_stmt);
8186 break;
8188 case GIMPLE_ASSIGN:
8189 *def = gimple_assign_lhs (*def_stmt);
8190 break;
8192 case GIMPLE_CALL:
8193 *def = gimple_call_lhs (*def_stmt);
8194 if (*def != NULL)
8195 break;
8196 /* FALLTHRU */
8197 default:
8198 if (dump_enabled_p ())
8199 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8200 "unsupported defining stmt:\n");
8201 return false;
8204 return true;
8207 /* Function vect_is_simple_use_1.
8209 Same as vect_is_simple_use but also determines the vector operand
8210 type of OPERAND and stores it to *VECTYPE. If the definition of
8211 OPERAND is vect_uninitialized_def, vect_constant_def or
8212 vect_external_def, *VECTYPE will be set to NULL_TREE and the caller
8213 is responsible for computing the best-suited vector type for the
8214 scalar operand. */
8216 bool
8217 vect_is_simple_use_1 (tree operand, gimple stmt, loop_vec_info loop_vinfo,
8218 bb_vec_info bb_vinfo, gimple *def_stmt,
8219 tree *def, enum vect_def_type *dt, tree *vectype)
8221 if (!vect_is_simple_use (operand, stmt, loop_vinfo, bb_vinfo, def_stmt,
8222 def, dt))
8223 return false;
8225 /* Now get a vector type if the def is internal, otherwise supply
8226 NULL_TREE and leave it up to the caller to figure out a proper
8227 type for the use stmt. */
8228 if (*dt == vect_internal_def
8229 || *dt == vect_induction_def
8230 || *dt == vect_reduction_def
8231 || *dt == vect_double_reduction_def
8232 || *dt == vect_nested_cycle)
8234 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
8236 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
8237 && !STMT_VINFO_RELEVANT (stmt_info)
8238 && !STMT_VINFO_LIVE_P (stmt_info))
8239 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
8241 *vectype = STMT_VINFO_VECTYPE (stmt_info);
8242 gcc_assert (*vectype != NULL_TREE);
8244 else if (*dt == vect_uninitialized_def
8245 || *dt == vect_constant_def
8246 || *dt == vect_external_def)
8247 *vectype = NULL_TREE;
8248 else
8249 gcc_unreachable ();
8251 return true;
8255 /* Function supportable_widening_operation
8257 Check whether an operation represented by the code CODE is a
8258 widening operation that is supported by the target platform in
8259 vector form (i.e., when operating on arguments of type VECTYPE_IN
8260 producing a result of type VECTYPE_OUT).
8262 Widening operations we currently support are NOP (CONVERT), FLOAT,
8263 WIDEN_MULT and WIDEN_LSHIFT. This function checks if these operations
8264 are supported by the target platform either directly (via vector
8265 tree-codes), or via target builtins.
8267 Output:
8268 - CODE1 and CODE2 are codes of vector operations to be used when
8269 vectorizing the operation, if available.
8270 - MULTI_STEP_CVT determines the number of required intermediate steps in
8271 case of multi-step conversion (like char->short->int - in that case
8272 MULTI_STEP_CVT will be 1).
8273 - INTERM_TYPES contains the intermediate type required to perform the
8274 widening operation (short in the above example). */
8276 bool
8277 supportable_widening_operation (enum tree_code code, gimple stmt,
8278 tree vectype_out, tree vectype_in,
8279 enum tree_code *code1, enum tree_code *code2,
8280 int *multi_step_cvt,
8281 vec<tree> *interm_types)
8283 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8284 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
8285 struct loop *vect_loop = NULL;
8286 machine_mode vec_mode;
8287 enum insn_code icode1, icode2;
8288 optab optab1, optab2;
8289 tree vectype = vectype_in;
8290 tree wide_vectype = vectype_out;
8291 enum tree_code c1, c2;
8292 int i;
8293 tree prev_type, intermediate_type;
8294 machine_mode intermediate_mode, prev_mode;
8295 optab optab3, optab4;
8297 *multi_step_cvt = 0;
8298 if (loop_info)
8299 vect_loop = LOOP_VINFO_LOOP (loop_info);
8301 switch (code)
8303 case WIDEN_MULT_EXPR:
8304 /* The result of a vectorized widening operation usually requires
8305 two vectors (because the widened results do not fit into one vector).
8306 The generated vector results would normally be expected to be
8307 generated in the same order as in the original scalar computation,
8308 i.e. if 8 results are generated in each vector iteration, they are
8309 to be organized as follows:
8310 vect1: [res1,res2,res3,res4],
8311 vect2: [res5,res6,res7,res8].
8313 However, in the special case that the result of the widening
8314 operation is used in a reduction computation only, the order doesn't
8315 matter (because when vectorizing a reduction we change the order of
8316 the computation). Some targets can take advantage of this and
8317 generate more efficient code. For example, targets like Altivec,
8318 that support widen_mult using a sequence of {mult_even,mult_odd}
8319 generate the following vectors:
8320 vect1: [res1,res3,res5,res7],
8321 vect2: [res2,res4,res6,res8].
8323 When vectorizing outer-loops, we execute the inner-loop sequentially
8324 (each vectorized inner-loop iteration contributes to VF outer-loop
8325 iterations in parallel). We therefore don't allow changing the
8326 order of the computation in the inner-loop during outer-loop
8327 vectorization. */
8328 /* TODO: Another case in which order doesn't *really* matter is when we
8329 widen and then contract again, e.g. (short)((int)x * y >> 8).
8330 Normally, pack_trunc performs an even/odd permute, whereas the
8331 repack from an even/odd expansion would be an interleave, which
8332 would be significantly simpler for e.g. AVX2. */
8333 /* In any case, in order to avoid duplicating the code below, recurse
8334 on VEC_WIDEN_MULT_EVEN_EXPR. If it succeeds, all the return values
8335 are properly set up for the caller. If we fail, we'll continue with
8336 a VEC_WIDEN_MULT_LO/HI_EXPR check. */
8337 if (vect_loop
8338 && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
8339 && !nested_in_vect_loop_p (vect_loop, stmt)
8340 && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
8341 stmt, vectype_out, vectype_in,
8342 code1, code2, multi_step_cvt,
8343 interm_types))
8345 /* Elements in a vector with vect_used_by_reduction property cannot
8346 be reordered if the use chain with this property does not have the
8347 same operation. One such example is s += a * b, where elements
8348 in a and b cannot be reordered. Here we check if the vector defined
8349 by STMT is only directly used in the reduction statement. */
8350 tree lhs = gimple_assign_lhs (stmt);
8351 use_operand_p dummy;
8352 gimple use_stmt;
8353 stmt_vec_info use_stmt_info = NULL;
8354 if (single_imm_use (lhs, &dummy, &use_stmt)
8355 && (use_stmt_info = vinfo_for_stmt (use_stmt))
8356 && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
8357 return true;
8359 c1 = VEC_WIDEN_MULT_LO_EXPR;
8360 c2 = VEC_WIDEN_MULT_HI_EXPR;
8361 break;
8363 case VEC_WIDEN_MULT_EVEN_EXPR:
8364 /* Support the recursion induced just above. */
8365 c1 = VEC_WIDEN_MULT_EVEN_EXPR;
8366 c2 = VEC_WIDEN_MULT_ODD_EXPR;
8367 break;
8369 case WIDEN_LSHIFT_EXPR:
8370 c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
8371 c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
8372 break;
8374 CASE_CONVERT:
8375 c1 = VEC_UNPACK_LO_EXPR;
8376 c2 = VEC_UNPACK_HI_EXPR;
8377 break;
8379 case FLOAT_EXPR:
8380 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
8381 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
8382 break;
8384 case FIX_TRUNC_EXPR:
8385 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
8386 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
8387 computing the operation. */
8388 return false;
8390 default:
8391 gcc_unreachable ();
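/* The lo/hi codes name register halves, whose correspondence to
   element order is reversed on big-endian targets, so swap them; the
   even/odd pair selects elements by position and is unaffected. */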
8394 if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
8395 std::swap (c1, c2);
8397 if (code == FIX_TRUNC_EXPR)
8399 /* The signedness is determined from the output operand. */
8400 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
8401 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
8403 else
8405 optab1 = optab_for_tree_code (c1, vectype, optab_default);
8406 optab2 = optab_for_tree_code (c2, vectype, optab_default);
8409 if (!optab1 || !optab2)
8410 return false;
8412 vec_mode = TYPE_MODE (vectype);
8413 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
8414 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
8415 return false;
8417 *code1 = c1;
8418 *code2 = c2;
8420 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
8421 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
8422 return true;
8424 /* Check if it's a multi-step conversion that can be done using intermediate
8425 types. */
8427 prev_type = vectype;
8428 prev_mode = vec_mode;
8430 if (!CONVERT_EXPR_CODE_P (code))
8431 return false;
8433 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
8434 intermediate steps in the promotion sequence. We try
8435 MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
8436 not. */
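/* For example, widening char to int on a target that only provides
   QImode->HImode and HImode->SImode unpacks: the first iteration
   records the HImode intermediate type and bumps *MULTI_STEP_CVT to 1,
   and the second iteration reaches the SImode result and succeeds. */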
8437 interm_types->create (MAX_INTERM_CVT_STEPS);
8438 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
8440 intermediate_mode = insn_data[icode1].operand[0].mode;
8441 intermediate_type
8442 = lang_hooks.types.type_for_mode (intermediate_mode,
8443 TYPE_UNSIGNED (prev_type));
8444 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
8445 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
8447 if (!optab3 || !optab4
8448 || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
8449 || insn_data[icode1].operand[0].mode != intermediate_mode
8450 || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
8451 || insn_data[icode2].operand[0].mode != intermediate_mode
8452 || ((icode1 = optab_handler (optab3, intermediate_mode))
8453 == CODE_FOR_nothing)
8454 || ((icode2 = optab_handler (optab4, intermediate_mode))
8455 == CODE_FOR_nothing))
8456 break;
8458 interm_types->quick_push (intermediate_type);
8459 (*multi_step_cvt)++;
8461 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
8462 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
8463 return true;
8465 prev_type = intermediate_type;
8466 prev_mode = intermediate_mode;
8469 interm_types->release ();
8470 return false;
8474 /* Function supportable_narrowing_operation
8476 Check whether an operation represented by the code CODE is a
8477 narrowing operation that is supported by the target platform in
8478 vector form (i.e., when operating on arguments of type VECTYPE_IN
8479 and producing a result of type VECTYPE_OUT).
8481 Narrowing operations we currently support are NOP (CONVERT) and
8482 FIX_TRUNC. This function checks if these operations are supported by
8483 the target platform directly via vector tree-codes.
8485 Output:
8486 - CODE1 is the code of a vector operation to be used when
8487 vectorizing the operation, if available.
8488 - MULTI_STEP_CVT determines the number of required intermediate steps in
8489 case of multi-step conversion (like int->short->char - in that case
8490 MULTI_STEP_CVT will be 1).
8491 - INTERM_TYPES contains the intermediate type required to perform the
8492 narrowing operation (short in the above example). */
8494 bool
8495 supportable_narrowing_operation (enum tree_code code,
8496 tree vectype_out, tree vectype_in,
8497 enum tree_code *code1, int *multi_step_cvt,
8498 vec<tree> *interm_types)
8500 machine_mode vec_mode;
8501 enum insn_code icode1;
8502 optab optab1, interm_optab;
8503 tree vectype = vectype_in;
8504 tree narrow_vectype = vectype_out;
8505 enum tree_code c1;
8506 tree intermediate_type;
8507 machine_mode intermediate_mode, prev_mode;
8508 int i;
8509 bool uns;
8511 *multi_step_cvt = 0;
8512 switch (code)
8514 CASE_CONVERT:
8515 c1 = VEC_PACK_TRUNC_EXPR;
8516 break;
8518 case FIX_TRUNC_EXPR:
8519 c1 = VEC_PACK_FIX_TRUNC_EXPR;
8520 break;
8522 case FLOAT_EXPR:
8523 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
8524 tree code and optabs used for computing the operation. */
8525 return false;
8527 default:
8528 gcc_unreachable ();
8531 if (code == FIX_TRUNC_EXPR)
8532 /* The signedness is determined from the output operand. */
8533 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
8534 else
8535 optab1 = optab_for_tree_code (c1, vectype, optab_default);
8537 if (!optab1)
8538 return false;
8540 vec_mode = TYPE_MODE (vectype);
8541 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
8542 return false;
8544 *code1 = c1;
8546 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
8547 return true;
8549 /* Check if it's a multi-step conversion that can be done using intermediate
8550 types. */
8551 prev_mode = vec_mode;
8552 if (code == FIX_TRUNC_EXPR)
8553 uns = TYPE_UNSIGNED (vectype_out);
8554 else
8555 uns = TYPE_UNSIGNED (vectype);
8557 /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
8558 conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
8559 costly than signed. */
8560 if (code == FIX_TRUNC_EXPR && uns)
8562 enum insn_code icode2;
8564 intermediate_type
8565 = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
8566 interm_optab
8567 = optab_for_tree_code (c1, intermediate_type, optab_default);
8568 if (interm_optab != unknown_optab
8569 && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
8570 && insn_data[icode1].operand[0].mode
8571 == insn_data[icode2].operand[0].mode)
8573 uns = false;
8574 optab1 = interm_optab;
8575 icode1 = icode2;
8579 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
8580 intermediate steps in the demotion sequence. We try
8581 MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not. */
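/* For example, narrowing int to char with only pairwise pack support
   goes int->short->char: the short intermediate type is recorded and
   *MULTI_STEP_CVT becomes 1 before the char result is reached. */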
8582 interm_types->create (MAX_INTERM_CVT_STEPS);
8583 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
8585 intermediate_mode = insn_data[icode1].operand[0].mode;
8586 intermediate_type
8587 = lang_hooks.types.type_for_mode (intermediate_mode, uns);
8588 interm_optab
8589 = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
8590 optab_default);
8591 if (!interm_optab
8592 || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
8593 || insn_data[icode1].operand[0].mode != intermediate_mode
8594 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
8595 == CODE_FOR_nothing))
8596 break;
8598 interm_types->quick_push (intermediate_type);
8599 (*multi_step_cvt)++;
8601 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
8602 return true;
8604 prev_mode = intermediate_mode;
8605 optab1 = interm_optab;
8608 interm_types->release ();
8609 return false;