official-gcc.git: gcc-4_9-branch / gcc / tree-vect-stmts.c (merge of branches/gcc-4_9-branch rev 225109)
1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003-2014 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
4 and Ira Rosen <irar@il.ibm.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "dumpfile.h"
26 #include "tm.h"
27 #include "tree.h"
28 #include "stor-layout.h"
29 #include "target.h"
30 #include "basic-block.h"
31 #include "gimple-pretty-print.h"
32 #include "tree-ssa-alias.h"
33 #include "internal-fn.h"
34 #include "tree-eh.h"
35 #include "gimple-expr.h"
36 #include "is-a.h"
37 #include "gimple.h"
38 #include "gimplify.h"
39 #include "gimple-iterator.h"
40 #include "gimplify-me.h"
41 #include "gimple-ssa.h"
42 #include "tree-cfg.h"
43 #include "tree-phinodes.h"
44 #include "ssa-iterators.h"
45 #include "stringpool.h"
46 #include "tree-ssanames.h"
47 #include "tree-ssa-loop-manip.h"
48 #include "cfgloop.h"
49 #include "tree-ssa-loop.h"
50 #include "tree-scalar-evolution.h"
51 #include "expr.h"
52 #include "recog.h" /* FIXME: for insn_data */
53 #include "optabs.h"
54 #include "diagnostic-core.h"
55 #include "tree-vectorizer.h"
56 #include "dumpfile.h"
57 #include "cgraph.h"
59 /* For lang_hooks.types.type_for_mode. */
60 #include "langhooks.h"
62 /* Return the vectorized type for the given statement. */
64 tree
65 stmt_vectype (struct _stmt_vec_info *stmt_info)
67 return STMT_VINFO_VECTYPE (stmt_info);
70 /* Return TRUE iff the given statement is in an inner loop relative to
71 the loop being vectorized. */
72 bool
73 stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
75 gimple stmt = STMT_VINFO_STMT (stmt_info);
76 basic_block bb = gimple_bb (stmt);
77 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
78 struct loop* loop;
80 if (!loop_vinfo)
81 return false;
83 loop = LOOP_VINFO_LOOP (loop_vinfo);
85 return (bb->loop_father == loop->inner);
88 /* Record the cost of a statement, either by directly informing the
89 target model or by saving it in a vector for later processing.
90 Return a preliminary estimate of the statement's cost. */
92 unsigned
93 record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
94 enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
95 int misalign, enum vect_cost_model_location where)
97 if (body_cost_vec)
99 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
100 add_stmt_info_to_vec (body_cost_vec, count, kind,
101 stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
102 misalign);
103 return (unsigned)
104 (builtin_vectorization_cost (kind, vectype, misalign) * count);
107 else
109 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
110 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
111 void *target_cost_data;
113 if (loop_vinfo)
114 target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
115 else
116 target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);
118 return add_stmt_cost (target_cost_data, count, kind, stmt_info,
119 misalign, where);
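/* For illustration (a sketch based on the callers further down in this
   file): a cost-model routine typically records the in-loop copies and
   any prologue setup separately, e.g.

     inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
                                     stmt_info, 0, vect_body);
     prologue_cost = record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
                                       stmt_info, 0, vect_prologue);

   When BODY_COST_VEC is NULL the cost goes straight to the target via
   add_stmt_cost instead of being saved for later processing.  */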
123 /* Return a variable of type ELEM_TYPE[NELEMS]. */
125 static tree
126 create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
128 return create_tmp_var (build_array_type_nelts (elem_type, nelems),
129 "vect_array");
132 /* ARRAY is an array of vectors created by create_vector_array.
133 Return an SSA_NAME for the vector in index N. The reference
134 is part of the vectorization of STMT and the vector is associated
135 with scalar destination SCALAR_DEST. */
137 static tree
138 read_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
139 tree array, unsigned HOST_WIDE_INT n)
141 tree vect_type, vect, vect_name, array_ref;
142 gimple new_stmt;
144 gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
145 vect_type = TREE_TYPE (TREE_TYPE (array));
146 vect = vect_create_destination_var (scalar_dest, vect_type);
147 array_ref = build4 (ARRAY_REF, vect_type, array,
148 build_int_cst (size_type_node, n),
149 NULL_TREE, NULL_TREE);
151 new_stmt = gimple_build_assign (vect, array_ref);
152 vect_name = make_ssa_name (vect, new_stmt);
153 gimple_assign_set_lhs (new_stmt, vect_name);
154 vect_finish_stmt_generation (stmt, new_stmt, gsi);
156 return vect_name;
159 /* ARRAY is an array of vectors created by create_vector_array.
160 Emit code to store SSA_NAME VECT in index N of the array.
161 The store is part of the vectorization of STMT. */
163 static void
164 write_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree vect,
165 tree array, unsigned HOST_WIDE_INT n)
167 tree array_ref;
168 gimple new_stmt;
170 array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
171 build_int_cst (size_type_node, n),
172 NULL_TREE, NULL_TREE);
174 new_stmt = gimple_build_assign (array_ref, vect);
175 vect_finish_stmt_generation (stmt, new_stmt, gsi);
178 /* PTR is a pointer to an array of type TYPE. Return a representation
179 of *PTR. The memory reference replaces those in FIRST_DR
180 (and its group). */
182 static tree
183 create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
185 tree mem_ref, alias_ptr_type;
187 alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
188 mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
189 /* Arrays have the same alignment as their type. */
190 set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
191 return mem_ref;
194 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
196 /* Function vect_mark_relevant.
198 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
200 static void
201 vect_mark_relevant (vec<gimple> *worklist, gimple stmt,
202 enum vect_relevant relevant, bool live_p,
203 bool used_in_pattern)
205 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
206 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
207 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
208 gimple pattern_stmt;
210 if (dump_enabled_p ())
211 dump_printf_loc (MSG_NOTE, vect_location,
212 "mark relevant %d, live %d.\n", relevant, live_p);
214 /* If this stmt is an original stmt in a pattern, we might need to mark its
215 related pattern stmt instead of the original stmt. However, such stmts
216 may have their own uses that are not in any pattern, in such cases the
217 stmt itself should be marked. */
218 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
220 bool found = false;
221 if (!used_in_pattern)
223 imm_use_iterator imm_iter;
224 use_operand_p use_p;
225 gimple use_stmt;
226 tree lhs;
227 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
228 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
230 if (is_gimple_assign (stmt))
231 lhs = gimple_assign_lhs (stmt);
232 else
233 lhs = gimple_call_lhs (stmt);
235 /* This use is out of pattern use, if LHS has other uses that are
236 pattern uses, we should mark the stmt itself, and not the pattern
237 stmt. */
238 if (lhs && TREE_CODE (lhs) == SSA_NAME)
239 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
241 if (is_gimple_debug (USE_STMT (use_p)))
242 continue;
243 use_stmt = USE_STMT (use_p);
245 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
246 continue;
248 if (vinfo_for_stmt (use_stmt)
249 && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
251 found = true;
252 break;
257 if (!found)
259 /* This is the last stmt in a sequence that was detected as a
260 pattern that can potentially be vectorized. Don't mark the stmt
261 as relevant/live because it's not going to be vectorized.
262 Instead mark the pattern-stmt that replaces it. */
264 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
266 if (dump_enabled_p ())
267 dump_printf_loc (MSG_NOTE, vect_location,
268 "last stmt in pattern. don't mark"
269 " relevant/live.\n");
270 stmt_info = vinfo_for_stmt (pattern_stmt);
271 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
272 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
273 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
274 stmt = pattern_stmt;
278 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
279 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
280 STMT_VINFO_RELEVANT (stmt_info) = relevant;
282 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
283 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
285 if (dump_enabled_p ())
286 dump_printf_loc (MSG_NOTE, vect_location,
287 "already marked relevant/live.\n");
288 return;
291 worklist->safe_push (stmt);
295 /* Function vect_stmt_relevant_p.
297 Return true if STMT in loop that is represented by LOOP_VINFO is
298 "relevant for vectorization".
300 A stmt is considered "relevant for vectorization" if:
301 - it has uses outside the loop.
302 - it has vdefs (it alters memory).
303 - control stmts in the loop (except for the exit condition).
305 CHECKME: what other side effects would the vectorizer allow? */
307 static bool
308 vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
309 enum vect_relevant *relevant, bool *live_p)
311 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
312 ssa_op_iter op_iter;
313 imm_use_iterator imm_iter;
314 use_operand_p use_p;
315 def_operand_p def_p;
317 *relevant = vect_unused_in_scope;
318 *live_p = false;
320 /* cond stmt other than loop exit cond. */
321 if (is_ctrl_stmt (stmt)
322 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
323 != loop_exit_ctrl_vec_info_type)
324 *relevant = vect_used_in_scope;
326 /* changing memory. */
327 if (gimple_code (stmt) != GIMPLE_PHI)
328 if (gimple_vdef (stmt)
329 && !gimple_clobber_p (stmt))
331 if (dump_enabled_p ())
332 dump_printf_loc (MSG_NOTE, vect_location,
333 "vec_stmt_relevant_p: stmt has vdefs.\n");
334 *relevant = vect_used_in_scope;
337 /* uses outside the loop. */
338 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
340 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
342 basic_block bb = gimple_bb (USE_STMT (use_p));
343 if (!flow_bb_inside_loop_p (loop, bb))
345 if (dump_enabled_p ())
346 dump_printf_loc (MSG_NOTE, vect_location,
347 "vec_stmt_relevant_p: used out of loop.\n");
349 if (is_gimple_debug (USE_STMT (use_p)))
350 continue;
352 /* We expect all such uses to be in the loop exit phis
353 (because of loop closed form) */
354 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
355 gcc_assert (bb == single_exit (loop)->dest);
357 *live_p = true;
362 return (*live_p || *relevant);
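/* A small example of the criteria above (a sketch):

     for (i = 0; i < n; i++)
       {
         1. t = i + 4
         2. a[i] = b[i] + c
         3. s = s + b[i]
       }
     4. ... = s

   Stmt 2 alters memory (has a vdef) and is therefore relevant; the def in
   stmt 3 is live because it is used after the loop (stmt 4); stmt 1 has
   neither property and is not marked here (it may still become relevant
   later through the uses of other relevant stmts).  */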
366 /* Function exist_non_indexing_operands_for_use_p
368 USE is one of the uses attached to STMT. Check if USE is
369 used in STMT for anything other than indexing an array. */
371 static bool
372 exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
374 tree operand;
375 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
377 /* USE corresponds to some operand in STMT. If there is no data
378 reference in STMT, then any operand that corresponds to USE
379 is not indexing an array. */
380 if (!STMT_VINFO_DATA_REF (stmt_info))
381 return true;
 383 /* STMT has a data_ref. FORNOW this means that it is of one of
384 the following forms:
385 -1- ARRAY_REF = var
386 -2- var = ARRAY_REF
387 (This should have been verified in analyze_data_refs).
389 'var' in the second case corresponds to a def, not a use,
390 so USE cannot correspond to any operands that are not used
391 for array indexing.
393 Therefore, all we need to check is if STMT falls into the
394 first case, and whether var corresponds to USE. */
396 if (!gimple_assign_copy_p (stmt))
398 if (is_gimple_call (stmt)
399 && gimple_call_internal_p (stmt))
400 switch (gimple_call_internal_fn (stmt))
402 case IFN_MASK_STORE:
403 operand = gimple_call_arg (stmt, 3);
404 if (operand == use)
405 return true;
406 /* FALLTHRU */
407 case IFN_MASK_LOAD:
408 operand = gimple_call_arg (stmt, 2);
409 if (operand == use)
410 return true;
411 break;
412 default:
413 break;
415 return false;
418 if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
419 return false;
420 operand = gimple_assign_rhs1 (stmt);
421 if (TREE_CODE (operand) != SSA_NAME)
422 return false;
424 if (operand == use)
425 return true;
427 return false;
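/* Example (a sketch): for the statement  a[i_7] = x_5;  the use of i_7
   only feeds the ARRAY_REF index, so this function returns false for it,
   while x_5 is a real (non-indexing) operand and the function returns
   true.  For the internal masked load/store calls above, the mask
   (argument 2) and, for a masked store, the stored value (argument 3)
   are likewise non-indexing operands.  */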
432 Function process_use.
434 Inputs:
435 - a USE in STMT in a loop represented by LOOP_VINFO
436 - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
437 that defined USE. This is done by calling mark_relevant and passing it
438 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
439 - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
440 be performed.
442 Outputs:
443 Generally, LIVE_P and RELEVANT are used to define the liveness and
444 relevance info of the DEF_STMT of this USE:
445 STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
446 STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
447 Exceptions:
448 - case 1: If USE is used only for address computations (e.g. array indexing),
449 which does not need to be directly vectorized, then the liveness/relevance
450 of the respective DEF_STMT is left unchanged.
451 - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
 452 skip DEF_STMT because it has already been processed.
453 - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
454 be modified accordingly.
456 Return true if everything is as expected. Return false otherwise. */
458 static bool
459 process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
460 enum vect_relevant relevant, vec<gimple> *worklist,
461 bool force)
463 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
464 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
465 stmt_vec_info dstmt_vinfo;
466 basic_block bb, def_bb;
467 tree def;
468 gimple def_stmt;
469 enum vect_def_type dt;
471 /* case 1: we are only interested in uses that need to be vectorized. Uses
472 that are used for address computation are not considered relevant. */
473 if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
474 return true;
476 if (!vect_is_simple_use (use, stmt, loop_vinfo, NULL, &def_stmt, &def, &dt))
478 if (dump_enabled_p ())
479 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
480 "not vectorized: unsupported use in stmt.\n");
481 return false;
484 if (!def_stmt || gimple_nop_p (def_stmt))
485 return true;
487 def_bb = gimple_bb (def_stmt);
488 if (!flow_bb_inside_loop_p (loop, def_bb))
490 if (dump_enabled_p ())
491 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
492 return true;
495 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
496 DEF_STMT must have already been processed, because this should be the
497 only way that STMT, which is a reduction-phi, was put in the worklist,
498 as there should be no other uses for DEF_STMT in the loop. So we just
499 check that everything is as expected, and we are done. */
500 dstmt_vinfo = vinfo_for_stmt (def_stmt);
501 bb = gimple_bb (stmt);
502 if (gimple_code (stmt) == GIMPLE_PHI
503 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
504 && gimple_code (def_stmt) != GIMPLE_PHI
505 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
506 && bb->loop_father == def_bb->loop_father)
508 if (dump_enabled_p ())
509 dump_printf_loc (MSG_NOTE, vect_location,
510 "reduc-stmt defining reduc-phi in the same nest.\n");
511 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
512 dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
513 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
514 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
515 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
516 return true;
519 /* case 3a: outer-loop stmt defining an inner-loop stmt:
520 outer-loop-header-bb:
521 d = def_stmt
522 inner-loop:
523 stmt # use (d)
524 outer-loop-tail-bb:
525 ... */
526 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
528 if (dump_enabled_p ())
529 dump_printf_loc (MSG_NOTE, vect_location,
530 "outer-loop def-stmt defining inner-loop stmt.\n");
532 switch (relevant)
534 case vect_unused_in_scope:
535 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
536 vect_used_in_scope : vect_unused_in_scope;
537 break;
539 case vect_used_in_outer_by_reduction:
540 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
541 relevant = vect_used_by_reduction;
542 break;
544 case vect_used_in_outer:
545 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
546 relevant = vect_used_in_scope;
547 break;
549 case vect_used_in_scope:
550 break;
552 default:
553 gcc_unreachable ();
557 /* case 3b: inner-loop stmt defining an outer-loop stmt:
558 outer-loop-header-bb:
560 inner-loop:
561 d = def_stmt
562 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
563 stmt # use (d) */
564 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
566 if (dump_enabled_p ())
567 dump_printf_loc (MSG_NOTE, vect_location,
568 "inner-loop def-stmt defining outer-loop stmt.\n");
570 switch (relevant)
572 case vect_unused_in_scope:
573 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
574 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
575 vect_used_in_outer_by_reduction : vect_unused_in_scope;
576 break;
578 case vect_used_by_reduction:
579 relevant = vect_used_in_outer_by_reduction;
580 break;
582 case vect_used_in_scope:
583 relevant = vect_used_in_outer;
584 break;
586 default:
587 gcc_unreachable ();
591 vect_mark_relevant (worklist, def_stmt, relevant, live_p,
592 is_pattern_stmt_p (stmt_vinfo));
593 return true;
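/* A sketch of case 3a above, for outer-loop vectorization:

     outer-loop:
       d = x[i]              <-- def_stmt, in the outer-loop header
       inner-loop:
         y[j] = y[j] + d     <-- stmt # use (d)

   The relevance computed for STMT in the inner loop is translated before
   being recorded for DEF_STMT: for instance vect_used_in_outer becomes
   vect_used_in_scope and vect_used_in_outer_by_reduction becomes
   vect_used_by_reduction, as in the first switch above.  Case 3b applies
   the inverse translation when the def is in the inner loop and the use
   in the outer loop.  */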
597 /* Function vect_mark_stmts_to_be_vectorized.
599 Not all stmts in the loop need to be vectorized. For example:
601 for i...
602 for j...
603 1. T0 = i + j
604 2. T1 = a[T0]
606 3. j = j + 1
 608 Stmts 1 and 3 do not need to be vectorized, because loop control and
609 addressing of vectorized data-refs are handled differently.
611 This pass detects such stmts. */
613 bool
614 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
616 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
617 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
618 unsigned int nbbs = loop->num_nodes;
619 gimple_stmt_iterator si;
620 gimple stmt;
621 unsigned int i;
622 stmt_vec_info stmt_vinfo;
623 basic_block bb;
624 gimple phi;
625 bool live_p;
626 enum vect_relevant relevant, tmp_relevant;
627 enum vect_def_type def_type;
629 if (dump_enabled_p ())
630 dump_printf_loc (MSG_NOTE, vect_location,
631 "=== vect_mark_stmts_to_be_vectorized ===\n");
633 auto_vec<gimple, 64> worklist;
635 /* 1. Init worklist. */
636 for (i = 0; i < nbbs; i++)
638 bb = bbs[i];
639 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
641 phi = gsi_stmt (si);
642 if (dump_enabled_p ())
644 dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
645 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
646 dump_printf (MSG_NOTE, "\n");
649 if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
650 vect_mark_relevant (&worklist, phi, relevant, live_p, false);
652 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
654 stmt = gsi_stmt (si);
655 if (dump_enabled_p ())
657 dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
658 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
659 dump_printf (MSG_NOTE, "\n");
662 if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
663 vect_mark_relevant (&worklist, stmt, relevant, live_p, false);
667 /* 2. Process_worklist */
668 while (worklist.length () > 0)
670 use_operand_p use_p;
671 ssa_op_iter iter;
673 stmt = worklist.pop ();
674 if (dump_enabled_p ())
676 dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
677 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
678 dump_printf (MSG_NOTE, "\n");
681 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
682 (DEF_STMT) as relevant/irrelevant and live/dead according to the
683 liveness and relevance properties of STMT. */
684 stmt_vinfo = vinfo_for_stmt (stmt);
685 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
686 live_p = STMT_VINFO_LIVE_P (stmt_vinfo);
688 /* Generally, the liveness and relevance properties of STMT are
689 propagated as is to the DEF_STMTs of its USEs:
690 live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
691 relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)
693 One exception is when STMT has been identified as defining a reduction
694 variable; in this case we set the liveness/relevance as follows:
695 live_p = false
696 relevant = vect_used_by_reduction
697 This is because we distinguish between two kinds of relevant stmts -
698 those that are used by a reduction computation, and those that are
699 (also) used by a regular computation. This allows us later on to
700 identify stmts that are used solely by a reduction, and therefore the
701 order of the results that they produce does not have to be kept. */
703 def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
704 tmp_relevant = relevant;
705 switch (def_type)
707 case vect_reduction_def:
708 switch (tmp_relevant)
710 case vect_unused_in_scope:
711 relevant = vect_used_by_reduction;
712 break;
714 case vect_used_by_reduction:
715 if (gimple_code (stmt) == GIMPLE_PHI)
716 break;
717 /* fall through */
719 default:
720 if (dump_enabled_p ())
721 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
722 "unsupported use of reduction.\n");
723 return false;
726 live_p = false;
727 break;
729 case vect_nested_cycle:
730 if (tmp_relevant != vect_unused_in_scope
731 && tmp_relevant != vect_used_in_outer_by_reduction
732 && tmp_relevant != vect_used_in_outer)
734 if (dump_enabled_p ())
735 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
736 "unsupported use of nested cycle.\n");
738 return false;
741 live_p = false;
742 break;
744 case vect_double_reduction_def:
745 if (tmp_relevant != vect_unused_in_scope
746 && tmp_relevant != vect_used_by_reduction)
748 if (dump_enabled_p ())
749 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
750 "unsupported use of double reduction.\n");
752 return false;
755 live_p = false;
756 break;
758 default:
759 break;
762 if (is_pattern_stmt_p (stmt_vinfo))
764 /* Pattern statements are not inserted into the code, so
765 FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
766 have to scan the RHS or function arguments instead. */
767 if (is_gimple_assign (stmt))
769 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
770 tree op = gimple_assign_rhs1 (stmt);
772 i = 1;
773 if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
775 if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
776 live_p, relevant, &worklist, false)
777 || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
778 live_p, relevant, &worklist, false))
779 return false;
780 i = 2;
782 for (; i < gimple_num_ops (stmt); i++)
784 op = gimple_op (stmt, i);
785 if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
786 &worklist, false))
787 return false;
790 else if (is_gimple_call (stmt))
792 for (i = 0; i < gimple_call_num_args (stmt); i++)
794 tree arg = gimple_call_arg (stmt, i);
795 if (!process_use (stmt, arg, loop_vinfo, live_p, relevant,
796 &worklist, false))
797 return false;
801 else
802 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
804 tree op = USE_FROM_PTR (use_p);
805 if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
806 &worklist, false))
807 return false;
810 if (STMT_VINFO_GATHER_P (stmt_vinfo))
812 tree off;
813 tree decl = vect_check_gather (stmt, loop_vinfo, NULL, &off, NULL);
814 gcc_assert (decl);
815 if (!process_use (stmt, off, loop_vinfo, live_p, relevant,
816 &worklist, true))
817 return false;
819 } /* while worklist */
821 return true;
825 /* Function vect_model_simple_cost.
827 Models cost for simple operations, i.e. those that only emit ncopies of a
828 single op. Right now, this does not account for multiple insns that could
829 be generated for the single vector op. We will handle that shortly. */
831 void
832 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
833 enum vect_def_type *dt,
834 stmt_vector_for_cost *prologue_cost_vec,
835 stmt_vector_for_cost *body_cost_vec)
837 int i;
838 int inside_cost = 0, prologue_cost = 0;
840 /* The SLP costs were already calculated during SLP tree build. */
841 if (PURE_SLP_STMT (stmt_info))
842 return;
844 /* FORNOW: Assuming maximum 2 args per stmts. */
845 for (i = 0; i < 2; i++)
846 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
847 prologue_cost += record_stmt_cost (prologue_cost_vec, 1, vector_stmt,
848 stmt_info, 0, vect_prologue);
850 /* Pass the inside-of-loop statements to the target-specific cost model. */
851 inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
852 stmt_info, 0, vect_body);
854 if (dump_enabled_p ())
855 dump_printf_loc (MSG_NOTE, vect_location,
856 "vect_model_simple_cost: inside_cost = %d, "
857 "prologue_cost = %d .\n", inside_cost, prologue_cost);
861 /* Model cost for type demotion and promotion operations. PWR is normally
862 zero for single-step promotions and demotions. It will be one if
863 two-step promotion/demotion is required, and so on. Each additional
864 step doubles the number of instructions required. */
866 static void
867 vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
868 enum vect_def_type *dt, int pwr)
870 int i, tmp;
871 int inside_cost = 0, prologue_cost = 0;
872 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
873 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
874 void *target_cost_data;
876 /* The SLP costs were already calculated during SLP tree build. */
877 if (PURE_SLP_STMT (stmt_info))
878 return;
880 if (loop_vinfo)
881 target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
882 else
883 target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);
885 for (i = 0; i < pwr + 1; i++)
887 tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
888 (i + 1) : i;
889 inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
890 vec_promote_demote, stmt_info, 0,
891 vect_body);
894 /* FORNOW: Assuming maximum 2 args per stmts. */
895 for (i = 0; i < 2; i++)
896 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
897 prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
898 stmt_info, 0, vect_prologue);
900 if (dump_enabled_p ())
901 dump_printf_loc (MSG_NOTE, vect_location,
902 "vect_model_promotion_demotion_cost: inside_cost = %d, "
903 "prologue_cost = %d .\n", inside_cost, prologue_cost);
906 /* Function vect_cost_group_size
908 For grouped load or store, return the group_size only if it is the first
909 load or store of a group, else return 1. This ensures that group size is
910 only returned once per group. */
912 static int
913 vect_cost_group_size (stmt_vec_info stmt_info)
915 gimple first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
917 if (first_stmt == STMT_VINFO_STMT (stmt_info))
918 return GROUP_SIZE (stmt_info);
920 return 1;
924 /* Function vect_model_store_cost
926 Models cost for stores. In the case of grouped accesses, one access
927 has the overhead of the grouped access attributed to it. */
929 void
930 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
931 bool store_lanes_p, enum vect_def_type dt,
932 slp_tree slp_node,
933 stmt_vector_for_cost *prologue_cost_vec,
934 stmt_vector_for_cost *body_cost_vec)
936 int group_size;
937 unsigned int inside_cost = 0, prologue_cost = 0;
938 struct data_reference *first_dr;
939 gimple first_stmt;
941 /* The SLP costs were already calculated during SLP tree build. */
942 if (PURE_SLP_STMT (stmt_info))
943 return;
945 if (dt == vect_constant_def || dt == vect_external_def)
946 prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
947 stmt_info, 0, vect_prologue);
949 /* Grouped access? */
950 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
952 if (slp_node)
954 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
955 group_size = 1;
957 else
959 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
960 group_size = vect_cost_group_size (stmt_info);
963 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
965 /* Not a grouped access. */
966 else
968 group_size = 1;
969 first_dr = STMT_VINFO_DATA_REF (stmt_info);
972 /* We assume that the cost of a single store-lanes instruction is
973 equivalent to the cost of GROUP_SIZE separate stores. If a grouped
974 access is instead being provided by a permute-and-store operation,
975 include the cost of the permutes. */
976 if (!store_lanes_p && group_size > 1)
978 /* Uses a high and low interleave operation for each needed permute. */
980 int nstmts = ncopies * exact_log2 (group_size) * group_size;
981 inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
982 stmt_info, 0, vect_body);
984 if (dump_enabled_p ())
985 dump_printf_loc (MSG_NOTE, vect_location,
986 "vect_model_store_cost: strided group_size = %d .\n",
987 group_size);
990 /* Costs of the stores. */
991 vect_get_store_cost (first_dr, ncopies, &inside_cost, body_cost_vec);
993 if (dump_enabled_p ())
994 dump_printf_loc (MSG_NOTE, vect_location,
995 "vect_model_store_cost: inside_cost = %d, "
996 "prologue_cost = %d .\n", inside_cost, prologue_cost);
1000 /* Calculate cost of DR's memory access. */
1001 void
1002 vect_get_store_cost (struct data_reference *dr, int ncopies,
1003 unsigned int *inside_cost,
1004 stmt_vector_for_cost *body_cost_vec)
1006 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
1007 gimple stmt = DR_STMT (dr);
1008 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1010 switch (alignment_support_scheme)
1012 case dr_aligned:
1014 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1015 vector_store, stmt_info, 0,
1016 vect_body);
1018 if (dump_enabled_p ())
1019 dump_printf_loc (MSG_NOTE, vect_location,
1020 "vect_model_store_cost: aligned.\n");
1021 break;
1024 case dr_unaligned_supported:
1026 /* Here, we assign an additional cost for the unaligned store. */
1027 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1028 unaligned_store, stmt_info,
1029 DR_MISALIGNMENT (dr), vect_body);
1030 if (dump_enabled_p ())
1031 dump_printf_loc (MSG_NOTE, vect_location,
1032 "vect_model_store_cost: unaligned supported by "
1033 "hardware.\n");
1034 break;
1037 case dr_unaligned_unsupported:
1039 *inside_cost = VECT_MAX_COST;
1041 if (dump_enabled_p ())
1042 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1043 "vect_model_store_cost: unsupported access.\n");
1044 break;
1047 default:
1048 gcc_unreachable ();
1053 /* Function vect_model_load_cost
1055 Models cost for loads. In the case of grouped accesses, the last access
1056 has the overhead of the grouped access attributed to it. Since unaligned
1057 accesses are supported for loads, we also account for the costs of the
1058 access scheme chosen. */
1060 void
1061 vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
1062 bool load_lanes_p, slp_tree slp_node,
1063 stmt_vector_for_cost *prologue_cost_vec,
1064 stmt_vector_for_cost *body_cost_vec)
1066 int group_size;
1067 gimple first_stmt;
1068 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
1069 unsigned int inside_cost = 0, prologue_cost = 0;
1071 /* The SLP costs were already calculated during SLP tree build. */
1072 if (PURE_SLP_STMT (stmt_info))
1073 return;
1075 /* Grouped accesses? */
1076 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
1077 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && first_stmt && !slp_node)
1079 group_size = vect_cost_group_size (stmt_info);
1080 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
1082 /* Not a grouped access. */
1083 else
1085 group_size = 1;
1086 first_dr = dr;
1089 /* We assume that the cost of a single load-lanes instruction is
1090 equivalent to the cost of GROUP_SIZE separate loads. If a grouped
1091 access is instead being provided by a load-and-permute operation,
1092 include the cost of the permutes. */
1093 if (!load_lanes_p && group_size > 1)
 1095 /* Uses even and odd extract operations for each needed permute. */
1096 int nstmts = ncopies * exact_log2 (group_size) * group_size;
1097 inside_cost += record_stmt_cost (body_cost_vec, nstmts, vec_perm,
1098 stmt_info, 0, vect_body);
1100 if (dump_enabled_p ())
1101 dump_printf_loc (MSG_NOTE, vect_location,
1102 "vect_model_load_cost: strided group_size = %d .\n",
1103 group_size);
1106 /* The loads themselves. */
1107 if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
1109 /* N scalar loads plus gathering them into a vector. */
1110 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1111 inside_cost += record_stmt_cost (body_cost_vec,
1112 ncopies * TYPE_VECTOR_SUBPARTS (vectype),
1113 scalar_load, stmt_info, 0, vect_body);
1114 inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
1115 stmt_info, 0, vect_body);
1117 else
1118 vect_get_load_cost (first_dr, ncopies,
1119 ((!STMT_VINFO_GROUPED_ACCESS (stmt_info))
1120 || group_size > 1 || slp_node),
1121 &inside_cost, &prologue_cost,
1122 prologue_cost_vec, body_cost_vec, true);
1124 if (dump_enabled_p ())
1125 dump_printf_loc (MSG_NOTE, vect_location,
1126 "vect_model_load_cost: inside_cost = %d, "
1127 "prologue_cost = %d .\n", inside_cost, prologue_cost);
1131 /* Calculate cost of DR's memory access. */
1132 void
1133 vect_get_load_cost (struct data_reference *dr, int ncopies,
1134 bool add_realign_cost, unsigned int *inside_cost,
1135 unsigned int *prologue_cost,
1136 stmt_vector_for_cost *prologue_cost_vec,
1137 stmt_vector_for_cost *body_cost_vec,
1138 bool record_prologue_costs)
1140 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
1141 gimple stmt = DR_STMT (dr);
1142 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1144 switch (alignment_support_scheme)
1146 case dr_aligned:
1148 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1149 stmt_info, 0, vect_body);
1151 if (dump_enabled_p ())
1152 dump_printf_loc (MSG_NOTE, vect_location,
1153 "vect_model_load_cost: aligned.\n");
1155 break;
1157 case dr_unaligned_supported:
1159 /* Here, we assign an additional cost for the unaligned load. */
1160 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1161 unaligned_load, stmt_info,
1162 DR_MISALIGNMENT (dr), vect_body);
1164 if (dump_enabled_p ())
1165 dump_printf_loc (MSG_NOTE, vect_location,
1166 "vect_model_load_cost: unaligned supported by "
1167 "hardware.\n");
1169 break;
1171 case dr_explicit_realign:
1173 *inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
1174 vector_load, stmt_info, 0, vect_body);
1175 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1176 vec_perm, stmt_info, 0, vect_body);
1178 /* FIXME: If the misalignment remains fixed across the iterations of
1179 the containing loop, the following cost should be added to the
1180 prologue costs. */
1181 if (targetm.vectorize.builtin_mask_for_load)
1182 *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
1183 stmt_info, 0, vect_body);
1185 if (dump_enabled_p ())
1186 dump_printf_loc (MSG_NOTE, vect_location,
1187 "vect_model_load_cost: explicit realign\n");
1189 break;
1191 case dr_explicit_realign_optimized:
1193 if (dump_enabled_p ())
1194 dump_printf_loc (MSG_NOTE, vect_location,
1195 "vect_model_load_cost: unaligned software "
1196 "pipelined.\n");
1198 /* Unaligned software pipeline has a load of an address, an initial
1199 load, and possibly a mask operation to "prime" the loop. However,
1200 if this is an access in a group of loads, which provide grouped
1201 access, then the above cost should only be considered for one
1202 access in the group. Inside the loop, there is a load op
1203 and a realignment op. */
1205 if (add_realign_cost && record_prologue_costs)
1207 *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
1208 vector_stmt, stmt_info,
1209 0, vect_prologue);
1210 if (targetm.vectorize.builtin_mask_for_load)
1211 *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
1212 vector_stmt, stmt_info,
1213 0, vect_prologue);
1216 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1217 stmt_info, 0, vect_body);
1218 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
1219 stmt_info, 0, vect_body);
1221 if (dump_enabled_p ())
1222 dump_printf_loc (MSG_NOTE, vect_location,
1223 "vect_model_load_cost: explicit realign optimized"
1224 "\n");
1226 break;
1229 case dr_unaligned_unsupported:
1231 *inside_cost = VECT_MAX_COST;
1233 if (dump_enabled_p ())
1234 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1235 "vect_model_load_cost: unsupported access.\n");
1236 break;
1239 default:
1240 gcc_unreachable ();
1244 /* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
1245 the loop preheader for the vectorized stmt STMT. */
1247 static void
1248 vect_init_vector_1 (gimple stmt, gimple new_stmt, gimple_stmt_iterator *gsi)
1250 if (gsi)
1251 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1252 else
1254 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1255 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1257 if (loop_vinfo)
1259 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1260 basic_block new_bb;
1261 edge pe;
1263 if (nested_in_vect_loop_p (loop, stmt))
1264 loop = loop->inner;
1266 pe = loop_preheader_edge (loop);
1267 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
1268 gcc_assert (!new_bb);
1270 else
1272 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
1273 basic_block bb;
1274 gimple_stmt_iterator gsi_bb_start;
1276 gcc_assert (bb_vinfo);
1277 bb = BB_VINFO_BB (bb_vinfo);
1278 gsi_bb_start = gsi_after_labels (bb);
1279 gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
1283 if (dump_enabled_p ())
1285 dump_printf_loc (MSG_NOTE, vect_location,
1286 "created new init_stmt: ");
1287 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
1288 dump_printf (MSG_NOTE, "\n");
1292 /* Function vect_init_vector.
1294 Insert a new stmt (INIT_STMT) that initializes a new variable of type
1295 TYPE with the value VAL. If TYPE is a vector type and VAL does not have
 1296 vector type, a vector with all elements equal to VAL is created first.
1297 Place the initialization at BSI if it is not NULL. Otherwise, place the
1298 initialization at the loop preheader.
1299 Return the DEF of INIT_STMT.
1300 It will be used in the vectorization of STMT. */
1302 tree
1303 vect_init_vector (gimple stmt, tree val, tree type, gimple_stmt_iterator *gsi)
1305 tree new_var;
1306 gimple init_stmt;
1307 tree vec_oprnd;
1308 tree new_temp;
1310 if (TREE_CODE (type) == VECTOR_TYPE
1311 && TREE_CODE (TREE_TYPE (val)) != VECTOR_TYPE)
1313 if (!types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
1315 if (CONSTANT_CLASS_P (val))
1316 val = fold_unary (VIEW_CONVERT_EXPR, TREE_TYPE (type), val);
1317 else
1319 new_temp = make_ssa_name (TREE_TYPE (type), NULL);
1320 init_stmt = gimple_build_assign_with_ops (NOP_EXPR,
1321 new_temp, val,
1322 NULL_TREE);
1323 vect_init_vector_1 (stmt, init_stmt, gsi);
1324 val = new_temp;
1327 val = build_vector_from_val (type, val);
1330 new_var = vect_get_new_vect_var (type, vect_simple_var, "cst_");
1331 init_stmt = gimple_build_assign (new_var, val);
1332 new_temp = make_ssa_name (new_var, init_stmt);
1333 gimple_assign_set_lhs (init_stmt, new_temp);
1334 vect_init_vector_1 (stmt, init_stmt, gsi);
1335 vec_oprnd = gimple_assign_lhs (init_stmt);
1336 return vec_oprnd;
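/* Example (a sketch): for TYPE = a vector of 4 ints and a constant VAL
   of 5, this builds the splat { 5, 5, 5, 5 }, emits an init stmt that
   assigns it to a new "cst_"-prefixed temporary either at GSI or on the
   loop preheader edge (via vect_init_vector_1), and returns the
   resulting SSA name for use as a vector operand.  */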
1340 /* Function vect_get_vec_def_for_operand.
1342 OP is an operand in STMT. This function returns a (vector) def that will be
1343 used in the vectorized stmt for STMT.
1345 In the case that OP is an SSA_NAME which is defined in the loop, then
1346 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
1348 In case OP is an invariant or constant, a new stmt that creates a vector def
1349 needs to be introduced. */
1351 tree
1352 vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
1354 tree vec_oprnd;
1355 gimple vec_stmt;
1356 gimple def_stmt;
1357 stmt_vec_info def_stmt_info = NULL;
1358 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1359 unsigned int nunits;
1360 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1361 tree def;
1362 enum vect_def_type dt;
1363 bool is_simple_use;
1364 tree vector_type;
1366 if (dump_enabled_p ())
1368 dump_printf_loc (MSG_NOTE, vect_location,
1369 "vect_get_vec_def_for_operand: ");
1370 dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
1371 dump_printf (MSG_NOTE, "\n");
1374 is_simple_use = vect_is_simple_use (op, stmt, loop_vinfo, NULL,
1375 &def_stmt, &def, &dt);
1376 gcc_assert (is_simple_use);
1377 if (dump_enabled_p ())
1379 int loc_printed = 0;
1380 if (def)
1382 dump_printf_loc (MSG_NOTE, vect_location, "def = ");
1383 loc_printed = 1;
1384 dump_generic_expr (MSG_NOTE, TDF_SLIM, def);
1385 dump_printf (MSG_NOTE, "\n");
1387 if (def_stmt)
1389 if (loc_printed)
1390 dump_printf (MSG_NOTE, " def_stmt = ");
1391 else
1392 dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = ");
1393 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
1394 dump_printf (MSG_NOTE, "\n");
1398 switch (dt)
1400 /* Case 1: operand is a constant. */
1401 case vect_constant_def:
1403 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
1404 gcc_assert (vector_type);
1405 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
1407 if (scalar_def)
1408 *scalar_def = op;
1410 /* Create 'vect_cst_ = {cst,cst,...,cst}' */
1411 if (dump_enabled_p ())
1412 dump_printf_loc (MSG_NOTE, vect_location,
1413 "Create vector_cst. nunits = %d\n", nunits);
1415 return vect_init_vector (stmt, op, vector_type, NULL);
1418 /* Case 2: operand is defined outside the loop - loop invariant. */
1419 case vect_external_def:
1421 vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
1422 gcc_assert (vector_type);
1424 if (scalar_def)
1425 *scalar_def = def;
1427 /* Create 'vec_inv = {inv,inv,..,inv}' */
1428 if (dump_enabled_p ())
1429 dump_printf_loc (MSG_NOTE, vect_location, "Create vector_inv.\n");
1431 return vect_init_vector (stmt, def, vector_type, NULL);
1434 /* Case 3: operand is defined inside the loop. */
1435 case vect_internal_def:
1437 if (scalar_def)
1438 *scalar_def = NULL/* FIXME tuples: def_stmt*/;
1440 /* Get the def from the vectorized stmt. */
1441 def_stmt_info = vinfo_for_stmt (def_stmt);
1443 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1444 /* Get vectorized pattern statement. */
1445 if (!vec_stmt
1446 && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
1447 && !STMT_VINFO_RELEVANT (def_stmt_info))
1448 vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
1449 STMT_VINFO_RELATED_STMT (def_stmt_info)));
1450 gcc_assert (vec_stmt);
1451 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1452 vec_oprnd = PHI_RESULT (vec_stmt);
1453 else if (is_gimple_call (vec_stmt))
1454 vec_oprnd = gimple_call_lhs (vec_stmt);
1455 else
1456 vec_oprnd = gimple_assign_lhs (vec_stmt);
1457 return vec_oprnd;
1460 /* Case 4: operand is defined by a loop header phi - reduction */
1461 case vect_reduction_def:
1462 case vect_double_reduction_def:
1463 case vect_nested_cycle:
1465 struct loop *loop;
1467 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1468 loop = (gimple_bb (def_stmt))->loop_father;
1470 /* Get the def before the loop */
1471 op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
1472 return get_initial_def_for_reduction (stmt, op, scalar_def);
1475 /* Case 5: operand is defined by loop-header phi - induction. */
1476 case vect_induction_def:
1478 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1480 /* Get the def from the vectorized stmt. */
1481 def_stmt_info = vinfo_for_stmt (def_stmt);
1482 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1483 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1484 vec_oprnd = PHI_RESULT (vec_stmt);
1485 else
1486 vec_oprnd = gimple_get_lhs (vec_stmt);
1487 return vec_oprnd;
1490 default:
1491 gcc_unreachable ();
1496 /* Function vect_get_vec_def_for_stmt_copy
1498 Return a vector-def for an operand. This function is used when the
1499 vectorized stmt to be created (by the caller to this function) is a "copy"
1500 created in case the vectorized result cannot fit in one vector, and several
1501 copies of the vector-stmt are required. In this case the vector-def is
1502 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1503 of the stmt that defines VEC_OPRND.
1504 DT is the type of the vector def VEC_OPRND.
1506 Context:
1507 In case the vectorization factor (VF) is bigger than the number
1508 of elements that can fit in a vectype (nunits), we have to generate
1509 more than one vector stmt to vectorize the scalar stmt. This situation
1510 arises when there are multiple data-types operated upon in the loop; the
1511 smallest data-type determines the VF, and as a result, when vectorizing
1512 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1513 vector stmt (each computing a vector of 'nunits' results, and together
1514 computing 'VF' results in each iteration). This function is called when
1515 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1516 which VF=16 and nunits=4, so the number of copies required is 4):
1518 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1520 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1521 VS1.1: vx.1 = memref1 VS1.2
1522 VS1.2: vx.2 = memref2 VS1.3
1523 VS1.3: vx.3 = memref3
1525 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1526 VSnew.1: vz1 = vx.1 + ... VSnew.2
1527 VSnew.2: vz2 = vx.2 + ... VSnew.3
1528 VSnew.3: vz3 = vx.3 + ...
1530 The vectorization of S1 is explained in vectorizable_load.
1531 The vectorization of S2:
1532 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1533 the function 'vect_get_vec_def_for_operand' is called to
1534 get the relevant vector-def for each operand of S2. For operand x it
1535 returns the vector-def 'vx.0'.
1537 To create the remaining copies of the vector-stmt (VSnew.j), this
1538 function is called to get the relevant vector-def for each operand. It is
1539 obtained from the respective VS1.j stmt, which is recorded in the
1540 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1542 For example, to obtain the vector-def 'vx.1' in order to create the
1543 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1544 Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
1545 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1546 and return its def ('vx.1').
1547 Overall, to create the above sequence this function will be called 3 times:
1548 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1549 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1550 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1552 tree
1553 vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1555 gimple vec_stmt_for_operand;
1556 stmt_vec_info def_stmt_info;
1558 /* Do nothing; can reuse same def. */
1559 if (dt == vect_external_def || dt == vect_constant_def )
1560 return vec_oprnd;
1562 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1563 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1564 gcc_assert (def_stmt_info);
1565 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1566 gcc_assert (vec_stmt_for_operand);
1567 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1568 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1569 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1570 else
1571 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1572 return vec_oprnd;
1576 /* Get vectorized definitions for the operands to create a copy of an original
1577 stmt. See vect_get_vec_def_for_stmt_copy () for details. */
1579 static void
1580 vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
1581 vec<tree> *vec_oprnds0,
1582 vec<tree> *vec_oprnds1)
1584 tree vec_oprnd = vec_oprnds0->pop ();
1586 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
1587 vec_oprnds0->quick_push (vec_oprnd);
1589 if (vec_oprnds1 && vec_oprnds1->length ())
1591 vec_oprnd = vec_oprnds1->pop ();
1592 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
1593 vec_oprnds1->quick_push (vec_oprnd);
1598 /* Get vectorized definitions for OP0 and OP1.
1599 REDUC_INDEX is the index of reduction operand in case of reduction,
1600 and -1 otherwise. */
1602 void
1603 vect_get_vec_defs (tree op0, tree op1, gimple stmt,
1604 vec<tree> *vec_oprnds0,
1605 vec<tree> *vec_oprnds1,
1606 slp_tree slp_node, int reduc_index)
1608 if (slp_node)
1610 int nops = (op1 == NULL_TREE) ? 1 : 2;
1611 auto_vec<tree> ops (nops);
1612 auto_vec<vec<tree> > vec_defs (nops);
1614 ops.quick_push (op0);
1615 if (op1)
1616 ops.quick_push (op1);
1618 vect_get_slp_defs (ops, slp_node, &vec_defs, reduc_index);
1620 *vec_oprnds0 = vec_defs[0];
1621 if (op1)
1622 *vec_oprnds1 = vec_defs[1];
1624 else
1626 tree vec_oprnd;
1628 vec_oprnds0->create (1);
1629 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
1630 vec_oprnds0->quick_push (vec_oprnd);
1632 if (op1)
1634 vec_oprnds1->create (1);
1635 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
1636 vec_oprnds1->quick_push (vec_oprnd);
1642 /* Function vect_finish_stmt_generation.
1644 Insert a new stmt. */
1646 void
1647 vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
1648 gimple_stmt_iterator *gsi)
1650 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1651 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1652 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
1654 gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
1656 if (!gsi_end_p (*gsi)
1657 && gimple_has_mem_ops (vec_stmt))
1659 gimple at_stmt = gsi_stmt (*gsi);
1660 tree vuse = gimple_vuse (at_stmt);
1661 if (vuse && TREE_CODE (vuse) == SSA_NAME)
1663 tree vdef = gimple_vdef (at_stmt);
1664 gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
1665 /* If we have an SSA vuse and insert a store, update virtual
1666 SSA form to avoid triggering the renamer. Do so only
1667 if we can easily see all uses - which is what almost always
1668 happens with the way vectorized stmts are inserted. */
1669 if ((vdef && TREE_CODE (vdef) == SSA_NAME)
1670 && ((is_gimple_assign (vec_stmt)
1671 && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
1672 || (is_gimple_call (vec_stmt)
1673 && !(gimple_call_flags (vec_stmt)
1674 & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
1676 tree new_vdef = copy_ssa_name (vuse, vec_stmt);
1677 gimple_set_vdef (vec_stmt, new_vdef);
1678 SET_USE (gimple_vuse_op (at_stmt), new_vdef);
1682 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1684 set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
1685 bb_vinfo));
1687 if (dump_enabled_p ())
1689 dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
1690 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
1691 dump_printf (MSG_NOTE, "\n");
1694 gimple_set_location (vec_stmt, gimple_location (stmt));
1696 /* While EH edges will generally prevent vectorization, stmt might
1697 e.g. be in a must-not-throw region. Ensure newly created stmts
1698 that could throw are part of the same region. */
1699 int lp_nr = lookup_stmt_eh_lp (stmt);
1700 if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
1701 add_stmt_to_eh_lp (vec_stmt, lp_nr);
1704 /* Checks if CALL can be vectorized in type VECTYPE. Returns
1705 a function declaration if the target has a vectorized version
1706 of the function, or NULL_TREE if the function cannot be vectorized. */
1708 tree
1709 vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
1711 tree fndecl = gimple_call_fndecl (call);
1713 /* We only handle functions that do not read or clobber memory -- i.e.
1714 const or novops ones. */
1715 if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
1716 return NULL_TREE;
1718 if (!fndecl
1719 || TREE_CODE (fndecl) != FUNCTION_DECL
1720 || !DECL_BUILT_IN (fndecl))
1721 return NULL_TREE;
1723 return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
1724 vectype_in);
1728 static tree permute_vec_elements (tree, tree, tree, gimple,
1729 gimple_stmt_iterator *);
1732 /* Function vectorizable_mask_load_store.
1734 Check if STMT performs a conditional load or store that can be vectorized.
1735 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1736 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
1737 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1739 static bool
1740 vectorizable_mask_load_store (gimple stmt, gimple_stmt_iterator *gsi,
1741 gimple *vec_stmt, slp_tree slp_node)
1743 tree vec_dest = NULL;
1744 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1745 stmt_vec_info prev_stmt_info;
1746 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1747 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1748 bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
1749 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
1750 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1751 tree elem_type;
1752 gimple new_stmt;
1753 tree dummy;
1754 tree dataref_ptr = NULL_TREE;
1755 gimple ptr_incr;
1756 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
1757 int ncopies;
1758 int i, j;
1759 bool inv_p;
1760 tree gather_base = NULL_TREE, gather_off = NULL_TREE;
1761 tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
1762 int gather_scale = 1;
1763 enum vect_def_type gather_dt = vect_unknown_def_type;
1764 bool is_store;
1765 tree mask;
1766 gimple def_stmt;
1767 tree def;
1768 enum vect_def_type dt;
1770 if (slp_node != NULL)
1771 return false;
1773 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
1774 gcc_assert (ncopies >= 1);
1776 is_store = gimple_call_internal_fn (stmt) == IFN_MASK_STORE;
1777 mask = gimple_call_arg (stmt, 2);
1778 if (TYPE_PRECISION (TREE_TYPE (mask))
1779 != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype))))
1780 return false;
1782 /* FORNOW. This restriction should be relaxed. */
1783 if (nested_in_vect_loop && ncopies > 1)
1785 if (dump_enabled_p ())
1786 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1787 "multiple types in nested loop.");
1788 return false;
1791 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1792 return false;
1794 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
1795 return false;
1797 if (!STMT_VINFO_DATA_REF (stmt_info))
1798 return false;
1800 elem_type = TREE_TYPE (vectype);
1802 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1803 return false;
1805 if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
1806 return false;
1808 if (STMT_VINFO_GATHER_P (stmt_info))
1810 gimple def_stmt;
1811 tree def;
1812 gather_decl = vect_check_gather (stmt, loop_vinfo, &gather_base,
1813 &gather_off, &gather_scale);
1814 gcc_assert (gather_decl);
1815 if (!vect_is_simple_use_1 (gather_off, NULL, loop_vinfo, NULL,
1816 &def_stmt, &def, &gather_dt,
1817 &gather_off_vectype))
1819 if (dump_enabled_p ())
1820 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1821 "gather index use not simple.");
1822 return false;
1825 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
1826 tree masktype
1827 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
1828 if (TREE_CODE (masktype) == INTEGER_TYPE)
1830 if (dump_enabled_p ())
1831 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1832 "masked gather with integer mask not supported.");
1833 return false;
1836 else if (tree_int_cst_compare (nested_in_vect_loop
1837 ? STMT_VINFO_DR_STEP (stmt_info)
1838 : DR_STEP (dr), size_zero_node) <= 0)
1839 return false;
1840 else if (!VECTOR_MODE_P (TYPE_MODE (vectype))
1841 || !can_vec_mask_load_store_p (TYPE_MODE (vectype), !is_store))
1842 return false;
1844 if (TREE_CODE (mask) != SSA_NAME)
1845 return false;
1847 if (!vect_is_simple_use (mask, stmt, loop_vinfo, NULL,
1848 &def_stmt, &def, &dt))
1849 return false;
1851 if (is_store)
1853 tree rhs = gimple_call_arg (stmt, 3);
1854 if (!vect_is_simple_use (rhs, stmt, loop_vinfo, NULL,
1855 &def_stmt, &def, &dt))
1856 return false;
1859 if (!vec_stmt) /* transformation not required. */
1861 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
1862 if (is_store)
1863 vect_model_store_cost (stmt_info, ncopies, false, dt,
1864 NULL, NULL, NULL);
1865 else
1866 vect_model_load_cost (stmt_info, ncopies, false, NULL, NULL, NULL);
1867 return true;
1870 /** Transform. **/
1872 if (STMT_VINFO_GATHER_P (stmt_info))
1874 tree vec_oprnd0 = NULL_TREE, op;
1875 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
1876 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
1877 tree ptr, vec_mask = NULL_TREE, mask_op = NULL_TREE, var, scale;
1878 tree perm_mask = NULL_TREE, prev_res = NULL_TREE;
1879 tree mask_perm_mask = NULL_TREE;
1880 edge pe = loop_preheader_edge (loop);
1881 gimple_seq seq;
1882 basic_block new_bb;
1883 enum { NARROW, NONE, WIDEN } modifier;
1884 int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);
1886 rettype = TREE_TYPE (TREE_TYPE (gather_decl));
1887 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1888 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1889 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1890 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
1891 scaletype = TREE_VALUE (arglist);
1892 gcc_checking_assert (types_compatible_p (srctype, rettype)
1893 && types_compatible_p (srctype, masktype));
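/* Decide how the gather offset vector relates to the data vector:
   equal length (NONE), twice as long (WIDEN: every second copy reuses
   a permuted half of the previous offset vector), or half as long
   (NARROW: two gather results are combined into one data vector, so
   twice as many copies are emitted).  */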
1895 if (nunits == gather_off_nunits)
1896 modifier = NONE;
1897 else if (nunits == gather_off_nunits / 2)
1899 unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
1900 modifier = WIDEN;
1902 for (i = 0; i < gather_off_nunits; ++i)
1903 sel[i] = i | nunits;
1905 perm_mask = vect_gen_perm_mask (gather_off_vectype, sel);
1906 gcc_assert (perm_mask != NULL_TREE);
1908 else if (nunits == gather_off_nunits * 2)
1910 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
1911 modifier = NARROW;
1913 for (i = 0; i < nunits; ++i)
1914 sel[i] = i < gather_off_nunits
1915 ? i : i + nunits - gather_off_nunits;
1917 perm_mask = vect_gen_perm_mask (vectype, sel);
1918 gcc_assert (perm_mask != NULL_TREE);
1919 ncopies *= 2;
1920 for (i = 0; i < nunits; ++i)
1921 sel[i] = i | gather_off_nunits;
1922 mask_perm_mask = vect_gen_perm_mask (masktype, sel);
1923 gcc_assert (mask_perm_mask != NULL_TREE);
1925 else
1926 gcc_unreachable ();
1928 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
1930 ptr = fold_convert (ptrtype, gather_base);
1931 if (!is_gimple_min_invariant (ptr))
1933 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
1934 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
1935 gcc_assert (!new_bb);
1938 scale = build_int_cst (scaletype, gather_scale);
1940 prev_stmt_info = NULL;
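/* Main gather emission loop: for each copy build a masked gather
   builtin call, view-converting the offset and mask operands when the
   builtin's argument types differ from the vectypes used in the loop,
   and chain the copies via STMT_VINFO_RELATED_STMT.  */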
1941 for (j = 0; j < ncopies; ++j)
1943 if (modifier == WIDEN && (j & 1))
1944 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
1945 perm_mask, stmt, gsi);
1946 else if (j == 0)
1947 op = vec_oprnd0
1948 = vect_get_vec_def_for_operand (gather_off, stmt, NULL);
1949 else
1950 op = vec_oprnd0
1951 = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);
1953 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
1955 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
1956 == TYPE_VECTOR_SUBPARTS (idxtype));
1957 var = vect_get_new_vect_var (idxtype, vect_simple_var, NULL);
1958 var = make_ssa_name (var, NULL);
1959 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
1960 new_stmt
1961 = gimple_build_assign_with_ops (VIEW_CONVERT_EXPR, var,
1962 op, NULL_TREE);
1963 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1964 op = var;
1967 if (mask_perm_mask && (j & 1))
1968 mask_op = permute_vec_elements (mask_op, mask_op,
1969 mask_perm_mask, stmt, gsi);
1970 else
1972 if (j == 0)
1973 vec_mask = vect_get_vec_def_for_operand (mask, stmt, NULL);
1974 else
1976 vect_is_simple_use (vec_mask, NULL, loop_vinfo, NULL,
1977 &def_stmt, &def, &dt);
1978 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
1981 mask_op = vec_mask;
1982 if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
1984 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op))
1985 == TYPE_VECTOR_SUBPARTS (masktype));
1986 var = vect_get_new_vect_var (masktype, vect_simple_var,
1987 NULL);
1988 var = make_ssa_name (var, NULL);
1989 mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
1990 new_stmt
1991 = gimple_build_assign_with_ops (VIEW_CONVERT_EXPR, var,
1992 mask_op, NULL_TREE);
1993 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1994 mask_op = var;
1998 new_stmt
1999 = gimple_build_call (gather_decl, 5, mask_op, ptr, op, mask_op,
2000 scale);
2002 if (!useless_type_conversion_p (vectype, rettype))
2004 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
2005 == TYPE_VECTOR_SUBPARTS (rettype));
2006 var = vect_get_new_vect_var (rettype, vect_simple_var, NULL);
2007 op = make_ssa_name (var, new_stmt);
2008 gimple_call_set_lhs (new_stmt, op);
2009 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2010 var = make_ssa_name (vec_dest, NULL);
2011 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
2012 new_stmt
2013 = gimple_build_assign_with_ops (VIEW_CONVERT_EXPR, var, op,
2014 NULL_TREE);
2016 else
2018 var = make_ssa_name (vec_dest, new_stmt);
2019 gimple_call_set_lhs (new_stmt, var);
2022 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2024 if (modifier == NARROW)
2026 if ((j & 1) == 0)
2028 prev_res = var;
2029 continue;
2031 var = permute_vec_elements (prev_res, var,
2032 perm_mask, stmt, gsi);
2033 new_stmt = SSA_NAME_DEF_STMT (var);
2036 if (prev_stmt_info == NULL)
2037 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2038 else
2039 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2040 prev_stmt_info = vinfo_for_stmt (new_stmt);
2043 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
2044 from the IL. */
2045 tree lhs = gimple_call_lhs (stmt);
2046 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
2047 set_vinfo_for_stmt (new_stmt, stmt_info);
2048 set_vinfo_for_stmt (stmt, NULL);
2049 STMT_VINFO_STMT (stmt_info) = new_stmt;
2050 gsi_replace (gsi, new_stmt, true);
2051 return true;
2053 else if (is_store)
2055 tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
2056 prev_stmt_info = NULL;
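/* Masked store: emit one MASK_STORE internal call per copy, bumping
   the data-ref pointer by a full vector between copies and recording
   whatever alignment is known for DR on that pointer.  */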
2057 for (i = 0; i < ncopies; i++)
2059 unsigned align, misalign;
2061 if (i == 0)
2063 tree rhs = gimple_call_arg (stmt, 3);
2064 vec_rhs = vect_get_vec_def_for_operand (rhs, stmt, NULL);
2065 vec_mask = vect_get_vec_def_for_operand (mask, stmt, NULL);
2066 /* We should have caught mismatched types earlier. */
2067 gcc_assert (useless_type_conversion_p (vectype,
2068 TREE_TYPE (vec_rhs)));
2069 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
2070 NULL_TREE, &dummy, gsi,
2071 &ptr_incr, false, &inv_p);
2072 gcc_assert (!inv_p);
2074 else
2076 vect_is_simple_use (vec_rhs, NULL, loop_vinfo, NULL, &def_stmt,
2077 &def, &dt);
2078 vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs);
2079 vect_is_simple_use (vec_mask, NULL, loop_vinfo, NULL, &def_stmt,
2080 &def, &dt);
2081 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2082 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2083 TYPE_SIZE_UNIT (vectype));
2086 align = TYPE_ALIGN_UNIT (vectype);
2087 if (aligned_access_p (dr))
2088 misalign = 0;
2089 else if (DR_MISALIGNMENT (dr) == -1)
2091 align = TYPE_ALIGN_UNIT (elem_type);
2092 misalign = 0;
2094 else
2095 misalign = DR_MISALIGNMENT (dr);
2096 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2097 misalign);
2098 new_stmt
2099 = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
2100 gimple_call_arg (stmt, 1),
2101 vec_mask, vec_rhs);
2102 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2103 if (i == 0)
2104 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2105 else
2106 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2107 prev_stmt_info = vinfo_for_stmt (new_stmt);
2110 else
2112 tree vec_mask = NULL_TREE;
2113 prev_stmt_info = NULL;
2114 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
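/* Masked load: emit one MASK_LOAD internal call per copy into a fresh
   vector SSA name, advancing the data-ref pointer between copies.  */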
2115 for (i = 0; i < ncopies; i++)
2117 unsigned align, misalign;
2119 if (i == 0)
2121 vec_mask = vect_get_vec_def_for_operand (mask, stmt, NULL);
2122 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
2123 NULL_TREE, &dummy, gsi,
2124 &ptr_incr, false, &inv_p);
2125 gcc_assert (!inv_p);
2127 else
2129 vect_is_simple_use (vec_mask, NULL, loop_vinfo, NULL, &def_stmt,
2130 &def, &dt);
2131 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2132 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2133 TYPE_SIZE_UNIT (vectype));
2136 align = TYPE_ALIGN_UNIT (vectype);
2137 if (aligned_access_p (dr))
2138 misalign = 0;
2139 else if (DR_MISALIGNMENT (dr) == -1)
2141 align = TYPE_ALIGN_UNIT (elem_type);
2142 misalign = 0;
2144 else
2145 misalign = DR_MISALIGNMENT (dr);
2146 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2147 misalign);
2148 new_stmt
2149 = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
2150 gimple_call_arg (stmt, 1),
2151 vec_mask);
2152 gimple_call_set_lhs (new_stmt, make_ssa_name (vec_dest, NULL));
2153 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2154 if (i == 0)
2155 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2156 else
2157 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2158 prev_stmt_info = vinfo_for_stmt (new_stmt);
2162 if (!is_store)
2164 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
2165 from the IL. */
2166 tree lhs = gimple_call_lhs (stmt);
2167 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
2168 set_vinfo_for_stmt (new_stmt, stmt_info);
2169 set_vinfo_for_stmt (stmt, NULL);
2170 STMT_VINFO_STMT (stmt_info) = new_stmt;
2171 gsi_replace (gsi, new_stmt, true);
2174 return true;
2178 /* Function vectorizable_call.
2180 Check if STMT performs a function call that can be vectorized.
2181 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2182 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2183 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
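/* For instance, a statement such as x = sqrtf (a) inside the loop can
   be replaced by a single call to a target vector builtin operating on
   a whole vector of A values; the builtin is chosen by
   vectorizable_function below (illustrative example).  */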
2185 static bool
2186 vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
2187 slp_tree slp_node)
2189 tree vec_dest;
2190 tree scalar_dest;
2191 tree op, type;
2192 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
2193 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
2194 tree vectype_out, vectype_in;
2195 int nunits_in;
2196 int nunits_out;
2197 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2198 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2199 tree fndecl, new_temp, def, rhs_type;
2200 gimple def_stmt;
2201 enum vect_def_type dt[3]
2202 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
2203 gimple new_stmt = NULL;
2204 int ncopies, j;
2205 vec<tree> vargs = vNULL;
2206 enum { NARROW, NONE, WIDEN } modifier;
2207 size_t i, nargs;
2208 tree lhs;
2210 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2211 return false;
2213 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2214 return false;
2216 /* Is STMT a vectorizable call? */
2217 if (!is_gimple_call (stmt))
2218 return false;
2220 if (gimple_call_internal_p (stmt)
2221 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
2222 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
2223 return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
2224 slp_node);
2226 if (gimple_call_lhs (stmt) == NULL_TREE
2227 || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
2228 return false;
2230 gcc_checking_assert (!stmt_can_throw_internal (stmt));
2232 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2234 /* Process function arguments. */
2235 rhs_type = NULL_TREE;
2236 vectype_in = NULL_TREE;
2237 nargs = gimple_call_num_args (stmt);
2239 /* Bail out if the function has more than three arguments; we do not have
2240 interesting builtin functions to vectorize with more than two arguments,
2241 except for fma. Zero arguments is not supported either. */
2242 if (nargs == 0 || nargs > 3)
2243 return false;
2245 /* Ignore the argument of IFN_GOMP_SIMD_LANE; it is magic. */
2246 if (gimple_call_internal_p (stmt)
2247 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2249 nargs = 0;
2250 rhs_type = unsigned_type_node;
2253 for (i = 0; i < nargs; i++)
2255 tree opvectype;
2257 op = gimple_call_arg (stmt, i);
2259 /* We can only handle calls with arguments of the same type. */
2260 if (rhs_type
2261 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
2263 if (dump_enabled_p ())
2264 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2265 "argument types differ.\n");
2266 return false;
2268 if (!rhs_type)
2269 rhs_type = TREE_TYPE (op);
2271 if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
2272 &def_stmt, &def, &dt[i], &opvectype))
2274 if (dump_enabled_p ())
2275 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2276 "use not simple.\n");
2277 return false;
2280 if (!vectype_in)
2281 vectype_in = opvectype;
2282 else if (opvectype
2283 && opvectype != vectype_in)
2285 if (dump_enabled_p ())
2286 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2287 "argument vector types differ.\n");
2288 return false;
2291 /* If all arguments are external or constant defs use a vector type with
2292 the same size as the output vector type. */
2293 if (!vectype_in)
2294 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
2295 if (vec_stmt)
2296 gcc_assert (vectype_in);
2297 if (!vectype_in)
2299 if (dump_enabled_p ())
2301 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2302 "no vectype for scalar type ");
2303 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
2304 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2307 return false;
2310 /* FORNOW */
2311 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2312 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2313 if (nunits_in == nunits_out / 2)
2314 modifier = NARROW;
2315 else if (nunits_out == nunits_in)
2316 modifier = NONE;
2317 else if (nunits_out == nunits_in / 2)
2318 modifier = WIDEN;
2319 else
2320 return false;
2322 /* For now, we only vectorize functions if a target specific builtin
2323 is available. TODO -- in some cases, it might be profitable to
2324 insert the calls for pieces of the vector, in order to be able
2325 to vectorize other operations in the loop. */
2326 fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
2327 if (fndecl == NULL_TREE)
2329 if (gimple_call_internal_p (stmt)
2330 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE
2331 && !slp_node
2332 && loop_vinfo
2333 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2334 && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
2335 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2336 == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
2338 /* We can handle IFN_GOMP_SIMD_LANE by returning a
2339 { 0, 1, 2, ... vf - 1 } vector. */
2340 gcc_assert (nargs == 0);
2342 else
2344 if (dump_enabled_p ())
2345 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2346 "function is not vectorizable.\n");
2347 return false;
2351 gcc_assert (!gimple_vuse (stmt));
2353 if (slp_node || PURE_SLP_STMT (stmt_info))
2354 ncopies = 1;
2355 else if (modifier == NARROW)
2356 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
2357 else
2358 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
2360 /* Sanity check: make sure that at least one copy of the vectorized stmt
2361 needs to be generated. */
2362 gcc_assert (ncopies >= 1);
2364 if (!vec_stmt) /* transformation not required. */
2366 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2367 if (dump_enabled_p ())
2368 dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
2369 "\n");
2370 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
2371 return true;
2374 /** Transform. **/
2376 if (dump_enabled_p ())
2377 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
2379 /* Handle def. */
2380 scalar_dest = gimple_call_lhs (stmt);
2381 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2383 prev_stmt_info = NULL;
2384 switch (modifier)
2386 case NONE:
2387 for (j = 0; j < ncopies; ++j)
2389 /* Build argument list for the vectorized call. */
2390 if (j == 0)
2391 vargs.create (nargs);
2392 else
2393 vargs.truncate (0);
2395 if (slp_node)
2397 auto_vec<vec<tree> > vec_defs (nargs);
2398 vec<tree> vec_oprnds0;
2400 for (i = 0; i < nargs; i++)
2401 vargs.quick_push (gimple_call_arg (stmt, i));
2402 vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
2403 vec_oprnds0 = vec_defs[0];
2405 /* Arguments are ready. Create the new vector stmt. */
2406 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
2408 size_t k;
2409 for (k = 0; k < nargs; k++)
2411 vec<tree> vec_oprndsk = vec_defs[k];
2412 vargs[k] = vec_oprndsk[i];
2414 new_stmt = gimple_build_call_vec (fndecl, vargs);
2415 new_temp = make_ssa_name (vec_dest, new_stmt);
2416 gimple_call_set_lhs (new_stmt, new_temp);
2417 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2418 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2421 for (i = 0; i < nargs; i++)
2423 vec<tree> vec_oprndsi = vec_defs[i];
2424 vec_oprndsi.release ();
2426 continue;
2429 for (i = 0; i < nargs; i++)
2431 op = gimple_call_arg (stmt, i);
2432 if (j == 0)
2433 vec_oprnd0
2434 = vect_get_vec_def_for_operand (op, stmt, NULL);
2435 else
2437 vec_oprnd0 = gimple_call_arg (new_stmt, i);
2438 vec_oprnd0
2439 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2442 vargs.quick_push (vec_oprnd0);
2445 if (gimple_call_internal_p (stmt)
2446 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2448 tree *v = XALLOCAVEC (tree, nunits_out);
2449 int k;
2450 for (k = 0; k < nunits_out; ++k)
2451 v[k] = build_int_cst (unsigned_type_node, j * nunits_out + k);
2452 tree cst = build_vector (vectype_out, v);
2453 tree new_var
2454 = vect_get_new_vect_var (vectype_out, vect_simple_var, "cst_");
2455 gimple init_stmt = gimple_build_assign (new_var, cst);
2456 new_temp = make_ssa_name (new_var, init_stmt);
2457 gimple_assign_set_lhs (init_stmt, new_temp);
2458 vect_init_vector_1 (stmt, init_stmt, NULL);
2459 new_temp = make_ssa_name (vec_dest, NULL);
2460 new_stmt = gimple_build_assign (new_temp,
2461 gimple_assign_lhs (init_stmt));
2463 else
2465 new_stmt = gimple_build_call_vec (fndecl, vargs);
2466 new_temp = make_ssa_name (vec_dest, new_stmt);
2467 gimple_call_set_lhs (new_stmt, new_temp);
2469 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2471 if (j == 0)
2472 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2473 else
2474 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2476 prev_stmt_info = vinfo_for_stmt (new_stmt);
2479 break;
2481 case NARROW:
2482 for (j = 0; j < ncopies; ++j)
2484 /* Build argument list for the vectorized call. */
2485 if (j == 0)
2486 vargs.create (nargs * 2);
2487 else
2488 vargs.truncate (0);
2490 if (slp_node)
2492 auto_vec<vec<tree> > vec_defs (nargs);
2493 vec<tree> vec_oprnds0;
2495 for (i = 0; i < nargs; i++)
2496 vargs.quick_push (gimple_call_arg (stmt, i));
2497 vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
2498 vec_oprnds0 = vec_defs[0];
2500 /* Arguments are ready. Create the new vector stmt. */
2501 for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
2503 size_t k;
2504 vargs.truncate (0);
2505 for (k = 0; k < nargs; k++)
2507 vec<tree> vec_oprndsk = vec_defs[k];
2508 vargs.quick_push (vec_oprndsk[i]);
2509 vargs.quick_push (vec_oprndsk[i + 1]);
2511 new_stmt = gimple_build_call_vec (fndecl, vargs);
2512 new_temp = make_ssa_name (vec_dest, new_stmt);
2513 gimple_call_set_lhs (new_stmt, new_temp);
2514 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2515 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2518 for (i = 0; i < nargs; i++)
2520 vec<tree> vec_oprndsi = vec_defs[i];
2521 vec_oprndsi.release ();
2523 continue;
2526 for (i = 0; i < nargs; i++)
2528 op = gimple_call_arg (stmt, i);
2529 if (j == 0)
2531 vec_oprnd0
2532 = vect_get_vec_def_for_operand (op, stmt, NULL);
2533 vec_oprnd1
2534 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2536 else
2538 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
2539 vec_oprnd0
2540 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
2541 vec_oprnd1
2542 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2545 vargs.quick_push (vec_oprnd0);
2546 vargs.quick_push (vec_oprnd1);
2549 new_stmt = gimple_build_call_vec (fndecl, vargs);
2550 new_temp = make_ssa_name (vec_dest, new_stmt);
2551 gimple_call_set_lhs (new_stmt, new_temp);
2552 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2554 if (j == 0)
2555 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
2556 else
2557 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2559 prev_stmt_info = vinfo_for_stmt (new_stmt);
2562 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
2564 break;
2566 case WIDEN:
2567 /* No current target implements this case. */
2568 return false;
2571 vargs.release ();
2573 /* The call in STMT might prevent it from being removed in dce.
2574 However, we cannot remove it here, due to the way the ssa name
2575 it defines is mapped to the new definition. So just replace the
2576 rhs of the statement with something harmless. */
2578 if (slp_node)
2579 return true;
2581 type = TREE_TYPE (scalar_dest);
2582 if (is_pattern_stmt_p (stmt_info))
2583 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
2584 else
2585 lhs = gimple_call_lhs (stmt);
2586 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
2587 set_vinfo_for_stmt (new_stmt, stmt_info);
2588 set_vinfo_for_stmt (stmt, NULL);
2589 STMT_VINFO_STMT (stmt_info) = new_stmt;
2590 gsi_replace (gsi, new_stmt, false);
2592 return true;
2596 struct simd_call_arg_info
2598 tree vectype;
2599 tree op;
2600 enum vect_def_type dt;
2601 HOST_WIDE_INT linear_step;
2602 unsigned int align;
2605 /* Function vectorizable_simd_clone_call.
2607 Check if STMT performs a function call that can be vectorized
2608 by calling a simd clone of the function.
2609 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2610 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2611 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
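/* For example, a call y = foo (x) where foo carries
   "#pragma omp declare simd" can be replaced by a call to one of foo's
   simd clones that takes and returns whole vectors (illustrative
   example; the clone is picked by the badness computation below).  */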
2613 static bool
2614 vectorizable_simd_clone_call (gimple stmt, gimple_stmt_iterator *gsi,
2615 gimple *vec_stmt, slp_tree slp_node)
2617 tree vec_dest;
2618 tree scalar_dest;
2619 tree op, type;
2620 tree vec_oprnd0 = NULL_TREE;
2621 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
2622 tree vectype;
2623 unsigned int nunits;
2624 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2625 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2626 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
2627 tree fndecl, new_temp, def;
2628 gimple def_stmt;
2629 gimple new_stmt = NULL;
2630 int ncopies, j;
2631 vec<simd_call_arg_info> arginfo = vNULL;
2632 vec<tree> vargs = vNULL;
2633 size_t i, nargs;
2634 tree lhs, rtype, ratype;
2635 vec<constructor_elt, va_gc> *ret_ctor_elts;
2637 /* Is STMT a vectorizable call? */
2638 if (!is_gimple_call (stmt))
2639 return false;
2641 fndecl = gimple_call_fndecl (stmt);
2642 if (fndecl == NULL_TREE)
2643 return false;
2645 struct cgraph_node *node = cgraph_get_node (fndecl);
2646 if (node == NULL || node->simd_clones == NULL)
2647 return false;
2649 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2650 return false;
2652 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
2653 return false;
2655 if (gimple_call_lhs (stmt)
2656 && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
2657 return false;
2659 gcc_checking_assert (!stmt_can_throw_internal (stmt));
2661 vectype = STMT_VINFO_VECTYPE (stmt_info);
2663 if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
2664 return false;
2666 /* FORNOW */
2667 if (slp_node || PURE_SLP_STMT (stmt_info))
2668 return false;
2670 /* Process function arguments. */
2671 nargs = gimple_call_num_args (stmt);
2673 /* Bail out if the function has zero arguments. */
2674 if (nargs == 0)
2675 return false;
2677 arginfo.create (nargs);
2679 for (i = 0; i < nargs; i++)
2681 simd_call_arg_info thisarginfo;
2682 affine_iv iv;
2684 thisarginfo.linear_step = 0;
2685 thisarginfo.align = 0;
2686 thisarginfo.op = NULL_TREE;
2688 op = gimple_call_arg (stmt, i);
2689 if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
2690 &def_stmt, &def, &thisarginfo.dt,
2691 &thisarginfo.vectype)
2692 || thisarginfo.dt == vect_uninitialized_def)
2694 if (dump_enabled_p ())
2695 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2696 "use not simple.\n");
2697 arginfo.release ();
2698 return false;
2701 if (thisarginfo.dt == vect_constant_def
2702 || thisarginfo.dt == vect_external_def)
2703 gcc_assert (thisarginfo.vectype == NULL_TREE);
2704 else
2705 gcc_assert (thisarginfo.vectype != NULL_TREE);
2707 if (thisarginfo.dt != vect_constant_def
2708 && thisarginfo.dt != vect_external_def
2709 && loop_vinfo
2710 && TREE_CODE (op) == SSA_NAME
2711 && simple_iv (loop, loop_containing_stmt (stmt), op, &iv, false)
2712 && tree_fits_shwi_p (iv.step))
2714 thisarginfo.linear_step = tree_to_shwi (iv.step);
2715 thisarginfo.op = iv.base;
2717 else if ((thisarginfo.dt == vect_constant_def
2718 || thisarginfo.dt == vect_external_def)
2719 && POINTER_TYPE_P (TREE_TYPE (op)))
2720 thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
2722 arginfo.quick_push (thisarginfo);
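/* Choose the best simd clone for this call: prefer clones whose
   simdlen matches the vectorization factor, that the target considers
   usable, that are not in-branch, and whose parameter kinds
   (vector/uniform/linear) agree with what is known about the actual
   arguments.  Mismatches either disqualify a clone or add to its
   badness.  */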
2725 unsigned int badness = 0;
2726 struct cgraph_node *bestn = NULL;
2727 if (STMT_VINFO_SIMD_CLONE_FNDECL (stmt_info))
2728 bestn = cgraph_get_node (STMT_VINFO_SIMD_CLONE_FNDECL (stmt_info));
2729 else
2730 for (struct cgraph_node *n = node->simd_clones; n != NULL;
2731 n = n->simdclone->next_clone)
2733 unsigned int this_badness = 0;
2734 if (n->simdclone->simdlen
2735 > (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
2736 || n->simdclone->nargs != nargs)
2737 continue;
2738 if (n->simdclone->simdlen
2739 < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo))
2740 this_badness += (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
2741 - exact_log2 (n->simdclone->simdlen)) * 1024;
2742 if (n->simdclone->inbranch)
2743 this_badness += 2048;
2744 int target_badness = targetm.simd_clone.usable (n);
2745 if (target_badness < 0)
2746 continue;
2747 this_badness += target_badness * 512;
2748 /* FORNOW: Have to add code to add the mask argument. */
2749 if (n->simdclone->inbranch)
2750 continue;
2751 for (i = 0; i < nargs; i++)
2753 switch (n->simdclone->args[i].arg_type)
2755 case SIMD_CLONE_ARG_TYPE_VECTOR:
2756 if (!useless_type_conversion_p
2757 (n->simdclone->args[i].orig_type,
2758 TREE_TYPE (gimple_call_arg (stmt, i))))
2759 i = -1;
2760 else if (arginfo[i].dt == vect_constant_def
2761 || arginfo[i].dt == vect_external_def
2762 || arginfo[i].linear_step)
2763 this_badness += 64;
2764 break;
2765 case SIMD_CLONE_ARG_TYPE_UNIFORM:
2766 if (arginfo[i].dt != vect_constant_def
2767 && arginfo[i].dt != vect_external_def)
2768 i = -1;
2769 break;
2770 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
2771 if (arginfo[i].dt == vect_constant_def
2772 || arginfo[i].dt == vect_external_def
2773 || (arginfo[i].linear_step
2774 != n->simdclone->args[i].linear_step))
2775 i = -1;
2776 break;
2777 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
2778 /* FORNOW */
2779 i = -1;
2780 break;
2781 case SIMD_CLONE_ARG_TYPE_MASK:
2782 gcc_unreachable ();
2784 if (i == (size_t) -1)
2785 break;
2786 if (n->simdclone->args[i].alignment > arginfo[i].align)
2788 i = -1;
2789 break;
2791 if (arginfo[i].align)
2792 this_badness += (exact_log2 (arginfo[i].align)
2793 - exact_log2 (n->simdclone->args[i].alignment));
2795 if (i == (size_t) -1)
2796 continue;
2797 if (bestn == NULL || this_badness < badness)
2799 bestn = n;
2800 badness = this_badness;
2804 if (bestn == NULL)
2806 arginfo.release ();
2807 return false;
2810 for (i = 0; i < nargs; i++)
2811 if ((arginfo[i].dt == vect_constant_def
2812 || arginfo[i].dt == vect_external_def)
2813 && bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
2815 arginfo[i].vectype
2816 = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
2817 i)));
2818 if (arginfo[i].vectype == NULL
2819 || (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
2820 > bestn->simdclone->simdlen))
2822 arginfo.release ();
2823 return false;
2827 fndecl = bestn->decl;
2828 nunits = bestn->simdclone->simdlen;
2829 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
2831 /* If the function isn't const, only allow it in simd loops where the user
2832 has asserted that at least nunits consecutive iterations can be
2833 performed using SIMD instructions. */
2834 if ((loop == NULL || (unsigned) loop->safelen < nunits)
2835 && gimple_vuse (stmt))
2837 arginfo.release ();
2838 return false;
2841 /* Sanity check: make sure that at least one copy of the vectorized stmt
2842 needs to be generated. */
2843 gcc_assert (ncopies >= 1);
2845 if (!vec_stmt) /* transformation not required. */
2847 STMT_VINFO_SIMD_CLONE_FNDECL (stmt_info) = bestn->decl;
2848 STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
2849 if (dump_enabled_p ())
2850 dump_printf_loc (MSG_NOTE, vect_location,
2851 "=== vectorizable_simd_clone_call ===\n");
2852 /* vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
2853 arginfo.release ();
2854 return true;
2857 /** Transform. **/
2859 if (dump_enabled_p ())
2860 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
2862 /* Handle def. */
2863 scalar_dest = gimple_call_lhs (stmt);
2864 vec_dest = NULL_TREE;
2865 rtype = NULL_TREE;
2866 ratype = NULL_TREE;
2867 if (scalar_dest)
2869 vec_dest = vect_create_destination_var (scalar_dest, vectype);
2870 rtype = TREE_TYPE (TREE_TYPE (fndecl));
2871 if (TREE_CODE (rtype) == ARRAY_TYPE)
2873 ratype = rtype;
2874 rtype = TREE_TYPE (ratype);
2878 prev_stmt_info = NULL;
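/* Emit NCOPIES calls to the selected clone.  Vector arguments may have
   to be split with BIT_FIELD_REFs or glued together with CONSTRUCTORs
   when the clone's vector types are narrower or wider than the loop's
   vectype; linear arguments get their own induction variable in the
   loop header.  */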
2879 for (j = 0; j < ncopies; ++j)
2881 /* Build argument list for the vectorized call. */
2882 if (j == 0)
2883 vargs.create (nargs);
2884 else
2885 vargs.truncate (0);
2887 for (i = 0; i < nargs; i++)
2889 unsigned int k, l, m, o;
2890 tree atype;
2891 op = gimple_call_arg (stmt, i);
2892 switch (bestn->simdclone->args[i].arg_type)
2894 case SIMD_CLONE_ARG_TYPE_VECTOR:
2895 atype = bestn->simdclone->args[i].vector_type;
2896 o = nunits / TYPE_VECTOR_SUBPARTS (atype);
2897 for (m = j * o; m < (j + 1) * o; m++)
2899 if (TYPE_VECTOR_SUBPARTS (atype)
2900 < TYPE_VECTOR_SUBPARTS (arginfo[i].vectype))
2902 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
2903 k = (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
2904 / TYPE_VECTOR_SUBPARTS (atype));
2905 gcc_assert ((k & (k - 1)) == 0);
2906 if (m == 0)
2907 vec_oprnd0
2908 = vect_get_vec_def_for_operand (op, stmt, NULL);
2909 else
2911 vec_oprnd0 = arginfo[i].op;
2912 if ((m & (k - 1)) == 0)
2913 vec_oprnd0
2914 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
2915 vec_oprnd0);
2917 arginfo[i].op = vec_oprnd0;
2918 vec_oprnd0
2919 = build3 (BIT_FIELD_REF, atype, vec_oprnd0,
2920 size_int (prec),
2921 bitsize_int ((m & (k - 1)) * prec));
2922 new_stmt
2923 = gimple_build_assign (make_ssa_name (atype, NULL),
2924 vec_oprnd0);
2925 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2926 vargs.safe_push (gimple_assign_lhs (new_stmt));
2928 else
2930 k = (TYPE_VECTOR_SUBPARTS (atype)
2931 / TYPE_VECTOR_SUBPARTS (arginfo[i].vectype));
2932 gcc_assert ((k & (k - 1)) == 0);
2933 vec<constructor_elt, va_gc> *ctor_elts;
2934 if (k != 1)
2935 vec_alloc (ctor_elts, k);
2936 else
2937 ctor_elts = NULL;
2938 for (l = 0; l < k; l++)
2940 if (m == 0 && l == 0)
2941 vec_oprnd0
2942 = vect_get_vec_def_for_operand (op, stmt, NULL);
2943 else
2944 vec_oprnd0
2945 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
2946 arginfo[i].op);
2947 arginfo[i].op = vec_oprnd0;
2948 if (k == 1)
2949 break;
2950 CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
2951 vec_oprnd0);
2953 if (k == 1)
2954 vargs.safe_push (vec_oprnd0);
2955 else
2957 vec_oprnd0 = build_constructor (atype, ctor_elts);
2958 new_stmt
2959 = gimple_build_assign (make_ssa_name (atype, NULL),
2960 vec_oprnd0);
2961 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2962 vargs.safe_push (gimple_assign_lhs (new_stmt));
2966 break;
2967 case SIMD_CLONE_ARG_TYPE_UNIFORM:
2968 vargs.safe_push (op);
2969 break;
2970 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
2971 if (j == 0)
2973 gimple_seq stmts;
2974 arginfo[i].op
2975 = force_gimple_operand (arginfo[i].op, &stmts, true,
2976 NULL_TREE);
2977 if (stmts != NULL)
2979 basic_block new_bb;
2980 edge pe = loop_preheader_edge (loop);
2981 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
2982 gcc_assert (!new_bb);
2984 tree phi_res = copy_ssa_name (op, NULL);
2985 gimple new_phi = create_phi_node (phi_res, loop->header);
2986 set_vinfo_for_stmt (new_phi,
2987 new_stmt_vec_info (new_phi, loop_vinfo,
2988 NULL));
2989 add_phi_arg (new_phi, arginfo[i].op,
2990 loop_preheader_edge (loop), UNKNOWN_LOCATION);
2991 enum tree_code code
2992 = POINTER_TYPE_P (TREE_TYPE (op))
2993 ? POINTER_PLUS_EXPR : PLUS_EXPR;
2994 tree type = POINTER_TYPE_P (TREE_TYPE (op))
2995 ? sizetype : TREE_TYPE (op);
2996 double_int cst
2997 = double_int::from_shwi
2998 (bestn->simdclone->args[i].linear_step);
2999 cst *= double_int::from_uhwi (ncopies * nunits);
3000 tree tcst = double_int_to_tree (type, cst);
3001 tree phi_arg = copy_ssa_name (op, NULL);
3002 new_stmt = gimple_build_assign_with_ops (code, phi_arg,
3003 phi_res, tcst);
3004 gimple_stmt_iterator si = gsi_after_labels (loop->header);
3005 gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
3006 set_vinfo_for_stmt (new_stmt,
3007 new_stmt_vec_info (new_stmt, loop_vinfo,
3008 NULL));
3009 add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
3010 UNKNOWN_LOCATION);
3011 arginfo[i].op = phi_res;
3012 vargs.safe_push (phi_res);
3014 else
3016 enum tree_code code
3017 = POINTER_TYPE_P (TREE_TYPE (op))
3018 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3019 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3020 ? sizetype : TREE_TYPE (op);
3021 double_int cst
3022 = double_int::from_shwi
3023 (bestn->simdclone->args[i].linear_step);
3024 cst *= double_int::from_uhwi (j * nunits);
3025 tree tcst = double_int_to_tree (type, cst);
3026 new_temp = make_ssa_name (TREE_TYPE (op), NULL);
3027 new_stmt
3028 = gimple_build_assign_with_ops (code, new_temp,
3029 arginfo[i].op, tcst);
3030 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3031 vargs.safe_push (new_temp);
3033 break;
3034 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
3035 default:
3036 gcc_unreachable ();
3040 new_stmt = gimple_build_call_vec (fndecl, vargs);
3041 if (vec_dest)
3043 gcc_assert (ratype || TYPE_VECTOR_SUBPARTS (rtype) == nunits);
3044 if (ratype)
3045 new_temp = create_tmp_var (ratype, NULL);
3046 else if (TYPE_VECTOR_SUBPARTS (vectype)
3047 == TYPE_VECTOR_SUBPARTS (rtype))
3048 new_temp = make_ssa_name (vec_dest, new_stmt);
3049 else
3050 new_temp = make_ssa_name (rtype, new_stmt);
3051 gimple_call_set_lhs (new_stmt, new_temp);
3053 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3055 if (vec_dest)
3057 if (TYPE_VECTOR_SUBPARTS (vectype) < nunits)
3059 unsigned int k, l;
3060 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
3061 k = nunits / TYPE_VECTOR_SUBPARTS (vectype);
3062 gcc_assert ((k & (k - 1)) == 0);
3063 for (l = 0; l < k; l++)
3065 tree t;
3066 if (ratype)
3068 t = build_fold_addr_expr (new_temp);
3069 t = build2 (MEM_REF, vectype, t,
3070 build_int_cst (TREE_TYPE (t),
3071 l * prec / BITS_PER_UNIT));
3073 else
3074 t = build3 (BIT_FIELD_REF, vectype, new_temp,
3075 size_int (prec), bitsize_int (l * prec));
3076 new_stmt
3077 = gimple_build_assign (make_ssa_name (vectype, NULL), t);
3078 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3079 if (j == 0 && l == 0)
3080 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3081 else
3082 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3084 prev_stmt_info = vinfo_for_stmt (new_stmt);
3087 if (ratype)
3089 tree clobber = build_constructor (ratype, NULL);
3090 TREE_THIS_VOLATILE (clobber) = 1;
3091 new_stmt = gimple_build_assign (new_temp, clobber);
3092 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3094 continue;
3096 else if (TYPE_VECTOR_SUBPARTS (vectype) > nunits)
3098 unsigned int k = (TYPE_VECTOR_SUBPARTS (vectype)
3099 / TYPE_VECTOR_SUBPARTS (rtype));
3100 gcc_assert ((k & (k - 1)) == 0);
3101 if ((j & (k - 1)) == 0)
3102 vec_alloc (ret_ctor_elts, k);
3103 if (ratype)
3105 unsigned int m, o = nunits / TYPE_VECTOR_SUBPARTS (rtype);
3106 for (m = 0; m < o; m++)
3108 tree tem = build4 (ARRAY_REF, rtype, new_temp,
3109 size_int (m), NULL_TREE, NULL_TREE);
3110 new_stmt
3111 = gimple_build_assign (make_ssa_name (rtype, NULL),
3112 tem);
3113 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3114 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
3115 gimple_assign_lhs (new_stmt));
3117 tree clobber = build_constructor (ratype, NULL);
3118 TREE_THIS_VOLATILE (clobber) = 1;
3119 new_stmt = gimple_build_assign (new_temp, clobber);
3120 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3122 else
3123 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
3124 if ((j & (k - 1)) != k - 1)
3125 continue;
3126 vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
3127 new_stmt
3128 = gimple_build_assign (make_ssa_name (vec_dest, NULL),
3129 vec_oprnd0);
3130 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3132 if ((unsigned) j == k - 1)
3133 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3134 else
3135 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3137 prev_stmt_info = vinfo_for_stmt (new_stmt);
3138 continue;
3140 else if (ratype)
3142 tree t = build_fold_addr_expr (new_temp);
3143 t = build2 (MEM_REF, vectype, t,
3144 build_int_cst (TREE_TYPE (t), 0));
3145 new_stmt
3146 = gimple_build_assign (make_ssa_name (vec_dest, NULL), t);
3147 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3148 tree clobber = build_constructor (ratype, NULL);
3149 TREE_THIS_VOLATILE (clobber) = 1;
3150 vect_finish_stmt_generation (stmt,
3151 gimple_build_assign (new_temp,
3152 clobber), gsi);
3156 if (j == 0)
3157 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3158 else
3159 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3161 prev_stmt_info = vinfo_for_stmt (new_stmt);
3164 vargs.release ();
3166 /* The call in STMT might prevent it from being removed in dce.
3167 However, we cannot remove it here, due to the way the ssa name
3168 it defines is mapped to the new definition. So just replace the
3169 rhs of the statement with something harmless. */
3171 if (slp_node)
3172 return true;
3174 if (scalar_dest)
3176 type = TREE_TYPE (scalar_dest);
3177 if (is_pattern_stmt_p (stmt_info))
3178 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
3179 else
3180 lhs = gimple_call_lhs (stmt);
3181 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
3183 else
3184 new_stmt = gimple_build_nop ();
3185 set_vinfo_for_stmt (new_stmt, stmt_info);
3186 set_vinfo_for_stmt (stmt, NULL);
3187 STMT_VINFO_STMT (stmt_info) = new_stmt;
3188 gsi_replace (gsi, new_stmt, true);
3189 unlink_stmt_vdef (stmt);
3191 return true;
3195 /* Function vect_gen_widened_results_half
3197 Create a vector stmt whose code, type, number of arguments, and result
3198 variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
3199 VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at BSI.
3200 In the case that CODE is a CALL_EXPR, this means that a call to DECL
3201 needs to be created (DECL is a function-decl of a target-builtin).
3202 STMT is the original scalar stmt that we are vectorizing. */
3204 static gimple
3205 vect_gen_widened_results_half (enum tree_code code,
3206 tree decl,
3207 tree vec_oprnd0, tree vec_oprnd1, int op_type,
3208 tree vec_dest, gimple_stmt_iterator *gsi,
3209 gimple stmt)
3211 gimple new_stmt;
3212 tree new_temp;
3214 /* Generate half of the widened result: */
3215 if (code == CALL_EXPR)
3217 /* Target specific support */
3218 if (op_type == binary_op)
3219 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
3220 else
3221 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
3222 new_temp = make_ssa_name (vec_dest, new_stmt);
3223 gimple_call_set_lhs (new_stmt, new_temp);
3225 else
3227 /* Generic support */
3228 gcc_assert (op_type == TREE_CODE_LENGTH (code));
3229 if (op_type != binary_op)
3230 vec_oprnd1 = NULL;
3231 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
3232 vec_oprnd1);
3233 new_temp = make_ssa_name (vec_dest, new_stmt);
3234 gimple_assign_set_lhs (new_stmt, new_temp);
3236 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3238 return new_stmt;
3242 /* Get vectorized definitions for loop-based vectorization. For the first
3243 operand we call vect_get_vec_def_for_operand() (with OPRND containing
3244 scalar operand), and for the rest we get a copy with
3245 vect_get_vec_def_for_stmt_copy() using the previous vector definition
3246 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
3247 The vectors are collected into VEC_OPRNDS. */
3249 static void
3250 vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
3251 vec<tree> *vec_oprnds, int multi_step_cvt)
3253 tree vec_oprnd;
3255 /* Get first vector operand. */
3256 /* All the vector operands except the very first one (that is the scalar oprnd)
3257 are stmt copies. */
3258 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
3259 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
3260 else
3261 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
3263 vec_oprnds->quick_push (vec_oprnd);
3265 /* Get second vector operand. */
3266 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
3267 vec_oprnds->quick_push (vec_oprnd);
3269 *oprnd = vec_oprnd;
3271 /* For conversion in multiple steps, continue to get operands
3272 recursively. */
3273 if (multi_step_cvt)
3274 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
3278 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
3279 For multi-step conversions store the resulting vectors and call the function
3280 recursively. */
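/* E.g. a two-step demotion from int to char with 4-element input
   vectors first packs pairs of V4SI operands into V8HI vectors and
   then pairs of those into V16QI results (illustrative example).  */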
3282 static void
3283 vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
3284 int multi_step_cvt, gimple stmt,
3285 vec<tree> vec_dsts,
3286 gimple_stmt_iterator *gsi,
3287 slp_tree slp_node, enum tree_code code,
3288 stmt_vec_info *prev_stmt_info)
3290 unsigned int i;
3291 tree vop0, vop1, new_tmp, vec_dest;
3292 gimple new_stmt;
3293 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3295 vec_dest = vec_dsts.pop ();
3297 for (i = 0; i < vec_oprnds->length (); i += 2)
3299 /* Create demotion operation. */
3300 vop0 = (*vec_oprnds)[i];
3301 vop1 = (*vec_oprnds)[i + 1];
3302 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
3303 new_tmp = make_ssa_name (vec_dest, new_stmt);
3304 gimple_assign_set_lhs (new_stmt, new_tmp);
3305 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3307 if (multi_step_cvt)
3308 /* Store the resulting vector for next recursive call. */
3309 (*vec_oprnds)[i/2] = new_tmp;
3310 else
3312 /* This is the last step of the conversion sequence. Store the
3313 vectors in SLP_NODE or in the vector info of the scalar statement
3314 (or in the STMT_VINFO_RELATED_STMT chain). */
3315 if (slp_node)
3316 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3317 else
3319 if (!*prev_stmt_info)
3320 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3321 else
3322 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
3324 *prev_stmt_info = vinfo_for_stmt (new_stmt);
3329 /* For multi-step demotion operations we first generate demotion operations
3330 from the source type to the intermediate types, and then combine the
3331 results (stored in VEC_OPRNDS) with a demotion operation to the destination
3332 type. */
3333 if (multi_step_cvt)
3335 /* At each level of recursion we have half of the operands we had at the
3336 previous level. */
3337 vec_oprnds->truncate ((i+1)/2);
3338 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
3339 stmt, vec_dsts, gsi, slp_node,
3340 VEC_PACK_TRUNC_EXPR,
3341 prev_stmt_info);
3344 vec_dsts.quick_push (vec_dest);
3348 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
3349 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
3350 the resulting vectors and call the function recursively. */
3352 static void
3353 vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
3354 vec<tree> *vec_oprnds1,
3355 gimple stmt, tree vec_dest,
3356 gimple_stmt_iterator *gsi,
3357 enum tree_code code1,
3358 enum tree_code code2, tree decl1,
3359 tree decl2, int op_type)
3361 int i;
3362 tree vop0, vop1, new_tmp1, new_tmp2;
3363 gimple new_stmt1, new_stmt2;
3364 vec<tree> vec_tmp = vNULL;
3366 vec_tmp.create (vec_oprnds0->length () * 2);
3367 FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
3369 if (op_type == binary_op)
3370 vop1 = (*vec_oprnds1)[i];
3371 else
3372 vop1 = NULL_TREE;
3374 /* Generate the two halves of promotion operation. */
3375 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
3376 op_type, vec_dest, gsi, stmt);
3377 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
3378 op_type, vec_dest, gsi, stmt);
3379 if (is_gimple_call (new_stmt1))
3381 new_tmp1 = gimple_call_lhs (new_stmt1);
3382 new_tmp2 = gimple_call_lhs (new_stmt2);
3384 else
3386 new_tmp1 = gimple_assign_lhs (new_stmt1);
3387 new_tmp2 = gimple_assign_lhs (new_stmt2);
3390 /* Store the results for the next step. */
3391 vec_tmp.quick_push (new_tmp1);
3392 vec_tmp.quick_push (new_tmp2);
3395 vec_oprnds0->release ();
3396 *vec_oprnds0 = vec_tmp;
3400 /* Check if STMT performs a conversion operation, that can be vectorized.
3401 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3402 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
3403 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
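/* Conversions handled here are NOP/CONVERT_EXPR casts, FLOAT_EXPR,
   FIX_TRUNC_EXPR, WIDEN_MULT_EXPR and WIDEN_LSHIFT_EXPR.  E.g. a
   short -> int widening cast is typically implemented by a pair of
   vector unpack-lo/unpack-hi operations (illustrative example).  */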
3405 static bool
3406 vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
3407 gimple *vec_stmt, slp_tree slp_node)
3409 tree vec_dest;
3410 tree scalar_dest;
3411 tree op0, op1 = NULL_TREE;
3412 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
3413 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3414 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3415 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
3416 enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
3417 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
3418 tree new_temp;
3419 tree def;
3420 gimple def_stmt;
3421 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
3422 gimple new_stmt = NULL;
3423 stmt_vec_info prev_stmt_info;
3424 int nunits_in;
3425 int nunits_out;
3426 tree vectype_out, vectype_in;
3427 int ncopies, i, j;
3428 tree lhs_type, rhs_type;
3429 enum { NARROW, NONE, WIDEN } modifier;
3430 vec<tree> vec_oprnds0 = vNULL;
3431 vec<tree> vec_oprnds1 = vNULL;
3432 tree vop0;
3433 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3434 int multi_step_cvt = 0;
3435 vec<tree> vec_dsts = vNULL;
3436 vec<tree> interm_types = vNULL;
3437 tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
3438 int op_type;
3439 enum machine_mode rhs_mode;
3440 unsigned short fltsz;
3442 /* Is STMT a vectorizable conversion? */
3444 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3445 return false;
3447 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
3448 return false;
3450 if (!is_gimple_assign (stmt))
3451 return false;
3453 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
3454 return false;
3456 code = gimple_assign_rhs_code (stmt);
3457 if (!CONVERT_EXPR_CODE_P (code)
3458 && code != FIX_TRUNC_EXPR
3459 && code != FLOAT_EXPR
3460 && code != WIDEN_MULT_EXPR
3461 && code != WIDEN_LSHIFT_EXPR)
3462 return false;
3464 op_type = TREE_CODE_LENGTH (code);
3466 /* Check types of lhs and rhs. */
3467 scalar_dest = gimple_assign_lhs (stmt);
3468 lhs_type = TREE_TYPE (scalar_dest);
3469 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
3471 op0 = gimple_assign_rhs1 (stmt);
3472 rhs_type = TREE_TYPE (op0);
3474 if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
3475 && !((INTEGRAL_TYPE_P (lhs_type)
3476 && INTEGRAL_TYPE_P (rhs_type))
3477 || (SCALAR_FLOAT_TYPE_P (lhs_type)
3478 && SCALAR_FLOAT_TYPE_P (rhs_type))))
3479 return false;
3481 if ((INTEGRAL_TYPE_P (lhs_type)
3482 && (TYPE_PRECISION (lhs_type)
3483 != GET_MODE_PRECISION (TYPE_MODE (lhs_type))))
3484 || (INTEGRAL_TYPE_P (rhs_type)
3485 && (TYPE_PRECISION (rhs_type)
3486 != GET_MODE_PRECISION (TYPE_MODE (rhs_type)))))
3488 if (dump_enabled_p ())
3489 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3490 "type conversion to/from bit-precision unsupported."
3491 "\n");
3492 return false;
3495 /* Check the operands of the operation. */
3496 if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
3497 &def_stmt, &def, &dt[0], &vectype_in))
3499 if (dump_enabled_p ())
3500 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3501 "use not simple.\n");
3502 return false;
3504 if (op_type == binary_op)
3506 bool ok;
3508 op1 = gimple_assign_rhs2 (stmt);
3509 gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
3510 /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
3511 OP1. */
3512 if (CONSTANT_CLASS_P (op0))
3513 ok = vect_is_simple_use_1 (op1, stmt, loop_vinfo, bb_vinfo,
3514 &def_stmt, &def, &dt[1], &vectype_in);
3515 else
3516 ok = vect_is_simple_use (op1, stmt, loop_vinfo, bb_vinfo, &def_stmt,
3517 &def, &dt[1]);
3519 if (!ok)
3521 if (dump_enabled_p ())
3522 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3523 "use not simple.\n");
3524 return false;
3528 /* If op0 is an external or constant defs use a vector type of
3529 the same size as the output vector type. */
3530 if (!vectype_in)
3531 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
3532 if (vec_stmt)
3533 gcc_assert (vectype_in);
3534 if (!vectype_in)
3536 if (dump_enabled_p ())
3538 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3539 "no vectype for scalar type ");
3540 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
3541 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3544 return false;
3547 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
3548 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
3549 if (nunits_in < nunits_out)
3550 modifier = NARROW;
3551 else if (nunits_out == nunits_in)
3552 modifier = NONE;
3553 else
3554 modifier = WIDEN;
3556 /* Multiple types in SLP are handled by creating the appropriate number of
3557 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3558 case of SLP. */
3559 if (slp_node || PURE_SLP_STMT (stmt_info))
3560 ncopies = 1;
3561 else if (modifier == NARROW)
3562 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
3563 else
3564 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
3566 /* Sanity check: make sure that at least one copy of the vectorized stmt
3567 needs to be generated. */
3568 gcc_assert (ncopies >= 1);
3570 /* Supportable by target? */
3571 switch (modifier)
3573 case NONE:
3574 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
3575 return false;
3576 if (supportable_convert_operation (code, vectype_out, vectype_in,
3577 &decl1, &code1))
3578 break;
3579 /* FALLTHRU */
3580 unsupported:
3581 if (dump_enabled_p ())
3582 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3583 "conversion not supported by target.\n");
3584 return false;
3586 case WIDEN:
3587 if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
3588 &code1, &code2, &multi_step_cvt,
3589 &interm_types))
3591 /* Binary widening operation can only be supported directly by the
3592 architecture. */
3593 gcc_assert (!(multi_step_cvt && op_type == binary_op));
3594 break;
3597 if (code != FLOAT_EXPR
3598 || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
3599 <= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
3600 goto unsupported;
3602 rhs_mode = TYPE_MODE (rhs_type);
3603 fltsz = GET_MODE_SIZE (TYPE_MODE (lhs_type));
3604 for (rhs_mode = GET_MODE_2XWIDER_MODE (TYPE_MODE (rhs_type));
3605 rhs_mode != VOIDmode && GET_MODE_SIZE (rhs_mode) <= fltsz;
3606 rhs_mode = GET_MODE_2XWIDER_MODE (rhs_mode))
3608 cvt_type
3609 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
3610 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
3611 if (cvt_type == NULL_TREE)
3612 goto unsupported;
3614 if (GET_MODE_SIZE (rhs_mode) == fltsz)
3616 if (!supportable_convert_operation (code, vectype_out,
3617 cvt_type, &decl1, &codecvt1))
3618 goto unsupported;
3620 else if (!supportable_widening_operation (code, stmt, vectype_out,
3621 cvt_type, &codecvt1,
3622 &codecvt2, &multi_step_cvt,
3623 &interm_types))
3624 continue;
3625 else
3626 gcc_assert (multi_step_cvt == 0);
3628 if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
3629 vectype_in, &code1, &code2,
3630 &multi_step_cvt, &interm_types))
3631 break;
3634 if (rhs_mode == VOIDmode || GET_MODE_SIZE (rhs_mode) > fltsz)
3635 goto unsupported;
3637 if (GET_MODE_SIZE (rhs_mode) == fltsz)
3638 codecvt2 = ERROR_MARK;
3639 else
3641 multi_step_cvt++;
3642 interm_types.safe_push (cvt_type);
3643 cvt_type = NULL_TREE;
3645 break;
3647 case NARROW:
3648 gcc_assert (op_type == unary_op);
3649 if (supportable_narrowing_operation (code, vectype_out, vectype_in,
3650 &code1, &multi_step_cvt,
3651 &interm_types))
3652 break;
3654 if (code != FIX_TRUNC_EXPR
3655 || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
3656 >= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
3657 goto unsupported;
3659 rhs_mode = TYPE_MODE (rhs_type);
3660 cvt_type
3661 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
3662 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
3663 if (cvt_type == NULL_TREE)
3664 goto unsupported;
3665 if (!supportable_convert_operation (code, cvt_type, vectype_in,
3666 &decl1, &codecvt1))
3667 goto unsupported;
3668 if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
3669 &code1, &multi_step_cvt,
3670 &interm_types))
3671 break;
3672 goto unsupported;
3674 default:
3675 gcc_unreachable ();
3678 if (!vec_stmt) /* transformation not required. */
3680 if (dump_enabled_p ())
3681 dump_printf_loc (MSG_NOTE, vect_location,
3682 "=== vectorizable_conversion ===\n");
3683 if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
3685 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
3686 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
3688 else if (modifier == NARROW)
3690 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
3691 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
3693 else
3695 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
3696 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
3698 interm_types.release ();
3699 return true;
3702 /** Transform. **/
3703 if (dump_enabled_p ())
3704 dump_printf_loc (MSG_NOTE, vect_location,
3705 "transform conversion. ncopies = %d.\n", ncopies);
3707 if (op_type == binary_op)
3709 if (CONSTANT_CLASS_P (op0))
3710 op0 = fold_convert (TREE_TYPE (op1), op0);
3711 else if (CONSTANT_CLASS_P (op1))
3712 op1 = fold_convert (TREE_TYPE (op0), op1);
3715 /* In case of multi-step conversion, we first generate conversion operations
3716 to the intermediate types, and then from those types to the final one.
3717 We create vector destinations for the intermediate types (TYPES) received
3718 from supportable_*_operation, and store them in the correct order
3719 for future use in vect_create_vectorized_*_stmts (). */
3720 vec_dsts.create (multi_step_cvt + 1);
3721 vec_dest = vect_create_destination_var (scalar_dest,
3722 (cvt_type && modifier == WIDEN)
3723 ? cvt_type : vectype_out);
3724 vec_dsts.quick_push (vec_dest);
3726 if (multi_step_cvt)
3728 for (i = interm_types.length () - 1;
3729 interm_types.iterate (i, &intermediate_type); i--)
3731 vec_dest = vect_create_destination_var (scalar_dest,
3732 intermediate_type);
3733 vec_dsts.quick_push (vec_dest);
3737 if (cvt_type)
3738 vec_dest = vect_create_destination_var (scalar_dest,
3739 modifier == WIDEN
3740 ? vectype_out : cvt_type);
3742 if (!slp_node)
3744 if (modifier == WIDEN)
3746 vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
3747 if (op_type == binary_op)
3748 vec_oprnds1.create (1);
3750 else if (modifier == NARROW)
3751 vec_oprnds0.create (
3752 2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
3754 else if (code == WIDEN_LSHIFT_EXPR)
3755 vec_oprnds1.create (slp_node->vec_stmts_size);
3757 last_oprnd = op0;
3758 prev_stmt_info = NULL;
3759 switch (modifier)
3761 case NONE:
3762 for (j = 0; j < ncopies; j++)
3764 if (j == 0)
3765 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node,
3766 -1);
3767 else
3768 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
3770 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
3772 /* Arguments are ready, create the new vector stmt. */
3773 if (code1 == CALL_EXPR)
3775 new_stmt = gimple_build_call (decl1, 1, vop0);
3776 new_temp = make_ssa_name (vec_dest, new_stmt);
3777 gimple_call_set_lhs (new_stmt, new_temp);
3779 else
3781 gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
3782 new_stmt = gimple_build_assign_with_ops (code1, vec_dest,
3783 vop0, NULL);
3784 new_temp = make_ssa_name (vec_dest, new_stmt);
3785 gimple_assign_set_lhs (new_stmt, new_temp);
3788 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3789 if (slp_node)
3790 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3793 if (j == 0)
3794 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3795 else
3796 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3797 prev_stmt_info = vinfo_for_stmt (new_stmt);
3799 break;
3801 case WIDEN:
3802 /* In case the vectorization factor (VF) is bigger than the number
3803 of elements that we can fit in a vectype (nunits), we have to
3804 generate more than one vector stmt - i.e. - we need to "unroll"
3805 the vector stmt by a factor VF/nunits. */
3806 for (j = 0; j < ncopies; j++)
3808 /* Handle uses. */
3809 if (j == 0)
3811 if (slp_node)
3813 if (code == WIDEN_LSHIFT_EXPR)
3815 unsigned int k;
3817 vec_oprnd1 = op1;
3818 /* Store vec_oprnd1 for every vector stmt to be created
3819 for SLP_NODE. We check during the analysis that all
3820 the shift arguments are the same. */
3821 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
3822 vec_oprnds1.quick_push (vec_oprnd1);
3824 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
3825 slp_node, -1);
3827 else
3828 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
3829 &vec_oprnds1, slp_node, -1);
3831 else
3833 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
3834 vec_oprnds0.quick_push (vec_oprnd0);
3835 if (op_type == binary_op)
3837 if (code == WIDEN_LSHIFT_EXPR)
3838 vec_oprnd1 = op1;
3839 else
3840 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt,
3841 NULL);
3842 vec_oprnds1.quick_push (vec_oprnd1);
3846 else
3848 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
3849 vec_oprnds0.truncate (0);
3850 vec_oprnds0.quick_push (vec_oprnd0);
3851 if (op_type == binary_op)
3853 if (code == WIDEN_LSHIFT_EXPR)
3854 vec_oprnd1 = op1;
3855 else
3856 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
3857 vec_oprnd1);
3858 vec_oprnds1.truncate (0);
3859 vec_oprnds1.quick_push (vec_oprnd1);
3863 /* Arguments are ready. Create the new vector stmts. */
3864 for (i = multi_step_cvt; i >= 0; i--)
3866 tree this_dest = vec_dsts[i];
3867 enum tree_code c1 = code1, c2 = code2;
3868 if (i == 0 && codecvt2 != ERROR_MARK)
3870 c1 = codecvt1;
3871 c2 = codecvt2;
3873 vect_create_vectorized_promotion_stmts (&vec_oprnds0,
3874 &vec_oprnds1,
3875 stmt, this_dest, gsi,
3876 c1, c2, decl1, decl2,
3877 op_type);
3880 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
3882 if (cvt_type)
3884 if (codecvt1 == CALL_EXPR)
3886 new_stmt = gimple_build_call (decl1, 1, vop0);
3887 new_temp = make_ssa_name (vec_dest, new_stmt);
3888 gimple_call_set_lhs (new_stmt, new_temp);
3890 else
3892 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
3893 new_temp = make_ssa_name (vec_dest, NULL);
3894 new_stmt = gimple_build_assign_with_ops (codecvt1,
3895 new_temp,
3896 vop0, NULL);
3899 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3901 else
3902 new_stmt = SSA_NAME_DEF_STMT (vop0);
3904 if (slp_node)
3905 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3906 else
3908 if (!prev_stmt_info)
3909 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3910 else
3911 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3912 prev_stmt_info = vinfo_for_stmt (new_stmt);
3917 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3918 break;
3920 case NARROW:
3921 /* In case the vectorization factor (VF) is bigger than the number
3922 of elements that we can fit in a vectype (nunits), we have to
3923 generate more than one vector stmt - i.e. - we need to "unroll"
3924 the vector stmt by a factor VF/nunits. */
3925 for (j = 0; j < ncopies; j++)
3927 /* Handle uses. */
3928 if (slp_node)
3929 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
3930 slp_node, -1);
3931 else
3933 vec_oprnds0.truncate (0);
3934 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
3935 vect_pow2 (multi_step_cvt) - 1);
3938 /* Arguments are ready. Create the new vector stmts. */
3939 if (cvt_type)
3940 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
3942 if (codecvt1 == CALL_EXPR)
3944 new_stmt = gimple_build_call (decl1, 1, vop0);
3945 new_temp = make_ssa_name (vec_dest, new_stmt);
3946 gimple_call_set_lhs (new_stmt, new_temp);
3948 else
3950 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
3951 new_temp = make_ssa_name (vec_dest, NULL);
3952 new_stmt = gimple_build_assign_with_ops (codecvt1, new_temp,
3953 vop0, NULL);
3956 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3957 vec_oprnds0[i] = new_temp;
3960 vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
3961 stmt, vec_dsts, gsi,
3962 slp_node, code1,
3963 &prev_stmt_info);
3966 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3967 break;
3970 vec_oprnds0.release ();
3971 vec_oprnds1.release ();
3972 vec_dsts.release ();
3973 interm_types.release ();
3975 return true;
3979 /* Function vectorizable_assignment.
3981 Check if STMT performs an assignment (copy) that can be vectorized.
3982 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3983 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3984 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3986 static bool
3987 vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
3988 gimple *vec_stmt, slp_tree slp_node)
3990 tree vec_dest;
3991 tree scalar_dest;
3992 tree op;
3993 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3994 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3995 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3996 tree new_temp;
3997 tree def;
3998 gimple def_stmt;
3999 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4000 unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4001 int ncopies;
4002 int i, j;
4003 vec<tree> vec_oprnds = vNULL;
4004 tree vop;
4005 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4006 gimple new_stmt = NULL;
4007 stmt_vec_info prev_stmt_info = NULL;
4008 enum tree_code code;
4009 tree vectype_in;
4011 /* Multiple types in SLP are handled by creating the appropriate number of
4012 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4013 case of SLP. */
4014 if (slp_node || PURE_SLP_STMT (stmt_info))
4015 ncopies = 1;
4016 else
4017 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4019 gcc_assert (ncopies >= 1);
4021 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4022 return false;
4024 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4025 return false;
4027 /* Is vectorizable assignment? */
4028 if (!is_gimple_assign (stmt))
4029 return false;
4031 scalar_dest = gimple_assign_lhs (stmt);
4032 if (TREE_CODE (scalar_dest) != SSA_NAME)
4033 return false;
4035 code = gimple_assign_rhs_code (stmt);
4036 if (gimple_assign_single_p (stmt)
4037 || code == PAREN_EXPR
4038 || CONVERT_EXPR_CODE_P (code))
4039 op = gimple_assign_rhs1 (stmt);
4040 else
4041 return false;
4043 if (code == VIEW_CONVERT_EXPR)
4044 op = TREE_OPERAND (op, 0);
4046 if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
4047 &def_stmt, &def, &dt[0], &vectype_in))
4049 if (dump_enabled_p ())
4050 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4051 "use not simple.\n");
4052 return false;
4055 /* We can handle NOP_EXPR conversions that do not change the number
4056 of elements or the vector size. */
4057 if ((CONVERT_EXPR_CODE_P (code)
4058 || code == VIEW_CONVERT_EXPR)
4059 && (!vectype_in
4060 || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
4061 || (GET_MODE_SIZE (TYPE_MODE (vectype))
4062 != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
4063 return false;
4065 /* We do not handle bit-precision changes. */
4066 if ((CONVERT_EXPR_CODE_P (code)
4067 || code == VIEW_CONVERT_EXPR)
4068 && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
4069 && ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4070 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4071 || ((TYPE_PRECISION (TREE_TYPE (op))
4072 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op))))))
4073 /* But a conversion that does not change the bit-pattern is ok. */
4074 && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4075 > TYPE_PRECISION (TREE_TYPE (op)))
4076 && TYPE_UNSIGNED (TREE_TYPE (op))))
4078 if (dump_enabled_p ())
4079 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4080 "type conversion to/from bit-precision "
4081 "unsupported.\n");
4082 return false;
4085 if (!vec_stmt) /* transformation not required. */
4087 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
4088 if (dump_enabled_p ())
4089 dump_printf_loc (MSG_NOTE, vect_location,
4090 "=== vectorizable_assignment ===\n");
4091 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4092 return true;
4095 /** Transform. **/
4096 if (dump_enabled_p ())
4097 dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");
4099 /* Handle def. */
4100 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4102 /* Handle use. */
4103 for (j = 0; j < ncopies; j++)
4105 /* Handle uses. */
4106 if (j == 0)
4107 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node, -1);
4108 else
4109 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
4111 /* Arguments are ready. create the new vector stmt. */
4112 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
4114 if (CONVERT_EXPR_CODE_P (code)
4115 || code == VIEW_CONVERT_EXPR)
4116 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
4117 new_stmt = gimple_build_assign (vec_dest, vop);
4118 new_temp = make_ssa_name (vec_dest, new_stmt);
4119 gimple_assign_set_lhs (new_stmt, new_temp);
4120 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4121 if (slp_node)
4122 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4125 if (slp_node)
4126 continue;
4128 if (j == 0)
4129 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4130 else
4131 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4133 prev_stmt_info = vinfo_for_stmt (new_stmt);
4136 vec_oprnds.release ();
4137 return true;
4141 /* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
4142 either as shift by a scalar or by a vector. */
4144 bool
4145 vect_supportable_shift (enum tree_code code, tree scalar_type)
4148 enum machine_mode vec_mode;
4149 optab optab;
4150 int icode;
4151 tree vectype;
4153 vectype = get_vectype_for_scalar_type (scalar_type);
4154 if (!vectype)
4155 return false;
4157 optab = optab_for_tree_code (code, vectype, optab_scalar);
4158 if (!optab
4159 || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
4161 optab = optab_for_tree_code (code, vectype, optab_vector);
4162 if (!optab
4163 || (optab_handler (optab, TYPE_MODE (vectype))
4164 == CODE_FOR_nothing))
4165 return false;
4168 vec_mode = TYPE_MODE (vectype);
4169 icode = (int) optab_handler (optab, vec_mode);
4170 if (icode == CODE_FOR_nothing)
4171 return false;
4173 return true;
4177 /* Function vectorizable_shift.
4179 Check if STMT performs a shift operation that can be vectorized.
4180 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4181 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4182 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4184 static bool
4185 vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
4186 gimple *vec_stmt, slp_tree slp_node)
4188 tree vec_dest;
4189 tree scalar_dest;
4190 tree op0, op1 = NULL;
4191 tree vec_oprnd1 = NULL_TREE;
4192 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4193 tree vectype;
4194 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4195 enum tree_code code;
4196 enum machine_mode vec_mode;
4197 tree new_temp;
4198 optab optab;
4199 int icode;
4200 enum machine_mode optab_op2_mode;
4201 tree def;
4202 gimple def_stmt;
4203 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4204 gimple new_stmt = NULL;
4205 stmt_vec_info prev_stmt_info;
4206 int nunits_in;
4207 int nunits_out;
4208 tree vectype_out;
4209 tree op1_vectype;
4210 int ncopies;
4211 int j, i;
4212 vec<tree> vec_oprnds0 = vNULL;
4213 vec<tree> vec_oprnds1 = vNULL;
4214 tree vop0, vop1;
4215 unsigned int k;
4216 bool scalar_shift_arg = true;
4217 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4218 int vf;
4220 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4221 return false;
4223 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4224 return false;
4226 /* Is STMT a vectorizable binary/unary operation? */
4227 if (!is_gimple_assign (stmt))
4228 return false;
4230 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4231 return false;
4233 code = gimple_assign_rhs_code (stmt);
4235 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
4236 || code == RROTATE_EXPR))
4237 return false;
4239 scalar_dest = gimple_assign_lhs (stmt);
4240 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4241 if (TYPE_PRECISION (TREE_TYPE (scalar_dest))
4242 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4244 if (dump_enabled_p ())
4245 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4246 "bit-precision shifts not supported.\n");
4247 return false;
4250 op0 = gimple_assign_rhs1 (stmt);
4251 if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
4252 &def_stmt, &def, &dt[0], &vectype))
4254 if (dump_enabled_p ())
4255 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4256 "use not simple.\n");
4257 return false;
4259 /* If op0 is an external or constant def, use a vector type with
4260 the same size as the output vector type. */
4261 if (!vectype)
4262 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
4263 if (vec_stmt)
4264 gcc_assert (vectype);
4265 if (!vectype)
4267 if (dump_enabled_p ())
4268 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4269 "no vectype for scalar type\n");
4270 return false;
4273 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4274 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
4275 if (nunits_out != nunits_in)
4276 return false;
4278 op1 = gimple_assign_rhs2 (stmt);
4279 if (!vect_is_simple_use_1 (op1, stmt, loop_vinfo, bb_vinfo, &def_stmt,
4280 &def, &dt[1], &op1_vectype))
4282 if (dump_enabled_p ())
4283 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4284 "use not simple.\n");
4285 return false;
4288 if (loop_vinfo)
4289 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4290 else
4291 vf = 1;
4293 /* Multiple types in SLP are handled by creating the appropriate number of
4294 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4295 case of SLP. */
4296 if (slp_node || PURE_SLP_STMT (stmt_info))
4297 ncopies = 1;
4298 else
4299 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
4301 gcc_assert (ncopies >= 1);
4303 /* Determine whether the shift amount is a vector or a scalar. If the
4304 shift/rotate amount is a vector, use the vector/vector shift optabs. */
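/* For example (purely illustrative, not from the surrounding code):
   "x[i] << 3" or "x[i] << n" with a loop-invariant N can use a
   vector-shifted-by-scalar pattern, whereas "x[i] << y[i]" needs a
   vector-shifted-by-vector pattern, because every element may be
   shifted by a different amount.  */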
4306 if (dt[1] == vect_internal_def && !slp_node)
4307 scalar_shift_arg = false;
4308 else if (dt[1] == vect_constant_def
4309 || dt[1] == vect_external_def
4310 || dt[1] == vect_internal_def)
4312 /* In SLP, we need to check whether the shift count is the same
4313 for all statements; in loops, if it is a constant or invariant,
4314 it is always a scalar shift. */
4315 if (slp_node)
4317 vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
4318 gimple slpstmt;
4320 FOR_EACH_VEC_ELT (stmts, k, slpstmt)
4321 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
4322 scalar_shift_arg = false;
4325 else
4327 if (dump_enabled_p ())
4328 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4329 "operand mode requires invariant argument.\n");
4330 return false;
4333 /* Vector shifted by vector. */
4334 if (!scalar_shift_arg)
4336 optab = optab_for_tree_code (code, vectype, optab_vector);
4337 if (dump_enabled_p ())
4338 dump_printf_loc (MSG_NOTE, vect_location,
4339 "vector/vector shift/rotate found.\n");
4341 if (!op1_vectype)
4342 op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
4343 if (op1_vectype == NULL_TREE
4344 || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
4346 if (dump_enabled_p ())
4347 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4348 "unusable type for last operand in"
4349 " vector/vector shift/rotate.\n");
4350 return false;
4353 /* See if the machine has a vector shifted by scalar insn and if not
4354 then see if it has a vector shifted by vector insn. */
4355 else
4357 optab = optab_for_tree_code (code, vectype, optab_scalar);
4358 if (optab
4359 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
4361 if (dump_enabled_p ())
4362 dump_printf_loc (MSG_NOTE, vect_location,
4363 "vector/scalar shift/rotate found.\n");
4365 else
4367 optab = optab_for_tree_code (code, vectype, optab_vector);
4368 if (optab
4369 && (optab_handler (optab, TYPE_MODE (vectype))
4370 != CODE_FOR_nothing))
4372 scalar_shift_arg = false;
4374 if (dump_enabled_p ())
4375 dump_printf_loc (MSG_NOTE, vect_location,
4376 "vector/vector shift/rotate found.\n");
4378 /* Unlike the other binary operators, shifts/rotates have
4379 an rhs of type int instead of the same type as the lhs,
4380 so make sure the scalar is the right type if we are
4381 dealing with vectors of long long/long/short/char. */
4382 if (dt[1] == vect_constant_def)
4383 op1 = fold_convert (TREE_TYPE (vectype), op1);
4384 else if (!useless_type_conversion_p (TREE_TYPE (vectype),
4385 TREE_TYPE (op1)))
4387 if (slp_node
4388 && TYPE_MODE (TREE_TYPE (vectype))
4389 != TYPE_MODE (TREE_TYPE (op1)))
4391 if (dump_enabled_p ())
4392 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4393 "unusable type for last operand in"
4394 " vector/vector shift/rotate.\n");
4395 return false;
4397 if (vec_stmt && !slp_node)
4399 op1 = fold_convert (TREE_TYPE (vectype), op1);
4400 op1 = vect_init_vector (stmt, op1,
4401 TREE_TYPE (vectype), NULL);
4408 /* Supportable by target? */
4409 if (!optab)
4411 if (dump_enabled_p ())
4412 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4413 "no optab.\n");
4414 return false;
4416 vec_mode = TYPE_MODE (vectype);
4417 icode = (int) optab_handler (optab, vec_mode);
4418 if (icode == CODE_FOR_nothing)
4420 if (dump_enabled_p ())
4421 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4422 "op not supported by target.\n");
4423 /* Check only during analysis. */
4424 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
4425 || (vf < vect_min_worthwhile_factor (code)
4426 && !vec_stmt))
4427 return false;
4428 if (dump_enabled_p ())
4429 dump_printf_loc (MSG_NOTE, vect_location,
4430 "proceeding using word mode.\n");
4433 /* Worthwhile without SIMD support? Check only during analysis. */
4434 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
4435 && vf < vect_min_worthwhile_factor (code)
4436 && !vec_stmt)
4438 if (dump_enabled_p ())
4439 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4440 "not worthwhile without SIMD support.\n");
4441 return false;
4444 if (!vec_stmt) /* transformation not required. */
4446 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
4447 if (dump_enabled_p ())
4448 dump_printf_loc (MSG_NOTE, vect_location,
4449 "=== vectorizable_shift ===\n");
4450 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4451 return true;
4454 /** Transform. **/
4456 if (dump_enabled_p ())
4457 dump_printf_loc (MSG_NOTE, vect_location,
4458 "transform binary/unary operation.\n");
4460 /* Handle def. */
4461 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4463 prev_stmt_info = NULL;
4464 for (j = 0; j < ncopies; j++)
4466 /* Handle uses. */
4467 if (j == 0)
4469 if (scalar_shift_arg)
4471 /* Vector shl and shr insn patterns can be defined with scalar
4472 operand 2 (shift operand). In this case, use constant or loop
4473 invariant op1 directly, without extending it to vector mode
4474 first. */
4475 optab_op2_mode = insn_data[icode].operand[2].mode;
4476 if (!VECTOR_MODE_P (optab_op2_mode))
4478 if (dump_enabled_p ())
4479 dump_printf_loc (MSG_NOTE, vect_location,
4480 "operand 1 using scalar mode.\n");
4481 vec_oprnd1 = op1;
4482 vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
4483 vec_oprnds1.quick_push (vec_oprnd1);
4484 if (slp_node)
4486 /* Store vec_oprnd1 for every vector stmt to be created
4487 for SLP_NODE. We check during the analysis that all
4488 the shift arguments are the same.
4489 TODO: Allow different constants for different vector
4490 stmts generated for an SLP instance. */
4491 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
4492 vec_oprnds1.quick_push (vec_oprnd1);
4497 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
4498 (a special case for certain kinds of vector shifts); otherwise,
4499 operand 1 should be of a vector type (the usual case). */
4500 if (vec_oprnd1)
4501 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4502 slp_node, -1);
4503 else
4504 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
4505 slp_node, -1);
4507 else
4508 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
4510 /* Arguments are ready. Create the new vector stmt. */
4511 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4513 vop1 = vec_oprnds1[i];
4514 new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
4515 new_temp = make_ssa_name (vec_dest, new_stmt);
4516 gimple_assign_set_lhs (new_stmt, new_temp);
4517 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4518 if (slp_node)
4519 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4522 if (slp_node)
4523 continue;
4525 if (j == 0)
4526 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4527 else
4528 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4529 prev_stmt_info = vinfo_for_stmt (new_stmt);
4532 vec_oprnds0.release ();
4533 vec_oprnds1.release ();
4535 return true;
4539 /* Function vectorizable_operation.
4541 Check if STMT performs a binary, unary or ternary operation that can
4542 be vectorized.
4543 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4544 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4545 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4547 static bool
4548 vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
4549 gimple *vec_stmt, slp_tree slp_node)
4551 tree vec_dest;
4552 tree scalar_dest;
4553 tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
4554 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4555 tree vectype;
4556 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4557 enum tree_code code;
4558 enum machine_mode vec_mode;
4559 tree new_temp;
4560 int op_type;
4561 optab optab;
4562 int icode;
4563 tree def;
4564 gimple def_stmt;
4565 enum vect_def_type dt[3]
4566 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
4567 gimple new_stmt = NULL;
4568 stmt_vec_info prev_stmt_info;
4569 int nunits_in;
4570 int nunits_out;
4571 tree vectype_out;
4572 int ncopies;
4573 int j, i;
4574 vec<tree> vec_oprnds0 = vNULL;
4575 vec<tree> vec_oprnds1 = vNULL;
4576 vec<tree> vec_oprnds2 = vNULL;
4577 tree vop0, vop1, vop2;
4578 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4579 int vf;
4581 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4582 return false;
4584 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
4585 return false;
4587 /* Is STMT a vectorizable binary/unary operation? */
4588 if (!is_gimple_assign (stmt))
4589 return false;
4591 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4592 return false;
4594 code = gimple_assign_rhs_code (stmt);
4596 /* For pointer addition, we should use the normal plus for
4597 the vector addition. */
4598 if (code == POINTER_PLUS_EXPR)
4599 code = PLUS_EXPR;
4601 /* Support only unary or binary operations. */
4602 op_type = TREE_CODE_LENGTH (code);
4603 if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
4605 if (dump_enabled_p ())
4606 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4607 "num. args = %d (not unary/binary/ternary op).\n",
4608 op_type);
4609 return false;
4612 scalar_dest = gimple_assign_lhs (stmt);
4613 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4615 /* Most operations cannot handle bit-precision types without extra
4616 truncations. */
4617 if ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4618 != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
4619 /* Exceptions are bitwise binary operations. */
4620 && code != BIT_IOR_EXPR
4621 && code != BIT_XOR_EXPR
4622 && code != BIT_AND_EXPR)
4624 if (dump_enabled_p ())
4625 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4626 "bit-precision arithmetic not supported.\n");
4627 return false;
4630 op0 = gimple_assign_rhs1 (stmt);
4631 if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
4632 &def_stmt, &def, &dt[0], &vectype))
4634 if (dump_enabled_p ())
4635 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4636 "use not simple.\n");
4637 return false;
4639 /* If op0 is an external or constant def, use a vector type with
4640 the same size as the output vector type. */
4641 if (!vectype)
4642 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
4643 if (vec_stmt)
4644 gcc_assert (vectype);
4645 if (!vectype)
4647 if (dump_enabled_p ())
4649 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4650 "no vectype for scalar type ");
4651 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
4652 TREE_TYPE (op0));
4653 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4656 return false;
4659 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4660 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
4661 if (nunits_out != nunits_in)
4662 return false;
4664 if (op_type == binary_op || op_type == ternary_op)
4666 op1 = gimple_assign_rhs2 (stmt);
4667 if (!vect_is_simple_use (op1, stmt, loop_vinfo, bb_vinfo, &def_stmt,
4668 &def, &dt[1]))
4670 if (dump_enabled_p ())
4671 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4672 "use not simple.\n");
4673 return false;
4676 if (op_type == ternary_op)
4678 op2 = gimple_assign_rhs3 (stmt);
4679 if (!vect_is_simple_use (op2, stmt, loop_vinfo, bb_vinfo, &def_stmt,
4680 &def, &dt[2]))
4682 if (dump_enabled_p ())
4683 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4684 "use not simple.\n");
4685 return false;
4689 if (loop_vinfo)
4690 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4691 else
4692 vf = 1;
4694 /* Multiple types in SLP are handled by creating the appropriate number of
4695 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4696 case of SLP. */
4697 if (slp_node || PURE_SLP_STMT (stmt_info))
4698 ncopies = 1;
4699 else
4700 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;
4702 gcc_assert (ncopies >= 1);
4704 /* Shifts are handled in vectorizable_shift (). */
4705 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
4706 || code == RROTATE_EXPR)
4707 return false;
4709 /* Supportable by target? */
4711 vec_mode = TYPE_MODE (vectype);
4712 if (code == MULT_HIGHPART_EXPR)
4714 if (can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype)))
4715 icode = LAST_INSN_CODE;
4716 else
4717 icode = CODE_FOR_nothing;
4719 else
4721 optab = optab_for_tree_code (code, vectype, optab_default);
4722 if (!optab)
4724 if (dump_enabled_p ())
4725 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4726 "no optab.\n");
4727 return false;
4729 icode = (int) optab_handler (optab, vec_mode);
4732 if (icode == CODE_FOR_nothing)
4734 if (dump_enabled_p ())
4735 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4736 "op not supported by target.\n");
4737 /* Check only during analysis. */
4738 if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
4739 || (!vec_stmt && vf < vect_min_worthwhile_factor (code)))
4740 return false;
4741 if (dump_enabled_p ())
4742 dump_printf_loc (MSG_NOTE, vect_location,
4743 "proceeding using word mode.\n");
4746 /* Worthwhile without SIMD support? Check only during analysis. */
4747 if (!VECTOR_MODE_P (vec_mode)
4748 && !vec_stmt
4749 && vf < vect_min_worthwhile_factor (code))
4751 if (dump_enabled_p ())
4752 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4753 "not worthwhile without SIMD support.\n");
4754 return false;
4757 if (!vec_stmt) /* transformation not required. */
4759 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
4760 if (dump_enabled_p ())
4761 dump_printf_loc (MSG_NOTE, vect_location,
4762 "=== vectorizable_operation ===\n");
4763 vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
4764 return true;
4767 /** Transform. **/
4769 if (dump_enabled_p ())
4770 dump_printf_loc (MSG_NOTE, vect_location,
4771 "transform binary/unary operation.\n");
4773 /* Handle def. */
4774 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4776 /* In case the vectorization factor (VF) is bigger than the number
4777 of elements that we can fit in a vectype (nunits), we have to generate
4778 more than one vector stmt - i.e. - we need to "unroll" the
4779 vector stmt by a factor VF/nunits. In doing so, we record a pointer
4780 from one copy of the vector stmt to the next, in the field
4781 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
4782 stages to find the correct vector defs to be used when vectorizing
4783 stmts that use the defs of the current stmt. The example below
4784 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
4785 we need to create 4 vectorized stmts):
4787 before vectorization:
4788 RELATED_STMT VEC_STMT
4789 S1: x = memref - -
4790 S2: z = x + 1 - -
4792 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
4793 there):
4794 RELATED_STMT VEC_STMT
4795 VS1_0: vx0 = memref0 VS1_1 -
4796 VS1_1: vx1 = memref1 VS1_2 -
4797 VS1_2: vx2 = memref2 VS1_3 -
4798 VS1_3: vx3 = memref3 - -
4799 S1: x = load - VS1_0
4800 S2: z = x + 1 - -
4802 step2: vectorize stmt S2 (done here):
4803 To vectorize stmt S2 we first need to find the relevant vector
4804 def for the first operand 'x'. This is, as usual, obtained from
4805 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
4806 that defines 'x' (S1). This way we find the stmt VS1_0, and the
4807 relevant vector def 'vx0'. Having found 'vx0' we can generate
4808 the vector stmt VS2_0, and as usual, record it in the
4809 STMT_VINFO_VEC_STMT of stmt S2.
4810 When creating the second copy (VS2_1), we obtain the relevant vector
4811 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
4812 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
4813 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
4814 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
4815 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
4816 chain of stmts and pointers:
4817 RELATED_STMT VEC_STMT
4818 VS1_0: vx0 = memref0 VS1_1 -
4819 VS1_1: vx1 = memref1 VS1_2 -
4820 VS1_2: vx2 = memref2 VS1_3 -
4821 VS1_3: vx3 = memref3 - -
4822 S1: x = load - VS1_0
4823 VS2_0: vz0 = vx0 + v1 VS2_1 -
4824 VS2_1: vz1 = vx1 + v1 VS2_2 -
4825 VS2_2: vz2 = vx2 + v1 VS2_3 -
4826 VS2_3: vz3 = vx3 + v1 - -
4827 S2: z = x + 1 - VS2_0 */
4829 prev_stmt_info = NULL;
4830 for (j = 0; j < ncopies; j++)
4832 /* Handle uses. */
4833 if (j == 0)
4835 if (op_type == binary_op || op_type == ternary_op)
4836 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
4837 slp_node, -1);
4838 else
4839 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4840 slp_node, -1);
4841 if (op_type == ternary_op)
4843 vec_oprnds2.create (1);
4844 vec_oprnds2.quick_push (vect_get_vec_def_for_operand (op2,
4845 stmt,
4846 NULL));
4849 else
4851 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
4852 if (op_type == ternary_op)
4854 tree vec_oprnd = vec_oprnds2.pop ();
4855 vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
4856 vec_oprnd));
4860 /* Arguments are ready. Create the new vector stmt. */
4861 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4863 vop1 = ((op_type == binary_op || op_type == ternary_op)
4864 ? vec_oprnds1[i] : NULL_TREE);
4865 vop2 = ((op_type == ternary_op)
4866 ? vec_oprnds2[i] : NULL_TREE);
4867 new_stmt = gimple_build_assign_with_ops (code, vec_dest,
4868 vop0, vop1, vop2);
4869 new_temp = make_ssa_name (vec_dest, new_stmt);
4870 gimple_assign_set_lhs (new_stmt, new_temp);
4871 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4872 if (slp_node)
4873 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4876 if (slp_node)
4877 continue;
4879 if (j == 0)
4880 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4881 else
4882 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4883 prev_stmt_info = vinfo_for_stmt (new_stmt);
4886 vec_oprnds0.release ();
4887 vec_oprnds1.release ();
4888 vec_oprnds2.release ();
4890 return true;
4893 /* A helper function to ensure data reference DR's base alignment
4894 for STMT_INFO. */
4896 static void
4897 ensure_base_align (stmt_vec_info stmt_info, struct data_reference *dr)
4899 if (!dr->aux)
4900 return;
4902 if (((dataref_aux *)dr->aux)->base_misaligned)
4904 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4905 tree base_decl = ((dataref_aux *)dr->aux)->base_decl;
4907 DECL_ALIGN (base_decl) = TYPE_ALIGN (vectype);
4908 DECL_USER_ALIGN (base_decl) = 1;
4909 ((dataref_aux *)dr->aux)->base_misaligned = false;
4914 /* Given a vector type VECTYPE, returns the VECTOR_CST mask that implements
4915 reversal of the vector elements. If that is impossible to do,
4916 returns NULL. */
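/* For instance (illustrative only): for a four-element vector the
   selector built below is {3, 2, 1, 0}, so
   VEC_PERM_EXPR <v, v, {3, 2, 1, 0}> yields V with its elements
   reversed.  */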
4918 static tree
4919 perm_mask_for_reverse (tree vectype)
4921 int i, nunits;
4922 unsigned char *sel;
4924 nunits = TYPE_VECTOR_SUBPARTS (vectype);
4925 sel = XALLOCAVEC (unsigned char, nunits);
4927 for (i = 0; i < nunits; ++i)
4928 sel[i] = nunits - 1 - i;
4930 return vect_gen_perm_mask (vectype, sel);
4933 /* Function vectorizable_store.
4935 Check if STMT defines a non-scalar data-ref (array/pointer/structure) that
4936 can be vectorized.
4937 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4938 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
4939 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
4941 static bool
4942 vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
4943 slp_tree slp_node)
4945 tree scalar_dest;
4946 tree data_ref;
4947 tree op;
4948 tree vec_oprnd = NULL_TREE;
4949 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4950 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
4951 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4952 tree elem_type;
4953 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4954 struct loop *loop = NULL;
4955 enum machine_mode vec_mode;
4956 tree dummy;
4957 enum dr_alignment_support alignment_support_scheme;
4958 tree def;
4959 gimple def_stmt;
4960 enum vect_def_type dt;
4961 stmt_vec_info prev_stmt_info = NULL;
4962 tree dataref_ptr = NULL_TREE;
4963 tree dataref_offset = NULL_TREE;
4964 gimple ptr_incr = NULL;
4965 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
4966 int ncopies;
4967 int j;
4968 gimple next_stmt, first_stmt = NULL;
4969 bool grouped_store = false;
4970 bool store_lanes_p = false;
4971 unsigned int group_size, i;
4972 vec<tree> dr_chain = vNULL;
4973 vec<tree> oprnds = vNULL;
4974 vec<tree> result_chain = vNULL;
4975 bool inv_p;
4976 bool negative = false;
4977 tree offset = NULL_TREE;
4978 vec<tree> vec_oprnds = vNULL;
4979 bool slp = (slp_node != NULL);
4980 unsigned int vec_num;
4981 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4982 tree aggr_type;
4984 if (loop_vinfo)
4985 loop = LOOP_VINFO_LOOP (loop_vinfo);
4987 /* Multiple types in SLP are handled by creating the appropriate number of
4988 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4989 case of SLP. */
4990 if (slp || PURE_SLP_STMT (stmt_info))
4991 ncopies = 1;
4992 else
4993 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
4995 gcc_assert (ncopies >= 1);
4997 /* FORNOW. This restriction should be relaxed. */
4998 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
5000 if (dump_enabled_p ())
5001 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5002 "multiple types in nested loop.\n");
5003 return false;
5006 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5007 return false;
5009 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
5010 return false;
5012 /* Is vectorizable store? */
5014 if (!is_gimple_assign (stmt))
5015 return false;
5017 scalar_dest = gimple_assign_lhs (stmt);
5018 if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
5019 && is_pattern_stmt_p (stmt_info))
5020 scalar_dest = TREE_OPERAND (scalar_dest, 0);
5021 if (TREE_CODE (scalar_dest) != ARRAY_REF
5022 && TREE_CODE (scalar_dest) != BIT_FIELD_REF
5023 && TREE_CODE (scalar_dest) != INDIRECT_REF
5024 && TREE_CODE (scalar_dest) != COMPONENT_REF
5025 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
5026 && TREE_CODE (scalar_dest) != REALPART_EXPR
5027 && TREE_CODE (scalar_dest) != MEM_REF)
5028 return false;
5030 gcc_assert (gimple_assign_single_p (stmt));
5031 op = gimple_assign_rhs1 (stmt);
5032 if (!vect_is_simple_use (op, stmt, loop_vinfo, bb_vinfo, &def_stmt,
5033 &def, &dt))
5035 if (dump_enabled_p ())
5036 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5037 "use not simple.\n");
5038 return false;
5041 elem_type = TREE_TYPE (vectype);
5042 vec_mode = TYPE_MODE (vectype);
5044 /* FORNOW. In some cases can vectorize even if data-type not supported
5045 (e.g. - array initialization with 0). */
5046 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
5047 return false;
5049 if (!STMT_VINFO_DATA_REF (stmt_info))
5050 return false;
5052 negative =
5053 tree_int_cst_compare (loop && nested_in_vect_loop_p (loop, stmt)
5054 ? STMT_VINFO_DR_STEP (stmt_info) : DR_STEP (dr),
5055 size_zero_node) < 0;
5056 if (negative && ncopies > 1)
5058 if (dump_enabled_p ())
5059 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5060 "multiple types with negative step.\n");
5061 return false;
5064 if (negative)
5066 gcc_assert (!grouped_store);
5067 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
5068 if (alignment_support_scheme != dr_aligned
5069 && alignment_support_scheme != dr_unaligned_supported)
5071 if (dump_enabled_p ())
5072 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5073 "negative step but alignment required.\n");
5074 return false;
5076 if (dt != vect_constant_def
5077 && dt != vect_external_def
5078 && !perm_mask_for_reverse (vectype))
5080 if (dump_enabled_p ())
5081 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5082 "negative step and reversing not supported.\n");
5083 return false;
5087 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
5089 grouped_store = true;
5090 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
5091 if (!slp && !PURE_SLP_STMT (stmt_info))
5093 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
5094 if (vect_store_lanes_supported (vectype, group_size))
5095 store_lanes_p = true;
5096 else if (!vect_grouped_store_supported (vectype, group_size))
5097 return false;
5100 if (first_stmt == stmt)
5102 /* STMT is the leader of the group. Check the operands of all the
5103 stmts of the group. */
5104 next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
5105 while (next_stmt)
5107 gcc_assert (gimple_assign_single_p (next_stmt));
5108 op = gimple_assign_rhs1 (next_stmt);
5109 if (!vect_is_simple_use (op, next_stmt, loop_vinfo, bb_vinfo,
5110 &def_stmt, &def, &dt))
5112 if (dump_enabled_p ())
5113 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5114 "use not simple.\n");
5115 return false;
5117 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5122 if (!vec_stmt) /* transformation not required. */
5124 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
5125 vect_model_store_cost (stmt_info, ncopies, store_lanes_p, dt,
5126 NULL, NULL, NULL);
5127 return true;
5130 /** Transform. **/
5132 ensure_base_align (stmt_info, dr);
5134 if (grouped_store)
5136 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5137 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
5139 GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
5141 /* FORNOW */
5142 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
5144 /* We vectorize all the stmts of the interleaving group when we
5145 reach the last stmt in the group. */
5146 if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
5147 < GROUP_SIZE (vinfo_for_stmt (first_stmt))
5148 && !slp)
5150 *vec_stmt = NULL;
5151 return true;
5154 if (slp)
5156 grouped_store = false;
5157 /* VEC_NUM is the number of vect stmts to be created for this
5158 group. */
5159 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
5160 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
5161 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5162 op = gimple_assign_rhs1 (first_stmt);
5164 else
5165 /* VEC_NUM is the number of vect stmts to be created for this
5166 group. */
5167 vec_num = group_size;
5169 else
5171 first_stmt = stmt;
5172 first_dr = dr;
5173 group_size = vec_num = 1;
5176 if (dump_enabled_p ())
5177 dump_printf_loc (MSG_NOTE, vect_location,
5178 "transform store. ncopies = %d\n", ncopies);
5180 dr_chain.create (group_size);
5181 oprnds.create (group_size);
5183 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
5184 gcc_assert (alignment_support_scheme);
5185 /* Targets with store-lane instructions must not require explicit
5186 realignment. */
5187 gcc_assert (!store_lanes_p
5188 || alignment_support_scheme == dr_aligned
5189 || alignment_support_scheme == dr_unaligned_supported);
5191 if (negative)
5192 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
5194 if (store_lanes_p)
5195 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
5196 else
5197 aggr_type = vectype;
5199 /* In case the vectorization factor (VF) is bigger than the number
5200 of elements that we can fit in a vectype (nunits), we have to generate
5201 more than one vector stmt - i.e. - we need to "unroll" the
5202 vector stmt by a factor VF/nunits. For more details see documentation in
5203 vect_get_vec_def_for_copy_stmt. */
5205 /* In case of interleaving (non-unit grouped access):
5207 S1: &base + 2 = x2
5208 S2: &base = x0
5209 S3: &base + 1 = x1
5210 S4: &base + 3 = x3
5212 We create vectorized stores starting from the base address (the access of
5213 the first stmt in the chain, S2 in the above example) when the last store
5214 stmt of the chain (S4) is reached:
5216 VS1: &base = vx2
5217 VS2: &base + vec_size*1 = vx0
5218 VS3: &base + vec_size*2 = vx1
5219 VS4: &base + vec_size*3 = vx3
5221 Then permutation statements are generated:
5223 VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
5224 VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
5227 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
5228 (the order of the data-refs in the output of vect_permute_store_chain
5229 corresponds to the order of scalar stmts in the interleaving chain - see
5230 the documentation of vect_permute_store_chain()).
5232 In case of both multiple types and interleaving, above vector stores and
5233 permutation stmts are created for every copy. The result vector stmts are
5234 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
5235 STMT_VINFO_RELATED_STMT for the next copies.
5238 prev_stmt_info = NULL;
5239 for (j = 0; j < ncopies; j++)
5241 gimple new_stmt;
5243 if (j == 0)
5245 if (slp)
5247 /* Get vectorized arguments for SLP_NODE. */
5248 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
5249 NULL, slp_node, -1);
5251 vec_oprnd = vec_oprnds[0];
5253 else
5255 /* For interleaved stores we collect vectorized defs for all the
5256 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
5257 used as an input to vect_permute_store_chain(), and OPRNDS as
5258 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
5260 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
5261 OPRNDS are of size 1. */
5262 next_stmt = first_stmt;
5263 for (i = 0; i < group_size; i++)
5265 /* Since gaps are not supported for interleaved stores,
5266 GROUP_SIZE is the exact number of stmts in the chain.
5267 Therefore, NEXT_STMT can't be NULL_TREE. In case that
5268 there is no interleaving, GROUP_SIZE is 1, and only one
5269 iteration of the loop will be executed. */
5270 gcc_assert (next_stmt
5271 && gimple_assign_single_p (next_stmt));
5272 op = gimple_assign_rhs1 (next_stmt);
5274 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
5275 NULL);
5276 dr_chain.quick_push (vec_oprnd);
5277 oprnds.quick_push (vec_oprnd);
5278 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5282 /* We should have caught mismatched types earlier. */
5283 gcc_assert (useless_type_conversion_p (vectype,
5284 TREE_TYPE (vec_oprnd)));
5285 bool simd_lane_access_p
5286 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
5287 if (simd_lane_access_p
5288 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
5289 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
5290 && integer_zerop (DR_OFFSET (first_dr))
5291 && integer_zerop (DR_INIT (first_dr))
5292 && alias_sets_conflict_p (get_alias_set (aggr_type),
5293 get_alias_set (DR_REF (first_dr))))
5295 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
5296 dataref_offset = build_int_cst (reference_alias_ptr_type
5297 (DR_REF (first_dr)), 0);
5298 inv_p = false;
5300 else
5301 dataref_ptr
5302 = vect_create_data_ref_ptr (first_stmt, aggr_type,
5303 simd_lane_access_p ? loop : NULL,
5304 offset, &dummy, gsi, &ptr_incr,
5305 simd_lane_access_p, &inv_p);
5306 gcc_assert (bb_vinfo || !inv_p);
5308 else
5310 /* For interleaved stores we created vectorized defs for all the
5311 defs stored in OPRNDS in the previous iteration (previous copy).
5312 DR_CHAIN is then used as an input to vect_permute_store_chain(),
5313 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
5314 next copy.
5315 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
5316 OPRNDS are of size 1. */
5317 for (i = 0; i < group_size; i++)
5319 op = oprnds[i];
5320 vect_is_simple_use (op, NULL, loop_vinfo, bb_vinfo, &def_stmt,
5321 &def, &dt);
5322 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
5323 dr_chain[i] = vec_oprnd;
5324 oprnds[i] = vec_oprnd;
5326 if (dataref_offset)
5327 dataref_offset
5328 = int_const_binop (PLUS_EXPR, dataref_offset,
5329 TYPE_SIZE_UNIT (aggr_type));
5330 else
5331 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
5332 TYPE_SIZE_UNIT (aggr_type));
5335 if (store_lanes_p)
5337 tree vec_array;
5339 /* Combine all the vectors into an array. */
5340 vec_array = create_vector_array (vectype, vec_num);
5341 for (i = 0; i < vec_num; i++)
5343 vec_oprnd = dr_chain[i];
5344 write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
5347 /* Emit:
5348 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
5349 data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
5350 new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array);
5351 gimple_call_set_lhs (new_stmt, data_ref);
5352 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5354 else
5356 new_stmt = NULL;
5357 if (grouped_store)
5359 if (j == 0)
5360 result_chain.create (group_size);
5361 /* Permute. */
5362 vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
5363 &result_chain);
5366 next_stmt = first_stmt;
5367 for (i = 0; i < vec_num; i++)
5369 unsigned align, misalign;
5371 if (i > 0)
5372 /* Bump the vector pointer. */
5373 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
5374 stmt, NULL_TREE);
5376 if (slp)
5377 vec_oprnd = vec_oprnds[i];
5378 else if (grouped_store)
5379 /* For grouped stores vectorized defs are interleaved in
5380 vect_permute_store_chain(). */
5381 vec_oprnd = result_chain[i];
5383 data_ref = build2 (MEM_REF, TREE_TYPE (vec_oprnd), dataref_ptr,
5384 dataref_offset
5385 ? dataref_offset
5386 : build_int_cst (reference_alias_ptr_type
5387 (DR_REF (first_dr)), 0));
5388 align = TYPE_ALIGN_UNIT (vectype);
5389 if (aligned_access_p (first_dr))
5390 misalign = 0;
5391 else if (DR_MISALIGNMENT (first_dr) == -1)
5393 TREE_TYPE (data_ref)
5394 = build_aligned_type (TREE_TYPE (data_ref),
5395 TYPE_ALIGN (elem_type));
5396 align = TYPE_ALIGN_UNIT (elem_type);
5397 misalign = 0;
5399 else
5401 TREE_TYPE (data_ref)
5402 = build_aligned_type (TREE_TYPE (data_ref),
5403 TYPE_ALIGN (elem_type));
5404 misalign = DR_MISALIGNMENT (first_dr);
5406 if (dataref_offset == NULL_TREE)
5407 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
5408 misalign);
5410 if (negative
5411 && dt != vect_constant_def
5412 && dt != vect_external_def)
5414 tree perm_mask = perm_mask_for_reverse (vectype);
5415 tree perm_dest
5416 = vect_create_destination_var (gimple_assign_rhs1 (stmt),
5417 vectype);
5418 tree new_temp = make_ssa_name (perm_dest, NULL);
5420 /* Generate the permute statement. */
5421 gimple perm_stmt
5422 = gimple_build_assign_with_ops (VEC_PERM_EXPR, new_temp,
5423 vec_oprnd, vec_oprnd,
5424 perm_mask);
5425 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5427 perm_stmt = SSA_NAME_DEF_STMT (new_temp);
5428 vec_oprnd = new_temp;
5431 /* Arguments are ready. Create the new vector stmt. */
5432 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
5433 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5435 if (slp)
5436 continue;
5438 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5439 if (!next_stmt)
5440 break;
5443 if (!slp)
5445 if (j == 0)
5446 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5447 else
5448 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5449 prev_stmt_info = vinfo_for_stmt (new_stmt);
5453 dr_chain.release ();
5454 oprnds.release ();
5455 result_chain.release ();
5456 vec_oprnds.release ();
5458 return true;
5461 /* Given a vector type VECTYPE and permutation SEL, returns
5462 the VECTOR_CST mask that implements the permutation of the
5463 vector elements. If that is impossible to do, returns NULL. */
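/* As a small illustrative example (not taken from this file): with
   SEL = {1, 0, 3, 2} on a four-element vector, the returned VECTOR_CST
   mask makes VEC_PERM_EXPR swap each adjacent pair of elements.  */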
5465 tree
5466 vect_gen_perm_mask (tree vectype, unsigned char *sel)
5468 tree mask_elt_type, mask_type, mask_vec, *mask_elts;
5469 int i, nunits;
5471 nunits = TYPE_VECTOR_SUBPARTS (vectype);
5473 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5474 return NULL;
5476 mask_elt_type = lang_hooks.types.type_for_mode
5477 (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
5478 mask_type = get_vectype_for_scalar_type (mask_elt_type);
5480 mask_elts = XALLOCAVEC (tree, nunits);
5481 for (i = nunits - 1; i >= 0; i--)
5482 mask_elts[i] = build_int_cst (mask_elt_type, sel[i]);
5483 mask_vec = build_vector (mask_type, mask_elts);
5485 return mask_vec;
5488 /* Given vector variables X and Y that were generated for the scalar
5489 STMT, generate instructions to permute the vector elements of X and Y
5490 using permutation mask MASK_VEC, insert them at *GSI, and return the
5491 permuted vector variable. */
5493 static tree
5494 permute_vec_elements (tree x, tree y, tree mask_vec, gimple stmt,
5495 gimple_stmt_iterator *gsi)
5497 tree vectype = TREE_TYPE (x);
5498 tree perm_dest, data_ref;
5499 gimple perm_stmt;
5501 perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
5502 data_ref = make_ssa_name (perm_dest, NULL);
5504 /* Generate the permute statement. */
5505 perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, data_ref,
5506 x, y, mask_vec);
5507 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5509 return data_ref;
5512 /* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
5513 inserting them on the loop's preheader edge. Returns true if we
5514 were successful in doing so (and thus STMT can then be moved),
5515 otherwise returns false. */
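/* For instance (an illustrative sketch): if STMT is a loop-invariant
   load whose address is computed by simple stmts inside LOOP whose own
   operands are all defined outside of it, those defining stmts are
   moved to the preheader here so that the caller can then hoist the
   load itself.  */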
5517 static bool
5518 hoist_defs_of_uses (gimple stmt, struct loop *loop)
5520 ssa_op_iter i;
5521 tree op;
5522 bool any = false;
5524 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
5526 gimple def_stmt = SSA_NAME_DEF_STMT (op);
5527 if (!gimple_nop_p (def_stmt)
5528 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
5530 /* Make sure we don't need to recurse. While we could do
5531 so in simple cases, for more complex use webs we don't have
5532 an easy way to preserve stmt order to fulfil
5533 dependencies within them. */
5534 tree op2;
5535 ssa_op_iter i2;
5536 if (gimple_code (def_stmt) == GIMPLE_PHI)
5537 return false;
5538 FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
5540 gimple def_stmt2 = SSA_NAME_DEF_STMT (op2);
5541 if (!gimple_nop_p (def_stmt2)
5542 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
5543 return false;
5545 any = true;
5549 if (!any)
5550 return true;
5552 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
5554 gimple def_stmt = SSA_NAME_DEF_STMT (op);
5555 if (!gimple_nop_p (def_stmt)
5556 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
5558 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
5559 gsi_remove (&gsi, false);
5560 gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
5564 return true;
5567 /* vectorizable_load.
5569 Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
5570 can be vectorized.
5571 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5572 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
5573 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5575 static bool
5576 vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
5577 slp_tree slp_node, slp_instance slp_node_instance)
5579 tree scalar_dest;
5580 tree vec_dest = NULL;
5581 tree data_ref = NULL;
5582 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5583 stmt_vec_info prev_stmt_info;
5584 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5585 struct loop *loop = NULL;
5586 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
5587 bool nested_in_vect_loop = false;
5588 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
5589 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5590 tree elem_type;
5591 tree new_temp;
5592 enum machine_mode mode;
5593 gimple new_stmt = NULL;
5594 tree dummy;
5595 enum dr_alignment_support alignment_support_scheme;
5596 tree dataref_ptr = NULL_TREE;
5597 tree dataref_offset = NULL_TREE;
5598 gimple ptr_incr = NULL;
5599 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
5600 int ncopies;
5601 int i, j, group_size, group_gap;
5602 tree msq = NULL_TREE, lsq;
5603 tree offset = NULL_TREE;
5604 tree byte_offset = NULL_TREE;
5605 tree realignment_token = NULL_TREE;
5606 gimple phi = NULL;
5607 vec<tree> dr_chain = vNULL;
5608 bool grouped_load = false;
5609 bool load_lanes_p = false;
5610 gimple first_stmt;
5611 bool inv_p;
5612 bool negative = false;
5613 bool compute_in_loop = false;
5614 struct loop *at_loop;
5615 int vec_num;
5616 bool slp = (slp_node != NULL);
5617 bool slp_perm = false;
5618 enum tree_code code;
5619 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5620 int vf;
5621 tree aggr_type;
5622 tree gather_base = NULL_TREE, gather_off = NULL_TREE;
5623 tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
5624 int gather_scale = 1;
5625 enum vect_def_type gather_dt = vect_unknown_def_type;
5627 if (loop_vinfo)
5629 loop = LOOP_VINFO_LOOP (loop_vinfo);
5630 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
5631 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
5633 else
5634 vf = 1;
5636 /* Multiple types in SLP are handled by creating the appropriate number of
5637 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5638 case of SLP. */
5639 if (slp || PURE_SLP_STMT (stmt_info))
5640 ncopies = 1;
5641 else
5642 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
5644 gcc_assert (ncopies >= 1);
5646 /* FORNOW. This restriction should be relaxed. */
5647 if (nested_in_vect_loop && ncopies > 1)
5649 if (dump_enabled_p ())
5650 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5651 "multiple types in nested loop.\n");
5652 return false;
5655 /* Invalidate assumptions made by dependence analysis when vectorization
5656 on the unrolled body effectively re-orders stmts. */
5657 if (ncopies > 1
5658 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
5659 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
5660 > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
5662 if (dump_enabled_p ())
5663 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5664 "cannot perform implicit CSE when unrolling "
5665 "with negative dependence distance\n");
5666 return false;
5669 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5670 return false;
5672 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
5673 return false;
5675 /* Is vectorizable load? */
5676 if (!is_gimple_assign (stmt))
5677 return false;
5679 scalar_dest = gimple_assign_lhs (stmt);
5680 if (TREE_CODE (scalar_dest) != SSA_NAME)
5681 return false;
5683 code = gimple_assign_rhs_code (stmt);
5684 if (code != ARRAY_REF
5685 && code != BIT_FIELD_REF
5686 && code != INDIRECT_REF
5687 && code != COMPONENT_REF
5688 && code != IMAGPART_EXPR
5689 && code != REALPART_EXPR
5690 && code != MEM_REF
5691 && TREE_CODE_CLASS (code) != tcc_declaration)
5692 return false;
5694 if (!STMT_VINFO_DATA_REF (stmt_info))
5695 return false;
5697 elem_type = TREE_TYPE (vectype);
5698 mode = TYPE_MODE (vectype);
5700 /* FORNOW. In some cases we can vectorize even if the data-type is not supported
5701 (e.g. - data copies). */
5702 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
5704 if (dump_enabled_p ())
5705 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5706 "Aligned load, but unsupported type.\n");
5707 return false;
5710 /* Check if the load is a part of an interleaving chain. */
5711 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
5713 grouped_load = true;
5714 /* FORNOW */
5715 gcc_assert (! nested_in_vect_loop && !STMT_VINFO_GATHER_P (stmt_info));
5717 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
5719 /* If this is single-element interleaving with an element distance
5720 that leaves unused vector loads around, punt - we at least create
5721 very sub-optimal code in that case (and blow up memory,
5722 see PR65518). */
5723 if (first_stmt == stmt
5724 && !GROUP_NEXT_ELEMENT (stmt_info)
5725 && GROUP_SIZE (stmt_info) > TYPE_VECTOR_SUBPARTS (vectype))
5727 if (dump_enabled_p ())
5728 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5729 "single-element interleaving not supported "
5730 "for not adjacent vector loads\n");
5731 return false;
5734 if (!slp && !PURE_SLP_STMT (stmt_info))
5736 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
5737 if (vect_load_lanes_supported (vectype, group_size))
5738 load_lanes_p = true;
5739 else if (!vect_grouped_load_supported (vectype, group_size))
5740 return false;
5743 /* Invalidate assumptions made by dependence analysis when vectorization
5744 on the unrolled body effectively re-orders stmts. */
5745 if (!PURE_SLP_STMT (stmt_info)
5746 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
5747 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
5748 > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
5750 if (dump_enabled_p ())
5751 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5752 "cannot perform implicit CSE when performing "
5753 "group loads with negative dependence distance\n");
5754 return false;
5759 if (STMT_VINFO_GATHER_P (stmt_info))
5761 gimple def_stmt;
5762 tree def;
5763 gather_decl = vect_check_gather (stmt, loop_vinfo, &gather_base,
5764 &gather_off, &gather_scale);
5765 gcc_assert (gather_decl);
5766 if (!vect_is_simple_use_1 (gather_off, NULL, loop_vinfo, bb_vinfo,
5767 &def_stmt, &def, &gather_dt,
5768 &gather_off_vectype))
5770 if (dump_enabled_p ())
5771 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5772 "gather index use not simple.\n");
5773 return false;
5776 else if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
5778 else
5780 negative = tree_int_cst_compare (nested_in_vect_loop
5781 ? STMT_VINFO_DR_STEP (stmt_info)
5782 : DR_STEP (dr),
5783 size_zero_node) < 0;
5784 if (negative && ncopies > 1)
5786 if (dump_enabled_p ())
5787 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5788 "multiple types with negative step.\n");
5789 return false;
5792 if (negative)
5794 if (grouped_load)
5796 if (dump_enabled_p ())
5797 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5798 "negative step for group load not supported"
5799 "\n");
5800 return false;
5802 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
5803 if (alignment_support_scheme != dr_aligned
5804 && alignment_support_scheme != dr_unaligned_supported)
5806 if (dump_enabled_p ())
5807 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5808 "negative step but alignment required.\n");
5809 return false;
5811 if (!perm_mask_for_reverse (vectype))
5813 if (dump_enabled_p ())
5814 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5815 "negative step and reversing not supported."
5816 "\n");
5817 return false;
5822 if (!vec_stmt) /* transformation not required. */
5824 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
5825 vect_model_load_cost (stmt_info, ncopies, load_lanes_p, NULL, NULL, NULL);
5826 return true;
5829 if (dump_enabled_p ())
5830 dump_printf_loc (MSG_NOTE, vect_location,
5831 "transform load. ncopies = %d\n", ncopies);
5833 /** Transform. **/
5835 ensure_base_align (stmt_info, dr);
5837 if (STMT_VINFO_GATHER_P (stmt_info))
5839 tree vec_oprnd0 = NULL_TREE, op;
5840 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
5841 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
5842 tree ptr, mask, var, scale, merge, perm_mask = NULL_TREE, prev_res = NULL_TREE;
5843 edge pe = loop_preheader_edge (loop);
5844 gimple_seq seq;
5845 basic_block new_bb;
5846 enum { NARROW, NONE, WIDEN } modifier;
5847 int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);
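          /* The gather index vector and the data vector may differ in element
             count.  If the index vector has twice as many elements as the
             data vector (WIDEN), one index vector covers two data vectors and
             the odd copies permute its high half into place.  If it has half
             as many (NARROW), each gather fills only half of a data vector,
             so NCOPIES is doubled and consecutive results are combined later
             using the permutation mask built here.  */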
5849 if (nunits == gather_off_nunits)
5850 modifier = NONE;
5851 else if (nunits == gather_off_nunits / 2)
5853 unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
5854 modifier = WIDEN;
5856 for (i = 0; i < gather_off_nunits; ++i)
5857 sel[i] = i | nunits;
5859 perm_mask = vect_gen_perm_mask (gather_off_vectype, sel);
5860 gcc_assert (perm_mask != NULL_TREE);
5862 else if (nunits == gather_off_nunits * 2)
5864 unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
5865 modifier = NARROW;
5867 for (i = 0; i < nunits; ++i)
5868 sel[i] = i < gather_off_nunits
5869 ? i : i + nunits - gather_off_nunits;
5871 perm_mask = vect_gen_perm_mask (vectype, sel);
5872 gcc_assert (perm_mask != NULL_TREE);
5873 ncopies *= 2;
5875 else
5876 gcc_unreachable ();
5878 rettype = TREE_TYPE (TREE_TYPE (gather_decl));
5879 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5880 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5881 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5882 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5883 scaletype = TREE_VALUE (arglist);
5884 gcc_checking_assert (types_compatible_p (srctype, rettype));
5886 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5888 ptr = fold_convert (ptrtype, gather_base);
5889 if (!is_gimple_min_invariant (ptr))
5891 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
5892 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
5893 gcc_assert (!new_bb);
5896 /* Currently we support only unconditional gather loads,
5897 so mask should be all ones. */
5898 if (TREE_CODE (masktype) == INTEGER_TYPE)
5899 mask = build_int_cst (masktype, -1);
5900 else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
5902 mask = build_int_cst (TREE_TYPE (masktype), -1);
5903 mask = build_vector_from_val (masktype, mask);
5904 mask = vect_init_vector (stmt, mask, masktype, NULL);
5906 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
5908 REAL_VALUE_TYPE r;
5909 long tmp[6];
5910 for (j = 0; j < 6; ++j)
5911 tmp[j] = -1;
5912 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
5913 mask = build_real (TREE_TYPE (masktype), r);
5914 mask = build_vector_from_val (masktype, mask);
5915 mask = vect_init_vector (stmt, mask, masktype, NULL);
5917 else
5918 gcc_unreachable ();
5920 scale = build_int_cst (scaletype, gather_scale);
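      /* The merge (source) operand supplies the values of masked-off lanes.
         The mask built above is all ones, so every lane is overwritten by the
         gather and a zero of the right element type is sufficient here.  */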
5922 if (TREE_CODE (TREE_TYPE (rettype)) == INTEGER_TYPE)
5923 merge = build_int_cst (TREE_TYPE (rettype), 0);
5924 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype)))
5926 REAL_VALUE_TYPE r;
5927 long tmp[6];
5928 for (j = 0; j < 6; ++j)
5929 tmp[j] = 0;
5930 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (rettype)));
5931 merge = build_real (TREE_TYPE (rettype), r);
5933 else
5934 gcc_unreachable ();
5935 merge = build_vector_from_val (rettype, merge);
5936 merge = vect_init_vector (stmt, merge, rettype, NULL);
5938 prev_stmt_info = NULL;
5939 for (j = 0; j < ncopies; ++j)
5941 if (modifier == WIDEN && (j & 1))
5942 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
5943 perm_mask, stmt, gsi);
5944 else if (j == 0)
5945 op = vec_oprnd0
5946 = vect_get_vec_def_for_operand (gather_off, stmt, NULL);
5947 else
5948 op = vec_oprnd0
5949 = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);
5951 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
5953 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
5954 == TYPE_VECTOR_SUBPARTS (idxtype));
5955 var = vect_get_new_vect_var (idxtype, vect_simple_var, NULL);
5956 var = make_ssa_name (var, NULL);
5957 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
5958 new_stmt
5959 = gimple_build_assign_with_ops (VIEW_CONVERT_EXPR, var,
5960 op, NULL_TREE);
5961 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5962 op = var;
5965 new_stmt
5966 = gimple_build_call (gather_decl, 5, merge, ptr, op, mask, scale);
5968 if (!useless_type_conversion_p (vectype, rettype))
5970 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
5971 == TYPE_VECTOR_SUBPARTS (rettype));
5972 var = vect_get_new_vect_var (rettype, vect_simple_var, NULL);
5973 op = make_ssa_name (var, new_stmt);
5974 gimple_call_set_lhs (new_stmt, op);
5975 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5976 var = make_ssa_name (vec_dest, NULL);
5977 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
5978 new_stmt
5979 = gimple_build_assign_with_ops (VIEW_CONVERT_EXPR, var, op,
5980 NULL_TREE);
5982 else
5984 var = make_ssa_name (vec_dest, new_stmt);
5985 gimple_call_set_lhs (new_stmt, var);
5988 vect_finish_stmt_generation (stmt, new_stmt, gsi);
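          /* For NARROW each gather produced only half of a data vector;
             keep the even-copy result and merge it with the following
             odd-copy result using the permutation mask.  */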
5990 if (modifier == NARROW)
5992 if ((j & 1) == 0)
5994 prev_res = var;
5995 continue;
5997 var = permute_vec_elements (prev_res, var,
5998 perm_mask, stmt, gsi);
5999 new_stmt = SSA_NAME_DEF_STMT (var);
6002 if (prev_stmt_info == NULL)
6003 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6004 else
6005 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6006 prev_stmt_info = vinfo_for_stmt (new_stmt);
6008 return true;
6010 else if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
6012 gimple_stmt_iterator incr_gsi;
6013 bool insert_after;
6014 gimple incr;
6015 tree offvar;
6016 tree ivstep;
6017 tree running_off;
6018 vec<constructor_elt, va_gc> *v = NULL;
6019 gimple_seq stmts = NULL;
6020 tree stride_base, stride_step, alias_off;
6022 gcc_assert (!nested_in_vect_loop);
6024 stride_base
6025 = fold_build_pointer_plus
6026 (unshare_expr (DR_BASE_ADDRESS (dr)),
6027 size_binop (PLUS_EXPR,
6028 convert_to_ptrofftype (unshare_expr (DR_OFFSET (dr))),
6029 convert_to_ptrofftype (DR_INIT (dr))));
6030 stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (dr)));
6032 /* For a load with loop-invariant (but other than power-of-2)
6033 stride (i.e. not a grouped access) like so:
6035 for (i = 0; i < n; i += stride)
6036 ... = array[i];
6038 we generate a new induction variable and new accesses to
6039 form a new vector (or vectors, depending on ncopies):
6041 for (j = 0; ; j += VF*stride)
6042 tmp1 = array[j];
6043 tmp2 = array[j + stride];
6045 vectemp = {tmp1, tmp2, ...}
6048 ivstep = stride_step;
6049 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
6050 build_int_cst (TREE_TYPE (ivstep), vf));
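      /* The induction variable OFFVAR advances by DR_STEP * VF bytes per
         vectorized loop iteration; RUNNING_OFF below then walks the
         individual scalar accesses of each copy in steps of DR_STEP.  */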
6052 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
6054 create_iv (stride_base, ivstep, NULL,
6055 loop, &incr_gsi, insert_after,
6056 &offvar, NULL);
6057 incr = gsi_stmt (incr_gsi);
6058 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
6060 stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
6061 if (stmts)
6062 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
6064 prev_stmt_info = NULL;
6065 running_off = offvar;
6066 alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (dr)), 0);
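      /* Each vector is built from NUNITS scalar loads at RUNNING_OFF,
         bumping the pointer by the stride after every element; the loaded
         values are collected in a CONSTRUCTOR and materialized with
         vect_init_vector.  */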
6067 for (j = 0; j < ncopies; j++)
6069 tree vec_inv;
6071 vec_alloc (v, nunits);
6072 for (i = 0; i < nunits; i++)
6074 tree newref, newoff;
6075 gimple incr;
6076 newref = build2 (MEM_REF, TREE_TYPE (vectype),
6077 running_off, alias_off);
6079 newref = force_gimple_operand_gsi (gsi, newref, true,
6080 NULL_TREE, true,
6081 GSI_SAME_STMT);
6082 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, newref);
6083 newoff = copy_ssa_name (running_off, NULL);
6084 incr = gimple_build_assign_with_ops (POINTER_PLUS_EXPR, newoff,
6085 running_off, stride_step);
6086 vect_finish_stmt_generation (stmt, incr, gsi);
6088 running_off = newoff;
6091 vec_inv = build_constructor (vectype, v);
6092 new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
6093 new_stmt = SSA_NAME_DEF_STMT (new_temp);
6095 if (j == 0)
6096 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6097 else
6098 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6099 prev_stmt_info = vinfo_for_stmt (new_stmt);
6101 return true;
6104 if (grouped_load)
6106 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
6107 if (slp
6108 && !SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()
6109 && first_stmt != SLP_TREE_SCALAR_STMTS (slp_node)[0])
6110 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
6112 /* Check if the chain of loads is already vectorized. */
6113 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
6114 /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
6115 ??? But we can only do so if there is exactly one
6116 as we have no way to get at the rest. Leave the CSE
6117 opportunity alone.
6118 ??? With the group load eventually participating
6119 in multiple different permutations (having multiple
6120 slp nodes which refer to the same group) the CSE
6121 is even wrong code. See PR56270. */
6122 && !slp)
6124 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
6125 return true;
6127 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
6128 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
6130 /* VEC_NUM is the number of vect stmts to be created for this group. */
6131 if (slp)
6133 grouped_load = false;
6134 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6135 if (SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
6136 slp_perm = true;
6137 group_gap = GROUP_GAP (vinfo_for_stmt (first_stmt));
6139 else
6141 vec_num = group_size;
6142 group_gap = 0;
6145 else
6147 first_stmt = stmt;
6148 first_dr = dr;
6149 group_size = vec_num = 1;
6150 group_gap = 0;
6153 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
6154 gcc_assert (alignment_support_scheme);
6155 /* Targets with load-lane instructions must not require explicit
6156 realignment. */
6157 gcc_assert (!load_lanes_p
6158 || alignment_support_scheme == dr_aligned
6159 || alignment_support_scheme == dr_unaligned_supported);
6161 /* In case the vectorization factor (VF) is bigger than the number
6162 of elements that we can fit in a vectype (nunits), we have to generate
6163 more than one vector stmt - i.e - we need to "unroll" the
6164 vector stmt by a factor VF/nunits. In doing so, we record a pointer
6165 from one copy of the vector stmt to the next, in the field
6166 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
6167 stages to find the correct vector defs to be used when vectorizing
6168 stmts that use the defs of the current stmt. The example below
6169 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
6170 need to create 4 vectorized stmts):
6172 before vectorization:
6173 RELATED_STMT VEC_STMT
6174 S1: x = memref - -
6175 S2: z = x + 1 - -
6177 step 1: vectorize stmt S1:
6178 We first create the vector stmt VS1_0, and, as usual, record a
6179 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
6180 Next, we create the vector stmt VS1_1, and record a pointer to
6181 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
6182 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
6183 stmts and pointers:
6184 RELATED_STMT VEC_STMT
6185 VS1_0: vx0 = memref0 VS1_1 -
6186 VS1_1: vx1 = memref1 VS1_2 -
6187 VS1_2: vx2 = memref2 VS1_3 -
6188 VS1_3: vx3 = memref3 - -
6189 S1: x = load - VS1_0
6190 S2: z = x + 1 - -
6192 See in documentation in vect_get_vec_def_for_stmt_copy for how the
6193 information we recorded in RELATED_STMT field is used to vectorize
6194 stmt S2. */
6196 /* In case of interleaving (non-unit grouped access):
6198 S1: x2 = &base + 2
6199 S2: x0 = &base
6200 S3: x1 = &base + 1
6201 S4: x3 = &base + 3
6203 Vectorized loads are created in the order of memory accesses
6204 starting from the access of the first stmt of the chain:
6206 VS1: vx0 = &base
6207 VS2: vx1 = &base + vec_size*1
6208 VS3: vx3 = &base + vec_size*2
6209 VS4: vx4 = &base + vec_size*3
6211 Then permutation statements are generated:
6213 VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
6214 VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
6217 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
6218 (the order of the data-refs in the output of vect_permute_load_chain
6219 corresponds to the order of scalar stmts in the interleaving chain - see
6220 the documentation of vect_permute_load_chain()).
6221 The generation of permutation stmts and recording them in
6222 STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().
6224 In case of both multiple types and interleaving, the vector loads and
6225 permutation stmts above are created for every copy. The result vector
6226 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
6227 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
6229 /* If the data reference is aligned (dr_aligned) or potentially unaligned
6230 on a target that supports unaligned accesses (dr_unaligned_supported)
6231 we generate the following code:
6232 p = initial_addr;
6233 indx = 0;
6234 loop {
6235 p = p + indx * vectype_size;
6236 vec_dest = *(p);
6237 indx = indx + 1;
6240 Otherwise, the data reference is potentially unaligned on a target that
6241 does not support unaligned accesses (dr_explicit_realign_optimized) -
6242 then generate the following code, in which the data in each iteration is
6243 obtained by two vector loads, one from the previous iteration, and one
6244 from the current iteration:
6245 p1 = initial_addr;
6246 msq_init = *(floor(p1))
6247 p2 = initial_addr + VS - 1;
6248 realignment_token = call target_builtin;
6249 indx = 0;
6250 loop {
6251 p2 = p2 + indx * vectype_size
6252 lsq = *(floor(p2))
6253 vec_dest = realign_load (msq, lsq, realignment_token)
6254 indx = indx + 1;
6255 msq = lsq;
6256 } */
6258 /* If the misalignment remains the same throughout the execution of the
6259 loop, we can create the init_addr and permutation mask at the loop
6260 preheader. Otherwise, it needs to be created inside the loop.
6261 This can only occur when vectorizing memory accesses in the inner-loop
6262 nested within an outer-loop that is being vectorized. */
6264 if (nested_in_vect_loop
6265 && (TREE_INT_CST_LOW (DR_STEP (dr))
6266 % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
6268 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
6269 compute_in_loop = true;
6272 if ((alignment_support_scheme == dr_explicit_realign_optimized
6273 || alignment_support_scheme == dr_explicit_realign)
6274 && !compute_in_loop)
6276 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
6277 alignment_support_scheme, NULL_TREE,
6278 &at_loop);
6279 if (alignment_support_scheme == dr_explicit_realign_optimized)
6281 phi = SSA_NAME_DEF_STMT (msq);
6282 byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
6283 size_one_node);
6286 else
6287 at_loop = loop;
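  /* For a negative step the scalar accesses run backwards, so the vector
     load is taken NUNITS - 1 elements before the current access and the
     result is reversed by a permutation further down.  E.g. with a step of
     -1 element and NUNITS == 4, iteration i loads { a[i-3], ..., a[i] } and
     the reverse permutation yields { a[i], ..., a[i-3] }.  */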
6289 if (negative)
6290 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
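  /* AGGR_TYPE determines how far the data-ref pointer is bumped per copy:
     a whole array of VEC_NUM vectors for LOAD_LANES, a single vector
     otherwise.  */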
6292 if (load_lanes_p)
6293 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
6294 else
6295 aggr_type = vectype;
6297 prev_stmt_info = NULL;
6298 for (j = 0; j < ncopies; j++)
6300 /* 1. Create the vector or array pointer update chain. */
6301 if (j == 0)
6303 bool simd_lane_access_p
6304 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
6305 if (simd_lane_access_p
6306 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
6307 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
6308 && integer_zerop (DR_OFFSET (first_dr))
6309 && integer_zerop (DR_INIT (first_dr))
6310 && alias_sets_conflict_p (get_alias_set (aggr_type),
6311 get_alias_set (DR_REF (first_dr)))
6312 && (alignment_support_scheme == dr_aligned
6313 || alignment_support_scheme == dr_unaligned_supported))
6315 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
6316 dataref_offset = build_int_cst (reference_alias_ptr_type
6317 (DR_REF (first_dr)), 0);
6318 inv_p = false;
6320 else
6321 dataref_ptr
6322 = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
6323 offset, &dummy, gsi, &ptr_incr,
6324 simd_lane_access_p, &inv_p,
6325 byte_offset);
6327 else if (dataref_offset)
6328 dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
6329 TYPE_SIZE_UNIT (aggr_type));
6330 else
6331 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
6332 TYPE_SIZE_UNIT (aggr_type));
6334 if (grouped_load || slp_perm)
6335 dr_chain.create (vec_num);
6337 if (load_lanes_p)
6339 tree vec_array;
6341 vec_array = create_vector_array (vectype, vec_num);
6343 /* Emit:
6344 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
6345 data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
6346 new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
6347 gimple_call_set_lhs (new_stmt, vec_array);
6348 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6350 /* Extract each vector into an SSA_NAME. */
6351 for (i = 0; i < vec_num; i++)
6353 new_temp = read_vector_array (stmt, gsi, scalar_dest,
6354 vec_array, i);
6355 dr_chain.quick_push (new_temp);
6358 /* Record the mapping between SSA_NAMEs and statements. */
6359 vect_record_grouped_load_vectors (stmt, dr_chain);
6361 else
6363 for (i = 0; i < vec_num; i++)
6365 if (i > 0)
6366 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
6367 stmt, NULL_TREE);
6369 /* 2. Create the vector-load in the loop. */
6370 switch (alignment_support_scheme)
6372 case dr_aligned:
6373 case dr_unaligned_supported:
6375 unsigned int align, misalign;
6377 data_ref
6378 = build2 (MEM_REF, vectype, dataref_ptr,
6379 dataref_offset
6380 ? dataref_offset
6381 : build_int_cst (reference_alias_ptr_type
6382 (DR_REF (first_dr)), 0));
6383 align = TYPE_ALIGN_UNIT (vectype);
6384 if (alignment_support_scheme == dr_aligned)
6386 gcc_assert (aligned_access_p (first_dr));
6387 misalign = 0;
6389 else if (DR_MISALIGNMENT (first_dr) == -1)
6391 TREE_TYPE (data_ref)
6392 = build_aligned_type (TREE_TYPE (data_ref),
6393 TYPE_ALIGN (elem_type));
6394 align = TYPE_ALIGN_UNIT (elem_type);
6395 misalign = 0;
6397 else
6399 TREE_TYPE (data_ref)
6400 = build_aligned_type (TREE_TYPE (data_ref),
6401 TYPE_ALIGN (elem_type));
6402 misalign = DR_MISALIGNMENT (first_dr);
6404 if (dataref_offset == NULL_TREE)
6405 set_ptr_info_alignment (get_ptr_info (dataref_ptr),
6406 align, misalign);
6407 break;
6409 case dr_explicit_realign:
6411 tree ptr, bump;
6412 tree vs_minus_1;
6414 vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
6416 if (compute_in_loop)
6417 msq = vect_setup_realignment (first_stmt, gsi,
6418 &realignment_token,
6419 dr_explicit_realign,
6420 dataref_ptr, NULL);
6422 ptr = copy_ssa_name (dataref_ptr, NULL);
6423 new_stmt = gimple_build_assign_with_ops
6424 (BIT_AND_EXPR, ptr, dataref_ptr,
6425 build_int_cst
6426 (TREE_TYPE (dataref_ptr),
6427 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
6428 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6429 data_ref
6430 = build2 (MEM_REF, vectype, ptr,
6431 build_int_cst (reference_alias_ptr_type
6432 (DR_REF (first_dr)), 0));
6433 vec_dest = vect_create_destination_var (scalar_dest,
6434 vectype);
6435 new_stmt = gimple_build_assign (vec_dest, data_ref);
6436 new_temp = make_ssa_name (vec_dest, new_stmt);
6437 gimple_assign_set_lhs (new_stmt, new_temp);
6438 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
6439 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
6440 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6441 msq = new_temp;
6443 bump = size_binop (MULT_EXPR, vs_minus_1,
6444 TYPE_SIZE_UNIT (elem_type));
6445 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
6446 new_stmt = gimple_build_assign_with_ops
6447 (BIT_AND_EXPR, NULL_TREE, ptr,
6448 build_int_cst
6449 (TREE_TYPE (ptr),
6450 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
6451 ptr = copy_ssa_name (dataref_ptr, new_stmt);
6452 gimple_assign_set_lhs (new_stmt, ptr);
6453 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6454 data_ref
6455 = build2 (MEM_REF, vectype, ptr,
6456 build_int_cst (reference_alias_ptr_type
6457 (DR_REF (first_dr)), 0));
6458 break;
6460 case dr_explicit_realign_optimized:
6461 new_temp = copy_ssa_name (dataref_ptr, NULL);
6462 new_stmt = gimple_build_assign_with_ops
6463 (BIT_AND_EXPR, new_temp, dataref_ptr,
6464 build_int_cst
6465 (TREE_TYPE (dataref_ptr),
6466 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
6467 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6468 data_ref
6469 = build2 (MEM_REF, vectype, new_temp,
6470 build_int_cst (reference_alias_ptr_type
6471 (DR_REF (first_dr)), 0));
6472 break;
6473 default:
6474 gcc_unreachable ();
6476 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6477 new_stmt = gimple_build_assign (vec_dest, data_ref);
6478 new_temp = make_ssa_name (vec_dest, new_stmt);
6479 gimple_assign_set_lhs (new_stmt, new_temp);
6480 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6482 /* 3. Handle explicit realignment if necessary/supported.
6483 Create in loop:
6484 vec_dest = realign_load (msq, lsq, realignment_token) */
6485 if (alignment_support_scheme == dr_explicit_realign_optimized
6486 || alignment_support_scheme == dr_explicit_realign)
6488 lsq = gimple_assign_lhs (new_stmt);
6489 if (!realignment_token)
6490 realignment_token = dataref_ptr;
6491 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6492 new_stmt
6493 = gimple_build_assign_with_ops (REALIGN_LOAD_EXPR,
6494 vec_dest, msq, lsq,
6495 realignment_token);
6496 new_temp = make_ssa_name (vec_dest, new_stmt);
6497 gimple_assign_set_lhs (new_stmt, new_temp);
6498 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6500 if (alignment_support_scheme == dr_explicit_realign_optimized)
6502 gcc_assert (phi);
6503 if (i == vec_num - 1 && j == ncopies - 1)
6504 add_phi_arg (phi, lsq,
6505 loop_latch_edge (containing_loop),
6506 UNKNOWN_LOCATION);
6507 msq = lsq;
6511 /* 4. Handle invariant-load. */
6512 if (inv_p && !bb_vinfo)
6514 gcc_assert (!grouped_load);
6515 /* If we have versioned for aliasing or the loop doesn't
6516 have any data dependencies that would preclude this,
6517 then we are sure this is a loop invariant load and
6518 thus we can insert it on the preheader edge. */
6519 if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
6520 && !nested_in_vect_loop
6521 && hoist_defs_of_uses (stmt, loop))
6523 if (dump_enabled_p ())
6525 dump_printf_loc (MSG_NOTE, vect_location,
6526 "hoisting out of the vectorized "
6527 "loop: ");
6528 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
6529 dump_printf (MSG_NOTE, "\n");
6531 tree tem = copy_ssa_name (scalar_dest, NULL);
6532 gsi_insert_on_edge_immediate
6533 (loop_preheader_edge (loop),
6534 gimple_build_assign (tem,
6535 unshare_expr
6536 (gimple_assign_rhs1 (stmt))));
6537 new_temp = vect_init_vector (stmt, tem, vectype, NULL);
6539 else
6541 gimple_stmt_iterator gsi2 = *gsi;
6542 gsi_next (&gsi2);
6543 new_temp = vect_init_vector (stmt, scalar_dest,
6544 vectype, &gsi2);
6546 new_stmt = SSA_NAME_DEF_STMT (new_temp);
6547 set_vinfo_for_stmt (new_stmt,
6548 new_stmt_vec_info (new_stmt, loop_vinfo,
6549 bb_vinfo));
6552 if (negative)
6554 tree perm_mask = perm_mask_for_reverse (vectype);
6555 new_temp = permute_vec_elements (new_temp, new_temp,
6556 perm_mask, stmt, gsi);
6557 new_stmt = SSA_NAME_DEF_STMT (new_temp);
6560 /* Collect vector loads and later create their permutation in
6561 vect_transform_grouped_load (). */
6562 if (grouped_load || slp_perm)
6563 dr_chain.quick_push (new_temp);
6565 /* Store vector loads in the corresponding SLP_NODE. */
6566 if (slp && !slp_perm)
6567 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
6569 /* Bump the vector pointer to account for a gap. */
6570 if (slp && group_gap != 0)
6572 tree bump = size_binop (MULT_EXPR,
6573 TYPE_SIZE_UNIT (elem_type),
6574 size_int (group_gap));
6575 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
6576 stmt, bump);
6580 if (slp && !slp_perm)
6581 continue;
6583 if (slp_perm)
6585 if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
6586 slp_node_instance, false))
6588 dr_chain.release ();
6589 return false;
6592 else
6594 if (grouped_load)
6596 if (!load_lanes_p)
6597 vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
6598 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
6600 else
6602 if (j == 0)
6603 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6604 else
6605 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6606 prev_stmt_info = vinfo_for_stmt (new_stmt);
6609 dr_chain.release ();
6612 return true;
6615 /* Function vect_is_simple_cond.
6617 Input:
6618 LOOP - the loop that is being vectorized.
6619 COND - Condition that is checked for simple use.
6621 Output:
6622 *COMP_VECTYPE - the vector type for the comparison.
6624 Returns whether a COND can be vectorized. Checks whether
6625 condition operands are supportable using vect_is_simple_use. */
6627 static bool
6628 vect_is_simple_cond (tree cond, gimple stmt, loop_vec_info loop_vinfo,
6629 bb_vec_info bb_vinfo, tree *comp_vectype)
6631 tree lhs, rhs;
6632 tree def;
6633 enum vect_def_type dt;
6634 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
6636 if (!COMPARISON_CLASS_P (cond))
6637 return false;
6639 lhs = TREE_OPERAND (cond, 0);
6640 rhs = TREE_OPERAND (cond, 1);
6642 if (TREE_CODE (lhs) == SSA_NAME)
6644 gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
6645 if (!vect_is_simple_use_1 (lhs, stmt, loop_vinfo, bb_vinfo,
6646 &lhs_def_stmt, &def, &dt, &vectype1))
6647 return false;
6649 else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
6650 && TREE_CODE (lhs) != FIXED_CST)
6651 return false;
6653 if (TREE_CODE (rhs) == SSA_NAME)
6655 gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
6656 if (!vect_is_simple_use_1 (rhs, stmt, loop_vinfo, bb_vinfo,
6657 &rhs_def_stmt, &def, &dt, &vectype2))
6658 return false;
6660 else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
6661 && TREE_CODE (rhs) != FIXED_CST)
6662 return false;
6664 *comp_vectype = vectype1 ? vectype1 : vectype2;
6665 return true;
6668 /* vectorizable_condition.
6670 Check if STMT is conditional modify expression that can be vectorized.
6671 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
6672 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
6673 at GSI.
6675 When STMT is vectorized as nested cycle, REDUC_DEF is the vector variable
6676 to be used at REDUC_INDEX (in then clause if REDUC_INDEX is 1, and in
6677 else clause if it is 2).
6679 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
6681 bool
6682 vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
6683 gimple *vec_stmt, tree reduc_def, int reduc_index,
6684 slp_tree slp_node)
6686 tree scalar_dest = NULL_TREE;
6687 tree vec_dest = NULL_TREE;
6688 tree cond_expr, then_clause, else_clause;
6689 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
6690 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
6691 tree comp_vectype = NULL_TREE;
6692 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
6693 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
6694 tree vec_compare, vec_cond_expr;
6695 tree new_temp;
6696 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6697 tree def;
6698 enum vect_def_type dt, dts[4];
6699 int nunits = TYPE_VECTOR_SUBPARTS (vectype);
6700 int ncopies;
6701 enum tree_code code;
6702 stmt_vec_info prev_stmt_info = NULL;
6703 int i, j;
6704 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
6705 vec<tree> vec_oprnds0 = vNULL;
6706 vec<tree> vec_oprnds1 = vNULL;
6707 vec<tree> vec_oprnds2 = vNULL;
6708 vec<tree> vec_oprnds3 = vNULL;
6709 tree vec_cmp_type;
6711 if (slp_node || PURE_SLP_STMT (stmt_info))
6712 ncopies = 1;
6713 else
6714 ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
6716 gcc_assert (ncopies >= 1);
6717 if (reduc_index && ncopies > 1)
6718 return false; /* FORNOW */
6720 if (reduc_index && STMT_SLP_TYPE (stmt_info))
6721 return false;
6723 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
6724 return false;
6726 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
6727 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
6728 && reduc_def))
6729 return false;
6731 /* FORNOW: not yet supported. */
6732 if (STMT_VINFO_LIVE_P (stmt_info))
6734 if (dump_enabled_p ())
6735 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6736 "value used after loop.\n");
6737 return false;
6740 /* Is vectorizable conditional operation? */
6741 if (!is_gimple_assign (stmt))
6742 return false;
6744 code = gimple_assign_rhs_code (stmt);
6746 if (code != COND_EXPR)
6747 return false;
6749 cond_expr = gimple_assign_rhs1 (stmt);
6750 then_clause = gimple_assign_rhs2 (stmt);
6751 else_clause = gimple_assign_rhs3 (stmt);
6753 if (!vect_is_simple_cond (cond_expr, stmt, loop_vinfo, bb_vinfo,
6754 &comp_vectype)
6755 || !comp_vectype)
6756 return false;
6758 if (TREE_CODE (then_clause) == SSA_NAME)
6760 gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
6761 if (!vect_is_simple_use (then_clause, stmt, loop_vinfo, bb_vinfo,
6762 &then_def_stmt, &def, &dt))
6763 return false;
6765 else if (TREE_CODE (then_clause) != INTEGER_CST
6766 && TREE_CODE (then_clause) != REAL_CST
6767 && TREE_CODE (then_clause) != FIXED_CST)
6768 return false;
6770 if (TREE_CODE (else_clause) == SSA_NAME)
6772 gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
6773 if (!vect_is_simple_use (else_clause, stmt, loop_vinfo, bb_vinfo,
6774 &else_def_stmt, &def, &dt))
6775 return false;
6777 else if (TREE_CODE (else_clause) != INTEGER_CST
6778 && TREE_CODE (else_clause) != REAL_CST
6779 && TREE_CODE (else_clause) != FIXED_CST)
6780 return false;
6782 unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype)));
6783 /* The result of a vector comparison should be of signed integer type. */
6784 tree cmp_type = build_nonstandard_integer_type (prec, 0);
6785 vec_cmp_type = get_same_sized_vectype (cmp_type, vectype);
6786 if (vec_cmp_type == NULL_TREE)
6787 return false;
6789 if (!vec_stmt)
6791 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
6792 return expand_vec_cond_expr_p (vectype, comp_vectype);
6795 /* Transform. */
6797 if (!slp_node)
6799 vec_oprnds0.create (1);
6800 vec_oprnds1.create (1);
6801 vec_oprnds2.create (1);
6802 vec_oprnds3.create (1);
6805 /* Handle def. */
6806 scalar_dest = gimple_assign_lhs (stmt);
6807 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6809 /* Handle cond expr. */
6810 for (j = 0; j < ncopies; j++)
6812 gimple new_stmt = NULL;
6813 if (j == 0)
6815 if (slp_node)
6817 auto_vec<tree, 4> ops;
6818 auto_vec<vec<tree>, 4> vec_defs;
6820 ops.safe_push (TREE_OPERAND (cond_expr, 0));
6821 ops.safe_push (TREE_OPERAND (cond_expr, 1));
6822 ops.safe_push (then_clause);
6823 ops.safe_push (else_clause);
6824 vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
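              /* vect_get_slp_defs fills VEC_DEFS with one vector of defs per
                 operand, in the same order as OPS, so pop them off back to
                 front.  */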
6825 vec_oprnds3 = vec_defs.pop ();
6826 vec_oprnds2 = vec_defs.pop ();
6827 vec_oprnds1 = vec_defs.pop ();
6828 vec_oprnds0 = vec_defs.pop ();
6830 ops.release ();
6831 vec_defs.release ();
6833 else
6835 gimple gtemp;
6836 vec_cond_lhs =
6837 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0),
6838 stmt, NULL);
6839 vect_is_simple_use (TREE_OPERAND (cond_expr, 0), stmt,
6840 loop_vinfo, NULL, &gtemp, &def, &dts[0]);
6842 vec_cond_rhs =
6843 vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
6844 stmt, NULL);
6845 vect_is_simple_use (TREE_OPERAND (cond_expr, 1), stmt,
6846 loop_vinfo, NULL, &gtemp, &def, &dts[1]);
6847 if (reduc_index == 1)
6848 vec_then_clause = reduc_def;
6849 else
6851 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
6852 stmt, NULL);
6853 vect_is_simple_use (then_clause, stmt, loop_vinfo,
6854 NULL, &gtemp, &def, &dts[2]);
6856 if (reduc_index == 2)
6857 vec_else_clause = reduc_def;
6858 else
6860 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
6861 stmt, NULL);
6862 vect_is_simple_use (else_clause, stmt, loop_vinfo,
6863 NULL, &gtemp, &def, &dts[3]);
6867 else
6869 vec_cond_lhs = vect_get_vec_def_for_stmt_copy (dts[0],
6870 vec_oprnds0.pop ());
6871 vec_cond_rhs = vect_get_vec_def_for_stmt_copy (dts[1],
6872 vec_oprnds1.pop ());
6873 vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
6874 vec_oprnds2.pop ());
6875 vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
6876 vec_oprnds3.pop ());
6879 if (!slp_node)
6881 vec_oprnds0.quick_push (vec_cond_lhs);
6882 vec_oprnds1.quick_push (vec_cond_rhs);
6883 vec_oprnds2.quick_push (vec_then_clause);
6884 vec_oprnds3.quick_push (vec_else_clause);
6887 /* Arguments are ready. Create the new vector stmt. */
6888 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
6890 vec_cond_rhs = vec_oprnds1[i];
6891 vec_then_clause = vec_oprnds2[i];
6892 vec_else_clause = vec_oprnds3[i];
6894 vec_compare = build2 (TREE_CODE (cond_expr), vec_cmp_type,
6895 vec_cond_lhs, vec_cond_rhs);
6896 vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
6897 vec_compare, vec_then_clause, vec_else_clause);
6899 new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
6900 new_temp = make_ssa_name (vec_dest, new_stmt);
6901 gimple_assign_set_lhs (new_stmt, new_temp);
6902 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6903 if (slp_node)
6904 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
6907 if (slp_node)
6908 continue;
6910 if (j == 0)
6911 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6912 else
6913 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6915 prev_stmt_info = vinfo_for_stmt (new_stmt);
6918 vec_oprnds0.release ();
6919 vec_oprnds1.release ();
6920 vec_oprnds2.release ();
6921 vec_oprnds3.release ();
6923 return true;
6927 /* Make sure the statement is vectorizable. */
6929 bool
6930 vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
6932 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
6933 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
6934 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
6935 bool ok;
6936 tree scalar_type, vectype;
6937 gimple pattern_stmt;
6938 gimple_seq pattern_def_seq;
6940 if (dump_enabled_p ())
6942 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
6943 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
6944 dump_printf (MSG_NOTE, "\n");
6947 if (gimple_has_volatile_ops (stmt))
6949 if (dump_enabled_p ())
6950 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6951 "not vectorized: stmt has volatile operands\n");
6953 return false;
6956 /* Skip stmts that do not need to be vectorized. In loops this is expected
6957 to include:
6958 - the COND_EXPR which is the loop exit condition
6959 - any LABEL_EXPRs in the loop
6960 - computations that are used only for array indexing or loop control.
6961 In basic blocks we only analyze statements that are a part of some SLP
6962 instance, therefore, all the statements are relevant.
6964 Pattern statement needs to be analyzed instead of the original statement
6965 if the original statement is not relevant. Otherwise, we analyze both
6966 statements. In basic blocks we are called from some SLP instance
6967 traversal; in that case don't analyze the pattern stmts here, as they
6968 will already be part of the SLP instance. */
6970 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
6971 if (!STMT_VINFO_RELEVANT_P (stmt_info)
6972 && !STMT_VINFO_LIVE_P (stmt_info))
6974 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
6975 && pattern_stmt
6976 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
6977 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
6979 /* Analyze PATTERN_STMT instead of the original stmt. */
6980 stmt = pattern_stmt;
6981 stmt_info = vinfo_for_stmt (pattern_stmt);
6982 if (dump_enabled_p ())
6984 dump_printf_loc (MSG_NOTE, vect_location,
6985 "==> examining pattern statement: ");
6986 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
6987 dump_printf (MSG_NOTE, "\n");
6990 else
6992 if (dump_enabled_p ())
6993 dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");
6995 return true;
6998 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
6999 && node == NULL
7000 && pattern_stmt
7001 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
7002 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
7004 /* Analyze PATTERN_STMT too. */
7005 if (dump_enabled_p ())
7007 dump_printf_loc (MSG_NOTE, vect_location,
7008 "==> examining pattern statement: ");
7009 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7010 dump_printf (MSG_NOTE, "\n");
7013 if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node))
7014 return false;
7017 if (is_pattern_stmt_p (stmt_info)
7018 && node == NULL
7019 && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
7021 gimple_stmt_iterator si;
7023 for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
7025 gimple pattern_def_stmt = gsi_stmt (si);
7026 if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
7027 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
7029 /* Analyze def stmt of STMT if it's a pattern stmt. */
7030 if (dump_enabled_p ())
7032 dump_printf_loc (MSG_NOTE, vect_location,
7033 "==> examining pattern def statement: ");
7034 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
7035 dump_printf (MSG_NOTE, "\n");
7038 if (!vect_analyze_stmt (pattern_def_stmt,
7039 need_to_vectorize, node))
7040 return false;
7045 switch (STMT_VINFO_DEF_TYPE (stmt_info))
7047 case vect_internal_def:
7048 break;
7050 case vect_reduction_def:
7051 case vect_nested_cycle:
7052 gcc_assert (!bb_vinfo && (relevance == vect_used_in_outer
7053 || relevance == vect_used_in_outer_by_reduction
7054 || relevance == vect_unused_in_scope));
7055 break;
7057 case vect_induction_def:
7058 case vect_constant_def:
7059 case vect_external_def:
7060 case vect_unknown_def_type:
7061 default:
7062 gcc_unreachable ();
7065 if (bb_vinfo)
7067 gcc_assert (PURE_SLP_STMT (stmt_info));
7069 scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
7070 if (dump_enabled_p ())
7072 dump_printf_loc (MSG_NOTE, vect_location,
7073 "get vectype for scalar type: ");
7074 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
7075 dump_printf (MSG_NOTE, "\n");
7078 vectype = get_vectype_for_scalar_type (scalar_type);
7079 if (!vectype)
7081 if (dump_enabled_p ())
7083 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7084 "not SLPed: unsupported data-type ");
7085 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
7086 scalar_type);
7087 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
7089 return false;
7092 if (dump_enabled_p ())
7094 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
7095 dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
7096 dump_printf (MSG_NOTE, "\n");
7099 STMT_VINFO_VECTYPE (stmt_info) = vectype;
7102 if (STMT_VINFO_RELEVANT_P (stmt_info))
7104 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
7105 gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
7106 || (is_gimple_call (stmt)
7107 && gimple_call_lhs (stmt) == NULL_TREE));
7108 *need_to_vectorize = true;
7111 ok = true;
7112 if (!bb_vinfo
7113 && (STMT_VINFO_RELEVANT_P (stmt_info)
7114 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
7115 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, NULL)
7116 || vectorizable_conversion (stmt, NULL, NULL, NULL)
7117 || vectorizable_shift (stmt, NULL, NULL, NULL)
7118 || vectorizable_operation (stmt, NULL, NULL, NULL)
7119 || vectorizable_assignment (stmt, NULL, NULL, NULL)
7120 || vectorizable_load (stmt, NULL, NULL, NULL, NULL)
7121 || vectorizable_call (stmt, NULL, NULL, NULL)
7122 || vectorizable_store (stmt, NULL, NULL, NULL)
7123 || vectorizable_reduction (stmt, NULL, NULL, NULL)
7124 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, NULL));
7125 else
7127 if (bb_vinfo)
7128 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
7129 || vectorizable_conversion (stmt, NULL, NULL, node)
7130 || vectorizable_shift (stmt, NULL, NULL, node)
7131 || vectorizable_operation (stmt, NULL, NULL, node)
7132 || vectorizable_assignment (stmt, NULL, NULL, node)
7133 || vectorizable_load (stmt, NULL, NULL, node, NULL)
7134 || vectorizable_call (stmt, NULL, NULL, node)
7135 || vectorizable_store (stmt, NULL, NULL, node)
7136 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node));
7139 if (!ok)
7141 if (dump_enabled_p ())
7143 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7144 "not vectorized: relevant stmt not ");
7145 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
7146 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
7147 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
7150 return false;
7153 if (bb_vinfo)
7154 return true;
7156 /* Stmts that are (also) "live" (i.e. - that are used outside the loop)
7157 need extra handling, except for vectorizable reductions. */
7158 if (STMT_VINFO_LIVE_P (stmt_info)
7159 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
7160 ok = vectorizable_live_operation (stmt, NULL, NULL);
7162 if (!ok)
7164 if (dump_enabled_p ())
7166 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7167 "not vectorized: live stmt not ");
7168 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
7169 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
7170 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
7173 return false;
7176 return true;
7180 /* Function vect_transform_stmt.
7182 Create a vectorized stmt to replace STMT, and insert it at GSI. */
7184 bool
7185 vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
7186 bool *grouped_store, slp_tree slp_node,
7187 slp_instance slp_node_instance)
7189 bool is_store = false;
7190 gimple vec_stmt = NULL;
7191 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7192 bool done;
7194 switch (STMT_VINFO_TYPE (stmt_info))
7196 case type_demotion_vec_info_type:
7197 case type_promotion_vec_info_type:
7198 case type_conversion_vec_info_type:
7199 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
7200 gcc_assert (done);
7201 break;
7203 case induc_vec_info_type:
7204 gcc_assert (!slp_node);
7205 done = vectorizable_induction (stmt, gsi, &vec_stmt);
7206 gcc_assert (done);
7207 break;
7209 case shift_vec_info_type:
7210 done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
7211 gcc_assert (done);
7212 break;
7214 case op_vec_info_type:
7215 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
7216 gcc_assert (done);
7217 break;
7219 case assignment_vec_info_type:
7220 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
7221 gcc_assert (done);
7222 break;
7224 case load_vec_info_type:
7225 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
7226 slp_node_instance);
7227 gcc_assert (done);
7228 break;
7230 case store_vec_info_type:
7231 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
7232 gcc_assert (done);
7233 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
7235 /* In case of interleaving, the whole chain is vectorized when the
7236 last store in the chain is reached. Store stmts before the last
7237 one are skipped, and their vec_stmt_info shouldn't be freed
7238 meanwhile. */
7239 *grouped_store = true;
7240 if (STMT_VINFO_VEC_STMT (stmt_info))
7241 is_store = true;
7243 else
7244 is_store = true;
7245 break;
7247 case condition_vec_info_type:
7248 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
7249 gcc_assert (done);
7250 break;
7252 case call_vec_info_type:
7253 done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
7254 stmt = gsi_stmt (*gsi);
7255 if (is_gimple_call (stmt)
7256 && gimple_call_internal_p (stmt)
7257 && gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
7258 is_store = true;
7259 break;
7261 case call_simd_clone_vec_info_type:
7262 done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
7263 stmt = gsi_stmt (*gsi);
7264 break;
7266 case reduc_vec_info_type:
7267 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
7268 gcc_assert (done);
7269 break;
7271 default:
7272 if (!STMT_VINFO_LIVE_P (stmt_info))
7274 if (dump_enabled_p ())
7275 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7276 "stmt not supported.\n");
7277 gcc_unreachable ();
7281 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
7282 is being vectorized, but outside the immediately enclosing loop. */
7283 if (vec_stmt
7284 && STMT_VINFO_LOOP_VINFO (stmt_info)
7285 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
7286 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
7287 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
7288 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
7289 || STMT_VINFO_RELEVANT (stmt_info) ==
7290 vect_used_in_outer_by_reduction))
7292 struct loop *innerloop = LOOP_VINFO_LOOP (
7293 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
7294 imm_use_iterator imm_iter;
7295 use_operand_p use_p;
7296 tree scalar_dest;
7297 gimple exit_phi;
7299 if (dump_enabled_p ())
7300 dump_printf_loc (MSG_NOTE, vect_location,
7301 "Record the vdef for outer-loop vectorization.\n");
7303 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
7304 (to be used when vectorizing outer-loop stmts that use the DEF of
7305 STMT). */
7306 if (gimple_code (stmt) == GIMPLE_PHI)
7307 scalar_dest = PHI_RESULT (stmt);
7308 else
7309 scalar_dest = gimple_assign_lhs (stmt);
7311 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
7313 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
7315 exit_phi = USE_STMT (use_p);
7316 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
7321 /* Handle stmts whose DEF is used outside the loop-nest that is
7322 being vectorized. */
7323 if (STMT_VINFO_LIVE_P (stmt_info)
7324 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
7326 done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
7327 gcc_assert (done);
7330 if (vec_stmt)
7331 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
7333 return is_store;
7337 /* Remove a group of stores (for SLP or interleaving), free their
7338 stmt_vec_info. */
7340 void
7341 vect_remove_stores (gimple first_stmt)
7343 gimple next = first_stmt;
7344 gimple tmp;
7345 gimple_stmt_iterator next_si;
7347 while (next)
7349 stmt_vec_info stmt_info = vinfo_for_stmt (next);
7351 tmp = GROUP_NEXT_ELEMENT (stmt_info);
7352 if (is_pattern_stmt_p (stmt_info))
7353 next = STMT_VINFO_RELATED_STMT (stmt_info);
7354 /* Free the attached stmt_vec_info and remove the stmt. */
7355 next_si = gsi_for_stmt (next);
7356 unlink_stmt_vdef (next);
7357 gsi_remove (&next_si, true);
7358 release_defs (next);
7359 free_stmt_vec_info (next);
7360 next = tmp;
7365 /* Function new_stmt_vec_info.
7367 Create and initialize a new stmt_vec_info struct for STMT. */
7369 stmt_vec_info
7370 new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
7371 bb_vec_info bb_vinfo)
7373 stmt_vec_info res;
7374 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
7376 STMT_VINFO_TYPE (res) = undef_vec_info_type;
7377 STMT_VINFO_STMT (res) = stmt;
7378 STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
7379 STMT_VINFO_BB_VINFO (res) = bb_vinfo;
7380 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
7381 STMT_VINFO_LIVE_P (res) = false;
7382 STMT_VINFO_VECTYPE (res) = NULL;
7383 STMT_VINFO_VEC_STMT (res) = NULL;
7384 STMT_VINFO_VECTORIZABLE (res) = true;
7385 STMT_VINFO_IN_PATTERN_P (res) = false;
7386 STMT_VINFO_RELATED_STMT (res) = NULL;
7387 STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
7388 STMT_VINFO_DATA_REF (res) = NULL;
7390 STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
7391 STMT_VINFO_DR_OFFSET (res) = NULL;
7392 STMT_VINFO_DR_INIT (res) = NULL;
7393 STMT_VINFO_DR_STEP (res) = NULL;
7394 STMT_VINFO_DR_ALIGNED_TO (res) = NULL;
7396 if (gimple_code (stmt) == GIMPLE_PHI
7397 && is_loop_header_bb_p (gimple_bb (stmt)))
7398 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
7399 else
7400 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
7402 STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
7403 STMT_SLP_TYPE (res) = loop_vect;
7404 GROUP_FIRST_ELEMENT (res) = NULL;
7405 GROUP_NEXT_ELEMENT (res) = NULL;
7406 GROUP_SIZE (res) = 0;
7407 GROUP_STORE_COUNT (res) = 0;
7408 GROUP_GAP (res) = 0;
7409 GROUP_SAME_DR_STMT (res) = NULL;
7411 return res;
7415 /* Create the vector holding the stmt_vec_info structs. */
7417 void
7418 init_stmt_vec_info_vec (void)
7420 gcc_assert (!stmt_vec_info_vec.exists ());
7421 stmt_vec_info_vec.create (50);
7425 /* Free the stmt_vec_info vector. */
7427 void
7428 free_stmt_vec_info_vec (void)
7430 unsigned int i;
7431 vec_void_p info;
7432 FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
7433 if (info != NULL)
7434 free_stmt_vec_info (STMT_VINFO_STMT ((stmt_vec_info) info));
7435 gcc_assert (stmt_vec_info_vec.exists ());
7436 stmt_vec_info_vec.release ();
7440 /* Free stmt vectorization related info. */
7442 void
7443 free_stmt_vec_info (gimple stmt)
7445 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7447 if (!stmt_info)
7448 return;
7450 /* Check if this statement has a related "pattern stmt"
7451 (introduced by the vectorizer during the pattern recognition
7452 pass). Free pattern's stmt_vec_info and def stmt's stmt_vec_info
7453 too. */
7454 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
7456 stmt_vec_info patt_info
7457 = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
7458 if (patt_info)
7460 gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
7461 gimple patt_stmt = STMT_VINFO_STMT (patt_info);
7462 gimple_set_bb (patt_stmt, NULL);
7463 tree lhs = gimple_get_lhs (patt_stmt);
7464 if (TREE_CODE (lhs) == SSA_NAME)
7465 release_ssa_name (lhs);
7466 if (seq)
7468 gimple_stmt_iterator si;
7469 for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
7471 gimple seq_stmt = gsi_stmt (si);
7472 gimple_set_bb (seq_stmt, NULL);
7473 lhs = gimple_get_lhs (patt_stmt);
7474 if (TREE_CODE (lhs) == SSA_NAME)
7475 release_ssa_name (lhs);
7476 free_stmt_vec_info (seq_stmt);
7479 free_stmt_vec_info (patt_stmt);
7483 STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
7484 set_vinfo_for_stmt (stmt, NULL);
7485 free (stmt_info);
7489 /* Function get_vectype_for_scalar_type_and_size.
7491 Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
7492 by the target. */
7494 static tree
7495 get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
7497 enum machine_mode inner_mode = TYPE_MODE (scalar_type);
7498 enum machine_mode simd_mode;
7499 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
7500 int nunits;
7501 tree vectype;
7503 if (nbytes == 0)
7504 return NULL_TREE;
7506 if (GET_MODE_CLASS (inner_mode) != MODE_INT
7507 && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
7508 return NULL_TREE;
7510 /* For vector types of elements whose mode precision doesn't
7511 match their type's precision we use an element type of mode
7512 precision. The vectorization routines will have to make sure
7513 they support the proper result truncation/extension.
7514 We also make sure to build vector types with INTEGER_TYPE
7515 component type only. */
7516 if (INTEGRAL_TYPE_P (scalar_type)
7517 && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
7518 || TREE_CODE (scalar_type) != INTEGER_TYPE))
7519 scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
7520 TYPE_UNSIGNED (scalar_type));
7522 /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
7523 When the component mode passes the above test simply use a type
7524 corresponding to that mode. The theory is that any use that
7525 would cause problems with this will disable vectorization anyway. */
7526 else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
7527 && !INTEGRAL_TYPE_P (scalar_type))
7528 scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);
7530 /* We can't build a vector type of elements with alignment bigger than
7531 their size. */
7532 else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
7533 scalar_type = lang_hooks.types.type_for_mode (inner_mode,
7534 TYPE_UNSIGNED (scalar_type));
7536 /* If we fell back to using the mode, fail if there was
7537 no scalar type for it. */
7538 if (scalar_type == NULL_TREE)
7539 return NULL_TREE;
7541 /* If no size was supplied use the mode the target prefers. Otherwise
7542 lookup a vector mode of the specified size. */
7543 if (size == 0)
7544 simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
7545 else
7546 simd_mode = mode_for_vector (inner_mode, size / nbytes);
7547 nunits = GET_MODE_SIZE (simd_mode) / nbytes;
7548 if (nunits <= 1)
7549 return NULL_TREE;
7551 vectype = build_vector_type (scalar_type, nunits);
7553 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
7554 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
7555 return NULL_TREE;
7557 return vectype;
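/* A minimal usage sketch of the routine above, illustrative only and guarded
   out of the build: it shows three interesting cases for a 32-bit 'int'
   element on a target whose preferred SIMD width is assumed to be 128 bits.
   The function name example_vectype_queries is hypothetical.  */
#if 0
static void
example_vectype_queries (void)
{
  /* SIZE == 16: with 4-byte SImode elements this looks up V4SImode and,
     if the target supports that vector mode, returns a 4-element vector
     type built over the scalar type.  */
  tree v16 = get_vectype_for_scalar_type_and_size (integer_type_node, 16);

  /* SIZE == 0: defer to targetm.vectorize.preferred_simd_mode.  */
  tree vpref = get_vectype_for_scalar_type_and_size (integer_type_node, 0);

  /* SIZE == element size: only one element fits, so the nunits <= 1
     check makes the routine return NULL_TREE.  */
  tree vone = get_vectype_for_scalar_type_and_size (integer_type_node, 4);

  (void) v16; (void) vpref; (void) vone;
}
#endif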
7560 unsigned int current_vector_size;
7562 /* Function get_vectype_for_scalar_type.
7564 Returns the vector type corresponding to SCALAR_TYPE as supported
7565 by the target. */
7567 tree
7568 get_vectype_for_scalar_type (tree scalar_type)
7570 tree vectype;
7571 vectype = get_vectype_for_scalar_type_and_size (scalar_type,
7572 current_vector_size);
7573 if (vectype
7574 && current_vector_size == 0)
7575 current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
7576 return vectype;
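/* Illustrative note on the side effect above, guarded out of the build:
   the first successful query latches current_vector_size, so subsequent
   queries for other scalar types resolve to the same vector size.  The
   16-byte width and the function name are assumptions for the example.  */
#if 0
static void
example_vector_size_latch (void)
{
  /* Suppose the preferred SIMD mode for 'int' is 16 bytes wide; this call
     sets current_vector_size to 16.  */
  tree vi = get_vectype_for_scalar_type (integer_type_node);

  /* This query is now forced to a 16-byte vector of 'short' (8 elements),
     even if the target would prefer a different width for 'short'.  */
  tree vs = get_vectype_for_scalar_type (short_integer_type_node);

  (void) vi; (void) vs;
}
#endif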
7579 /* Function get_same_sized_vectype
7581 Returns a vector type corresponding to SCALAR_TYPE with the same
7582 size as VECTOR_TYPE, if supported by the target. */
7584 tree
7585 get_same_sized_vectype (tree scalar_type, tree vector_type)
7587 return get_vectype_for_scalar_type_and_size
7588 (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
7591 /* Function vect_is_simple_use.
7593 Input:
7594 LOOP_VINFO - the vect info of the loop that is being vectorized.
7595 BB_VINFO - the vect info of the basic block that is being vectorized.
7596 OPERAND - operand of STMT in the loop or bb.
7597 DEF - the defining stmt in case OPERAND is an SSA_NAME.
7599 Returns whether a stmt with OPERAND can be vectorized.
7600 For loops, supportable operands are constants, loop invariants, and operands
7601 that are defined by the current iteration of the loop. Unsupportable
7602 operands are those that are defined by a previous iteration of the loop (as
7603 is the case in reduction/induction computations).
7604 For basic blocks, supportable operands are constants and bb invariants.
7605 For now, operands defined outside the basic block are not supported. */
7607 bool
7608 vect_is_simple_use (tree operand, gimple stmt, loop_vec_info loop_vinfo,
7609 bb_vec_info bb_vinfo, gimple *def_stmt,
7610 tree *def, enum vect_def_type *dt)
7612 basic_block bb;
7613 stmt_vec_info stmt_vinfo;
7614 struct loop *loop = NULL;
7616 if (loop_vinfo)
7617 loop = LOOP_VINFO_LOOP (loop_vinfo);
7619 *def_stmt = NULL;
7620 *def = NULL_TREE;
7622 if (dump_enabled_p ())
7624 dump_printf_loc (MSG_NOTE, vect_location,
7625 "vect_is_simple_use: operand ");
7626 dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
7627 dump_printf (MSG_NOTE, "\n");
7630 if (CONSTANT_CLASS_P (operand))
7632 *dt = vect_constant_def;
7633 return true;
7636 if (is_gimple_min_invariant (operand))
7638 *def = operand;
7639 *dt = vect_external_def;
7640 return true;
7643 if (TREE_CODE (operand) == PAREN_EXPR)
7645 if (dump_enabled_p ())
7646 dump_printf_loc (MSG_NOTE, vect_location, "non-associatable copy.\n");
7647 operand = TREE_OPERAND (operand, 0);
7650 if (TREE_CODE (operand) != SSA_NAME)
7652 if (dump_enabled_p ())
7653 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7654 "not ssa-name.\n");
7655 return false;
7658 *def_stmt = SSA_NAME_DEF_STMT (operand);
7659 if (*def_stmt == NULL)
7661 if (dump_enabled_p ())
7662 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7663 "no def_stmt.\n");
7664 return false;
7667 if (dump_enabled_p ())
7669 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
7670 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
7671 dump_printf (MSG_NOTE, "\n");
7674 /* Empty stmt is expected only in case of a function argument.
7675 (Otherwise - we expect a phi_node or a GIMPLE_ASSIGN). */
7676 if (gimple_nop_p (*def_stmt))
7678 *def = operand;
7679 *dt = vect_external_def;
7680 return true;
7683 bb = gimple_bb (*def_stmt);
7685 if ((loop && !flow_bb_inside_loop_p (loop, bb))
7686 || (!loop && bb != BB_VINFO_BB (bb_vinfo))
7687 || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
7688 *dt = vect_external_def;
7689 else
7691 stmt_vinfo = vinfo_for_stmt (*def_stmt);
7692 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
7695 if (*dt == vect_unknown_def_type
7696 || (stmt
7697 && *dt == vect_double_reduction_def
7698 && gimple_code (stmt) != GIMPLE_PHI))
7700 if (dump_enabled_p ())
7701 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7702 "Unsupported pattern.\n");
7703 return false;
7706 if (dump_enabled_p ())
7707 dump_printf_loc (MSG_NOTE, vect_location, "type of def: %d.\n", *dt);
7709 switch (gimple_code (*def_stmt))
7711 case GIMPLE_PHI:
7712 *def = gimple_phi_result (*def_stmt);
7713 break;
7715 case GIMPLE_ASSIGN:
7716 *def = gimple_assign_lhs (*def_stmt);
7717 break;
7719 case GIMPLE_CALL:
7720 *def = gimple_call_lhs (*def_stmt);
7721 if (*def != NULL)
7722 break;
7723 /* FALLTHRU */
7724 default:
7725 if (dump_enabled_p ())
7726 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7727 "unsupported defining stmt:\n");
7728 return false;
7731 return true;
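/* A minimal sketch of how a caller might classify an operand with the
   routine above, guarded out of the build; the wrapper name and the set of
   accepted definition kinds are hypothetical, chosen only to show how DT is
   consumed.  */
#if 0
static bool
example_classify_operand (tree op, gimple stmt, loop_vec_info loop_vinfo)
{
  gimple def_stmt;
  tree def;
  enum vect_def_type dt;

  if (!vect_is_simple_use (op, stmt, loop_vinfo, NULL, &def_stmt, &def, &dt))
    return false;

  /* DT now distinguishes, e.g., constants (vect_constant_def), values
     defined outside the loop (vect_external_def) and values defined by a
     statement of the current iteration (vect_internal_def).  */
  return (dt == vect_constant_def
	  || dt == vect_external_def
	  || dt == vect_internal_def);
}
#endif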
7734 /* Function vect_is_simple_use_1.
7736 Same as vect_is_simple_use but also determines the vector operand
7737 type of OPERAND and stores it to *VECTYPE. If the definition of
7738 OPERAND is vect_uninitialized_def, vect_constant_def or
7739 vect_external_def *VECTYPE will be set to NULL_TREE and the caller
7740 is responsible for computing the best suited vector type for the
7741 scalar operand. */
7743 bool
7744 vect_is_simple_use_1 (tree operand, gimple stmt, loop_vec_info loop_vinfo,
7745 bb_vec_info bb_vinfo, gimple *def_stmt,
7746 tree *def, enum vect_def_type *dt, tree *vectype)
7748 if (!vect_is_simple_use (operand, stmt, loop_vinfo, bb_vinfo, def_stmt,
7749 def, dt))
7750 return false;
7752 /* Now get a vector type if the def is internal, otherwise supply
7753 NULL_TREE and leave it up to the caller to figure out a proper
7754 type for the use stmt. */
7755 if (*dt == vect_internal_def
7756 || *dt == vect_induction_def
7757 || *dt == vect_reduction_def
7758 || *dt == vect_double_reduction_def
7759 || *dt == vect_nested_cycle)
7761 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
7763 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
7764 && !STMT_VINFO_RELEVANT (stmt_info)
7765 && !STMT_VINFO_LIVE_P (stmt_info))
7766 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
7768 *vectype = STMT_VINFO_VECTYPE (stmt_info);
7769 gcc_assert (*vectype != NULL_TREE);
7771 else if (*dt == vect_uninitialized_def
7772 || *dt == vect_constant_def
7773 || *dt == vect_external_def)
7774 *vectype = NULL_TREE;
7775 else
7776 gcc_unreachable ();
7778 return true;
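/* Sketch of the usual caller-side pattern for the wrapper above, guarded out
   of the build: when *VECTYPE comes back NULL_TREE (constant or external
   def), callers typically fall back to get_vectype_for_scalar_type on the
   operand's scalar type.  The helper name is hypothetical.  */
#if 0
static tree
example_operand_vectype (tree op, gimple stmt, loop_vec_info loop_vinfo)
{
  gimple def_stmt;
  tree def, vectype;
  enum vect_def_type dt;

  if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, NULL, &def_stmt,
			     &def, &dt, &vectype))
    return NULL_TREE;
  if (!vectype)
    vectype = get_vectype_for_scalar_type (TREE_TYPE (op));
  return vectype;
}
#endif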
7782 /* Function supportable_widening_operation
7784 Check whether an operation represented by the code CODE is a
7785 widening operation that is supported by the target platform in
7786 vector form (i.e., when operating on arguments of type VECTYPE_IN
7787 producing a result of type VECTYPE_OUT).
7789 Widening operations we currently support are NOP (CONVERT), FLOAT,
7790 WIDEN_MULT and WIDEN_LSHIFT. This function checks if these operations
7791 are supported by the target platform either directly (via vector
7792 tree-codes), or via target builtins.
7794 Output:
7795 - CODE1 and CODE2 are codes of vector operations to be used when
7796 vectorizing the operation, if available.
7797 - MULTI_STEP_CVT determines the number of required intermediate steps in
7798 case of multi-step conversion (like char->short->int - in that case
7799 MULTI_STEP_CVT will be 1).
7800 - INTERM_TYPES contains the intermediate type required to perform the
7801 widening operation (short in the above example). */
7803 bool
7804 supportable_widening_operation (enum tree_code code, gimple stmt,
7805 tree vectype_out, tree vectype_in,
7806 enum tree_code *code1, enum tree_code *code2,
7807 int *multi_step_cvt,
7808 vec<tree> *interm_types)
7810 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7811 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
7812 struct loop *vect_loop = NULL;
7813 enum machine_mode vec_mode;
7814 enum insn_code icode1, icode2;
7815 optab optab1, optab2;
7816 tree vectype = vectype_in;
7817 tree wide_vectype = vectype_out;
7818 enum tree_code c1, c2;
7819 int i;
7820 tree prev_type, intermediate_type;
7821 enum machine_mode intermediate_mode, prev_mode;
7822 optab optab3, optab4;
7824 *multi_step_cvt = 0;
7825 if (loop_info)
7826 vect_loop = LOOP_VINFO_LOOP (loop_info);
7828 switch (code)
7830 case WIDEN_MULT_EXPR:
7831 /* The result of a vectorized widening operation usually requires
7832 two vectors (because the widened results do not fit into one vector).
7833 The generated vector results would normally be expected to be
7834 generated in the same order as in the original scalar computation,
7835 i.e. if 8 results are generated in each vector iteration, they are
7836 to be organized as follows:
7837 vect1: [res1,res2,res3,res4],
7838 vect2: [res5,res6,res7,res8].
7840 However, in the special case that the result of the widening
7841 operation is used in a reduction computation only, the order doesn't
7842 matter (because when vectorizing a reduction we change the order of
7843 the computation). Some targets can take advantage of this and
7844 generate more efficient code. For example, targets like Altivec,
7845 that support widen_mult using a sequence of {mult_even,mult_odd}
7846 generate the following vectors:
7847 vect1: [res1,res3,res5,res7],
7848 vect2: [res2,res4,res6,res8].
7850 When vectorizing outer-loops, we execute the inner-loop sequentially
7851 (each vectorized inner-loop iteration contributes to VF outer-loop
7852 iterations in parallel). We therefore don't allow changing the
7853 order of the computation in the inner-loop during outer-loop
7854 vectorization. */
7855 /* TODO: Another case in which order doesn't *really* matter is when we
7856 widen and then contract again, e.g. (short)((int)x * y >> 8).
7857 Normally, pack_trunc performs an even/odd permute, whereas the
7858 repack from an even/odd expansion would be an interleave, which
7859 would be significantly simpler for e.g. AVX2. */
7860 /* In any case, in order to avoid duplicating the code below, recurse
7861 on VEC_WIDEN_MULT_EVEN_EXPR. If it succeeds, all the return values
7862 are properly set up for the caller. If we fail, we'll continue with
7863 a VEC_WIDEN_MULT_LO/HI_EXPR check. */
7864 if (vect_loop
7865 && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
7866 && !nested_in_vect_loop_p (vect_loop, stmt)
7867 && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
7868 stmt, vectype_out, vectype_in,
7869 code1, code2, multi_step_cvt,
7870 interm_types))
7872 /* Elements in a vector with the vect_used_by_reduction property cannot
7873 be reordered if the use chain with this property does not have the
7874 same operation. One such example is s += a * b, where elements
7875 in a and b cannot be reordered. Here we check if the vector defined
7876 by STMT is only directly used in the reduction statement. */
7877 tree lhs = gimple_assign_lhs (stmt);
7878 use_operand_p dummy;
7879 gimple use_stmt;
7880 stmt_vec_info use_stmt_info = NULL;
7881 if (single_imm_use (lhs, &dummy, &use_stmt)
7882 && (use_stmt_info = vinfo_for_stmt (use_stmt))
7883 && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
7884 return true;
7886 c1 = VEC_WIDEN_MULT_LO_EXPR;
7887 c2 = VEC_WIDEN_MULT_HI_EXPR;
7888 break;
7890 case VEC_WIDEN_MULT_EVEN_EXPR:
7891 /* Support the recursion induced just above. */
7892 c1 = VEC_WIDEN_MULT_EVEN_EXPR;
7893 c2 = VEC_WIDEN_MULT_ODD_EXPR;
7894 break;
7896 case WIDEN_LSHIFT_EXPR:
7897 c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
7898 c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
7899 break;
7901 CASE_CONVERT:
7902 c1 = VEC_UNPACK_LO_EXPR;
7903 c2 = VEC_UNPACK_HI_EXPR;
7904 break;
7906 case FLOAT_EXPR:
7907 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
7908 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
7909 break;
7911 case FIX_TRUNC_EXPR:
7912 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
7913 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
7914 computing the operation. */
7915 return false;
7917 default:
7918 gcc_unreachable ();
7921 if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
7923 enum tree_code ctmp = c1;
7924 c1 = c2;
7925 c2 = ctmp;
7928 if (code == FIX_TRUNC_EXPR)
7930 /* The signedness is determined from the output operand. */
7931 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
7932 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
7934 else
7936 optab1 = optab_for_tree_code (c1, vectype, optab_default);
7937 optab2 = optab_for_tree_code (c2, vectype, optab_default);
7940 if (!optab1 || !optab2)
7941 return false;
7943 vec_mode = TYPE_MODE (vectype);
7944 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
7945 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
7946 return false;
7948 *code1 = c1;
7949 *code2 = c2;
7951 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
7952 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
7953 return true;
7955 /* Check if it's a multi-step conversion that can be done using intermediate
7956 types. */
7958 prev_type = vectype;
7959 prev_mode = vec_mode;
7961 if (!CONVERT_EXPR_CODE_P (code))
7962 return false;
7964 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
7965 intermediate steps in the promotion sequence. We try
7966 MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
7967 not. */
7968 interm_types->create (MAX_INTERM_CVT_STEPS);
7969 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
7971 intermediate_mode = insn_data[icode1].operand[0].mode;
7972 intermediate_type
7973 = lang_hooks.types.type_for_mode (intermediate_mode,
7974 TYPE_UNSIGNED (prev_type));
7975 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
7976 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
7978 if (!optab3 || !optab4
7979 || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
7980 || insn_data[icode1].operand[0].mode != intermediate_mode
7981 || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
7982 || insn_data[icode2].operand[0].mode != intermediate_mode
7983 || ((icode1 = optab_handler (optab3, intermediate_mode))
7984 == CODE_FOR_nothing)
7985 || ((icode2 = optab_handler (optab4, intermediate_mode))
7986 == CODE_FOR_nothing))
7987 break;
7989 interm_types->quick_push (intermediate_type);
7990 (*multi_step_cvt)++;
7992 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
7993 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
7994 return true;
7996 prev_type = intermediate_type;
7997 prev_mode = intermediate_mode;
8000 interm_types->release ();
8001 return false;
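/* A minimal query sketch for the routine above, guarded out of the build:
   testing a char -> int widening conversion.  On a target that only provides
   char->short and short->int unpack patterns, the call is expected to succeed
   with *MULTI_STEP_CVT == 1 and the short vector type pushed onto
   INTERM_TYPES.  The wrapper name and vectype parameters are hypothetical.  */
#if 0
static bool
example_check_widening (gimple stmt, tree char_vectype, tree int_vectype)
{
  enum tree_code code1, code2;
  int multi_step_cvt;
  vec<tree> interm_types = vNULL;
  bool ok = supportable_widening_operation (CONVERT_EXPR, stmt,
					    int_vectype, char_vectype,
					    &code1, &code2, &multi_step_cvt,
					    &interm_types);
  interm_types.release ();
  return ok;
}
#endif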
8005 /* Function supportable_narrowing_operation
8007 Check whether an operation represented by the code CODE is a
8008 narrowing operation that is supported by the target platform in
8009 vector form (i.e., when operating on arguments of type VECTYPE_IN
8010 and producing a result of type VECTYPE_OUT).
8012 Narrowing operations we currently support are NOP (CONVERT) and
8013 FIX_TRUNC. This function checks if these operations are supported by
8014 the target platform directly via vector tree-codes.
8016 Output:
8017 - CODE1 is the code of a vector operation to be used when
8018 vectorizing the operation, if available.
8019 - MULTI_STEP_CVT determines the number of required intermediate steps in
8020 case of multi-step conversion (like int->short->char - in that case
8021 MULTI_STEP_CVT will be 1).
8022 - INTERM_TYPES contains the intermediate type required to perform the
8023 narrowing operation (short in the above example). */
8025 bool
8026 supportable_narrowing_operation (enum tree_code code,
8027 tree vectype_out, tree vectype_in,
8028 enum tree_code *code1, int *multi_step_cvt,
8029 vec<tree> *interm_types)
8031 enum machine_mode vec_mode;
8032 enum insn_code icode1;
8033 optab optab1, interm_optab;
8034 tree vectype = vectype_in;
8035 tree narrow_vectype = vectype_out;
8036 enum tree_code c1;
8037 tree intermediate_type;
8038 enum machine_mode intermediate_mode, prev_mode;
8039 int i;
8040 bool uns;
8042 *multi_step_cvt = 0;
8043 switch (code)
8045 CASE_CONVERT:
8046 c1 = VEC_PACK_TRUNC_EXPR;
8047 break;
8049 case FIX_TRUNC_EXPR:
8050 c1 = VEC_PACK_FIX_TRUNC_EXPR;
8051 break;
8053 case FLOAT_EXPR:
8054 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
8055 tree code and optabs used for computing the operation. */
8056 return false;
8058 default:
8059 gcc_unreachable ();
8062 if (code == FIX_TRUNC_EXPR)
8063 /* The signedness is determined from the output operand. */
8064 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
8065 else
8066 optab1 = optab_for_tree_code (c1, vectype, optab_default);
8068 if (!optab1)
8069 return false;
8071 vec_mode = TYPE_MODE (vectype);
8072 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
8073 return false;
8075 *code1 = c1;
8077 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
8078 return true;
8080 /* Check if it's a multi-step conversion that can be done using intermediate
8081 types. */
8082 prev_mode = vec_mode;
8083 if (code == FIX_TRUNC_EXPR)
8084 uns = TYPE_UNSIGNED (vectype_out);
8085 else
8086 uns = TYPE_UNSIGNED (vectype);
8088 /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
8089 conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
8090 costly than signed. */
8091 if (code == FIX_TRUNC_EXPR && uns)
8093 enum insn_code icode2;
8095 intermediate_type
8096 = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
8097 interm_optab
8098 = optab_for_tree_code (c1, intermediate_type, optab_default);
8099 if (interm_optab != unknown_optab
8100 && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
8101 && insn_data[icode1].operand[0].mode
8102 == insn_data[icode2].operand[0].mode)
8104 uns = false;
8105 optab1 = interm_optab;
8106 icode1 = icode2;
8110 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
8111 intermediate steps in the narrowing sequence. We try
8112 MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not. */
8113 interm_types->create (MAX_INTERM_CVT_STEPS);
8114 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
8116 intermediate_mode = insn_data[icode1].operand[0].mode;
8117 intermediate_type
8118 = lang_hooks.types.type_for_mode (intermediate_mode, uns);
8119 interm_optab
8120 = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
8121 optab_default);
8122 if (!interm_optab
8123 || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
8124 || insn_data[icode1].operand[0].mode != intermediate_mode
8125 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
8126 == CODE_FOR_nothing))
8127 break;
8129 interm_types->quick_push (intermediate_type);
8130 (*multi_step_cvt)++;
8132 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
8133 return true;
8135 prev_mode = intermediate_mode;
8136 optab1 = interm_optab;
8139 interm_types->release ();
8140 return false;
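/* The narrowing counterpart, sketched under the same assumptions and guarded
   out of the build: an int -> char conversion where only int->short and
   short->char pack_trunc patterns exist should succeed with
   *MULTI_STEP_CVT == 1 and the short vector type in INTERM_TYPES.  The
   wrapper name and vectype parameters are hypothetical.  */
#if 0
static bool
example_check_narrowing (tree int_vectype, tree char_vectype)
{
  enum tree_code code1;
  int multi_step_cvt;
  vec<tree> interm_types = vNULL;
  bool ok = supportable_narrowing_operation (CONVERT_EXPR,
					     char_vectype, int_vectype,
					     &code1, &multi_step_cvt,
					     &interm_types);
  interm_types.release ();
  return ok;
}
#endif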