Move code that stubs out IFN_MASK_LOADs
[official-gcc.git] / gcc / tree-vect-stmts.c
blob 96c6605d9591856ce98033c7b8f2ca12663bc59c
1 /* Statement Analysis and Transformation for Vectorization
2 Copyright (C) 2003-2018 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
4 and Ira Rosen <irar@il.ibm.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "ssa.h"
31 #include "optabs-tree.h"
32 #include "insn-config.h"
33 #include "recog.h" /* FIXME: for insn_data */
34 #include "cgraph.h"
35 #include "dumpfile.h"
36 #include "alias.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
39 #include "tree-eh.h"
40 #include "gimplify.h"
41 #include "gimple-iterator.h"
42 #include "gimplify-me.h"
43 #include "tree-cfg.h"
44 #include "tree-ssa-loop-manip.h"
45 #include "cfgloop.h"
46 #include "tree-ssa-loop.h"
47 #include "tree-scalar-evolution.h"
48 #include "tree-vectorizer.h"
49 #include "builtins.h"
50 #include "internal-fn.h"
51 #include "tree-vector-builder.h"
52 #include "vec-perm-indices.h"
54 /* For lang_hooks.types.type_for_mode. */
55 #include "langhooks.h"
57 /* Says whether a statement is a load, a store of a vectorized statement
58 result, or a store of an invariant value. */
59 enum vec_load_store_type {
60 VLS_LOAD,
61 VLS_STORE,
62 VLS_STORE_INVARIANT
65 /* Return the vectorized type for the given statement. */
67 tree
68 stmt_vectype (struct _stmt_vec_info *stmt_info)
70 return STMT_VINFO_VECTYPE (stmt_info);
73 /* Return TRUE iff the given statement is in an inner loop relative to
74 the loop being vectorized. */
75 bool
76 stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
78 gimple *stmt = STMT_VINFO_STMT (stmt_info);
79 basic_block bb = gimple_bb (stmt);
80 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
81 struct loop* loop;
83 if (!loop_vinfo)
84 return false;
86 loop = LOOP_VINFO_LOOP (loop_vinfo);
88 return (bb->loop_father == loop->inner);
91 /* Record the cost of a statement, either by directly informing the
92 target model or by saving it in a vector for later processing.
93 Return a preliminary estimate of the statement's cost. */
95 unsigned
96 record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
97 enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
98 int misalign, enum vect_cost_model_location where)
100 if ((kind == vector_load || kind == unaligned_load)
101 && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
102 kind = vector_gather_load;
103 if ((kind == vector_store || kind == unaligned_store)
104 && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
105 kind = vector_scatter_store;
106 if (body_cost_vec)
108 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
109 stmt_info_for_cost si = { count, kind,
110 stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
111 misalign };
112 body_cost_vec->safe_push (si);
113 return (unsigned)
114 (builtin_vectorization_cost (kind, vectype, misalign) * count);
116 else
117 return add_stmt_cost (stmt_info->vinfo->target_cost_data,
118 count, kind, stmt_info, misalign, where);
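/* Illustrative usage sketch (hypothetical helper, for exposition only):
   callers pass a cost vector during analysis so the costs can be replayed
   to the target model later, or a NULL vector to charge the target model
   immediately.  This mirrors the calls made by vect_model_simple_cost
   further down.  */
static unsigned
example_record_body_cost (stmt_vector_for_cost *body_cost_vec,
			  stmt_vec_info stmt_info, int ncopies)
{
  /* NCOPIES copies of a generic vector statement, charged to the loop body.  */
  return record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
			   stmt_info, 0, vect_body);
}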
121 /* Return a variable of type ELEM_TYPE[NELEMS]. */
123 static tree
124 create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
126 return create_tmp_var (build_array_type_nelts (elem_type, nelems),
127 "vect_array");
130 /* ARRAY is an array of vectors created by create_vector_array.
131 Return an SSA_NAME for the vector in index N. The reference
132 is part of the vectorization of STMT and the vector is associated
133 with scalar destination SCALAR_DEST. */
135 static tree
136 read_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
137 tree array, unsigned HOST_WIDE_INT n)
139 tree vect_type, vect, vect_name, array_ref;
140 gimple *new_stmt;
142 gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
143 vect_type = TREE_TYPE (TREE_TYPE (array));
144 vect = vect_create_destination_var (scalar_dest, vect_type);
145 array_ref = build4 (ARRAY_REF, vect_type, array,
146 build_int_cst (size_type_node, n),
147 NULL_TREE, NULL_TREE);
149 new_stmt = gimple_build_assign (vect, array_ref);
150 vect_name = make_ssa_name (vect, new_stmt);
151 gimple_assign_set_lhs (new_stmt, vect_name);
152 vect_finish_stmt_generation (stmt, new_stmt, gsi);
154 return vect_name;
157 /* ARRAY is an array of vectors created by create_vector_array.
158 Emit code to store SSA_NAME VECT in index N of the array.
159 The store is part of the vectorization of STMT. */
161 static void
162 write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect,
163 tree array, unsigned HOST_WIDE_INT n)
165 tree array_ref;
166 gimple *new_stmt;
168 array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
169 build_int_cst (size_type_node, n),
170 NULL_TREE, NULL_TREE);
172 new_stmt = gimple_build_assign (array_ref, vect);
173 vect_finish_stmt_generation (stmt, new_stmt, gsi);
176 /* PTR is a pointer to an array of type TYPE. Return a representation
177 of *PTR. The memory reference replaces those in FIRST_DR
178 (and its group). */
180 static tree
181 create_array_ref (tree type, tree ptr, tree alias_ptr_type)
183 tree mem_ref;
185 mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
186 /* Arrays have the same alignment as their type. */
187 set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
188 return mem_ref;
191 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
193 /* Function vect_mark_relevant.
195 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
197 static void
198 vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
199 enum vect_relevant relevant, bool live_p)
201 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
202 enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
203 bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
204 gimple *pattern_stmt;
206 if (dump_enabled_p ())
208 dump_printf_loc (MSG_NOTE, vect_location,
209 "mark relevant %d, live %d: ", relevant, live_p);
210 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
213 /* If this stmt is an original stmt in a pattern, we might need to mark its
214 related pattern stmt instead of the original stmt. However, such stmts
215 may have their own uses that are not in any pattern, in such cases the
216 stmt itself should be marked. */
217 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
219 /* This is the last stmt in a sequence that was detected as a
220 pattern that can potentially be vectorized. Don't mark the stmt
221 as relevant/live because it's not going to be vectorized.
222 Instead mark the pattern-stmt that replaces it. */
224 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
226 if (dump_enabled_p ())
227 dump_printf_loc (MSG_NOTE, vect_location,
228 "last stmt in pattern. don't mark"
229 " relevant/live.\n");
230 stmt_info = vinfo_for_stmt (pattern_stmt);
231 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
232 save_relevant = STMT_VINFO_RELEVANT (stmt_info);
233 save_live_p = STMT_VINFO_LIVE_P (stmt_info);
234 stmt = pattern_stmt;
237 STMT_VINFO_LIVE_P (stmt_info) |= live_p;
238 if (relevant > STMT_VINFO_RELEVANT (stmt_info))
239 STMT_VINFO_RELEVANT (stmt_info) = relevant;
241 if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
242 && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
244 if (dump_enabled_p ())
245 dump_printf_loc (MSG_NOTE, vect_location,
246 "already marked relevant/live.\n");
247 return;
250 worklist->safe_push (stmt);
254 /* Function is_simple_and_all_uses_invariant
256 Return true if STMT is simple and all uses of it are invariant. */
258 bool
259 is_simple_and_all_uses_invariant (gimple *stmt, loop_vec_info loop_vinfo)
261 tree op;
262 gimple *def_stmt;
263 ssa_op_iter iter;
265 if (!is_gimple_assign (stmt))
266 return false;
268 FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
270 enum vect_def_type dt = vect_uninitialized_def;
272 if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt))
274 if (dump_enabled_p ())
275 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
276 "use not simple.\n");
277 return false;
280 if (dt != vect_external_def && dt != vect_constant_def)
281 return false;
283 return true;
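/* For illustration: x_1 = n_5(D) * 4 is simple and all-uses-invariant when
   n_5(D) is defined outside the loop (vect_external_def), whereas
   x_1 = a[i_3] is not, because i_3 is defined inside the loop.  */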
286 /* Function vect_stmt_relevant_p.
288 Return true if STMT in loop that is represented by LOOP_VINFO is
289 "relevant for vectorization".
291 A stmt is considered "relevant for vectorization" if:
292 - it has uses outside the loop.
293 - it has vdefs (it alters memory).
294 - it is a control stmt in the loop (except for the exit condition).
296 CHECKME: what other side effects would the vectorizer allow? */
298 static bool
299 vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
300 enum vect_relevant *relevant, bool *live_p)
302 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
303 ssa_op_iter op_iter;
304 imm_use_iterator imm_iter;
305 use_operand_p use_p;
306 def_operand_p def_p;
308 *relevant = vect_unused_in_scope;
309 *live_p = false;
311 /* cond stmt other than loop exit cond. */
312 if (is_ctrl_stmt (stmt)
313 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
314 != loop_exit_ctrl_vec_info_type)
315 *relevant = vect_used_in_scope;
317 /* changing memory. */
318 if (gimple_code (stmt) != GIMPLE_PHI)
319 if (gimple_vdef (stmt)
320 && !gimple_clobber_p (stmt))
322 if (dump_enabled_p ())
323 dump_printf_loc (MSG_NOTE, vect_location,
324 "vec_stmt_relevant_p: stmt has vdefs.\n");
325 *relevant = vect_used_in_scope;
328 /* uses outside the loop. */
329 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
331 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
333 basic_block bb = gimple_bb (USE_STMT (use_p));
334 if (!flow_bb_inside_loop_p (loop, bb))
336 if (dump_enabled_p ())
337 dump_printf_loc (MSG_NOTE, vect_location,
338 "vec_stmt_relevant_p: used out of loop.\n");
340 if (is_gimple_debug (USE_STMT (use_p)))
341 continue;
343 /* We expect all such uses to be in the loop exit phis
344 (because of loop closed form) */
345 gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
346 gcc_assert (bb == single_exit (loop)->dest);
348 *live_p = true;
353 if (*live_p && *relevant == vect_unused_in_scope
354 && !is_simple_and_all_uses_invariant (stmt, loop_vinfo))
356 if (dump_enabled_p ())
357 dump_printf_loc (MSG_NOTE, vect_location,
358 "vec_stmt_relevant_p: stmt live but not relevant.\n");
359 *relevant = vect_used_only_live;
362 return (*live_p || *relevant);
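/* For illustration, consider:

     for (i = 0; i < n; i++)
       {
	 a[i] = x;      <-- has a vdef, so *relevant = vect_used_in_scope
	 s = s + b[i];  <-- its result reaches the loop-exit phi, so
       }                    *live_p = true and, since the statement is not
     ... = s;               invariant, *relevant = vect_used_only_live

   Statements whose results are consumed only inside the loop are left
   vect_unused_in_scope here; they are marked later by process_use if a
   relevant statement uses them.  */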
366 /* Function exist_non_indexing_operands_for_use_p
368 USE is one of the uses attached to STMT. Check if USE is
369 used in STMT for anything other than indexing an array. */
371 static bool
372 exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)
374 tree operand;
375 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
377 /* USE corresponds to some operand in STMT. If there is no data
378 reference in STMT, then any operand that corresponds to USE
379 is not indexing an array. */
380 if (!STMT_VINFO_DATA_REF (stmt_info))
381 return true;
383 /* STMT has a data_ref. FORNOW this means that it's of one of
384 the following forms:
385 -1- ARRAY_REF = var
386 -2- var = ARRAY_REF
387 (This should have been verified in analyze_data_refs).
389 'var' in the second case corresponds to a def, not a use,
390 so USE cannot correspond to any operands that are not used
391 for array indexing.
393 Therefore, all we need to check is if STMT falls into the
394 first case, and whether var corresponds to USE. */
396 if (!gimple_assign_copy_p (stmt))
398 if (is_gimple_call (stmt)
399 && gimple_call_internal_p (stmt))
400 switch (gimple_call_internal_fn (stmt))
402 case IFN_MASK_STORE:
403 operand = gimple_call_arg (stmt, 3);
404 if (operand == use)
405 return true;
406 /* FALLTHRU */
407 case IFN_MASK_LOAD:
408 operand = gimple_call_arg (stmt, 2);
409 if (operand == use)
410 return true;
411 break;
412 default:
413 break;
415 return false;
418 if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
419 return false;
420 operand = gimple_assign_rhs1 (stmt);
421 if (TREE_CODE (operand) != SSA_NAME)
422 return false;
424 if (operand == use)
425 return true;
427 return false;
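/* For illustration: given the store  a[i_3] = x_1  (which has a data
   reference), USE == x_1 returns true because x_1 is the stored value,
   while USE == i_3 returns false because i_3 is only used to index the
   array.  For IFN_MASK_LOAD and IFN_MASK_STORE the mask (and, for stores,
   the stored value) is likewise treated as a non-indexing operand.  */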
431 /*
432 Function process_use.
434 Inputs:
435 - a USE in STMT in a loop represented by LOOP_VINFO
436 - RELEVANT - enum value to be set in the STMT_VINFO of the stmt
437 that defined USE. This is done by calling mark_relevant and passing it
438 the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
439 - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
440 be performed.
442 Outputs:
443 Generally, LIVE_P and RELEVANT are used to define the liveness and
444 relevance info of the DEF_STMT of this USE:
445 STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
446 STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
447 Exceptions:
448 - case 1: If USE is used only for address computations (e.g. array indexing),
449 which does not need to be directly vectorized, then the liveness/relevance
450 of the respective DEF_STMT is left unchanged.
451 - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
452 skip DEF_STMT because it has already been processed.
453 - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
454 be modified accordingly.
456 Return true if everything is as expected. Return false otherwise. */
458 static bool
459 process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo,
460 enum vect_relevant relevant, vec<gimple *> *worklist,
461 bool force)
463 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
464 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
465 stmt_vec_info dstmt_vinfo;
466 basic_block bb, def_bb;
467 gimple *def_stmt;
468 enum vect_def_type dt;
470 /* case 1: we are only interested in uses that need to be vectorized. Uses
471 that are used for address computation are not considered relevant. */
472 if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
473 return true;
475 if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt))
477 if (dump_enabled_p ())
478 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
479 "not vectorized: unsupported use in stmt.\n");
480 return false;
483 if (!def_stmt || gimple_nop_p (def_stmt))
484 return true;
486 def_bb = gimple_bb (def_stmt);
487 if (!flow_bb_inside_loop_p (loop, def_bb))
489 if (dump_enabled_p ())
490 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
491 return true;
494 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
495 DEF_STMT must have already been processed, because this should be the
496 only way that STMT, which is a reduction-phi, was put in the worklist,
497 as there should be no other uses for DEF_STMT in the loop. So we just
498 check that everything is as expected, and we are done. */
499 dstmt_vinfo = vinfo_for_stmt (def_stmt);
500 bb = gimple_bb (stmt);
501 if (gimple_code (stmt) == GIMPLE_PHI
502 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
503 && gimple_code (def_stmt) != GIMPLE_PHI
504 && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
505 && bb->loop_father == def_bb->loop_father)
507 if (dump_enabled_p ())
508 dump_printf_loc (MSG_NOTE, vect_location,
509 "reduc-stmt defining reduc-phi in the same nest.\n");
510 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
511 dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
512 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
513 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
514 || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
515 return true;
518 /* case 3a: outer-loop stmt defining an inner-loop stmt:
519 outer-loop-header-bb:
520 d = def_stmt
521 inner-loop:
522 stmt # use (d)
523 outer-loop-tail-bb:
524 ... */
525 if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
527 if (dump_enabled_p ())
528 dump_printf_loc (MSG_NOTE, vect_location,
529 "outer-loop def-stmt defining inner-loop stmt.\n");
531 switch (relevant)
533 case vect_unused_in_scope:
534 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
535 vect_used_in_scope : vect_unused_in_scope;
536 break;
538 case vect_used_in_outer_by_reduction:
539 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
540 relevant = vect_used_by_reduction;
541 break;
543 case vect_used_in_outer:
544 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
545 relevant = vect_used_in_scope;
546 break;
548 case vect_used_in_scope:
549 break;
551 default:
552 gcc_unreachable ();
556 /* case 3b: inner-loop stmt defining an outer-loop stmt:
557 outer-loop-header-bb:
559 inner-loop:
560 d = def_stmt
561 outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
562 stmt # use (d) */
563 else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
565 if (dump_enabled_p ())
566 dump_printf_loc (MSG_NOTE, vect_location,
567 "inner-loop def-stmt defining outer-loop stmt.\n");
569 switch (relevant)
571 case vect_unused_in_scope:
572 relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
573 || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
574 vect_used_in_outer_by_reduction : vect_unused_in_scope;
575 break;
577 case vect_used_by_reduction:
578 case vect_used_only_live:
579 relevant = vect_used_in_outer_by_reduction;
580 break;
582 case vect_used_in_scope:
583 relevant = vect_used_in_outer;
584 break;
586 default:
587 gcc_unreachable ();
590 /* We are also not interested in uses on loop PHI backedges that are
591 inductions. Otherwise we'll needlessly vectorize the IV increment
592 and cause hybrid SLP for SLP inductions. Unless the PHI is live
593 of course. */
594 else if (gimple_code (stmt) == GIMPLE_PHI
595 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_induction_def
596 && ! STMT_VINFO_LIVE_P (stmt_vinfo)
597 && (PHI_ARG_DEF_FROM_EDGE (stmt, loop_latch_edge (bb->loop_father))
598 == use))
600 if (dump_enabled_p ())
601 dump_printf_loc (MSG_NOTE, vect_location,
602 "induction value on backedge.\n");
603 return true;
607 vect_mark_relevant (worklist, def_stmt, relevant, false);
608 return true;
612 /* Function vect_mark_stmts_to_be_vectorized.
614 Not all stmts in the loop need to be vectorized. For example:
616 for i...
617 for j...
618 1. T0 = i + j
619 2. T1 = a[T0]
621 3. j = j + 1
623 Stmts 1 and 3 do not need to be vectorized, because loop control and
624 addressing of vectorized data-refs are handled differently.
626 This pass detects such stmts. */
628 bool
629 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
631 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
632 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
633 unsigned int nbbs = loop->num_nodes;
634 gimple_stmt_iterator si;
635 gimple *stmt;
636 unsigned int i;
637 stmt_vec_info stmt_vinfo;
638 basic_block bb;
639 gimple *phi;
640 bool live_p;
641 enum vect_relevant relevant;
643 if (dump_enabled_p ())
644 dump_printf_loc (MSG_NOTE, vect_location,
645 "=== vect_mark_stmts_to_be_vectorized ===\n");
647 auto_vec<gimple *, 64> worklist;
649 /* 1. Init worklist. */
650 for (i = 0; i < nbbs; i++)
652 bb = bbs[i];
653 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
655 phi = gsi_stmt (si);
656 if (dump_enabled_p ())
658 dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
659 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
662 if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
663 vect_mark_relevant (&worklist, phi, relevant, live_p);
665 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
667 stmt = gsi_stmt (si);
668 if (dump_enabled_p ())
670 dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
671 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
674 if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
675 vect_mark_relevant (&worklist, stmt, relevant, live_p);
679 /* 2. Process_worklist */
680 while (worklist.length () > 0)
682 use_operand_p use_p;
683 ssa_op_iter iter;
685 stmt = worklist.pop ();
686 if (dump_enabled_p ())
688 dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
689 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
692 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
693 (DEF_STMT) as relevant/irrelevant according to the relevance property
694 of STMT. */
695 stmt_vinfo = vinfo_for_stmt (stmt);
696 relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
698 /* Generally, the relevance property of STMT (in STMT_VINFO_RELEVANT) is
699 propagated as is to the DEF_STMTs of its USEs.
701 One exception is when STMT has been identified as defining a reduction
702 variable; in this case we set the relevance to vect_used_by_reduction.
703 This is because we distinguish between two kinds of relevant stmts -
704 those that are used by a reduction computation, and those that are
705 (also) used by a regular computation. This allows us later on to
706 identify stmts that are used solely by a reduction, and therefore the
707 order of the results that they produce does not have to be kept. */
709 switch (STMT_VINFO_DEF_TYPE (stmt_vinfo))
711 case vect_reduction_def:
712 gcc_assert (relevant != vect_unused_in_scope);
713 if (relevant != vect_unused_in_scope
714 && relevant != vect_used_in_scope
715 && relevant != vect_used_by_reduction
716 && relevant != vect_used_only_live)
718 if (dump_enabled_p ())
719 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
720 "unsupported use of reduction.\n");
721 return false;
723 break;
725 case vect_nested_cycle:
726 if (relevant != vect_unused_in_scope
727 && relevant != vect_used_in_outer_by_reduction
728 && relevant != vect_used_in_outer)
730 if (dump_enabled_p ())
731 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
732 "unsupported use of nested cycle.\n");
734 return false;
736 break;
738 case vect_double_reduction_def:
739 if (relevant != vect_unused_in_scope
740 && relevant != vect_used_by_reduction
741 && relevant != vect_used_only_live)
743 if (dump_enabled_p ())
744 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
745 "unsupported use of double reduction.\n");
747 return false;
749 break;
751 default:
752 break;
755 if (is_pattern_stmt_p (stmt_vinfo))
757 /* Pattern statements are not inserted into the code, so
758 FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
759 have to scan the RHS or function arguments instead. */
760 if (is_gimple_assign (stmt))
762 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
763 tree op = gimple_assign_rhs1 (stmt);
765 i = 1;
766 if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
768 if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
769 relevant, &worklist, false)
770 || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
771 relevant, &worklist, false))
772 return false;
773 i = 2;
775 for (; i < gimple_num_ops (stmt); i++)
777 op = gimple_op (stmt, i);
778 if (TREE_CODE (op) == SSA_NAME
779 && !process_use (stmt, op, loop_vinfo, relevant,
780 &worklist, false))
781 return false;
784 else if (is_gimple_call (stmt))
786 for (i = 0; i < gimple_call_num_args (stmt); i++)
788 tree arg = gimple_call_arg (stmt, i);
789 if (!process_use (stmt, arg, loop_vinfo, relevant,
790 &worklist, false))
791 return false;
795 else
796 FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
798 tree op = USE_FROM_PTR (use_p);
799 if (!process_use (stmt, op, loop_vinfo, relevant,
800 &worklist, false))
801 return false;
804 if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
806 gather_scatter_info gs_info;
807 if (!vect_check_gather_scatter (stmt, loop_vinfo, &gs_info))
808 gcc_unreachable ();
809 if (!process_use (stmt, gs_info.offset, loop_vinfo, relevant,
810 &worklist, true))
811 return false;
813 } /* while worklist */
815 return true;
819 /* Function vect_model_simple_cost.
821 Models cost for simple operations, i.e. those that only emit ncopies of a
822 single op. Right now, this does not account for multiple insns that could
823 be generated for the single vector op. We will handle that shortly. */
825 void
826 vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
827 enum vect_def_type *dt,
828 int ndts,
829 stmt_vector_for_cost *prologue_cost_vec,
830 stmt_vector_for_cost *body_cost_vec)
832 int i;
833 int inside_cost = 0, prologue_cost = 0;
835 /* The SLP costs were already calculated during SLP tree build. */
836 if (PURE_SLP_STMT (stmt_info))
837 return;
839 /* Cost the "broadcast" of a scalar operand into a vector operand.
840 Use scalar_to_vec to cost the broadcast, as elsewhere in the vector
841 cost model. */
842 for (i = 0; i < ndts; i++)
843 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
844 prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
845 stmt_info, 0, vect_prologue);
847 /* Pass the inside-of-loop statements to the target-specific cost model. */
848 inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
849 stmt_info, 0, vect_body);
851 if (dump_enabled_p ())
852 dump_printf_loc (MSG_NOTE, vect_location,
853 "vect_model_simple_cost: inside_cost = %d, "
854 "prologue_cost = %d .\n", inside_cost, prologue_cost);
858 /* Model cost for type demotion and promotion operations. PWR is normally
859 zero for single-step promotions and demotions. It will be one if
860 two-step promotion/demotion is required, and so on. Each additional
861 step doubles the number of instructions required. */
863 static void
864 vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
865 enum vect_def_type *dt, int pwr)
867 int i, tmp;
868 int inside_cost = 0, prologue_cost = 0;
869 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
870 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
871 void *target_cost_data;
873 /* The SLP costs were already calculated during SLP tree build. */
874 if (PURE_SLP_STMT (stmt_info))
875 return;
877 if (loop_vinfo)
878 target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
879 else
880 target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);
882 for (i = 0; i < pwr + 1; i++)
884 tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
885 (i + 1) : i;
886 inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
887 vec_promote_demote, stmt_info, 0,
888 vect_body);
891 /* FORNOW: Assuming maximum 2 args per stmt. */
892 for (i = 0; i < 2; i++)
893 if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
894 prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
895 stmt_info, 0, vect_prologue);
897 if (dump_enabled_p ())
898 dump_printf_loc (MSG_NOTE, vect_location,
899 "vect_model_promotion_demotion_cost: inside_cost = %d, "
900 "prologue_cost = %d .\n", inside_cost, prologue_cost);
903 /* Function vect_model_store_cost
905 Models cost for stores. In the case of grouped accesses, one access
906 has the overhead of the grouped access attributed to it. */
908 void
909 vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
910 vect_memory_access_type memory_access_type,
911 enum vect_def_type dt, slp_tree slp_node,
912 stmt_vector_for_cost *prologue_cost_vec,
913 stmt_vector_for_cost *body_cost_vec)
915 unsigned int inside_cost = 0, prologue_cost = 0;
916 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
917 gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
918 bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);
920 if (dt == vect_constant_def || dt == vect_external_def)
921 prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
922 stmt_info, 0, vect_prologue);
924 /* Grouped stores update all elements in the group at once,
925 so we want the DR for the first statement. */
926 if (!slp_node && grouped_access_p)
928 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
929 dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
932 /* True if we should include any once-per-group costs as well as
933 the cost of the statement itself. For SLP we only get called
934 once per group anyhow. */
935 bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));
937 /* We assume that the cost of a single store-lanes instruction is
938 equivalent to the cost of GROUP_SIZE separate stores. If a grouped
939 access is instead being provided by a permute-and-store operation,
940 include the cost of the permutes. */
941 if (first_stmt_p
942 && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
944 /* Uses high and low interleave or shuffle operations for each
945 needed permute. */
946 int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
947 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
948 inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
949 stmt_info, 0, vect_body);
951 if (dump_enabled_p ())
952 dump_printf_loc (MSG_NOTE, vect_location,
953 "vect_model_store_cost: strided group_size = %d .\n",
954 group_size);
957 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
958 /* Costs of the stores. */
959 if (memory_access_type == VMAT_ELEMENTWISE
960 || memory_access_type == VMAT_GATHER_SCATTER)
962 /* N scalar stores plus extracting the elements. */
963 unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
964 inside_cost += record_stmt_cost (body_cost_vec,
965 ncopies * assumed_nunits,
966 scalar_store, stmt_info, 0, vect_body);
968 else
969 vect_get_store_cost (dr, ncopies, &inside_cost, body_cost_vec);
971 if (memory_access_type == VMAT_ELEMENTWISE
972 || memory_access_type == VMAT_STRIDED_SLP)
974 /* N scalar stores plus extracting the elements. */
975 unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
976 inside_cost += record_stmt_cost (body_cost_vec,
977 ncopies * assumed_nunits,
978 vec_to_scalar, stmt_info, 0, vect_body);
981 if (dump_enabled_p ())
982 dump_printf_loc (MSG_NOTE, vect_location,
983 "vect_model_store_cost: inside_cost = %d, "
984 "prologue_cost = %d .\n", inside_cost, prologue_cost);
988 /* Calculate cost of DR's memory access. */
989 void
990 vect_get_store_cost (struct data_reference *dr, int ncopies,
991 unsigned int *inside_cost,
992 stmt_vector_for_cost *body_cost_vec)
994 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
995 gimple *stmt = DR_STMT (dr);
996 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
998 switch (alignment_support_scheme)
1000 case dr_aligned:
1002 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1003 vector_store, stmt_info, 0,
1004 vect_body);
1006 if (dump_enabled_p ())
1007 dump_printf_loc (MSG_NOTE, vect_location,
1008 "vect_model_store_cost: aligned.\n");
1009 break;
1012 case dr_unaligned_supported:
1014 /* Here, we assign an additional cost for the unaligned store. */
1015 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1016 unaligned_store, stmt_info,
1017 DR_MISALIGNMENT (dr), vect_body);
1018 if (dump_enabled_p ())
1019 dump_printf_loc (MSG_NOTE, vect_location,
1020 "vect_model_store_cost: unaligned supported by "
1021 "hardware.\n");
1022 break;
1025 case dr_unaligned_unsupported:
1027 *inside_cost = VECT_MAX_COST;
1029 if (dump_enabled_p ())
1030 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1031 "vect_model_store_cost: unsupported access.\n");
1032 break;
1035 default:
1036 gcc_unreachable ();
1041 /* Function vect_model_load_cost
1043 Models cost for loads. In the case of grouped accesses, one access has
1044 the overhead of the grouped access attributed to it. Since unaligned
1045 accesses are supported for loads, we also account for the costs of the
1046 access scheme chosen. */
1048 void
1049 vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
1050 vect_memory_access_type memory_access_type,
1051 slp_tree slp_node,
1052 stmt_vector_for_cost *prologue_cost_vec,
1053 stmt_vector_for_cost *body_cost_vec)
1055 gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
1056 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
1057 unsigned int inside_cost = 0, prologue_cost = 0;
1058 bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);
1060 /* Grouped loads read all elements in the group at once,
1061 so we want the DR for the first statement. */
1062 if (!slp_node && grouped_access_p)
1064 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
1065 dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
1068 /* True if we should include any once-per-group costs as well as
1069 the cost of the statement itself. For SLP we only get called
1070 once per group anyhow. */
1071 bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));
1073 /* We assume that the cost of a single load-lanes instruction is
1074 equivalent to the cost of GROUP_SIZE separate loads. If a grouped
1075 access is instead being provided by a load-and-permute operation,
1076 include the cost of the permutes. */
1077 if (first_stmt_p
1078 && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
1080 /* Uses even and odd extract operations or shuffle operations
1081 for each needed permute. */
1082 int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
1083 int nstmts = ncopies * ceil_log2 (group_size) * group_size;
1084 inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
1085 stmt_info, 0, vect_body);
1087 if (dump_enabled_p ())
1088 dump_printf_loc (MSG_NOTE, vect_location,
1089 "vect_model_load_cost: strided group_size = %d .\n",
1090 group_size);
1093 /* The loads themselves. */
1094 if (memory_access_type == VMAT_ELEMENTWISE
1095 || memory_access_type == VMAT_GATHER_SCATTER)
1097 /* N scalar loads plus gathering them into a vector. */
1098 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1099 unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
1100 inside_cost += record_stmt_cost (body_cost_vec,
1101 ncopies * assumed_nunits,
1102 scalar_load, stmt_info, 0, vect_body);
1104 else
1105 vect_get_load_cost (dr, ncopies, first_stmt_p,
1106 &inside_cost, &prologue_cost,
1107 prologue_cost_vec, body_cost_vec, true);
1108 if (memory_access_type == VMAT_ELEMENTWISE
1109 || memory_access_type == VMAT_STRIDED_SLP)
1110 inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
1111 stmt_info, 0, vect_body);
1113 if (dump_enabled_p ())
1114 dump_printf_loc (MSG_NOTE, vect_location,
1115 "vect_model_load_cost: inside_cost = %d, "
1116 "prologue_cost = %d .\n", inside_cost, prologue_cost);
1120 /* Calculate cost of DR's memory access. */
1121 void
1122 vect_get_load_cost (struct data_reference *dr, int ncopies,
1123 bool add_realign_cost, unsigned int *inside_cost,
1124 unsigned int *prologue_cost,
1125 stmt_vector_for_cost *prologue_cost_vec,
1126 stmt_vector_for_cost *body_cost_vec,
1127 bool record_prologue_costs)
1129 int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
1130 gimple *stmt = DR_STMT (dr);
1131 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1133 switch (alignment_support_scheme)
1135 case dr_aligned:
1137 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1138 stmt_info, 0, vect_body);
1140 if (dump_enabled_p ())
1141 dump_printf_loc (MSG_NOTE, vect_location,
1142 "vect_model_load_cost: aligned.\n");
1144 break;
1146 case dr_unaligned_supported:
1148 /* Here, we assign an additional cost for the unaligned load. */
1149 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1150 unaligned_load, stmt_info,
1151 DR_MISALIGNMENT (dr), vect_body);
1153 if (dump_enabled_p ())
1154 dump_printf_loc (MSG_NOTE, vect_location,
1155 "vect_model_load_cost: unaligned supported by "
1156 "hardware.\n");
1158 break;
1160 case dr_explicit_realign:
1162 *inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
1163 vector_load, stmt_info, 0, vect_body);
1164 *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
1165 vec_perm, stmt_info, 0, vect_body);
1167 /* FIXME: If the misalignment remains fixed across the iterations of
1168 the containing loop, the following cost should be added to the
1169 prologue costs. */
1170 if (targetm.vectorize.builtin_mask_for_load)
1171 *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
1172 stmt_info, 0, vect_body);
1174 if (dump_enabled_p ())
1175 dump_printf_loc (MSG_NOTE, vect_location,
1176 "vect_model_load_cost: explicit realign\n");
1178 break;
1180 case dr_explicit_realign_optimized:
1182 if (dump_enabled_p ())
1183 dump_printf_loc (MSG_NOTE, vect_location,
1184 "vect_model_load_cost: unaligned software "
1185 "pipelined.\n");
1187 /* Unaligned software pipeline has a load of an address, an initial
1188 load, and possibly a mask operation to "prime" the loop. However,
1189 if this is an access in a group of loads, which provide grouped
1190 access, then the above cost should only be considered for one
1191 access in the group. Inside the loop, there is a load op
1192 and a realignment op. */
1194 if (add_realign_cost && record_prologue_costs)
1196 *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
1197 vector_stmt, stmt_info,
1198 0, vect_prologue);
1199 if (targetm.vectorize.builtin_mask_for_load)
1200 *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
1201 vector_stmt, stmt_info,
1202 0, vect_prologue);
1205 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
1206 stmt_info, 0, vect_body);
1207 *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
1208 stmt_info, 0, vect_body);
1210 if (dump_enabled_p ())
1211 dump_printf_loc (MSG_NOTE, vect_location,
1212 "vect_model_load_cost: explicit realign optimized"
1213 "\n");
1215 break;
1218 case dr_unaligned_unsupported:
1220 *inside_cost = VECT_MAX_COST;
1222 if (dump_enabled_p ())
1223 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1224 "vect_model_load_cost: unsupported access.\n");
1225 break;
1228 default:
1229 gcc_unreachable ();
1233 /* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
1234 the loop preheader for the vectorized stmt STMT. */
1236 static void
1237 vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
1239 if (gsi)
1240 vect_finish_stmt_generation (stmt, new_stmt, gsi);
1241 else
1243 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1244 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1246 if (loop_vinfo)
1248 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1249 basic_block new_bb;
1250 edge pe;
1252 if (nested_in_vect_loop_p (loop, stmt))
1253 loop = loop->inner;
1255 pe = loop_preheader_edge (loop);
1256 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
1257 gcc_assert (!new_bb);
1259 else
1261 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
1262 basic_block bb;
1263 gimple_stmt_iterator gsi_bb_start;
1265 gcc_assert (bb_vinfo);
1266 bb = BB_VINFO_BB (bb_vinfo);
1267 gsi_bb_start = gsi_after_labels (bb);
1268 gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
1272 if (dump_enabled_p ())
1274 dump_printf_loc (MSG_NOTE, vect_location,
1275 "created new init_stmt: ");
1276 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
1280 /* Function vect_init_vector.
1282 Insert a new stmt (INIT_STMT) that initializes a new variable of type
1283 TYPE with the value VAL. If TYPE is a vector type and VAL does not have
1284 a vector type, a vector with all elements equal to VAL is created first.
1285 Place the initialization at GSI if it is not NULL. Otherwise, place the
1286 initialization at the loop preheader.
1287 Return the DEF of INIT_STMT.
1288 It will be used in the vectorization of STMT. */
1290 tree
1291 vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi)
1293 gimple *init_stmt;
1294 tree new_temp;
1296 /* We abuse this function to push something to an SSA name with initial 'val'. */
1297 if (! useless_type_conversion_p (type, TREE_TYPE (val)))
1299 gcc_assert (TREE_CODE (type) == VECTOR_TYPE);
1300 if (! types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
1302 /* A scalar boolean value should be transformed into an
1303 all-zeros or all-ones value before building a vector. */
1304 if (VECTOR_BOOLEAN_TYPE_P (type))
1306 tree true_val = build_all_ones_cst (TREE_TYPE (type));
1307 tree false_val = build_zero_cst (TREE_TYPE (type));
1309 if (CONSTANT_CLASS_P (val))
1310 val = integer_zerop (val) ? false_val : true_val;
1311 else
1313 new_temp = make_ssa_name (TREE_TYPE (type));
1314 init_stmt = gimple_build_assign (new_temp, COND_EXPR,
1315 val, true_val, false_val);
1316 vect_init_vector_1 (stmt, init_stmt, gsi);
1317 val = new_temp;
1320 else if (CONSTANT_CLASS_P (val))
1321 val = fold_convert (TREE_TYPE (type), val);
1322 else
1324 new_temp = make_ssa_name (TREE_TYPE (type));
1325 if (! INTEGRAL_TYPE_P (TREE_TYPE (val)))
1326 init_stmt = gimple_build_assign (new_temp,
1327 fold_build1 (VIEW_CONVERT_EXPR,
1328 TREE_TYPE (type),
1329 val));
1330 else
1331 init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
1332 vect_init_vector_1 (stmt, init_stmt, gsi);
1333 val = new_temp;
1336 val = build_vector_from_val (type, val);
1339 new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
1340 init_stmt = gimple_build_assign (new_temp, val);
1341 vect_init_vector_1 (stmt, init_stmt, gsi);
1342 return new_temp;
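/* Illustrative usage (hypothetical wrapper, for exposition only):
   broadcasting a loop-invariant scalar OP into a vector of type VECTYPE.
   Passing a NULL iterator places the init statement on the loop preheader
   edge, which is the pattern vect_get_vec_def_for_operand uses below.  */
static tree
example_broadcast_invariant (gimple *stmt, tree op, tree vectype)
{
  return vect_init_vector (stmt, op, vectype, NULL);
}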
1345 /* Function vect_get_vec_def_for_operand_1.
1347 For a defining stmt DEF_STMT of a scalar stmt, return a vector def with type
1348 DT that will be used in the vectorized stmt. */
1350 tree
1351 vect_get_vec_def_for_operand_1 (gimple *def_stmt, enum vect_def_type dt)
1353 tree vec_oprnd;
1354 gimple *vec_stmt;
1355 stmt_vec_info def_stmt_info = NULL;
1357 switch (dt)
1359 /* operand is a constant or a loop invariant. */
1360 case vect_constant_def:
1361 case vect_external_def:
1362 /* Code should use vect_get_vec_def_for_operand. */
1363 gcc_unreachable ();
1365 /* operand is defined inside the loop. */
1366 case vect_internal_def:
1368 /* Get the def from the vectorized stmt. */
1369 def_stmt_info = vinfo_for_stmt (def_stmt);
1371 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1372 /* Get vectorized pattern statement. */
1373 if (!vec_stmt
1374 && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
1375 && !STMT_VINFO_RELEVANT (def_stmt_info))
1376 vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
1377 STMT_VINFO_RELATED_STMT (def_stmt_info)));
1378 gcc_assert (vec_stmt);
1379 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1380 vec_oprnd = PHI_RESULT (vec_stmt);
1381 else if (is_gimple_call (vec_stmt))
1382 vec_oprnd = gimple_call_lhs (vec_stmt);
1383 else
1384 vec_oprnd = gimple_assign_lhs (vec_stmt);
1385 return vec_oprnd;
1388 /* operand is defined by a loop header phi. */
1389 case vect_reduction_def:
1390 case vect_double_reduction_def:
1391 case vect_nested_cycle:
1392 case vect_induction_def:
1394 gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
1396 /* Get the def from the vectorized stmt. */
1397 def_stmt_info = vinfo_for_stmt (def_stmt);
1398 vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
1399 if (gimple_code (vec_stmt) == GIMPLE_PHI)
1400 vec_oprnd = PHI_RESULT (vec_stmt);
1401 else
1402 vec_oprnd = gimple_get_lhs (vec_stmt);
1403 return vec_oprnd;
1406 default:
1407 gcc_unreachable ();
1412 /* Function vect_get_vec_def_for_operand.
1414 OP is an operand in STMT. This function returns a (vector) def that will be
1415 used in the vectorized stmt for STMT.
1417 In the case that OP is an SSA_NAME which is defined in the loop, then
1418 STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.
1420 In case OP is an invariant or constant, a new stmt that creates a vector def
1421 needs to be introduced. VECTYPE may be used to specify a required type for
1422 vector invariant. */
1424 tree
1425 vect_get_vec_def_for_operand (tree op, gimple *stmt, tree vectype)
1427 gimple *def_stmt;
1428 enum vect_def_type dt;
1429 bool is_simple_use;
1430 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
1431 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1433 if (dump_enabled_p ())
1435 dump_printf_loc (MSG_NOTE, vect_location,
1436 "vect_get_vec_def_for_operand: ");
1437 dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
1438 dump_printf (MSG_NOTE, "\n");
1441 is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt);
1442 gcc_assert (is_simple_use);
1443 if (def_stmt && dump_enabled_p ())
1445 dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = ");
1446 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
1449 if (dt == vect_constant_def || dt == vect_external_def)
1451 tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
1452 tree vector_type;
1454 if (vectype)
1455 vector_type = vectype;
1456 else if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op))
1457 && VECTOR_BOOLEAN_TYPE_P (stmt_vectype))
1458 vector_type = build_same_sized_truth_vector_type (stmt_vectype);
1459 else
1460 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
1462 gcc_assert (vector_type);
1463 return vect_init_vector (stmt, op, vector_type, NULL);
1465 else
1466 return vect_get_vec_def_for_operand_1 (def_stmt, dt);
1470 /* Function vect_get_vec_def_for_stmt_copy
1472 Return a vector-def for an operand. This function is used when the
1473 vectorized stmt to be created (by the caller to this function) is a "copy"
1474 created in case the vectorized result cannot fit in one vector, and several
1475 copies of the vector-stmt are required. In this case the vector-def is
1476 retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
1477 of the stmt that defines VEC_OPRND.
1478 DT is the type of the vector def VEC_OPRND.
1480 Context:
1481 In case the vectorization factor (VF) is bigger than the number
1482 of elements that can fit in a vectype (nunits), we have to generate
1483 more than one vector stmt to vectorize the scalar stmt. This situation
1484 arises when there are multiple data-types operated upon in the loop; the
1485 smallest data-type determines the VF, and as a result, when vectorizing
1486 stmts operating on wider types we need to create 'VF/nunits' "copies" of the
1487 vector stmt (each computing a vector of 'nunits' results, and together
1488 computing 'VF' results in each iteration). This function is called when
1489 vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
1490 which VF=16 and nunits=4, so the number of copies required is 4):
1492 scalar stmt: vectorized into: STMT_VINFO_RELATED_STMT
1494 S1: x = load VS1.0: vx.0 = memref0 VS1.1
1495 VS1.1: vx.1 = memref1 VS1.2
1496 VS1.2: vx.2 = memref2 VS1.3
1497 VS1.3: vx.3 = memref3
1499 S2: z = x + ... VSnew.0: vz0 = vx.0 + ... VSnew.1
1500 VSnew.1: vz1 = vx.1 + ... VSnew.2
1501 VSnew.2: vz2 = vx.2 + ... VSnew.3
1502 VSnew.3: vz3 = vx.3 + ...
1504 The vectorization of S1 is explained in vectorizable_load.
1505 The vectorization of S2:
1506 To create the first vector-stmt out of the 4 copies - VSnew.0 -
1507 the function 'vect_get_vec_def_for_operand' is called to
1508 get the relevant vector-def for each operand of S2. For operand x it
1509 returns the vector-def 'vx.0'.
1511 To create the remaining copies of the vector-stmt (VSnew.j), this
1512 function is called to get the relevant vector-def for each operand. It is
1513 obtained from the respective VS1.j stmt, which is recorded in the
1514 STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.
1516 For example, to obtain the vector-def 'vx.1' in order to create the
1517 vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
1518 Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
1519 STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
1520 and return its def ('vx.1').
1521 Overall, to create the above sequence this function will be called 3 times:
1522 vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
1523 vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
1524 vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2); */
1526 tree
1527 vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
1529 gimple *vec_stmt_for_operand;
1530 stmt_vec_info def_stmt_info;
1532 /* Do nothing; can reuse same def. */
1533 if (dt == vect_external_def || dt == vect_constant_def )
1534 return vec_oprnd;
1536 vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
1537 def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
1538 gcc_assert (def_stmt_info);
1539 vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
1540 gcc_assert (vec_stmt_for_operand);
1541 if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
1542 vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
1543 else
1544 vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
1545 return vec_oprnd;
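/* Illustrative sketch (hypothetical helper, for exposition only): collecting
   the defs vx.0 .. vx.3 from the VF=16 / nunits=4 example in the comment
   above.  The first def comes from vect_get_vec_def_for_operand; each later
   copy is reached through STMT_VINFO_RELATED_STMT by the call below.  */
static void
example_collect_copy_defs (enum vect_def_type dt, tree vx0, tree defs[4])
{
  defs[0] = vx0;
  for (int j = 1; j < 4; j++)
    defs[j] = vect_get_vec_def_for_stmt_copy (dt, defs[j - 1]);
}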
1549 /* Get vectorized definitions for the operands to create a copy of an original
1550 stmt. See vect_get_vec_def_for_stmt_copy () for details. */
1552 void
1553 vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
1554 vec<tree> *vec_oprnds0,
1555 vec<tree> *vec_oprnds1)
1557 tree vec_oprnd = vec_oprnds0->pop ();
1559 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
1560 vec_oprnds0->quick_push (vec_oprnd);
1562 if (vec_oprnds1 && vec_oprnds1->length ())
1564 vec_oprnd = vec_oprnds1->pop ();
1565 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
1566 vec_oprnds1->quick_push (vec_oprnd);
1571 /* Get vectorized definitions for OP0 and OP1. */
1573 void
1574 vect_get_vec_defs (tree op0, tree op1, gimple *stmt,
1575 vec<tree> *vec_oprnds0,
1576 vec<tree> *vec_oprnds1,
1577 slp_tree slp_node)
1579 if (slp_node)
1581 int nops = (op1 == NULL_TREE) ? 1 : 2;
1582 auto_vec<tree> ops (nops);
1583 auto_vec<vec<tree> > vec_defs (nops);
1585 ops.quick_push (op0);
1586 if (op1)
1587 ops.quick_push (op1);
1589 vect_get_slp_defs (ops, slp_node, &vec_defs);
1591 *vec_oprnds0 = vec_defs[0];
1592 if (op1)
1593 *vec_oprnds1 = vec_defs[1];
1595 else
1597 tree vec_oprnd;
1599 vec_oprnds0->create (1);
1600 vec_oprnd = vect_get_vec_def_for_operand (op0, stmt);
1601 vec_oprnds0->quick_push (vec_oprnd);
1603 if (op1)
1605 vec_oprnds1->create (1);
1606 vec_oprnd = vect_get_vec_def_for_operand (op1, stmt);
1607 vec_oprnds1->quick_push (vec_oprnd);
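/* For illustration: in the non-SLP path above, a two-operand statement gets
   exactly one vector def per operand via vect_get_vec_def_for_operand;
   further copies are then obtained with vect_get_vec_defs_for_stmt_copy.  */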
1613 /* Function vect_finish_stmt_generation.
1615 Insert a new stmt. */
1617 void
1618 vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt,
1619 gimple_stmt_iterator *gsi)
1621 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1622 vec_info *vinfo = stmt_info->vinfo;
1624 gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);
1626 if (!gsi_end_p (*gsi)
1627 && gimple_has_mem_ops (vec_stmt))
1629 gimple *at_stmt = gsi_stmt (*gsi);
1630 tree vuse = gimple_vuse (at_stmt);
1631 if (vuse && TREE_CODE (vuse) == SSA_NAME)
1633 tree vdef = gimple_vdef (at_stmt);
1634 gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
1635 /* If we have an SSA vuse and insert a store, update virtual
1636 SSA form to avoid triggering the renamer. Do so only
1637 if we can easily see all uses - which is what almost always
1638 happens with the way vectorized stmts are inserted. */
1639 if ((vdef && TREE_CODE (vdef) == SSA_NAME)
1640 && ((is_gimple_assign (vec_stmt)
1641 && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
1642 || (is_gimple_call (vec_stmt)
1643 && !(gimple_call_flags (vec_stmt)
1644 & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
1646 tree new_vdef = copy_ssa_name (vuse, vec_stmt);
1647 gimple_set_vdef (vec_stmt, new_vdef);
1648 SET_USE (gimple_vuse_op (at_stmt), new_vdef);
1652 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
1654 set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo));
1656 if (dump_enabled_p ())
1658 dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
1659 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
1662 gimple_set_location (vec_stmt, gimple_location (stmt));
1664 /* While EH edges will generally prevent vectorization, stmt might
1665 e.g. be in a must-not-throw region. Ensure newly created stmts
1666 that could throw are part of the same region. */
1667 int lp_nr = lookup_stmt_eh_lp (stmt);
1668 if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
1669 add_stmt_to_eh_lp (vec_stmt, lp_nr);
1672 /* We want to vectorize a call to combined function CFN with function
1673 decl FNDECL, using VECTYPE_OUT as the type of the output and VECTYPE_IN
1674 as the types of all inputs. Check whether this is possible using
1675 an internal function, returning its code if so or IFN_LAST if not. */
1677 static internal_fn
1678 vectorizable_internal_function (combined_fn cfn, tree fndecl,
1679 tree vectype_out, tree vectype_in)
1681 internal_fn ifn;
1682 if (internal_fn_p (cfn))
1683 ifn = as_internal_fn (cfn);
1684 else
1685 ifn = associated_internal_fn (fndecl);
1686 if (ifn != IFN_LAST && direct_internal_fn_p (ifn))
1688 const direct_internal_fn_info &info = direct_internal_fn (ifn);
1689 if (info.vectorizable)
1691 tree type0 = (info.type0 < 0 ? vectype_out : vectype_in);
1692 tree type1 = (info.type1 < 0 ? vectype_out : vectype_in);
1693 if (direct_internal_fn_supported_p (ifn, tree_pair (type0, type1),
1694 OPTIMIZE_FOR_SPEED))
1695 return ifn;
1698 return IFN_LAST;
1702 static tree permute_vec_elements (tree, tree, tree, gimple *,
1703 gimple_stmt_iterator *);
1705 /* STMT is a non-strided load or store, meaning that it accesses
1706 elements with a known constant step. Return -1 if that step
1707 is negative, 0 if it is zero, and 1 if it is greater than zero. */
1709 static int
1710 compare_step_with_zero (gimple *stmt)
1712 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1713 data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
1714 return tree_int_cst_compare (vect_dr_behavior (dr)->step,
1715 size_zero_node);
1718 /* If the target supports a permute mask that reverses the elements in
1719 a vector of type VECTYPE, return that mask, otherwise return null. */
1721 static tree
1722 perm_mask_for_reverse (tree vectype)
1724 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
1726 /* The encoding has a single stepped pattern. */
1727 vec_perm_builder sel (nunits, 1, 3);
1728 for (int i = 0; i < 3; ++i)
1729 sel.quick_push (nunits - 1 - i);
1731 vec_perm_indices indices (sel, 1, nunits);
1732 if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
1733 return NULL_TREE;
1734 return vect_gen_perm_mask_checked (vectype, indices);
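/* For illustration: with a 4-element vector the builder above is seeded with
   { 3, 2, 1 } (nunits-1, nunits-2, nunits-3) and the single stepped pattern
   extends this to the full reversal { 3, 2, 1, 0 }; the same encoding also
   describes the reversal for variable-length vectors.  The mask is only
   returned if can_vec_perm_const_p accepts it for the vector mode.  */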
1737 /* A subroutine of get_load_store_type, with a subset of the same
1738 arguments. Handle the case where STMT is part of a grouped load
1739 or store.
1741 For stores, the statements in the group are all consecutive
1742 and there is no gap at the end. For loads, the statements in the
1743 group might not be consecutive; there can be gaps between statements
1744 as well as at the end. */
1746 static bool
1747 get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
1748 vec_load_store_type vls_type,
1749 vect_memory_access_type *memory_access_type)
1751 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1752 vec_info *vinfo = stmt_info->vinfo;
1753 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1754 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
1755 gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
1756 data_reference *first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
1757 unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
1758 bool single_element_p = (stmt == first_stmt
1759 && !GROUP_NEXT_ELEMENT (stmt_info));
1760 unsigned HOST_WIDE_INT gap = GROUP_GAP (vinfo_for_stmt (first_stmt));
1761 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
1763 /* True if the vectorized statements would access beyond the last
1764 statement in the group. */
1765 bool overrun_p = false;
1767 /* True if we can cope with such overrun by peeling for gaps, so that
1768 there is at least one final scalar iteration after the vector loop. */
1769 bool can_overrun_p = (vls_type == VLS_LOAD && loop_vinfo && !loop->inner);
1771 /* There can only be a gap at the end of the group if the stride is
1772 known at compile time. */
1773 gcc_assert (!STMT_VINFO_STRIDED_P (stmt_info) || gap == 0);
1775 /* Stores can't yet have gaps. */
1776 gcc_assert (slp || vls_type == VLS_LOAD || gap == 0);
1778 if (slp)
1780 if (STMT_VINFO_STRIDED_P (stmt_info))
1782 /* Try to use consecutive accesses of GROUP_SIZE elements,
1783 separated by the stride, until we have a complete vector.
1784 Fall back to scalar accesses if that isn't possible. */
1785 if (multiple_p (nunits, group_size))
1786 *memory_access_type = VMAT_STRIDED_SLP;
1787 else
1788 *memory_access_type = VMAT_ELEMENTWISE;
1790 else
1792 overrun_p = loop_vinfo && gap != 0;
1793 if (overrun_p && vls_type != VLS_LOAD)
1795 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1796 "Grouped store with gaps requires"
1797 " non-consecutive accesses\n");
1798 return false;
1800 /* An overrun is fine if the trailing elements are smaller
1801 than the alignment boundary B. Every vector access will
1802 be a multiple of B and so we are guaranteed to access a
1803 non-gap element in the same B-sized block. */
1804 if (overrun_p
1805 && gap < (vect_known_alignment_in_bytes (first_dr)
1806 / vect_get_scalar_dr_size (first_dr)))
1807 overrun_p = false;
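	  /* For example, with a known data-reference alignment of 16 bytes
	     and 4-byte scalar elements one alignment block holds
	     16 / 4 == 4 elements, so a trailing gap of up to 3 elements can
	     be over-read safely; a larger gap keeps OVERRUN_P set.  */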
1808 if (overrun_p && !can_overrun_p)
1810 if (dump_enabled_p ())
1811 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1812 "Peeling for outer loop is not supported\n");
1813 return false;
1815 *memory_access_type = VMAT_CONTIGUOUS;
1818 else
1820 /* We can always handle this case using elementwise accesses,
1821 but see if something more efficient is available. */
1822 *memory_access_type = VMAT_ELEMENTWISE;
1824 /* If there is a gap at the end of the group then these optimizations
1825 would access excess elements in the last iteration. */
1826 bool would_overrun_p = (gap != 0);
1827 /* An overrun is fine if the trailing elements are smaller than the
1828 alignment boundary B. Every vector access will be a multiple of B
1829 and so we are guaranteed to access a non-gap element in the
1830 same B-sized block. */
1831 if (would_overrun_p
1832 && gap < (vect_known_alignment_in_bytes (first_dr)
1833 / vect_get_scalar_dr_size (first_dr)))
1834 would_overrun_p = false;
1836 if (!STMT_VINFO_STRIDED_P (stmt_info)
1837 && (can_overrun_p || !would_overrun_p)
1838 && compare_step_with_zero (stmt) > 0)
1840 /* First try using LOAD/STORE_LANES. */
1841 if (vls_type == VLS_LOAD
1842 ? vect_load_lanes_supported (vectype, group_size)
1843 : vect_store_lanes_supported (vectype, group_size))
1845 *memory_access_type = VMAT_LOAD_STORE_LANES;
1846 overrun_p = would_overrun_p;
1849 	  /* If that fails, try using permuting loads/stores.  */
1850 if (*memory_access_type == VMAT_ELEMENTWISE
1851 && (vls_type == VLS_LOAD
1852 ? vect_grouped_load_supported (vectype, single_element_p,
1853 group_size)
1854 : vect_grouped_store_supported (vectype, group_size)))
1856 *memory_access_type = VMAT_CONTIGUOUS_PERMUTE;
1857 overrun_p = would_overrun_p;
1862 if (vls_type != VLS_LOAD && first_stmt == stmt)
1864 /* STMT is the leader of the group. Check the operands of all the
1865 stmts of the group. */
1866 gimple *next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
1867 while (next_stmt)
1869 gcc_assert (gimple_assign_single_p (next_stmt));
1870 tree op = gimple_assign_rhs1 (next_stmt);
1871 gimple *def_stmt;
1872 enum vect_def_type dt;
1873 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
1875 if (dump_enabled_p ())
1876 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1877 "use not simple.\n");
1878 return false;
1880 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
1884 if (overrun_p)
1886 gcc_assert (can_overrun_p);
1887 if (dump_enabled_p ())
1888 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1889 "Data access with gaps requires scalar "
1890 "epilogue loop\n");
1891 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
1894 return true;
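/* A minimal standalone sketch (hypothetical names, not GCC API) of the
   gap test used twice above: the over-read is harmless when the whole
   trailing gap fits inside one alignment block of the first data
   reference.  Kept out of the build.  */
#if 0
static int
overrun_is_harmless (unsigned int gap_elems,
		     unsigned int known_align_bytes,
		     unsigned int scalar_size_bytes)
{
  return gap_elems < known_align_bytes / scalar_size_bytes;
}
#endif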
1897 /* A subroutine of get_load_store_type, with a subset of the same
1898 arguments. Handle the case where STMT is a load or store that
1899 accesses consecutive elements with a negative step. */
1901 static vect_memory_access_type
1902 get_negative_load_store_type (gimple *stmt, tree vectype,
1903 vec_load_store_type vls_type,
1904 unsigned int ncopies)
1906 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1907 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
1908 dr_alignment_support alignment_support_scheme;
1910 if (ncopies > 1)
1912 if (dump_enabled_p ())
1913 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1914 "multiple types with negative step.\n");
1915 return VMAT_ELEMENTWISE;
1918 alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
1919 if (alignment_support_scheme != dr_aligned
1920 && alignment_support_scheme != dr_unaligned_supported)
1922 if (dump_enabled_p ())
1923 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1924 "negative step but alignment required.\n");
1925 return VMAT_ELEMENTWISE;
1928 if (vls_type == VLS_STORE_INVARIANT)
1930 if (dump_enabled_p ())
1931 dump_printf_loc (MSG_NOTE, vect_location,
1932 "negative step with invariant source;"
1933 " no permute needed.\n");
1934 return VMAT_CONTIGUOUS_DOWN;
1937 if (!perm_mask_for_reverse (vectype))
1939 if (dump_enabled_p ())
1940 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1941 "negative step and reversing not supported.\n");
1942 return VMAT_ELEMENTWISE;
1945 return VMAT_CONTIGUOUS_REVERSE;
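/* A minimal standalone sketch (hypothetical names, not GCC API) of the
   decision ladder above, over plain flags instead of stmt_vec_info.
   Kept out of the build.  */
#if 0
enum sketch_vmat { SK_ELEMENTWISE, SK_CONTIGUOUS_DOWN, SK_CONTIGUOUS_REVERSE };

static enum sketch_vmat
negative_step_choice (int ncopies, int alignment_ok,
		      int store_of_invariant, int have_reverse_permute)
{
  if (ncopies > 1)
    return SK_ELEMENTWISE;	/* Multiple vector copies per scalar stmt.  */
  if (!alignment_ok)
    return SK_ELEMENTWISE;	/* Target cannot handle the misalignment.  */
  if (store_of_invariant)
    return SK_CONTIGUOUS_DOWN;	/* No permute needed for an invariant RHS.  */
  if (!have_reverse_permute)
    return SK_ELEMENTWISE;	/* No way to reverse the vector elements.  */
  return SK_CONTIGUOUS_REVERSE;
}
#endif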
1948 /* Analyze load or store statement STMT of type VLS_TYPE. Return true
1949 if there is a memory access type that the vectorized form can use,
1950 storing it in *MEMORY_ACCESS_TYPE if so. If we decide to use gathers
1951 or scatters, fill in GS_INFO accordingly.
1953 SLP says whether we're performing SLP rather than loop vectorization.
1954 VECTYPE is the vector type that the vectorized statements will use.
1955 NCOPIES is the number of vector statements that will be needed. */
1957 static bool
1958 get_load_store_type (gimple *stmt, tree vectype, bool slp,
1959 vec_load_store_type vls_type, unsigned int ncopies,
1960 vect_memory_access_type *memory_access_type,
1961 gather_scatter_info *gs_info)
1963 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1964 vec_info *vinfo = stmt_info->vinfo;
1965 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1966 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
1967 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
1969 *memory_access_type = VMAT_GATHER_SCATTER;
1970 gimple *def_stmt;
1971 if (!vect_check_gather_scatter (stmt, loop_vinfo, gs_info))
1972 gcc_unreachable ();
1973 else if (!vect_is_simple_use (gs_info->offset, vinfo, &def_stmt,
1974 &gs_info->offset_dt,
1975 &gs_info->offset_vectype))
1977 if (dump_enabled_p ())
1978 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1979 "%s index use not simple.\n",
1980 vls_type == VLS_LOAD ? "gather" : "scatter");
1981 return false;
1984 else if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1986 if (!get_group_load_store_type (stmt, vectype, slp, vls_type,
1987 memory_access_type))
1988 return false;
1990 else if (STMT_VINFO_STRIDED_P (stmt_info))
1992 gcc_assert (!slp);
1993 *memory_access_type = VMAT_ELEMENTWISE;
1995 else
1997 int cmp = compare_step_with_zero (stmt);
1998 if (cmp < 0)
1999 *memory_access_type = get_negative_load_store_type
2000 (stmt, vectype, vls_type, ncopies);
2001 else if (cmp == 0)
2003 gcc_assert (vls_type == VLS_LOAD);
2004 *memory_access_type = VMAT_INVARIANT;
2006 else
2007 *memory_access_type = VMAT_CONTIGUOUS;
2010 if ((*memory_access_type == VMAT_ELEMENTWISE
2011 || *memory_access_type == VMAT_STRIDED_SLP)
2012 && !nunits.is_constant ())
2014 if (dump_enabled_p ())
2015 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2016 "Not using elementwise accesses due to variable "
2017 "vectorization factor.\n");
2018 return false;
2021 /* FIXME: At the moment the cost model seems to underestimate the
2022 cost of using elementwise accesses. This check preserves the
2023 traditional behavior until that can be fixed. */
2024 if (*memory_access_type == VMAT_ELEMENTWISE
2025 && !STMT_VINFO_STRIDED_P (stmt_info))
2027 if (dump_enabled_p ())
2028 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2029 "not falling back to elementwise accesses\n");
2030 return false;
2032 return true;
2035 /* Function vectorizable_mask_load_store.
2037 Check if STMT performs a conditional load or store that can be vectorized.
2038 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
2039 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
2040 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2042 static bool
2043 vectorizable_mask_load_store (gimple *stmt, gimple_stmt_iterator *gsi,
2044 gimple **vec_stmt, slp_tree slp_node)
2046 tree vec_dest = NULL;
2047 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2048 stmt_vec_info prev_stmt_info;
2049 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2050 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2051 bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
2052 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
2053 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2054 tree rhs_vectype = NULL_TREE;
2055 tree mask_vectype;
2056 tree elem_type;
2057 gimple *new_stmt;
2058 tree dummy;
2059 tree dataref_ptr = NULL_TREE;
2060 gimple *ptr_incr;
2061 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2062 int ncopies;
2063 int i, j;
2064 bool inv_p;
2065 gather_scatter_info gs_info;
2066 vec_load_store_type vls_type;
2067 tree mask;
2068 gimple *def_stmt;
2069 enum vect_def_type dt;
2071 if (slp_node != NULL)
2072 return false;
2074 ncopies = vect_get_num_copies (loop_vinfo, vectype);
2075 gcc_assert (ncopies >= 1);
2077 mask = gimple_call_arg (stmt, 2);
2079 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (mask)))
2080 return false;
2082 /* FORNOW. This restriction should be relaxed. */
2083 if (nested_in_vect_loop && ncopies > 1)
2085 if (dump_enabled_p ())
2086 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2087 "multiple types in nested loop.");
2088 return false;
2091 if (!STMT_VINFO_RELEVANT_P (stmt_info))
2092 return false;
2094 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
2095 && ! vec_stmt)
2096 return false;
2098 if (!STMT_VINFO_DATA_REF (stmt_info))
2099 return false;
2101 elem_type = TREE_TYPE (vectype);
2103 if (TREE_CODE (mask) != SSA_NAME)
2104 return false;
2106 if (!vect_is_simple_use (mask, loop_vinfo, &def_stmt, &dt, &mask_vectype))
2107 return false;
2109 if (!mask_vectype)
2110 mask_vectype = get_mask_type_for_scalar_type (TREE_TYPE (vectype));
2112 if (!mask_vectype || !VECTOR_BOOLEAN_TYPE_P (mask_vectype)
2113 || maybe_ne (TYPE_VECTOR_SUBPARTS (mask_vectype),
2114 TYPE_VECTOR_SUBPARTS (vectype)))
2115 return false;
2117 if (gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
2119 tree rhs = gimple_call_arg (stmt, 3);
2120 if (!vect_is_simple_use (rhs, loop_vinfo, &def_stmt, &dt, &rhs_vectype))
2121 return false;
2122 if (dt == vect_constant_def || dt == vect_external_def)
2123 vls_type = VLS_STORE_INVARIANT;
2124 else
2125 vls_type = VLS_STORE;
2127 else
2128 vls_type = VLS_LOAD;
2130 vect_memory_access_type memory_access_type;
2131 if (!get_load_store_type (stmt, vectype, false, vls_type, ncopies,
2132 &memory_access_type, &gs_info))
2133 return false;
2135 if (memory_access_type == VMAT_GATHER_SCATTER)
2137 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
2138 tree masktype
2139 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
2140 if (TREE_CODE (masktype) == INTEGER_TYPE)
2142 if (dump_enabled_p ())
2143 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2144 "masked gather with integer mask not supported.");
2145 return false;
2148 else if (memory_access_type != VMAT_CONTIGUOUS)
2150 if (dump_enabled_p ())
2151 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2152 "unsupported access type for masked %s.\n",
2153 vls_type == VLS_LOAD ? "load" : "store");
2154 return false;
2156 else if (!VECTOR_MODE_P (TYPE_MODE (vectype))
2157 || !can_vec_mask_load_store_p (TYPE_MODE (vectype),
2158 TYPE_MODE (mask_vectype),
2159 vls_type == VLS_LOAD)
2160 || (rhs_vectype
2161 && !useless_type_conversion_p (vectype, rhs_vectype)))
2162 return false;
2164 if (!vec_stmt) /* transformation not required. */
2166 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
2167 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2168 if (vls_type == VLS_LOAD)
2169 vect_model_load_cost (stmt_info, ncopies, memory_access_type,
2170 NULL, NULL, NULL);
2171 else
2172 vect_model_store_cost (stmt_info, ncopies, memory_access_type,
2173 dt, NULL, NULL, NULL);
2174 return true;
2176 gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
2178 /* Transform. */
2180 if (memory_access_type == VMAT_GATHER_SCATTER)
2182 tree vec_oprnd0 = NULL_TREE, op;
2183 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
2184 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
2185 tree ptr, vec_mask = NULL_TREE, mask_op = NULL_TREE, var, scale;
2186 tree perm_mask = NULL_TREE, prev_res = NULL_TREE;
2187 tree mask_perm_mask = NULL_TREE;
2188 edge pe = loop_preheader_edge (loop);
2189 gimple_seq seq;
2190 basic_block new_bb;
2191 enum { NARROW, NONE, WIDEN } modifier;
2192 poly_uint64 gather_off_nunits
2193 = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);
2195 rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
2196 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2197 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2198 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2199 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
2200 scaletype = TREE_VALUE (arglist);
2201 gcc_checking_assert (types_compatible_p (srctype, rettype)
2202 && types_compatible_p (srctype, masktype));
2204 if (known_eq (nunits, gather_off_nunits))
2205 modifier = NONE;
2206 else if (known_eq (nunits * 2, gather_off_nunits))
2208 modifier = WIDEN;
2210 /* Currently widening gathers and scatters are only supported for
2211 fixed-length vectors. */
2212 int count = gather_off_nunits.to_constant ();
2213 vec_perm_builder sel (count, count, 1);
2214 for (i = 0; i < count; ++i)
2215 sel.quick_push (i | (count / 2));
2217 vec_perm_indices indices (sel, 1, count);
2218 perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype,
2219 indices);
2221 else if (known_eq (nunits, gather_off_nunits * 2))
2223 modifier = NARROW;
2225 /* Currently narrowing gathers and scatters are only supported for
2226 fixed-length vectors. */
2227 int count = nunits.to_constant ();
2228 vec_perm_builder sel (count, count, 1);
2229 sel.quick_grow (count);
2230 for (i = 0; i < count; ++i)
2231 sel[i] = i < count / 2 ? i : i + count / 2;
2232 vec_perm_indices indices (sel, 2, count);
2233 perm_mask = vect_gen_perm_mask_checked (vectype, indices);
2235 ncopies *= 2;
2236 for (i = 0; i < count; ++i)
2237 sel[i] = i | (count / 2);
2238 indices.new_vector (sel, 2, count);
2239 mask_perm_mask = vect_gen_perm_mask_checked (masktype, indices);
2241 else
2242 gcc_unreachable ();
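      /* For example, with gather_off_nunits == 8 the WIDEN selector above
	 expands to { 4, 5, 6, 7, 4, 5, 6, 7 }, i.e. the high half of the
	 offset vector duplicated for the odd-numbered copy.  With
	 nunits == 8 the NARROW selector expands to
	 { 0, 1, 2, 3, 8, 9, 10, 11 }, taking the low half of each of two
	 gather results, and the mask selector to { 4, 5, 6, 7, 4, 5, 6, 7 },
	 the high half of the mask for the second half-gather.  */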
2244 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
2246 ptr = fold_convert (ptrtype, gs_info.base);
2247 if (!is_gimple_min_invariant (ptr))
2249 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
2250 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
2251 gcc_assert (!new_bb);
2254 scale = build_int_cst (scaletype, gs_info.scale);
2256 prev_stmt_info = NULL;
2257 for (j = 0; j < ncopies; ++j)
2259 if (modifier == WIDEN && (j & 1))
2260 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
2261 perm_mask, stmt, gsi);
2262 else if (j == 0)
2263 op = vec_oprnd0
2264 = vect_get_vec_def_for_operand (gs_info.offset, stmt);
2265 else
2266 op = vec_oprnd0
2267 = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt, vec_oprnd0);
2269 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
2271 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
2272 TYPE_VECTOR_SUBPARTS (idxtype)));
2273 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
2274 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
2275 new_stmt
2276 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
2277 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2278 op = var;
2281 if (mask_perm_mask && (j & 1))
2282 mask_op = permute_vec_elements (mask_op, mask_op,
2283 mask_perm_mask, stmt, gsi);
2284 else
2286 if (j == 0)
2287 vec_mask = vect_get_vec_def_for_operand (mask, stmt);
2288 else
2290 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
2291 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2294 mask_op = vec_mask;
2295 if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
2297 gcc_assert
2298 (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op)),
2299 TYPE_VECTOR_SUBPARTS (masktype)));
2300 var = vect_get_new_ssa_name (masktype, vect_simple_var);
2301 mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
2302 new_stmt
2303 = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op);
2304 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2305 mask_op = var;
2309 new_stmt
2310 = gimple_build_call (gs_info.decl, 5, mask_op, ptr, op, mask_op,
2311 scale);
2313 if (!useless_type_conversion_p (vectype, rettype))
2315 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype),
2316 TYPE_VECTOR_SUBPARTS (rettype)));
2317 op = vect_get_new_ssa_name (rettype, vect_simple_var);
2318 gimple_call_set_lhs (new_stmt, op);
2319 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2320 var = make_ssa_name (vec_dest);
2321 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
2322 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
2324 else
2326 var = make_ssa_name (vec_dest, new_stmt);
2327 gimple_call_set_lhs (new_stmt, var);
2330 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2332 if (modifier == NARROW)
2334 if ((j & 1) == 0)
2336 prev_res = var;
2337 continue;
2339 var = permute_vec_elements (prev_res, var,
2340 perm_mask, stmt, gsi);
2341 new_stmt = SSA_NAME_DEF_STMT (var);
2344 if (prev_stmt_info == NULL)
2345 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2346 else
2347 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2348 prev_stmt_info = vinfo_for_stmt (new_stmt);
2350 return true;
2352 else if (vls_type != VLS_LOAD)
2354 tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
2355 prev_stmt_info = NULL;
2356 LOOP_VINFO_HAS_MASK_STORE (loop_vinfo) = true;
2357 for (i = 0; i < ncopies; i++)
2359 unsigned align, misalign;
2361 if (i == 0)
2363 tree rhs = gimple_call_arg (stmt, 3);
2364 vec_rhs = vect_get_vec_def_for_operand (rhs, stmt);
2365 vec_mask = vect_get_vec_def_for_operand (mask, stmt,
2366 mask_vectype);
2367 	      /* We should have caught mismatched types earlier.  */
2368 gcc_assert (useless_type_conversion_p (vectype,
2369 TREE_TYPE (vec_rhs)));
2370 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
2371 NULL_TREE, &dummy, gsi,
2372 &ptr_incr, false, &inv_p);
2373 gcc_assert (!inv_p);
2375 else
2377 vect_is_simple_use (vec_rhs, loop_vinfo, &def_stmt, &dt);
2378 vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs);
2379 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
2380 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2381 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2382 TYPE_SIZE_UNIT (vectype));
2385 align = DR_TARGET_ALIGNMENT (dr);
2386 if (aligned_access_p (dr))
2387 misalign = 0;
2388 else if (DR_MISALIGNMENT (dr) == -1)
2390 align = TYPE_ALIGN_UNIT (elem_type);
2391 misalign = 0;
2393 else
2394 misalign = DR_MISALIGNMENT (dr);
2395 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2396 misalign);
2397 tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)),
2398 misalign ? least_bit_hwi (misalign) : align);
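	  /* For example, with DR_TARGET_ALIGNMENT == 32 and a known
	     misalignment of 8 bytes, least_bit_hwi (8) == 8, so the
	     alignment operand of the IFN_MASK_STORE only promises 8-byte
	     alignment; a fully aligned access passes the full 32.  */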
2399 gcall *call
2400 = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
2401 ptr, vec_mask, vec_rhs);
2402 gimple_call_set_nothrow (call, true);
2403 new_stmt = call;
2404 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2405 if (i == 0)
2406 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2407 else
2408 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2409 prev_stmt_info = vinfo_for_stmt (new_stmt);
2412 else
2414 tree vec_mask = NULL_TREE;
2415 prev_stmt_info = NULL;
2416 vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
2417 for (i = 0; i < ncopies; i++)
2419 unsigned align, misalign;
2421 if (i == 0)
2423 vec_mask = vect_get_vec_def_for_operand (mask, stmt,
2424 mask_vectype);
2425 dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
2426 NULL_TREE, &dummy, gsi,
2427 &ptr_incr, false, &inv_p);
2428 gcc_assert (!inv_p);
2430 else
2432 vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
2433 vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
2434 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
2435 TYPE_SIZE_UNIT (vectype));
2438 align = DR_TARGET_ALIGNMENT (dr);
2439 if (aligned_access_p (dr))
2440 misalign = 0;
2441 else if (DR_MISALIGNMENT (dr) == -1)
2443 align = TYPE_ALIGN_UNIT (elem_type);
2444 misalign = 0;
2446 else
2447 misalign = DR_MISALIGNMENT (dr);
2448 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
2449 misalign);
2450 tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)),
2451 misalign ? least_bit_hwi (misalign) : align);
2452 gcall *call
2453 = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
2454 ptr, vec_mask);
2455 gimple_call_set_lhs (call, make_ssa_name (vec_dest));
2456 gimple_call_set_nothrow (call, true);
2457 vect_finish_stmt_generation (stmt, call, gsi);
2458 if (i == 0)
2459 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = call;
2460 else
2461 STMT_VINFO_RELATED_STMT (prev_stmt_info) = call;
2462 prev_stmt_info = vinfo_for_stmt (call);
2466 return true;
2469 /* Check and perform vectorization of BUILT_IN_BSWAP{16,32,64}. */
2471 static bool
2472 vectorizable_bswap (gimple *stmt, gimple_stmt_iterator *gsi,
2473 gimple **vec_stmt, slp_tree slp_node,
2474 tree vectype_in, enum vect_def_type *dt)
2476 tree op, vectype;
2477 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2478 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2479 unsigned ncopies;
2480 unsigned HOST_WIDE_INT nunits, num_bytes;
2482 op = gimple_call_arg (stmt, 0);
2483 vectype = STMT_VINFO_VECTYPE (stmt_info);
2485 if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nunits))
2486 return false;
2488 /* Multiple types in SLP are handled by creating the appropriate number of
2489 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2490 case of SLP. */
2491 if (slp_node)
2492 ncopies = 1;
2493 else
2494 ncopies = vect_get_num_copies (loop_vinfo, vectype);
2496 gcc_assert (ncopies >= 1);
2498 tree char_vectype = get_same_sized_vectype (char_type_node, vectype_in);
2499 if (! char_vectype)
2500 return false;
2502 if (!TYPE_VECTOR_SUBPARTS (char_vectype).is_constant (&num_bytes))
2503 return false;
2505 unsigned word_bytes = num_bytes / nunits;
2507 /* The encoding uses one stepped pattern for each byte in the word. */
2508 vec_perm_builder elts (num_bytes, word_bytes, 3);
2509 for (unsigned i = 0; i < 3; ++i)
2510 for (unsigned j = 0; j < word_bytes; ++j)
2511 elts.quick_push ((i + 1) * word_bytes - j - 1);
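  /* For example, for bswap32 on a 16-byte vector (word_bytes == 4,
     num_bytes == 16) these patterns expand to the selector
     { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 },
     i.e. the bytes of each 4-byte word reversed in place.  */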
2513 vec_perm_indices indices (elts, 1, num_bytes);
2514 if (!can_vec_perm_const_p (TYPE_MODE (char_vectype), indices))
2515 return false;
2517 if (! vec_stmt)
2519 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2520 if (dump_enabled_p ())
2521 dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_bswap ==="
2522 "\n");
2523 if (! PURE_SLP_STMT (stmt_info))
2525 add_stmt_cost (stmt_info->vinfo->target_cost_data,
2526 1, vector_stmt, stmt_info, 0, vect_prologue);
2527 add_stmt_cost (stmt_info->vinfo->target_cost_data,
2528 ncopies, vec_perm, stmt_info, 0, vect_body);
2530 return true;
2533 tree bswap_vconst = vec_perm_indices_to_tree (char_vectype, indices);
2535 /* Transform. */
2536 vec<tree> vec_oprnds = vNULL;
2537 gimple *new_stmt = NULL;
2538 stmt_vec_info prev_stmt_info = NULL;
2539 for (unsigned j = 0; j < ncopies; j++)
2541 /* Handle uses. */
2542 if (j == 0)
2543 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
2544 else
2545 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
2547       /* Arguments are ready.  Create the new vector stmt.  */
2548 unsigned i;
2549 tree vop;
2550 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
2552 tree tem = make_ssa_name (char_vectype);
2553 new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
2554 char_vectype, vop));
2555 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2556 tree tem2 = make_ssa_name (char_vectype);
2557 new_stmt = gimple_build_assign (tem2, VEC_PERM_EXPR,
2558 tem, tem, bswap_vconst);
2559 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2560 tem = make_ssa_name (vectype);
2561 new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
2562 vectype, tem2));
2563 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2564 if (slp_node)
2565 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2568 if (slp_node)
2569 continue;
2571 if (j == 0)
2572 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2573 else
2574 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2576 prev_stmt_info = vinfo_for_stmt (new_stmt);
2579 vec_oprnds.release ();
2580 return true;
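/* A minimal standalone sketch (not GCC API) of what the generated
   VIEW_CONVERT / VEC_PERM_EXPR / VIEW_CONVERT sequence computes,
   scalar version for a single 32-bit lane: reinterpret as bytes,
   reverse them, reinterpret back.  Kept out of the build.  */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t
bswap32_via_bytes (uint32_t x)
{
  unsigned char b[4], r[4];
  memcpy (b, &x, 4);
  for (int i = 0; i < 4; ++i)
    r[i] = b[3 - i];
  uint32_t y;
  memcpy (&y, r, 4);
  return y;
}

int
main (void)
{
  printf ("%08x\n", bswap32_via_bytes (0x11223344));	/* 44332211  */
  return 0;
}
#endif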
2583 /* Return true if vector types VECTYPE_IN and VECTYPE_OUT have
2584 integer elements and if we can narrow VECTYPE_IN to VECTYPE_OUT
2585 in a single step. On success, store the binary pack code in
2586 *CONVERT_CODE. */
2588 static bool
2589 simple_integer_narrowing (tree vectype_out, tree vectype_in,
2590 tree_code *convert_code)
2592 if (!INTEGRAL_TYPE_P (TREE_TYPE (vectype_out))
2593 || !INTEGRAL_TYPE_P (TREE_TYPE (vectype_in)))
2594 return false;
2596 tree_code code;
2597 int multi_step_cvt = 0;
2598 auto_vec <tree, 8> interm_types;
2599 if (!supportable_narrowing_operation (NOP_EXPR, vectype_out, vectype_in,
2600 &code, &multi_step_cvt,
2601 &interm_types)
2602 || multi_step_cvt)
2603 return false;
2605 *convert_code = code;
2606 return true;
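/* For example, with VECTYPE_IN == V4SI and VECTYPE_OUT == V8HI two int
   vectors can be packed into one short vector in a single step
   (typically VEC_PACK_TRUNC_EXPR), so *CONVERT_CODE is set and true is
   returned; narrowing int all the way down to char would need an
   intermediate type (MULTI_STEP_CVT != 0) and is rejected here.  */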
2609 /* Function vectorizable_call.
2611 Check if GS performs a function call that can be vectorized.
2612    If VEC_STMT is also passed, vectorize GS: create a vectorized
2613 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
2614 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
2616 static bool
2617 vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
2618 slp_tree slp_node)
2620 gcall *stmt;
2621 tree vec_dest;
2622 tree scalar_dest;
2623 tree op, type;
2624 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
2625 stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info;
2626 tree vectype_out, vectype_in;
2627 poly_uint64 nunits_in;
2628 poly_uint64 nunits_out;
2629 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2630 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2631 vec_info *vinfo = stmt_info->vinfo;
2632 tree fndecl, new_temp, rhs_type;
2633 gimple *def_stmt;
2634 enum vect_def_type dt[3]
2635 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
2636 int ndts = 3;
2637 gimple *new_stmt = NULL;
2638 int ncopies, j;
2639 vec<tree> vargs = vNULL;
2640 enum { NARROW, NONE, WIDEN } modifier;
2641 size_t i, nargs;
2642 tree lhs;
2644 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
2645 return false;
2647 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
2648 && ! vec_stmt)
2649 return false;
2651 /* Is GS a vectorizable call? */
2652 stmt = dyn_cast <gcall *> (gs);
2653 if (!stmt)
2654 return false;
2656 if (gimple_call_internal_p (stmt)
2657 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
2658 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
2659 return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
2660 slp_node);
2662 if (gimple_call_lhs (stmt) == NULL_TREE
2663 || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
2664 return false;
2666 gcc_checking_assert (!stmt_can_throw_internal (stmt));
2668 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
2670 /* Process function arguments. */
2671 rhs_type = NULL_TREE;
2672 vectype_in = NULL_TREE;
2673 nargs = gimple_call_num_args (stmt);
2675   /* Bail out if the function has more than three arguments; we do not have
2676 interesting builtin functions to vectorize with more than two arguments
2677 except for fma. No arguments is also not good. */
2678 if (nargs == 0 || nargs > 3)
2679 return false;
2681 /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic. */
2682 if (gimple_call_internal_p (stmt)
2683 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2685 nargs = 0;
2686 rhs_type = unsigned_type_node;
2689 for (i = 0; i < nargs; i++)
2691 tree opvectype;
2693 op = gimple_call_arg (stmt, i);
2695 /* We can only handle calls with arguments of the same type. */
2696 if (rhs_type
2697 && !types_compatible_p (rhs_type, TREE_TYPE (op)))
2699 if (dump_enabled_p ())
2700 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2701 "argument types differ.\n");
2702 return false;
2704 if (!rhs_type)
2705 rhs_type = TREE_TYPE (op);
2707 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[i], &opvectype))
2709 if (dump_enabled_p ())
2710 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2711 "use not simple.\n");
2712 return false;
2715 if (!vectype_in)
2716 vectype_in = opvectype;
2717 else if (opvectype
2718 && opvectype != vectype_in)
2720 if (dump_enabled_p ())
2721 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2722 "argument vector types differ.\n");
2723 return false;
2726 /* If all arguments are external or constant defs use a vector type with
2727 the same size as the output vector type. */
2728 if (!vectype_in)
2729 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
2730 if (vec_stmt)
2731 gcc_assert (vectype_in);
2732 if (!vectype_in)
2734 if (dump_enabled_p ())
2736 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2737 "no vectype for scalar type ");
2738 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
2739 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2742 return false;
2745 /* FORNOW */
2746 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
2747 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
2748 if (known_eq (nunits_in * 2, nunits_out))
2749 modifier = NARROW;
2750 else if (known_eq (nunits_out, nunits_in))
2751 modifier = NONE;
2752 else if (known_eq (nunits_out * 2, nunits_in))
2753 modifier = WIDEN;
2754 else
2755 return false;
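  /* For example, VECTYPE_IN == V8HI and VECTYPE_OUT == V4SI (8 lanes in,
     4 out) gives WIDEN, the reverse pairing gives NARROW, and equal lane
     counts give NONE.  */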
2757 /* We only handle functions that do not read or clobber memory. */
2758 if (gimple_vuse (stmt))
2760 if (dump_enabled_p ())
2761 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2762 "function reads from or writes to memory.\n");
2763 return false;
2766 /* For now, we only vectorize functions if a target specific builtin
2767 is available. TODO -- in some cases, it might be profitable to
2768 insert the calls for pieces of the vector, in order to be able
2769 to vectorize other operations in the loop. */
2770 fndecl = NULL_TREE;
2771 internal_fn ifn = IFN_LAST;
2772 combined_fn cfn = gimple_call_combined_fn (stmt);
2773 tree callee = gimple_call_fndecl (stmt);
2775 /* First try using an internal function. */
2776 tree_code convert_code = ERROR_MARK;
2777 if (cfn != CFN_LAST
2778 && (modifier == NONE
2779 || (modifier == NARROW
2780 && simple_integer_narrowing (vectype_out, vectype_in,
2781 &convert_code))))
2782 ifn = vectorizable_internal_function (cfn, callee, vectype_out,
2783 vectype_in);
2785 /* If that fails, try asking for a target-specific built-in function. */
2786 if (ifn == IFN_LAST)
2788 if (cfn != CFN_LAST)
2789 fndecl = targetm.vectorize.builtin_vectorized_function
2790 (cfn, vectype_out, vectype_in);
2791 else
2792 fndecl = targetm.vectorize.builtin_md_vectorized_function
2793 (callee, vectype_out, vectype_in);
2796 if (ifn == IFN_LAST && !fndecl)
2798 if (cfn == CFN_GOMP_SIMD_LANE
2799 && !slp_node
2800 && loop_vinfo
2801 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2802 && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
2803 && LOOP_VINFO_LOOP (loop_vinfo)->simduid
2804 == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
2806 /* We can handle IFN_GOMP_SIMD_LANE by returning a
2807 { 0, 1, 2, ... vf - 1 } vector. */
2808 gcc_assert (nargs == 0);
2810 else if (modifier == NONE
2811 && (gimple_call_builtin_p (stmt, BUILT_IN_BSWAP16)
2812 || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP32)
2813 || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP64)))
2814 return vectorizable_bswap (stmt, gsi, vec_stmt, slp_node,
2815 vectype_in, dt);
2816 else
2818 if (dump_enabled_p ())
2819 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2820 "function is not vectorizable.\n");
2821 return false;
2825 if (slp_node)
2826 ncopies = 1;
2827 else if (modifier == NARROW && ifn == IFN_LAST)
2828 ncopies = vect_get_num_copies (loop_vinfo, vectype_out);
2829 else
2830 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
2832 /* Sanity check: make sure that at least one copy of the vectorized stmt
2833 needs to be generated. */
2834 gcc_assert (ncopies >= 1);
2836 if (!vec_stmt) /* transformation not required. */
2838 STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
2839 if (dump_enabled_p ())
2840 dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
2841 "\n");
2842 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
2843 if (ifn != IFN_LAST && modifier == NARROW && !slp_node)
2844 add_stmt_cost (stmt_info->vinfo->target_cost_data, ncopies / 2,
2845 vec_promote_demote, stmt_info, 0, vect_body);
2847 return true;
2850 /* Transform. */
2852 if (dump_enabled_p ())
2853 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
2855 /* Handle def. */
2856 scalar_dest = gimple_call_lhs (stmt);
2857 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
2859 prev_stmt_info = NULL;
2860 if (modifier == NONE || ifn != IFN_LAST)
2862 tree prev_res = NULL_TREE;
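      /* In the NARROW case the copies come in pairs: an even J only
	 produces a half-width result remembered in PREV_RES, and the
	 following odd J emits the CONVERT_CODE statement packing PREV_RES
	 with the new half, so the first full vector statement appears at
	 J == 1 (hence the test against 1 further down).  */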
2863 for (j = 0; j < ncopies; ++j)
2865 /* Build argument list for the vectorized call. */
2866 if (j == 0)
2867 vargs.create (nargs);
2868 else
2869 vargs.truncate (0);
2871 if (slp_node)
2873 auto_vec<vec<tree> > vec_defs (nargs);
2874 vec<tree> vec_oprnds0;
2876 for (i = 0; i < nargs; i++)
2877 vargs.quick_push (gimple_call_arg (stmt, i));
2878 vect_get_slp_defs (vargs, slp_node, &vec_defs);
2879 vec_oprnds0 = vec_defs[0];
2881 /* Arguments are ready. Create the new vector stmt. */
2882 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
2884 size_t k;
2885 for (k = 0; k < nargs; k++)
2887 vec<tree> vec_oprndsk = vec_defs[k];
2888 vargs[k] = vec_oprndsk[i];
2890 if (modifier == NARROW)
2892 tree half_res = make_ssa_name (vectype_in);
2893 gcall *call
2894 = gimple_build_call_internal_vec (ifn, vargs);
2895 gimple_call_set_lhs (call, half_res);
2896 gimple_call_set_nothrow (call, true);
2897 new_stmt = call;
2898 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2899 if ((i & 1) == 0)
2901 prev_res = half_res;
2902 continue;
2904 new_temp = make_ssa_name (vec_dest);
2905 new_stmt = gimple_build_assign (new_temp, convert_code,
2906 prev_res, half_res);
2908 else
2910 gcall *call;
2911 if (ifn != IFN_LAST)
2912 call = gimple_build_call_internal_vec (ifn, vargs);
2913 else
2914 call = gimple_build_call_vec (fndecl, vargs);
2915 new_temp = make_ssa_name (vec_dest, call);
2916 gimple_call_set_lhs (call, new_temp);
2917 gimple_call_set_nothrow (call, true);
2918 new_stmt = call;
2920 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2921 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
2924 for (i = 0; i < nargs; i++)
2926 vec<tree> vec_oprndsi = vec_defs[i];
2927 vec_oprndsi.release ();
2929 continue;
2932 for (i = 0; i < nargs; i++)
2934 op = gimple_call_arg (stmt, i);
2935 if (j == 0)
2936 vec_oprnd0
2937 = vect_get_vec_def_for_operand (op, stmt);
2938 else
2940 vec_oprnd0 = gimple_call_arg (new_stmt, i);
2941 vec_oprnd0
2942 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
2945 vargs.quick_push (vec_oprnd0);
2948 if (gimple_call_internal_p (stmt)
2949 && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
2951 tree cst = build_index_vector (vectype_out, j * nunits_out, 1);
2952 tree new_var
2953 = vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_");
2954 gimple *init_stmt = gimple_build_assign (new_var, cst);
2955 vect_init_vector_1 (stmt, init_stmt, NULL);
2956 new_temp = make_ssa_name (vec_dest);
2957 new_stmt = gimple_build_assign (new_temp, new_var);
2959 else if (modifier == NARROW)
2961 tree half_res = make_ssa_name (vectype_in);
2962 gcall *call = gimple_build_call_internal_vec (ifn, vargs);
2963 gimple_call_set_lhs (call, half_res);
2964 gimple_call_set_nothrow (call, true);
2965 new_stmt = call;
2966 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2967 if ((j & 1) == 0)
2969 prev_res = half_res;
2970 continue;
2972 new_temp = make_ssa_name (vec_dest);
2973 new_stmt = gimple_build_assign (new_temp, convert_code,
2974 prev_res, half_res);
2976 else
2978 gcall *call;
2979 if (ifn != IFN_LAST)
2980 call = gimple_build_call_internal_vec (ifn, vargs);
2981 else
2982 call = gimple_build_call_vec (fndecl, vargs);
2983 	      new_temp = make_ssa_name (vec_dest, call);
2984 gimple_call_set_lhs (call, new_temp);
2985 gimple_call_set_nothrow (call, true);
2986 new_stmt = call;
2988 vect_finish_stmt_generation (stmt, new_stmt, gsi);
2990 if (j == (modifier == NARROW ? 1 : 0))
2991 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
2992 else
2993 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
2995 prev_stmt_info = vinfo_for_stmt (new_stmt);
2998 else if (modifier == NARROW)
3000 for (j = 0; j < ncopies; ++j)
3002 /* Build argument list for the vectorized call. */
3003 if (j == 0)
3004 vargs.create (nargs * 2);
3005 else
3006 vargs.truncate (0);
3008 if (slp_node)
3010 auto_vec<vec<tree> > vec_defs (nargs);
3011 vec<tree> vec_oprnds0;
3013 for (i = 0; i < nargs; i++)
3014 vargs.quick_push (gimple_call_arg (stmt, i));
3015 vect_get_slp_defs (vargs, slp_node, &vec_defs);
3016 vec_oprnds0 = vec_defs[0];
3018 /* Arguments are ready. Create the new vector stmt. */
3019 for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
3021 size_t k;
3022 vargs.truncate (0);
3023 for (k = 0; k < nargs; k++)
3025 vec<tree> vec_oprndsk = vec_defs[k];
3026 vargs.quick_push (vec_oprndsk[i]);
3027 vargs.quick_push (vec_oprndsk[i + 1]);
3029 gcall *call;
3030 if (ifn != IFN_LAST)
3031 call = gimple_build_call_internal_vec (ifn, vargs);
3032 else
3033 call = gimple_build_call_vec (fndecl, vargs);
3034 new_temp = make_ssa_name (vec_dest, call);
3035 gimple_call_set_lhs (call, new_temp);
3036 gimple_call_set_nothrow (call, true);
3037 new_stmt = call;
3038 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3039 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3042 for (i = 0; i < nargs; i++)
3044 vec<tree> vec_oprndsi = vec_defs[i];
3045 vec_oprndsi.release ();
3047 continue;
3050 for (i = 0; i < nargs; i++)
3052 op = gimple_call_arg (stmt, i);
3053 if (j == 0)
3055 vec_oprnd0
3056 = vect_get_vec_def_for_operand (op, stmt);
3057 vec_oprnd1
3058 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
3060 else
3062 vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
3063 vec_oprnd0
3064 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
3065 vec_oprnd1
3066 = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
3069 vargs.quick_push (vec_oprnd0);
3070 vargs.quick_push (vec_oprnd1);
3073 new_stmt = gimple_build_call_vec (fndecl, vargs);
3074 new_temp = make_ssa_name (vec_dest, new_stmt);
3075 gimple_call_set_lhs (new_stmt, new_temp);
3076 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3078 if (j == 0)
3079 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3080 else
3081 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3083 prev_stmt_info = vinfo_for_stmt (new_stmt);
3086 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
3088 else
3089 /* No current target implements this case. */
3090 return false;
3092 vargs.release ();
3094 /* The call in STMT might prevent it from being removed in dce.
3095 We however cannot remove it here, due to the way the ssa name
3096 it defines is mapped to the new definition. So just replace
3097      the rhs of the statement with something harmless.  */
3099 if (slp_node)
3100 return true;
3102 type = TREE_TYPE (scalar_dest);
3103 if (is_pattern_stmt_p (stmt_info))
3104 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
3105 else
3106 lhs = gimple_call_lhs (stmt);
3108 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
3109 set_vinfo_for_stmt (new_stmt, stmt_info);
3110 set_vinfo_for_stmt (stmt, NULL);
3111 STMT_VINFO_STMT (stmt_info) = new_stmt;
3112 gsi_replace (gsi, new_stmt, false);
3114 return true;
3118 struct simd_call_arg_info
3120 tree vectype;
3121 tree op;
3122 HOST_WIDE_INT linear_step;
3123 enum vect_def_type dt;
3124 unsigned int align;
3125 bool simd_lane_linear;
3128 /* Helper function of vectorizable_simd_clone_call. If OP, an SSA_NAME,
3129    is linear within a simd lane (but not within the whole loop), note it in
3130 *ARGINFO. */
3132 static void
3133 vect_simd_lane_linear (tree op, struct loop *loop,
3134 struct simd_call_arg_info *arginfo)
3136 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
3138 if (!is_gimple_assign (def_stmt)
3139 || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
3140 || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
3141 return;
3143 tree base = gimple_assign_rhs1 (def_stmt);
3144 HOST_WIDE_INT linear_step = 0;
3145 tree v = gimple_assign_rhs2 (def_stmt);
3146 while (TREE_CODE (v) == SSA_NAME)
3148 tree t;
3149 def_stmt = SSA_NAME_DEF_STMT (v);
3150 if (is_gimple_assign (def_stmt))
3151 switch (gimple_assign_rhs_code (def_stmt))
3153 case PLUS_EXPR:
3154 t = gimple_assign_rhs2 (def_stmt);
3155 if (linear_step || TREE_CODE (t) != INTEGER_CST)
3156 return;
3157 base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
3158 v = gimple_assign_rhs1 (def_stmt);
3159 continue;
3160 case MULT_EXPR:
3161 t = gimple_assign_rhs2 (def_stmt);
3162 if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
3163 return;
3164 linear_step = tree_to_shwi (t);
3165 v = gimple_assign_rhs1 (def_stmt);
3166 continue;
3167 CASE_CONVERT:
3168 t = gimple_assign_rhs1 (def_stmt);
3169 if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
3170 || (TYPE_PRECISION (TREE_TYPE (v))
3171 < TYPE_PRECISION (TREE_TYPE (t))))
3172 return;
3173 if (!linear_step)
3174 linear_step = 1;
3175 v = t;
3176 continue;
3177 default:
3178 return;
3180 else if (gimple_call_internal_p (def_stmt, IFN_GOMP_SIMD_LANE)
3181 && loop->simduid
3182 && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
3183 && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
3184 == loop->simduid))
3186 if (!linear_step)
3187 linear_step = 1;
3188 arginfo->linear_step = linear_step;
3189 arginfo->op = base;
3190 arginfo->simd_lane_linear = true;
3191 return;
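/* For example (schematic GIMPLE), vect_simd_lane_linear recognizes the
   chain

       _1 = GOMP_SIMD_LANE (simduid.0);
       _2 = (sizetype) _1;
       _3 = _2 * 8;
       op_4 = &array + _3;

   and records ARGINFO->op == &array with ARGINFO->linear_step == 8.  */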
3196 /* Return the number of elements in vector type VECTYPE, which is associated
3197 with a SIMD clone. At present these vectors always have a constant
3198 length. */
3200 static unsigned HOST_WIDE_INT
3201 simd_clone_subparts (tree vectype)
3203 return TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
3206 /* Function vectorizable_simd_clone_call.
3208 Check if STMT performs a function call that can be vectorized
3209 by calling a simd clone of the function.
3210 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
3211 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
3212 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
3214 static bool
3215 vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
3216 gimple **vec_stmt, slp_tree slp_node)
3218 tree vec_dest;
3219 tree scalar_dest;
3220 tree op, type;
3221 tree vec_oprnd0 = NULL_TREE;
3222 stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
3223 tree vectype;
3224 unsigned int nunits;
3225 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3226 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
3227 vec_info *vinfo = stmt_info->vinfo;
3228 struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
3229 tree fndecl, new_temp;
3230 gimple *def_stmt;
3231 gimple *new_stmt = NULL;
3232 int ncopies, j;
3233 auto_vec<simd_call_arg_info> arginfo;
3234 vec<tree> vargs = vNULL;
3235 size_t i, nargs;
3236 tree lhs, rtype, ratype;
3237 vec<constructor_elt, va_gc> *ret_ctor_elts = NULL;
3239 /* Is STMT a vectorizable call? */
3240 if (!is_gimple_call (stmt))
3241 return false;
3243 fndecl = gimple_call_fndecl (stmt);
3244 if (fndecl == NULL_TREE)
3245 return false;
3247 struct cgraph_node *node = cgraph_node::get (fndecl);
3248 if (node == NULL || node->simd_clones == NULL)
3249 return false;
3251 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
3252 return false;
3254 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
3255 && ! vec_stmt)
3256 return false;
3258 if (gimple_call_lhs (stmt)
3259 && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
3260 return false;
3262 gcc_checking_assert (!stmt_can_throw_internal (stmt));
3264 vectype = STMT_VINFO_VECTYPE (stmt_info);
3266 if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
3267 return false;
3269 /* FORNOW */
3270 if (slp_node)
3271 return false;
3273 /* Process function arguments. */
3274 nargs = gimple_call_num_args (stmt);
3276 /* Bail out if the function has zero arguments. */
3277 if (nargs == 0)
3278 return false;
3280 arginfo.reserve (nargs, true);
3282 for (i = 0; i < nargs; i++)
3284 simd_call_arg_info thisarginfo;
3285 affine_iv iv;
3287 thisarginfo.linear_step = 0;
3288 thisarginfo.align = 0;
3289 thisarginfo.op = NULL_TREE;
3290 thisarginfo.simd_lane_linear = false;
3292 op = gimple_call_arg (stmt, i);
3293 if (!vect_is_simple_use (op, vinfo, &def_stmt, &thisarginfo.dt,
3294 &thisarginfo.vectype)
3295 || thisarginfo.dt == vect_uninitialized_def)
3297 if (dump_enabled_p ())
3298 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3299 "use not simple.\n");
3300 return false;
3303 if (thisarginfo.dt == vect_constant_def
3304 || thisarginfo.dt == vect_external_def)
3305 gcc_assert (thisarginfo.vectype == NULL_TREE);
3306 else
3307 gcc_assert (thisarginfo.vectype != NULL_TREE);
3309 /* For linear arguments, the analyze phase should have saved
3310 the base and step in STMT_VINFO_SIMD_CLONE_INFO. */
3311 if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
3312 && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
3314 gcc_assert (vec_stmt);
3315 thisarginfo.linear_step
3316 = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
3317 thisarginfo.op
3318 = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
3319 thisarginfo.simd_lane_linear
3320 = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
3321 == boolean_true_node);
3322 /* If loop has been peeled for alignment, we need to adjust it. */
3323 tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
3324 tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
3325 if (n1 != n2 && !thisarginfo.simd_lane_linear)
3327 tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
3328 tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
3329 tree opt = TREE_TYPE (thisarginfo.op);
3330 bias = fold_convert (TREE_TYPE (step), bias);
3331 bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
3332 thisarginfo.op
3333 = fold_build2 (POINTER_TYPE_P (opt)
3334 ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
3335 thisarginfo.op, bias);
3338 else if (!vec_stmt
3339 && thisarginfo.dt != vect_constant_def
3340 && thisarginfo.dt != vect_external_def
3341 && loop_vinfo
3342 && TREE_CODE (op) == SSA_NAME
3343 && simple_iv (loop, loop_containing_stmt (stmt), op,
3344 &iv, false)
3345 && tree_fits_shwi_p (iv.step))
3347 thisarginfo.linear_step = tree_to_shwi (iv.step);
3348 thisarginfo.op = iv.base;
3350 else if ((thisarginfo.dt == vect_constant_def
3351 || thisarginfo.dt == vect_external_def)
3352 && POINTER_TYPE_P (TREE_TYPE (op)))
3353 thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
3354 /* Addresses of array elements indexed by GOMP_SIMD_LANE are
3355 linear too. */
3356 if (POINTER_TYPE_P (TREE_TYPE (op))
3357 && !thisarginfo.linear_step
3358 && !vec_stmt
3359 && thisarginfo.dt != vect_constant_def
3360 && thisarginfo.dt != vect_external_def
3361 && loop_vinfo
3362 && !slp_node
3363 && TREE_CODE (op) == SSA_NAME)
3364 vect_simd_lane_linear (op, loop, &thisarginfo);
3366 arginfo.quick_push (thisarginfo);
3369 unsigned HOST_WIDE_INT vf;
3370 if (!LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
3372 if (dump_enabled_p ())
3373 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3374 "not considering SIMD clones; not yet supported"
3375 " for variable-width vectors.\n");
3376       return false;
3379 unsigned int badness = 0;
3380 struct cgraph_node *bestn = NULL;
3381 if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
3382 bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
3383 else
3384 for (struct cgraph_node *n = node->simd_clones; n != NULL;
3385 n = n->simdclone->next_clone)
3387 unsigned int this_badness = 0;
3388 if (n->simdclone->simdlen > vf
3389 || n->simdclone->nargs != nargs)
3390 continue;
3391 if (n->simdclone->simdlen < vf)
3392 this_badness += (exact_log2 (vf)
3393 - exact_log2 (n->simdclone->simdlen)) * 1024;
3394 if (n->simdclone->inbranch)
3395 this_badness += 2048;
3396 int target_badness = targetm.simd_clone.usable (n);
3397 if (target_badness < 0)
3398 continue;
3399 this_badness += target_badness * 512;
3400 /* FORNOW: Have to add code to add the mask argument. */
3401 if (n->simdclone->inbranch)
3402 continue;
3403 for (i = 0; i < nargs; i++)
3405 switch (n->simdclone->args[i].arg_type)
3407 case SIMD_CLONE_ARG_TYPE_VECTOR:
3408 if (!useless_type_conversion_p
3409 (n->simdclone->args[i].orig_type,
3410 TREE_TYPE (gimple_call_arg (stmt, i))))
3411 i = -1;
3412 else if (arginfo[i].dt == vect_constant_def
3413 || arginfo[i].dt == vect_external_def
3414 || arginfo[i].linear_step)
3415 this_badness += 64;
3416 break;
3417 case SIMD_CLONE_ARG_TYPE_UNIFORM:
3418 if (arginfo[i].dt != vect_constant_def
3419 && arginfo[i].dt != vect_external_def)
3420 i = -1;
3421 break;
3422 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
3423 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
3424 if (arginfo[i].dt == vect_constant_def
3425 || arginfo[i].dt == vect_external_def
3426 || (arginfo[i].linear_step
3427 != n->simdclone->args[i].linear_step))
3428 i = -1;
3429 break;
3430 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
3431 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
3432 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
3433 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
3434 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
3435 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
3436 /* FORNOW */
3437 i = -1;
3438 break;
3439 case SIMD_CLONE_ARG_TYPE_MASK:
3440 gcc_unreachable ();
3442 if (i == (size_t) -1)
3443 break;
3444 if (n->simdclone->args[i].alignment > arginfo[i].align)
3446 i = -1;
3447 break;
3449 if (arginfo[i].align)
3450 this_badness += (exact_log2 (arginfo[i].align)
3451 - exact_log2 (n->simdclone->args[i].alignment));
3453 if (i == (size_t) -1)
3454 continue;
3455 if (bestn == NULL || this_badness < badness)
3457 bestn = n;
3458 badness = this_badness;
3462 if (bestn == NULL)
3463 return false;
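  /* For example, with vf == 8 a notinbranch clone with simdlen == 8 and
     no target penalty scores 0, while a simdlen == 2 clone scores
     (3 - 1) * 1024 == 2048 and loses; an inbranch clone pays a further
     2048 (and is currently skipped altogether), and each vector argument
     that is really uniform or linear pays 64.  */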
3465 for (i = 0; i < nargs; i++)
3466 if ((arginfo[i].dt == vect_constant_def
3467 || arginfo[i].dt == vect_external_def)
3468 && bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
3470 arginfo[i].vectype
3471 = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
3472 i)));
3473 if (arginfo[i].vectype == NULL
3474 || (simd_clone_subparts (arginfo[i].vectype)
3475 > bestn->simdclone->simdlen))
3476 return false;
3479 fndecl = bestn->decl;
3480 nunits = bestn->simdclone->simdlen;
3481 ncopies = vf / nunits;
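  /* For example, with vf == 16 and a chosen clone whose simdlen is 4,
     NCOPIES == 4 calls to the clone are emitted per scalar call.  */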
3483   /* If the function isn't const, only allow it in simd loops where the user
3484 has asserted that at least nunits consecutive iterations can be
3485 performed using SIMD instructions. */
3486 if ((loop == NULL || (unsigned) loop->safelen < nunits)
3487 && gimple_vuse (stmt))
3488 return false;
3490 /* Sanity check: make sure that at least one copy of the vectorized stmt
3491 needs to be generated. */
3492 gcc_assert (ncopies >= 1);
3494 if (!vec_stmt) /* transformation not required. */
3496 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
3497 for (i = 0; i < nargs; i++)
3498 if ((bestn->simdclone->args[i].arg_type
3499 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
3500 || (bestn->simdclone->args[i].arg_type
3501 == SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP))
3503 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
3504 + 1);
3505 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
3506 tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
3507 ? size_type_node : TREE_TYPE (arginfo[i].op);
3508 tree ls = build_int_cst (lst, arginfo[i].linear_step);
3509 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
3510 tree sll = arginfo[i].simd_lane_linear
3511 ? boolean_true_node : boolean_false_node;
3512 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
3514 STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
3515 if (dump_enabled_p ())
3516 dump_printf_loc (MSG_NOTE, vect_location,
3517 "=== vectorizable_simd_clone_call ===\n");
3518 /* vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
3519 return true;
3522 /* Transform. */
3524 if (dump_enabled_p ())
3525 dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
3527 /* Handle def. */
3528 scalar_dest = gimple_call_lhs (stmt);
3529 vec_dest = NULL_TREE;
3530 rtype = NULL_TREE;
3531 ratype = NULL_TREE;
3532 if (scalar_dest)
3534 vec_dest = vect_create_destination_var (scalar_dest, vectype);
3535 rtype = TREE_TYPE (TREE_TYPE (fndecl));
3536 if (TREE_CODE (rtype) == ARRAY_TYPE)
3538 ratype = rtype;
3539 rtype = TREE_TYPE (ratype);
3543 prev_stmt_info = NULL;
3544 for (j = 0; j < ncopies; ++j)
3546 /* Build argument list for the vectorized call. */
3547 if (j == 0)
3548 vargs.create (nargs);
3549 else
3550 vargs.truncate (0);
3552 for (i = 0; i < nargs; i++)
3554 unsigned int k, l, m, o;
3555 tree atype;
3556 op = gimple_call_arg (stmt, i);
3557 switch (bestn->simdclone->args[i].arg_type)
3559 case SIMD_CLONE_ARG_TYPE_VECTOR:
3560 atype = bestn->simdclone->args[i].vector_type;
3561 o = nunits / simd_clone_subparts (atype);
3562 for (m = j * o; m < (j + 1) * o; m++)
3564 if (simd_clone_subparts (atype)
3565 < simd_clone_subparts (arginfo[i].vectype))
3567 poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
3568 k = (simd_clone_subparts (arginfo[i].vectype)
3569 / simd_clone_subparts (atype));
3570 gcc_assert ((k & (k - 1)) == 0);
3571 if (m == 0)
3572 vec_oprnd0
3573 = vect_get_vec_def_for_operand (op, stmt);
3574 else
3576 vec_oprnd0 = arginfo[i].op;
3577 if ((m & (k - 1)) == 0)
3578 vec_oprnd0
3579 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
3580 vec_oprnd0);
3582 arginfo[i].op = vec_oprnd0;
3583 vec_oprnd0
3584 = build3 (BIT_FIELD_REF, atype, vec_oprnd0,
3585 bitsize_int (prec),
3586 bitsize_int ((m & (k - 1)) * prec));
3587 new_stmt
3588 = gimple_build_assign (make_ssa_name (atype),
3589 vec_oprnd0);
3590 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3591 vargs.safe_push (gimple_assign_lhs (new_stmt));
3593 else
3595 k = (simd_clone_subparts (atype)
3596 / simd_clone_subparts (arginfo[i].vectype));
3597 gcc_assert ((k & (k - 1)) == 0);
3598 vec<constructor_elt, va_gc> *ctor_elts;
3599 if (k != 1)
3600 vec_alloc (ctor_elts, k);
3601 else
3602 ctor_elts = NULL;
3603 for (l = 0; l < k; l++)
3605 if (m == 0 && l == 0)
3606 vec_oprnd0
3607 = vect_get_vec_def_for_operand (op, stmt);
3608 else
3609 vec_oprnd0
3610 = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
3611 arginfo[i].op);
3612 arginfo[i].op = vec_oprnd0;
3613 if (k == 1)
3614 break;
3615 CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
3616 vec_oprnd0);
3618 if (k == 1)
3619 vargs.safe_push (vec_oprnd0);
3620 else
3622 vec_oprnd0 = build_constructor (atype, ctor_elts);
3623 new_stmt
3624 = gimple_build_assign (make_ssa_name (atype),
3625 vec_oprnd0);
3626 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3627 vargs.safe_push (gimple_assign_lhs (new_stmt));
3631 break;
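	    /* For example, if the loop works on V8SI but the clone takes
	       V4SI arguments, each V8SI def is split into two V4SI pieces
	       with BIT_FIELD_REFs at bit positions 0 and 128; in the
	       opposite direction two V4SI defs are glued together with a
	       CONSTRUCTOR to form the clone's V8SI argument.  */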
3632 case SIMD_CLONE_ARG_TYPE_UNIFORM:
3633 vargs.safe_push (op);
3634 break;
3635 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
3636 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
3637 if (j == 0)
3639 gimple_seq stmts;
3640 arginfo[i].op
3641 = force_gimple_operand (arginfo[i].op, &stmts, true,
3642 NULL_TREE);
3643 if (stmts != NULL)
3645 basic_block new_bb;
3646 edge pe = loop_preheader_edge (loop);
3647 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
3648 gcc_assert (!new_bb);
3650 if (arginfo[i].simd_lane_linear)
3652 vargs.safe_push (arginfo[i].op);
3653 break;
3655 tree phi_res = copy_ssa_name (op);
3656 gphi *new_phi = create_phi_node (phi_res, loop->header);
3657 set_vinfo_for_stmt (new_phi,
3658 new_stmt_vec_info (new_phi, loop_vinfo));
3659 add_phi_arg (new_phi, arginfo[i].op,
3660 loop_preheader_edge (loop), UNKNOWN_LOCATION);
3661 enum tree_code code
3662 = POINTER_TYPE_P (TREE_TYPE (op))
3663 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3664 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3665 ? sizetype : TREE_TYPE (op);
3666 widest_int cst
3667 = wi::mul (bestn->simdclone->args[i].linear_step,
3668 ncopies * nunits);
3669 tree tcst = wide_int_to_tree (type, cst);
3670 tree phi_arg = copy_ssa_name (op);
3671 new_stmt
3672 = gimple_build_assign (phi_arg, code, phi_res, tcst);
3673 gimple_stmt_iterator si = gsi_after_labels (loop->header);
3674 gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
3675 set_vinfo_for_stmt (new_stmt,
3676 new_stmt_vec_info (new_stmt, loop_vinfo));
3677 add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
3678 UNKNOWN_LOCATION);
3679 arginfo[i].op = phi_res;
3680 vargs.safe_push (phi_res);
3682 else
3684 enum tree_code code
3685 = POINTER_TYPE_P (TREE_TYPE (op))
3686 ? POINTER_PLUS_EXPR : PLUS_EXPR;
3687 tree type = POINTER_TYPE_P (TREE_TYPE (op))
3688 ? sizetype : TREE_TYPE (op);
3689 widest_int cst
3690 = wi::mul (bestn->simdclone->args[i].linear_step,
3691 j * nunits);
3692 tree tcst = wide_int_to_tree (type, cst);
3693 new_temp = make_ssa_name (TREE_TYPE (op));
3694 new_stmt = gimple_build_assign (new_temp, code,
3695 arginfo[i].op, tcst);
3696 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3697 vargs.safe_push (new_temp);
3699 break;
3700 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
3701 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
3702 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
3703 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
3704 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
3705 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
3706 default:
3707 gcc_unreachable ();
3711 new_stmt = gimple_build_call_vec (fndecl, vargs);
3712 if (vec_dest)
3714 gcc_assert (ratype || simd_clone_subparts (rtype) == nunits);
3715 if (ratype)
3716 new_temp = create_tmp_var (ratype);
3717 else if (simd_clone_subparts (vectype)
3718 == simd_clone_subparts (rtype))
3719 new_temp = make_ssa_name (vec_dest, new_stmt);
3720 else
3721 new_temp = make_ssa_name (rtype, new_stmt);
3722 gimple_call_set_lhs (new_stmt, new_temp);
3724 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3726 if (vec_dest)
3728 if (simd_clone_subparts (vectype) < nunits)
3730 unsigned int k, l;
3731 poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
3732 poly_uint64 bytes = GET_MODE_SIZE (TYPE_MODE (vectype));
3733 k = nunits / simd_clone_subparts (vectype);
3734 gcc_assert ((k & (k - 1)) == 0);
3735 for (l = 0; l < k; l++)
3737 tree t;
3738 if (ratype)
3740 t = build_fold_addr_expr (new_temp);
3741 t = build2 (MEM_REF, vectype, t,
3742 build_int_cst (TREE_TYPE (t), l * bytes));
3744 else
3745 t = build3 (BIT_FIELD_REF, vectype, new_temp,
3746 bitsize_int (prec), bitsize_int (l * prec));
3747 new_stmt
3748 = gimple_build_assign (make_ssa_name (vectype), t);
3749 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3750 if (j == 0 && l == 0)
3751 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3752 else
3753 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3755 prev_stmt_info = vinfo_for_stmt (new_stmt);
3758 if (ratype)
3760 tree clobber = build_constructor (ratype, NULL);
3761 TREE_THIS_VOLATILE (clobber) = 1;
3762 new_stmt = gimple_build_assign (new_temp, clobber);
3763 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3765 continue;
3767 else if (simd_clone_subparts (vectype) > nunits)
3769 unsigned int k = (simd_clone_subparts (vectype)
3770 / simd_clone_subparts (rtype));
3771 gcc_assert ((k & (k - 1)) == 0);
3772 if ((j & (k - 1)) == 0)
3773 vec_alloc (ret_ctor_elts, k);
3774 if (ratype)
3776 unsigned int m, o = nunits / simd_clone_subparts (rtype);
3777 for (m = 0; m < o; m++)
3779 tree tem = build4 (ARRAY_REF, rtype, new_temp,
3780 size_int (m), NULL_TREE, NULL_TREE);
3781 new_stmt
3782 = gimple_build_assign (make_ssa_name (rtype), tem);
3783 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3784 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
3785 gimple_assign_lhs (new_stmt));
3787 tree clobber = build_constructor (ratype, NULL);
3788 TREE_THIS_VOLATILE (clobber) = 1;
3789 new_stmt = gimple_build_assign (new_temp, clobber);
3790 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3792 else
3793 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
3794 if ((j & (k - 1)) != k - 1)
3795 continue;
3796 vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
3797 new_stmt
3798 = gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
3799 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3801 if ((unsigned) j == k - 1)
3802 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3803 else
3804 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3806 prev_stmt_info = vinfo_for_stmt (new_stmt);
3807 continue;
3809 else if (ratype)
3811 tree t = build_fold_addr_expr (new_temp);
3812 t = build2 (MEM_REF, vectype, t,
3813 build_int_cst (TREE_TYPE (t), 0));
3814 new_stmt
3815 = gimple_build_assign (make_ssa_name (vec_dest), t);
3816 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3817 tree clobber = build_constructor (ratype, NULL);
3818 TREE_THIS_VOLATILE (clobber) = 1;
3819 vect_finish_stmt_generation (stmt,
3820 gimple_build_assign (new_temp,
3821 clobber), gsi);
3825 if (j == 0)
3826 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
3827 else
3828 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
3830 prev_stmt_info = vinfo_for_stmt (new_stmt);
3833 vargs.release ();
3835 /* The call in STMT might prevent it from being removed in DCE.
3836 We cannot remove it here, however, due to the way the SSA name
3837 it defines is mapped to the new definition. So just replace the
3838 rhs of the statement with something harmless. */
3840 if (slp_node)
3841 return true;
3843 if (scalar_dest)
3845 type = TREE_TYPE (scalar_dest);
3846 if (is_pattern_stmt_p (stmt_info))
3847 lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
3848 else
3849 lhs = gimple_call_lhs (stmt);
3850 new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
3852 else
3853 new_stmt = gimple_build_nop ();
3854 set_vinfo_for_stmt (new_stmt, stmt_info);
3855 set_vinfo_for_stmt (stmt, NULL);
3856 STMT_VINFO_STMT (stmt_info) = new_stmt;
3857 gsi_replace (gsi, new_stmt, true);
3858 unlink_stmt_vdef (stmt);
3860 return true;
3864 /* Function vect_gen_widened_results_half
3866 Create a vector stmt whose code, number of arguments, and result
3867 variable are CODE, OP_TYPE, and VEC_DEST, and whose arguments are
3868 VEC_OPRND0 and VEC_OPRND1. The new vector stmt is to be inserted at GSI.
3869 In the case that CODE is a CALL_EXPR, this means that a call to DECL
3870 needs to be created (DECL is a function-decl of a target-builtin).
3871 STMT is the original scalar stmt that we are vectorizing. */
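 /* Illustrative sketch only: for a widening operation the caller invokes
    this function twice, once per half of the result. For instance, a
    WIDEN_MULT_EXPR is typically vectorized as a pair of calls with
    CODE == VEC_WIDEN_MULT_LO_EXPR and CODE == VEC_WIDEN_MULT_HI_EXPR
    (or two calls to a target builtin when CODE is CALL_EXPR), each
    producing one half of the widened vector. */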
3873 static gimple *
3874 vect_gen_widened_results_half (enum tree_code code,
3875 tree decl,
3876 tree vec_oprnd0, tree vec_oprnd1, int op_type,
3877 tree vec_dest, gimple_stmt_iterator *gsi,
3878 gimple *stmt)
3880 gimple *new_stmt;
3881 tree new_temp;
3883 /* Generate half of the widened result: */
3884 if (code == CALL_EXPR)
3886 /* Target specific support */
3887 if (op_type == binary_op)
3888 new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
3889 else
3890 new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
3891 new_temp = make_ssa_name (vec_dest, new_stmt);
3892 gimple_call_set_lhs (new_stmt, new_temp);
3894 else
3896 /* Generic support */
3897 gcc_assert (op_type == TREE_CODE_LENGTH (code));
3898 if (op_type != binary_op)
3899 vec_oprnd1 = NULL;
3900 new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
3901 new_temp = make_ssa_name (vec_dest, new_stmt);
3902 gimple_assign_set_lhs (new_stmt, new_temp);
3904 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3906 return new_stmt;
3910 /* Get vectorized definitions for loop-based vectorization. For the first
3911 operand we call vect_get_vec_def_for_operand() (with OPRND containing
3912 scalar operand), and for the rest we get a copy with
3913 vect_get_vec_def_for_stmt_copy() using the previous vector definition
3914 (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
3915 The vectors are collected into VEC_OPRNDS. */
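 /* A sketch of the counting only: a call with MULTI_STEP_CVT == 1 pushes
    two defs, then recurses once and pushes two more, so VEC_OPRNDS ends up
    with four vector defs: the def for the scalar OPRND followed by three
    successive stmt copies. */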
3917 static void
3918 vect_get_loop_based_defs (tree *oprnd, gimple *stmt, enum vect_def_type dt,
3919 vec<tree> *vec_oprnds, int multi_step_cvt)
3921 tree vec_oprnd;
3923 /* Get first vector operand. */
3924 /* All the vector operands except the very first one (that is scalar oprnd)
3925 are stmt copies. */
3926 if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
3927 vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt);
3928 else
3929 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);
3931 vec_oprnds->quick_push (vec_oprnd);
3933 /* Get second vector operand. */
3934 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
3935 vec_oprnds->quick_push (vec_oprnd);
3937 *oprnd = vec_oprnd;
3939 /* For conversion in multiple steps, continue to get operands
3940 recursively. */
3941 if (multi_step_cvt)
3942 vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
3946 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
3947 For multi-step conversions store the resulting vectors and call the function
3948 recursively. */
3950 static void
3951 vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
3952 int multi_step_cvt, gimple *stmt,
3953 vec<tree> vec_dsts,
3954 gimple_stmt_iterator *gsi,
3955 slp_tree slp_node, enum tree_code code,
3956 stmt_vec_info *prev_stmt_info)
3958 unsigned int i;
3959 tree vop0, vop1, new_tmp, vec_dest;
3960 gimple *new_stmt;
3961 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3963 vec_dest = vec_dsts.pop ();
3965 for (i = 0; i < vec_oprnds->length (); i += 2)
3967 /* Create demotion operation. */
3968 vop0 = (*vec_oprnds)[i];
3969 vop1 = (*vec_oprnds)[i + 1];
3970 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
3971 new_tmp = make_ssa_name (vec_dest, new_stmt);
3972 gimple_assign_set_lhs (new_stmt, new_tmp);
3973 vect_finish_stmt_generation (stmt, new_stmt, gsi);
3975 if (multi_step_cvt)
3976 /* Store the resulting vector for next recursive call. */
3977 (*vec_oprnds)[i/2] = new_tmp;
3978 else
3980 /* This is the last step of the conversion sequence. Store the
3981 vectors in SLP_NODE or in the vector info of the scalar statement
3982 (or in the STMT_VINFO_RELATED_STMT chain). */
3983 if (slp_node)
3984 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
3985 else
3987 if (!*prev_stmt_info)
3988 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
3989 else
3990 STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;
3992 *prev_stmt_info = vinfo_for_stmt (new_stmt);
3997 /* For multi-step demotion operations we first generate demotion operations
3998 from the source type to the intermediate types, and then combine the
3999 results (stored in VEC_OPRNDS) in a demotion operation to the destination
4000 type. */
4001 if (multi_step_cvt)
4003 /* At each level of recursion we have half of the operands we had at the
4004 previous level. */
4005 vec_oprnds->truncate ((i+1)/2);
4006 vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
4007 stmt, vec_dsts, gsi, slp_node,
4008 VEC_PACK_TRUNC_EXPR,
4009 prev_stmt_info);
4012 vec_dsts.quick_push (vec_dest);
4016 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
4017 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
4018 the resulting vectors and call the function recursively. */
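 /* Sketch of the bookkeeping: each input vector in VEC_OPRNDS0 yields two
    result vectors (the low and high halves produced by CODE1 and CODE2),
    so on return VEC_OPRNDS0 has been replaced by a vector twice as long,
    holding the promoted halves in order. */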
4020 static void
4021 vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
4022 vec<tree> *vec_oprnds1,
4023 gimple *stmt, tree vec_dest,
4024 gimple_stmt_iterator *gsi,
4025 enum tree_code code1,
4026 enum tree_code code2, tree decl1,
4027 tree decl2, int op_type)
4029 int i;
4030 tree vop0, vop1, new_tmp1, new_tmp2;
4031 gimple *new_stmt1, *new_stmt2;
4032 vec<tree> vec_tmp = vNULL;
4034 vec_tmp.create (vec_oprnds0->length () * 2);
4035 FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
4037 if (op_type == binary_op)
4038 vop1 = (*vec_oprnds1)[i];
4039 else
4040 vop1 = NULL_TREE;
4042 /* Generate the two halves of promotion operation. */
4043 new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
4044 op_type, vec_dest, gsi, stmt);
4045 new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
4046 op_type, vec_dest, gsi, stmt);
4047 if (is_gimple_call (new_stmt1))
4049 new_tmp1 = gimple_call_lhs (new_stmt1);
4050 new_tmp2 = gimple_call_lhs (new_stmt2);
4052 else
4054 new_tmp1 = gimple_assign_lhs (new_stmt1);
4055 new_tmp2 = gimple_assign_lhs (new_stmt2);
4058 /* Store the results for the next step. */
4059 vec_tmp.quick_push (new_tmp1);
4060 vec_tmp.quick_push (new_tmp2);
4063 vec_oprnds0->release ();
4064 *vec_oprnds0 = vec_tmp;
4068 /* Check if STMT performs a conversion operation that can be vectorized.
4069 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4070 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4071 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
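 /* Examples of statements handled here (illustrative): plain conversions
    such as int_1 = (int) short_2 or float_1 = (float) int_2 (NOP/CONVERT,
    FLOAT_EXPR, FIX_TRUNC_EXPR), as well as the widening patterns
    WIDEN_MULT_EXPR and WIDEN_LSHIFT_EXPR produced by pattern recognition. */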
4073 static bool
4074 vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
4075 gimple **vec_stmt, slp_tree slp_node)
4077 tree vec_dest;
4078 tree scalar_dest;
4079 tree op0, op1 = NULL_TREE;
4080 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
4081 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4082 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4083 enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
4084 enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
4085 tree decl1 = NULL_TREE, decl2 = NULL_TREE;
4086 tree new_temp;
4087 gimple *def_stmt;
4088 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4089 int ndts = 2;
4090 gimple *new_stmt = NULL;
4091 stmt_vec_info prev_stmt_info;
4092 poly_uint64 nunits_in;
4093 poly_uint64 nunits_out;
4094 tree vectype_out, vectype_in;
4095 int ncopies, i, j;
4096 tree lhs_type, rhs_type;
4097 enum { NARROW, NONE, WIDEN } modifier;
4098 vec<tree> vec_oprnds0 = vNULL;
4099 vec<tree> vec_oprnds1 = vNULL;
4100 tree vop0;
4101 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4102 vec_info *vinfo = stmt_info->vinfo;
4103 int multi_step_cvt = 0;
4104 vec<tree> interm_types = vNULL;
4105 tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
4106 int op_type;
4107 unsigned short fltsz;
4109 /* Is STMT a vectorizable conversion? */
4111 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4112 return false;
4114 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4115 && ! vec_stmt)
4116 return false;
4118 if (!is_gimple_assign (stmt))
4119 return false;
4121 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4122 return false;
4124 code = gimple_assign_rhs_code (stmt);
4125 if (!CONVERT_EXPR_CODE_P (code)
4126 && code != FIX_TRUNC_EXPR
4127 && code != FLOAT_EXPR
4128 && code != WIDEN_MULT_EXPR
4129 && code != WIDEN_LSHIFT_EXPR)
4130 return false;
4132 op_type = TREE_CODE_LENGTH (code);
4134 /* Check types of lhs and rhs. */
4135 scalar_dest = gimple_assign_lhs (stmt);
4136 lhs_type = TREE_TYPE (scalar_dest);
4137 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4139 op0 = gimple_assign_rhs1 (stmt);
4140 rhs_type = TREE_TYPE (op0);
4142 if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
4143 && !((INTEGRAL_TYPE_P (lhs_type)
4144 && INTEGRAL_TYPE_P (rhs_type))
4145 || (SCALAR_FLOAT_TYPE_P (lhs_type)
4146 && SCALAR_FLOAT_TYPE_P (rhs_type))))
4147 return false;
4149 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
4150 && ((INTEGRAL_TYPE_P (lhs_type)
4151 && !type_has_mode_precision_p (lhs_type))
4152 || (INTEGRAL_TYPE_P (rhs_type)
4153 && !type_has_mode_precision_p (rhs_type))))
4155 if (dump_enabled_p ())
4156 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4157 "type conversion to/from bit-precision unsupported."
4158 "\n");
4159 return false;
4162 /* Check the operands of the operation. */
4163 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype_in))
4165 if (dump_enabled_p ())
4166 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4167 "use not simple.\n");
4168 return false;
4170 if (op_type == binary_op)
4172 bool ok;
4174 op1 = gimple_assign_rhs2 (stmt);
4175 gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
4176 /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
4177 OP1. */
4178 if (CONSTANT_CLASS_P (op0))
4179 ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &vectype_in);
4180 else
4181 ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]);
4183 if (!ok)
4185 if (dump_enabled_p ())
4186 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4187 "use not simple.\n");
4188 return false;
4192 /* If op0 is an external or constant def, use a vector type of
4193 the same size as the output vector type. */
4194 if (!vectype_in)
4195 vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
4196 if (vec_stmt)
4197 gcc_assert (vectype_in);
4198 if (!vectype_in)
4200 if (dump_enabled_p ())
4202 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4203 "no vectype for scalar type ");
4204 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
4205 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4208 return false;
4211 if (VECTOR_BOOLEAN_TYPE_P (vectype_out)
4212 && !VECTOR_BOOLEAN_TYPE_P (vectype_in))
4214 if (dump_enabled_p ())
4216 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4217 "can't convert between boolean and non "
4218 "boolean vectors");
4219 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
4220 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4223 return false;
4226 nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
4227 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4228 if (known_eq (nunits_out, nunits_in))
4229 modifier = NONE;
4230 else if (multiple_p (nunits_out, nunits_in))
4231 modifier = NARROW;
4232 else
4234 gcc_checking_assert (multiple_p (nunits_in, nunits_out));
4235 modifier = WIDEN;
4238 /* Multiple types in SLP are handled by creating the appropriate number of
4239 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4240 case of SLP. */
4241 if (slp_node)
4242 ncopies = 1;
4243 else if (modifier == NARROW)
4244 ncopies = vect_get_num_copies (loop_vinfo, vectype_out);
4245 else
4246 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
4248 /* Sanity check: make sure that at least one copy of the vectorized stmt
4249 needs to be generated. */
4250 gcc_assert (ncopies >= 1);
4252 bool found_mode = false;
4253 scalar_mode lhs_mode = SCALAR_TYPE_MODE (lhs_type);
4254 scalar_mode rhs_mode = SCALAR_TYPE_MODE (rhs_type);
4255 opt_scalar_mode rhs_mode_iter;
4257 /* Supportable by target? */
4258 switch (modifier)
4260 case NONE:
4261 if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
4262 return false;
4263 if (supportable_convert_operation (code, vectype_out, vectype_in,
4264 &decl1, &code1))
4265 break;
4266 /* FALLTHRU */
4267 unsupported:
4268 if (dump_enabled_p ())
4269 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4270 "conversion not supported by target.\n");
4271 return false;
4273 case WIDEN:
4274 if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
4275 &code1, &code2, &multi_step_cvt,
4276 &interm_types))
4278 /* A binary widening operation can only be supported directly by the
4279 architecture. */
4280 gcc_assert (!(multi_step_cvt && op_type == binary_op));
4281 break;
4284 if (code != FLOAT_EXPR
4285 || GET_MODE_SIZE (lhs_mode) <= GET_MODE_SIZE (rhs_mode))
4286 goto unsupported;
4288 fltsz = GET_MODE_SIZE (lhs_mode);
4289 FOR_EACH_2XWIDER_MODE (rhs_mode_iter, rhs_mode)
4291 rhs_mode = rhs_mode_iter.require ();
4292 if (GET_MODE_SIZE (rhs_mode) > fltsz)
4293 break;
4295 cvt_type
4296 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
4297 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
4298 if (cvt_type == NULL_TREE)
4299 goto unsupported;
4301 if (GET_MODE_SIZE (rhs_mode) == fltsz)
4303 if (!supportable_convert_operation (code, vectype_out,
4304 cvt_type, &decl1, &codecvt1))
4305 goto unsupported;
4307 else if (!supportable_widening_operation (code, stmt, vectype_out,
4308 cvt_type, &codecvt1,
4309 &codecvt2, &multi_step_cvt,
4310 &interm_types))
4311 continue;
4312 else
4313 gcc_assert (multi_step_cvt == 0);
4315 if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
4316 vectype_in, &code1, &code2,
4317 &multi_step_cvt, &interm_types))
4319 found_mode = true;
4320 break;
4324 if (!found_mode)
4325 goto unsupported;
4327 if (GET_MODE_SIZE (rhs_mode) == fltsz)
4328 codecvt2 = ERROR_MARK;
4329 else
4331 multi_step_cvt++;
4332 interm_types.safe_push (cvt_type);
4333 cvt_type = NULL_TREE;
4335 break;
4337 case NARROW:
4338 gcc_assert (op_type == unary_op);
4339 if (supportable_narrowing_operation (code, vectype_out, vectype_in,
4340 &code1, &multi_step_cvt,
4341 &interm_types))
4342 break;
4344 if (code != FIX_TRUNC_EXPR
4345 || GET_MODE_SIZE (lhs_mode) >= GET_MODE_SIZE (rhs_mode))
4346 goto unsupported;
4348 cvt_type
4349 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
4350 cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
4351 if (cvt_type == NULL_TREE)
4352 goto unsupported;
4353 if (!supportable_convert_operation (code, cvt_type, vectype_in,
4354 &decl1, &codecvt1))
4355 goto unsupported;
4356 if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
4357 &code1, &multi_step_cvt,
4358 &interm_types))
4359 break;
4360 goto unsupported;
4362 default:
4363 gcc_unreachable ();
4366 if (!vec_stmt) /* transformation not required. */
4368 if (dump_enabled_p ())
4369 dump_printf_loc (MSG_NOTE, vect_location,
4370 "=== vectorizable_conversion ===\n");
4371 if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
4373 STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
4374 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
4376 else if (modifier == NARROW)
4378 STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
4379 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
4381 else
4383 STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
4384 vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
4386 interm_types.release ();
4387 return true;
4390 /* Transform. */
4391 if (dump_enabled_p ())
4392 dump_printf_loc (MSG_NOTE, vect_location,
4393 "transform conversion. ncopies = %d.\n", ncopies);
4395 if (op_type == binary_op)
4397 if (CONSTANT_CLASS_P (op0))
4398 op0 = fold_convert (TREE_TYPE (op1), op0);
4399 else if (CONSTANT_CLASS_P (op1))
4400 op1 = fold_convert (TREE_TYPE (op0), op1);
4403 /* In case of multi-step conversion, we first generate conversion operations
4404 to the intermediate types, and then from those types to the final one.
4405 We create vector destinations for the intermediate type (TYPES) received
4406 from supportable_*_operation, and store them in the correct order
4407 for future use in vect_create_vectorized_*_stmts (). */
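 /* A small sketch of the ordering: the destination for the final type is
    pushed into VEC_DSTS first and the intermediate-type destinations after
    it, so the demotion/promotion helpers pop the intermediate destinations
    before finally reaching the destination of the result type. */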
4408 auto_vec<tree> vec_dsts (multi_step_cvt + 1);
4409 vec_dest = vect_create_destination_var (scalar_dest,
4410 (cvt_type && modifier == WIDEN)
4411 ? cvt_type : vectype_out);
4412 vec_dsts.quick_push (vec_dest);
4414 if (multi_step_cvt)
4416 for (i = interm_types.length () - 1;
4417 interm_types.iterate (i, &intermediate_type); i--)
4419 vec_dest = vect_create_destination_var (scalar_dest,
4420 intermediate_type);
4421 vec_dsts.quick_push (vec_dest);
4425 if (cvt_type)
4426 vec_dest = vect_create_destination_var (scalar_dest,
4427 modifier == WIDEN
4428 ? vectype_out : cvt_type);
4430 if (!slp_node)
4432 if (modifier == WIDEN)
4434 vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
4435 if (op_type == binary_op)
4436 vec_oprnds1.create (1);
4438 else if (modifier == NARROW)
4439 vec_oprnds0.create (
4440 2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
4442 else if (code == WIDEN_LSHIFT_EXPR)
4443 vec_oprnds1.create (slp_node->vec_stmts_size);
4445 last_oprnd = op0;
4446 prev_stmt_info = NULL;
4447 switch (modifier)
4449 case NONE:
4450 for (j = 0; j < ncopies; j++)
4452 if (j == 0)
4453 vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
4454 else
4455 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
4457 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4459 /* Arguments are ready, create the new vector stmt. */
4460 if (code1 == CALL_EXPR)
4462 new_stmt = gimple_build_call (decl1, 1, vop0);
4463 new_temp = make_ssa_name (vec_dest, new_stmt);
4464 gimple_call_set_lhs (new_stmt, new_temp);
4466 else
4468 gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
4469 new_stmt = gimple_build_assign (vec_dest, code1, vop0);
4470 new_temp = make_ssa_name (vec_dest, new_stmt);
4471 gimple_assign_set_lhs (new_stmt, new_temp);
4474 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4475 if (slp_node)
4476 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4477 else
4479 if (!prev_stmt_info)
4480 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4481 else
4482 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4483 prev_stmt_info = vinfo_for_stmt (new_stmt);
4487 break;
4489 case WIDEN:
4490 /* In case the vectorization factor (VF) is bigger than the number
4491 of elements that we can fit in a vectype (nunits), we have to
4492 generate more than one vector stmt - i.e., we need to "unroll"
4493 the vector stmt by a factor VF/nunits. */
4494 for (j = 0; j < ncopies; j++)
4496 /* Handle uses. */
4497 if (j == 0)
4499 if (slp_node)
4501 if (code == WIDEN_LSHIFT_EXPR)
4503 unsigned int k;
4505 vec_oprnd1 = op1;
4506 /* Store vec_oprnd1 for every vector stmt to be created
4507 for SLP_NODE. We check during the analysis that all
4508 the shift arguments are the same. */
4509 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
4510 vec_oprnds1.quick_push (vec_oprnd1);
4512 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4513 slp_node);
4515 else
4516 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
4517 &vec_oprnds1, slp_node);
4519 else
4521 vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt);
4522 vec_oprnds0.quick_push (vec_oprnd0);
4523 if (op_type == binary_op)
4525 if (code == WIDEN_LSHIFT_EXPR)
4526 vec_oprnd1 = op1;
4527 else
4528 vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt);
4529 vec_oprnds1.quick_push (vec_oprnd1);
4533 else
4535 vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
4536 vec_oprnds0.truncate (0);
4537 vec_oprnds0.quick_push (vec_oprnd0);
4538 if (op_type == binary_op)
4540 if (code == WIDEN_LSHIFT_EXPR)
4541 vec_oprnd1 = op1;
4542 else
4543 vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
4544 vec_oprnd1);
4545 vec_oprnds1.truncate (0);
4546 vec_oprnds1.quick_push (vec_oprnd1);
4550 /* Arguments are ready. Create the new vector stmts. */
4551 for (i = multi_step_cvt; i >= 0; i--)
4553 tree this_dest = vec_dsts[i];
4554 enum tree_code c1 = code1, c2 = code2;
4555 if (i == 0 && codecvt2 != ERROR_MARK)
4557 c1 = codecvt1;
4558 c2 = codecvt2;
4560 vect_create_vectorized_promotion_stmts (&vec_oprnds0,
4561 &vec_oprnds1,
4562 stmt, this_dest, gsi,
4563 c1, c2, decl1, decl2,
4564 op_type);
4567 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4569 if (cvt_type)
4571 if (codecvt1 == CALL_EXPR)
4573 new_stmt = gimple_build_call (decl1, 1, vop0);
4574 new_temp = make_ssa_name (vec_dest, new_stmt);
4575 gimple_call_set_lhs (new_stmt, new_temp);
4577 else
4579 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
4580 new_temp = make_ssa_name (vec_dest);
4581 new_stmt = gimple_build_assign (new_temp, codecvt1,
4582 vop0);
4585 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4587 else
4588 new_stmt = SSA_NAME_DEF_STMT (vop0);
4590 if (slp_node)
4591 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4592 else
4594 if (!prev_stmt_info)
4595 STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
4596 else
4597 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4598 prev_stmt_info = vinfo_for_stmt (new_stmt);
4603 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4604 break;
4606 case NARROW:
4607 /* In case the vectorization factor (VF) is bigger than the number
4608 of elements that we can fit in a vectype (nunits), we have to
4609 generate more than one vector stmt - i.e., we need to "unroll"
4610 the vector stmt by a factor VF/nunits. */
4611 for (j = 0; j < ncopies; j++)
4613 /* Handle uses. */
4614 if (slp_node)
4615 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
4616 slp_node);
4617 else
4619 vec_oprnds0.truncate (0);
4620 vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
4621 vect_pow2 (multi_step_cvt) - 1);
4624 /* Arguments are ready. Create the new vector stmts. */
4625 if (cvt_type)
4626 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
4628 if (codecvt1 == CALL_EXPR)
4630 new_stmt = gimple_build_call (decl1, 1, vop0);
4631 new_temp = make_ssa_name (vec_dest, new_stmt);
4632 gimple_call_set_lhs (new_stmt, new_temp);
4634 else
4636 gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
4637 new_temp = make_ssa_name (vec_dest);
4638 new_stmt = gimple_build_assign (new_temp, codecvt1,
4639 vop0);
4642 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4643 vec_oprnds0[i] = new_temp;
4646 vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
4647 stmt, vec_dsts, gsi,
4648 slp_node, code1,
4649 &prev_stmt_info);
4652 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
4653 break;
4656 vec_oprnds0.release ();
4657 vec_oprnds1.release ();
4658 interm_types.release ();
4660 return true;
4664 /* Function vectorizable_assignment.
4666 Check if STMT performs an assignment (copy) that can be vectorized.
4667 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4668 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4669 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
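 /* Illustrative examples of assignments handled here: plain copies such as
    a_1 = b_2, PAREN_EXPR wrappers, and conversions that only reinterpret
    the bits, e.g. u_1 = (unsigned int) i_2 or a VIEW_CONVERT_EXPR, provided
    the input and output vector types have the same number of elements and
    the same size. */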
4671 static bool
4672 vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
4673 gimple **vec_stmt, slp_tree slp_node)
4675 tree vec_dest;
4676 tree scalar_dest;
4677 tree op;
4678 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4679 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4680 tree new_temp;
4681 gimple *def_stmt;
4682 enum vect_def_type dt[1] = {vect_unknown_def_type};
4683 int ndts = 1;
4684 int ncopies;
4685 int i, j;
4686 vec<tree> vec_oprnds = vNULL;
4687 tree vop;
4688 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4689 vec_info *vinfo = stmt_info->vinfo;
4690 gimple *new_stmt = NULL;
4691 stmt_vec_info prev_stmt_info = NULL;
4692 enum tree_code code;
4693 tree vectype_in;
4695 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4696 return false;
4698 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4699 && ! vec_stmt)
4700 return false;
4702 /* Is vectorizable assignment? */
4703 if (!is_gimple_assign (stmt))
4704 return false;
4706 scalar_dest = gimple_assign_lhs (stmt);
4707 if (TREE_CODE (scalar_dest) != SSA_NAME)
4708 return false;
4710 code = gimple_assign_rhs_code (stmt);
4711 if (gimple_assign_single_p (stmt)
4712 || code == PAREN_EXPR
4713 || CONVERT_EXPR_CODE_P (code))
4714 op = gimple_assign_rhs1 (stmt);
4715 else
4716 return false;
4718 if (code == VIEW_CONVERT_EXPR)
4719 op = TREE_OPERAND (op, 0);
4721 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4722 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
4724 /* Multiple types in SLP are handled by creating the appropriate number of
4725 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4726 case of SLP. */
4727 if (slp_node)
4728 ncopies = 1;
4729 else
4730 ncopies = vect_get_num_copies (loop_vinfo, vectype);
4732 gcc_assert (ncopies >= 1);
4734 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[0], &vectype_in))
4736 if (dump_enabled_p ())
4737 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4738 "use not simple.\n");
4739 return false;
4742 /* We can handle NOP_EXPR conversions that do not change the number
4743 of elements or the vector size. */
4744 if ((CONVERT_EXPR_CODE_P (code)
4745 || code == VIEW_CONVERT_EXPR)
4746 && (!vectype_in
4747 || maybe_ne (TYPE_VECTOR_SUBPARTS (vectype_in), nunits)
4748 || maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype)),
4749 GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
4750 return false;
4752 /* We do not handle bit-precision changes. */
4753 if ((CONVERT_EXPR_CODE_P (code)
4754 || code == VIEW_CONVERT_EXPR)
4755 && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
4756 && (!type_has_mode_precision_p (TREE_TYPE (scalar_dest))
4757 || !type_has_mode_precision_p (TREE_TYPE (op)))
4758 /* But a conversion that does not change the bit-pattern is ok. */
4759 && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
4760 > TYPE_PRECISION (TREE_TYPE (op)))
4761 && TYPE_UNSIGNED (TREE_TYPE (op)))
4762 /* Conversion between boolean types of different sizes is
4763 a simple assignment in case their vectypes are the same
4764 boolean vectors. */
4765 && (!VECTOR_BOOLEAN_TYPE_P (vectype)
4766 || !VECTOR_BOOLEAN_TYPE_P (vectype_in)))
4768 if (dump_enabled_p ())
4769 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4770 "type conversion to/from bit-precision "
4771 "unsupported.\n");
4772 return false;
4775 if (!vec_stmt) /* transformation not required. */
4777 STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
4778 if (dump_enabled_p ())
4779 dump_printf_loc (MSG_NOTE, vect_location,
4780 "=== vectorizable_assignment ===\n");
4781 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
4782 return true;
4785 /* Transform. */
4786 if (dump_enabled_p ())
4787 dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");
4789 /* Handle def. */
4790 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4792 /* Handle use. */
4793 for (j = 0; j < ncopies; j++)
4795 /* Handle uses. */
4796 if (j == 0)
4797 vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
4798 else
4799 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
4801 /* Arguments are ready. Create the new vector stmt. */
4802 FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
4804 if (CONVERT_EXPR_CODE_P (code)
4805 || code == VIEW_CONVERT_EXPR)
4806 vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
4807 new_stmt = gimple_build_assign (vec_dest, vop);
4808 new_temp = make_ssa_name (vec_dest, new_stmt);
4809 gimple_assign_set_lhs (new_stmt, new_temp);
4810 vect_finish_stmt_generation (stmt, new_stmt, gsi);
4811 if (slp_node)
4812 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
4815 if (slp_node)
4816 continue;
4818 if (j == 0)
4819 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
4820 else
4821 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
4823 prev_stmt_info = vinfo_for_stmt (new_stmt);
4826 vec_oprnds.release ();
4827 return true;
4831 /* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
4832 either as a shift by a scalar or as a shift by a vector. */
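 /* Example use (a sketch): vect_supportable_shift (LSHIFT_EXPR,
    short_integer_type_node) returns true iff the target provides either a
    vector-by-scalar or a vector-by-vector left shift for the vector type
    chosen for that scalar type. */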
4834 bool
4835 vect_supportable_shift (enum tree_code code, tree scalar_type)
4838 machine_mode vec_mode;
4839 optab optab;
4840 int icode;
4841 tree vectype;
4843 vectype = get_vectype_for_scalar_type (scalar_type);
4844 if (!vectype)
4845 return false;
4847 optab = optab_for_tree_code (code, vectype, optab_scalar);
4848 if (!optab
4849 || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
4851 optab = optab_for_tree_code (code, vectype, optab_vector);
4852 if (!optab
4853 || (optab_handler (optab, TYPE_MODE (vectype))
4854 == CODE_FOR_nothing))
4855 return false;
4858 vec_mode = TYPE_MODE (vectype);
4859 icode = (int) optab_handler (optab, vec_mode);
4860 if (icode == CODE_FOR_nothing)
4861 return false;
4863 return true;
4867 /* Function vectorizable_shift.
4869 Check if STMT performs a shift operation that can be vectorized.
4870 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
4871 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
4872 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
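 /* Illustrative examples: statements such as x_1 = y_2 << n_3 or
    x_1 = y_2 >> 3, plus the rotate codes LROTATE_EXPR and RROTATE_EXPR.
    Whether the shift amount ends up as a scalar operand or as a vector
    operand is decided below, based on how the shift amount is defined. */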
4874 static bool
4875 vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
4876 gimple **vec_stmt, slp_tree slp_node)
4878 tree vec_dest;
4879 tree scalar_dest;
4880 tree op0, op1 = NULL;
4881 tree vec_oprnd1 = NULL_TREE;
4882 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4883 tree vectype;
4884 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4885 enum tree_code code;
4886 machine_mode vec_mode;
4887 tree new_temp;
4888 optab optab;
4889 int icode;
4890 machine_mode optab_op2_mode;
4891 gimple *def_stmt;
4892 enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
4893 int ndts = 2;
4894 gimple *new_stmt = NULL;
4895 stmt_vec_info prev_stmt_info;
4896 poly_uint64 nunits_in;
4897 poly_uint64 nunits_out;
4898 tree vectype_out;
4899 tree op1_vectype;
4900 int ncopies;
4901 int j, i;
4902 vec<tree> vec_oprnds0 = vNULL;
4903 vec<tree> vec_oprnds1 = vNULL;
4904 tree vop0, vop1;
4905 unsigned int k;
4906 bool scalar_shift_arg = true;
4907 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4908 vec_info *vinfo = stmt_info->vinfo;
4910 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
4911 return false;
4913 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
4914 && ! vec_stmt)
4915 return false;
4917 /* Is STMT a vectorizable shift/rotate operation? */
4918 if (!is_gimple_assign (stmt))
4919 return false;
4921 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
4922 return false;
4924 code = gimple_assign_rhs_code (stmt);
4926 if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
4927 || code == RROTATE_EXPR))
4928 return false;
4930 scalar_dest = gimple_assign_lhs (stmt);
4931 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
4932 if (!type_has_mode_precision_p (TREE_TYPE (scalar_dest)))
4934 if (dump_enabled_p ())
4935 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4936 "bit-precision shifts not supported.\n");
4937 return false;
4940 op0 = gimple_assign_rhs1 (stmt);
4941 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
4943 if (dump_enabled_p ())
4944 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4945 "use not simple.\n");
4946 return false;
4948 /* If op0 is an external or constant def use a vector type with
4949 the same size as the output vector type. */
4950 if (!vectype)
4951 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
4952 if (vec_stmt)
4953 gcc_assert (vectype);
4954 if (!vectype)
4956 if (dump_enabled_p ())
4957 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4958 "no vectype for scalar type\n");
4959 return false;
4962 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
4963 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
4964 if (maybe_ne (nunits_out, nunits_in))
4965 return false;
4967 op1 = gimple_assign_rhs2 (stmt);
4968 if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &op1_vectype))
4970 if (dump_enabled_p ())
4971 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4972 "use not simple.\n");
4973 return false;
4976 /* Multiple types in SLP are handled by creating the appropriate number of
4977 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4978 case of SLP. */
4979 if (slp_node)
4980 ncopies = 1;
4981 else
4982 ncopies = vect_get_num_copies (loop_vinfo, vectype);
4984 gcc_assert (ncopies >= 1);
4986 /* Determine whether the shift amount is a vector or a scalar. If the
4987 shift/rotate amount is a vector, use the vector/vector shift optabs. */
4989 if ((dt[1] == vect_internal_def
4990 || dt[1] == vect_induction_def)
4991 && !slp_node)
4992 scalar_shift_arg = false;
4993 else if (dt[1] == vect_constant_def
4994 || dt[1] == vect_external_def
4995 || dt[1] == vect_internal_def)
4997 /* In SLP, we need to check whether the shift count is the same
4998 for all statements; in loops, if it is a constant or invariant,
4999 it is always a scalar shift. */
5000 if (slp_node)
5002 vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
5003 gimple *slpstmt;
5005 FOR_EACH_VEC_ELT (stmts, k, slpstmt)
5006 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
5007 scalar_shift_arg = false;
5010 /* If the shift amount is computed by a pattern stmt we cannot
5011 use the scalar amount directly, so give up and use a vector
5012 shift. */
5013 if (dt[1] == vect_internal_def)
5015 gimple *def = SSA_NAME_DEF_STMT (op1);
5016 if (is_pattern_stmt_p (vinfo_for_stmt (def)))
5017 scalar_shift_arg = false;
5020 else
5022 if (dump_enabled_p ())
5023 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5024 "operand mode requires invariant argument.\n");
5025 return false;
5028 /* Vector shifted by vector. */
5029 if (!scalar_shift_arg)
5031 optab = optab_for_tree_code (code, vectype, optab_vector);
5032 if (dump_enabled_p ())
5033 dump_printf_loc (MSG_NOTE, vect_location,
5034 "vector/vector shift/rotate found.\n");
5036 if (!op1_vectype)
5037 op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
5038 if (op1_vectype == NULL_TREE
5039 || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
5041 if (dump_enabled_p ())
5042 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5043 "unusable type for last operand in"
5044 " vector/vector shift/rotate.\n");
5045 return false;
5048 /* See if the machine has a vector-shifted-by-scalar insn and, if not,
5049 whether it has a vector-shifted-by-vector insn. */
5050 else
5052 optab = optab_for_tree_code (code, vectype, optab_scalar);
5053 if (optab
5054 && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
5056 if (dump_enabled_p ())
5057 dump_printf_loc (MSG_NOTE, vect_location,
5058 "vector/scalar shift/rotate found.\n");
5060 else
5062 optab = optab_for_tree_code (code, vectype, optab_vector);
5063 if (optab
5064 && (optab_handler (optab, TYPE_MODE (vectype))
5065 != CODE_FOR_nothing))
5067 scalar_shift_arg = false;
5069 if (dump_enabled_p ())
5070 dump_printf_loc (MSG_NOTE, vect_location,
5071 "vector/vector shift/rotate found.\n");
5073 /* Unlike the other binary operators, shifts/rotates have
5074 an int rhs rather than one of the same type as the lhs,
5075 so make sure the scalar is of the right type if we are
5076 dealing with vectors of long long/long/short/char. */
5077 if (dt[1] == vect_constant_def)
5078 op1 = fold_convert (TREE_TYPE (vectype), op1);
5079 else if (!useless_type_conversion_p (TREE_TYPE (vectype),
5080 TREE_TYPE (op1)))
5082 if (slp_node
5083 && TYPE_MODE (TREE_TYPE (vectype))
5084 != TYPE_MODE (TREE_TYPE (op1)))
5086 if (dump_enabled_p ())
5087 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5088 "unusable type for last operand in"
5089 " vector/vector shift/rotate.\n");
5090 return false;
5092 if (vec_stmt && !slp_node)
5094 op1 = fold_convert (TREE_TYPE (vectype), op1);
5095 op1 = vect_init_vector (stmt, op1,
5096 TREE_TYPE (vectype), NULL);
5103 /* Supportable by target? */
5104 if (!optab)
5106 if (dump_enabled_p ())
5107 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5108 "no optab.\n");
5109 return false;
5111 vec_mode = TYPE_MODE (vectype);
5112 icode = (int) optab_handler (optab, vec_mode);
5113 if (icode == CODE_FOR_nothing)
5115 if (dump_enabled_p ())
5116 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5117 "op not supported by target.\n");
5118 /* Check only during analysis. */
5119 if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
5120 || (!vec_stmt
5121 && !vect_worthwhile_without_simd_p (vinfo, code)))
5122 return false;
5123 if (dump_enabled_p ())
5124 dump_printf_loc (MSG_NOTE, vect_location,
5125 "proceeding using word mode.\n");
5128 /* Worthwhile without SIMD support? Check only during analysis. */
5129 if (!vec_stmt
5130 && !VECTOR_MODE_P (TYPE_MODE (vectype))
5131 && !vect_worthwhile_without_simd_p (vinfo, code))
5133 if (dump_enabled_p ())
5134 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5135 "not worthwhile without SIMD support.\n");
5136 return false;
5139 if (!vec_stmt) /* transformation not required. */
5141 STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
5142 if (dump_enabled_p ())
5143 dump_printf_loc (MSG_NOTE, vect_location,
5144 "=== vectorizable_shift ===\n");
5145 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
5146 return true;
5149 /* Transform. */
5151 if (dump_enabled_p ())
5152 dump_printf_loc (MSG_NOTE, vect_location,
5153 "transform binary/unary operation.\n");
5155 /* Handle def. */
5156 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5158 prev_stmt_info = NULL;
5159 for (j = 0; j < ncopies; j++)
5161 /* Handle uses. */
5162 if (j == 0)
5164 if (scalar_shift_arg)
5166 /* Vector shl and shr insn patterns can be defined with scalar
5167 operand 2 (shift operand). In this case, use constant or loop
5168 invariant op1 directly, without extending it to vector mode
5169 first. */
5170 optab_op2_mode = insn_data[icode].operand[2].mode;
5171 if (!VECTOR_MODE_P (optab_op2_mode))
5173 if (dump_enabled_p ())
5174 dump_printf_loc (MSG_NOTE, vect_location,
5175 "operand 1 using scalar mode.\n");
5176 vec_oprnd1 = op1;
5177 vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
5178 vec_oprnds1.quick_push (vec_oprnd1);
5179 if (slp_node)
5181 /* Store vec_oprnd1 for every vector stmt to be created
5182 for SLP_NODE. We check during the analysis that all
5183 the shift arguments are the same.
5184 TODO: Allow different constants for different vector
5185 stmts generated for an SLP instance. */
5186 for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
5187 vec_oprnds1.quick_push (vec_oprnd1);
5192 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
5193 (a special case for certain kinds of vector shifts); otherwise,
5194 operand 1 should be of a vector type (the usual case). */
5195 if (vec_oprnd1)
5196 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
5197 slp_node);
5198 else
5199 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
5200 slp_node);
5202 else
5203 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
5205 /* Arguments are ready. Create the new vector stmt. */
5206 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
5208 vop1 = vec_oprnds1[i];
5209 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
5210 new_temp = make_ssa_name (vec_dest, new_stmt);
5211 gimple_assign_set_lhs (new_stmt, new_temp);
5212 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5213 if (slp_node)
5214 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
5217 if (slp_node)
5218 continue;
5220 if (j == 0)
5221 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5222 else
5223 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5224 prev_stmt_info = vinfo_for_stmt (new_stmt);
5227 vec_oprnds0.release ();
5228 vec_oprnds1.release ();
5230 return true;
5234 /* Function vectorizable_operation.
5236 Check if STMT performs a binary, unary or ternary operation that can
5237 be vectorized.
5238 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5239 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
5240 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5242 static bool
5243 vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
5244 gimple **vec_stmt, slp_tree slp_node)
5246 tree vec_dest;
5247 tree scalar_dest;
5248 tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
5249 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5250 tree vectype;
5251 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5252 enum tree_code code, orig_code;
5253 machine_mode vec_mode;
5254 tree new_temp;
5255 int op_type;
5256 optab optab;
5257 bool target_support_p;
5258 gimple *def_stmt;
5259 enum vect_def_type dt[3]
5260 = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
5261 int ndts = 3;
5262 gimple *new_stmt = NULL;
5263 stmt_vec_info prev_stmt_info;
5264 poly_uint64 nunits_in;
5265 poly_uint64 nunits_out;
5266 tree vectype_out;
5267 int ncopies;
5268 int j, i;
5269 vec<tree> vec_oprnds0 = vNULL;
5270 vec<tree> vec_oprnds1 = vNULL;
5271 vec<tree> vec_oprnds2 = vNULL;
5272 tree vop0, vop1, vop2;
5273 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5274 vec_info *vinfo = stmt_info->vinfo;
5276 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5277 return false;
5279 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5280 && ! vec_stmt)
5281 return false;
5283 /* Is STMT a vectorizable binary/unary/ternary operation? */
5284 if (!is_gimple_assign (stmt))
5285 return false;
5287 if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
5288 return false;
5290 orig_code = code = gimple_assign_rhs_code (stmt);
5292 /* For pointer addition and subtraction, we should use the normal
5293 plus and minus for the vector operation. */
5294 if (code == POINTER_PLUS_EXPR)
5295 code = PLUS_EXPR;
5296 if (code == POINTER_DIFF_EXPR)
5297 code = MINUS_EXPR;
5299 /* Support only unary, binary and ternary operations. */
5300 op_type = TREE_CODE_LENGTH (code);
5301 if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
5303 if (dump_enabled_p ())
5304 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5305 "num. args = %d (not unary/binary/ternary op).\n",
5306 op_type);
5307 return false;
5310 scalar_dest = gimple_assign_lhs (stmt);
5311 vectype_out = STMT_VINFO_VECTYPE (stmt_info);
5313 /* Most operations cannot handle bit-precision types without extra
5314 truncations. */
5315 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
5316 && !type_has_mode_precision_p (TREE_TYPE (scalar_dest))
5317 /* Exceptions are bitwise binary operations. */
5318 && code != BIT_IOR_EXPR
5319 && code != BIT_XOR_EXPR
5320 && code != BIT_AND_EXPR)
5322 if (dump_enabled_p ())
5323 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5324 "bit-precision arithmetic not supported.\n");
5325 return false;
5328 op0 = gimple_assign_rhs1 (stmt);
5329 if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
5331 if (dump_enabled_p ())
5332 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5333 "use not simple.\n");
5334 return false;
5336 /* If op0 is an external or constant def use a vector type with
5337 the same size as the output vector type. */
5338 if (!vectype)
5340 /* For a boolean type we cannot determine the vectype from an
5341 invariant value (we don't know whether it is a vector
5342 of booleans or a vector of integers). We use the output
5343 vectype because operations on booleans don't change the
5344 type. */
5345 if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op0)))
5347 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (scalar_dest)))
5349 if (dump_enabled_p ())
5350 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5351 "not supported operation on bool value.\n");
5352 return false;
5354 vectype = vectype_out;
5356 else
5357 vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
5359 if (vec_stmt)
5360 gcc_assert (vectype);
5361 if (!vectype)
5363 if (dump_enabled_p ())
5365 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5366 "no vectype for scalar type ");
5367 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
5368 TREE_TYPE (op0));
5369 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
5372 return false;
5375 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
5376 nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
5377 if (maybe_ne (nunits_out, nunits_in))
5378 return false;
5380 if (op_type == binary_op || op_type == ternary_op)
5382 op1 = gimple_assign_rhs2 (stmt);
5383 if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]))
5385 if (dump_enabled_p ())
5386 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5387 "use not simple.\n");
5388 return false;
5391 if (op_type == ternary_op)
5393 op2 = gimple_assign_rhs3 (stmt);
5394 if (!vect_is_simple_use (op2, vinfo, &def_stmt, &dt[2]))
5396 if (dump_enabled_p ())
5397 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5398 "use not simple.\n");
5399 return false;
5403 /* Multiple types in SLP are handled by creating the appropriate number of
5404 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5405 case of SLP. */
5406 if (slp_node)
5407 ncopies = 1;
5408 else
5409 ncopies = vect_get_num_copies (loop_vinfo, vectype);
5411 gcc_assert (ncopies >= 1);
5413 /* Shifts are handled in vectorizable_shift (). */
5414 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
5415 || code == RROTATE_EXPR)
5416 return false;
5418 /* Supportable by target? */
5420 vec_mode = TYPE_MODE (vectype);
5421 if (code == MULT_HIGHPART_EXPR)
5422 target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
5423 else
5425 optab = optab_for_tree_code (code, vectype, optab_default);
5426 if (!optab)
5428 if (dump_enabled_p ())
5429 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5430 "no optab.\n");
5431 return false;
5433 target_support_p = (optab_handler (optab, vec_mode)
5434 != CODE_FOR_nothing);
5437 if (!target_support_p)
5439 if (dump_enabled_p ())
5440 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5441 "op not supported by target.\n");
5442 /* Check only during analysis. */
5443 if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
5444 || (!vec_stmt && !vect_worthwhile_without_simd_p (vinfo, code)))
5445 return false;
5446 if (dump_enabled_p ())
5447 dump_printf_loc (MSG_NOTE, vect_location,
5448 "proceeding using word mode.\n");
5451 /* Worthwhile without SIMD support? Check only during analysis. */
5452 if (!VECTOR_MODE_P (vec_mode)
5453 && !vec_stmt
5454 && !vect_worthwhile_without_simd_p (vinfo, code))
5456 if (dump_enabled_p ())
5457 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5458 "not worthwhile without SIMD support.\n");
5459 return false;
5462 if (!vec_stmt) /* transformation not required. */
5464 STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
5465 if (dump_enabled_p ())
5466 dump_printf_loc (MSG_NOTE, vect_location,
5467 "=== vectorizable_operation ===\n");
5468 vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
5469 return true;
5472 /* Transform. */
5474 if (dump_enabled_p ())
5475 dump_printf_loc (MSG_NOTE, vect_location,
5476 "transform binary/unary operation.\n");
5478 /* Handle def. */
5479 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5481 /* POINTER_DIFF_EXPR has pointer arguments which are vectorized as
5482 vectors with unsigned elements, but the result is signed. So, we
5483 need to compute the MINUS_EXPR into a vectype temporary and
5484 VIEW_CONVERT_EXPR it into the final vectype_out result. */
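 /* A sketch of the emitted sequence (not literal GIMPLE):
      tmp = MINUS_EXPR <uvec_a, uvec_b>;               computed in VECTYPE
      res = VIEW_CONVERT_EXPR <vectype_out> (tmp);     reinterpreted as signed
 */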
5485 tree vec_cvt_dest = NULL_TREE;
5486 if (orig_code == POINTER_DIFF_EXPR)
5487 vec_cvt_dest = vect_create_destination_var (scalar_dest, vectype_out);
5489 /* In case the vectorization factor (VF) is bigger than the number
5490 of elements that we can fit in a vectype (nunits), we have to generate
5491 more than one vector stmt - i.e., we need to "unroll" the
5492 vector stmt by a factor VF/nunits. In doing so, we record a pointer
5493 from one copy of the vector stmt to the next, in the field
5494 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
5495 stages to find the correct vector defs to be used when vectorizing
5496 stmts that use the defs of the current stmt. The example below
5497 illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
5498 we need to create 4 vectorized stmts):
5500 before vectorization:
5501 RELATED_STMT VEC_STMT
5502 S1: x = memref - -
5503 S2: z = x + 1 - -
5505 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
5506 there):
5507 RELATED_STMT VEC_STMT
5508 VS1_0: vx0 = memref0 VS1_1 -
5509 VS1_1: vx1 = memref1 VS1_2 -
5510 VS1_2: vx2 = memref2 VS1_3 -
5511 VS1_3: vx3 = memref3 - -
5512 S1: x = load - VS1_0
5513 S2: z = x + 1 - -
5515 step2: vectorize stmt S2 (done here):
5516 To vectorize stmt S2 we first need to find the relevant vector
5517 def for the first operand 'x'. This is, as usual, obtained from
5518 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
5519 that defines 'x' (S1). This way we find the stmt VS1_0, and the
5520 relevant vector def 'vx0'. Having found 'vx0' we can generate
5521 the vector stmt VS2_0, and as usual, record it in the
5522 STMT_VINFO_VEC_STMT of stmt S2.
5523 When creating the second copy (VS2_1), we obtain the relevant vector
5524 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
5525 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
5526 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
5527 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
5528 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
5529 chain of stmts and pointers:
5530 RELATED_STMT VEC_STMT
5531 VS1_0: vx0 = memref0 VS1_1 -
5532 VS1_1: vx1 = memref1 VS1_2 -
5533 VS1_2: vx2 = memref2 VS1_3 -
5534 VS1_3: vx3 = memref3 - -
5535 S1: x = load - VS1_0
5536 VS2_0: vz0 = vx0 + v1 VS2_1 -
5537 VS2_1: vz1 = vx1 + v1 VS2_2 -
5538 VS2_2: vz2 = vx2 + v1 VS2_3 -
5539 VS2_3: vz3 = vx3 + v1 - -
5540 S2: z = x + 1 - VS2_0 */
5542 prev_stmt_info = NULL;
5543 for (j = 0; j < ncopies; j++)
5545 /* Handle uses. */
5546 if (j == 0)
5548 if (op_type == binary_op || op_type == ternary_op)
5549 vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
5550 slp_node);
5551 else
5552 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
5553 slp_node);
5554 if (op_type == ternary_op)
5555 vect_get_vec_defs (op2, NULL_TREE, stmt, &vec_oprnds2, NULL,
5556 slp_node);
5558 else
5560 vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
5561 if (op_type == ternary_op)
5563 tree vec_oprnd = vec_oprnds2.pop ();
5564 vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
5565 vec_oprnd));
5569 /* Arguments are ready. Create the new vector stmt. */
5570 FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
5572 vop1 = ((op_type == binary_op || op_type == ternary_op)
5573 ? vec_oprnds1[i] : NULL_TREE);
5574 vop2 = ((op_type == ternary_op)
5575 ? vec_oprnds2[i] : NULL_TREE);
5576 new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2);
5577 new_temp = make_ssa_name (vec_dest, new_stmt);
5578 gimple_assign_set_lhs (new_stmt, new_temp);
5579 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5580 if (vec_cvt_dest)
5582 new_temp = build1 (VIEW_CONVERT_EXPR, vectype_out, new_temp);
5583 new_stmt = gimple_build_assign (vec_cvt_dest, VIEW_CONVERT_EXPR,
5584 new_temp);
5585 new_temp = make_ssa_name (vec_cvt_dest, new_stmt);
5586 gimple_assign_set_lhs (new_stmt, new_temp);
5587 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5589 if (slp_node)
5590 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
5593 if (slp_node)
5594 continue;
5596 if (j == 0)
5597 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5598 else
5599 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5600 prev_stmt_info = vinfo_for_stmt (new_stmt);
5603 vec_oprnds0.release ();
5604 vec_oprnds1.release ();
5605 vec_oprnds2.release ();
5607 return true;
5610 /* A helper function to ensure data reference DR's base alignment. */
5612 static void
5613 ensure_base_align (struct data_reference *dr)
5615 if (!dr->aux)
5616 return;
5618 if (DR_VECT_AUX (dr)->base_misaligned)
5620 tree base_decl = DR_VECT_AUX (dr)->base_decl;
5622 unsigned int align_base_to = DR_TARGET_ALIGNMENT (dr) * BITS_PER_UNIT;
5624 if (decl_in_symtab_p (base_decl))
5625 symtab_node::get (base_decl)->increase_alignment (align_base_to);
5626 else
5628 SET_DECL_ALIGN (base_decl, align_base_to);
5629 DECL_USER_ALIGN (base_decl) = 1;
5631 DR_VECT_AUX (dr)->base_misaligned = false;
5636 /* Function get_group_alias_ptr_type.
5638 Return the alias type for the group starting at FIRST_STMT. */
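/* Illustrative example: for a group of stores such as

     *(int *) (p + 0) = i_1;
     *(float *) (p + 4) = f_2;

   the two DR_REFs have conflicting alias sets, so ptr_type_node (whose
   accesses conservatively alias everything) is returned instead of the
   alias pointer type of the first reference.  */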
5640 static tree
5641 get_group_alias_ptr_type (gimple *first_stmt)
5643 struct data_reference *first_dr, *next_dr;
5644 gimple *next_stmt;
5646 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5647 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first_stmt));
5648 while (next_stmt)
5650 next_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (next_stmt));
5651 if (get_alias_set (DR_REF (first_dr))
5652 != get_alias_set (DR_REF (next_dr)))
5654 if (dump_enabled_p ())
5655 dump_printf_loc (MSG_NOTE, vect_location,
5656 "conflicting alias set types.\n");
5657 return ptr_type_node;
5659 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5661 return reference_alias_ptr_type (DR_REF (first_dr));
5665 /* Function vectorizable_store.
5667 Check if STMT defines a non-scalar data-ref (array/pointer/structure) that
5668 can be vectorized.
5669 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
5670 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
5671 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
5673 static bool
5674 vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
5675 slp_tree slp_node)
5677 tree scalar_dest;
5678 tree data_ref;
5679 tree op;
5680 tree vec_oprnd = NULL_TREE;
5681 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5682 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
5683 tree elem_type;
5684 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5685 struct loop *loop = NULL;
5686 machine_mode vec_mode;
5687 tree dummy;
5688 enum dr_alignment_support alignment_support_scheme;
5689 gimple *def_stmt;
5690 enum vect_def_type dt;
5691 stmt_vec_info prev_stmt_info = NULL;
5692 tree dataref_ptr = NULL_TREE;
5693 tree dataref_offset = NULL_TREE;
5694 gimple *ptr_incr = NULL;
5695 int ncopies;
5696 int j;
5697 gimple *next_stmt, *first_stmt;
5698 bool grouped_store;
5699 unsigned int group_size, i;
5700 vec<tree> oprnds = vNULL;
5701 vec<tree> result_chain = vNULL;
5702 bool inv_p;
5703 tree offset = NULL_TREE;
5704 vec<tree> vec_oprnds = vNULL;
5705 bool slp = (slp_node != NULL);
5706 unsigned int vec_num;
5707 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
5708 vec_info *vinfo = stmt_info->vinfo;
5709 tree aggr_type;
5710 gather_scatter_info gs_info;
5711 enum vect_def_type scatter_src_dt = vect_unknown_def_type;
5712 gimple *new_stmt;
5713 poly_uint64 vf;
5714 vec_load_store_type vls_type;
5715 tree ref_type;
5717 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
5718 return false;
5720 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
5721 && ! vec_stmt)
5722 return false;
5724 /* Is vectorizable store? */
5726 if (!is_gimple_assign (stmt))
5727 return false;
5729 scalar_dest = gimple_assign_lhs (stmt);
5730 if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
5731 && is_pattern_stmt_p (stmt_info))
5732 scalar_dest = TREE_OPERAND (scalar_dest, 0);
5733 if (TREE_CODE (scalar_dest) != ARRAY_REF
5734 && TREE_CODE (scalar_dest) != BIT_FIELD_REF
5735 && TREE_CODE (scalar_dest) != INDIRECT_REF
5736 && TREE_CODE (scalar_dest) != COMPONENT_REF
5737 && TREE_CODE (scalar_dest) != IMAGPART_EXPR
5738 && TREE_CODE (scalar_dest) != REALPART_EXPR
5739 && TREE_CODE (scalar_dest) != MEM_REF)
5740 return false;
5742 /* Cannot have hybrid store SLP -- that would mean storing to the
5743 same location twice. */
5744 gcc_assert (slp == PURE_SLP_STMT (stmt_info));
5746 gcc_assert (gimple_assign_single_p (stmt));
5748 tree vectype = STMT_VINFO_VECTYPE (stmt_info), rhs_vectype = NULL_TREE;
5749 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
5751 if (loop_vinfo)
5753 loop = LOOP_VINFO_LOOP (loop_vinfo);
5754 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
5756 else
5757 vf = 1;
5759 /* Multiple types in SLP are handled by creating the appropriate number of
5760 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5761 case of SLP. */
5762 if (slp)
5763 ncopies = 1;
5764 else
5765 ncopies = vect_get_num_copies (loop_vinfo, vectype);
5767 gcc_assert (ncopies >= 1);
5769 /* FORNOW. This restriction should be relaxed. */
5770 if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
5772 if (dump_enabled_p ())
5773 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5774 "multiple types in nested loop.\n");
5775 return false;
5778 op = gimple_assign_rhs1 (stmt);
5780 /* In the case this is a store from a constant, make sure
5781 native_encode_expr can handle it. */
5782 if (CONSTANT_CLASS_P (op) && native_encode_expr (op, NULL, 64) == 0)
5783 return false;
5785 if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt, &rhs_vectype))
5787 if (dump_enabled_p ())
5788 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5789 "use not simple.\n");
5790 return false;
5793 if (dt == vect_constant_def || dt == vect_external_def)
5794 vls_type = VLS_STORE_INVARIANT;
5795 else
5796 vls_type = VLS_STORE;
5798 if (rhs_vectype && !useless_type_conversion_p (vectype, rhs_vectype))
5799 return false;
5801 elem_type = TREE_TYPE (vectype);
5802 vec_mode = TYPE_MODE (vectype);
5804 /* FORNOW. In some cases can vectorize even if data-type not supported
5805 (e.g. - array initialization with 0). */
5806 if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
5807 return false;
5809 if (!STMT_VINFO_DATA_REF (stmt_info))
5810 return false;
5812 vect_memory_access_type memory_access_type;
5813 if (!get_load_store_type (stmt, vectype, slp, vls_type, ncopies,
5814 &memory_access_type, &gs_info))
5815 return false;
5817 if (!vec_stmt) /* transformation not required. */
5819 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
5820 STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
5821 /* The SLP costs are calculated during SLP analysis. */
5822 if (!PURE_SLP_STMT (stmt_info))
5823 vect_model_store_cost (stmt_info, ncopies, memory_access_type, dt,
5824 NULL, NULL, NULL);
5825 return true;
5827 gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
5829 /* Transform. */
5831 ensure_base_align (dr);
5833 if (memory_access_type == VMAT_GATHER_SCATTER)
5835 tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, op, src;
5836 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
5837 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
5838 tree ptr, mask, var, scale, perm_mask = NULL_TREE;
5839 edge pe = loop_preheader_edge (loop);
5840 gimple_seq seq;
5841 basic_block new_bb;
5842 enum { NARROW, NONE, WIDEN } modifier;
5843 poly_uint64 scatter_off_nunits
5844 = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);
5846 if (known_eq (nunits, scatter_off_nunits))
5847 modifier = NONE;
5848 else if (known_eq (nunits * 2, scatter_off_nunits))
5850 modifier = WIDEN;
5852 /* Currently gathers and scatters are only supported for
5853 fixed-length vectors. */
5854 unsigned int count = scatter_off_nunits.to_constant ();
5855 vec_perm_builder sel (count, count, 1);
5856 for (i = 0; i < (unsigned int) count; ++i)
5857 sel.quick_push (i | (count / 2));
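/* Worked example (illustrative): for count == 8 the loop above builds the
   selector { 4, 5, 6, 7, 4, 5, 6, 7 }, so the permutation applied to the
   offset vector on odd-numbered copies exposes its high half.  */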
5859 vec_perm_indices indices (sel, 1, count);
5860 perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype,
5861 indices);
5862 gcc_assert (perm_mask != NULL_TREE);
5864 else if (known_eq (nunits, scatter_off_nunits * 2))
5866 modifier = NARROW;
5868 /* Currently gathers and scatters are only supported for
5869 fixed-length vectors. */
5870 unsigned int count = nunits.to_constant ();
5871 vec_perm_builder sel (count, count, 1);
5872 for (i = 0; i < (unsigned int) count; ++i)
5873 sel.quick_push (i | (count / 2));
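/* Worked example (illustrative): for count == 8 this is again the selector
   { 4, 5, 6, 7, 4, 5, 6, 7 }; applied to the rhs vector it makes the
   odd-numbered copies scatter its high half.  */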
5875 vec_perm_indices indices (sel, 2, count);
5876 perm_mask = vect_gen_perm_mask_checked (vectype, indices);
5877 gcc_assert (perm_mask != NULL_TREE);
5878 ncopies *= 2;
5880 else
5881 gcc_unreachable ();
5883 rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
5884 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5885 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5886 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5887 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
5888 scaletype = TREE_VALUE (arglist);
5890 gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
5891 && TREE_CODE (rettype) == VOID_TYPE);
5893 ptr = fold_convert (ptrtype, gs_info.base);
5894 if (!is_gimple_min_invariant (ptr))
5896 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
5897 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
5898 gcc_assert (!new_bb);
5901 /* Currently we support only unconditional scatter stores,
5902 so mask should be all ones. */
5903 mask = build_int_cst (masktype, -1);
5904 mask = vect_init_vector (stmt, mask, masktype, NULL);
5906 scale = build_int_cst (scaletype, gs_info.scale);
5908 prev_stmt_info = NULL;
5909 for (j = 0; j < ncopies; ++j)
5911 if (j == 0)
5913 src = vec_oprnd1
5914 = vect_get_vec_def_for_operand (gimple_assign_rhs1 (stmt), stmt);
5915 op = vec_oprnd0
5916 = vect_get_vec_def_for_operand (gs_info.offset, stmt);
5918 else if (modifier != NONE && (j & 1))
5920 if (modifier == WIDEN)
5922 src = vec_oprnd1
5923 = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
5924 op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
5925 stmt, gsi);
5927 else if (modifier == NARROW)
5929 src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask,
5930 stmt, gsi);
5931 op = vec_oprnd0
5932 = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt,
5933 vec_oprnd0);
5935 else
5936 gcc_unreachable ();
5938 else
5940 src = vec_oprnd1
5941 = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
5942 op = vec_oprnd0
5943 = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt,
5944 vec_oprnd0);
5947 if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
5949 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src)),
5950 TYPE_VECTOR_SUBPARTS (srctype)));
5951 var = vect_get_new_ssa_name (srctype, vect_simple_var);
5952 src = build1 (VIEW_CONVERT_EXPR, srctype, src);
5953 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
5954 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5955 src = var;
5958 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
5960 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
5961 TYPE_VECTOR_SUBPARTS (idxtype)));
5962 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
5963 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
5964 new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
5965 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5966 op = var;
5969 new_stmt
5970 = gimple_build_call (gs_info.decl, 5, ptr, mask, op, src, scale);
5972 vect_finish_stmt_generation (stmt, new_stmt, gsi);
5974 if (prev_stmt_info == NULL)
5975 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
5976 else
5977 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
5978 prev_stmt_info = vinfo_for_stmt (new_stmt);
5980 return true;
5983 grouped_store = STMT_VINFO_GROUPED_ACCESS (stmt_info);
5984 if (grouped_store)
5986 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
5987 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
5988 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
5990 GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;
5992 /* FORNOW */
5993 gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
5995 /* We vectorize all the stmts of the interleaving group when we
5996 reach the last stmt in the group. */
5997 if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
5998 < GROUP_SIZE (vinfo_for_stmt (first_stmt))
5999 && !slp)
6001 *vec_stmt = NULL;
6002 return true;
6005 if (slp)
6007 grouped_store = false;
6008 /* VEC_NUM is the number of vect stmts to be created for this
6009 group. */
6010 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6011 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
6012 gcc_assert (GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt)) == first_stmt);
6013 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
6014 op = gimple_assign_rhs1 (first_stmt);
6016 else
6017 /* VEC_NUM is the number of vect stmts to be created for this
6018 group. */
6019 vec_num = group_size;
6021 ref_type = get_group_alias_ptr_type (first_stmt);
6023 else
6025 first_stmt = stmt;
6026 first_dr = dr;
6027 group_size = vec_num = 1;
6028 ref_type = reference_alias_ptr_type (DR_REF (first_dr));
6031 if (dump_enabled_p ())
6032 dump_printf_loc (MSG_NOTE, vect_location,
6033 "transform store. ncopies = %d\n", ncopies);
6035 if (memory_access_type == VMAT_ELEMENTWISE
6036 || memory_access_type == VMAT_STRIDED_SLP)
6038 gimple_stmt_iterator incr_gsi;
6039 bool insert_after;
6040 gimple *incr;
6041 tree offvar;
6042 tree ivstep;
6043 tree running_off;
6044 gimple_seq stmts = NULL;
6045 tree stride_base, stride_step, alias_off;
6046 tree vec_oprnd;
6047 unsigned int g;
6048 /* Checked by get_load_store_type. */
6049 unsigned int const_nunits = nunits.to_constant ();
6051 gcc_assert (!nested_in_vect_loop_p (loop, stmt));
6053 stride_base
6054 = fold_build_pointer_plus
6055 (unshare_expr (DR_BASE_ADDRESS (first_dr)),
6056 size_binop (PLUS_EXPR,
6057 convert_to_ptrofftype (unshare_expr (DR_OFFSET (first_dr))),
6058 convert_to_ptrofftype (DR_INIT (first_dr))));
6059 stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (first_dr)));
6061 /* For a store with loop-invariant (but other than power-of-2)
6062 stride (i.e. not a grouped access) like so:
6064 for (i = 0; i < n; i += stride)
6065 array[i] = ...;
6067 we generate a new induction variable and new stores from
6068 the components of the (vectorized) rhs:
6070 for (j = 0; ; j += VF*stride)
6071 vectemp = ...;
6072 tmp1 = vectemp[0];
6073 array[j] = tmp1;
6074 tmp2 = vectemp[1];
6075 array[j + stride] = tmp2;
6079 unsigned nstores = const_nunits;
6080 unsigned lnel = 1;
6081 tree ltype = elem_type;
6082 tree lvectype = vectype;
6083 if (slp)
6085 if (group_size < const_nunits
6086 && const_nunits % group_size == 0)
6088 nstores = const_nunits / group_size;
6089 lnel = group_size;
6090 ltype = build_vector_type (elem_type, group_size);
6091 lvectype = vectype;
6093 /* First check if vec_extract optab doesn't support extraction
6094 of vector elts directly. */
6095 scalar_mode elmode = SCALAR_TYPE_MODE (elem_type);
6096 machine_mode vmode;
6097 if (!mode_for_vector (elmode, group_size).exists (&vmode)
6098 || !VECTOR_MODE_P (vmode)
6099 || (convert_optab_handler (vec_extract_optab,
6100 TYPE_MODE (vectype), vmode)
6101 == CODE_FOR_nothing))
6103 /* Try to avoid emitting an extract of vector elements
6104 by performing the extracts using an integer type of the
6105 same size, extracting from a vector of those and then
6106 re-interpreting it as the original vector type if
6107 supported. */
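/* Concrete case (illustrative): a group of two SFmode elements stored from
   a V4SF vector when V2SF cannot be extracted directly.  Then LSIZE below
   is 64 and, provided DImode can be extracted from V2DI, the vector is
   punned to a vector of two 64-bit integers and each group of two floats
   is stored as a single DImode value.  */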
6108 unsigned lsize
6109 = group_size * GET_MODE_BITSIZE (elmode);
6110 elmode = int_mode_for_size (lsize, 0).require ();
6111 unsigned int lnunits = const_nunits / group_size;
6112 /* If we can't construct such a vector fall back to
6113 element extracts from the original vector type and
6114 element size stores. */
6115 if (mode_for_vector (elmode, lnunits).exists (&vmode)
6116 && VECTOR_MODE_P (vmode)
6117 && (convert_optab_handler (vec_extract_optab,
6118 vmode, elmode)
6119 != CODE_FOR_nothing))
6121 nstores = lnunits;
6122 lnel = group_size;
6123 ltype = build_nonstandard_integer_type (lsize, 1);
6124 lvectype = build_vector_type (ltype, nstores);
6126 /* Else fall back to vector extraction anyway.
6127 Fewer stores are more important than avoiding spilling
6128 of the vector we extract from. Compared to the
6129 construction case in vectorizable_load no store-forwarding
6130 issue exists here for reasonable archs. */
6133 else if (group_size >= const_nunits
6134 && group_size % const_nunits == 0)
6136 nstores = 1;
6137 lnel = const_nunits;
6138 ltype = vectype;
6139 lvectype = vectype;
6141 ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
6142 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6145 ivstep = stride_step;
6146 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
6147 build_int_cst (TREE_TYPE (ivstep), vf));
6149 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
6151 create_iv (stride_base, ivstep, NULL,
6152 loop, &incr_gsi, insert_after,
6153 &offvar, NULL);
6154 incr = gsi_stmt (incr_gsi);
6155 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
6157 stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
6158 if (stmts)
6159 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
6161 prev_stmt_info = NULL;
6162 alias_off = build_int_cst (ref_type, 0);
6163 next_stmt = first_stmt;
6164 for (g = 0; g < group_size; g++)
6166 running_off = offvar;
6167 if (g)
6169 tree size = TYPE_SIZE_UNIT (ltype);
6170 tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
6171 size);
6172 tree newoff = copy_ssa_name (running_off, NULL);
6173 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6174 running_off, pos);
6175 vect_finish_stmt_generation (stmt, incr, gsi);
6176 running_off = newoff;
6178 unsigned int group_el = 0;
6179 unsigned HOST_WIDE_INT
6180 elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
6181 for (j = 0; j < ncopies; j++)
6183 /* We've set op and dt above, from gimple_assign_rhs1(stmt),
6184 and first_stmt == stmt. */
6185 if (j == 0)
6187 if (slp)
6189 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL,
6190 slp_node);
6191 vec_oprnd = vec_oprnds[0];
6193 else
6195 gcc_assert (gimple_assign_single_p (next_stmt));
6196 op = gimple_assign_rhs1 (next_stmt);
6197 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
6200 else
6202 if (slp)
6203 vec_oprnd = vec_oprnds[j];
6204 else
6206 vect_is_simple_use (vec_oprnd, vinfo, &def_stmt, &dt);
6207 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
6210 /* Pun the vector to extract from if necessary. */
6211 if (lvectype != vectype)
6213 tree tem = make_ssa_name (lvectype);
6214 gimple *pun
6215 = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
6216 lvectype, vec_oprnd));
6217 vect_finish_stmt_generation (stmt, pun, gsi);
6218 vec_oprnd = tem;
6220 for (i = 0; i < nstores; i++)
6222 tree newref, newoff;
6223 gimple *incr, *assign;
6224 tree size = TYPE_SIZE (ltype);
6225 /* Extract the i'th component. */
6226 tree pos = fold_build2 (MULT_EXPR, bitsizetype,
6227 bitsize_int (i), size);
6228 tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
6229 size, pos);
6231 elem = force_gimple_operand_gsi (gsi, elem, true,
6232 NULL_TREE, true,
6233 GSI_SAME_STMT);
6235 tree this_off = build_int_cst (TREE_TYPE (alias_off),
6236 group_el * elsz);
6237 newref = build2 (MEM_REF, ltype,
6238 running_off, this_off);
6240 /* And store it to *running_off. */
6241 assign = gimple_build_assign (newref, elem);
6242 vect_finish_stmt_generation (stmt, assign, gsi);
6244 group_el += lnel;
6245 if (! slp
6246 || group_el == group_size)
6248 newoff = copy_ssa_name (running_off, NULL);
6249 incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
6250 running_off, stride_step);
6251 vect_finish_stmt_generation (stmt, incr, gsi);
6253 running_off = newoff;
6254 group_el = 0;
6256 if (g == group_size - 1
6257 && !slp)
6259 if (j == 0 && i == 0)
6260 STMT_VINFO_VEC_STMT (stmt_info)
6261 = *vec_stmt = assign;
6262 else
6263 STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign;
6264 prev_stmt_info = vinfo_for_stmt (assign);
6268 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
6269 if (slp)
6270 break;
6273 vec_oprnds.release ();
6274 return true;
6277 auto_vec<tree> dr_chain (group_size);
6278 oprnds.create (group_size);
6280 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
6281 gcc_assert (alignment_support_scheme);
6282 /* Targets with store-lane instructions must not require explicit
6283 realignment. */
6284 gcc_assert (memory_access_type != VMAT_LOAD_STORE_LANES
6285 || alignment_support_scheme == dr_aligned
6286 || alignment_support_scheme == dr_unaligned_supported);
6288 if (memory_access_type == VMAT_CONTIGUOUS_DOWN
6289 || memory_access_type == VMAT_CONTIGUOUS_REVERSE)
6290 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
6292 if (memory_access_type == VMAT_LOAD_STORE_LANES)
6293 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
6294 else
6295 aggr_type = vectype;
6297 /* In case the vectorization factor (VF) is bigger than the number
6298 of elements that we can fit in a vectype (nunits), we have to generate
6299 more than one vector stmt - i.e., we need to "unroll" the
6300 vector stmt by a factor VF/nunits. For more details see documentation in
6301 vect_get_vec_def_for_copy_stmt. */
6303 /* In case of interleaving (non-unit grouped access):
6305 S1: &base + 2 = x2
6306 S2: &base = x0
6307 S3: &base + 1 = x1
6308 S4: &base + 3 = x3
6310 We create vectorized stores starting from base address (the access of the
6311 first stmt in the chain (S2 in the above example), when the last store stmt
6312 of the chain (S4) is reached:
6314 VS1: &base = vx2
6315 VS2: &base + vec_size*1 = vx0
6316 VS3: &base + vec_size*2 = vx1
6317 VS4: &base + vec_size*3 = vx3
6319 Then permutation statements are generated:
6321 VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
6322 VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
6325 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
6326 (the order of the data-refs in the output of vect_permute_store_chain
6327 corresponds to the order of scalar stmts in the interleaving chain - see
6328 the documentation of vect_permute_store_chain()).
6330 In case of both multiple types and interleaving, the vector stores and
6331 permutation stmts above are created for every copy. The result vector stmts are
6332 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
6333 STMT_VINFO_RELATED_STMT for the next copies.
6336 prev_stmt_info = NULL;
6337 for (j = 0; j < ncopies; j++)
6340 if (j == 0)
6342 if (slp)
6344 /* Get vectorized arguments for SLP_NODE. */
6345 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
6346 NULL, slp_node);
6348 vec_oprnd = vec_oprnds[0];
6350 else
6352 /* For interleaved stores we collect vectorized defs for all the
6353 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
6354 used as an input to vect_permute_store_chain(), and OPRNDS as
6355 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
6357 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
6358 OPRNDS are of size 1. */
6359 next_stmt = first_stmt;
6360 for (i = 0; i < group_size; i++)
6362 /* Since gaps are not supported for interleaved stores,
6363 GROUP_SIZE is the exact number of stmts in the chain.
6364 Therefore, NEXT_STMT can't be NULL_TREE. In case that
6365 there is no interleaving, GROUP_SIZE is 1, and only one
6366 iteration of the loop will be executed. */
6367 gcc_assert (next_stmt
6368 && gimple_assign_single_p (next_stmt));
6369 op = gimple_assign_rhs1 (next_stmt);
6371 vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
6372 dr_chain.quick_push (vec_oprnd);
6373 oprnds.quick_push (vec_oprnd);
6374 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
6378 /* We should have caught mismatched types earlier. */
6379 gcc_assert (useless_type_conversion_p (vectype,
6380 TREE_TYPE (vec_oprnd)));
6381 bool simd_lane_access_p
6382 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
6383 if (simd_lane_access_p
6384 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
6385 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
6386 && integer_zerop (DR_OFFSET (first_dr))
6387 && integer_zerop (DR_INIT (first_dr))
6388 && alias_sets_conflict_p (get_alias_set (aggr_type),
6389 get_alias_set (TREE_TYPE (ref_type))))
6391 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
6392 dataref_offset = build_int_cst (ref_type, 0);
6393 inv_p = false;
6395 else
6396 dataref_ptr
6397 = vect_create_data_ref_ptr (first_stmt, aggr_type,
6398 simd_lane_access_p ? loop : NULL,
6399 offset, &dummy, gsi, &ptr_incr,
6400 simd_lane_access_p, &inv_p);
6401 gcc_assert (bb_vinfo || !inv_p);
6403 else
6405 /* For interleaved stores we created vectorized defs for all the
6406 defs stored in OPRNDS in the previous iteration (previous copy).
6407 DR_CHAIN is then used as an input to vect_permute_store_chain(),
6408 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
6409 next copy.
6410 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
6411 OPRNDS are of size 1. */
6412 for (i = 0; i < group_size; i++)
6414 op = oprnds[i];
6415 vect_is_simple_use (op, vinfo, &def_stmt, &dt);
6416 vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
6417 dr_chain[i] = vec_oprnd;
6418 oprnds[i] = vec_oprnd;
6420 if (dataref_offset)
6421 dataref_offset
6422 = int_const_binop (PLUS_EXPR, dataref_offset,
6423 TYPE_SIZE_UNIT (aggr_type));
6424 else
6425 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
6426 TYPE_SIZE_UNIT (aggr_type));
6429 if (memory_access_type == VMAT_LOAD_STORE_LANES)
6431 tree vec_array;
6433 /* Combine all the vectors into an array. */
6434 vec_array = create_vector_array (vectype, vec_num);
6435 for (i = 0; i < vec_num; i++)
6437 vec_oprnd = dr_chain[i];
6438 write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
6441 /* Emit:
6442 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
6443 data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
6444 gcall *call = gimple_build_call_internal (IFN_STORE_LANES, 1,
6445 vec_array);
6446 gimple_call_set_lhs (call, data_ref);
6447 gimple_call_set_nothrow (call, true);
6448 new_stmt = call;
6449 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6451 else
6453 new_stmt = NULL;
6454 if (grouped_store)
6456 if (j == 0)
6457 result_chain.create (group_size);
6458 /* Permute. */
6459 vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
6460 &result_chain);
6463 next_stmt = first_stmt;
6464 for (i = 0; i < vec_num; i++)
6466 unsigned align, misalign;
6468 if (i > 0)
6469 /* Bump the vector pointer. */
6470 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
6471 stmt, NULL_TREE);
6473 if (slp)
6474 vec_oprnd = vec_oprnds[i];
6475 else if (grouped_store)
6476 /* For grouped stores vectorized defs are interleaved in
6477 vect_permute_store_chain(). */
6478 vec_oprnd = result_chain[i];
6480 data_ref = fold_build2 (MEM_REF, vectype,
6481 dataref_ptr,
6482 dataref_offset
6483 ? dataref_offset
6484 : build_int_cst (ref_type, 0));
6485 align = DR_TARGET_ALIGNMENT (first_dr);
6486 if (aligned_access_p (first_dr))
6487 misalign = 0;
6488 else if (DR_MISALIGNMENT (first_dr) == -1)
6490 align = dr_alignment (vect_dr_behavior (first_dr));
6491 misalign = 0;
6492 TREE_TYPE (data_ref)
6493 = build_aligned_type (TREE_TYPE (data_ref),
6494 align * BITS_PER_UNIT);
6496 else
6498 TREE_TYPE (data_ref)
6499 = build_aligned_type (TREE_TYPE (data_ref),
6500 TYPE_ALIGN (elem_type));
6501 misalign = DR_MISALIGNMENT (first_dr);
6503 if (dataref_offset == NULL_TREE
6504 && TREE_CODE (dataref_ptr) == SSA_NAME)
6505 set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
6506 misalign);
6508 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
6510 tree perm_mask = perm_mask_for_reverse (vectype);
6511 tree perm_dest
6512 = vect_create_destination_var (gimple_assign_rhs1 (stmt),
6513 vectype);
6514 tree new_temp = make_ssa_name (perm_dest);
6516 /* Generate the permute statement. */
6517 gimple *perm_stmt
6518 = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
6519 vec_oprnd, perm_mask);
6520 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
6522 perm_stmt = SSA_NAME_DEF_STMT (new_temp);
6523 vec_oprnd = new_temp;
6526 /* Arguments are ready. Create the new vector stmt. */
6527 new_stmt = gimple_build_assign (data_ref, vec_oprnd);
6528 vect_finish_stmt_generation (stmt, new_stmt, gsi);
6530 if (slp)
6531 continue;
6533 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
6534 if (!next_stmt)
6535 break;
6538 if (!slp)
6540 if (j == 0)
6541 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
6542 else
6543 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
6544 prev_stmt_info = vinfo_for_stmt (new_stmt);
6548 oprnds.release ();
6549 result_chain.release ();
6550 vec_oprnds.release ();
6552 return true;
6555 /* Given a vector type VECTYPE, turns permutation SEL into the equivalent
6556 VECTOR_CST mask. No checks are made that the target platform supports the
6557 mask, so callers may wish to test can_vec_perm_const_p separately, or use
6558 vect_gen_perm_mask_checked. */
6560 tree
6561 vect_gen_perm_mask_any (tree vectype, const vec_perm_indices &sel)
6563 tree mask_type;
6565 poly_uint64 nunits = sel.length ();
6566 gcc_assert (known_eq (nunits, TYPE_VECTOR_SUBPARTS (vectype)));
6568 mask_type = build_vector_type (ssizetype, nunits);
6569 return vec_perm_indices_to_tree (mask_type, sel);
6572 /* Checked version of vect_gen_perm_mask_any. Asserts can_vec_perm_const_p,
6573 i.e. that the target supports the pattern _for arbitrary input vectors_. */
6575 tree
6576 vect_gen_perm_mask_checked (tree vectype, const vec_perm_indices &sel)
6578 gcc_assert (can_vec_perm_const_p (TYPE_MODE (vectype), sel));
6579 return vect_gen_perm_mask_any (vectype, sel);
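/* Usage sketch (illustrative only, not taken from an actual caller): to
   build a mask that reverses a four-element vector type VECTYPE one could
   write

     vec_perm_builder sel (4, 4, 1);
     for (unsigned int i = 0; i < 4; ++i)
       sel.quick_push (3 - i);
     vec_perm_indices indices (sel, 1, 4);
     tree mask = vect_gen_perm_mask_checked (vectype, indices);

   which asserts that the target can permute VECTYPE with { 3, 2, 1, 0 }
   and returns that selector as a VECTOR_CST of ssizetype elements.  */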
6582 /* Given vector variables X and Y that were generated for the scalar
6583 STMT, generate instructions to permute the vector elements of X and Y
6584 using permutation mask MASK_VEC, insert them at *GSI and return the
6585 permuted vector variable. */
6587 static tree
6588 permute_vec_elements (tree x, tree y, tree mask_vec, gimple *stmt,
6589 gimple_stmt_iterator *gsi)
6591 tree vectype = TREE_TYPE (x);
6592 tree perm_dest, data_ref;
6593 gimple *perm_stmt;
6595 perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
6596 data_ref = make_ssa_name (perm_dest);
6598 /* Generate the permute statement. */
6599 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
6600 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
6602 return data_ref;
6605 /* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
6606 inserting them on the loop's preheader edge. Returns true if we
6607 were successful in doing so (and thus STMT can then be moved),
6608 otherwise returns false. */
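/* Illustrative example (hypothetical SSA names): if STMT is the load

     _3 = *ptr_2;

   and ptr_2 is defined inside LOOP by  ptr_2 = base_1 + 16;  where base_1
   is defined outside the loop, that single definition is moved onto the
   preheader edge, after which STMT depends only on loop-invariant values
   and can itself be moved by the caller.  */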
6610 static bool
6611 hoist_defs_of_uses (gimple *stmt, struct loop *loop)
6613 ssa_op_iter i;
6614 tree op;
6615 bool any = false;
6617 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
6619 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
6620 if (!gimple_nop_p (def_stmt)
6621 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
6623 /* Make sure we don't need to recurse. While we could do
6624 so in simple cases, when there are more complex use webs
6625 we don't have an easy way to preserve stmt order to fulfil
6626 dependencies within them. */
6627 tree op2;
6628 ssa_op_iter i2;
6629 if (gimple_code (def_stmt) == GIMPLE_PHI)
6630 return false;
6631 FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
6633 gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
6634 if (!gimple_nop_p (def_stmt2)
6635 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
6636 return false;
6638 any = true;
6642 if (!any)
6643 return true;
6645 FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
6647 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
6648 if (!gimple_nop_p (def_stmt)
6649 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
6651 gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
6652 gsi_remove (&gsi, false);
6653 gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
6657 return true;
6660 /* vectorizable_load.
6662 Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
6663 can be vectorized.
6664 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
6665 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
6666 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
6668 static bool
6669 vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
6670 slp_tree slp_node, slp_instance slp_node_instance)
6672 tree scalar_dest;
6673 tree vec_dest = NULL;
6674 tree data_ref = NULL;
6675 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
6676 stmt_vec_info prev_stmt_info;
6677 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6678 struct loop *loop = NULL;
6679 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
6680 bool nested_in_vect_loop = false;
6681 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
6682 tree elem_type;
6683 tree new_temp;
6684 machine_mode mode;
6685 gimple *new_stmt = NULL;
6686 tree dummy;
6687 enum dr_alignment_support alignment_support_scheme;
6688 tree dataref_ptr = NULL_TREE;
6689 tree dataref_offset = NULL_TREE;
6690 gimple *ptr_incr = NULL;
6691 int ncopies;
6692 int i, j;
6693 unsigned int group_size;
6694 poly_uint64 group_gap_adj;
6695 tree msq = NULL_TREE, lsq;
6696 tree offset = NULL_TREE;
6697 tree byte_offset = NULL_TREE;
6698 tree realignment_token = NULL_TREE;
6699 gphi *phi = NULL;
6700 vec<tree> dr_chain = vNULL;
6701 bool grouped_load = false;
6702 gimple *first_stmt;
6703 gimple *first_stmt_for_drptr = NULL;
6704 bool inv_p;
6705 bool compute_in_loop = false;
6706 struct loop *at_loop;
6707 int vec_num;
6708 bool slp = (slp_node != NULL);
6709 bool slp_perm = false;
6710 enum tree_code code;
6711 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
6712 poly_uint64 vf;
6713 tree aggr_type;
6714 gather_scatter_info gs_info;
6715 vec_info *vinfo = stmt_info->vinfo;
6716 tree ref_type;
6718 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
6719 return false;
6721 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
6722 && ! vec_stmt)
6723 return false;
6725 /* Is vectorizable load? */
6726 if (!is_gimple_assign (stmt))
6727 return false;
6729 scalar_dest = gimple_assign_lhs (stmt);
6730 if (TREE_CODE (scalar_dest) != SSA_NAME)
6731 return false;
6733 code = gimple_assign_rhs_code (stmt);
6734 if (code != ARRAY_REF
6735 && code != BIT_FIELD_REF
6736 && code != INDIRECT_REF
6737 && code != COMPONENT_REF
6738 && code != IMAGPART_EXPR
6739 && code != REALPART_EXPR
6740 && code != MEM_REF
6741 && TREE_CODE_CLASS (code) != tcc_declaration)
6742 return false;
6744 if (!STMT_VINFO_DATA_REF (stmt_info))
6745 return false;
6747 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
6748 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
6750 if (loop_vinfo)
6752 loop = LOOP_VINFO_LOOP (loop_vinfo);
6753 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
6754 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
6756 else
6757 vf = 1;
6759 /* Multiple types in SLP are handled by creating the appropriate number of
6760 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
6761 case of SLP. */
6762 if (slp)
6763 ncopies = 1;
6764 else
6765 ncopies = vect_get_num_copies (loop_vinfo, vectype);
6767 gcc_assert (ncopies >= 1);
6769 /* FORNOW. This restriction should be relaxed. */
6770 if (nested_in_vect_loop && ncopies > 1)
6772 if (dump_enabled_p ())
6773 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6774 "multiple types in nested loop.\n");
6775 return false;
6778 /* Invalidate assumptions made by dependence analysis when vectorization
6779 on the unrolled body effectively re-orders stmts. */
6780 if (ncopies > 1
6781 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
6782 && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
6783 STMT_VINFO_MIN_NEG_DIST (stmt_info)))
6785 if (dump_enabled_p ())
6786 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6787 "cannot perform implicit CSE when unrolling "
6788 "with negative dependence distance\n");
6789 return false;
6792 elem_type = TREE_TYPE (vectype);
6793 mode = TYPE_MODE (vectype);
6795 /* FORNOW. In some cases can vectorize even if data-type not supported
6796 (e.g. - data copies). */
6797 if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
6799 if (dump_enabled_p ())
6800 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6801 "Aligned load, but unsupported type.\n");
6802 return false;
6805 /* Check if the load is a part of an interleaving chain. */
6806 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
6808 grouped_load = true;
6809 /* FORNOW */
6810 gcc_assert (!nested_in_vect_loop);
6811 gcc_assert (!STMT_VINFO_GATHER_SCATTER_P (stmt_info));
6813 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
6814 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
6816 if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
6817 slp_perm = true;
6819 /* Invalidate assumptions made by dependence analysis when vectorization
6820 on the unrolled body effectively re-orders stmts. */
6821 if (!PURE_SLP_STMT (stmt_info)
6822 && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
6823 && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
6824 STMT_VINFO_MIN_NEG_DIST (stmt_info)))
6826 if (dump_enabled_p ())
6827 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6828 "cannot perform implicit CSE when performing "
6829 "group loads with negative dependence distance\n");
6830 return false;
6833 /* Similarly when the stmt is a load that is both part of a SLP
6834 instance and a loop vectorized stmt via the same-dr mechanism
6835 we have to give up. */
6836 if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)
6837 && (STMT_SLP_TYPE (stmt_info)
6838 != STMT_SLP_TYPE (vinfo_for_stmt
6839 (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)))))
6841 if (dump_enabled_p ())
6842 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6843 "conflicting SLP types for CSEd load\n");
6844 return false;
6848 vect_memory_access_type memory_access_type;
6849 if (!get_load_store_type (stmt, vectype, slp, VLS_LOAD, ncopies,
6850 &memory_access_type, &gs_info))
6851 return false;
6853 if (!vec_stmt) /* transformation not required. */
6855 if (!slp)
6856 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
6857 STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
6858 /* The SLP costs are calculated during SLP analysis. */
6859 if (!PURE_SLP_STMT (stmt_info))
6860 vect_model_load_cost (stmt_info, ncopies, memory_access_type,
6861 NULL, NULL, NULL);
6862 return true;
6865 if (!slp)
6866 gcc_assert (memory_access_type
6867 == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
6869 if (dump_enabled_p ())
6870 dump_printf_loc (MSG_NOTE, vect_location,
6871 "transform load. ncopies = %d\n", ncopies);
6873 /* Transform. */
6875 ensure_base_align (dr);
6877 if (memory_access_type == VMAT_GATHER_SCATTER)
6879 tree vec_oprnd0 = NULL_TREE, op;
6880 tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
6881 tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
6882 tree ptr, mask, var, scale, merge, perm_mask = NULL_TREE, prev_res = NULL_TREE;
6883 edge pe = loop_preheader_edge (loop);
6884 gimple_seq seq;
6885 basic_block new_bb;
6886 enum { NARROW, NONE, WIDEN } modifier;
6887 poly_uint64 gather_off_nunits
6888 = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);
6890 if (known_eq (nunits, gather_off_nunits))
6891 modifier = NONE;
6892 else if (known_eq (nunits * 2, gather_off_nunits))
6894 modifier = WIDEN;
6896 /* Currently widening gathers are only supported for
6897 fixed-length vectors. */
6898 int count = gather_off_nunits.to_constant ();
6899 vec_perm_builder sel (count, count, 1);
6900 for (i = 0; i < count; ++i)
6901 sel.quick_push (i | (count / 2));
6903 vec_perm_indices indices (sel, 1, count);
6904 perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype,
6905 indices);
6907 else if (known_eq (nunits, gather_off_nunits * 2))
6909 modifier = NARROW;
6911 /* Currently narrowing gathers are only supported for
6912 fixed-length vectors. */
6913 int count = nunits.to_constant ();
6914 vec_perm_builder sel (count, count, 1);
6915 for (i = 0; i < count; ++i)
6916 sel.quick_push (i < count / 2 ? i : i + count / 2);
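/* Worked example (illustrative): for count == 8 the loop above builds the
   selector { 0, 1, 2, 3, 8, 9, 10, 11 }, i.e. the low halves of two
   consecutive gather results are concatenated into one vector of the
   final vectype.  */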
6918 vec_perm_indices indices (sel, 2, count);
6919 perm_mask = vect_gen_perm_mask_checked (vectype, indices);
6920 ncopies *= 2;
6922 else
6923 gcc_unreachable ();
6925 rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
6926 srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6927 ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6928 idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6929 masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
6930 scaletype = TREE_VALUE (arglist);
6931 gcc_checking_assert (types_compatible_p (srctype, rettype));
6933 vec_dest = vect_create_destination_var (scalar_dest, vectype);
6935 ptr = fold_convert (ptrtype, gs_info.base);
6936 if (!is_gimple_min_invariant (ptr))
6938 ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
6939 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
6940 gcc_assert (!new_bb);
6943 /* Currently we support only unconditional gather loads,
6944 so mask should be all ones. */
6945 if (TREE_CODE (masktype) == INTEGER_TYPE)
6946 mask = build_int_cst (masktype, -1);
6947 else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
6949 mask = build_int_cst (TREE_TYPE (masktype), -1);
6950 mask = build_vector_from_val (masktype, mask);
6951 mask = vect_init_vector (stmt, mask, masktype, NULL);
6953 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
6955 REAL_VALUE_TYPE r;
6956 long tmp[6];
6957 for (j = 0; j < 6; ++j)
6958 tmp[j] = -1;
6959 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
6960 mask = build_real (TREE_TYPE (masktype), r);
6961 mask = build_vector_from_val (masktype, mask);
6962 mask = vect_init_vector (stmt, mask, masktype, NULL);
6964 else
6965 gcc_unreachable ();
6967 scale = build_int_cst (scaletype, gs_info.scale);
6969 if (TREE_CODE (TREE_TYPE (rettype)) == INTEGER_TYPE)
6970 merge = build_int_cst (TREE_TYPE (rettype), 0);
6971 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype)))
6973 REAL_VALUE_TYPE r;
6974 long tmp[6];
6975 for (j = 0; j < 6; ++j)
6976 tmp[j] = 0;
6977 real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (rettype)));
6978 merge = build_real (TREE_TYPE (rettype), r);
6980 else
6981 gcc_unreachable ();
6982 merge = build_vector_from_val (rettype, merge);
6983 merge = vect_init_vector (stmt, merge, rettype, NULL);
6985 prev_stmt_info = NULL;
6986 for (j = 0; j < ncopies; ++j)
6988 if (modifier == WIDEN && (j & 1))
6989 op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
6990 perm_mask, stmt, gsi);
6991 else if (j == 0)
6992 op = vec_oprnd0
6993 = vect_get_vec_def_for_operand (gs_info.offset, stmt);
6994 else
6995 op = vec_oprnd0
6996 = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt, vec_oprnd0);
6998 if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
7000 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
7001 TYPE_VECTOR_SUBPARTS (idxtype)));
7002 var = vect_get_new_ssa_name (idxtype, vect_simple_var);
7003 op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
7004 new_stmt
7005 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
7006 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7007 op = var;
7010 new_stmt
7011 = gimple_build_call (gs_info.decl, 5, merge, ptr, op, mask, scale);
7013 if (!useless_type_conversion_p (vectype, rettype))
7015 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype),
7016 TYPE_VECTOR_SUBPARTS (rettype)));
7017 op = vect_get_new_ssa_name (rettype, vect_simple_var);
7018 gimple_call_set_lhs (new_stmt, op);
7019 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7020 var = make_ssa_name (vec_dest);
7021 op = build1 (VIEW_CONVERT_EXPR, vectype, op);
7022 new_stmt
7023 = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
7025 else
7027 var = make_ssa_name (vec_dest, new_stmt);
7028 gimple_call_set_lhs (new_stmt, var);
7031 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7033 if (modifier == NARROW)
7035 if ((j & 1) == 0)
7037 prev_res = var;
7038 continue;
7040 var = permute_vec_elements (prev_res, var,
7041 perm_mask, stmt, gsi);
7042 new_stmt = SSA_NAME_DEF_STMT (var);
7045 if (prev_stmt_info == NULL)
7046 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7047 else
7048 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7049 prev_stmt_info = vinfo_for_stmt (new_stmt);
7051 return true;
7054 if (memory_access_type == VMAT_ELEMENTWISE
7055 || memory_access_type == VMAT_STRIDED_SLP)
7057 gimple_stmt_iterator incr_gsi;
7058 bool insert_after;
7059 gimple *incr;
7060 tree offvar;
7061 tree ivstep;
7062 tree running_off;
7063 vec<constructor_elt, va_gc> *v = NULL;
7064 gimple_seq stmts = NULL;
7065 tree stride_base, stride_step, alias_off;
7066 /* Checked by get_load_store_type. */
7067 unsigned int const_nunits = nunits.to_constant ();
7069 gcc_assert (!nested_in_vect_loop);
7071 if (slp && grouped_load)
7073 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
7074 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
7075 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
7076 ref_type = get_group_alias_ptr_type (first_stmt);
7078 else
7080 first_stmt = stmt;
7081 first_dr = dr;
7082 group_size = 1;
7083 ref_type = reference_alias_ptr_type (DR_REF (first_dr));
7086 stride_base
7087 = fold_build_pointer_plus
7088 (DR_BASE_ADDRESS (first_dr),
7089 size_binop (PLUS_EXPR,
7090 convert_to_ptrofftype (DR_OFFSET (first_dr)),
7091 convert_to_ptrofftype (DR_INIT (first_dr))));
7092 stride_step = fold_convert (sizetype, DR_STEP (first_dr));
7094 /* For a load with loop-invariant (but other than power-of-2)
7095 stride (i.e. not a grouped access) like so:
7097 for (i = 0; i < n; i += stride)
7098 ... = array[i];
7100 we generate a new induction variable and new accesses to
7101 form a new vector (or vectors, depending on ncopies):
7103 for (j = 0; ; j += VF*stride)
7104 tmp1 = array[j];
7105 tmp2 = array[j + stride];
7107 vectemp = {tmp1, tmp2, ...}
7110 ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
7111 build_int_cst (TREE_TYPE (stride_step), vf));
7113 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
7115 create_iv (unshare_expr (stride_base), unshare_expr (ivstep), NULL,
7116 loop, &incr_gsi, insert_after,
7117 &offvar, NULL);
7118 incr = gsi_stmt (incr_gsi);
7119 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));
7121 stride_step = force_gimple_operand (unshare_expr (stride_step),
7122 &stmts, true, NULL_TREE);
7123 if (stmts)
7124 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
7126 prev_stmt_info = NULL;
7127 running_off = offvar;
7128 alias_off = build_int_cst (ref_type, 0);
7129 int nloads = const_nunits;
7130 int lnel = 1;
7131 tree ltype = TREE_TYPE (vectype);
7132 tree lvectype = vectype;
7133 auto_vec<tree> dr_chain;
7134 if (memory_access_type == VMAT_STRIDED_SLP)
7136 if (group_size < const_nunits)
7138 /* First check if vec_init optab supports construction from
7139 vector elts directly. */
7140 scalar_mode elmode = SCALAR_TYPE_MODE (TREE_TYPE (vectype));
7141 machine_mode vmode;
7142 if (mode_for_vector (elmode, group_size).exists (&vmode)
7143 && VECTOR_MODE_P (vmode)
7144 && (convert_optab_handler (vec_init_optab,
7145 TYPE_MODE (vectype), vmode)
7146 != CODE_FOR_nothing))
7148 nloads = const_nunits / group_size;
7149 lnel = group_size;
7150 ltype = build_vector_type (TREE_TYPE (vectype), group_size);
7152 else
7154 /* Otherwise avoid emitting a constructor of vector elements
7155 by performing the loads using an integer type of the same
7156 size, constructing a vector of those and then
7157 re-interpreting it as the original vector type.
7158 This avoids a huge runtime penalty due to the general
7159 inability to perform store forwarding from smaller stores
7160 to a larger load. */
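/* Concrete case (illustrative): loading groups of two SFmode elements to
   fill a V4SF vector when V2SF cannot feed vec_init directly.  Then LSIZE
   below is 64 and, provided V2DI can be constructed from DImode elements,
   two DImode loads build a vector of two 64-bit integers which is later
   VIEW_CONVERT_EXPRed back to V4SF.  */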
7161 unsigned lsize
7162 = group_size * TYPE_PRECISION (TREE_TYPE (vectype));
7163 elmode = int_mode_for_size (lsize, 0).require ();
7164 unsigned int lnunits = const_nunits / group_size;
7165 /* If we can't construct such a vector fall back to
7166 element loads of the original vector type. */
7167 if (mode_for_vector (elmode, lnunits).exists (&vmode)
7168 && VECTOR_MODE_P (vmode)
7169 && (convert_optab_handler (vec_init_optab, vmode, elmode)
7170 != CODE_FOR_nothing))
7172 nloads = lnunits;
7173 lnel = group_size;
7174 ltype = build_nonstandard_integer_type (lsize, 1);
7175 lvectype = build_vector_type (ltype, nloads);
7179 else
7181 nloads = 1;
7182 lnel = const_nunits;
7183 ltype = vectype;
7185 ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
7187 if (slp)
7189 /* For SLP permutation support we need to load the whole group,
7190 not only the number of vector stmts the permutation result
7191 fits in. */
7192 if (slp_perm)
7194 /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for
7195 variable VF. */
7196 unsigned int const_vf = vf.to_constant ();
7197 ncopies = CEIL (group_size * const_vf, const_nunits);
7198 dr_chain.create (ncopies);
7200 else
7201 ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7203 unsigned int group_el = 0;
7204 unsigned HOST_WIDE_INT
7205 elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
7206 for (j = 0; j < ncopies; j++)
7208 if (nloads > 1)
7209 vec_alloc (v, nloads);
7210 for (i = 0; i < nloads; i++)
7212 tree this_off = build_int_cst (TREE_TYPE (alias_off),
7213 group_el * elsz);
7214 new_stmt = gimple_build_assign (make_ssa_name (ltype),
7215 build2 (MEM_REF, ltype,
7216 running_off, this_off));
7217 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7218 if (nloads > 1)
7219 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE,
7220 gimple_assign_lhs (new_stmt));
7222 group_el += lnel;
7223 if (! slp
7224 || group_el == group_size)
7226 tree newoff = copy_ssa_name (running_off);
7227 gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
7228 running_off, stride_step);
7229 vect_finish_stmt_generation (stmt, incr, gsi);
7231 running_off = newoff;
7232 group_el = 0;
7235 if (nloads > 1)
7237 tree vec_inv = build_constructor (lvectype, v);
7238 new_temp = vect_init_vector (stmt, vec_inv, lvectype, gsi);
7239 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7240 if (lvectype != vectype)
7242 new_stmt = gimple_build_assign (make_ssa_name (vectype),
7243 VIEW_CONVERT_EXPR,
7244 build1 (VIEW_CONVERT_EXPR,
7245 vectype, new_temp));
7246 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7250 if (slp)
7252 if (slp_perm)
7253 dr_chain.quick_push (gimple_assign_lhs (new_stmt));
7254 else
7255 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7257 else
7259 if (j == 0)
7260 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7261 else
7262 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7263 prev_stmt_info = vinfo_for_stmt (new_stmt);
7266 if (slp_perm)
7268 unsigned n_perms;
7269 vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
7270 slp_node_instance, false, &n_perms);
7272 return true;
7275 if (grouped_load)
7277 first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
7278 group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
7279 /* For SLP vectorization we directly vectorize a subchain
7280 without permutation. */
7281 if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
7282 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
7283 /* For BB vectorization always use the first stmt to base
7284 the data ref pointer on. */
7285 if (bb_vinfo)
7286 first_stmt_for_drptr = SLP_TREE_SCALAR_STMTS (slp_node)[0];
7288 /* Check if the chain of loads is already vectorized. */
7289 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
7290 /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
7291 ??? But we can only do so if there is exactly one
7292 as we have no way to get at the rest. Leave the CSE
7293 opportunity alone.
7294 ??? With the group load eventually participating
7295 in multiple different permutations (having multiple
7296 slp nodes which refer to the same group) the CSE
7297 is even wrong code. See PR56270. */
7298 && !slp)
7300 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
7301 return true;
7303 first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
7304 group_gap_adj = 0;
7306 /* VEC_NUM is the number of vect stmts to be created for this group. */
7307 if (slp)
7309 grouped_load = false;
7310 /* For SLP permutation support we need to load the whole group,
7311 not only the number of vector stmts the permutation result
7312 fits in. */
7313 if (slp_perm)
7315 /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for
7316 variable VF. */
7317 unsigned int const_vf = vf.to_constant ();
7318 unsigned int const_nunits = nunits.to_constant ();
7319 vec_num = CEIL (group_size * const_vf, const_nunits);
7320 group_gap_adj = vf * group_size - nunits * vec_num;
7322 else
7324 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7325 group_gap_adj
7326 = group_size - SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
7329 else
7330 vec_num = group_size;
7332 ref_type = get_group_alias_ptr_type (first_stmt);
7334 else
7336 first_stmt = stmt;
7337 first_dr = dr;
7338 group_size = vec_num = 1;
7339 group_gap_adj = 0;
7340 ref_type = reference_alias_ptr_type (DR_REF (first_dr));
7343 alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
7344 gcc_assert (alignment_support_scheme);
7345 /* Targets with load-lane instructions must not require explicit
7346 realignment. */
7347 gcc_assert (memory_access_type != VMAT_LOAD_STORE_LANES
7348 || alignment_support_scheme == dr_aligned
7349 || alignment_support_scheme == dr_unaligned_supported);
7351 /* In case the vectorization factor (VF) is bigger than the number
7352 of elements that we can fit in a vectype (nunits), we have to generate
7353 more than one vector stmt - i.e., we need to "unroll" the
7354 vector stmt by a factor VF/nunits. In doing so, we record a pointer
7355 from one copy of the vector stmt to the next, in the field
7356 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
7357 stages to find the correct vector defs to be used when vectorizing
7358 stmts that use the defs of the current stmt. The example below
7359 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
7360 need to create 4 vectorized stmts):
7362 before vectorization:
7363 RELATED_STMT VEC_STMT
7364 S1: x = memref - -
7365 S2: z = x + 1 - -
7367 step 1: vectorize stmt S1:
7368 We first create the vector stmt VS1_0, and, as usual, record a
7369 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
7370 Next, we create the vector stmt VS1_1, and record a pointer to
7371 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
7372 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
7373 stmts and pointers:
7374 RELATED_STMT VEC_STMT
7375 VS1_0: vx0 = memref0 VS1_1 -
7376 VS1_1: vx1 = memref1 VS1_2 -
7377 VS1_2: vx2 = memref2 VS1_3 -
7378 VS1_3: vx3 = memref3 - -
7379 S1: x = load - VS1_0
7380 S2: z = x + 1 - -
7382 See the documentation of vect_get_vec_def_for_stmt_copy for how the
7383 information we recorded in the RELATED_STMT field is used to vectorize
7384 stmt S2. */
7386 /* In case of interleaving (non-unit grouped access):
7388 S1: x2 = &base + 2
7389 S2: x0 = &base
7390 S3: x1 = &base + 1
7391 S4: x3 = &base + 3
7393 Vectorized loads are created in the order of memory accesses
7394 starting from the access of the first stmt of the chain:
7396 VS1: vx0 = &base
7397 VS2: vx1 = &base + vec_size*1
7398 VS3: vx2 = &base + vec_size*2
7399 VS4: vx3 = &base + vec_size*3
7401 Then permutation statements are generated:
7403 VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
7404 VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
7407 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
7408 (the order of the data-refs in the output of vect_permute_load_chain
7409 corresponds to the order of scalar stmts in the interleaving chain - see
7410 the documentation of vect_permute_load_chain()).
7411 The generation of permutation stmts and recording them in
7412 STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().
7414 In case of both multiple types and interleaving, the vector loads and
7415 permutation stmts above are created for every copy. The result vector
7416 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
7417 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
7419 /* If the data reference is aligned (dr_aligned) or potentially unaligned
7420 on a target that supports unaligned accesses (dr_unaligned_supported)
7421 we generate the following code:
7422 p = initial_addr;
7423 indx = 0;
7424 loop {
7425 p = p + indx * vectype_size;
7426 vec_dest = *(p);
7427 indx = indx + 1;
7430 Otherwise, the data reference is potentially unaligned on a target that
7431 does not support unaligned accesses (dr_explicit_realign_optimized) -
7432 then generate the following code, in which the data in each iteration is
7433 obtained by two vector loads, one from the previous iteration, and one
7434 from the current iteration:
7435 p1 = initial_addr;
7436 msq_init = *(floor(p1))
7437 p2 = initial_addr + VS - 1;
7438 realignment_token = call target_builtin;
7439 indx = 0;
7440 loop {
7441 p2 = p2 + indx * vectype_size
7442 lsq = *(floor(p2))
7443 vec_dest = realign_load (msq, lsq, realignment_token)
7444 indx = indx + 1;
7445 msq = lsq;
7446 } */
7448 /* If the misalignment remains the same throughout the execution of the
7449 loop, we can create the init_addr and permutation mask at the loop
7450 preheader. Otherwise, it needs to be created inside the loop.
7451 This can only occur when vectorizing memory accesses in the inner-loop
7452 nested within an outer-loop that is being vectorized. */
7454 if (nested_in_vect_loop
7455 && !multiple_p (DR_STEP_ALIGNMENT (dr),
7456 GET_MODE_SIZE (TYPE_MODE (vectype))))
7458 gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
7459 compute_in_loop = true;
7462 if ((alignment_support_scheme == dr_explicit_realign_optimized
7463 || alignment_support_scheme == dr_explicit_realign)
7464 && !compute_in_loop)
7466 msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
7467 alignment_support_scheme, NULL_TREE,
7468 &at_loop);
7469 if (alignment_support_scheme == dr_explicit_realign_optimized)
7471 phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
7472 byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
7473 size_one_node);
7476 else
7477 at_loop = loop;
7479 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
7480 offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
7482 if (memory_access_type == VMAT_LOAD_STORE_LANES)
7483 aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
7484 else
7485 aggr_type = vectype;
7487 prev_stmt_info = NULL;
7488 poly_uint64 group_elt = 0;
7489 for (j = 0; j < ncopies; j++)
7491 /* 1. Create the vector or array pointer update chain. */
7492 if (j == 0)
7494 bool simd_lane_access_p
7495 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
7496 if (simd_lane_access_p
7497 && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
7498 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
7499 && integer_zerop (DR_OFFSET (first_dr))
7500 && integer_zerop (DR_INIT (first_dr))
7501 && alias_sets_conflict_p (get_alias_set (aggr_type),
7502 get_alias_set (TREE_TYPE (ref_type)))
7503 && (alignment_support_scheme == dr_aligned
7504 || alignment_support_scheme == dr_unaligned_supported))
7506 dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
7507 dataref_offset = build_int_cst (ref_type, 0);
7508 inv_p = false;
7510 else if (first_stmt_for_drptr
7511 && first_stmt != first_stmt_for_drptr)
7513 dataref_ptr
7514 = vect_create_data_ref_ptr (first_stmt_for_drptr, aggr_type,
7515 at_loop, offset, &dummy, gsi,
7516 &ptr_incr, simd_lane_access_p,
7517 &inv_p, byte_offset);
7518 /* Adjust the pointer by the difference to first_stmt. */
7519 data_reference_p ptrdr
7520 = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt_for_drptr));
7521 tree diff = fold_convert (sizetype,
7522 size_binop (MINUS_EXPR,
7523 DR_INIT (first_dr),
7524 DR_INIT (ptrdr)));
7525 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7526 stmt, diff);
7528 else
7529 dataref_ptr
7530 = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
7531 offset, &dummy, gsi, &ptr_incr,
7532 simd_lane_access_p, &inv_p,
7533 byte_offset);
7535 else if (dataref_offset)
7536 dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
7537 TYPE_SIZE_UNIT (aggr_type));
7538 else
7539 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
7540 TYPE_SIZE_UNIT (aggr_type));
7542 if (grouped_load || slp_perm)
7543 dr_chain.create (vec_num);
7545 if (memory_access_type == VMAT_LOAD_STORE_LANES)
7547 tree vec_array;
7549 vec_array = create_vector_array (vectype, vec_num);
7551 /* Emit:
7552 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
7553 data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
7554 gcall *call = gimple_build_call_internal (IFN_LOAD_LANES, 1,
7555 data_ref);
7556 gimple_call_set_lhs (call, vec_array);
7557 gimple_call_set_nothrow (call, true);
7558 new_stmt = call;
7559 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7561 /* Extract each vector into an SSA_NAME. */
7562 for (i = 0; i < vec_num; i++)
7564 new_temp = read_vector_array (stmt, gsi, scalar_dest,
7565 vec_array, i);
7566 dr_chain.quick_push (new_temp);
7569 /* Record the mapping between SSA_NAMEs and statements. */
7570 vect_record_grouped_load_vectors (stmt, dr_chain);
7572 else
7574 for (i = 0; i < vec_num; i++)
7576 if (i > 0)
7577 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7578 stmt, NULL_TREE);
7580 /* 2. Create the vector-load in the loop. */
7581 switch (alignment_support_scheme)
7583 case dr_aligned:
7584 case dr_unaligned_supported:
7586 unsigned int align, misalign;
7588 data_ref
7589 = fold_build2 (MEM_REF, vectype, dataref_ptr,
7590 dataref_offset
7591 ? dataref_offset
7592 : build_int_cst (ref_type, 0));
7593 align = DR_TARGET_ALIGNMENT (dr);
7594 if (alignment_support_scheme == dr_aligned)
7596 gcc_assert (aligned_access_p (first_dr));
7597 misalign = 0;
7599 else if (DR_MISALIGNMENT (first_dr) == -1)
7601 align = dr_alignment (vect_dr_behavior (first_dr));
7602 misalign = 0;
7603 TREE_TYPE (data_ref)
7604 = build_aligned_type (TREE_TYPE (data_ref),
7605 align * BITS_PER_UNIT);
7607 else
7609 TREE_TYPE (data_ref)
7610 = build_aligned_type (TREE_TYPE (data_ref),
7611 TYPE_ALIGN (elem_type));
7612 misalign = DR_MISALIGNMENT (first_dr);
7614 if (dataref_offset == NULL_TREE
7615 && TREE_CODE (dataref_ptr) == SSA_NAME)
7616 set_ptr_info_alignment (get_ptr_info (dataref_ptr),
7617 align, misalign);
7618 break;
7620 case dr_explicit_realign:
7622 tree ptr, bump;
7624 tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));
7626 if (compute_in_loop)
7627 msq = vect_setup_realignment (first_stmt, gsi,
7628 &realignment_token,
7629 dr_explicit_realign,
7630 dataref_ptr, NULL);
7632 if (TREE_CODE (dataref_ptr) == SSA_NAME)
7633 ptr = copy_ssa_name (dataref_ptr);
7634 else
7635 ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
7636 unsigned int align = DR_TARGET_ALIGNMENT (first_dr);
7637 new_stmt = gimple_build_assign
7638 (ptr, BIT_AND_EXPR, dataref_ptr,
7639 build_int_cst
7640 (TREE_TYPE (dataref_ptr),
7641 -(HOST_WIDE_INT) align));
7642 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7643 data_ref
7644 = build2 (MEM_REF, vectype, ptr,
7645 build_int_cst (ref_type, 0));
7646 vec_dest = vect_create_destination_var (scalar_dest,
7647 vectype);
7648 new_stmt = gimple_build_assign (vec_dest, data_ref);
7649 new_temp = make_ssa_name (vec_dest, new_stmt);
7650 gimple_assign_set_lhs (new_stmt, new_temp);
7651 gimple_set_vdef (new_stmt, gimple_vdef (stmt));
7652 gimple_set_vuse (new_stmt, gimple_vuse (stmt));
7653 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7654 msq = new_temp;
7656 bump = size_binop (MULT_EXPR, vs,
7657 TYPE_SIZE_UNIT (elem_type));
7658 bump = size_binop (MINUS_EXPR, bump, size_one_node);
7659 ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
7660 new_stmt = gimple_build_assign
7661 (NULL_TREE, BIT_AND_EXPR, ptr,
7662 build_int_cst
7663 (TREE_TYPE (ptr), -(HOST_WIDE_INT) align));
7664 ptr = copy_ssa_name (ptr, new_stmt);
7665 gimple_assign_set_lhs (new_stmt, ptr);
7666 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7667 data_ref
7668 = build2 (MEM_REF, vectype, ptr,
7669 build_int_cst (ref_type, 0));
7670 break;
7672 case dr_explicit_realign_optimized:
7674 if (TREE_CODE (dataref_ptr) == SSA_NAME)
7675 new_temp = copy_ssa_name (dataref_ptr);
7676 else
7677 new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
7678 unsigned int align = DR_TARGET_ALIGNMENT (first_dr);
7679 new_stmt = gimple_build_assign
7680 (new_temp, BIT_AND_EXPR, dataref_ptr,
7681 build_int_cst (TREE_TYPE (dataref_ptr),
7682 -(HOST_WIDE_INT) align));
7683 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7684 data_ref
7685 = build2 (MEM_REF, vectype, new_temp,
7686 build_int_cst (ref_type, 0));
7687 break;
7689 default:
7690 gcc_unreachable ();
7692 vec_dest = vect_create_destination_var (scalar_dest, vectype);
7693 new_stmt = gimple_build_assign (vec_dest, data_ref);
7694 new_temp = make_ssa_name (vec_dest, new_stmt);
7695 gimple_assign_set_lhs (new_stmt, new_temp);
7696 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7698 /* 3. Handle explicit realignment if necessary/supported.
7699 Create in loop:
7700 vec_dest = realign_load (msq, lsq, realignment_token) */
7701 if (alignment_support_scheme == dr_explicit_realign_optimized
7702 || alignment_support_scheme == dr_explicit_realign)
7704 lsq = gimple_assign_lhs (new_stmt);
7705 if (!realignment_token)
7706 realignment_token = dataref_ptr;
7707 vec_dest = vect_create_destination_var (scalar_dest, vectype);
7708 new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
7709 msq, lsq, realignment_token);
7710 new_temp = make_ssa_name (vec_dest, new_stmt);
7711 gimple_assign_set_lhs (new_stmt, new_temp);
7712 vect_finish_stmt_generation (stmt, new_stmt, gsi);
7714 if (alignment_support_scheme == dr_explicit_realign_optimized)
7716 gcc_assert (phi);
7717 if (i == vec_num - 1 && j == ncopies - 1)
7718 add_phi_arg (phi, lsq,
7719 loop_latch_edge (containing_loop),
7720 UNKNOWN_LOCATION);
7721 msq = lsq;
7725 /* 4. Handle invariant-load. */
7726 if (inv_p && !bb_vinfo)
7728 gcc_assert (!grouped_load);
7729 /* If we have versioned for aliasing or the loop doesn't
7730 have any data dependencies that would preclude this,
7731 then we are sure this is a loop invariant load and
7732 thus we can insert it on the preheader edge. */
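/* A rough sketch of this transformation, using hypothetical SSA names
   and assuming the scalar load is x_1 = *p_2 with p_2 not varying in
   the loop:

     preheader:
       tmp_3 = *p_2;                       <-- hoisted scalar load
       vec_x_4 = { tmp_3, tmp_3, ... };    <-- splat built by vect_init_vector

   The vectorized uses of x_1 inside the loop then refer to vec_x_4.  */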
7733 if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
7734 && !nested_in_vect_loop
7735 && hoist_defs_of_uses (stmt, loop))
7737 if (dump_enabled_p ())
7739 dump_printf_loc (MSG_NOTE, vect_location,
7740 "hoisting out of the vectorized "
7741 "loop: ");
7742 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
7744 tree tem = copy_ssa_name (scalar_dest);
7745 gsi_insert_on_edge_immediate
7746 (loop_preheader_edge (loop),
7747 gimple_build_assign (tem,
7748 unshare_expr
7749 (gimple_assign_rhs1 (stmt))));
7750 new_temp = vect_init_vector (stmt, tem, vectype, NULL);
7751 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7752 set_vinfo_for_stmt (new_stmt,
7753 new_stmt_vec_info (new_stmt, vinfo));
7755 else
7757 gimple_stmt_iterator gsi2 = *gsi;
7758 gsi_next (&gsi2);
7759 new_temp = vect_init_vector (stmt, scalar_dest,
7760 vectype, &gsi2);
7761 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7765 if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
7767 tree perm_mask = perm_mask_for_reverse (vectype);
7768 new_temp = permute_vec_elements (new_temp, new_temp,
7769 perm_mask, stmt, gsi);
7770 new_stmt = SSA_NAME_DEF_STMT (new_temp);
7773 /* Collect vector loads and later create their permutation in
7774 vect_transform_grouped_load (). */
7775 if (grouped_load || slp_perm)
7776 dr_chain.quick_push (new_temp);
7778 /* Store vector loads in the corresponding SLP_NODE. */
7779 if (slp && !slp_perm)
7780 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
7782 /* With SLP permutation we load the gaps as well; without
7783 it we need to skip the gaps after we manage to fully load
7784 all elements. group_gap_adj is GROUP_SIZE here. */
7785 group_elt += nunits;
7786 if (maybe_ne (group_gap_adj, 0U)
7787 && !slp_perm
7788 && known_eq (group_elt, group_size - group_gap_adj))
7790 poly_wide_int bump_val
7791 = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
7792 * group_gap_adj);
7793 tree bump = wide_int_to_tree (sizetype, bump_val);
7794 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7795 stmt, bump);
7796 group_elt = 0;
7799 /* Bump the vector pointer to account for a gap or for excess
7800 elements loaded for a permuted SLP load. */
7801 if (maybe_ne (group_gap_adj, 0U) && slp_perm)
7803 poly_wide_int bump_val
7804 = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
7805 * group_gap_adj);
7806 tree bump = wide_int_to_tree (sizetype, bump_val);
7807 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
7808 stmt, bump);
7812 if (slp && !slp_perm)
7813 continue;
7815 if (slp_perm)
7817 unsigned n_perms;
7818 if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
7819 slp_node_instance, false,
7820 &n_perms))
7822 dr_chain.release ();
7823 return false;
7826 else
7828 if (grouped_load)
7830 if (memory_access_type != VMAT_LOAD_STORE_LANES)
7831 vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
7832 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
7834 else
7836 if (j == 0)
7837 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
7838 else
7839 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
7840 prev_stmt_info = vinfo_for_stmt (new_stmt);
7843 dr_chain.release ();
7846 return true;
7849 /* Function vect_is_simple_cond.
7851 Input:
7852 LOOP - the loop that is being vectorized.
7853 COND - Condition that is checked for simple use.
7855 Output:
7856 *COMP_VECTYPE - the vector type for the comparison.
7857 *DTS - The def types for the arguments of the comparison
7859 Returns whether a COND can be vectorized. Checks whether
7860 condition operands are supportable using vect_is_simple_use. */
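/* For example (hypothetical SSA names), COND may be a comparison such as
   a_1 < b_2, or, in the mask case handled first below, a scalar boolean
   SSA name such as mask_3 defined by another statement.  */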
7862 static bool
7863 vect_is_simple_cond (tree cond, vec_info *vinfo,
7864 tree *comp_vectype, enum vect_def_type *dts,
7865 tree vectype)
7867 tree lhs, rhs;
7868 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
7870 /* Mask case. */
7871 if (TREE_CODE (cond) == SSA_NAME
7872 && VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (cond)))
7874 gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (cond);
7875 if (!vect_is_simple_use (cond, vinfo, &lhs_def_stmt,
7876 &dts[0], comp_vectype)
7877 || !*comp_vectype
7878 || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype))
7879 return false;
7880 return true;
7883 if (!COMPARISON_CLASS_P (cond))
7884 return false;
7886 lhs = TREE_OPERAND (cond, 0);
7887 rhs = TREE_OPERAND (cond, 1);
7889 if (TREE_CODE (lhs) == SSA_NAME)
7891 gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
7892 if (!vect_is_simple_use (lhs, vinfo, &lhs_def_stmt, &dts[0], &vectype1))
7893 return false;
7895 else if (TREE_CODE (lhs) == INTEGER_CST || TREE_CODE (lhs) == REAL_CST
7896 || TREE_CODE (lhs) == FIXED_CST)
7897 dts[0] = vect_constant_def;
7898 else
7899 return false;
7901 if (TREE_CODE (rhs) == SSA_NAME)
7903 gimple *rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
7904 if (!vect_is_simple_use (rhs, vinfo, &rhs_def_stmt, &dts[1], &vectype2))
7905 return false;
7907 else if (TREE_CODE (rhs) == INTEGER_CST || TREE_CODE (rhs) == REAL_CST
7908 || TREE_CODE (rhs) == FIXED_CST)
7909 dts[1] = vect_constant_def;
7910 else
7911 return false;
7913 if (vectype1 && vectype2
7914 && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1),
7915 TYPE_VECTOR_SUBPARTS (vectype2)))
7916 return false;
7918 *comp_vectype = vectype1 ? vectype1 : vectype2;
7919 /* Invariant comparison. */
7920 if (! *comp_vectype)
7922 tree scalar_type = TREE_TYPE (lhs);
7923 /* If we can widen the comparison to match vectype do so. */
7924 if (INTEGRAL_TYPE_P (scalar_type)
7925 && tree_int_cst_lt (TYPE_SIZE (scalar_type),
7926 TYPE_SIZE (TREE_TYPE (vectype))))
7927 scalar_type = build_nonstandard_integer_type
7928 (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (vectype))),
7929 TYPE_UNSIGNED (scalar_type));
7930 *comp_vectype = get_vectype_for_scalar_type (scalar_type);
7933 return true;
7936 /* vectorizable_condition.
7938 Check if STMT is a conditional modify expression that can be vectorized.
7939 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
7940 stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
7941 at GSI.
7943 When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
7944 to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in the
7945 else clause if it is 2).
7947 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
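/* As a rough sketch (hypothetical SSA names), the scalar statement

     x_1 = a_2 < b_3 ? c_4 : d_5;

   becomes something like

     vx_6 = VEC_COND_EXPR <va_7 < vb_8, vc_9, vd_10>;

   modulo the bit-operation lowering applied to boolean operands,
   described further below.  */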
7949 bool
7950 vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
7951 gimple **vec_stmt, tree reduc_def, int reduc_index,
7952 slp_tree slp_node)
7954 tree scalar_dest = NULL_TREE;
7955 tree vec_dest = NULL_TREE;
7956 tree cond_expr, cond_expr0 = NULL_TREE, cond_expr1 = NULL_TREE;
7957 tree then_clause, else_clause;
7958 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7959 tree comp_vectype = NULL_TREE;
7960 tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
7961 tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
7962 tree vec_compare;
7963 tree new_temp;
7964 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7965 enum vect_def_type dts[4]
7966 = {vect_unknown_def_type, vect_unknown_def_type,
7967 vect_unknown_def_type, vect_unknown_def_type};
7968 int ndts = 4;
7969 int ncopies;
7970 enum tree_code code, cond_code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
7971 stmt_vec_info prev_stmt_info = NULL;
7972 int i, j;
7973 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
7974 vec<tree> vec_oprnds0 = vNULL;
7975 vec<tree> vec_oprnds1 = vNULL;
7976 vec<tree> vec_oprnds2 = vNULL;
7977 vec<tree> vec_oprnds3 = vNULL;
7978 tree vec_cmp_type;
7979 bool masked = false;
7981 if (reduc_index && STMT_SLP_TYPE (stmt_info))
7982 return false;
7984 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == TREE_CODE_REDUCTION)
7986 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
7987 return false;
7989 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
7990 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
7991 && reduc_def))
7992 return false;
7994 /* FORNOW: not yet supported. */
7995 if (STMT_VINFO_LIVE_P (stmt_info))
7997 if (dump_enabled_p ())
7998 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7999 "value used after loop.\n");
8000 return false;
8004 /* Is vectorizable conditional operation? */
8005 if (!is_gimple_assign (stmt))
8006 return false;
8008 code = gimple_assign_rhs_code (stmt);
8010 if (code != COND_EXPR)
8011 return false;
8013 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
8014 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
8016 if (slp_node)
8017 ncopies = 1;
8018 else
8019 ncopies = vect_get_num_copies (loop_vinfo, vectype);
8021 gcc_assert (ncopies >= 1);
8022 if (reduc_index && ncopies > 1)
8023 return false; /* FORNOW */
8025 cond_expr = gimple_assign_rhs1 (stmt);
8026 then_clause = gimple_assign_rhs2 (stmt);
8027 else_clause = gimple_assign_rhs3 (stmt);
8029 if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo,
8030 &comp_vectype, &dts[0], vectype)
8031 || !comp_vectype)
8032 return false;
8034 gimple *def_stmt;
8035 if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &def_stmt, &dts[2],
8036 &vectype1))
8037 return false;
8038 if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &def_stmt, &dts[3],
8039 &vectype2))
8040 return false;
8042 if (vectype1 && !useless_type_conversion_p (vectype, vectype1))
8043 return false;
8045 if (vectype2 && !useless_type_conversion_p (vectype, vectype2))
8046 return false;
8048 masked = !COMPARISON_CLASS_P (cond_expr);
8049 vec_cmp_type = build_same_sized_truth_vector_type (comp_vectype);
8051 if (vec_cmp_type == NULL_TREE)
8052 return false;
8054 cond_code = TREE_CODE (cond_expr);
8055 if (!masked)
8057 cond_expr0 = TREE_OPERAND (cond_expr, 0);
8058 cond_expr1 = TREE_OPERAND (cond_expr, 1);
8061 if (!masked && VECTOR_BOOLEAN_TYPE_P (comp_vectype))
8063 /* Boolean values may have another representation in vectors
8064 and therefore we prefer bit operations over comparison for
8065 them (which also works for scalar masks). We store opcodes
8066 to use in bitop1 and bitop2. Statement is vectorized as
8067 BITOP2 (rhs1 BITOP1 rhs2) or rhs1 BITOP2 (BITOP1 rhs2)
8068 depending on bitop1 and bitop2 arity. */
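/* For example, with boolean vector operands a_1 > b_2 is emitted as
   a_1 & ~b_2 (bitop1 = BIT_NOT_EXPR applied to b_2, bitop2 =
   BIT_AND_EXPR), and a_1 <= b_2 as b_2 | ~a_1 after the operand swap
   below (hypothetical SSA names).  */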
8069 switch (cond_code)
8071 case GT_EXPR:
8072 bitop1 = BIT_NOT_EXPR;
8073 bitop2 = BIT_AND_EXPR;
8074 break;
8075 case GE_EXPR:
8076 bitop1 = BIT_NOT_EXPR;
8077 bitop2 = BIT_IOR_EXPR;
8078 break;
8079 case LT_EXPR:
8080 bitop1 = BIT_NOT_EXPR;
8081 bitop2 = BIT_AND_EXPR;
8082 std::swap (cond_expr0, cond_expr1);
8083 break;
8084 case LE_EXPR:
8085 bitop1 = BIT_NOT_EXPR;
8086 bitop2 = BIT_IOR_EXPR;
8087 std::swap (cond_expr0, cond_expr1);
8088 break;
8089 case NE_EXPR:
8090 bitop1 = BIT_XOR_EXPR;
8091 break;
8092 case EQ_EXPR:
8093 bitop1 = BIT_XOR_EXPR;
8094 bitop2 = BIT_NOT_EXPR;
8095 break;
8096 default:
8097 return false;
8099 cond_code = SSA_NAME;
8102 if (!vec_stmt)
8104 STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
8105 if (bitop1 != NOP_EXPR)
8107 machine_mode mode = TYPE_MODE (comp_vectype);
8108 optab optab;
8110 optab = optab_for_tree_code (bitop1, comp_vectype, optab_default);
8111 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8112 return false;
8114 if (bitop2 != NOP_EXPR)
8116 optab = optab_for_tree_code (bitop2, comp_vectype,
8117 optab_default);
8118 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8119 return false;
8122 if (expand_vec_cond_expr_p (vectype, comp_vectype,
8123 cond_code))
8125 vect_model_simple_cost (stmt_info, ncopies, dts, ndts, NULL, NULL);
8126 return true;
8128 return false;
8131 /* Transform. */
8133 if (!slp_node)
8135 vec_oprnds0.create (1);
8136 vec_oprnds1.create (1);
8137 vec_oprnds2.create (1);
8138 vec_oprnds3.create (1);
8141 /* Handle def. */
8142 scalar_dest = gimple_assign_lhs (stmt);
8143 vec_dest = vect_create_destination_var (scalar_dest, vectype);
8145 /* Handle cond expr. */
8146 for (j = 0; j < ncopies; j++)
8148 gassign *new_stmt = NULL;
8149 if (j == 0)
8151 if (slp_node)
8153 auto_vec<tree, 4> ops;
8154 auto_vec<vec<tree>, 4> vec_defs;
8156 if (masked)
8157 ops.safe_push (cond_expr);
8158 else
8160 ops.safe_push (cond_expr0);
8161 ops.safe_push (cond_expr1);
8163 ops.safe_push (then_clause);
8164 ops.safe_push (else_clause);
8165 vect_get_slp_defs (ops, slp_node, &vec_defs);
8166 vec_oprnds3 = vec_defs.pop ();
8167 vec_oprnds2 = vec_defs.pop ();
8168 if (!masked)
8169 vec_oprnds1 = vec_defs.pop ();
8170 vec_oprnds0 = vec_defs.pop ();
8172 else
8174 gimple *gtemp;
8175 if (masked)
8177 vec_cond_lhs
8178 = vect_get_vec_def_for_operand (cond_expr, stmt,
8179 comp_vectype);
8180 vect_is_simple_use (cond_expr, stmt_info->vinfo,
8181 &gtemp, &dts[0]);
8183 else
8185 vec_cond_lhs
8186 = vect_get_vec_def_for_operand (cond_expr0,
8187 stmt, comp_vectype);
8188 vect_is_simple_use (cond_expr0, loop_vinfo, &gtemp, &dts[0]);
8190 vec_cond_rhs
8191 = vect_get_vec_def_for_operand (cond_expr1,
8192 stmt, comp_vectype);
8193 vect_is_simple_use (cond_expr1, loop_vinfo, &gtemp, &dts[1]);
8195 if (reduc_index == 1)
8196 vec_then_clause = reduc_def;
8197 else
8199 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
8200 stmt);
8201 vect_is_simple_use (then_clause, loop_vinfo,
8202 &gtemp, &dts[2]);
8204 if (reduc_index == 2)
8205 vec_else_clause = reduc_def;
8206 else
8208 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
8209 stmt);
8210 vect_is_simple_use (else_clause, loop_vinfo, &gtemp, &dts[3]);
8214 else
8216 vec_cond_lhs
8217 = vect_get_vec_def_for_stmt_copy (dts[0],
8218 vec_oprnds0.pop ());
8219 if (!masked)
8220 vec_cond_rhs
8221 = vect_get_vec_def_for_stmt_copy (dts[1],
8222 vec_oprnds1.pop ());
8224 vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
8225 vec_oprnds2.pop ());
8226 vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
8227 vec_oprnds3.pop ());
8230 if (!slp_node)
8232 vec_oprnds0.quick_push (vec_cond_lhs);
8233 if (!masked)
8234 vec_oprnds1.quick_push (vec_cond_rhs);
8235 vec_oprnds2.quick_push (vec_then_clause);
8236 vec_oprnds3.quick_push (vec_else_clause);
8239 /* Arguments are ready. Create the new vector stmt. */
8240 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
8242 vec_then_clause = vec_oprnds2[i];
8243 vec_else_clause = vec_oprnds3[i];
8245 if (masked)
8246 vec_compare = vec_cond_lhs;
8247 else
8249 vec_cond_rhs = vec_oprnds1[i];
8250 if (bitop1 == NOP_EXPR)
8251 vec_compare = build2 (cond_code, vec_cmp_type,
8252 vec_cond_lhs, vec_cond_rhs);
8253 else
8255 new_temp = make_ssa_name (vec_cmp_type);
8256 if (bitop1 == BIT_NOT_EXPR)
8257 new_stmt = gimple_build_assign (new_temp, bitop1,
8258 vec_cond_rhs);
8259 else
8260 new_stmt
8261 = gimple_build_assign (new_temp, bitop1, vec_cond_lhs,
8262 vec_cond_rhs);
8263 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8264 if (bitop2 == NOP_EXPR)
8265 vec_compare = new_temp;
8266 else if (bitop2 == BIT_NOT_EXPR)
8268 /* Instead of doing ~x ? y : z do x ? z : y. */
8269 vec_compare = new_temp;
8270 std::swap (vec_then_clause, vec_else_clause);
8272 else
8274 vec_compare = make_ssa_name (vec_cmp_type);
8275 new_stmt
8276 = gimple_build_assign (vec_compare, bitop2,
8277 vec_cond_lhs, new_temp);
8278 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8282 new_temp = make_ssa_name (vec_dest);
8283 new_stmt = gimple_build_assign (new_temp, VEC_COND_EXPR,
8284 vec_compare, vec_then_clause,
8285 vec_else_clause);
8286 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8287 if (slp_node)
8288 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
8291 if (slp_node)
8292 continue;
8294 if (j == 0)
8295 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
8296 else
8297 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
8299 prev_stmt_info = vinfo_for_stmt (new_stmt);
8302 vec_oprnds0.release ();
8303 vec_oprnds1.release ();
8304 vec_oprnds2.release ();
8305 vec_oprnds3.release ();
8307 return true;
8310 /* vectorizable_comparison.
8312 Check if STMT is a comparison expression that can be vectorized.
8313 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
8314 comparison, put it in VEC_STMT, and insert it at GSI.
8316 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
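/* As a rough sketch (hypothetical SSA names), the scalar statement

     cmp_1 = x_2 < y_3;

   becomes a statement producing a boolean ("mask") vector, e.g.

     vmask_4 = vx_5 < vy_6;

   modulo the bit-operation lowering applied to boolean operands,
   described below.  */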
8318 static bool
8319 vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
8320 gimple **vec_stmt, tree reduc_def,
8321 slp_tree slp_node)
8323 tree lhs, rhs1, rhs2;
8324 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8325 tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
8326 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
8327 tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE;
8328 tree new_temp;
8329 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
8330 enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type};
8331 int ndts = 2;
8332 poly_uint64 nunits;
8333 int ncopies;
8334 enum tree_code code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
8335 stmt_vec_info prev_stmt_info = NULL;
8336 int i, j;
8337 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
8338 vec<tree> vec_oprnds0 = vNULL;
8339 vec<tree> vec_oprnds1 = vNULL;
8340 gimple *def_stmt;
8341 tree mask_type;
8342 tree mask;
8344 if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
8345 return false;
8347 if (!vectype || !VECTOR_BOOLEAN_TYPE_P (vectype))
8348 return false;
8350 mask_type = vectype;
8351 nunits = TYPE_VECTOR_SUBPARTS (vectype);
8353 if (slp_node)
8354 ncopies = 1;
8355 else
8356 ncopies = vect_get_num_copies (loop_vinfo, vectype);
8358 gcc_assert (ncopies >= 1);
8359 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
8360 && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
8361 && reduc_def))
8362 return false;
8364 if (STMT_VINFO_LIVE_P (stmt_info))
8366 if (dump_enabled_p ())
8367 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8368 "value used after loop.\n");
8369 return false;
8372 if (!is_gimple_assign (stmt))
8373 return false;
8375 code = gimple_assign_rhs_code (stmt);
8377 if (TREE_CODE_CLASS (code) != tcc_comparison)
8378 return false;
8380 rhs1 = gimple_assign_rhs1 (stmt);
8381 rhs2 = gimple_assign_rhs2 (stmt);
8383 if (!vect_is_simple_use (rhs1, stmt_info->vinfo, &def_stmt,
8384 &dts[0], &vectype1))
8385 return false;
8387 if (!vect_is_simple_use (rhs2, stmt_info->vinfo, &def_stmt,
8388 &dts[1], &vectype2))
8389 return false;
8391 if (vectype1 && vectype2
8392 && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1),
8393 TYPE_VECTOR_SUBPARTS (vectype2)))
8394 return false;
8396 vectype = vectype1 ? vectype1 : vectype2;
8398 /* Invariant comparison. */
8399 if (!vectype)
8401 vectype = get_vectype_for_scalar_type (TREE_TYPE (rhs1));
8402 if (maybe_ne (TYPE_VECTOR_SUBPARTS (vectype), nunits))
8403 return false;
8405 else if (maybe_ne (nunits, TYPE_VECTOR_SUBPARTS (vectype)))
8406 return false;
8408 /* Can't compare mask and non-mask types. */
8409 if (vectype1 && vectype2
8410 && (VECTOR_BOOLEAN_TYPE_P (vectype1) ^ VECTOR_BOOLEAN_TYPE_P (vectype2)))
8411 return false;
8413 /* Boolean values may have another representation in vectors
8414 and therefore we prefer bit operations over comparison for
8415 them (which also works for scalar masks). We store opcodes
8416 to use in bitop1 and bitop2. Statement is vectorized as
8417 BITOP2 (rhs1 BITOP1 rhs2) or
8418 rhs1 BITOP2 (BITOP1 rhs2)
8419 depending on bitop1 and bitop2 arity. */
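/* For example, with boolean vector operands x_1 == y_2 is emitted as
   ~(x_1 ^ y_2), x_1 != y_2 as x_1 ^ y_2, and x_1 < y_2 as y_2 & ~x_1
   after the operand swap below (hypothetical SSA names).  */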
8420 if (VECTOR_BOOLEAN_TYPE_P (vectype))
8422 if (code == GT_EXPR)
8424 bitop1 = BIT_NOT_EXPR;
8425 bitop2 = BIT_AND_EXPR;
8427 else if (code == GE_EXPR)
8429 bitop1 = BIT_NOT_EXPR;
8430 bitop2 = BIT_IOR_EXPR;
8432 else if (code == LT_EXPR)
8434 bitop1 = BIT_NOT_EXPR;
8435 bitop2 = BIT_AND_EXPR;
8436 std::swap (rhs1, rhs2);
8437 std::swap (dts[0], dts[1]);
8439 else if (code == LE_EXPR)
8441 bitop1 = BIT_NOT_EXPR;
8442 bitop2 = BIT_IOR_EXPR;
8443 std::swap (rhs1, rhs2);
8444 std::swap (dts[0], dts[1]);
8446 else
8448 bitop1 = BIT_XOR_EXPR;
8449 if (code == EQ_EXPR)
8450 bitop2 = BIT_NOT_EXPR;
8454 if (!vec_stmt)
8456 STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type;
8457 vect_model_simple_cost (stmt_info, ncopies * (1 + (bitop2 != NOP_EXPR)),
8458 dts, ndts, NULL, NULL);
8459 if (bitop1 == NOP_EXPR)
8460 return expand_vec_cmp_expr_p (vectype, mask_type, code);
8461 else
8463 machine_mode mode = TYPE_MODE (vectype);
8464 optab optab;
8466 optab = optab_for_tree_code (bitop1, vectype, optab_default);
8467 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8468 return false;
8470 if (bitop2 != NOP_EXPR)
8472 optab = optab_for_tree_code (bitop2, vectype, optab_default);
8473 if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
8474 return false;
8476 return true;
8480 /* Transform. */
8481 if (!slp_node)
8483 vec_oprnds0.create (1);
8484 vec_oprnds1.create (1);
8487 /* Handle def. */
8488 lhs = gimple_assign_lhs (stmt);
8489 mask = vect_create_destination_var (lhs, mask_type);
8491 /* Handle cmp expr. */
8492 for (j = 0; j < ncopies; j++)
8494 gassign *new_stmt = NULL;
8495 if (j == 0)
8497 if (slp_node)
8499 auto_vec<tree, 2> ops;
8500 auto_vec<vec<tree>, 2> vec_defs;
8502 ops.safe_push (rhs1);
8503 ops.safe_push (rhs2);
8504 vect_get_slp_defs (ops, slp_node, &vec_defs);
8505 vec_oprnds1 = vec_defs.pop ();
8506 vec_oprnds0 = vec_defs.pop ();
8508 else
8510 vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt, vectype);
8511 vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt, vectype);
8514 else
8516 vec_rhs1 = vect_get_vec_def_for_stmt_copy (dts[0],
8517 vec_oprnds0.pop ());
8518 vec_rhs2 = vect_get_vec_def_for_stmt_copy (dts[1],
8519 vec_oprnds1.pop ());
8522 if (!slp_node)
8524 vec_oprnds0.quick_push (vec_rhs1);
8525 vec_oprnds1.quick_push (vec_rhs2);
8528 /* Arguments are ready. Create the new vector stmt. */
8529 FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_rhs1)
8531 vec_rhs2 = vec_oprnds1[i];
8533 new_temp = make_ssa_name (mask);
8534 if (bitop1 == NOP_EXPR)
8536 new_stmt = gimple_build_assign (new_temp, code,
8537 vec_rhs1, vec_rhs2);
8538 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8540 else
8542 if (bitop1 == BIT_NOT_EXPR)
8543 new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs2);
8544 else
8545 new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs1,
8546 vec_rhs2);
8547 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8548 if (bitop2 != NOP_EXPR)
8550 tree res = make_ssa_name (mask);
8551 if (bitop2 == BIT_NOT_EXPR)
8552 new_stmt = gimple_build_assign (res, bitop2, new_temp);
8553 else
8554 new_stmt = gimple_build_assign (res, bitop2, vec_rhs1,
8555 new_temp);
8556 vect_finish_stmt_generation (stmt, new_stmt, gsi);
8559 if (slp_node)
8560 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
8563 if (slp_node)
8564 continue;
8566 if (j == 0)
8567 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
8568 else
8569 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
8571 prev_stmt_info = vinfo_for_stmt (new_stmt);
8574 vec_oprnds0.release ();
8575 vec_oprnds1.release ();
8577 return true;
8580 /* If SLP_NODE is nonnull, return true if vectorizable_live_operation
8581 can handle all live statements in the node. Otherwise return true
8582 if STMT is not live or if vectorizable_live_operation can handle it.
8583 GSI and VEC_STMT are as for vectorizable_live_operation. */
8585 static bool
8586 can_vectorize_live_stmts (gimple *stmt, gimple_stmt_iterator *gsi,
8587 slp_tree slp_node, gimple **vec_stmt)
8589 if (slp_node)
8591 gimple *slp_stmt;
8592 unsigned int i;
8593 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node), i, slp_stmt)
8595 stmt_vec_info slp_stmt_info = vinfo_for_stmt (slp_stmt);
8596 if (STMT_VINFO_LIVE_P (slp_stmt_info)
8597 && !vectorizable_live_operation (slp_stmt, gsi, slp_node, i,
8598 vec_stmt))
8599 return false;
8602 else if (STMT_VINFO_LIVE_P (vinfo_for_stmt (stmt))
8603 && !vectorizable_live_operation (stmt, gsi, slp_node, -1, vec_stmt))
8604 return false;
8606 return true;
8609 /* Make sure the statement is vectorizable. */
8611 bool
8612 vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
8613 slp_instance node_instance)
8615 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8616 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
8617 enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
8618 bool ok;
8619 gimple *pattern_stmt;
8620 gimple_seq pattern_def_seq;
8622 if (dump_enabled_p ())
8624 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
8625 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
8628 if (gimple_has_volatile_ops (stmt))
8630 if (dump_enabled_p ())
8631 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8632 "not vectorized: stmt has volatile operands\n");
8634 return false;
8637 /* Skip stmts that do not need to be vectorized. In loops this is expected
8638 to include:
8639 - the COND_EXPR which is the loop exit condition
8640 - any LABEL_EXPRs in the loop
8641 - computations that are used only for array indexing or loop control.
8642 In basic blocks we only analyze statements that are a part of some SLP
8643 instance, therefore, all the statements are relevant.
8645 A pattern statement needs to be analyzed instead of the original statement
8646 if the original statement is not relevant. Otherwise, we analyze both
8647 statements. In basic blocks we are called from some SLP instance
8648 traversal; there we don't analyze pattern stmts in place of the originals,
8649 as the pattern stmts will already be part of an SLP instance. */
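/* For example (hypothetical), a widening multiplication such as

     prod_1 = (int) a_2 * (int) b_3        (a_2, b_3 of type short)

   may have been replaced during pattern recognition by a pattern stmt
   based on WIDEN_MULT_EXPR; if the original stmt is irrelevant, that
   pattern stmt is analyzed in its place.  */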
8651 pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
8652 if (!STMT_VINFO_RELEVANT_P (stmt_info)
8653 && !STMT_VINFO_LIVE_P (stmt_info))
8655 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
8656 && pattern_stmt
8657 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
8658 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
8660 /* Analyze PATTERN_STMT instead of the original stmt. */
8661 stmt = pattern_stmt;
8662 stmt_info = vinfo_for_stmt (pattern_stmt);
8663 if (dump_enabled_p ())
8665 dump_printf_loc (MSG_NOTE, vect_location,
8666 "==> examining pattern statement: ");
8667 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
8670 else
8672 if (dump_enabled_p ())
8673 dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");
8675 return true;
8678 else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
8679 && node == NULL
8680 && pattern_stmt
8681 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
8682 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
8684 /* Analyze PATTERN_STMT too. */
8685 if (dump_enabled_p ())
8687 dump_printf_loc (MSG_NOTE, vect_location,
8688 "==> examining pattern statement: ");
8689 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
8692 if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node,
8693 node_instance))
8694 return false;
8697 if (is_pattern_stmt_p (stmt_info)
8698 && node == NULL
8699 && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
8701 gimple_stmt_iterator si;
8703 for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
8705 gimple *pattern_def_stmt = gsi_stmt (si);
8706 if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
8707 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
8709 /* Analyze def stmt of STMT if it's a pattern stmt. */
8710 if (dump_enabled_p ())
8712 dump_printf_loc (MSG_NOTE, vect_location,
8713 "==> examining pattern def statement: ");
8714 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
8717 if (!vect_analyze_stmt (pattern_def_stmt,
8718 need_to_vectorize, node, node_instance))
8719 return false;
8724 switch (STMT_VINFO_DEF_TYPE (stmt_info))
8726 case vect_internal_def:
8727 break;
8729 case vect_reduction_def:
8730 case vect_nested_cycle:
8731 gcc_assert (!bb_vinfo
8732 && (relevance == vect_used_in_outer
8733 || relevance == vect_used_in_outer_by_reduction
8734 || relevance == vect_used_by_reduction
8735 || relevance == vect_unused_in_scope
8736 || relevance == vect_used_only_live));
8737 break;
8739 case vect_induction_def:
8740 gcc_assert (!bb_vinfo);
8741 break;
8743 case vect_constant_def:
8744 case vect_external_def:
8745 case vect_unknown_def_type:
8746 default:
8747 gcc_unreachable ();
8750 if (STMT_VINFO_RELEVANT_P (stmt_info))
8752 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
8753 gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
8754 || (is_gimple_call (stmt)
8755 && gimple_call_lhs (stmt) == NULL_TREE));
8756 *need_to_vectorize = true;
8759 if (PURE_SLP_STMT (stmt_info) && !node)
8761 dump_printf_loc (MSG_NOTE, vect_location,
8762 "handled only by SLP analysis\n");
8763 return true;
8766 ok = true;
8767 if (!bb_vinfo
8768 && (STMT_VINFO_RELEVANT_P (stmt_info)
8769 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
8770 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
8771 || vectorizable_conversion (stmt, NULL, NULL, node)
8772 || vectorizable_shift (stmt, NULL, NULL, node)
8773 || vectorizable_operation (stmt, NULL, NULL, node)
8774 || vectorizable_assignment (stmt, NULL, NULL, node)
8775 || vectorizable_load (stmt, NULL, NULL, node, NULL)
8776 || vectorizable_call (stmt, NULL, NULL, node)
8777 || vectorizable_store (stmt, NULL, NULL, node)
8778 || vectorizable_reduction (stmt, NULL, NULL, node, node_instance)
8779 || vectorizable_induction (stmt, NULL, NULL, node)
8780 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
8781 || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
8782 else
8784 if (bb_vinfo)
8785 ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
8786 || vectorizable_conversion (stmt, NULL, NULL, node)
8787 || vectorizable_shift (stmt, NULL, NULL, node)
8788 || vectorizable_operation (stmt, NULL, NULL, node)
8789 || vectorizable_assignment (stmt, NULL, NULL, node)
8790 || vectorizable_load (stmt, NULL, NULL, node, NULL)
8791 || vectorizable_call (stmt, NULL, NULL, node)
8792 || vectorizable_store (stmt, NULL, NULL, node)
8793 || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
8794 || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
8797 if (!ok)
8799 if (dump_enabled_p ())
8801 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8802 "not vectorized: relevant stmt not ");
8803 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
8804 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
8807 return false;
8810 if (bb_vinfo)
8811 return true;
8813 /* Stmts that are (also) "live" (i.e., used outside of the loop)
8814 need extra handling, except for vectorizable reductions. */
8815 if (STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
8816 && !can_vectorize_live_stmts (stmt, NULL, node, NULL))
8818 if (dump_enabled_p ())
8820 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8821 "not vectorized: live stmt not supported: ");
8822 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
8825 return false;
8828 return true;
8832 /* Function vect_transform_stmt.
8834 Create a vectorized stmt to replace STMT, and insert it at GSI. */
8836 bool
8837 vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
8838 bool *grouped_store, slp_tree slp_node,
8839 slp_instance slp_node_instance)
8841 bool is_store = false;
8842 gimple *vec_stmt = NULL;
8843 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
8844 bool done;
8846 gcc_assert (slp_node || !PURE_SLP_STMT (stmt_info));
8847 gimple *old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
8849 switch (STMT_VINFO_TYPE (stmt_info))
8851 case type_demotion_vec_info_type:
8852 case type_promotion_vec_info_type:
8853 case type_conversion_vec_info_type:
8854 done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
8855 gcc_assert (done);
8856 break;
8858 case induc_vec_info_type:
8859 done = vectorizable_induction (stmt, gsi, &vec_stmt, slp_node);
8860 gcc_assert (done);
8861 break;
8863 case shift_vec_info_type:
8864 done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
8865 gcc_assert (done);
8866 break;
8868 case op_vec_info_type:
8869 done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
8870 gcc_assert (done);
8871 break;
8873 case assignment_vec_info_type:
8874 done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
8875 gcc_assert (done);
8876 break;
8878 case load_vec_info_type:
8879 done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
8880 slp_node_instance);
8881 gcc_assert (done);
8882 break;
8884 case store_vec_info_type:
8885 done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
8886 gcc_assert (done);
8887 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
8889 /* In case of interleaving, the whole chain is vectorized when the
8890 last store in the chain is reached. Store stmts before the last
8891 one are skipped, and their stmt_vec_info shouldn't be freed
8892 meanwhile. */
8893 *grouped_store = true;
8894 if (STMT_VINFO_VEC_STMT (stmt_info))
8895 is_store = true;
8897 else
8898 is_store = true;
8899 break;
8901 case condition_vec_info_type:
8902 done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
8903 gcc_assert (done);
8904 break;
8906 case comparison_vec_info_type:
8907 done = vectorizable_comparison (stmt, gsi, &vec_stmt, NULL, slp_node);
8908 gcc_assert (done);
8909 break;
8911 case call_vec_info_type:
8912 done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
8913 stmt = gsi_stmt (*gsi);
8914 if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
8915 is_store = true;
8916 break;
8918 case call_simd_clone_vec_info_type:
8919 done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
8920 stmt = gsi_stmt (*gsi);
8921 break;
8923 case reduc_vec_info_type:
8924 done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node,
8925 slp_node_instance);
8926 gcc_assert (done);
8927 break;
8929 default:
8930 if (!STMT_VINFO_LIVE_P (stmt_info))
8932 if (dump_enabled_p ())
8933 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
8934 "stmt not supported.\n");
8935 gcc_unreachable ();
8939 /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
8940 This would break hybrid SLP vectorization. */
8941 if (slp_node)
8942 gcc_assert (!vec_stmt
8943 && STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt);
8945 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
8946 is being vectorized, but outside the immediately enclosing loop. */
8947 if (vec_stmt
8948 && STMT_VINFO_LOOP_VINFO (stmt_info)
8949 && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
8950 STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
8951 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
8952 && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
8953 || STMT_VINFO_RELEVANT (stmt_info) ==
8954 vect_used_in_outer_by_reduction))
8956 struct loop *innerloop = LOOP_VINFO_LOOP (
8957 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
8958 imm_use_iterator imm_iter;
8959 use_operand_p use_p;
8960 tree scalar_dest;
8961 gimple *exit_phi;
8963 if (dump_enabled_p ())
8964 dump_printf_loc (MSG_NOTE, vect_location,
8965 "Record the vdef for outer-loop vectorization.\n");
8967 /* Find the relevant loop-exit phi-node, and record the vec_stmt there
8968 (to be used when vectorizing outer-loop stmts that use the DEF of
8969 STMT). */
8970 if (gimple_code (stmt) == GIMPLE_PHI)
8971 scalar_dest = PHI_RESULT (stmt);
8972 else
8973 scalar_dest = gimple_assign_lhs (stmt);
8975 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
8977 if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
8979 exit_phi = USE_STMT (use_p);
8980 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
8985 /* Handle stmts whose DEF is used outside the loop-nest that is
8986 being vectorized. */
8987 if (STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
8989 done = can_vectorize_live_stmts (stmt, gsi, slp_node, &vec_stmt);
8990 gcc_assert (done);
8993 if (vec_stmt)
8994 STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
8996 return is_store;
9000 /* Remove a group of stores (for SLP or interleaving), free their
9001 stmt_vec_info. */
9003 void
9004 vect_remove_stores (gimple *first_stmt)
9006 gimple *next = first_stmt;
9007 gimple *tmp;
9008 gimple_stmt_iterator next_si;
9010 while (next)
9012 stmt_vec_info stmt_info = vinfo_for_stmt (next);
9014 tmp = GROUP_NEXT_ELEMENT (stmt_info);
9015 if (is_pattern_stmt_p (stmt_info))
9016 next = STMT_VINFO_RELATED_STMT (stmt_info);
9017 /* Free the attached stmt_vec_info and remove the stmt. */
9018 next_si = gsi_for_stmt (next);
9019 unlink_stmt_vdef (next);
9020 gsi_remove (&next_si, true);
9021 release_defs (next);
9022 free_stmt_vec_info (next);
9023 next = tmp;
9028 /* Function new_stmt_vec_info.
9030 Create and initialize a new stmt_vec_info struct for STMT. */
9032 stmt_vec_info
9033 new_stmt_vec_info (gimple *stmt, vec_info *vinfo)
9035 stmt_vec_info res;
9036 res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));
9038 STMT_VINFO_TYPE (res) = undef_vec_info_type;
9039 STMT_VINFO_STMT (res) = stmt;
9040 res->vinfo = vinfo;
9041 STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
9042 STMT_VINFO_LIVE_P (res) = false;
9043 STMT_VINFO_VECTYPE (res) = NULL;
9044 STMT_VINFO_VEC_STMT (res) = NULL;
9045 STMT_VINFO_VECTORIZABLE (res) = true;
9046 STMT_VINFO_IN_PATTERN_P (res) = false;
9047 STMT_VINFO_RELATED_STMT (res) = NULL;
9048 STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
9049 STMT_VINFO_DATA_REF (res) = NULL;
9050 STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION;
9051 STMT_VINFO_VEC_CONST_COND_REDUC_CODE (res) = ERROR_MARK;
9053 if (gimple_code (stmt) == GIMPLE_PHI
9054 && is_loop_header_bb_p (gimple_bb (stmt)))
9055 STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
9056 else
9057 STMT_VINFO_DEF_TYPE (res) = vect_internal_def;
9059 STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
9060 STMT_SLP_TYPE (res) = loop_vect;
9061 STMT_VINFO_NUM_SLP_USES (res) = 0;
9063 GROUP_FIRST_ELEMENT (res) = NULL;
9064 GROUP_NEXT_ELEMENT (res) = NULL;
9065 GROUP_SIZE (res) = 0;
9066 GROUP_STORE_COUNT (res) = 0;
9067 GROUP_GAP (res) = 0;
9068 GROUP_SAME_DR_STMT (res) = NULL;
9070 return res;
9074 /* Create a hash table for stmt_vec_info. */
9076 void
9077 init_stmt_vec_info_vec (void)
9079 gcc_assert (!stmt_vec_info_vec.exists ());
9080 stmt_vec_info_vec.create (50);
9084 /* Free hash table for stmt_vec_info. */
9086 void
9087 free_stmt_vec_info_vec (void)
9089 unsigned int i;
9090 stmt_vec_info info;
9091 FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
9092 if (info != NULL)
9093 free_stmt_vec_info (STMT_VINFO_STMT (info));
9094 gcc_assert (stmt_vec_info_vec.exists ());
9095 stmt_vec_info_vec.release ();
9099 /* Free stmt vectorization related info. */
9101 void
9102 free_stmt_vec_info (gimple *stmt)
9104 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
9106 if (!stmt_info)
9107 return;
9109 /* Check if this statement has a related "pattern stmt"
9110 (introduced by the vectorizer during the pattern recognition
9111 pass). Free pattern's stmt_vec_info and def stmt's stmt_vec_info
9112 too. */
9113 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
9115 stmt_vec_info patt_info
9116 = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
9117 if (patt_info)
9119 gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
9120 gimple *patt_stmt = STMT_VINFO_STMT (patt_info);
9121 gimple_set_bb (patt_stmt, NULL);
9122 tree lhs = gimple_get_lhs (patt_stmt);
9123 if (lhs && TREE_CODE (lhs) == SSA_NAME)
9124 release_ssa_name (lhs);
9125 if (seq)
9127 gimple_stmt_iterator si;
9128 for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
9130 gimple *seq_stmt = gsi_stmt (si);
9131 gimple_set_bb (seq_stmt, NULL);
9132 lhs = gimple_get_lhs (seq_stmt);
9133 if (lhs && TREE_CODE (lhs) == SSA_NAME)
9134 release_ssa_name (lhs);
9135 free_stmt_vec_info (seq_stmt);
9138 free_stmt_vec_info (patt_stmt);
9142 STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
9143 STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
9144 set_vinfo_for_stmt (stmt, NULL);
9145 free (stmt_info);
9149 /* Function get_vectype_for_scalar_type_and_size.
9151 Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
9152 by the target. */
9154 static tree
9155 get_vectype_for_scalar_type_and_size (tree scalar_type, poly_uint64 size)
9157 tree orig_scalar_type = scalar_type;
9158 scalar_mode inner_mode;
9159 machine_mode simd_mode;
9160 poly_uint64 nunits;
9161 tree vectype;
9163 if (!is_int_mode (TYPE_MODE (scalar_type), &inner_mode)
9164 && !is_float_mode (TYPE_MODE (scalar_type), &inner_mode))
9165 return NULL_TREE;
9167 unsigned int nbytes = GET_MODE_SIZE (inner_mode);
9169 /* For vector types of elements whose mode precision doesn't
9170 match their type's precision we use an element type of mode
9171 precision. The vectorization routines will have to make sure
9172 they support the proper result truncation/extension.
9173 We also make sure to build vector types with INTEGER_TYPE
9174 component type only. */
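/* For example, an ENUMERAL_TYPE element is replaced by the INTEGER_TYPE
   of the same mode and signedness, and a type whose precision is
   narrower than its mode (e.g. a bit-field type) is replaced by an
   integer type of the full mode precision.  */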
9175 if (INTEGRAL_TYPE_P (scalar_type)
9176 && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
9177 || TREE_CODE (scalar_type) != INTEGER_TYPE))
9178 scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
9179 TYPE_UNSIGNED (scalar_type));
9181 /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
9182 When the component mode passes the above test simply use a type
9183 corresponding to that mode. The theory is that any use that
9184 would cause problems with this will disable vectorization anyway. */
9185 else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
9186 && !INTEGRAL_TYPE_P (scalar_type))
9187 scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);
9189 /* We can't build a vector type of elements with alignment bigger than
9190 their size. */
9191 else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
9192 scalar_type = lang_hooks.types.type_for_mode (inner_mode,
9193 TYPE_UNSIGNED (scalar_type));
9195 /* If we fell back to using the mode, fail if there was
9196 no scalar type for it. */
9197 if (scalar_type == NULL_TREE)
9198 return NULL_TREE;
9200 /* If no size was supplied use the mode the target prefers. Otherwise
9201 look up a vector mode of the specified size. */
9202 if (known_eq (size, 0U))
9203 simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
9204 else if (!multiple_p (size, nbytes, &nunits)
9205 || !mode_for_vector (inner_mode, nunits).exists (&simd_mode))
9206 return NULL_TREE;
9207 /* NOTE: nunits == 1 is allowed to support single element vector types. */
9208 if (!multiple_p (GET_MODE_SIZE (simd_mode), nbytes, &nunits))
9209 return NULL_TREE;
9211 vectype = build_vector_type (scalar_type, nunits);
9213 if (!VECTOR_MODE_P (TYPE_MODE (vectype))
9214 && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
9215 return NULL_TREE;
9217 /* Re-attach the address-space qualifier if we canonicalized the scalar
9218 type. */
9219 if (TYPE_ADDR_SPACE (orig_scalar_type) != TYPE_ADDR_SPACE (vectype))
9220 return build_qualified_type
9221 (vectype, KEEP_QUAL_ADDR_SPACE (TYPE_QUALS (orig_scalar_type)));
9223 return vectype;
9226 poly_uint64 current_vector_size;
9228 /* Function get_vectype_for_scalar_type.
9230 Returns the vector type corresponding to SCALAR_TYPE as supported
9231 by the target. */
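/* For example, with current_vector_size == 16 (e.g. 128-bit SSE vectors)
   and SCALAR_TYPE int, this returns a 4-element integer vector type.
   The first successful call made while current_vector_size is still 0
   latches the preferred size for subsequent calls.  */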
9233 tree
9234 get_vectype_for_scalar_type (tree scalar_type)
9236 tree vectype;
9237 vectype = get_vectype_for_scalar_type_and_size (scalar_type,
9238 current_vector_size);
9239 if (vectype
9240 && known_eq (current_vector_size, 0U))
9241 current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
9242 return vectype;
9245 /* Function get_mask_type_for_scalar_type.
9247 Returns the mask type corresponding to a result of comparison
9248 of vectors of specified SCALAR_TYPE as supported by target. */
9250 tree
9251 get_mask_type_for_scalar_type (tree scalar_type)
9253 tree vectype = get_vectype_for_scalar_type (scalar_type);
9255 if (!vectype)
9256 return NULL;
9258 return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype),
9259 current_vector_size);
9262 /* Function get_same_sized_vectype
9264 Returns a vector type corresponding to SCALAR_TYPE of size
9265 VECTOR_TYPE if supported by the target. */
9267 tree
9268 get_same_sized_vectype (tree scalar_type, tree vector_type)
9270 if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type))
9271 return build_same_sized_truth_vector_type (vector_type);
9273 return get_vectype_for_scalar_type_and_size
9274 (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
9277 /* Function vect_is_simple_use.
9279 Input:
9280 VINFO - the vect info of the loop or basic block that is being vectorized.
9281 OPERAND - operand in the loop or bb.
9282 Output:
9283 DEF_STMT - the defining stmt in case OPERAND is an SSA_NAME.
9284 DT - the type of definition
9286 Returns whether a stmt with OPERAND can be vectorized.
9287 For loops, supportable operands are constants, loop invariants, and operands
9288 that are defined by the current iteration of the loop. Unsupportable
9289 operands are those that are defined by a previous iteration of the loop (as
9290 is the case in reduction/induction computations).
9291 For basic blocks, supportable operands are constants and bb invariants.
9292 For now, operands defined outside the basic block are not supported. */
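/* For example (hypothetical), when vectorizing the body of

     for (i = 0; i < n; i++)
       a[i] = b[i] * x + 3;

   the constant 3 is vect_constant_def, the loop-invariant x is
   vect_external_def, and the SSA name holding the loaded value of b[i]
   is vect_internal_def.  */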
9294 bool
9295 vect_is_simple_use (tree operand, vec_info *vinfo,
9296 gimple **def_stmt, enum vect_def_type *dt)
9298 *def_stmt = NULL;
9299 *dt = vect_unknown_def_type;
9301 if (dump_enabled_p ())
9303 dump_printf_loc (MSG_NOTE, vect_location,
9304 "vect_is_simple_use: operand ");
9305 dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
9306 dump_printf (MSG_NOTE, "\n");
9309 if (CONSTANT_CLASS_P (operand))
9311 *dt = vect_constant_def;
9312 return true;
9315 if (is_gimple_min_invariant (operand))
9317 *dt = vect_external_def;
9318 return true;
9321 if (TREE_CODE (operand) != SSA_NAME)
9323 if (dump_enabled_p ())
9324 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9325 "not ssa-name.\n");
9326 return false;
9329 if (SSA_NAME_IS_DEFAULT_DEF (operand))
9331 *dt = vect_external_def;
9332 return true;
9335 *def_stmt = SSA_NAME_DEF_STMT (operand);
9336 if (dump_enabled_p ())
9338 dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
9339 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
9342 if (! vect_stmt_in_region_p (vinfo, *def_stmt))
9343 *dt = vect_external_def;
9344 else
9346 stmt_vec_info stmt_vinfo = vinfo_for_stmt (*def_stmt);
9347 *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
9350 if (dump_enabled_p ())
9352 dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
9353 switch (*dt)
9355 case vect_uninitialized_def:
9356 dump_printf (MSG_NOTE, "uninitialized\n");
9357 break;
9358 case vect_constant_def:
9359 dump_printf (MSG_NOTE, "constant\n");
9360 break;
9361 case vect_external_def:
9362 dump_printf (MSG_NOTE, "external\n");
9363 break;
9364 case vect_internal_def:
9365 dump_printf (MSG_NOTE, "internal\n");
9366 break;
9367 case vect_induction_def:
9368 dump_printf (MSG_NOTE, "induction\n");
9369 break;
9370 case vect_reduction_def:
9371 dump_printf (MSG_NOTE, "reduction\n");
9372 break;
9373 case vect_double_reduction_def:
9374 dump_printf (MSG_NOTE, "double reduction\n");
9375 break;
9376 case vect_nested_cycle:
9377 dump_printf (MSG_NOTE, "nested cycle\n");
9378 break;
9379 case vect_unknown_def_type:
9380 dump_printf (MSG_NOTE, "unknown\n");
9381 break;
9385 if (*dt == vect_unknown_def_type)
9387 if (dump_enabled_p ())
9388 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9389 "Unsupported pattern.\n");
9390 return false;
9393 switch (gimple_code (*def_stmt))
9395 case GIMPLE_PHI:
9396 case GIMPLE_ASSIGN:
9397 case GIMPLE_CALL:
9398 break;
9399 default:
9400 if (dump_enabled_p ())
9401 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
9402 "unsupported defining stmt:\n");
9403 return false;
9406 return true;
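/* A typical caller sketch (OP and VINFO are assumed to come from the
   surrounding analysis):

     gimple *def_stmt;
     enum vect_def_type dt;
     if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
       return false;   // operand cannot be handled
     if (dt == vect_constant_def || dt == vect_external_def)
       ;               // invariant operand: no vectorized def is needed

   The overload below additionally reports the vector type of the
   definition.  */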
9409 /* Function vect_is_simple_use.
9411 Same as vect_is_simple_use but also determines the vector operand
9412 type of OPERAND and stores it to *VECTYPE. If the definition of
9413 OPERAND is vect_uninitialized_def, vect_constant_def or
9414 vect_external_def *VECTYPE will be set to NULL_TREE and the caller
9415 is responsible to compute the best suited vector type for the
9416 scalar operand. */
9418 bool
9419 vect_is_simple_use (tree operand, vec_info *vinfo,
9420 gimple **def_stmt, enum vect_def_type *dt, tree *vectype)
9422 if (!vect_is_simple_use (operand, vinfo, def_stmt, dt))
9423 return false;
9425 /* Now get a vector type if the def is internal, otherwise supply
9426 NULL_TREE and leave it up to the caller to figure out a proper
9427 type for the use stmt. */
9428 if (*dt == vect_internal_def
9429 || *dt == vect_induction_def
9430 || *dt == vect_reduction_def
9431 || *dt == vect_double_reduction_def
9432 || *dt == vect_nested_cycle)
9434 stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
9436 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
9437 && !STMT_VINFO_RELEVANT (stmt_info)
9438 && !STMT_VINFO_LIVE_P (stmt_info))
9439 stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
9441 *vectype = STMT_VINFO_VECTYPE (stmt_info);
9442 gcc_assert (*vectype != NULL_TREE);
9444 else if (*dt == vect_uninitialized_def
9445 || *dt == vect_constant_def
9446 || *dt == vect_external_def)
9447 *vectype = NULL_TREE;
9448 else
9449 gcc_unreachable ();
9451 return true;
9455 /* Function supportable_widening_operation
9457 Check whether an operation represented by the code CODE is a
9458 widening operation that is supported by the target platform in
9459 vector form (i.e., when operating on arguments of type VECTYPE_IN
9460 and producing a result of type VECTYPE_OUT).
9462 Widening operations we currently support are NOP (CONVERT), FLOAT
9463 and WIDEN_MULT. This function checks if these operations are supported
9464 by the target platform either directly (via vector tree-codes), or via
9465 target builtins.
9467 Output:
9468 - CODE1 and CODE2 are codes of vector operations to be used when
9469 vectorizing the operation, if available.
9470 - MULTI_STEP_CVT determines the number of required intermediate steps in
9471 case of multi-step conversion (like char->short->int - in that case
9472 MULTI_STEP_CVT will be 1).
9473 - INTERM_TYPES contains the intermediate types required to perform the
9474 widening operation (short in the above example). */
9476 bool
9477 supportable_widening_operation (enum tree_code code, gimple *stmt,
9478 tree vectype_out, tree vectype_in,
9479 enum tree_code *code1, enum tree_code *code2,
9480 int *multi_step_cvt,
9481 vec<tree> *interm_types)
9483 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
9484 loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
9485 struct loop *vect_loop = NULL;
9486 machine_mode vec_mode;
9487 enum insn_code icode1, icode2;
9488 optab optab1, optab2;
9489 tree vectype = vectype_in;
9490 tree wide_vectype = vectype_out;
9491 enum tree_code c1, c2;
9492 int i;
9493 tree prev_type, intermediate_type;
9494 machine_mode intermediate_mode, prev_mode;
9495 optab optab3, optab4;
9497 *multi_step_cvt = 0;
9498 if (loop_info)
9499 vect_loop = LOOP_VINFO_LOOP (loop_info);
9501 switch (code)
9503 case WIDEN_MULT_EXPR:
9504 /* The result of a vectorized widening operation usually requires
9505 two vectors (because the widened results do not fit into one vector).
9506 The generated vector results would normally be expected to be
9507 generated in the same order as in the original scalar computation,
9508 i.e. if 8 results are generated in each vector iteration, they are
9509 to be organized as follows:
9510 vect1: [res1,res2,res3,res4],
9511 vect2: [res5,res6,res7,res8].
9513 However, in the special case that the result of the widening
9514 operation is used in a reduction computation only, the order doesn't
9515 matter (because when vectorizing a reduction we change the order of
9516 the computation). Some targets can take advantage of this and
9517 generate more efficient code. For example, targets like Altivec,
9518 that support widen_mult using a sequence of {mult_even,mult_odd}
9519 generate the following vectors:
9520 vect1: [res1,res3,res5,res7],
9521 vect2: [res2,res4,res6,res8].
9523 When vectorizing outer-loops, we execute the inner-loop sequentially
9524 (each vectorized inner-loop iteration contributes to VF outer-loop
9525 iterations in parallel). We therefore don't allow changing the
9526 order of the computation in the inner-loop during outer-loop
9527 vectorization. */
9528 /* TODO: Another case in which order doesn't *really* matter is when we
9529 widen and then contract again, e.g. (short)((int)x * y >> 8).
9530 Normally, pack_trunc performs an even/odd permute, whereas the
9531 repack from an even/odd expansion would be an interleave, which
9532 would be significantly simpler for e.g. AVX2. */
9533 /* In any case, in order to avoid duplicating the code below, recurse
9534 on VEC_WIDEN_MULT_EVEN_EXPR. If it succeeds, all the return values
9535 are properly set up for the caller. If we fail, we'll continue with
9536 a VEC_WIDEN_MULT_LO/HI_EXPR check. */
9537 if (vect_loop
9538 && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
9539 && !nested_in_vect_loop_p (vect_loop, stmt)
9540 && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
9541 stmt, vectype_out, vectype_in,
9542 code1, code2, multi_step_cvt,
9543 interm_types))
9545 /* Elements in a vector with the vect_used_by_reduction property cannot
9546 be reordered if the use chain with this property does not have the
9547 same operation. One such example is s += a * b, where elements
9548 in a and b cannot be reordered. Here we check if the vector defined
9549 by STMT is only directly used in the reduction statement. */
9550 tree lhs = gimple_assign_lhs (stmt);
9551 use_operand_p dummy;
9552 gimple *use_stmt;
9553 stmt_vec_info use_stmt_info = NULL;
9554 if (single_imm_use (lhs, &dummy, &use_stmt)
9555 && (use_stmt_info = vinfo_for_stmt (use_stmt))
9556 && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
9557 return true;
9559 c1 = VEC_WIDEN_MULT_LO_EXPR;
9560 c2 = VEC_WIDEN_MULT_HI_EXPR;
9561 break;
9563 case DOT_PROD_EXPR:
9564 c1 = DOT_PROD_EXPR;
9565 c2 = DOT_PROD_EXPR;
9566 break;
9568 case SAD_EXPR:
9569 c1 = SAD_EXPR;
9570 c2 = SAD_EXPR;
9571 break;
9573 case VEC_WIDEN_MULT_EVEN_EXPR:
9574 /* Support the recursion induced just above. */
9575 c1 = VEC_WIDEN_MULT_EVEN_EXPR;
9576 c2 = VEC_WIDEN_MULT_ODD_EXPR;
9577 break;
9579 case WIDEN_LSHIFT_EXPR:
9580 c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
9581 c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
9582 break;
9584 CASE_CONVERT:
9585 c1 = VEC_UNPACK_LO_EXPR;
9586 c2 = VEC_UNPACK_HI_EXPR;
9587 break;
9589 case FLOAT_EXPR:
9590 c1 = VEC_UNPACK_FLOAT_LO_EXPR;
9591 c2 = VEC_UNPACK_FLOAT_HI_EXPR;
9592 break;
9594 case FIX_TRUNC_EXPR:
9595 /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
9596 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
9597 computing the operation. */
9598 return false;
9600 default:
9601 gcc_unreachable ();
9604 if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
9605 std::swap (c1, c2);
9607 if (code == FIX_TRUNC_EXPR)
9609 /* The signedness is determined from the output operand. */
9610 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
9611 optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
9613 else
9615 optab1 = optab_for_tree_code (c1, vectype, optab_default);
9616 optab2 = optab_for_tree_code (c2, vectype, optab_default);
9619 if (!optab1 || !optab2)
9620 return false;
9622 vec_mode = TYPE_MODE (vectype);
9623 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
9624 || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
9625 return false;
9627 *code1 = c1;
9628 *code2 = c2;
9630 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
9631 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
9632 /* For scalar masks we may have different boolean
9633 vector types having the same QImode. Thus we
9634 add an additional check on the number of elements. */
9635 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
9636 || known_eq (TYPE_VECTOR_SUBPARTS (vectype),
9637 TYPE_VECTOR_SUBPARTS (wide_vectype) * 2));
9639 /* Check if it's a multi-step conversion that can be done using intermediate
9640 types. */
9642 prev_type = vectype;
9643 prev_mode = vec_mode;
9645 if (!CONVERT_EXPR_CODE_P (code))
9646 return false;
9648 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
9649 intermediate steps in the promotion sequence. We try
9650 MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
9651 not. */
9652 interm_types->create (MAX_INTERM_CVT_STEPS);
9653 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
9655 intermediate_mode = insn_data[icode1].operand[0].mode;
9656 if (VECTOR_BOOLEAN_TYPE_P (prev_type))
9658 poly_uint64 intermediate_nelts
9659 = exact_div (TYPE_VECTOR_SUBPARTS (prev_type), 2);
9660 intermediate_type
9661 = build_truth_vector_type (intermediate_nelts,
9662 current_vector_size);
9663 if (intermediate_mode != TYPE_MODE (intermediate_type))
9664 return false;
9666 else
9667 intermediate_type
9668 = lang_hooks.types.type_for_mode (intermediate_mode,
9669 TYPE_UNSIGNED (prev_type));
9671 optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
9672 optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);
9674 if (!optab3 || !optab4
9675 || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
9676 || insn_data[icode1].operand[0].mode != intermediate_mode
9677 || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
9678 || insn_data[icode2].operand[0].mode != intermediate_mode
9679 || ((icode1 = optab_handler (optab3, intermediate_mode))
9680 == CODE_FOR_nothing)
9681 || ((icode2 = optab_handler (optab4, intermediate_mode))
9682 == CODE_FOR_nothing))
9683 break;
9685 interm_types->quick_push (intermediate_type);
9686 (*multi_step_cvt)++;
9688 if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
9689 && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
9690 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
9691 || known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type),
9692 TYPE_VECTOR_SUBPARTS (wide_vectype) * 2));
9694 prev_type = intermediate_type;
9695 prev_mode = intermediate_mode;
9698 interm_types->release ();
9699 return false;
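/* A hedged sketch of how a caller consumes the outputs (STMT, VECTYPE_OUT
   and VECTYPE_IN are assumed to come from the surrounding analysis):

     enum tree_code code1, code2;
     int multi_step_cvt;
     vec<tree> interm_types = vNULL;
     if (supportable_widening_operation (CONVERT_EXPR, stmt, vectype_out,
                                         vectype_in, &code1, &code2,
                                         &multi_step_cvt, &interm_types))
       {
         // Emit CODE1/CODE2 on the low/high halves; each entry of
         // INTERM_TYPES adds one extra widening step, so a char->int
         // conversion typically sees multi_step_cvt == 1 with a 'short'
         // vector as the intermediate type.
       }
     interm_types.release ();  */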
9703 /* Function supportable_narrowing_operation
9705 Check whether an operation represented by the code CODE is a
9706 narrowing operation that is supported by the target platform in
9707 vector form (i.e., when operating on arguments of type VECTYPE_IN
9708 and producing a result of type VECTYPE_OUT).
9710 Narrowing operations we currently support are NOP (CONVERT) and
9711 FIX_TRUNC. This function checks if these operations are supported by
9712 the target platform directly via vector tree-codes.
9714 Output:
9715 - CODE1 is the code of a vector operation to be used when
9716 vectorizing the operation, if available.
9717 - MULTI_STEP_CVT determines the number of required intermediate steps in
9718 case of multi-step conversion (like int->short->char - in that case
9719 MULTI_STEP_CVT will be 1).
9720 - INTERM_TYPES contains the intermediate types required to perform the
9721 narrowing operation (short in the above example). */
9723 bool
9724 supportable_narrowing_operation (enum tree_code code,
9725 tree vectype_out, tree vectype_in,
9726 enum tree_code *code1, int *multi_step_cvt,
9727 vec<tree> *interm_types)
9729 machine_mode vec_mode;
9730 enum insn_code icode1;
9731 optab optab1, interm_optab;
9732 tree vectype = vectype_in;
9733 tree narrow_vectype = vectype_out;
9734 enum tree_code c1;
9735 tree intermediate_type, prev_type;
9736 machine_mode intermediate_mode, prev_mode;
9737 int i;
9738 bool uns;
9740 *multi_step_cvt = 0;
9741 switch (code)
9743 CASE_CONVERT:
9744 c1 = VEC_PACK_TRUNC_EXPR;
9745 break;
9747 case FIX_TRUNC_EXPR:
9748 c1 = VEC_PACK_FIX_TRUNC_EXPR;
9749 break;
9751 case FLOAT_EXPR:
9752 /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
9753 tree code and optabs used for computing the operation. */
9754 return false;
9756 default:
9757 gcc_unreachable ();
9760 if (code == FIX_TRUNC_EXPR)
9761 /* The signedness is determined from the output operand. */
9762 optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
9763 else
9764 optab1 = optab_for_tree_code (c1, vectype, optab_default);
9766 if (!optab1)
9767 return false;
9769 vec_mode = TYPE_MODE (vectype);
9770 if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
9771 return false;
9773 *code1 = c1;
9775 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
9776 /* For scalar masks we may have different boolean
9777 vector types having the same QImode. Thus we
9778 add an additional check on the number of elements. */
9779 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
9780 || known_eq (TYPE_VECTOR_SUBPARTS (vectype) * 2,
9781 TYPE_VECTOR_SUBPARTS (narrow_vectype)));
9783 /* Check if it's a multi-step conversion that can be done using intermediate
9784 types. */
9785 prev_mode = vec_mode;
9786 prev_type = vectype;
9787 if (code == FIX_TRUNC_EXPR)
9788 uns = TYPE_UNSIGNED (vectype_out);
9789 else
9790 uns = TYPE_UNSIGNED (vectype);
9792 /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
9793 conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
9794 costly than signed. */
9795 if (code == FIX_TRUNC_EXPR && uns)
9797 enum insn_code icode2;
9799 intermediate_type
9800 = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
9801 interm_optab
9802 = optab_for_tree_code (c1, intermediate_type, optab_default);
9803 if (interm_optab != unknown_optab
9804 && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
9805 && insn_data[icode1].operand[0].mode
9806 == insn_data[icode2].operand[0].mode)
9808 uns = false;
9809 optab1 = interm_optab;
9810 icode1 = icode2;
9814 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
9815 intermediate steps in the narrowing sequence. We try
9816 MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not. */
9817 interm_types->create (MAX_INTERM_CVT_STEPS);
9818 for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
9820 intermediate_mode = insn_data[icode1].operand[0].mode;
9821 if (VECTOR_BOOLEAN_TYPE_P (prev_type))
9823 intermediate_type
9824 = build_truth_vector_type (TYPE_VECTOR_SUBPARTS (prev_type) * 2,
9825 current_vector_size);
9826 if (intermediate_mode != TYPE_MODE (intermediate_type))
9827 return false;
9829 else
9830 intermediate_type
9831 = lang_hooks.types.type_for_mode (intermediate_mode, uns);
9832 interm_optab
9833 = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
9834 optab_default);
9835 if (!interm_optab
9836 || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
9837 || insn_data[icode1].operand[0].mode != intermediate_mode
9838 || ((icode1 = optab_handler (interm_optab, intermediate_mode))
9839 == CODE_FOR_nothing))
9840 break;
9842 interm_types->quick_push (intermediate_type);
9843 (*multi_step_cvt)++;
9845 if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
9846 return (!VECTOR_BOOLEAN_TYPE_P (vectype)
9847 || known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type) * 2,
9848 TYPE_VECTOR_SUBPARTS (narrow_vectype)));
9850 prev_mode = intermediate_mode;
9851 prev_type = intermediate_type;
9852 optab1 = interm_optab;
9855 interm_types->release ();
9856 return false;
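/* A hedged sketch for the narrowing direction, mirroring the widening
   case above; char_vectype and int_vectype stand for the corresponding
   vector types and are assumed to exist on the target:

     enum tree_code code1;
     int multi_step_cvt;
     vec<tree> interm_types = vNULL;
     if (supportable_narrowing_operation (CONVERT_EXPR, char_vectype,
                                          int_vectype, &code1,
                                          &multi_step_cvt, &interm_types))
       {
         // code1 is VEC_PACK_TRUNC_EXPR; with same-sized vectors
         // multi_step_cvt is expected to be 1, with a 'short' vector as
         // the single intermediate type.
       }
     interm_types.release ();  */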