gcc/tree-vect-data-refs.c
1 /* Data References Analysis and Manipulation Utilities for Vectorization.
2 Copyright (C) 2003-2019 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
4 and Ira Rosen <irar@il.ibm.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "predict.h"
31 #include "memmodel.h"
32 #include "tm_p.h"
33 #include "ssa.h"
34 #include "optabs-tree.h"
35 #include "cgraph.h"
36 #include "dumpfile.h"
37 #include "alias.h"
38 #include "fold-const.h"
39 #include "stor-layout.h"
40 #include "tree-eh.h"
41 #include "gimplify.h"
42 #include "gimple-iterator.h"
43 #include "gimplify-me.h"
44 #include "tree-ssa-loop-ivopts.h"
45 #include "tree-ssa-loop-manip.h"
46 #include "tree-ssa-loop.h"
47 #include "cfgloop.h"
48 #include "tree-scalar-evolution.h"
49 #include "tree-vectorizer.h"
50 #include "expr.h"
51 #include "builtins.h"
52 #include "tree-cfg.h"
53 #include "tree-hash-traits.h"
54 #include "vec-perm-indices.h"
55 #include "internal-fn.h"
57 /* Return true if load- or store-lanes optab OPTAB is implemented for
58 COUNT vectors of type VECTYPE. NAME is the name of OPTAB. */
60 static bool
61 vect_lanes_optab_supported_p (const char *name, convert_optab optab,
62 tree vectype, unsigned HOST_WIDE_INT count)
64 machine_mode mode, array_mode;
65 bool limit_p;
67 mode = TYPE_MODE (vectype);
68 if (!targetm.array_mode (mode, count).exists (&array_mode))
70 poly_uint64 bits = count * GET_MODE_BITSIZE (mode);
71 limit_p = !targetm.array_mode_supported_p (mode, count);
72 if (!int_mode_for_size (bits, limit_p).exists (&array_mode))
74 if (dump_enabled_p ())
75 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
76 "no array mode for %s[%wu]\n",
77 GET_MODE_NAME (mode), count);
78 return false;
82 if (convert_optab_handler (optab, array_mode, mode) == CODE_FOR_nothing)
84 if (dump_enabled_p ())
85 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
86 "cannot use %s<%s><%s>\n", name,
87 GET_MODE_NAME (array_mode), GET_MODE_NAME (mode));
88 return false;
91 if (dump_enabled_p ())
92 dump_printf_loc (MSG_NOTE, vect_location,
93 "can use %s<%s><%s>\n", name, GET_MODE_NAME (array_mode),
94 GET_MODE_NAME (mode));
96 return true;
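/* For illustration (the concrete numbers are just an example): for VECTYPE
   V4SI and COUNT 2 the target is first asked for a dedicated array mode
   covering two V4SI vectors; failing that, an integer mode of 2 * 128 bits
   is tried instead.  The lanes optab (e.g. vec_load_lanes) must then have a
   handler for that array mode and V4SImode, which targets with LD2/ST2-style
   multi-register load/store instructions typically provide.  */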
100 /* Return the smallest scalar part of STMT_INFO.
101 This is used to determine the vectype of the stmt. We generally set the
102 vectype according to the type of the result (lhs). For stmts whose
103 result-type is different than the type of the arguments (e.g., demotion,
104 promotion), vectype will be reset appropriately (later). Note that we have
105 to visit the smallest datatype in this function, because that determines the
106 VF. If the smallest datatype in the loop is present only as the rhs of a
107 promotion operation, we would miss it.
108 Such a case, where a variable of this datatype does not appear in the lhs
109 anywhere in the loop, can only occur if it's an invariant: e.g.:
110 'int_x = (int) short_inv', which we'd expect to have been optimized away by
111 invariant motion. However, we cannot rely on invariant motion to always
112 take invariants out of the loop, and so in the case of promotion we also
113 have to check the rhs.
114 LHS_SIZE_UNIT and RHS_SIZE_UNIT contain the sizes of the corresponding
115 types. */
117 tree
118 vect_get_smallest_scalar_type (stmt_vec_info stmt_info,
119 HOST_WIDE_INT *lhs_size_unit,
120 HOST_WIDE_INT *rhs_size_unit)
122 tree scalar_type = gimple_expr_type (stmt_info->stmt);
123 HOST_WIDE_INT lhs, rhs;
125 /* During the analysis phase, this function is called on arbitrary
126 statements that might not have scalar results. */
127 if (!tree_fits_uhwi_p (TYPE_SIZE_UNIT (scalar_type)))
128 return scalar_type;
130 lhs = rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
132 gassign *assign = dyn_cast <gassign *> (stmt_info->stmt);
133 if (assign
134 && (gimple_assign_cast_p (assign)
135 || gimple_assign_rhs_code (assign) == DOT_PROD_EXPR
136 || gimple_assign_rhs_code (assign) == WIDEN_SUM_EXPR
137 || gimple_assign_rhs_code (assign) == WIDEN_MULT_EXPR
138 || gimple_assign_rhs_code (assign) == WIDEN_LSHIFT_EXPR
139 || gimple_assign_rhs_code (assign) == FLOAT_EXPR))
141 tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (assign));
143 rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type));
144 if (rhs < lhs)
145 scalar_type = rhs_type;
147 else if (gcall *call = dyn_cast <gcall *> (stmt_info->stmt))
149 unsigned int i = 0;
150 if (gimple_call_internal_p (call))
152 internal_fn ifn = gimple_call_internal_fn (call);
153 if (internal_load_fn_p (ifn) || internal_store_fn_p (ifn))
154 /* gimple_expr_type already picked the type of the loaded
155 or stored data. */
156 i = ~0U;
157 else if (internal_fn_mask_index (ifn) == 0)
158 i = 1;
160 if (i < gimple_call_num_args (call))
162 tree rhs_type = TREE_TYPE (gimple_call_arg (call, i));
163 if (tree_fits_uhwi_p (TYPE_SIZE_UNIT (rhs_type)))
165 rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type));
166 if (rhs < lhs)
167 scalar_type = rhs_type;
172 *lhs_size_unit = lhs;
173 *rhs_size_unit = rhs;
174 return scalar_type;
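/* For illustration: in a widening loop such as

     short *s;  int *d;
     for (i = 0; i < n; i++)
       d[i] = (int) s[i];

   the conversion statement has a 4-byte lhs but a 2-byte rhs, so this
   function returns the short type; the vectorization factor is then based
   on the number of shorts per vector, which would be missed if only the
   int result type were considered.  */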
178 /* Insert DDR into LOOP_VINFO list of ddrs that may alias and need to be
179 tested at run-time. Return TRUE if DDR was successfully inserted.
180 Return false if versioning is not supported. */
182 static opt_result
183 vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
185 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
187 if ((unsigned) param_vect_max_version_for_alias_checks == 0)
188 return opt_result::failure_at (vect_location,
189 "will not create alias checks, as"
190 " --param vect-max-version-for-alias-checks"
191 " == 0\n");
193 opt_result res
194 = runtime_alias_check_p (ddr, loop,
195 optimize_loop_nest_for_speed_p (loop));
196 if (!res)
197 return res;
199 LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).safe_push (ddr);
200 return opt_result::success ();
203 /* Record that loop LOOP_VINFO needs to check that VALUE is nonzero. */
205 static void
206 vect_check_nonzero_value (loop_vec_info loop_vinfo, tree value)
208 vec<tree> checks = LOOP_VINFO_CHECK_NONZERO (loop_vinfo);
209 for (unsigned int i = 0; i < checks.length(); ++i)
210 if (checks[i] == value)
211 return;
213 if (dump_enabled_p ())
214 dump_printf_loc (MSG_NOTE, vect_location,
215 "need run-time check that %T is nonzero\n",
216 value);
217 LOOP_VINFO_CHECK_NONZERO (loop_vinfo).safe_push (value);
220 /* Return true if we know that the order of vectorized DR_INFO_A and
221 vectorized DR_INFO_B will be the same as the order of DR_INFO_A and
222 DR_INFO_B. At least one of the accesses is a write. */
224 static bool
225 vect_preserves_scalar_order_p (dr_vec_info *dr_info_a, dr_vec_info *dr_info_b)
227 stmt_vec_info stmtinfo_a = dr_info_a->stmt;
228 stmt_vec_info stmtinfo_b = dr_info_b->stmt;
230 /* Single statements are always kept in their original order. */
231 if (!STMT_VINFO_GROUPED_ACCESS (stmtinfo_a)
232 && !STMT_VINFO_GROUPED_ACCESS (stmtinfo_b))
233 return true;
235 /* STMT_A and STMT_B belong to overlapping groups. All loads in a
236 SLP group are emitted at the position of the last scalar load and
237 all loads in an interleaving group are emitted at the position
238 of the first scalar load.
239 Stores in a group are emitted at the position of the last scalar store.
240 Compute that position and check whether the resulting order matches
241 the current one.
242 We have not yet decided between SLP and interleaving so we have
243 to conservatively assume both. */
244 stmt_vec_info il_a;
245 stmt_vec_info last_a = il_a = DR_GROUP_FIRST_ELEMENT (stmtinfo_a);
246 if (last_a)
248 for (stmt_vec_info s = DR_GROUP_NEXT_ELEMENT (last_a); s;
249 s = DR_GROUP_NEXT_ELEMENT (s))
250 last_a = get_later_stmt (last_a, s);
251 if (!DR_IS_WRITE (STMT_VINFO_DATA_REF (stmtinfo_a)))
253 for (stmt_vec_info s = DR_GROUP_NEXT_ELEMENT (il_a); s;
254 s = DR_GROUP_NEXT_ELEMENT (s))
255 if (get_later_stmt (il_a, s) == il_a)
256 il_a = s;
258 else
259 il_a = last_a;
261 else
262 last_a = il_a = stmtinfo_a;
263 stmt_vec_info il_b;
264 stmt_vec_info last_b = il_b = DR_GROUP_FIRST_ELEMENT (stmtinfo_b);
265 if (last_b)
267 for (stmt_vec_info s = DR_GROUP_NEXT_ELEMENT (last_b); s;
268 s = DR_GROUP_NEXT_ELEMENT (s))
269 last_b = get_later_stmt (last_b, s);
270 if (!DR_IS_WRITE (STMT_VINFO_DATA_REF (stmtinfo_b)))
272 for (stmt_vec_info s = DR_GROUP_NEXT_ELEMENT (il_b); s;
273 s = DR_GROUP_NEXT_ELEMENT (s))
274 if (get_later_stmt (il_b, s) == il_b)
275 il_b = s;
277 else
278 il_b = last_b;
280 else
281 last_b = il_b = stmtinfo_b;
282 bool a_after_b = (get_later_stmt (stmtinfo_a, stmtinfo_b) == stmtinfo_a);
283 return (/* SLP */
284 (get_later_stmt (last_a, last_b) == last_a) == a_after_b
285 /* Interleaving */
286 && (get_later_stmt (il_a, il_b) == il_a) == a_after_b
287 /* Mixed */
288 && (get_later_stmt (il_a, last_b) == il_a) == a_after_b
289 && (get_later_stmt (last_a, il_b) == last_a) == a_after_b);
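/* For illustration: for the interleaved store group

     a[2*i]   = x;
     ...      = b[i];
     a[2*i+1] = y;

   the vector store of the whole group is emitted at the position of the
   last scalar store (a[2*i+1]), so the function checks that this emission
   position is on the same side of the other access (the load from b) as
   the original scalar statement was.  For load groups both the SLP
   position (last scalar load) and the interleaving position (first scalar
   load) are checked, since that choice has not been made yet.  */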
292 /* A subroutine of vect_analyze_data_ref_dependence. Handle
293 DDR_COULD_BE_INDEPENDENT_P DDR that has a known set of dependence
294 distances. These distances are conservatively correct but they don't
295 reflect a guaranteed dependence.
297 Return true if this function does all the work necessary to avoid
298 an alias or false if the caller should use the dependence distances
299 to limit the vectorization factor in the usual way. LOOP_DEPTH is
300 the depth of the loop described by LOOP_VINFO and the other arguments
301 are as for vect_analyze_data_ref_dependence. */
303 static bool
304 vect_analyze_possibly_independent_ddr (data_dependence_relation *ddr,
305 loop_vec_info loop_vinfo,
306 int loop_depth, unsigned int *max_vf)
308 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
309 lambda_vector dist_v;
310 unsigned int i;
311 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
313 int dist = dist_v[loop_depth];
314 if (dist != 0 && !(dist > 0 && DDR_REVERSED_P (ddr)))
316 /* If the user asserted safelen >= DIST consecutive iterations
317 can be executed concurrently, assume independence.
319 ??? An alternative would be to add the alias check even
320 in this case, and vectorize the fallback loop with the
321 maximum VF set to safelen. However, if the user has
322 explicitly given a length, it's less likely that that
323 would be a win. */
324 if (loop->safelen >= 2 && abs_hwi (dist) <= loop->safelen)
326 if ((unsigned int) loop->safelen < *max_vf)
327 *max_vf = loop->safelen;
328 LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
329 continue;
332 /* For dependence distances of 2 or more, we have the option
333 of limiting VF or checking for an alias at runtime.
334 Prefer to check at runtime if we can, to avoid limiting
335 the VF unnecessarily when the bases are in fact independent.
337 Note that the alias checks will be removed if the VF ends up
338 being small enough. */
339 dr_vec_info *dr_info_a = loop_vinfo->lookup_dr (DDR_A (ddr));
340 dr_vec_info *dr_info_b = loop_vinfo->lookup_dr (DDR_B (ddr));
341 return (!STMT_VINFO_GATHER_SCATTER_P (dr_info_a->stmt)
342 && !STMT_VINFO_GATHER_SCATTER_P (dr_info_b->stmt)
343 && vect_mark_for_runtime_alias_test (ddr, loop_vinfo));
346 return true;
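/* For illustration: with

     #pragma omp simd safelen(8)

   a conservatively computed dependence distance of up to 8 is treated as
   independence and *max_vf is merely clamped to 8.  For distances not
   covered by safelen, the code above prefers a runtime alias check (when
   neither access is a gather/scatter) over limiting the VF, since those
   checks are removed again if the final VF ends up small enough to make
   them unnecessary.  */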
350 /* Function vect_analyze_data_ref_dependence.
352 FIXME: I needed to change the sense of the returned flag.
354 Return FALSE if there (might) exist a dependence between a memory-reference
355 DRA and a memory-reference DRB. When versioning for alias may check a
356 dependence at run-time, return TRUE. Adjust *MAX_VF according to
357 the data dependence. */
359 static opt_result
360 vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
361 loop_vec_info loop_vinfo,
362 unsigned int *max_vf)
364 unsigned int i;
365 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
366 struct data_reference *dra = DDR_A (ddr);
367 struct data_reference *drb = DDR_B (ddr);
368 dr_vec_info *dr_info_a = loop_vinfo->lookup_dr (dra);
369 dr_vec_info *dr_info_b = loop_vinfo->lookup_dr (drb);
370 stmt_vec_info stmtinfo_a = dr_info_a->stmt;
371 stmt_vec_info stmtinfo_b = dr_info_b->stmt;
372 lambda_vector dist_v;
373 unsigned int loop_depth;
375 /* In loop analysis all data references should be vectorizable. */
376 if (!STMT_VINFO_VECTORIZABLE (stmtinfo_a)
377 || !STMT_VINFO_VECTORIZABLE (stmtinfo_b))
378 gcc_unreachable ();
380 /* Independent data accesses. */
381 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
382 return opt_result::success ();
384 if (dra == drb
385 || (DR_IS_READ (dra) && DR_IS_READ (drb)))
386 return opt_result::success ();
388 /* We do not have to consider dependences between accesses that belong
389 to the same group, unless the stride could be smaller than the
390 group size. */
391 if (DR_GROUP_FIRST_ELEMENT (stmtinfo_a)
392 && (DR_GROUP_FIRST_ELEMENT (stmtinfo_a)
393 == DR_GROUP_FIRST_ELEMENT (stmtinfo_b))
394 && !STMT_VINFO_STRIDED_P (stmtinfo_a))
395 return opt_result::success ();
397 /* Even if we have an anti-dependence then, as the vectorized loop covers at
398 least two scalar iterations, there is always also a true dependence.
399 As the vectorizer does not re-order loads and stores we can ignore
400 the anti-dependence if TBAA can disambiguate both DRs similar to the
401 case with known negative distance anti-dependences (positive
402 distance anti-dependences would violate TBAA constraints). */
403 if (((DR_IS_READ (dra) && DR_IS_WRITE (drb))
404 || (DR_IS_WRITE (dra) && DR_IS_READ (drb)))
405 && !alias_sets_conflict_p (get_alias_set (DR_REF (dra)),
406 get_alias_set (DR_REF (drb))))
407 return opt_result::success ();
409 /* Unknown data dependence. */
410 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
412 /* If user asserted safelen consecutive iterations can be
413 executed concurrently, assume independence. */
414 if (loop->safelen >= 2)
416 if ((unsigned int) loop->safelen < *max_vf)
417 *max_vf = loop->safelen;
418 LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
419 return opt_result::success ();
422 if (STMT_VINFO_GATHER_SCATTER_P (stmtinfo_a)
423 || STMT_VINFO_GATHER_SCATTER_P (stmtinfo_b))
424 return opt_result::failure_at
425 (stmtinfo_a->stmt,
426 "versioning for alias not supported for: "
427 "can't determine dependence between %T and %T\n",
428 DR_REF (dra), DR_REF (drb));
430 if (dump_enabled_p ())
431 dump_printf_loc (MSG_MISSED_OPTIMIZATION, stmtinfo_a->stmt,
432 "versioning for alias required: "
433 "can't determine dependence between %T and %T\n",
434 DR_REF (dra), DR_REF (drb));
436 /* Add to list of ddrs that need to be tested at run-time. */
437 return vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
440 /* Known data dependence. */
441 if (DDR_NUM_DIST_VECTS (ddr) == 0)
443 /* If user asserted safelen consecutive iterations can be
444 executed concurrently, assume independence. */
445 if (loop->safelen >= 2)
447 if ((unsigned int) loop->safelen < *max_vf)
448 *max_vf = loop->safelen;
449 LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
450 return opt_result::success ();
453 if (STMT_VINFO_GATHER_SCATTER_P (stmtinfo_a)
454 || STMT_VINFO_GATHER_SCATTER_P (stmtinfo_b))
455 return opt_result::failure_at
456 (stmtinfo_a->stmt,
457 "versioning for alias not supported for: "
458 "bad dist vector for %T and %T\n",
459 DR_REF (dra), DR_REF (drb));
461 if (dump_enabled_p ())
462 dump_printf_loc (MSG_MISSED_OPTIMIZATION, stmtinfo_a->stmt,
463 "versioning for alias required: "
464 "bad dist vector for %T and %T\n",
465 DR_REF (dra), DR_REF (drb));
466 /* Add to list of ddrs that need to be tested at run-time. */
467 return vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
470 loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
472 if (DDR_COULD_BE_INDEPENDENT_P (ddr)
473 && vect_analyze_possibly_independent_ddr (ddr, loop_vinfo,
474 loop_depth, max_vf))
475 return opt_result::success ();
477 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
479 int dist = dist_v[loop_depth];
481 if (dump_enabled_p ())
482 dump_printf_loc (MSG_NOTE, vect_location,
483 "dependence distance = %d.\n", dist);
485 if (dist == 0)
487 if (dump_enabled_p ())
488 dump_printf_loc (MSG_NOTE, vect_location,
489 "dependence distance == 0 between %T and %T\n",
490 DR_REF (dra), DR_REF (drb));
492 /* When we perform grouped accesses and perform implicit CSE
493 by detecting equal accesses and doing disambiguation with
494 runtime alias tests like for
495 .. = a[i];
496 .. = a[i+1];
497 a[i] = ..;
498 a[i+1] = ..;
499 *p = ..;
500 .. = a[i];
501 .. = a[i+1];
502 where we will end up loading { a[i], a[i+1] } once, make
503 sure that inserting group loads before the first load and
504 stores after the last store will do the right thing.
505 Similar for groups like
506 a[i] = ...;
507 ... = a[i];
508 a[i+1] = ...;
509 where loads from the group interleave with the store. */
510 if (!vect_preserves_scalar_order_p (dr_info_a, dr_info_b))
511 return opt_result::failure_at (stmtinfo_a->stmt,
512 "READ_WRITE dependence"
513 " in interleaving.\n");
515 if (loop->safelen < 2)
517 tree indicator = dr_zero_step_indicator (dra);
518 if (!indicator || integer_zerop (indicator))
519 return opt_result::failure_at (stmtinfo_a->stmt,
520 "access also has a zero step\n");
521 else if (TREE_CODE (indicator) != INTEGER_CST)
522 vect_check_nonzero_value (loop_vinfo, indicator);
524 continue;
527 if (dist > 0 && DDR_REVERSED_P (ddr))
529 /* If DDR_REVERSED_P the order of the data-refs in DDR was
530 reversed (to make distance vector positive), and the actual
531 distance is negative. */
532 if (dump_enabled_p ())
533 dump_printf_loc (MSG_NOTE, vect_location,
534 "dependence distance negative.\n");
535 /* When doing outer loop vectorization, we need to check if there is
536 a backward dependence at the inner loop level if the dependence
537 at the outer loop is reversed. See PR81740. */
538 if (nested_in_vect_loop_p (loop, stmtinfo_a)
539 || nested_in_vect_loop_p (loop, stmtinfo_b))
541 unsigned inner_depth = index_in_loop_nest (loop->inner->num,
542 DDR_LOOP_NEST (ddr));
543 if (dist_v[inner_depth] < 0)
544 return opt_result::failure_at (stmtinfo_a->stmt,
545 "not vectorized, dependence "
546 "between data-refs %T and %T\n",
547 DR_REF (dra), DR_REF (drb));
549 /* Record a negative dependence distance to later limit the
550 amount of stmt copying / unrolling we can perform.
551 Only need to handle read-after-write dependence. */
552 if (DR_IS_READ (drb)
553 && (STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) == 0
554 || STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) > (unsigned)dist))
555 STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) = dist;
556 continue;
559 unsigned int abs_dist = abs (dist);
560 if (abs_dist >= 2 && abs_dist < *max_vf)
562 /* The dependence distance requires reduction of the maximal
563 vectorization factor. */
564 *max_vf = abs_dist;
565 if (dump_enabled_p ())
566 dump_printf_loc (MSG_NOTE, vect_location,
567 "adjusting maximal vectorization factor to %i\n",
568 *max_vf);
571 if (abs_dist >= *max_vf)
573 /* Dependence distance does not create dependence, as far as
574 vectorization is concerned, in this case. */
575 if (dump_enabled_p ())
576 dump_printf_loc (MSG_NOTE, vect_location,
577 "dependence distance >= VF.\n");
578 continue;
581 return opt_result::failure_at (stmtinfo_a->stmt,
582 "not vectorized, possible dependence "
583 "between data-refs %T and %T\n",
584 DR_REF (dra), DR_REF (drb));
587 return opt_result::success ();
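/* For illustration: in

     for (i = 0; i < n; i++)
       a[i + 2] = a[i] + 1;

   the value stored in iteration I is read again in iteration I + 2, giving
   a dependence distance of 2.  Vectorizing with VF == 2 is still correct,
   but VF == 4 would read elements that should already have been stored in
   the same vector iteration, so *max_vf is reduced to 2.  If the distance
   is at least the current *max_vf, the dependence does not constrain
   vectorization at all.  */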
590 /* Function vect_analyze_data_ref_dependences.
592 Examine all the data references in the loop, and make sure there do not
593 exist any data dependences between them. Set *MAX_VF according to
594 the maximum vectorization factor the data dependences allow. */
596 opt_result
597 vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo,
598 unsigned int *max_vf)
600 unsigned int i;
601 struct data_dependence_relation *ddr;
603 DUMP_VECT_SCOPE ("vect_analyze_data_ref_dependences");
605 if (!LOOP_VINFO_DDRS (loop_vinfo).exists ())
607 LOOP_VINFO_DDRS (loop_vinfo)
608 .create (LOOP_VINFO_DATAREFS (loop_vinfo).length ()
609 * LOOP_VINFO_DATAREFS (loop_vinfo).length ());
610 /* We need read-read dependences to compute
611 STMT_VINFO_SAME_ALIGN_REFS. */
612 bool res = compute_all_dependences (LOOP_VINFO_DATAREFS (loop_vinfo),
613 &LOOP_VINFO_DDRS (loop_vinfo),
614 LOOP_VINFO_LOOP_NEST (loop_vinfo),
615 true);
616 gcc_assert (res);
619 LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = true;
621 /* For epilogues we either have no aliases or alias versioning
622 was applied to original loop. Therefore we may just get max_vf
623 using VF of original loop. */
624 if (LOOP_VINFO_EPILOGUE_P (loop_vinfo))
625 *max_vf = LOOP_VINFO_ORIG_MAX_VECT_FACTOR (loop_vinfo);
626 else
627 FOR_EACH_VEC_ELT (LOOP_VINFO_DDRS (loop_vinfo), i, ddr)
629 opt_result res
630 = vect_analyze_data_ref_dependence (ddr, loop_vinfo, max_vf);
631 if (!res)
632 return res;
635 return opt_result::success ();
639 /* Function vect_slp_analyze_data_ref_dependence.
641 Return TRUE if there (might) exist a dependence between a memory-reference
642 DRA and a memory-reference DRB for VINFO. When versioning for alias
643 may check a dependence at run-time, return FALSE. Adjust *MAX_VF
644 according to the data dependence. */
646 static bool
647 vect_slp_analyze_data_ref_dependence (vec_info *vinfo,
648 struct data_dependence_relation *ddr)
650 struct data_reference *dra = DDR_A (ddr);
651 struct data_reference *drb = DDR_B (ddr);
652 dr_vec_info *dr_info_a = vinfo->lookup_dr (dra);
653 dr_vec_info *dr_info_b = vinfo->lookup_dr (drb);
655 /* We need to check dependences of statements marked as unvectorizable
656 as well, they still can prohibit vectorization. */
658 /* Independent data accesses. */
659 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
660 return false;
662 if (dra == drb)
663 return false;
665 /* Read-read is OK. */
666 if (DR_IS_READ (dra) && DR_IS_READ (drb))
667 return false;
669 /* If dra and drb are part of the same interleaving chain consider
670 them independent. */
671 if (STMT_VINFO_GROUPED_ACCESS (dr_info_a->stmt)
672 && (DR_GROUP_FIRST_ELEMENT (dr_info_a->stmt)
673 == DR_GROUP_FIRST_ELEMENT (dr_info_b->stmt)))
674 return false;
676 /* Unknown data dependence. */
677 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
679 if (dump_enabled_p ())
680 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
681 "can't determine dependence between %T and %T\n",
682 DR_REF (dra), DR_REF (drb));
684 else if (dump_enabled_p ())
685 dump_printf_loc (MSG_NOTE, vect_location,
686 "determined dependence between %T and %T\n",
687 DR_REF (dra), DR_REF (drb));
689 return true;
693 /* Analyze dependences involved in the transform of SLP NODE. STORES
694 contain the vector of scalar stores of this instance if we are
695 disambiguating the loads. */
697 static bool
698 vect_slp_analyze_node_dependences (slp_instance instance, slp_tree node,
699 vec<stmt_vec_info> stores,
700 stmt_vec_info last_store_info)
702 /* This walks over all stmts involved in the SLP load/store done
703 in NODE verifying we can sink them up to the last stmt in the
704 group. */
705 stmt_vec_info last_access_info = vect_find_last_scalar_stmt_in_slp (node);
706 vec_info *vinfo = last_access_info->vinfo;
707 for (unsigned k = 0; k < SLP_INSTANCE_GROUP_SIZE (instance); ++k)
709 stmt_vec_info access_info = SLP_TREE_SCALAR_STMTS (node)[k];
710 if (access_info == last_access_info)
711 continue;
712 data_reference *dr_a = STMT_VINFO_DATA_REF (access_info);
713 ao_ref ref;
714 bool ref_initialized_p = false;
715 for (gimple_stmt_iterator gsi = gsi_for_stmt (access_info->stmt);
716 gsi_stmt (gsi) != last_access_info->stmt; gsi_next (&gsi))
718 gimple *stmt = gsi_stmt (gsi);
719 if (! gimple_vuse (stmt)
720 || (DR_IS_READ (dr_a) && ! gimple_vdef (stmt)))
721 continue;
723 /* If we couldn't record a (single) data reference for this
724 stmt we have to resort to the alias oracle. */
725 stmt_vec_info stmt_info = vinfo->lookup_stmt (stmt);
726 data_reference *dr_b = STMT_VINFO_DATA_REF (stmt_info);
727 if (!dr_b)
729 /* We are moving a store or sinking a load - this means
730 we cannot use TBAA for disambiguation. */
731 if (!ref_initialized_p)
732 ao_ref_init (&ref, DR_REF (dr_a));
733 if (stmt_may_clobber_ref_p_1 (stmt, &ref, false)
734 || ref_maybe_used_by_stmt_p (stmt, &ref, false))
735 return false;
736 continue;
739 bool dependent = false;
740 /* If we run into a store of this same instance (we've just
741 marked those) then delay dependence checking until we run
742 into the last store because this is where it will have
743 been sunk to (and we verify if we can do that as well). */
744 if (gimple_visited_p (stmt))
746 if (stmt_info != last_store_info)
747 continue;
748 unsigned i;
749 stmt_vec_info store_info;
750 FOR_EACH_VEC_ELT (stores, i, store_info)
752 data_reference *store_dr = STMT_VINFO_DATA_REF (store_info);
753 ddr_p ddr = initialize_data_dependence_relation
754 (dr_a, store_dr, vNULL);
755 dependent
756 = vect_slp_analyze_data_ref_dependence (vinfo, ddr);
757 free_dependence_relation (ddr);
758 if (dependent)
759 break;
762 else
764 ddr_p ddr = initialize_data_dependence_relation (dr_a,
765 dr_b, vNULL);
766 dependent = vect_slp_analyze_data_ref_dependence (vinfo, ddr);
767 free_dependence_relation (ddr);
769 if (dependent)
770 return false;
773 return true;
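/* For illustration, with arbitrary pointers p and q: for an SLP store group

     p[0] = x;
     *q   = z;
     p[1] = y;

   the vector store is emitted at the position of the last scalar store
   (p[1]), so the intervening store *q = z that it is sunk past must be
   shown independent, either through a data dependence relation when *q has
   a recorded data reference, or through the alias oracle otherwise.  */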
777 /* Function vect_analyze_data_ref_dependences.
779 Examine all the data references in the basic-block, and make sure there
780 do not exist any data dependences between them. Set *MAX_VF according to
781 the maximum vectorization factor the data dependences allow. */
783 bool
784 vect_slp_analyze_instance_dependence (slp_instance instance)
786 DUMP_VECT_SCOPE ("vect_slp_analyze_instance_dependence");
788 /* The stores of this instance are at the root of the SLP tree. */
789 slp_tree store = SLP_INSTANCE_TREE (instance);
790 if (! STMT_VINFO_DATA_REF (SLP_TREE_SCALAR_STMTS (store)[0]))
791 store = NULL;
793 /* Verify we can sink stores to the vectorized stmt insert location. */
794 stmt_vec_info last_store_info = NULL;
795 if (store)
797 if (! vect_slp_analyze_node_dependences (instance, store, vNULL, NULL))
798 return false;
800 /* Mark stores in this instance and remember the last one. */
801 last_store_info = vect_find_last_scalar_stmt_in_slp (store);
802 for (unsigned k = 0; k < SLP_INSTANCE_GROUP_SIZE (instance); ++k)
803 gimple_set_visited (SLP_TREE_SCALAR_STMTS (store)[k]->stmt, true);
806 bool res = true;
808 /* Verify we can sink loads to the vectorized stmt insert location,
809 special-casing stores of this instance. */
810 slp_tree load;
811 unsigned int i;
812 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), i, load)
813 if (! vect_slp_analyze_node_dependences (instance, load,
814 store
815 ? SLP_TREE_SCALAR_STMTS (store)
816 : vNULL, last_store_info))
818 res = false;
819 break;
822 /* Unset the visited flag. */
823 if (store)
824 for (unsigned k = 0; k < SLP_INSTANCE_GROUP_SIZE (instance); ++k)
825 gimple_set_visited (SLP_TREE_SCALAR_STMTS (store)[k]->stmt, false);
827 return res;
830 /* Record the base alignment guarantee given by DRB, which occurs
831 in STMT_INFO. */
833 static void
834 vect_record_base_alignment (stmt_vec_info stmt_info,
835 innermost_loop_behavior *drb)
837 vec_info *vinfo = stmt_info->vinfo;
838 bool existed;
839 innermost_loop_behavior *&entry
840 = vinfo->base_alignments.get_or_insert (drb->base_address, &existed);
841 if (!existed || entry->base_alignment < drb->base_alignment)
843 entry = drb;
844 if (dump_enabled_p ())
845 dump_printf_loc (MSG_NOTE, vect_location,
846 "recording new base alignment for %T\n"
847 " alignment: %d\n"
848 " misalignment: %d\n"
849 " based on: %G",
850 drb->base_address,
851 drb->base_alignment,
852 drb->base_misalignment,
853 stmt_info->stmt);
857 /* If the region we're going to vectorize is reached, all unconditional
858 data references occur at least once. We can therefore pool the base
859 alignment guarantees from each unconditional reference. Do this by
860 going through all the data references in VINFO and checking whether
861 the containing statement makes the reference unconditionally. If so,
862 record the alignment of the base address in VINFO so that it can be
863 used for all other references with the same base. */
865 void
866 vect_record_base_alignments (vec_info *vinfo)
868 loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
869 class loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
870 data_reference *dr;
871 unsigned int i;
872 FOR_EACH_VEC_ELT (vinfo->shared->datarefs, i, dr)
874 dr_vec_info *dr_info = vinfo->lookup_dr (dr);
875 stmt_vec_info stmt_info = dr_info->stmt;
876 if (!DR_IS_CONDITIONAL_IN_STMT (dr)
877 && STMT_VINFO_VECTORIZABLE (stmt_info)
878 && !STMT_VINFO_GATHER_SCATTER_P (stmt_info))
880 vect_record_base_alignment (stmt_info, &DR_INNERMOST (dr));
882 /* If DR is nested in the loop that is being vectorized, we can also
883 record the alignment of the base wrt the outer loop. */
884 if (loop && nested_in_vect_loop_p (loop, stmt_info))
885 vect_record_base_alignment
886 (stmt_info, &STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info));
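/* For illustration: if a loop body contains an unconditional load whose
   innermost_loop_behavior records, say, a 32-byte base alignment (e.g.
   because the base is a decl with that alignment or __builtin_assume_aligned
   was used), then any other reference whose analysis produced the same base
   address, such as a store guarded by a condition, may reuse that 32-byte
   guarantee: reaching the vectorized region implies the unconditional load
   is executed at least once.  */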
891 /* Return the target alignment for the vectorized form of DR_INFO. */
893 static poly_uint64
894 vect_calculate_target_alignment (dr_vec_info *dr_info)
896 tree vectype = STMT_VINFO_VECTYPE (dr_info->stmt);
897 return targetm.vectorize.preferred_vector_alignment (vectype);
900 /* Function vect_compute_data_ref_alignment
902 Compute the misalignment of the data reference DR_INFO.
904 Output:
905 1. DR_MISALIGNMENT (DR_INFO) is defined.
907 FOR NOW: No analysis is actually performed. Misalignment is calculated
908 only for trivial cases. TODO. */
910 static void
911 vect_compute_data_ref_alignment (dr_vec_info *dr_info)
913 stmt_vec_info stmt_info = dr_info->stmt;
914 vec_base_alignments *base_alignments = &stmt_info->vinfo->base_alignments;
915 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
916 class loop *loop = NULL;
917 tree ref = DR_REF (dr_info->dr);
918 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
920 if (dump_enabled_p ())
921 dump_printf_loc (MSG_NOTE, vect_location,
922 "vect_compute_data_ref_alignment:\n");
924 if (loop_vinfo)
925 loop = LOOP_VINFO_LOOP (loop_vinfo);
927 /* Initialize misalignment to unknown. */
928 SET_DR_MISALIGNMENT (dr_info, DR_MISALIGNMENT_UNKNOWN);
930 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
931 return;
933 innermost_loop_behavior *drb = vect_dr_behavior (dr_info);
934 bool step_preserves_misalignment_p;
936 poly_uint64 vector_alignment
937 = exact_div (vect_calculate_target_alignment (dr_info), BITS_PER_UNIT);
938 DR_TARGET_ALIGNMENT (dr_info) = vector_alignment;
940 /* If the main loop has peeled for alignment we have no way of knowing
941 whether the data accesses in the epilogues are aligned. We can't at
942 compile time answer the question whether we have entered the main loop or
943 not. Fixes PR 92351. */
944 if (loop_vinfo)
946 loop_vec_info orig_loop_vinfo = LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo);
947 if (orig_loop_vinfo
948 && LOOP_VINFO_PEELING_FOR_ALIGNMENT (orig_loop_vinfo) != 0)
949 return;
952 unsigned HOST_WIDE_INT vect_align_c;
953 if (!vector_alignment.is_constant (&vect_align_c))
954 return;
956 /* No step for BB vectorization. */
957 if (!loop)
959 gcc_assert (integer_zerop (drb->step));
960 step_preserves_misalignment_p = true;
963 /* In case the dataref is in an inner-loop of the loop that is being
964 vectorized (LOOP), we use the base and misalignment information
965 relative to the outer-loop (LOOP). This is ok only if the misalignment
966 stays the same throughout the execution of the inner-loop, which is why
967 we have to check that the stride of the dataref in the inner-loop is a
968 multiple of the vector alignment. */
969 else if (nested_in_vect_loop_p (loop, stmt_info))
971 step_preserves_misalignment_p
972 = (DR_STEP_ALIGNMENT (dr_info->dr) % vect_align_c) == 0;
974 if (dump_enabled_p ())
976 if (step_preserves_misalignment_p)
977 dump_printf_loc (MSG_NOTE, vect_location,
978 "inner step divides the vector alignment.\n");
979 else
980 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
981 "inner step doesn't divide the vector"
982 " alignment.\n");
986 /* Similarly we can only use base and misalignment information relative to
987 an innermost loop if the misalignment stays the same throughout the
988 execution of the loop. As above, this is the case if the stride of
989 the dataref is a multiple of the alignment.  */
990 else
992 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
993 step_preserves_misalignment_p
994 = multiple_p (DR_STEP_ALIGNMENT (dr_info->dr) * vf, vect_align_c);
996 if (!step_preserves_misalignment_p && dump_enabled_p ())
997 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
998 "step doesn't divide the vector alignment.\n");
1001 unsigned int base_alignment = drb->base_alignment;
1002 unsigned int base_misalignment = drb->base_misalignment;
1004 /* Calculate the maximum of the pooled base address alignment and the
1005 alignment that we can compute for DR itself. */
1006 innermost_loop_behavior **entry = base_alignments->get (drb->base_address);
1007 if (entry && base_alignment < (*entry)->base_alignment)
1009 base_alignment = (*entry)->base_alignment;
1010 base_misalignment = (*entry)->base_misalignment;
1013 if (drb->offset_alignment < vect_align_c
1014 || !step_preserves_misalignment_p
1015 /* We need to know whether the step wrt the vectorized loop is
1016 negative when computing the starting misalignment below. */
1017 || TREE_CODE (drb->step) != INTEGER_CST)
1019 if (dump_enabled_p ())
1020 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1021 "Unknown alignment for access: %T\n", ref);
1022 return;
1025 if (base_alignment < vect_align_c)
1027 unsigned int max_alignment;
1028 tree base = get_base_for_alignment (drb->base_address, &max_alignment);
1029 if (max_alignment < vect_align_c
1030 || !vect_can_force_dr_alignment_p (base,
1031 vect_align_c * BITS_PER_UNIT))
1033 if (dump_enabled_p ())
1034 dump_printf_loc (MSG_NOTE, vect_location,
1035 "can't force alignment of ref: %T\n", ref);
1036 return;
1039 /* Force the alignment of the decl.
1040 NOTE: This is the only change to the code we make during
1041 the analysis phase, before deciding to vectorize the loop. */
1042 if (dump_enabled_p ())
1043 dump_printf_loc (MSG_NOTE, vect_location,
1044 "force alignment of %T\n", ref);
1046 dr_info->base_decl = base;
1047 dr_info->base_misaligned = true;
1048 base_misalignment = 0;
1050 poly_int64 misalignment
1051 = base_misalignment + wi::to_poly_offset (drb->init).force_shwi ();
1053 /* If this is a backward running DR then the first access in the larger
1054 vectype is actually N-1 elements before the address in the DR.
1055 Adjust misalign accordingly. */
1056 if (tree_int_cst_sgn (drb->step) < 0)
1057 /* PLUS because STEP is negative. */
1058 misalignment += ((TYPE_VECTOR_SUBPARTS (vectype) - 1)
1059 * TREE_INT_CST_LOW (drb->step));
1061 unsigned int const_misalignment;
1062 if (!known_misalignment (misalignment, vect_align_c, &const_misalignment))
1064 if (dump_enabled_p ())
1065 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1066 "Non-constant misalignment for access: %T\n", ref);
1067 return;
1070 SET_DR_MISALIGNMENT (dr_info, const_misalignment);
1072 if (dump_enabled_p ())
1073 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1074 "misalign = %d bytes of ref %T\n",
1075 DR_MISALIGNMENT (dr_info), ref);
1077 return;
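/* For illustration, assuming a 16-byte preferred vector alignment: a
   reference whose pooled base alignment is 16 bytes (base misalignment 0),
   whose DR_INIT is 4 and whose step is a positive INTEGER_CST gets
   DR_MISALIGNMENT set to 4.  If the base alignment is below 16 but the base
   is a decl whose alignment the compiler may increase, the decl is marked
   to be over-aligned instead and only DR_INIT contributes to the recorded
   misalignment.  */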
1080 /* Function vect_update_misalignment_for_peel.
1081 Sets DR_INFO's misalignment
1082 - to 0 if it has the same alignment as DR_PEEL_INFO,
1083 - to the misalignment computed using NPEEL if DR_INFO's misalignment is known,
1084 - to -1 (unknown) otherwise.
1086 DR_INFO - the data reference whose misalignment is to be adjusted.
1087 DR_PEEL_INFO - the data reference whose misalignment is being made
1088 zero in the vector loop by the peel.
1089 NPEEL - the number of iterations in the peel loop if the misalignment
1090 of DR_PEEL_INFO is known at compile time. */
1092 static void
1093 vect_update_misalignment_for_peel (dr_vec_info *dr_info,
1094 dr_vec_info *dr_peel_info, int npeel)
1096 unsigned int i;
1097 vec<dr_p> same_aligned_drs;
1098 struct data_reference *current_dr;
1099 stmt_vec_info peel_stmt_info = dr_peel_info->stmt;
1101 /* It can be assumed that if dr_info has the same alignment as dr_peel,
1102 it is aligned in the vector loop. */
1103 same_aligned_drs = STMT_VINFO_SAME_ALIGN_REFS (peel_stmt_info);
1104 FOR_EACH_VEC_ELT (same_aligned_drs, i, current_dr)
1106 if (current_dr != dr_info->dr)
1107 continue;
1108 gcc_assert (!known_alignment_for_access_p (dr_info)
1109 || !known_alignment_for_access_p (dr_peel_info)
1110 || (DR_MISALIGNMENT (dr_info)
1111 == DR_MISALIGNMENT (dr_peel_info)));
1112 SET_DR_MISALIGNMENT (dr_info, 0);
1113 return;
1116 unsigned HOST_WIDE_INT alignment;
1117 if (DR_TARGET_ALIGNMENT (dr_info).is_constant (&alignment)
1118 && known_alignment_for_access_p (dr_info)
1119 && known_alignment_for_access_p (dr_peel_info))
1121 int misal = DR_MISALIGNMENT (dr_info);
1122 misal += npeel * TREE_INT_CST_LOW (DR_STEP (dr_info->dr));
1123 misal &= alignment - 1;
1124 SET_DR_MISALIGNMENT (dr_info, misal);
1125 return;
1128 if (dump_enabled_p ())
1129 dump_printf_loc (MSG_NOTE, vect_location, "Setting misalignment " \
1130 "to unknown (-1).\n");
1131 SET_DR_MISALIGNMENT (dr_info, DR_MISALIGNMENT_UNKNOWN);
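/* For illustration, assuming a 16-byte target alignment: if DR_INFO has a
   known misalignment of 8 and a step of 4 bytes, peeling NPEEL == 2
   iterations gives (8 + 2 * 4) & 15 == 0, i.e. the access becomes aligned,
   while NPEEL == 1 would leave (8 + 4) & 15 == 12.  When either misalignment
   is unknown the result is simply reset to unknown.  */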
1135 /* Function verify_data_ref_alignment
1137 Return TRUE if DR_INFO can be handled with respect to alignment. */
1139 static opt_result
1140 verify_data_ref_alignment (dr_vec_info *dr_info)
1142 enum dr_alignment_support supportable_dr_alignment
1143 = vect_supportable_dr_alignment (dr_info, false);
1144 if (!supportable_dr_alignment)
1145 return opt_result::failure_at
1146 (dr_info->stmt->stmt,
1147 DR_IS_READ (dr_info->dr)
1148 ? "not vectorized: unsupported unaligned load: %T\n"
1149 : "not vectorized: unsupported unaligned store: %T\n",
1150 DR_REF (dr_info->dr));
1152 if (supportable_dr_alignment != dr_aligned && dump_enabled_p ())
1153 dump_printf_loc (MSG_NOTE, vect_location,
1154 "Vectorizing an unaligned access.\n");
1156 return opt_result::success ();
1159 /* Function vect_verify_datarefs_alignment
1161 Return TRUE if all data references in the loop can be
1162 handled with respect to alignment. */
1164 opt_result
1165 vect_verify_datarefs_alignment (loop_vec_info vinfo)
1167 vec<data_reference_p> datarefs = vinfo->shared->datarefs;
1168 struct data_reference *dr;
1169 unsigned int i;
1171 FOR_EACH_VEC_ELT (datarefs, i, dr)
1173 dr_vec_info *dr_info = vinfo->lookup_dr (dr);
1174 stmt_vec_info stmt_info = dr_info->stmt;
1176 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1177 continue;
1179 /* For interleaving, only the alignment of the first access matters. */
1180 if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
1181 && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt_info)
1182 continue;
1184 /* Strided accesses perform only component accesses, alignment is
1185 irrelevant for them. */
1186 if (STMT_VINFO_STRIDED_P (stmt_info)
1187 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
1188 continue;
1190 opt_result res = verify_data_ref_alignment (dr_info);
1191 if (!res)
1192 return res;
1195 return opt_result::success ();
1198 /* Given a memory reference EXP, return whether its alignment is less
1199 than its size. */
1201 static bool
1202 not_size_aligned (tree exp)
1204 if (!tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (exp))))
1205 return true;
1207 return (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (exp)))
1208 > get_object_alignment (exp));
1211 /* Function vector_alignment_reachable_p
1213 Return true if vector alignment for DR_INFO is reachable by peeling
1214 a few loop iterations. Return false otherwise. */
1216 static bool
1217 vector_alignment_reachable_p (dr_vec_info *dr_info)
1219 stmt_vec_info stmt_info = dr_info->stmt;
1220 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1222 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1224 /* For interleaved access we peel only if the number of iterations in
1225 the prolog loop ({VF - misalignment}) is a multiple of the
1226 number of interleaved accesses. */
1227 int elem_size, mis_in_elements;
1229 /* FORNOW: handle only known alignment. */
1230 if (!known_alignment_for_access_p (dr_info))
1231 return false;
1233 poly_uint64 nelements = TYPE_VECTOR_SUBPARTS (vectype);
1234 poly_uint64 vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
1235 elem_size = vector_element_size (vector_size, nelements);
1236 mis_in_elements = DR_MISALIGNMENT (dr_info) / elem_size;
1238 if (!multiple_p (nelements - mis_in_elements, DR_GROUP_SIZE (stmt_info)))
1239 return false;
1242 /* If misalignment is known at the compile time then allow peeling
1243 only if natural alignment is reachable through peeling. */
1244 if (known_alignment_for_access_p (dr_info) && !aligned_access_p (dr_info))
1246 HOST_WIDE_INT elmsize =
1247 int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
1248 if (dump_enabled_p ())
1250 dump_printf_loc (MSG_NOTE, vect_location,
1251 "data size = %wd. misalignment = %d.\n", elmsize,
1252 DR_MISALIGNMENT (dr_info));
1254 if (DR_MISALIGNMENT (dr_info) % elmsize)
1256 if (dump_enabled_p ())
1257 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1258 "data size does not divide the misalignment.\n");
1259 return false;
1263 if (!known_alignment_for_access_p (dr_info))
1265 tree type = TREE_TYPE (DR_REF (dr_info->dr));
1266 bool is_packed = not_size_aligned (DR_REF (dr_info->dr));
1267 if (dump_enabled_p ())
1268 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1269 "Unknown misalignment, %snaturally aligned\n",
1270 is_packed ? "not " : "");
1271 return targetm.vectorize.vector_alignment_reachable (type, is_packed);
1274 return true;
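/* For illustration: for a V4SI access in an interleaving group of size 2
   with a known misalignment of one element, the prologue would have to peel
   3 scalar iterations, which is not a multiple of the group size, so peeling
   cannot align the group and false is returned; with a misalignment of two
   elements the 2 peeled iterations do work.  */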
1278 /* Calculate the cost of the memory access represented by DR_INFO. */
1280 static void
1281 vect_get_data_access_cost (dr_vec_info *dr_info,
1282 unsigned int *inside_cost,
1283 unsigned int *outside_cost,
1284 stmt_vector_for_cost *body_cost_vec,
1285 stmt_vector_for_cost *prologue_cost_vec)
1287 stmt_vec_info stmt_info = dr_info->stmt;
1288 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1289 int ncopies;
1291 if (PURE_SLP_STMT (stmt_info))
1292 ncopies = 1;
1293 else
1294 ncopies = vect_get_num_copies (loop_vinfo, STMT_VINFO_VECTYPE (stmt_info));
1296 if (DR_IS_READ (dr_info->dr))
1297 vect_get_load_cost (stmt_info, ncopies, true, inside_cost, outside_cost,
1298 prologue_cost_vec, body_cost_vec, false);
1299 else
1300 vect_get_store_cost (stmt_info, ncopies, inside_cost, body_cost_vec);
1302 if (dump_enabled_p ())
1303 dump_printf_loc (MSG_NOTE, vect_location,
1304 "vect_get_data_access_cost: inside_cost = %d, "
1305 "outside_cost = %d.\n", *inside_cost, *outside_cost);
1309 typedef struct _vect_peel_info
1311 dr_vec_info *dr_info;
1312 int npeel;
1313 unsigned int count;
1314 } *vect_peel_info;
1316 typedef struct _vect_peel_extended_info
1318 struct _vect_peel_info peel_info;
1319 unsigned int inside_cost;
1320 unsigned int outside_cost;
1321 } *vect_peel_extended_info;
1324 /* Peeling hashtable helpers. */
1326 struct peel_info_hasher : free_ptr_hash <_vect_peel_info>
1328 static inline hashval_t hash (const _vect_peel_info *);
1329 static inline bool equal (const _vect_peel_info *, const _vect_peel_info *);
1332 inline hashval_t
1333 peel_info_hasher::hash (const _vect_peel_info *peel_info)
1335 return (hashval_t) peel_info->npeel;
1338 inline bool
1339 peel_info_hasher::equal (const _vect_peel_info *a, const _vect_peel_info *b)
1341 return (a->npeel == b->npeel);
1345 /* Insert DR_INFO into peeling hash table with NPEEL as key. */
1347 static void
1348 vect_peeling_hash_insert (hash_table<peel_info_hasher> *peeling_htab,
1349 loop_vec_info loop_vinfo, dr_vec_info *dr_info,
1350 int npeel)
1352 struct _vect_peel_info elem, *slot;
1353 _vect_peel_info **new_slot;
1354 bool supportable_dr_alignment
1355 = vect_supportable_dr_alignment (dr_info, true);
1357 elem.npeel = npeel;
1358 slot = peeling_htab->find (&elem);
1359 if (slot)
1360 slot->count++;
1361 else
1363 slot = XNEW (struct _vect_peel_info);
1364 slot->npeel = npeel;
1365 slot->dr_info = dr_info;
1366 slot->count = 1;
1367 new_slot = peeling_htab->find_slot (slot, INSERT);
1368 *new_slot = slot;
1371 if (!supportable_dr_alignment
1372 && unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
1373 slot->count += VECT_MAX_COST;
1377 /* Traverse peeling hash table to find peeling option that aligns maximum
1378 number of data accesses. */
1381 vect_peeling_hash_get_most_frequent (_vect_peel_info **slot,
1382 _vect_peel_extended_info *max)
1384 vect_peel_info elem = *slot;
1386 if (elem->count > max->peel_info.count
1387 || (elem->count == max->peel_info.count
1388 && max->peel_info.npeel > elem->npeel))
1390 max->peel_info.npeel = elem->npeel;
1391 max->peel_info.count = elem->count;
1392 max->peel_info.dr_info = elem->dr_info;
1395 return 1;
1398 /* Get the costs of peeling NPEEL iterations for LOOP_VINFO, checking
1399 data access costs for all data refs. If UNKNOWN_MISALIGNMENT is true,
1400 we assume DR0_INFO's misalignment will be zero after peeling. */
1402 static void
1403 vect_get_peeling_costs_all_drs (loop_vec_info loop_vinfo,
1404 dr_vec_info *dr0_info,
1405 unsigned int *inside_cost,
1406 unsigned int *outside_cost,
1407 stmt_vector_for_cost *body_cost_vec,
1408 stmt_vector_for_cost *prologue_cost_vec,
1409 unsigned int npeel,
1410 bool unknown_misalignment)
1412 vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
1413 unsigned i;
1414 data_reference *dr;
1416 FOR_EACH_VEC_ELT (datarefs, i, dr)
1418 dr_vec_info *dr_info = loop_vinfo->lookup_dr (dr);
1419 stmt_vec_info stmt_info = dr_info->stmt;
1420 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1421 continue;
1423 /* For interleaving, only the alignment of the first access
1424 matters. */
1425 if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
1426 && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt_info)
1427 continue;
1429 /* Strided accesses perform only component accesses, alignment is
1430 irrelevant for them. */
1431 if (STMT_VINFO_STRIDED_P (stmt_info)
1432 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
1433 continue;
1435 int save_misalignment;
1436 save_misalignment = DR_MISALIGNMENT (dr_info);
1437 if (npeel == 0)
1439 else if (unknown_misalignment && dr_info == dr0_info)
1440 SET_DR_MISALIGNMENT (dr_info, 0);
1441 else
1442 vect_update_misalignment_for_peel (dr_info, dr0_info, npeel);
1443 vect_get_data_access_cost (dr_info, inside_cost, outside_cost,
1444 body_cost_vec, prologue_cost_vec);
1445 SET_DR_MISALIGNMENT (dr_info, save_misalignment);
1449 /* Traverse peeling hash table and calculate cost for each peeling option.
1450 Find the one with the lowest cost. */
1453 vect_peeling_hash_get_lowest_cost (_vect_peel_info **slot,
1454 _vect_peel_extended_info *min)
1456 vect_peel_info elem = *slot;
1457 int dummy;
1458 unsigned int inside_cost = 0, outside_cost = 0;
1459 stmt_vec_info stmt_info = elem->dr_info->stmt;
1460 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1461 stmt_vector_for_cost prologue_cost_vec, body_cost_vec,
1462 epilogue_cost_vec;
1464 prologue_cost_vec.create (2);
1465 body_cost_vec.create (2);
1466 epilogue_cost_vec.create (2);
1468 vect_get_peeling_costs_all_drs (loop_vinfo, elem->dr_info, &inside_cost,
1469 &outside_cost, &body_cost_vec,
1470 &prologue_cost_vec, elem->npeel, false);
1472 body_cost_vec.release ();
1474 outside_cost += vect_get_known_peeling_cost
1475 (loop_vinfo, elem->npeel, &dummy,
1476 &LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
1477 &prologue_cost_vec, &epilogue_cost_vec);
1479 /* Prologue and epilogue costs are added to the target model later.
1480 These costs depend only on the scalar iteration cost, the
1481 number of peeling iterations finally chosen, and the number of
1482 misaligned statements. So discard the information found here. */
1483 prologue_cost_vec.release ();
1484 epilogue_cost_vec.release ();
1486 if (inside_cost < min->inside_cost
1487 || (inside_cost == min->inside_cost
1488 && outside_cost < min->outside_cost))
1490 min->inside_cost = inside_cost;
1491 min->outside_cost = outside_cost;
1492 min->peel_info.dr_info = elem->dr_info;
1493 min->peel_info.npeel = elem->npeel;
1494 min->peel_info.count = elem->count;
1497 return 1;
1501 /* Choose best peeling option by traversing peeling hash table and either
1502 choosing an option with the lowest cost (if cost model is enabled) or the
1503 option that aligns as many accesses as possible. */
1505 static struct _vect_peel_extended_info
1506 vect_peeling_hash_choose_best_peeling (hash_table<peel_info_hasher> *peeling_htab,
1507 loop_vec_info loop_vinfo)
1509 struct _vect_peel_extended_info res;
1511 res.peel_info.dr_info = NULL;
1513 if (!unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
1515 res.inside_cost = INT_MAX;
1516 res.outside_cost = INT_MAX;
1517 peeling_htab->traverse <_vect_peel_extended_info *,
1518 vect_peeling_hash_get_lowest_cost> (&res);
1520 else
1522 res.peel_info.count = 0;
1523 peeling_htab->traverse <_vect_peel_extended_info *,
1524 vect_peeling_hash_get_most_frequent> (&res);
1525 res.inside_cost = 0;
1526 res.outside_cost = 0;
1529 return res;
1532 /* Return true if the new peeling NPEEL is supported. */
1534 static bool
1535 vect_peeling_supportable (loop_vec_info loop_vinfo, dr_vec_info *dr0_info,
1536 unsigned npeel)
1538 unsigned i;
1539 struct data_reference *dr = NULL;
1540 vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
1541 enum dr_alignment_support supportable_dr_alignment;
1543 /* Ensure that all data refs can be vectorized after the peel. */
1544 FOR_EACH_VEC_ELT (datarefs, i, dr)
1546 int save_misalignment;
1548 if (dr == dr0_info->dr)
1549 continue;
1551 dr_vec_info *dr_info = loop_vinfo->lookup_dr (dr);
1552 stmt_vec_info stmt_info = dr_info->stmt;
1553 /* For interleaving, only the alignment of the first access
1554 matters. */
1555 if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
1556 && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt_info)
1557 continue;
1559 /* Strided accesses perform only component accesses, alignment is
1560 irrelevant for them. */
1561 if (STMT_VINFO_STRIDED_P (stmt_info)
1562 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
1563 continue;
1565 save_misalignment = DR_MISALIGNMENT (dr_info);
1566 vect_update_misalignment_for_peel (dr_info, dr0_info, npeel);
1567 supportable_dr_alignment
1568 = vect_supportable_dr_alignment (dr_info, false);
1569 SET_DR_MISALIGNMENT (dr_info, save_misalignment);
1571 if (!supportable_dr_alignment)
1572 return false;
1575 return true;
1578 /* Function vect_enhance_data_refs_alignment
1580 This pass will use loop versioning and loop peeling in order to enhance
1581 the alignment of data references in the loop.
1583 FOR NOW: we assume that whatever versioning/peeling takes place, only the
1584 original loop is to be vectorized. Any other loops that are created by
1585 the transformations performed in this pass are not supposed to be
1586 vectorized. This restriction will be relaxed.
1588 This pass will require a cost model to guide it whether to apply peeling
1589 or versioning or a combination of the two. For example, the scheme that
1590 Intel uses when given a loop with several memory accesses, is as follows:
1591 choose one memory access ('p') whose alignment you want to force by doing
1592 peeling. Then, either (1) generate a loop in which 'p' is aligned and all
1593 other accesses are not necessarily aligned, or (2) use loop versioning to
1594 generate one loop in which all accesses are aligned, and another loop in
1595 which only 'p' is necessarily aligned.
1597 ("Automatic Intra-Register Vectorization for the Intel Architecture",
1598 Aart J.C. Bik, Milind Girkar, Paul M. Grey and Xinmin Tian, International
1599 Journal of Parallel Programming, Vol. 30, No. 2, April 2002.)
1601 Devising a cost model is the most critical aspect of this work. It will
1602 guide us on which access to peel for, whether to use loop versioning, how
1603 many versions to create, etc. The cost model will probably consist of
1604 generic considerations as well as target specific considerations (on
1605 powerpc for example, misaligned stores are more painful than misaligned
1606 loads).
1608 Here are the general steps involved in alignment enhancements:
1610 -- original loop, before alignment analysis:
1611 for (i=0; i<N; i++){
1612 x = q[i]; # DR_MISALIGNMENT(q) = unknown
1613 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1616 -- After vect_compute_data_refs_alignment:
1617 for (i=0; i<N; i++){
1618 x = q[i]; # DR_MISALIGNMENT(q) = 3
1619 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1622 -- Possibility 1: we do loop versioning:
1623 if (p is aligned) {
1624 for (i=0; i<N; i++){ # loop 1A
1625 x = q[i]; # DR_MISALIGNMENT(q) = 3
1626 p[i] = y; # DR_MISALIGNMENT(p) = 0
1629 else {
1630 for (i=0; i<N; i++){ # loop 1B
1631 x = q[i]; # DR_MISALIGNMENT(q) = 3
1632 p[i] = y; # DR_MISALIGNMENT(p) = unaligned
1636 -- Possibility 2: we do loop peeling:
1637 for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized).
1638 x = q[i];
1639 p[i] = y;
1641 for (i = 3; i < N; i++){ # loop 2A
1642 x = q[i]; # DR_MISALIGNMENT(q) = 0
1643 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1646 -- Possibility 3: combination of loop peeling and versioning:
1647 for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized).
1648 x = q[i];
1649 p[i] = y;
1651 if (p is aligned) {
1652 for (i = 3; i<N; i++){ # loop 3A
1653 x = q[i]; # DR_MISALIGNMENT(q) = 0
1654 p[i] = y; # DR_MISALIGNMENT(p) = 0
1657 else {
1658 for (i = 3; i<N; i++){ # loop 3B
1659 x = q[i]; # DR_MISALIGNMENT(q) = 0
1660 p[i] = y; # DR_MISALIGNMENT(p) = unaligned
1664 These loops are later passed to loop_transform to be vectorized. The
1665 vectorizer will use the alignment information to guide the transformation
1666 (whether to generate regular loads/stores, or with special handling for
1667 misalignment). */
1669 opt_result
1670 vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
1672 vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
1673 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1674 enum dr_alignment_support supportable_dr_alignment;
1675 dr_vec_info *first_store = NULL;
1676 dr_vec_info *dr0_info = NULL;
1677 struct data_reference *dr;
1678 unsigned int i, j;
1679 bool do_peeling = false;
1680 bool do_versioning = false;
1681 unsigned int npeel = 0;
1682 bool one_misalignment_known = false;
1683 bool one_misalignment_unknown = false;
1684 bool one_dr_unsupportable = false;
1685 dr_vec_info *unsupportable_dr_info = NULL;
1686 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1687 unsigned possible_npeel_number = 1;
1688 tree vectype;
1689 unsigned int mis, same_align_drs_max = 0;
1690 hash_table<peel_info_hasher> peeling_htab (1);
1692 DUMP_VECT_SCOPE ("vect_enhance_data_refs_alignment");
1694 /* Reset data so we can safely be called multiple times. */
1695 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).truncate (0);
1696 LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) = 0;
1698 /* While cost model enhancements are expected in the future, the high level
1699 view of the code at this time is as follows:
1701 A) If there is a misaligned access then see if peeling to align
1702 this access can make all data references satisfy
1703 vect_supportable_dr_alignment. If so, update data structures
1704 as needed and return true.
1706 B) If peeling wasn't possible and there is a data reference with an
1707 unknown misalignment that does not satisfy vect_supportable_dr_alignment
1708 then see if loop versioning checks can be used to make all data
1709 references satisfy vect_supportable_dr_alignment. If so, update
1710 data structures as needed and return true.
1712 C) If neither peeling nor versioning were successful then return false if
1713 any data reference does not satisfy vect_supportable_dr_alignment.
1715 D) Return true (all data references satisfy vect_supportable_dr_alignment).
1717 Note, Possibility 3 above (which is peeling and versioning together) is not
1718 being done at this time. */
1720 /* (1) Peeling to force alignment. */
1722 /* (1.1) Decide whether to perform peeling, and how many iterations to peel:
1723 Considerations:
1724 + How many accesses will become aligned due to the peeling
1725 - How many accesses will become unaligned due to the peeling,
1726 and the cost of misaligned accesses.
1727 - The cost of peeling (the extra runtime checks, the increase
1728 in code size). */
1730 FOR_EACH_VEC_ELT (datarefs, i, dr)
1732 dr_vec_info *dr_info = loop_vinfo->lookup_dr (dr);
1733 stmt_vec_info stmt_info = dr_info->stmt;
1735 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1736 continue;
1738 /* For interleaving, only the alignment of the first access
1739 matters. */
1740 if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
1741 && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt_info)
1742 continue;
1744 /* For scatter-gather or invariant accesses there is nothing
1745 to enhance. */
1746 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info)
1747 || integer_zerop (DR_STEP (dr)))
1748 continue;
1750 /* Strided accesses perform only component accesses, alignment is
1751 irrelevant for them. */
1752 if (STMT_VINFO_STRIDED_P (stmt_info)
1753 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
1754 continue;
1756 supportable_dr_alignment = vect_supportable_dr_alignment (dr_info, true);
1757 do_peeling = vector_alignment_reachable_p (dr_info);
1758 if (do_peeling)
1760 if (known_alignment_for_access_p (dr_info))
1762 unsigned int npeel_tmp = 0;
1763 bool negative = tree_int_cst_compare (DR_STEP (dr),
1764 size_zero_node) < 0;
1766 vectype = STMT_VINFO_VECTYPE (stmt_info);
1767 /* If known_alignment_for_access_p then we have set
1768 DR_MISALIGNMENT, which is only done if we know it at compile
1769 time, so it is safe to assume target alignment is constant. */
1771 unsigned int target_align =
1772 DR_TARGET_ALIGNMENT (dr_info).to_constant ();
1773 unsigned int dr_size = vect_get_scalar_dr_size (dr_info);
1774 mis = (negative
1775 ? DR_MISALIGNMENT (dr_info)
1776 : -DR_MISALIGNMENT (dr_info));
1777 if (DR_MISALIGNMENT (dr_info) != 0)
1778 npeel_tmp = (mis & (target_align - 1)) / dr_size;
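/* Worked example (illustrative numbers only, not taken from a real
   target): with a positive step, DR_MISALIGNMENT == 4, target_align == 16
   and dr_size == 4 we get

     mis       = -4;
     npeel_tmp = (-4 & 15) / 4;   (i.e. 12 / 4 == 3)

   so peeling 3 scalar iterations advances the access by 12 bytes and
   makes it 16-byte aligned.  */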
1780 /* For multiple types, it is possible that the bigger type access
1781 will have more than one peeling option. E.g., a loop with two
1782 types: one of size (vector size / 4), and the other one of
1783 size (vector size / 8). The vectorization factor will be 8. If both
1784 accesses are misaligned by 3, the first one needs one scalar
1785 iteration to be aligned, and the second one needs 5. But the
1786 first one will be aligned also by peeling 5 scalar
1787 iterations, and in that case both accesses will be aligned.
1788 Hence, in addition to the immediate peeling amount, we also want
1789 to try adding a full vector's worth of elements, as long as we do not
1790 exceed the vectorization factor.
1791 We do this automatically for the cost model, since we calculate
1792 the cost for every peeling option. */
1793 if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
1795 poly_uint64 nscalars = (STMT_SLP_TYPE (stmt_info)
1796 ? vf * DR_GROUP_SIZE (stmt_info) : vf);
1797 possible_npeel_number
1798 = vect_get_num_vectors (nscalars, vectype);
1800 /* NPEEL_TMP is 0 when there is no misalignment, but also
1801 allow peeling NELEMENTS. */
1802 if (DR_MISALIGNMENT (dr_info) == 0)
1803 possible_npeel_number++;
1806 /* Save info about DR in the hash table. Also include peeling
1807 amounts according to the explanation above. */
1808 for (j = 0; j < possible_npeel_number; j++)
1810 vect_peeling_hash_insert (&peeling_htab, loop_vinfo,
1811 dr_info, npeel_tmp);
1812 npeel_tmp += target_align / dr_size;
1815 one_misalignment_known = true;
1817 else
1819 /* If we don't know any misalignment values, we prefer
1820 peeling for the data-ref that has the maximum number of data-refs
1821 with the same alignment, unless the target prefers to align
1822 stores over loads. */
1823 unsigned same_align_drs
1824 = STMT_VINFO_SAME_ALIGN_REFS (stmt_info).length ();
1825 if (!dr0_info
1826 || same_align_drs_max < same_align_drs)
1828 same_align_drs_max = same_align_drs;
1829 dr0_info = dr_info;
1831 /* For data-refs with the same number of related
1832 accesses prefer the one where the misalign
1833 computation will be invariant in the outermost loop. */
1834 else if (same_align_drs_max == same_align_drs)
1836 class loop *ivloop0, *ivloop;
1837 ivloop0 = outermost_invariant_loop_for_expr
1838 (loop, DR_BASE_ADDRESS (dr0_info->dr));
1839 ivloop = outermost_invariant_loop_for_expr
1840 (loop, DR_BASE_ADDRESS (dr));
1841 if ((ivloop && !ivloop0)
1842 || (ivloop && ivloop0
1843 && flow_loop_nested_p (ivloop, ivloop0)))
1844 dr0_info = dr_info;
1847 one_misalignment_unknown = true;
1849 /* Check for data refs with unsupportable alignment that
1850 can be peeled. */
1851 if (!supportable_dr_alignment)
1853 one_dr_unsupportable = true;
1854 unsupportable_dr_info = dr_info;
1857 if (!first_store && DR_IS_WRITE (dr))
1858 first_store = dr_info;
1861 else
1863 if (!aligned_access_p (dr_info))
1865 if (dump_enabled_p ())
1866 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1867 "vector alignment may not be reachable\n");
1868 break;
1873 /* Check if we can possibly peel the loop. */
1874 if (!vect_can_advance_ivs_p (loop_vinfo)
1875 || !slpeel_can_duplicate_loop_p (loop, single_exit (loop))
1876 || loop->inner)
1877 do_peeling = false;
1879 struct _vect_peel_extended_info peel_for_known_alignment;
1880 struct _vect_peel_extended_info peel_for_unknown_alignment;
1881 struct _vect_peel_extended_info best_peel;
1883 peel_for_unknown_alignment.inside_cost = INT_MAX;
1884 peel_for_unknown_alignment.outside_cost = INT_MAX;
1885 peel_for_unknown_alignment.peel_info.count = 0;
1887 if (do_peeling
1888 && one_misalignment_unknown)
1890 /* Check whether the target prefers stores over loads, i.e., whether
1891 misaligned stores are more expensive than misaligned loads (taking
1892 DRs with the same alignment into account). */
1893 unsigned int load_inside_cost = 0;
1894 unsigned int load_outside_cost = 0;
1895 unsigned int store_inside_cost = 0;
1896 unsigned int store_outside_cost = 0;
1897 unsigned int estimated_npeels = vect_vf_for_cost (loop_vinfo) / 2;
1899 stmt_vector_for_cost dummy;
1900 dummy.create (2);
1901 vect_get_peeling_costs_all_drs (loop_vinfo, dr0_info,
1902 &load_inside_cost,
1903 &load_outside_cost,
1904 &dummy, &dummy, estimated_npeels, true);
1905 dummy.release ();
1907 if (first_store)
1909 dummy.create (2);
1910 vect_get_peeling_costs_all_drs (loop_vinfo, first_store,
1911 &store_inside_cost,
1912 &store_outside_cost,
1913 &dummy, &dummy,
1914 estimated_npeels, true);
1915 dummy.release ();
1917 else
1919 store_inside_cost = INT_MAX;
1920 store_outside_cost = INT_MAX;
1923 if (load_inside_cost > store_inside_cost
1924 || (load_inside_cost == store_inside_cost
1925 && load_outside_cost > store_outside_cost))
1927 dr0_info = first_store;
1928 peel_for_unknown_alignment.inside_cost = store_inside_cost;
1929 peel_for_unknown_alignment.outside_cost = store_outside_cost;
1931 else
1933 peel_for_unknown_alignment.inside_cost = load_inside_cost;
1934 peel_for_unknown_alignment.outside_cost = load_outside_cost;
1937 stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec;
1938 prologue_cost_vec.create (2);
1939 epilogue_cost_vec.create (2);
1941 int dummy2;
1942 peel_for_unknown_alignment.outside_cost += vect_get_known_peeling_cost
1943 (loop_vinfo, estimated_npeels, &dummy2,
1944 &LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
1945 &prologue_cost_vec, &epilogue_cost_vec);
1947 prologue_cost_vec.release ();
1948 epilogue_cost_vec.release ();
1950 peel_for_unknown_alignment.peel_info.count = 1
1951 + STMT_VINFO_SAME_ALIGN_REFS (dr0_info->stmt).length ();
1954 peel_for_unknown_alignment.peel_info.npeel = 0;
1955 peel_for_unknown_alignment.peel_info.dr_info = dr0_info;
1957 best_peel = peel_for_unknown_alignment;
1959 peel_for_known_alignment.inside_cost = INT_MAX;
1960 peel_for_known_alignment.outside_cost = INT_MAX;
1961 peel_for_known_alignment.peel_info.count = 0;
1962 peel_for_known_alignment.peel_info.dr_info = NULL;
1964 if (do_peeling && one_misalignment_known)
1966 /* Peeling is possible, but no data access strictly requires alignment
1967 in order to be supported, so we try to choose the best possible peeling
1968 from the hash table. */
1969 peel_for_known_alignment = vect_peeling_hash_choose_best_peeling
1970 (&peeling_htab, loop_vinfo);
1973 /* Compare costs of peeling for known and unknown alignment. */
1974 if (peel_for_known_alignment.peel_info.dr_info != NULL
1975 && peel_for_unknown_alignment.inside_cost
1976 >= peel_for_known_alignment.inside_cost)
1978 best_peel = peel_for_known_alignment;
1980 /* If the best peeling for known alignment has NPEEL == 0, perform no
1981 peeling at all except if there is an unsupportable dr that we can
1982 align. */
1983 if (best_peel.peel_info.npeel == 0 && !one_dr_unsupportable)
1984 do_peeling = false;
1987 /* If there is an unsupportable data ref, prefer this over all choices so far
1988 since we'd have to discard a chosen peeling except when it accidentally
1989 aligned the unsupportable data ref. */
1990 if (one_dr_unsupportable)
1991 dr0_info = unsupportable_dr_info;
1992 else if (do_peeling)
1994 /* Calculate the penalty for no peeling, i.e. leaving everything as-is.
1995 TODO: Use nopeel_outside_cost or get rid of it? */
1996 unsigned nopeel_inside_cost = 0;
1997 unsigned nopeel_outside_cost = 0;
1999 stmt_vector_for_cost dummy;
2000 dummy.create (2);
2001 vect_get_peeling_costs_all_drs (loop_vinfo, NULL, &nopeel_inside_cost,
2002 &nopeel_outside_cost, &dummy, &dummy,
2003 0, false);
2004 dummy.release ();
2006 /* Add epilogue costs. As we do not peel for alignment here, no prologue
2007 costs will be recorded. */
2008 stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec;
2009 prologue_cost_vec.create (2);
2010 epilogue_cost_vec.create (2);
2012 int dummy2;
2013 nopeel_outside_cost += vect_get_known_peeling_cost
2014 (loop_vinfo, 0, &dummy2,
2015 &LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
2016 &prologue_cost_vec, &epilogue_cost_vec);
2018 prologue_cost_vec.release ();
2019 epilogue_cost_vec.release ();
2021 npeel = best_peel.peel_info.npeel;
2022 dr0_info = best_peel.peel_info.dr_info;
2024 /* If not peeling at all is no more expensive than the best peeling we
2025 have so far, don't perform any peeling. */
2026 if (nopeel_inside_cost <= best_peel.inside_cost)
2027 do_peeling = false;
2030 if (do_peeling)
2032 stmt_vec_info stmt_info = dr0_info->stmt;
2033 vectype = STMT_VINFO_VECTYPE (stmt_info);
2035 if (known_alignment_for_access_p (dr0_info))
2037 bool negative = tree_int_cst_compare (DR_STEP (dr0_info->dr),
2038 size_zero_node) < 0;
2039 if (!npeel)
2041 /* Since it's known at compile time, compute the number of
2042 iterations in the peeled loop (the peeling factor) for use in
2043 updating DR_MISALIGNMENT values. The peeling factor is the
2044 vectorization factor minus the misalignment as an element
2045 count. */
2046 mis = (negative
2047 ? DR_MISALIGNMENT (dr0_info)
2048 : -DR_MISALIGNMENT (dr0_info));
2049 /* If known_alignment_for_access_p then we have set
2050 DR_MISALIGNMENT, which is only done if we know it at compile
2051 time, so it is safe to assume target alignment is constant. */
2053 unsigned int target_align =
2054 DR_TARGET_ALIGNMENT (dr0_info).to_constant ();
2055 npeel = ((mis & (target_align - 1))
2056 / vect_get_scalar_dr_size (dr0_info));
2059 /* For interleaved data access every iteration accesses all the
2060 members of the group, therefore we divide the number of iterations
2061 by the group size. */
2062 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
2063 npeel /= DR_GROUP_SIZE (stmt_info);
2065 if (dump_enabled_p ())
2066 dump_printf_loc (MSG_NOTE, vect_location,
2067 "Try peeling by %d\n", npeel);
2070 /* Ensure that all datarefs can be vectorized after the peel. */
2071 if (!vect_peeling_supportable (loop_vinfo, dr0_info, npeel))
2072 do_peeling = false;
2074 /* Check if all datarefs are supportable and log. */
2075 if (do_peeling && known_alignment_for_access_p (dr0_info) && npeel == 0)
2077 opt_result stat = vect_verify_datarefs_alignment (loop_vinfo);
2078 if (!stat)
2079 do_peeling = false;
2080 else
2081 return stat;
2084 /* Cost model #1 - honor --param vect-max-peeling-for-alignment. */
2085 if (do_peeling)
2087 unsigned max_allowed_peel
2088 = param_vect_max_peeling_for_alignment;
2089 if (flag_vect_cost_model == VECT_COST_MODEL_CHEAP)
2090 max_allowed_peel = 0;
2091 if (max_allowed_peel != (unsigned)-1)
2093 unsigned max_peel = npeel;
2094 if (max_peel == 0)
2096 poly_uint64 target_align = DR_TARGET_ALIGNMENT (dr0_info);
2097 unsigned HOST_WIDE_INT target_align_c;
2098 if (target_align.is_constant (&target_align_c))
2099 max_peel =
2100 target_align_c / vect_get_scalar_dr_size (dr0_info) - 1;
2101 else
2103 do_peeling = false;
2104 if (dump_enabled_p ())
2105 dump_printf_loc (MSG_NOTE, vect_location,
2106 "Disable peeling, max peels set and vector"
2107 " alignment unknown\n");
2110 if (max_peel > max_allowed_peel)
2112 do_peeling = false;
2113 if (dump_enabled_p ())
2114 dump_printf_loc (MSG_NOTE, vect_location,
2115 "Disable peeling, max peels reached: %d\n", max_peel);
2120 /* Cost model #2 - if peeling may result in a remaining loop not
2121 iterating enough to be vectorized then do not peel. Since this
2122 is a cost heuristic rather than a correctness decision, use the
2123 most likely runtime value for variable vectorization factors. */
2124 if (do_peeling
2125 && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
2127 unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
2128 unsigned int max_peel = npeel == 0 ? assumed_vf - 1 : npeel;
2129 if ((unsigned HOST_WIDE_INT) LOOP_VINFO_INT_NITERS (loop_vinfo)
2130 < assumed_vf + max_peel)
2131 do_peeling = false;
2134 if (do_peeling)
2136 /* (1.2) Update the DR_MISALIGNMENT of each data reference DR_i.
2137 If the misalignment of DR_i is identical to that of dr0 then set
2138 DR_MISALIGNMENT (DR_i) to zero. If the misalignment of DR_i and
2139 dr0 are known at compile time then increment DR_MISALIGNMENT (DR_i)
2140 by the peeling factor times the element size of DR_i (MOD the
2141 vectorization factor times the size). Otherwise, the
2142 misalignment of DR_i must be set to unknown. */
2143 FOR_EACH_VEC_ELT (datarefs, i, dr)
2144 if (dr != dr0_info->dr)
2146 /* Strided accesses perform only component accesses, alignment
2147 is irrelevant for them. */
2148 dr_vec_info *dr_info = loop_vinfo->lookup_dr (dr);
2149 stmt_info = dr_info->stmt;
2150 if (STMT_VINFO_STRIDED_P (stmt_info)
2151 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
2152 continue;
2154 vect_update_misalignment_for_peel (dr_info, dr0_info, npeel);
2157 LOOP_VINFO_UNALIGNED_DR (loop_vinfo) = dr0_info;
2158 if (npeel)
2159 LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) = npeel;
2160 else
2161 LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
2162 = DR_MISALIGNMENT (dr0_info);
2163 SET_DR_MISALIGNMENT (dr0_info, 0);
2164 if (dump_enabled_p ())
2166 dump_printf_loc (MSG_NOTE, vect_location,
2167 "Alignment of access forced using peeling.\n");
2168 dump_printf_loc (MSG_NOTE, vect_location,
2169 "Peeling for alignment will be applied.\n");
2172 /* The inside-loop cost will be accounted for in vectorizable_load
2173 and vectorizable_store correctly with adjusted alignments.
2174 Drop the body_cst_vec on the floor here. */
2175 opt_result stat = vect_verify_datarefs_alignment (loop_vinfo);
2176 gcc_assert (stat);
2177 return stat;
2181 /* (2) Versioning to force alignment. */
2183 /* Try versioning if:
2184 1) optimize loop for speed and the cost-model is not cheap
2185 2) there is at least one unsupported misaligned data ref with an unknown
2186 misalignment, and
2187 3) all misaligned data refs with a known misalignment are supported, and
2188 4) the number of runtime alignment checks is within reason. */
2190 do_versioning
2191 = (optimize_loop_nest_for_speed_p (loop)
2192 && !loop->inner /* FORNOW */
2193 && flag_vect_cost_model != VECT_COST_MODEL_CHEAP);
2195 if (do_versioning)
2197 FOR_EACH_VEC_ELT (datarefs, i, dr)
2199 dr_vec_info *dr_info = loop_vinfo->lookup_dr (dr);
2200 stmt_vec_info stmt_info = dr_info->stmt;
2202 /* For interleaving, only the alignment of the first access
2203 matters. */
2204 if (aligned_access_p (dr_info)
2205 || (STMT_VINFO_GROUPED_ACCESS (stmt_info)
2206 && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt_info))
2207 continue;
2209 if (STMT_VINFO_STRIDED_P (stmt_info))
2211 /* Strided loads perform only component accesses, alignment is
2212 irrelevant for them. */
2213 if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
2214 continue;
2215 do_versioning = false;
2216 break;
2219 supportable_dr_alignment
2220 = vect_supportable_dr_alignment (dr_info, false);
2222 if (!supportable_dr_alignment)
2224 int mask;
2225 tree vectype;
2227 if (known_alignment_for_access_p (dr_info)
2228 || LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ()
2229 >= (unsigned) param_vect_max_version_for_alignment_checks)
2231 do_versioning = false;
2232 break;
2235 vectype = STMT_VINFO_VECTYPE (stmt_info);
2236 gcc_assert (vectype);
2238 /* At present we don't support versioning for alignment
2239 with variable VF, since there's no guarantee that the
2240 VF is a power of two. We could relax this if we added
2241 a way of enforcing a power-of-two size. */
2242 unsigned HOST_WIDE_INT size;
2243 if (!GET_MODE_SIZE (TYPE_MODE (vectype)).is_constant (&size))
2245 do_versioning = false;
2246 break;
2249 /* Forcing alignment in the first iteration is no good if
2250 we don't keep it across iterations. For now, just disable
2251 versioning in this case.
2252 ?? We could actually unroll the loop to achieve the required
2253 overall step alignment, and forcing the alignment could be
2254 done by doing some iterations of the non-vectorized loop. */
2255 if (!multiple_p (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
2256 * DR_STEP_ALIGNMENT (dr),
2257 DR_TARGET_ALIGNMENT (dr_info)))
2259 do_versioning = false;
2260 break;
2263 /* The rightmost bits of an aligned address must be zeros.
2264 Construct the mask needed for this test. For example,
2265 GET_MODE_SIZE for the vector mode V4SI is 16 bytes so the
2266 mask must be 15 = 0xf. */
2267 mask = size - 1;
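/* Illustrative sketch (hypothetical pointer P, not the code generated
   here): for V4SI, SIZE is 16 so MASK == 0xf and the versioning test
   emitted later behaves like

     if (((uintptr_t) p & 0xf) == 0)
       ... run the loop version with aligned accesses ...
     else
       ... run the unversioned scalar loop ...

   with the addresses of all recorded may-misalign references OR-ed
   together before the AND.  */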
2269 /* FORNOW: use the same mask to test all potentially unaligned
2270 references in the loop. The vectorizer currently supports
2271 a single vector size, see the reference to
2272 GET_MODE_NUNITS (TYPE_MODE (vectype)) where the
2273 vectorization factor is computed. */
2274 gcc_assert (!LOOP_VINFO_PTR_MASK (loop_vinfo)
2275 || LOOP_VINFO_PTR_MASK (loop_vinfo) == mask);
2276 LOOP_VINFO_PTR_MASK (loop_vinfo) = mask;
2277 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).safe_push (stmt_info);
2281 /* Versioning requires at least one misaligned data reference. */
2282 if (!LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
2283 do_versioning = false;
2284 else if (!do_versioning)
2285 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).truncate (0);
2288 if (do_versioning)
2290 vec<stmt_vec_info> may_misalign_stmts
2291 = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo);
2292 stmt_vec_info stmt_info;
2294 /* It can now be assumed that the data references in the statements
2295 in LOOP_VINFO_MAY_MISALIGN_STMTS will be aligned in the version
2296 of the loop being vectorized. */
2297 FOR_EACH_VEC_ELT (may_misalign_stmts, i, stmt_info)
2299 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
2300 SET_DR_MISALIGNMENT (dr_info, 0);
2301 if (dump_enabled_p ())
2302 dump_printf_loc (MSG_NOTE, vect_location,
2303 "Alignment of access forced using versioning.\n");
2306 if (dump_enabled_p ())
2307 dump_printf_loc (MSG_NOTE, vect_location,
2308 "Versioning for alignment will be applied.\n");
2310 /* Peeling and versioning can't be done together at this time. */
2311 gcc_assert (! (do_peeling && do_versioning));
2313 opt_result stat = vect_verify_datarefs_alignment (loop_vinfo);
2314 gcc_assert (stat);
2315 return stat;
2318 /* This point is reached if neither peeling nor versioning is being done. */
2319 gcc_assert (! (do_peeling || do_versioning));
2321 opt_result stat = vect_verify_datarefs_alignment (loop_vinfo);
2322 return stat;
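/* Illustrative usage sketch (the call site below is hypothetical, not
   part of this file): callers are expected to propagate the opt_result,
   e.g.

     opt_result res = vect_enhance_data_refs_alignment (loop_vinfo);
     if (!res)
       return res;

   so that the failure location and message reach the dump machinery.  */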
2326 /* Function vect_find_same_alignment_drs.
2328 Update group and alignment relations in VINFO according to the chosen
2329 vectorization factor. */
2331 static void
2332 vect_find_same_alignment_drs (vec_info *vinfo, data_dependence_relation *ddr)
2334 struct data_reference *dra = DDR_A (ddr);
2335 struct data_reference *drb = DDR_B (ddr);
2336 dr_vec_info *dr_info_a = vinfo->lookup_dr (dra);
2337 dr_vec_info *dr_info_b = vinfo->lookup_dr (drb);
2338 stmt_vec_info stmtinfo_a = dr_info_a->stmt;
2339 stmt_vec_info stmtinfo_b = dr_info_b->stmt;
2341 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
2342 return;
2344 if (dra == drb)
2345 return;
2347 if (STMT_VINFO_GATHER_SCATTER_P (stmtinfo_a)
2348 || STMT_VINFO_GATHER_SCATTER_P (stmtinfo_b))
2349 return;
2351 if (!operand_equal_p (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb), 0)
2352 || !operand_equal_p (DR_OFFSET (dra), DR_OFFSET (drb), 0)
2353 || !operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0))
2354 return;
2356 /* Two references with distance zero have the same alignment. */
2357 poly_offset_int diff = (wi::to_poly_offset (DR_INIT (dra))
2358 - wi::to_poly_offset (DR_INIT (drb)));
2359 if (maybe_ne (diff, 0))
2361 /* Get the wider of the two alignments. */
2362 poly_uint64 align_a =
2363 exact_div (vect_calculate_target_alignment (dr_info_a),
2364 BITS_PER_UNIT);
2365 poly_uint64 align_b =
2366 exact_div (vect_calculate_target_alignment (dr_info_b),
2367 BITS_PER_UNIT);
2368 unsigned HOST_WIDE_INT align_a_c, align_b_c;
2369 if (!align_a.is_constant (&align_a_c)
2370 || !align_b.is_constant (&align_b_c))
2371 return;
2373 unsigned HOST_WIDE_INT max_align = MAX (align_a_c, align_b_c);
2375 /* Require the gap to be a multiple of the larger vector alignment. */
2376 if (!multiple_p (diff, max_align))
2377 return;
2380 STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_a).safe_push (drb);
2381 STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_b).safe_push (dra);
2382 if (dump_enabled_p ())
2383 dump_printf_loc (MSG_NOTE, vect_location,
2384 "accesses have the same alignment: %T and %T\n",
2385 DR_REF (dra), DR_REF (drb));
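/* Illustrative example (hypothetical data refs): for accesses A[i] and
   A[i + 8] of 4-byte elements, both with a 32-byte target alignment,
   DIFF is 32 bytes, a multiple of MAX_ALIGN == 32, so the two references
   are recorded as having the same alignment; with A[i + 3] instead
   (DIFF == 12 bytes) they would not be.  */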
2389 /* Function vect_analyze_data_refs_alignment
2391 Analyze the alignment of the data-references in the loop.
2392 Return FALSE if a data reference is found that cannot be vectorized. */
2394 opt_result
2395 vect_analyze_data_refs_alignment (loop_vec_info vinfo)
2397 DUMP_VECT_SCOPE ("vect_analyze_data_refs_alignment");
2399 /* Mark groups of data references with same alignment using
2400 data dependence information. */
2401 vec<ddr_p> ddrs = vinfo->shared->ddrs;
2402 struct data_dependence_relation *ddr;
2403 unsigned int i;
2405 FOR_EACH_VEC_ELT (ddrs, i, ddr)
2406 vect_find_same_alignment_drs (vinfo, ddr);
2408 vec<data_reference_p> datarefs = vinfo->shared->datarefs;
2409 struct data_reference *dr;
2411 vect_record_base_alignments (vinfo);
2412 FOR_EACH_VEC_ELT (datarefs, i, dr)
2414 dr_vec_info *dr_info = vinfo->lookup_dr (dr);
2415 if (STMT_VINFO_VECTORIZABLE (dr_info->stmt))
2416 vect_compute_data_ref_alignment (dr_info);
2419 return opt_result::success ();
2423 /* Analyze alignment of DRs of stmts in NODE. */
2425 static bool
2426 vect_slp_analyze_and_verify_node_alignment (slp_tree node)
2428 /* We vectorize from the first scalar stmt in the node unless
2429 the node is permuted, in which case we start from the first
2430 element in the group. */
2431 stmt_vec_info first_stmt_info = SLP_TREE_SCALAR_STMTS (node)[0];
2432 dr_vec_info *first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
2433 if (SLP_TREE_LOAD_PERMUTATION (node).exists ())
2434 first_stmt_info = DR_GROUP_FIRST_ELEMENT (first_stmt_info);
2436 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
2437 vect_compute_data_ref_alignment (dr_info);
2438 /* For creating the data-ref pointer we need alignment of the
2439 first element anyway. */
2440 if (dr_info != first_dr_info)
2441 vect_compute_data_ref_alignment (first_dr_info);
2442 if (! verify_data_ref_alignment (dr_info))
2444 if (dump_enabled_p ())
2445 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2446 "not vectorized: bad data alignment in basic "
2447 "block.\n");
2448 return false;
2451 return true;
2454 /* Function vect_slp_analyze_and_verify_instance_alignment
2456 Analyze the alignment of the data-references in the SLP instance.
2457 Return FALSE if a data reference is found that cannot be vectorized. */
2459 bool
2460 vect_slp_analyze_and_verify_instance_alignment (slp_instance instance)
2462 DUMP_VECT_SCOPE ("vect_slp_analyze_and_verify_instance_alignment");
2464 slp_tree node;
2465 unsigned i;
2466 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), i, node)
2467 if (! vect_slp_analyze_and_verify_node_alignment (node))
2468 return false;
2470 node = SLP_INSTANCE_TREE (instance);
2471 if (STMT_VINFO_DATA_REF (SLP_TREE_SCALAR_STMTS (node)[0])
2472 && ! vect_slp_analyze_and_verify_node_alignment
2473 (SLP_INSTANCE_TREE (instance)))
2474 return false;
2476 return true;
2480 /* Analyze groups of accesses: check that DR_INFO belongs to a group of
2481 accesses of legal size, step, etc. Detect gaps, single element
2482 interleaving, and other special cases. Set grouped access info.
2483 Collect groups of strided stores for further use in SLP analysis.
2484 Worker for vect_analyze_group_access. */
2486 static bool
2487 vect_analyze_group_access_1 (dr_vec_info *dr_info)
2489 data_reference *dr = dr_info->dr;
2490 tree step = DR_STEP (dr);
2491 tree scalar_type = TREE_TYPE (DR_REF (dr));
2492 HOST_WIDE_INT type_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
2493 stmt_vec_info stmt_info = dr_info->stmt;
2494 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2495 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2496 HOST_WIDE_INT dr_step = -1;
2497 HOST_WIDE_INT groupsize, last_accessed_element = 1;
2498 bool slp_impossible = false;
2500 /* For interleaving, GROUPSIZE is STEP counted in elements, i.e., the
2501 size of the interleaving group (including gaps). */
2502 if (tree_fits_shwi_p (step))
2504 dr_step = tree_to_shwi (step);
2505 /* Check that STEP is a multiple of type size. Otherwise there is
2506 a non-element-sized gap at the end of the group which we
2507 cannot represent in DR_GROUP_GAP or DR_GROUP_SIZE.
2508 ??? As we can handle non-constant step fine here we should
2509 simply remove uses of DR_GROUP_GAP between the last and first
2510 element and instead rely on DR_STEP. DR_GROUP_SIZE then would
2511 simply not include that gap. */
2512 if ((dr_step % type_size) != 0)
2514 if (dump_enabled_p ())
2515 dump_printf_loc (MSG_NOTE, vect_location,
2516 "Step %T is not a multiple of the element size"
2517 " for %T\n",
2518 step, DR_REF (dr));
2519 return false;
2521 groupsize = absu_hwi (dr_step) / type_size;
2523 else
2524 groupsize = 0;
2526 /* A non-consecutive access is possible only if it is part of an interleaving group. */
2527 if (!DR_GROUP_FIRST_ELEMENT (stmt_info))
2529 /* Check if this DR is part of an interleaving group, and is a single
2530 element of the group that is accessed in the loop. */
2532 /* Gaps are supported only for loads. STEP must be a multiple of the type
2533 size. */
2534 if (DR_IS_READ (dr)
2535 && (dr_step % type_size) == 0
2536 && groupsize > 0)
2538 DR_GROUP_FIRST_ELEMENT (stmt_info) = stmt_info;
2539 DR_GROUP_SIZE (stmt_info) = groupsize;
2540 DR_GROUP_GAP (stmt_info) = groupsize - 1;
2541 if (dump_enabled_p ())
2542 dump_printf_loc (MSG_NOTE, vect_location,
2543 "Detected single element interleaving %T"
2544 " step %T\n",
2545 DR_REF (dr), step);
2547 return true;
2550 if (dump_enabled_p ())
2551 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2552 "not consecutive access %G", stmt_info->stmt);
2554 if (bb_vinfo)
2556 /* Mark the statement as unvectorizable. */
2557 STMT_VINFO_VECTORIZABLE (stmt_info) = false;
2558 return true;
2561 if (dump_enabled_p ())
2562 dump_printf_loc (MSG_NOTE, vect_location, "using strided accesses\n");
2563 STMT_VINFO_STRIDED_P (stmt_info) = true;
2564 return true;
2567 if (DR_GROUP_FIRST_ELEMENT (stmt_info) == stmt_info)
2569 /* First stmt in the interleaving chain. Check the chain. */
2570 stmt_vec_info next = DR_GROUP_NEXT_ELEMENT (stmt_info);
2571 struct data_reference *data_ref = dr;
2572 unsigned int count = 1;
2573 tree prev_init = DR_INIT (data_ref);
2574 HOST_WIDE_INT diff, gaps = 0;
2576 /* By construction, all group members have INTEGER_CST DR_INITs. */
2577 while (next)
2579 /* We never have the same DR multiple times. */
2580 gcc_assert (tree_int_cst_compare (DR_INIT (data_ref),
2581 DR_INIT (STMT_VINFO_DATA_REF (next))) != 0);
2583 data_ref = STMT_VINFO_DATA_REF (next);
2585 /* All group members have the same STEP by construction. */
2586 gcc_checking_assert (operand_equal_p (DR_STEP (data_ref), step, 0));
2588 /* Check that the distance between two accesses is equal to the type
2589 size. Otherwise, we have gaps. */
2590 diff = (TREE_INT_CST_LOW (DR_INIT (data_ref))
2591 - TREE_INT_CST_LOW (prev_init)) / type_size;
2592 if (diff != 1)
2594 /* FORNOW: SLP of accesses with gaps is not supported. */
2595 slp_impossible = true;
2596 if (DR_IS_WRITE (data_ref))
2598 if (dump_enabled_p ())
2599 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2600 "interleaved store with gaps\n");
2601 return false;
2604 gaps += diff - 1;
2607 last_accessed_element += diff;
2609 /* Store the gap from the previous member of the group. If there is no
2610 gap in the access, DR_GROUP_GAP is always 1. */
2611 DR_GROUP_GAP (next) = diff;
2613 prev_init = DR_INIT (data_ref);
2614 next = DR_GROUP_NEXT_ELEMENT (next);
2615 /* Count the number of data-refs in the chain. */
2616 count++;
2619 if (groupsize == 0)
2620 groupsize = count + gaps;
2622 /* This could be UINT_MAX but as we are generating code in a very
2623 inefficient way we have to cap earlier. See PR78699 for example. */
2624 if (groupsize > 4096)
2626 if (dump_enabled_p ())
2627 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2628 "group is too large\n");
2629 return false;
2632 /* Check that the size of the interleaving is equal to count for stores,
2633 i.e., that there are no gaps. */
2634 if (groupsize != count
2635 && !DR_IS_READ (dr))
2637 groupsize = count;
2638 STMT_VINFO_STRIDED_P (stmt_info) = true;
2641 /* If there is a gap after the last load in the group it is the
2642 difference between the groupsize and the last accessed
2643 element.
2644 When there is no gap, this difference should be 0. */
2645 DR_GROUP_GAP (stmt_info) = groupsize - last_accessed_element;
2647 DR_GROUP_SIZE (stmt_info) = groupsize;
2648 if (dump_enabled_p ())
2650 dump_printf_loc (MSG_NOTE, vect_location,
2651 "Detected interleaving ");
2652 if (DR_IS_READ (dr))
2653 dump_printf (MSG_NOTE, "load ");
2654 else if (STMT_VINFO_STRIDED_P (stmt_info))
2655 dump_printf (MSG_NOTE, "strided store ");
2656 else
2657 dump_printf (MSG_NOTE, "store ");
2658 dump_printf (MSG_NOTE, "of size %u\n",
2659 (unsigned)groupsize);
2660 dump_printf_loc (MSG_NOTE, vect_location, "\t%G", stmt_info->stmt);
2661 next = DR_GROUP_NEXT_ELEMENT (stmt_info);
2662 while (next)
2664 if (DR_GROUP_GAP (next) != 1)
2665 dump_printf_loc (MSG_NOTE, vect_location,
2666 "\t<gap of %d elements>\n",
2667 DR_GROUP_GAP (next) - 1);
2668 dump_printf_loc (MSG_NOTE, vect_location, "\t%G", next->stmt);
2669 next = DR_GROUP_NEXT_ELEMENT (next);
2671 if (DR_GROUP_GAP (stmt_info) != 0)
2672 dump_printf_loc (MSG_NOTE, vect_location,
2673 "\t<gap of %d elements>\n",
2674 DR_GROUP_GAP (stmt_info));
2677 /* SLP: create an SLP data structure for every interleaving group of
2678 stores for further analysis in vect_analyze_slp. */
2679 if (DR_IS_WRITE (dr) && !slp_impossible)
2681 if (loop_vinfo)
2682 LOOP_VINFO_GROUPED_STORES (loop_vinfo).safe_push (stmt_info);
2683 if (bb_vinfo)
2684 BB_VINFO_GROUPED_STORES (bb_vinfo).safe_push (stmt_info);
2688 return true;
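/* Illustrative example (hypothetical loop, 4-byte elements): for a load
   group accessing p[4*i], p[4*i+1] and p[4*i+3], DR_STEP is 16 bytes so
   GROUPSIZE == 4; the member p[4*i+3] gets DR_GROUP_GAP == 2 (it is two
   elements after p[4*i+1]), and the group leader gets DR_GROUP_GAP == 0
   because the last accessed element is also the last of the group.  */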
2691 /* Analyze groups of accesses: check that DR_INFO belongs to a group of
2692 accesses of legal size, step, etc. Detect gaps, single element
2693 interleaving, and other special cases. Set grouped access info.
2694 Collect groups of strided stores for further use in SLP analysis. */
2696 static bool
2697 vect_analyze_group_access (dr_vec_info *dr_info)
2699 if (!vect_analyze_group_access_1 (dr_info))
2701 /* Dissolve the group if present. */
2702 stmt_vec_info stmt_info = DR_GROUP_FIRST_ELEMENT (dr_info->stmt);
2703 while (stmt_info)
2705 stmt_vec_info next = DR_GROUP_NEXT_ELEMENT (stmt_info);
2706 DR_GROUP_FIRST_ELEMENT (stmt_info) = NULL;
2707 DR_GROUP_NEXT_ELEMENT (stmt_info) = NULL;
2708 stmt_info = next;
2710 return false;
2712 return true;
2715 /* Analyze the access pattern of the data-reference DR_INFO.
2716 In case of non-consecutive accesses call vect_analyze_group_access() to
2717 analyze groups of accesses. */
2719 static bool
2720 vect_analyze_data_ref_access (dr_vec_info *dr_info)
2722 data_reference *dr = dr_info->dr;
2723 tree step = DR_STEP (dr);
2724 tree scalar_type = TREE_TYPE (DR_REF (dr));
2725 stmt_vec_info stmt_info = dr_info->stmt;
2726 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2727 class loop *loop = NULL;
2729 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
2730 return true;
2732 if (loop_vinfo)
2733 loop = LOOP_VINFO_LOOP (loop_vinfo);
2735 if (loop_vinfo && !step)
2737 if (dump_enabled_p ())
2738 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2739 "bad data-ref access in loop\n");
2740 return false;
2743 /* Allow loads with zero step in inner-loop vectorization. */
2744 if (loop_vinfo && integer_zerop (step))
2746 DR_GROUP_FIRST_ELEMENT (stmt_info) = NULL;
2747 if (!nested_in_vect_loop_p (loop, stmt_info))
2748 return DR_IS_READ (dr);
2749 /* Allow references with zero step for outer loops marked
2750 with pragma omp simd only - it guarantees absence of
2751 loop-carried dependencies between inner loop iterations. */
2752 if (loop->safelen < 2)
2754 if (dump_enabled_p ())
2755 dump_printf_loc (MSG_NOTE, vect_location,
2756 "zero step in inner loop of nest\n");
2757 return false;
2761 if (loop && nested_in_vect_loop_p (loop, stmt_info))
2763 /* Interleaved accesses are not yet supported within outer-loop
2764 vectorization for references in the inner-loop. */
2765 DR_GROUP_FIRST_ELEMENT (stmt_info) = NULL;
2767 /* For the rest of the analysis we use the outer-loop step. */
2768 step = STMT_VINFO_DR_STEP (stmt_info);
2769 if (integer_zerop (step))
2771 if (dump_enabled_p ())
2772 dump_printf_loc (MSG_NOTE, vect_location,
2773 "zero step in outer loop.\n");
2774 return DR_IS_READ (dr);
2778 /* Consecutive? */
2779 if (TREE_CODE (step) == INTEGER_CST)
2781 HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
2782 if (!tree_int_cst_compare (step, TYPE_SIZE_UNIT (scalar_type))
2783 || (dr_step < 0
2784 && !compare_tree_int (TYPE_SIZE_UNIT (scalar_type), -dr_step)))
2786 /* Mark that it is not interleaving. */
2787 DR_GROUP_FIRST_ELEMENT (stmt_info) = NULL;
2788 return true;
2792 if (loop && nested_in_vect_loop_p (loop, stmt_info))
2794 if (dump_enabled_p ())
2795 dump_printf_loc (MSG_NOTE, vect_location,
2796 "grouped access in outer loop.\n");
2797 return false;
2801 /* Assume this is a DR handled by the non-constant strided load case. */
2802 if (TREE_CODE (step) != INTEGER_CST)
2803 return (STMT_VINFO_STRIDED_P (stmt_info)
2804 && (!STMT_VINFO_GROUPED_ACCESS (stmt_info)
2805 || vect_analyze_group_access (dr_info)));
2807 /* Not a consecutive access - check if it's part of an interleaving group. */
2808 return vect_analyze_group_access (dr_info);
2811 /* Compare two data-references DRA and DRB so that they can be sorted
2812 into chunks suitable for grouping. */
2814 static int
2815 dr_group_sort_cmp (const void *dra_, const void *drb_)
2817 data_reference_p dra = *(data_reference_p *)const_cast<void *>(dra_);
2818 data_reference_p drb = *(data_reference_p *)const_cast<void *>(drb_);
2819 int cmp;
2821 /* Stabilize sort. */
2822 if (dra == drb)
2823 return 0;
2825 /* DRs in different loops never belong to the same group. */
2826 loop_p loopa = gimple_bb (DR_STMT (dra))->loop_father;
2827 loop_p loopb = gimple_bb (DR_STMT (drb))->loop_father;
2828 if (loopa != loopb)
2829 return loopa->num < loopb->num ? -1 : 1;
2831 /* Ordering of DRs according to base. */
2832 cmp = data_ref_compare_tree (DR_BASE_ADDRESS (dra),
2833 DR_BASE_ADDRESS (drb));
2834 if (cmp != 0)
2835 return cmp;
2837 /* And according to DR_OFFSET. */
2838 cmp = data_ref_compare_tree (DR_OFFSET (dra), DR_OFFSET (drb));
2839 if (cmp != 0)
2840 return cmp;
2842 /* Put reads before writes. */
2843 if (DR_IS_READ (dra) != DR_IS_READ (drb))
2844 return DR_IS_READ (dra) ? -1 : 1;
2846 /* Then sort by access size. */
2847 cmp = data_ref_compare_tree (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))),
2848 TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))));
2849 if (cmp != 0)
2850 return cmp;
2852 /* And by step. */
2853 cmp = data_ref_compare_tree (DR_STEP (dra), DR_STEP (drb));
2854 if (cmp != 0)
2855 return cmp;
2857 /* Then sort by DR_INIT. In case of identical DRs, sort by stmt UID. */
2858 cmp = data_ref_compare_tree (DR_INIT (dra), DR_INIT (drb));
2859 if (cmp == 0)
2860 return gimple_uid (DR_STMT (dra)) < gimple_uid (DR_STMT (drb)) ? -1 : 1;
2861 return cmp;
2864 /* If OP is the result of a conversion, return the unconverted value,
2865 otherwise return null. */
2867 static tree
2868 strip_conversion (tree op)
2870 if (TREE_CODE (op) != SSA_NAME)
2871 return NULL_TREE;
2872 gimple *stmt = SSA_NAME_DEF_STMT (op);
2873 if (!is_gimple_assign (stmt)
2874 || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt)))
2875 return NULL_TREE;
2876 return gimple_assign_rhs1 (stmt);
2879 /* Return true if vectorizable_* routines can handle statements STMT1_INFO
2880 and STMT2_INFO being in a single group. When ALLOW_SLP_P, masked loads can
2881 be grouped in SLP mode. */
2883 static bool
2884 can_group_stmts_p (stmt_vec_info stmt1_info, stmt_vec_info stmt2_info,
2885 bool allow_slp_p)
2887 if (gimple_assign_single_p (stmt1_info->stmt))
2888 return gimple_assign_single_p (stmt2_info->stmt);
2890 gcall *call1 = dyn_cast <gcall *> (stmt1_info->stmt);
2891 if (call1 && gimple_call_internal_p (call1))
2893 /* Check for two masked loads or two masked stores. */
2894 gcall *call2 = dyn_cast <gcall *> (stmt2_info->stmt);
2895 if (!call2 || !gimple_call_internal_p (call2))
2896 return false;
2897 internal_fn ifn = gimple_call_internal_fn (call1);
2898 if (ifn != IFN_MASK_LOAD && ifn != IFN_MASK_STORE)
2899 return false;
2900 if (ifn != gimple_call_internal_fn (call2))
2901 return false;
2903 /* Check that the masks are the same. Cope with casts of masks,
2904 like those created by build_mask_conversion. */
2905 tree mask1 = gimple_call_arg (call1, 2);
2906 tree mask2 = gimple_call_arg (call2, 2);
2907 if (!operand_equal_p (mask1, mask2, 0)
2908 && (ifn == IFN_MASK_STORE || !allow_slp_p))
2910 mask1 = strip_conversion (mask1);
2911 if (!mask1)
2912 return false;
2913 mask2 = strip_conversion (mask2);
2914 if (!mask2)
2915 return false;
2916 if (!operand_equal_p (mask1, mask2, 0))
2917 return false;
2919 return true;
2922 return false;
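/* Illustrative example (hypothetical GIMPLE, simplified dump syntax):
   two calls

     _1 = .MASK_LOAD (p_1, 32B, mask_1);
     _2 = .MASK_LOAD (q_2, 32B, mask_2);

   can be grouped when MASK_1 and MASK_2 are the same value, or, for SLP
   with ALLOW_SLP_P set, when they differ only by a conversion of the
   same underlying mask.  */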
2925 /* Function vect_analyze_data_ref_accesses.
2927 Analyze the access pattern of all the data references in the loop.
2929 FORNOW: the only access pattern that is considered vectorizable is a
2930 simple step 1 (consecutive) access.
2932 FORNOW: handle only arrays and pointer accesses. */
2934 opt_result
2935 vect_analyze_data_ref_accesses (vec_info *vinfo)
2937 unsigned int i;
2938 vec<data_reference_p> datarefs = vinfo->shared->datarefs;
2939 struct data_reference *dr;
2941 DUMP_VECT_SCOPE ("vect_analyze_data_ref_accesses");
2943 if (datarefs.is_empty ())
2944 return opt_result::success ();
2946 /* Sort the array of datarefs to make building the interleaving chains
2947 linear. Don't modify the original vector's order, it is needed for
2948 determining what dependencies are reversed. */
2949 vec<data_reference_p> datarefs_copy = datarefs.copy ();
2950 datarefs_copy.qsort (dr_group_sort_cmp);
2951 hash_set<stmt_vec_info> to_fixup;
2953 /* Build the interleaving chains. */
2954 for (i = 0; i < datarefs_copy.length () - 1;)
2956 data_reference_p dra = datarefs_copy[i];
2957 dr_vec_info *dr_info_a = vinfo->lookup_dr (dra);
2958 stmt_vec_info stmtinfo_a = dr_info_a->stmt;
2959 stmt_vec_info lastinfo = NULL;
2960 if (!STMT_VINFO_VECTORIZABLE (stmtinfo_a)
2961 || STMT_VINFO_GATHER_SCATTER_P (stmtinfo_a))
2963 ++i;
2964 continue;
2966 for (i = i + 1; i < datarefs_copy.length (); ++i)
2968 data_reference_p drb = datarefs_copy[i];
2969 dr_vec_info *dr_info_b = vinfo->lookup_dr (drb);
2970 stmt_vec_info stmtinfo_b = dr_info_b->stmt;
2971 if (!STMT_VINFO_VECTORIZABLE (stmtinfo_b)
2972 || STMT_VINFO_GATHER_SCATTER_P (stmtinfo_b))
2973 break;
2975 /* ??? Imperfect sorting (non-compatible types, non-modulo
2976 accesses, same accesses) can lead to a group being artificially
2977 split here, as we don't just skip over those. If it really
2978 matters we can push those to a worklist and re-iterate
2979 over them. Then we can just skip ahead to the next DR here. */
2981 /* DRs in a different loop should not be put into the same
2982 interleaving group. */
2983 if (gimple_bb (DR_STMT (dra))->loop_father
2984 != gimple_bb (DR_STMT (drb))->loop_father)
2985 break;
2987 /* Check that the data-refs have the same first location (except init)
2988 and that they are both either stores or loads (not a load and a store,
2989 and not mismatched masked loads or stores). */
2990 if (DR_IS_READ (dra) != DR_IS_READ (drb)
2991 || data_ref_compare_tree (DR_BASE_ADDRESS (dra),
2992 DR_BASE_ADDRESS (drb)) != 0
2993 || data_ref_compare_tree (DR_OFFSET (dra), DR_OFFSET (drb)) != 0
2994 || !can_group_stmts_p (stmtinfo_a, stmtinfo_b, true))
2995 break;
2997 /* Check that the data-refs have the same constant size. */
2998 tree sza = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra)));
2999 tree szb = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb)));
3000 if (!tree_fits_uhwi_p (sza)
3001 || !tree_fits_uhwi_p (szb)
3002 || !tree_int_cst_equal (sza, szb))
3003 break;
3005 /* Check that the data-refs have the same step. */
3006 if (data_ref_compare_tree (DR_STEP (dra), DR_STEP (drb)) != 0)
3007 break;
3009 /* Check the types are compatible.
3010 ??? We don't distinguish this during sorting. */
3011 if (!types_compatible_p (TREE_TYPE (DR_REF (dra)),
3012 TREE_TYPE (DR_REF (drb))))
3013 break;
3015 /* Check that the DR_INITs are compile-time constants. */
3016 if (TREE_CODE (DR_INIT (dra)) != INTEGER_CST
3017 || TREE_CODE (DR_INIT (drb)) != INTEGER_CST)
3018 break;
3020 /* Different .GOMP_SIMD_LANE calls still give the same lane,
3021 just hold extra information. */
3022 if (STMT_VINFO_SIMD_LANE_ACCESS_P (stmtinfo_a)
3023 && STMT_VINFO_SIMD_LANE_ACCESS_P (stmtinfo_b)
3024 && data_ref_compare_tree (DR_INIT (dra), DR_INIT (drb)) == 0)
3025 break;
3027 /* Sorting has ensured that DR_INIT (dra) <= DR_INIT (drb). */
3028 HOST_WIDE_INT init_a = TREE_INT_CST_LOW (DR_INIT (dra));
3029 HOST_WIDE_INT init_b = TREE_INT_CST_LOW (DR_INIT (drb));
3030 HOST_WIDE_INT init_prev
3031 = TREE_INT_CST_LOW (DR_INIT (datarefs_copy[i-1]));
3032 gcc_assert (init_a <= init_b
3033 && init_a <= init_prev
3034 && init_prev <= init_b);
3036 /* Do not place the same access in the interleaving chain twice. */
3037 if (init_b == init_prev)
3039 gcc_assert (gimple_uid (DR_STMT (datarefs_copy[i-1]))
3040 < gimple_uid (DR_STMT (drb)));
3041 /* Simply link in duplicates and fix up the chain below. */
3043 else
3045 /* If init_b == init_a + the size of the type * k, we have an
3046 interleaving, and DRA is accessed before DRB. */
3047 HOST_WIDE_INT type_size_a = tree_to_uhwi (sza);
3048 if (type_size_a == 0
3049 || (init_b - init_a) % type_size_a != 0)
3050 break;
3052 /* If we have a store, the accesses are adjacent. This splits
3053 groups into chunks we support (we don't support vectorization
3054 of stores with gaps). */
3055 if (!DR_IS_READ (dra) && init_b - init_prev != type_size_a)
3056 break;
3058 /* If the step (when constant and nonzero) is greater than the
3059 difference between the data-refs' inits, this splits groups into
3060 suitable sizes. */
3061 if (tree_fits_shwi_p (DR_STEP (dra)))
3063 HOST_WIDE_INT step = tree_to_shwi (DR_STEP (dra));
3064 if (step != 0 && step <= (init_b - init_a))
3065 break;
3069 if (dump_enabled_p ())
3070 dump_printf_loc (MSG_NOTE, vect_location,
3071 DR_IS_READ (dra)
3072 ? "Detected interleaving load %T and %T\n"
3073 : "Detected interleaving store %T and %T\n",
3074 DR_REF (dra), DR_REF (drb));
3076 /* Link the found element into the group list. */
3077 if (!DR_GROUP_FIRST_ELEMENT (stmtinfo_a))
3079 DR_GROUP_FIRST_ELEMENT (stmtinfo_a) = stmtinfo_a;
3080 lastinfo = stmtinfo_a;
3082 DR_GROUP_FIRST_ELEMENT (stmtinfo_b) = stmtinfo_a;
3083 DR_GROUP_NEXT_ELEMENT (lastinfo) = stmtinfo_b;
3084 lastinfo = stmtinfo_b;
3086 STMT_VINFO_SLP_VECT_ONLY (stmtinfo_a)
3087 = !can_group_stmts_p (stmtinfo_a, stmtinfo_b, false);
3089 if (dump_enabled_p () && STMT_VINFO_SLP_VECT_ONLY (stmtinfo_a))
3090 dump_printf_loc (MSG_NOTE, vect_location,
3091 "Load suitable for SLP vectorization only.\n");
3093 if (init_b == init_prev
3094 && !to_fixup.add (DR_GROUP_FIRST_ELEMENT (stmtinfo_a))
3095 && dump_enabled_p ())
3096 dump_printf_loc (MSG_NOTE, vect_location,
3097 "Queuing group with duplicate access for fixup\n");
3101 /* Fix up groups with duplicate entries by splitting them. */
3102 while (1)
3104 hash_set<stmt_vec_info>::iterator it = to_fixup.begin ();
3105 if (!(it != to_fixup.end ()))
3106 break;
3107 stmt_vec_info grp = *it;
3108 to_fixup.remove (grp);
3110 /* Find the earliest duplicate group member. */
3111 unsigned first_duplicate = -1u;
3112 stmt_vec_info next, g = grp;
3113 while ((next = DR_GROUP_NEXT_ELEMENT (g)))
3115 if (tree_int_cst_equal (DR_INIT (STMT_VINFO_DR_INFO (next)->dr),
3116 DR_INIT (STMT_VINFO_DR_INFO (g)->dr))
3117 && gimple_uid (STMT_VINFO_STMT (next)) < first_duplicate)
3118 first_duplicate = gimple_uid (STMT_VINFO_STMT (next));
3119 g = next;
3121 if (first_duplicate == -1U)
3122 continue;
3124 /* Then move all stmts after the first duplicate to a new group.
3125 Note this is a heuristic but one with the property that *it
3126 is fixed up completely. */
3127 g = grp;
3128 stmt_vec_info newgroup = NULL, ng = grp;
3129 while ((next = DR_GROUP_NEXT_ELEMENT (g)))
3131 if (gimple_uid (STMT_VINFO_STMT (next)) >= first_duplicate)
3133 DR_GROUP_NEXT_ELEMENT (g) = DR_GROUP_NEXT_ELEMENT (next);
3134 if (!newgroup)
3135 newgroup = next;
3136 else
3137 DR_GROUP_NEXT_ELEMENT (ng) = next;
3138 ng = next;
3139 DR_GROUP_FIRST_ELEMENT (ng) = newgroup;
3141 else
3142 g = DR_GROUP_NEXT_ELEMENT (g);
3144 DR_GROUP_NEXT_ELEMENT (ng) = NULL;
3146 /* Fixup the new group which still may contain duplicates. */
3147 to_fixup.add (newgroup);
3150 FOR_EACH_VEC_ELT (datarefs_copy, i, dr)
3152 dr_vec_info *dr_info = vinfo->lookup_dr (dr);
3153 if (STMT_VINFO_VECTORIZABLE (dr_info->stmt)
3154 && !vect_analyze_data_ref_access (dr_info))
3156 if (dump_enabled_p ())
3157 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3158 "not vectorized: complicated access pattern.\n");
3160 if (is_a <bb_vec_info> (vinfo))
3162 /* Mark the statement as not vectorizable. */
3163 STMT_VINFO_VECTORIZABLE (dr_info->stmt) = false;
3164 continue;
3166 else
3168 datarefs_copy.release ();
3169 return opt_result::failure_at (dr_info->stmt->stmt,
3170 "not vectorized:"
3171 " complicated access pattern.\n");
3176 datarefs_copy.release ();
3177 return opt_result::success ();
3180 /* Function vect_vfa_segment_size.
3182 Input:
3183 DR_INFO: The data reference.
3184 LENGTH_FACTOR: segment length to consider.
3186 Return a value suitable for the dr_with_seg_len::seg_len field.
3187 This is the "distance travelled" by the pointer from the first
3188 iteration in the segment to the last. Note that it does not include
3189 the size of the access; in effect it only describes the first byte. */
3191 static tree
3192 vect_vfa_segment_size (dr_vec_info *dr_info, tree length_factor)
3194 length_factor = size_binop (MINUS_EXPR,
3195 fold_convert (sizetype, length_factor),
3196 size_one_node);
3197 return size_binop (MULT_EXPR, fold_convert (sizetype, DR_STEP (dr_info->dr)),
3198 length_factor);
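/* Worked example (illustrative numbers): for a data ref with DR_STEP of
   4 bytes and LENGTH_FACTOR equal to a vectorization factor of 8, the
   segment length is 4 * (8 - 1) == 28 bytes -- the distance travelled by
   the pointer, not including the size of the final access.  */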
3201 /* Return a value that, when added to abs (vect_vfa_segment_size (DR_INFO)),
3202 gives the worst-case number of bytes covered by the segment. */
3204 static unsigned HOST_WIDE_INT
3205 vect_vfa_access_size (dr_vec_info *dr_info)
3207 stmt_vec_info stmt_vinfo = dr_info->stmt;
3208 tree ref_type = TREE_TYPE (DR_REF (dr_info->dr));
3209 unsigned HOST_WIDE_INT ref_size = tree_to_uhwi (TYPE_SIZE_UNIT (ref_type));
3210 unsigned HOST_WIDE_INT access_size = ref_size;
3211 if (DR_GROUP_FIRST_ELEMENT (stmt_vinfo))
3213 gcc_assert (DR_GROUP_FIRST_ELEMENT (stmt_vinfo) == stmt_vinfo);
3214 access_size *= DR_GROUP_SIZE (stmt_vinfo) - DR_GROUP_GAP (stmt_vinfo);
3216 if (STMT_VINFO_VEC_STMT (stmt_vinfo)
3217 && (vect_supportable_dr_alignment (dr_info, false)
3218 == dr_explicit_realign_optimized))
3220 /* We might access a full vector's worth. */
3221 tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
3222 access_size += tree_to_uhwi (TYPE_SIZE_UNIT (vectype)) - ref_size;
3224 return access_size;
3227 /* Get the minimum alignment for all the scalar accesses that DR_INFO
3228 describes. */
3230 static unsigned int
3231 vect_vfa_align (dr_vec_info *dr_info)
3233 return TYPE_ALIGN_UNIT (TREE_TYPE (DR_REF (dr_info->dr)));
3236 /* Function vect_no_alias_p.
3238 Given data references A and B with equal base and offset, see whether
3239 the alias relation can be decided at compilation time. Return 1 if
3240 it can and the references alias, 0 if it can and the references do
3241 not alias, and -1 if we cannot decide at compile time. SEGMENT_LENGTH_A,
3242 SEGMENT_LENGTH_B, ACCESS_SIZE_A and ACCESS_SIZE_B are the equivalent
3243 of dr_with_seg_len::{seg_len,access_size} for A and B. */
3245 static int
3246 vect_compile_time_alias (dr_vec_info *a, dr_vec_info *b,
3247 tree segment_length_a, tree segment_length_b,
3248 unsigned HOST_WIDE_INT access_size_a,
3249 unsigned HOST_WIDE_INT access_size_b)
3251 poly_offset_int offset_a = wi::to_poly_offset (DR_INIT (a->dr));
3252 poly_offset_int offset_b = wi::to_poly_offset (DR_INIT (b->dr));
3253 poly_uint64 const_length_a;
3254 poly_uint64 const_length_b;
3256 /* For negative step, we need to adjust address range by TYPE_SIZE_UNIT
3257 bytes, e.g., int a[3] -> a[1] range is [a+4, a+16) instead of
3258 [a, a+12) */
3259 if (tree_int_cst_compare (DR_STEP (a->dr), size_zero_node) < 0)
3261 const_length_a = (-wi::to_poly_wide (segment_length_a)).force_uhwi ();
3262 offset_a = (offset_a + access_size_a) - const_length_a;
3264 else
3265 const_length_a = tree_to_poly_uint64 (segment_length_a);
3266 if (tree_int_cst_compare (DR_STEP (b->dr), size_zero_node) < 0)
3268 const_length_b = (-wi::to_poly_wide (segment_length_b)).force_uhwi ();
3269 offset_b = (offset_b + access_size_b) - const_length_b;
3271 else
3272 const_length_b = tree_to_poly_uint64 (segment_length_b);
3274 const_length_a += access_size_a;
3275 const_length_b += access_size_b;
3277 if (ranges_known_overlap_p (offset_a, const_length_a,
3278 offset_b, const_length_b))
3279 return 1;
3281 if (!ranges_maybe_overlap_p (offset_a, const_length_a,
3282 offset_b, const_length_b))
3283 return 0;
3285 return -1;
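/* Worked example (illustrative numbers): with positive steps, DR_INIT
   offsets 0 and 16, segment lengths of 28 bytes and access sizes of
   4 bytes, the ranges become [0, 32) and [16, 48); these are known to
   overlap, so the function returns 1.  Pushing the second offset out to
   32 would give [32, 64), no possible overlap, and a return value of 0.  */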
3288 /* Return true if the minimum nonzero dependence distance for loop LOOP_DEPTH
3289 in DDR is >= VF. */
3291 static bool
3292 dependence_distance_ge_vf (data_dependence_relation *ddr,
3293 unsigned int loop_depth, poly_uint64 vf)
3295 if (DDR_ARE_DEPENDENT (ddr) != NULL_TREE
3296 || DDR_NUM_DIST_VECTS (ddr) == 0)
3297 return false;
3299 /* If the dependence is exact, we should have limited the VF instead. */
3300 gcc_checking_assert (DDR_COULD_BE_INDEPENDENT_P (ddr));
3302 unsigned int i;
3303 lambda_vector dist_v;
3304 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
3306 HOST_WIDE_INT dist = dist_v[loop_depth];
3307 if (dist != 0
3308 && !(dist > 0 && DDR_REVERSED_P (ddr))
3309 && maybe_lt ((unsigned HOST_WIDE_INT) abs_hwi (dist), vf))
3310 return false;
3313 if (dump_enabled_p ())
3314 dump_printf_loc (MSG_NOTE, vect_location,
3315 "dependence distance between %T and %T is >= VF\n",
3316 DR_REF (DDR_A (ddr)), DR_REF (DDR_B (ddr)));
3318 return true;
3321 /* Dump LOWER_BOUND using flags DUMP_KIND. Dumps are known to be enabled. */
3323 static void
3324 dump_lower_bound (dump_flags_t dump_kind, const vec_lower_bound &lower_bound)
3326 dump_printf (dump_kind, "%s (%T) >= ",
3327 lower_bound.unsigned_p ? "unsigned" : "abs",
3328 lower_bound.expr);
3329 dump_dec (dump_kind, lower_bound.min_value);
3332 /* Record that the vectorized loop requires the vec_lower_bound described
3333 by EXPR, UNSIGNED_P and MIN_VALUE. */
3335 static void
3336 vect_check_lower_bound (loop_vec_info loop_vinfo, tree expr, bool unsigned_p,
3337 poly_uint64 min_value)
3339 vec<vec_lower_bound> lower_bounds = LOOP_VINFO_LOWER_BOUNDS (loop_vinfo);
3340 for (unsigned int i = 0; i < lower_bounds.length (); ++i)
3341 if (operand_equal_p (lower_bounds[i].expr, expr, 0))
3343 unsigned_p &= lower_bounds[i].unsigned_p;
3344 min_value = upper_bound (lower_bounds[i].min_value, min_value);
3345 if (lower_bounds[i].unsigned_p != unsigned_p
3346 || maybe_lt (lower_bounds[i].min_value, min_value))
3348 lower_bounds[i].unsigned_p = unsigned_p;
3349 lower_bounds[i].min_value = min_value;
3350 if (dump_enabled_p ())
3352 dump_printf_loc (MSG_NOTE, vect_location,
3353 "updating run-time check to ");
3354 dump_lower_bound (MSG_NOTE, lower_bounds[i]);
3355 dump_printf (MSG_NOTE, "\n");
3358 return;
3361 vec_lower_bound lower_bound (expr, unsigned_p, min_value);
3362 if (dump_enabled_p ())
3364 dump_printf_loc (MSG_NOTE, vect_location, "need a run-time check that ");
3365 dump_lower_bound (MSG_NOTE, lower_bound);
3366 dump_printf (MSG_NOTE, "\n");
3368 LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).safe_push (lower_bound);
3371 /* Return true if it's unlikely that the step of the vectorized form of DR_INFO
3372 will span fewer than GAP bytes. */
3374 static bool
3375 vect_small_gap_p (loop_vec_info loop_vinfo, dr_vec_info *dr_info,
3376 poly_int64 gap)
3378 stmt_vec_info stmt_info = dr_info->stmt;
3379 HOST_WIDE_INT count
3380 = estimated_poly_value (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
3381 if (DR_GROUP_FIRST_ELEMENT (stmt_info))
3382 count *= DR_GROUP_SIZE (DR_GROUP_FIRST_ELEMENT (stmt_info));
3383 return (estimated_poly_value (gap)
3384 <= count * vect_get_scalar_dr_size (dr_info));
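/* Worked example (illustrative numbers): with an estimated VF of 4, a
   group size of 2 and 4-byte scalar accesses, COUNT times the scalar
   access size is 4 * 2 * 4 == 32 bytes, so a GAP of up to 32 bytes is
   considered small enough to be covered by a lower-bound check on the
   step.  */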
3387 /* Return true if we know that there is no alias between DR_INFO_A and
3388 DR_INFO_B when abs (DR_STEP (DR_INFO_A->dr)) >= N for some N.
3389 When returning true, set *LOWER_BOUND_OUT to this N. */
3391 static bool
3392 vectorizable_with_step_bound_p (dr_vec_info *dr_info_a, dr_vec_info *dr_info_b,
3393 poly_uint64 *lower_bound_out)
3395 /* Check that there is a constant gap of known sign between DR_A
3396 and DR_B. */
3397 data_reference *dr_a = dr_info_a->dr;
3398 data_reference *dr_b = dr_info_b->dr;
3399 poly_int64 init_a, init_b;
3400 if (!operand_equal_p (DR_BASE_ADDRESS (dr_a), DR_BASE_ADDRESS (dr_b), 0)
3401 || !operand_equal_p (DR_OFFSET (dr_a), DR_OFFSET (dr_b), 0)
3402 || !operand_equal_p (DR_STEP (dr_a), DR_STEP (dr_b), 0)
3403 || !poly_int_tree_p (DR_INIT (dr_a), &init_a)
3404 || !poly_int_tree_p (DR_INIT (dr_b), &init_b)
3405 || !ordered_p (init_a, init_b))
3406 return false;
3408 /* Sort DR_A and DR_B by the address they access. */
3409 if (maybe_lt (init_b, init_a))
3411 std::swap (init_a, init_b);
3412 std::swap (dr_info_a, dr_info_b);
3413 std::swap (dr_a, dr_b);
3416 /* If the two accesses could be dependent within a scalar iteration,
3417 make sure that we'd retain their order. */
3418 if (maybe_gt (init_a + vect_get_scalar_dr_size (dr_info_a), init_b)
3419 && !vect_preserves_scalar_order_p (dr_info_a, dr_info_b))
3420 return false;
3422 /* There is no alias if abs (DR_STEP) is greater than or equal to
3423 the bytes spanned by the combination of the two accesses. */
3424 *lower_bound_out = init_b + vect_get_scalar_dr_size (dr_info_b) - init_a;
3425 return true;
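/* Worked example (illustrative numbers): for two 4-byte accesses at
   DR_INIT 0 and DR_INIT 16 from the same base, *LOWER_BOUND_OUT is set
   to 16 + 4 - 0 == 20, i.e. the accesses cannot alias once
   abs (DR_STEP) >= 20 bytes.  */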
3428 /* Function vect_prune_runtime_alias_test_list.
3430 Prune a list of ddrs to be tested at run-time by versioning for alias.
3431 Merge several alias checks into one if possible.
3432 Return FALSE if the resulting list of ddrs is longer than allowed by
3433 PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS, otherwise return TRUE. */
3435 opt_result
3436 vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
3438 typedef pair_hash <tree_operand_hash, tree_operand_hash> tree_pair_hash;
3439 hash_set <tree_pair_hash> compared_objects;
3441 vec<ddr_p> may_alias_ddrs = LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo);
3442 vec<dr_with_seg_len_pair_t> &comp_alias_ddrs
3443 = LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo);
3444 vec<vec_object_pair> &check_unequal_addrs
3445 = LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo);
3446 poly_uint64 vect_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3447 tree scalar_loop_iters = LOOP_VINFO_NITERS (loop_vinfo);
3449 ddr_p ddr;
3450 unsigned int i;
3451 tree length_factor;
3453 DUMP_VECT_SCOPE ("vect_prune_runtime_alias_test_list");
3455 /* Step values are irrelevant for aliasing if the number of vector
3456 iterations is equal to the number of scalar iterations (which can
3457 happen for fully-SLP loops). */
3458 bool ignore_step_p = known_eq (LOOP_VINFO_VECT_FACTOR (loop_vinfo), 1U);
3460 if (!ignore_step_p)
3462 /* Convert the checks for nonzero steps into bound tests. */
3463 tree value;
3464 FOR_EACH_VEC_ELT (LOOP_VINFO_CHECK_NONZERO (loop_vinfo), i, value)
3465 vect_check_lower_bound (loop_vinfo, value, true, 1);
3468 if (may_alias_ddrs.is_empty ())
3469 return opt_result::success ();
3471 comp_alias_ddrs.create (may_alias_ddrs.length ());
3473 unsigned int loop_depth
3474 = index_in_loop_nest (LOOP_VINFO_LOOP (loop_vinfo)->num,
3475 LOOP_VINFO_LOOP_NEST (loop_vinfo));
3477 /* First, we collect all data ref pairs for aliasing checks. */
3478 FOR_EACH_VEC_ELT (may_alias_ddrs, i, ddr)
3480 int comp_res;
3481 poly_uint64 lower_bound;
3482 tree segment_length_a, segment_length_b;
3483 unsigned HOST_WIDE_INT access_size_a, access_size_b;
3484 unsigned int align_a, align_b;
3486 /* Ignore the alias if the VF we chose ended up being no greater
3487 than the dependence distance. */
3488 if (dependence_distance_ge_vf (ddr, loop_depth, vect_factor))
3489 continue;
3491 if (DDR_OBJECT_A (ddr))
3493 vec_object_pair new_pair (DDR_OBJECT_A (ddr), DDR_OBJECT_B (ddr));
3494 if (!compared_objects.add (new_pair))
3496 if (dump_enabled_p ())
3497 dump_printf_loc (MSG_NOTE, vect_location,
3498 "checking that %T and %T"
3499 " have different addresses\n",
3500 new_pair.first, new_pair.second);
3501 LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).safe_push (new_pair);
3503 continue;
3506 dr_vec_info *dr_info_a = loop_vinfo->lookup_dr (DDR_A (ddr));
3507 stmt_vec_info stmt_info_a = dr_info_a->stmt;
3509 dr_vec_info *dr_info_b = loop_vinfo->lookup_dr (DDR_B (ddr));
3510 stmt_vec_info stmt_info_b = dr_info_b->stmt;
3512 /* Skip the pair if inter-iteration dependencies are irrelevant
3513 and intra-iteration dependencies are guaranteed to be honored. */
3514 if (ignore_step_p
3515 && (vect_preserves_scalar_order_p (dr_info_a, dr_info_b)
3516 || vectorizable_with_step_bound_p (dr_info_a, dr_info_b,
3517 &lower_bound)))
3519 if (dump_enabled_p ())
3520 dump_printf_loc (MSG_NOTE, vect_location,
3521 "no need for alias check between "
3522 "%T and %T when VF is 1\n",
3523 DR_REF (dr_info_a->dr), DR_REF (dr_info_b->dr));
3524 continue;
3527 /* See whether we can handle the alias using a bounds check on
3528 the step, and whether that's likely to be the best approach.
3529 (It might not be, for example, if the minimum step is much larger
3530 than the number of bytes handled by one vector iteration.) */
3531 if (!ignore_step_p
3532 && TREE_CODE (DR_STEP (dr_info_a->dr)) != INTEGER_CST
3533 && vectorizable_with_step_bound_p (dr_info_a, dr_info_b,
3534 &lower_bound)
3535 && (vect_small_gap_p (loop_vinfo, dr_info_a, lower_bound)
3536 || vect_small_gap_p (loop_vinfo, dr_info_b, lower_bound)))
3538 bool unsigned_p = dr_known_forward_stride_p (dr_info_a->dr);
3539 if (dump_enabled_p ())
3541 dump_printf_loc (MSG_NOTE, vect_location, "no alias between "
3542 "%T and %T when the step %T is outside ",
3543 DR_REF (dr_info_a->dr),
3544 DR_REF (dr_info_b->dr),
3545 DR_STEP (dr_info_a->dr));
3546 if (unsigned_p)
3547 dump_printf (MSG_NOTE, "[0");
3548 else
3550 dump_printf (MSG_NOTE, "(");
3551 dump_dec (MSG_NOTE, poly_int64 (-lower_bound));
3553 dump_printf (MSG_NOTE, ", ");
3554 dump_dec (MSG_NOTE, lower_bound);
3555 dump_printf (MSG_NOTE, ")\n");
3557 vect_check_lower_bound (loop_vinfo, DR_STEP (dr_info_a->dr),
3558 unsigned_p, lower_bound);
3559 continue;
3562 stmt_vec_info dr_group_first_a = DR_GROUP_FIRST_ELEMENT (stmt_info_a);
3563 if (dr_group_first_a)
3565 stmt_info_a = dr_group_first_a;
3566 dr_info_a = STMT_VINFO_DR_INFO (stmt_info_a);
3569 stmt_vec_info dr_group_first_b = DR_GROUP_FIRST_ELEMENT (stmt_info_b);
3570 if (dr_group_first_b)
3572 stmt_info_b = dr_group_first_b;
3573 dr_info_b = STMT_VINFO_DR_INFO (stmt_info_b);
3576 if (ignore_step_p)
3578 segment_length_a = size_zero_node;
3579 segment_length_b = size_zero_node;
3581 else
3583 if (!operand_equal_p (DR_STEP (dr_info_a->dr),
3584 DR_STEP (dr_info_b->dr), 0))
3585 length_factor = scalar_loop_iters;
3586 else
3587 length_factor = size_int (vect_factor);
3588 segment_length_a = vect_vfa_segment_size (dr_info_a, length_factor);
3589 segment_length_b = vect_vfa_segment_size (dr_info_b, length_factor);
3591 access_size_a = vect_vfa_access_size (dr_info_a);
3592 access_size_b = vect_vfa_access_size (dr_info_b);
3593 align_a = vect_vfa_align (dr_info_a);
3594 align_b = vect_vfa_align (dr_info_b);
3596 comp_res = data_ref_compare_tree (DR_BASE_ADDRESS (dr_info_a->dr),
3597 DR_BASE_ADDRESS (dr_info_b->dr));
3598 if (comp_res == 0)
3599 comp_res = data_ref_compare_tree (DR_OFFSET (dr_info_a->dr),
3600 DR_OFFSET (dr_info_b->dr));
3602 /* See whether the alias is known at compilation time. */
3603 if (comp_res == 0
3604 && TREE_CODE (DR_STEP (dr_info_a->dr)) == INTEGER_CST
3605 && TREE_CODE (DR_STEP (dr_info_b->dr)) == INTEGER_CST
3606 && poly_int_tree_p (segment_length_a)
3607 && poly_int_tree_p (segment_length_b))
3609 int res = vect_compile_time_alias (dr_info_a, dr_info_b,
3610 segment_length_a,
3611 segment_length_b,
3612 access_size_a,
3613 access_size_b);
3614 if (res >= 0 && dump_enabled_p ())
3616 dump_printf_loc (MSG_NOTE, vect_location,
3617 "can tell at compile time that %T and %T",
3618 DR_REF (dr_info_a->dr), DR_REF (dr_info_b->dr));
3619 if (res == 0)
3620 dump_printf (MSG_NOTE, " do not alias\n");
3621 else
3622 dump_printf (MSG_NOTE, " alias\n");
3625 if (res == 0)
3626 continue;
3628 if (res == 1)
3629 return opt_result::failure_at (stmt_info_b->stmt,
3630 "not vectorized:"
3631 " compilation time alias: %G%G",
3632 stmt_info_a->stmt,
3633 stmt_info_b->stmt);
3636 dr_with_seg_len_pair_t dr_with_seg_len_pair
3637 (dr_with_seg_len (dr_info_a->dr, segment_length_a,
3638 access_size_a, align_a),
3639 dr_with_seg_len (dr_info_b->dr, segment_length_b,
3640 access_size_b, align_b));
3642 /* Canonicalize pairs by sorting the two DR members. */
3643 if (comp_res > 0)
3644 std::swap (dr_with_seg_len_pair.first, dr_with_seg_len_pair.second);
3646 comp_alias_ddrs.safe_push (dr_with_seg_len_pair);
3649 prune_runtime_alias_test_list (&comp_alias_ddrs, vect_factor);
3651 unsigned int count = (comp_alias_ddrs.length ()
3652 + check_unequal_addrs.length ());
3654 if (dump_enabled_p ())
3655 dump_printf_loc (MSG_NOTE, vect_location,
3656 "improved number of alias checks from %d to %d\n",
3657 may_alias_ddrs.length (), count);
3658 unsigned limit = param_vect_max_version_for_alias_checks;
3659 if (flag_simd_cost_model == VECT_COST_MODEL_CHEAP)
3660 limit = param_vect_max_version_for_alias_checks * 6 / 10;
3661 if (count > limit)
3662 return opt_result::failure_at
3663 (vect_location,
3664 "number of versioning for alias run-time tests exceeds %d "
3665 "(--param vect-max-version-for-alias-checks)\n", limit);
3667 return opt_result::success ();
3670 /* Check whether we can use an internal function for a gather load
3671 or scatter store. READ_P is true for loads and false for stores.
3672 MASKED_P is true if the load or store is conditional. MEMORY_TYPE is
3673 the type of the memory elements being loaded or stored. OFFSET_TYPE
3674 is the type of the offset that is being applied to the invariant
3675 base address. SCALE is the amount by which the offset should
3676 be multiplied *after* it has been converted to address width.
3678 Return true if the function is supported, storing the function id in
3679 *IFN_OUT and the vector type for the offset in *OFFSET_VECTYPE_OUT. */
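/* Note that if the target does not support the combination for OFFSET_TYPE
   as given, the loop below retries with successively wider offset types
   (doubling the precision each time), giving up once the offset is at
   least pointer-sized and at least as wide as the vector elements.  */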
3681 bool
3682 vect_gather_scatter_fn_p (vec_info *vinfo, bool read_p, bool masked_p,
3683 tree vectype, tree memory_type, tree offset_type,
3684 int scale, internal_fn *ifn_out,
3685 tree *offset_vectype_out)
3687 unsigned int memory_bits = tree_to_uhwi (TYPE_SIZE (memory_type));
3688 unsigned int element_bits = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (vectype)));
3689 if (element_bits != memory_bits)
3690 /* For now the vector elements must be the same width as the
3691 memory elements. */
3692 return false;
3694 /* Work out which function we need. */
3695 internal_fn ifn;
3696 if (read_p)
3697 ifn = masked_p ? IFN_MASK_GATHER_LOAD : IFN_GATHER_LOAD;
3698 else
3699 ifn = masked_p ? IFN_MASK_SCATTER_STORE : IFN_SCATTER_STORE;
3701 for (;;)
3703 tree offset_vectype = get_vectype_for_scalar_type (vinfo, offset_type);
3704 if (!offset_vectype)
3705 return false;
3707 /* Test whether the target supports this combination. */
3708 if (internal_gather_scatter_fn_supported_p (ifn, vectype, memory_type,
3709 offset_vectype, scale))
3711 *ifn_out = ifn;
3712 *offset_vectype_out = offset_vectype;
3713 return true;
3716 if (TYPE_PRECISION (offset_type) >= POINTER_SIZE
3717 && TYPE_PRECISION (offset_type) >= element_bits)
3718 return false;
3720 offset_type = build_nonstandard_integer_type
3721 (TYPE_PRECISION (offset_type) * 2, TYPE_UNSIGNED (offset_type));
3725 /* STMT_INFO is a call to an internal gather load or scatter store function.
3726 Describe the operation in INFO. */
3728 static void
3729 vect_describe_gather_scatter_call (stmt_vec_info stmt_info,
3730 gather_scatter_info *info)
3732 gcall *call = as_a <gcall *> (stmt_info->stmt);
3733 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3734 data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
3736 info->ifn = gimple_call_internal_fn (call);
3737 info->decl = NULL_TREE;
3738 info->base = gimple_call_arg (call, 0);
3739 info->offset = gimple_call_arg (call, 1);
3740 info->offset_dt = vect_unknown_def_type;
3741 info->offset_vectype = NULL_TREE;
3742 info->scale = TREE_INT_CST_LOW (gimple_call_arg (call, 2));
3743 info->element_type = TREE_TYPE (vectype);
3744 info->memory_type = TREE_TYPE (DR_REF (dr));
3747 /* Return true if a non-affine read or write in STMT_INFO is suitable for a
3748 gather load or scatter store. Describe the operation in *INFO if so. */
3750 bool
3751 vect_check_gather_scatter (stmt_vec_info stmt_info, loop_vec_info loop_vinfo,
3752 gather_scatter_info *info)
3754 HOST_WIDE_INT scale = 1;
3755 poly_int64 pbitpos, pbitsize;
3756 class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
3757 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
3758 tree offtype = NULL_TREE;
3759 tree decl = NULL_TREE, base, off;
3760 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3761 tree memory_type = TREE_TYPE (DR_REF (dr));
3762 machine_mode pmode;
3763 int punsignedp, reversep, pvolatilep = 0;
3764 internal_fn ifn;
3765 tree offset_vectype;
3766 bool masked_p = false;
3768 /* See whether this is already a call to a gather/scatter internal function.
3769 If not, see whether it's a masked load or store. */
3770 gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
3771 if (call && gimple_call_internal_p (call))
3773 ifn = gimple_call_internal_fn (call);
3774 if (internal_gather_scatter_fn_p (ifn))
3776 vect_describe_gather_scatter_call (stmt_info, info);
3777 return true;
3779 masked_p = (ifn == IFN_MASK_LOAD || ifn == IFN_MASK_STORE);
3782 /* True if we should aim to use internal functions rather than
3783 built-in functions. */
3784 bool use_ifn_p = (DR_IS_READ (dr)
3785 ? supports_vec_gather_load_p ()
3786 : supports_vec_scatter_store_p ());
3788 base = DR_REF (dr);
3789 /* For masked loads/stores, DR_REF (dr) is an artificial MEM_REF,
3790 see if we can use the def stmt of the address. */
3791 if (masked_p
3792 && TREE_CODE (base) == MEM_REF
3793 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME
3794 && integer_zerop (TREE_OPERAND (base, 1))
3795 && !expr_invariant_in_loop_p (loop, TREE_OPERAND (base, 0)))
3797 gimple *def_stmt = SSA_NAME_DEF_STMT (TREE_OPERAND (base, 0));
3798 if (is_gimple_assign (def_stmt)
3799 && gimple_assign_rhs_code (def_stmt) == ADDR_EXPR)
3800 base = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 0);
3803 /* The gather and scatter builtins need address of the form
3804 loop_invariant + vector * {1, 2, 4, 8}
3806 loop_invariant + sign_extend (vector) * { 1, 2, 4, 8 }.
3807 Unfortunately DR_BASE_ADDRESS/DR_OFFSET can be a mixture
3808 of loop invariants/SSA_NAMEs defined in the loop, with casts,
3809 multiplications and additions in it. To get a vector, we need
3810 a single SSA_NAME that will be defined in the loop and will
3811 contain everything that is not loop invariant and that can be
3812    vectorized. The following code attempts to find such a preexisting
3813 SSA_NAME OFF and put the loop invariants into a tree BASE
3814 that can be gimplified before the loop. */
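   /* For instance (illustrative names), for a load such as

	for (i = 0; i < n; i++)
	  sum += data[idx[i]];

      the loop-invariant &data part ends up in BASE, OFF becomes the
      SSA_NAME holding the (possibly sign-extended) idx[i], and SCALE is
      derived from the element size by the MULT_EXPR handling below.  */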
3815 base = get_inner_reference (base, &pbitsize, &pbitpos, &off, &pmode,
3816 &punsignedp, &reversep, &pvolatilep);
3817 if (reversep)
3818 return false;
3820 poly_int64 pbytepos = exact_div (pbitpos, BITS_PER_UNIT);
3822 if (TREE_CODE (base) == MEM_REF)
3824 if (!integer_zerop (TREE_OPERAND (base, 1)))
3826 if (off == NULL_TREE)
3827 off = wide_int_to_tree (sizetype, mem_ref_offset (base));
3828 else
3829 off = size_binop (PLUS_EXPR, off,
3830 fold_convert (sizetype, TREE_OPERAND (base, 1)));
3832 base = TREE_OPERAND (base, 0);
3834 else
3835 base = build_fold_addr_expr (base);
3837 if (off == NULL_TREE)
3838 off = size_zero_node;
3840    /* If base is not loop invariant, then either off is 0 and we start with
3841       just the constant offset in the loop-invariant BASE and continue with
3842       base as OFF, or we give up.
3843       We could handle that case by gimplifying the addition of base + off
3844       into some SSA_NAME and using that as OFF, but for now punt. */
3845 if (!expr_invariant_in_loop_p (loop, base))
3847 if (!integer_zerop (off))
3848 return false;
3849 off = base;
3850 base = size_int (pbytepos);
3852 /* Otherwise put base + constant offset into the loop invariant BASE
3853 and continue with OFF. */
3854 else
3856 base = fold_convert (sizetype, base);
3857 base = size_binop (PLUS_EXPR, base, size_int (pbytepos));
3860 /* OFF at this point may be either a SSA_NAME or some tree expression
3861 from get_inner_reference. Try to peel off loop invariants from it
3862 into BASE as long as possible. */
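   /* Roughly, each iteration of the loop below strips one operation from
      OFF: loop-invariant PLUS/MINUS operands are scaled and folded into
      BASE, a constant MULT_EXPR factor becomes SCALE, and conversions are
      looked through (or kept, once the target accepts the offset type).  */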
3863 STRIP_NOPS (off);
3864 while (offtype == NULL_TREE)
3866 enum tree_code code;
3867 tree op0, op1, add = NULL_TREE;
3869 if (TREE_CODE (off) == SSA_NAME)
3871 gimple *def_stmt = SSA_NAME_DEF_STMT (off);
3873 if (expr_invariant_in_loop_p (loop, off))
3874 return false;
3876 if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
3877 break;
3879 op0 = gimple_assign_rhs1 (def_stmt);
3880 code = gimple_assign_rhs_code (def_stmt);
3881 op1 = gimple_assign_rhs2 (def_stmt);
3883 else
3885 if (get_gimple_rhs_class (TREE_CODE (off)) == GIMPLE_TERNARY_RHS)
3886 return false;
3887 code = TREE_CODE (off);
3888 extract_ops_from_tree (off, &code, &op0, &op1);
3890 switch (code)
3892 case POINTER_PLUS_EXPR:
3893 case PLUS_EXPR:
3894 if (expr_invariant_in_loop_p (loop, op0))
3896 add = op0;
3897 off = op1;
3898 do_add:
3899 add = fold_convert (sizetype, add);
3900 if (scale != 1)
3901 add = size_binop (MULT_EXPR, add, size_int (scale));
3902 base = size_binop (PLUS_EXPR, base, add);
3903 continue;
3905 if (expr_invariant_in_loop_p (loop, op1))
3907 add = op1;
3908 off = op0;
3909 goto do_add;
3911 break;
3912 case MINUS_EXPR:
3913 if (expr_invariant_in_loop_p (loop, op1))
3915 add = fold_convert (sizetype, op1);
3916 add = size_binop (MINUS_EXPR, size_zero_node, add);
3917 off = op0;
3918 goto do_add;
3920 break;
3921 case MULT_EXPR:
3922 if (scale == 1 && tree_fits_shwi_p (op1))
3924 int new_scale = tree_to_shwi (op1);
3925 /* Only treat this as a scaling operation if the target
3926 supports it for at least some offset type. */
3927 if (use_ifn_p
3928 && !vect_gather_scatter_fn_p (loop_vinfo, DR_IS_READ (dr),
3929 masked_p, vectype, memory_type,
3930 signed_char_type_node,
3931 new_scale, &ifn,
3932 &offset_vectype)
3933 && !vect_gather_scatter_fn_p (loop_vinfo, DR_IS_READ (dr),
3934 masked_p, vectype, memory_type,
3935 unsigned_char_type_node,
3936 new_scale, &ifn,
3937 &offset_vectype))
3938 break;
3939 scale = new_scale;
3940 off = op0;
3941 continue;
3943 break;
3944 case SSA_NAME:
3945 off = op0;
3946 continue;
3947 CASE_CONVERT:
3948 if (!POINTER_TYPE_P (TREE_TYPE (op0))
3949 && !INTEGRAL_TYPE_P (TREE_TYPE (op0)))
3950 break;
3952 /* Don't include the conversion if the target is happy with
3953 the current offset type. */
3954 if (use_ifn_p
3955 && vect_gather_scatter_fn_p (loop_vinfo, DR_IS_READ (dr),
3956 masked_p, vectype, memory_type,
3957 TREE_TYPE (off), scale, &ifn,
3958 &offset_vectype))
3959 break;
3961 if (TYPE_PRECISION (TREE_TYPE (op0))
3962 == TYPE_PRECISION (TREE_TYPE (off)))
3964 off = op0;
3965 continue;
3968 if (TYPE_PRECISION (TREE_TYPE (op0))
3969 < TYPE_PRECISION (TREE_TYPE (off)))
3971 off = op0;
3972 offtype = TREE_TYPE (off);
3973 STRIP_NOPS (off);
3974 continue;
3976 break;
3977 default:
3978 break;
3980 break;
3983 /* If at the end OFF still isn't a SSA_NAME or isn't
3984 defined in the loop, punt. */
3985 if (TREE_CODE (off) != SSA_NAME
3986 || expr_invariant_in_loop_p (loop, off))
3987 return false;
3989 if (offtype == NULL_TREE)
3990 offtype = TREE_TYPE (off);
3992 if (use_ifn_p)
3994 if (!vect_gather_scatter_fn_p (loop_vinfo, DR_IS_READ (dr), masked_p,
3995 vectype, memory_type, offtype, scale,
3996 &ifn, &offset_vectype))
3997 return false;
3999 else
4001 if (DR_IS_READ (dr))
4003 if (targetm.vectorize.builtin_gather)
4004 decl = targetm.vectorize.builtin_gather (vectype, offtype, scale);
4006 else
4008 if (targetm.vectorize.builtin_scatter)
4009 decl = targetm.vectorize.builtin_scatter (vectype, offtype, scale);
4012 if (!decl)
4013 return false;
4015 ifn = IFN_LAST;
4016 /* The offset vector type will be read from DECL when needed. */
4017 offset_vectype = NULL_TREE;
4020 info->ifn = ifn;
4021 info->decl = decl;
4022 info->base = base;
4023 info->offset = off;
4024 info->offset_dt = vect_unknown_def_type;
4025 info->offset_vectype = offset_vectype;
4026 info->scale = scale;
4027 info->element_type = TREE_TYPE (vectype);
4028 info->memory_type = memory_type;
4029 return true;
4032 /* Find the data references in STMT, analyze them with respect to LOOP and
4033 append them to DATAREFS. Return false if datarefs in this stmt cannot
4034 be handled. */
4036 opt_result
4037 vect_find_stmt_data_reference (loop_p loop, gimple *stmt,
4038 vec<data_reference_p> *datarefs)
4040 /* We can ignore clobbers for dataref analysis - they are removed during
4041 loop vectorization and BB vectorization checks dependences with a
4042 stmt walk. */
4043 if (gimple_clobber_p (stmt))
4044 return opt_result::success ();
4046 if (gimple_has_volatile_ops (stmt))
4047 return opt_result::failure_at (stmt, "not vectorized: volatile type: %G",
4048 stmt);
4050 if (stmt_can_throw_internal (cfun, stmt))
4051 return opt_result::failure_at (stmt,
4052 "not vectorized:"
4053 " statement can throw an exception: %G",
4054 stmt);
4056 auto_vec<data_reference_p, 2> refs;
4057 opt_result res = find_data_references_in_stmt (loop, stmt, &refs);
4058 if (!res)
4059 return res;
4061 if (refs.is_empty ())
4062 return opt_result::success ();
4064 if (refs.length () > 1)
4065 return opt_result::failure_at (stmt,
4066 "not vectorized:"
4067 " more than one data ref in stmt: %G", stmt);
4069 if (gcall *call = dyn_cast <gcall *> (stmt))
4070 if (!gimple_call_internal_p (call)
4071 || (gimple_call_internal_fn (call) != IFN_MASK_LOAD
4072 && gimple_call_internal_fn (call) != IFN_MASK_STORE))
4073 return opt_result::failure_at (stmt,
4074 "not vectorized: dr in a call %G", stmt);
4076 data_reference_p dr = refs.pop ();
4077 if (TREE_CODE (DR_REF (dr)) == COMPONENT_REF
4078 && DECL_BIT_FIELD (TREE_OPERAND (DR_REF (dr), 1)))
4079 return opt_result::failure_at (stmt,
4080 "not vectorized:"
4081 " statement is bitfield access %G", stmt);
4083 if (DR_BASE_ADDRESS (dr)
4084 && TREE_CODE (DR_BASE_ADDRESS (dr)) == INTEGER_CST)
4085 return opt_result::failure_at (stmt,
4086 "not vectorized:"
4087 " base addr of dr is a constant\n");
4089 /* Check whether this may be a SIMD lane access and adjust the
4090 DR to make it easier for us to handle it. */
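  /* Such accesses typically come from OMP SIMD lowering, where a per-lane
     copy of a private variable lives in a small array indexed by the
     current SIMD lane, i.e. by the value of an IFN_GOMP_SIMD_LANE call.
     If that pattern is recognized below, the DR is rewritten with a zero
     offset and an element-sized step and marked as a SIMD-lane access via
     its aux field.  */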
4091 if (loop
4092 && loop->simduid
4093 && (!DR_BASE_ADDRESS (dr)
4094 || !DR_OFFSET (dr)
4095 || !DR_INIT (dr)
4096 || !DR_STEP (dr)))
4098 struct data_reference *newdr
4099 = create_data_ref (NULL, loop_containing_stmt (stmt), DR_REF (dr), stmt,
4100 DR_IS_READ (dr), DR_IS_CONDITIONAL_IN_STMT (dr));
4101 if (DR_BASE_ADDRESS (newdr)
4102 && DR_OFFSET (newdr)
4103 && DR_INIT (newdr)
4104 && DR_STEP (newdr)
4105 && TREE_CODE (DR_INIT (newdr)) == INTEGER_CST
4106 && integer_zerop (DR_STEP (newdr)))
4108 tree base_address = DR_BASE_ADDRESS (newdr);
4109 tree off = DR_OFFSET (newdr);
4110 tree step = ssize_int (1);
4111 if (integer_zerop (off)
4112 && TREE_CODE (base_address) == POINTER_PLUS_EXPR)
4114 off = TREE_OPERAND (base_address, 1);
4115 base_address = TREE_OPERAND (base_address, 0);
4117 STRIP_NOPS (off);
4118 if (TREE_CODE (off) == MULT_EXPR
4119 && tree_fits_uhwi_p (TREE_OPERAND (off, 1)))
4121 step = TREE_OPERAND (off, 1);
4122 off = TREE_OPERAND (off, 0);
4123 STRIP_NOPS (off);
4125 if (CONVERT_EXPR_P (off)
4126 && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (off, 0)))
4127 < TYPE_PRECISION (TREE_TYPE (off))))
4128 off = TREE_OPERAND (off, 0);
4129 if (TREE_CODE (off) == SSA_NAME)
4131 gimple *def = SSA_NAME_DEF_STMT (off);
4132 /* Look through widening conversion. */
4133 if (is_gimple_assign (def)
4134 && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def)))
4136 tree rhs1 = gimple_assign_rhs1 (def);
4137 if (TREE_CODE (rhs1) == SSA_NAME
4138 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
4139 && (TYPE_PRECISION (TREE_TYPE (off))
4140 > TYPE_PRECISION (TREE_TYPE (rhs1))))
4141 def = SSA_NAME_DEF_STMT (rhs1);
4143 if (is_gimple_call (def)
4144 && gimple_call_internal_p (def)
4145 && (gimple_call_internal_fn (def) == IFN_GOMP_SIMD_LANE))
4147 tree arg = gimple_call_arg (def, 0);
4148 tree reft = TREE_TYPE (DR_REF (newdr));
4149 gcc_assert (TREE_CODE (arg) == SSA_NAME);
4150 arg = SSA_NAME_VAR (arg);
4151 if (arg == loop->simduid
4152 /* For now. */
4153 && tree_int_cst_equal (TYPE_SIZE_UNIT (reft), step))
4155 DR_BASE_ADDRESS (newdr) = base_address;
4156 DR_OFFSET (newdr) = ssize_int (0);
4157 DR_STEP (newdr) = step;
4158 DR_OFFSET_ALIGNMENT (newdr) = BIGGEST_ALIGNMENT;
4159 DR_STEP_ALIGNMENT (newdr) = highest_pow2_factor (step);
4160 /* Mark as simd-lane access. */
4161 tree arg2 = gimple_call_arg (def, 1);
4162 newdr->aux = (void *) (-1 - tree_to_uhwi (arg2));
4163 free_data_ref (dr);
4164 datarefs->safe_push (newdr);
4165 return opt_result::success ();
4170 free_data_ref (newdr);
4173 datarefs->safe_push (dr);
4174 return opt_result::success ();
4177 /* Function vect_analyze_data_refs.
4179 Find all the data references in the loop or basic block.
4181 The general structure of the analysis of data refs in the vectorizer is as
4182 follows:
4183 1- vect_analyze_data_refs(loop/bb): call
4184 compute_data_dependences_for_loop/bb to find and analyze all data-refs
4185 in the loop/bb and their dependences.
4186 2- vect_analyze_dependences(): apply dependence testing using ddrs.
4187 3- vect_analyze_drs_alignment(): check that ref_stmt.alignment is ok.
4188 4- vect_analyze_drs_access(): check that ref_stmt.step is ok.
4192 opt_result
4193 vect_analyze_data_refs (vec_info *vinfo, poly_uint64 *min_vf, bool *fatal)
4195 class loop *loop = NULL;
4196 unsigned int i;
4197 struct data_reference *dr;
4198 tree scalar_type;
4200 DUMP_VECT_SCOPE ("vect_analyze_data_refs");
4202 if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
4203 loop = LOOP_VINFO_LOOP (loop_vinfo);
4205 /* Go through the data-refs, check that the analysis succeeded. Update
4206 pointer from stmt_vec_info struct to DR and vectype. */
4208 vec<data_reference_p> datarefs = vinfo->shared->datarefs;
4209 FOR_EACH_VEC_ELT (datarefs, i, dr)
4211 enum { SG_NONE, GATHER, SCATTER } gatherscatter = SG_NONE;
4212 poly_uint64 vf;
4214 gcc_assert (DR_REF (dr));
4215 stmt_vec_info stmt_info = vinfo->lookup_stmt (DR_STMT (dr));
4216 gcc_assert (!stmt_info->dr_aux.dr);
4217 stmt_info->dr_aux.dr = dr;
4218 stmt_info->dr_aux.stmt = stmt_info;
4220 /* Check that analysis of the data-ref succeeded. */
4221 if (!DR_BASE_ADDRESS (dr) || !DR_OFFSET (dr) || !DR_INIT (dr)
4222 || !DR_STEP (dr))
4224 bool maybe_gather
4225 = DR_IS_READ (dr)
4226 && !TREE_THIS_VOLATILE (DR_REF (dr))
4227 && (targetm.vectorize.builtin_gather != NULL
4228 || supports_vec_gather_load_p ());
4229 bool maybe_scatter
4230 = DR_IS_WRITE (dr)
4231 && !TREE_THIS_VOLATILE (DR_REF (dr))
4232 && (targetm.vectorize.builtin_scatter != NULL
4233 || supports_vec_scatter_store_p ());
4235 /* If target supports vector gather loads or scatter stores,
4236	     see if they can be used. */
4237 if (is_a <loop_vec_info> (vinfo)
4238 && !nested_in_vect_loop_p (loop, stmt_info))
4240 if (maybe_gather || maybe_scatter)
4242 if (maybe_gather)
4243 gatherscatter = GATHER;
4244 else
4245 gatherscatter = SCATTER;
4249 if (gatherscatter == SG_NONE)
4251 if (dump_enabled_p ())
4252 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4253 "not vectorized: data ref analysis "
4254 "failed %G", stmt_info->stmt);
4255 if (is_a <bb_vec_info> (vinfo))
4257 /* In BB vectorization the ref can still participate
4258	     in dependence analysis; we just can't vectorize it. */
4259 STMT_VINFO_VECTORIZABLE (stmt_info) = false;
4260 continue;
4262 return opt_result::failure_at (stmt_info->stmt,
4263 "not vectorized:"
4264 " data ref analysis failed: %G",
4265 stmt_info->stmt);
4269 /* See if this was detected as SIMD lane access. */
4270 if (dr->aux == (void *)-1
4271 || dr->aux == (void *)-2
4272 || dr->aux == (void *)-3
4273 || dr->aux == (void *)-4)
4275 if (nested_in_vect_loop_p (loop, stmt_info))
4276 return opt_result::failure_at (stmt_info->stmt,
4277 "not vectorized:"
4278 " data ref analysis failed: %G",
4279 stmt_info->stmt);
4280 STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info)
4281 = -(uintptr_t) dr->aux;
4284 tree base = get_base_address (DR_REF (dr));
4285 if (base && VAR_P (base) && DECL_NONALIASED (base))
4287 if (dump_enabled_p ())
4288 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4289 "not vectorized: base object not addressable "
4290 "for stmt: %G", stmt_info->stmt);
4291 if (is_a <bb_vec_info> (vinfo))
4293 /* In BB vectorization the ref can still participate
4294	     in dependence analysis; we just can't vectorize it. */
4295 STMT_VINFO_VECTORIZABLE (stmt_info) = false;
4296 continue;
4298 return opt_result::failure_at (stmt_info->stmt,
4299 "not vectorized: base object not"
4300 " addressable for stmt: %G",
4301 stmt_info->stmt);
4304 if (is_a <loop_vec_info> (vinfo)
4305 && DR_STEP (dr)
4306 && TREE_CODE (DR_STEP (dr)) != INTEGER_CST)
4308 if (nested_in_vect_loop_p (loop, stmt_info))
4309 return opt_result::failure_at (stmt_info->stmt,
4310 "not vectorized: "
4311 "not suitable for strided load %G",
4312 stmt_info->stmt);
4313 STMT_VINFO_STRIDED_P (stmt_info) = true;
4316 /* Update DR field in stmt_vec_info struct. */
4318 /* If the dataref is in an inner-loop of the loop that is considered for
4319      vectorization, we also want to analyze the access relative to
4320 the outer-loop (DR contains information only relative to the
4321 inner-most enclosing loop). We do that by building a reference to the
4322 first location accessed by the inner-loop, and analyze it relative to
4323 the outer-loop. */
4324 if (loop && nested_in_vect_loop_p (loop, stmt_info))
4326 /* Build a reference to the first location accessed by the
4327 inner loop: *(BASE + INIT + OFFSET). By construction,
4328 this address must be invariant in the inner loop, so we
4329 can consider it as being used in the outer loop. */
4330 tree base = unshare_expr (DR_BASE_ADDRESS (dr));
4331 tree offset = unshare_expr (DR_OFFSET (dr));
4332 tree init = unshare_expr (DR_INIT (dr));
4333 tree init_offset = fold_build2 (PLUS_EXPR, TREE_TYPE (offset),
4334 init, offset);
4335 tree init_addr = fold_build_pointer_plus (base, init_offset);
4336 tree init_ref = build_fold_indirect_ref (init_addr);
4338 if (dump_enabled_p ())
4339 dump_printf_loc (MSG_NOTE, vect_location,
4340 "analyze in outer loop: %T\n", init_ref);
4342 opt_result res
4343 = dr_analyze_innermost (&STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info),
4344 init_ref, loop, stmt_info->stmt);
4345 if (!res)
4346 /* dr_analyze_innermost already explained the failure. */
4347 return res;
4349 if (dump_enabled_p ())
4350 dump_printf_loc (MSG_NOTE, vect_location,
4351 "\touter base_address: %T\n"
4352 "\touter offset from base address: %T\n"
4353 "\touter constant offset from base address: %T\n"
4354 "\touter step: %T\n"
4355 "\touter base alignment: %d\n\n"
4356 "\touter base misalignment: %d\n"
4357 "\touter offset alignment: %d\n"
4358 "\touter step alignment: %d\n",
4359 STMT_VINFO_DR_BASE_ADDRESS (stmt_info),
4360 STMT_VINFO_DR_OFFSET (stmt_info),
4361 STMT_VINFO_DR_INIT (stmt_info),
4362 STMT_VINFO_DR_STEP (stmt_info),
4363 STMT_VINFO_DR_BASE_ALIGNMENT (stmt_info),
4364 STMT_VINFO_DR_BASE_MISALIGNMENT (stmt_info),
4365 STMT_VINFO_DR_OFFSET_ALIGNMENT (stmt_info),
4366 STMT_VINFO_DR_STEP_ALIGNMENT (stmt_info));
4369 /* Set vectype for STMT. */
4370 scalar_type = TREE_TYPE (DR_REF (dr));
4371 STMT_VINFO_VECTYPE (stmt_info)
4372 = get_vectype_for_scalar_type (vinfo, scalar_type);
4373 if (!STMT_VINFO_VECTYPE (stmt_info))
4375 if (dump_enabled_p ())
4377 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4378 "not vectorized: no vectype for stmt: %G",
4379 stmt_info->stmt);
4380 dump_printf (MSG_MISSED_OPTIMIZATION, " scalar_type: ");
4381 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_DETAILS,
4382 scalar_type);
4383 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
4386 if (is_a <bb_vec_info> (vinfo))
4388	      /* Having no vector type is fine: the ref can still participate
4389		 in dependence analysis; we just can't vectorize it. */
4390 STMT_VINFO_VECTORIZABLE (stmt_info) = false;
4391 continue;
4393 if (fatal)
4394 *fatal = false;
4395 return opt_result::failure_at (stmt_info->stmt,
4396 "not vectorized:"
4397 " no vectype for stmt: %G"
4398 " scalar_type: %T\n",
4399 stmt_info->stmt, scalar_type);
4401 else
4403 if (dump_enabled_p ())
4404 dump_printf_loc (MSG_NOTE, vect_location,
4405 "got vectype for stmt: %G%T\n",
4406 stmt_info->stmt, STMT_VINFO_VECTYPE (stmt_info));
4409 /* Adjust the minimal vectorization factor according to the
4410 vector type. */
4411 vf = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
4412 *min_vf = upper_bound (*min_vf, vf);
4414 if (gatherscatter != SG_NONE)
4416 gather_scatter_info gs_info;
4417 if (!vect_check_gather_scatter (stmt_info,
4418 as_a <loop_vec_info> (vinfo),
4419 &gs_info)
4420 || !get_vectype_for_scalar_type (vinfo,
4421 TREE_TYPE (gs_info.offset)))
4423 if (fatal)
4424 *fatal = false;
4425 return opt_result::failure_at
4426 (stmt_info->stmt,
4427 (gatherscatter == GATHER)
4428 ? "not vectorized: not suitable for gather load %G"
4429 : "not vectorized: not suitable for scatter store %G",
4430 stmt_info->stmt);
4432 STMT_VINFO_GATHER_SCATTER_P (stmt_info) = gatherscatter;
4436 /* We used to stop processing and prune the list here. Verify we no
4437 longer need to. */
4438 gcc_assert (i == datarefs.length ());
4440 return opt_result::success ();
4444 /* Function vect_get_new_vect_var.
4446    Returns a name for a new variable. The current naming scheme prepends
4447    the prefix "vect", "vectp", "stmp" or "mask" (depending on the value of
4448    VAR_KIND), followed by an underscore, to NAME if NAME is provided;
4449    otherwise just the prefix is used. */
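/* For instance, vect_get_new_vect_var (some_type, vect_pointer_var, "x")
   creates a temporary whose name is based on "vectp_x".  */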
4451 tree
4452 vect_get_new_vect_var (tree type, enum vect_var_kind var_kind, const char *name)
4454 const char *prefix;
4455 tree new_vect_var;
4457 switch (var_kind)
4459 case vect_simple_var:
4460 prefix = "vect";
4461 break;
4462 case vect_scalar_var:
4463 prefix = "stmp";
4464 break;
4465 case vect_mask_var:
4466 prefix = "mask";
4467 break;
4468 case vect_pointer_var:
4469 prefix = "vectp";
4470 break;
4471 default:
4472 gcc_unreachable ();
4475 if (name)
4477 char* tmp = concat (prefix, "_", name, NULL);
4478 new_vect_var = create_tmp_reg (type, tmp);
4479 free (tmp);
4481 else
4482 new_vect_var = create_tmp_reg (type, prefix);
4484 return new_vect_var;
4487 /* Like vect_get_new_vect_var but return an SSA name. */
4489 tree
4490 vect_get_new_ssa_name (tree type, enum vect_var_kind var_kind, const char *name)
4492 const char *prefix;
4493 tree new_vect_var;
4495 switch (var_kind)
4497 case vect_simple_var:
4498 prefix = "vect";
4499 break;
4500 case vect_scalar_var:
4501 prefix = "stmp";
4502 break;
4503 case vect_pointer_var:
4504 prefix = "vectp";
4505 break;
4506 default:
4507 gcc_unreachable ();
4510 if (name)
4512 char* tmp = concat (prefix, "_", name, NULL);
4513 new_vect_var = make_temp_ssa_name (type, NULL, tmp);
4514 free (tmp);
4516 else
4517 new_vect_var = make_temp_ssa_name (type, NULL, prefix);
4519 return new_vect_var;
4522 /* Duplicate ptr info and set alignment/misalignment on NAME from DR_INFO. */
4524 static void
4525 vect_duplicate_ssa_name_ptr_info (tree name, dr_vec_info *dr_info)
4527 duplicate_ssa_name_ptr_info (name, DR_PTR_INFO (dr_info->dr));
4528 int misalign = DR_MISALIGNMENT (dr_info);
4529 if (misalign == DR_MISALIGNMENT_UNKNOWN)
4530 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (name));
4531 else
4532 set_ptr_info_alignment (SSA_NAME_PTR_INFO (name),
4533 known_alignment (DR_TARGET_ALIGNMENT (dr_info)),
4534 misalign);
4537 /* Function vect_create_addr_base_for_vector_ref.
4539 Create an expression that computes the address of the first memory location
4540 that will be accessed for a data reference.
4542 Input:
4543 STMT_INFO: The statement containing the data reference.
4544 NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list.
4545    OFFSET: Optional. If supplied, it is added to the initial address.
4546    LOOP: Specify relative to which loop-nest the address should be computed.
4547 For example, when the dataref is in an inner-loop nested in an
4548 outer-loop that is now being vectorized, LOOP can be either the
4549 outer-loop, or the inner-loop. The first memory location accessed
4550 by the following dataref ('in' points to short):
4552 for (i=0; i<N; i++)
4553 for (j=0; j<M; j++)
4554 s += in[i+j]
4556 is as follows:
4557 if LOOP=i_loop: &in (relative to i_loop)
4558 if LOOP=j_loop: &in+i*2B (relative to j_loop)
4559 BYTE_OFFSET: Optional, defaulted to NULL. If supplied, it is added to the
4560            initial address. Unlike OFFSET, which is a number of elements to
4561 be added, BYTE_OFFSET is measured in bytes.
4563 Output:
4564 1. Return an SSA_NAME whose value is the address of the memory location of
4565 the first vector of the data reference.
4566 2. If new_stmt_list is not NULL_TREE after return then the caller must insert
4567 these statement(s) which define the returned SSA_NAME.
4569 FORNOW: We are only handling array accesses with step 1. */
4571 tree
4572 vect_create_addr_base_for_vector_ref (stmt_vec_info stmt_info,
4573 gimple_seq *new_stmt_list,
4574 tree offset,
4575 tree byte_offset)
4577 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
4578 struct data_reference *dr = dr_info->dr;
4579 const char *base_name;
4580 tree addr_base;
4581 tree dest;
4582 gimple_seq seq = NULL;
4583 tree vect_ptr_type;
4584 tree step = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
4585 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4586 innermost_loop_behavior *drb = vect_dr_behavior (dr_info);
4588 tree data_ref_base = unshare_expr (drb->base_address);
4589 tree base_offset = unshare_expr (drb->offset);
4590 tree init = unshare_expr (drb->init);
4592 if (loop_vinfo)
4593 base_name = get_name (data_ref_base);
4594 else
4596 base_offset = ssize_int (0);
4597 init = ssize_int (0);
4598 base_name = get_name (DR_REF (dr));
4601 /* Create base_offset */
4602 base_offset = size_binop (PLUS_EXPR,
4603 fold_convert (sizetype, base_offset),
4604 fold_convert (sizetype, init));
4606 if (offset)
4608 offset = fold_build2 (MULT_EXPR, sizetype,
4609 fold_convert (sizetype, offset), step);
4610 base_offset = fold_build2 (PLUS_EXPR, sizetype,
4611 base_offset, offset);
4613 if (byte_offset)
4615 byte_offset = fold_convert (sizetype, byte_offset);
4616 base_offset = fold_build2 (PLUS_EXPR, sizetype,
4617 base_offset, byte_offset);
4620 /* base + base_offset */
4621 if (loop_vinfo)
4622 addr_base = fold_build_pointer_plus (data_ref_base, base_offset);
4623 else
4625 addr_base = build1 (ADDR_EXPR,
4626 build_pointer_type (TREE_TYPE (DR_REF (dr))),
4627 unshare_expr (DR_REF (dr)));
4630 vect_ptr_type = build_pointer_type (STMT_VINFO_VECTYPE (stmt_info));
4631 dest = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var, base_name);
4632 addr_base = force_gimple_operand (addr_base, &seq, true, dest);
4633 gimple_seq_add_seq (new_stmt_list, seq);
4635 if (DR_PTR_INFO (dr)
4636 && TREE_CODE (addr_base) == SSA_NAME
4637 && !SSA_NAME_PTR_INFO (addr_base))
4639 vect_duplicate_ssa_name_ptr_info (addr_base, dr_info);
4640 if (offset || byte_offset)
4641 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (addr_base));
4644 if (dump_enabled_p ())
4645 dump_printf_loc (MSG_NOTE, vect_location, "created %T\n", addr_base);
4647 return addr_base;
4651 /* Function vect_create_data_ref_ptr.
4653 Create a new pointer-to-AGGR_TYPE variable (ap), that points to the first
4654 location accessed in the loop by STMT_INFO, along with the def-use update
4655 chain to appropriately advance the pointer through the loop iterations.
4656 Also set aliasing information for the pointer. This pointer is used by
4657 the callers to this function to create a memory reference expression for
4658 vector load/store access.
4660 Input:
4661 1. STMT_INFO: a stmt that references memory. Expected to be of the form
4662 GIMPLE_ASSIGN <name, data-ref> or
4663 GIMPLE_ASSIGN <data-ref, name>.
4664 2. AGGR_TYPE: the type of the reference, which should be either a vector
4665 or an array.
4666 3. AT_LOOP: the loop where the vector memref is to be created.
4667 4. OFFSET (optional): an offset to be added to the initial address accessed
4668 by the data-ref in STMT_INFO.
4669 5. BSI: location where the new stmts are to be placed if there is no loop
4670 6. ONLY_INIT: indicate if ap is to be updated in the loop, or remain
4671 pointing to the initial address.
4672 7. BYTE_OFFSET (optional, defaults to NULL): a byte offset to be added
4673 to the initial address accessed by the data-ref in STMT_INFO. This is
4674 similar to OFFSET, but OFFSET is counted in elements, while BYTE_OFFSET
4675 in bytes.
4676 8. IV_STEP (optional, defaults to NULL): the amount that should be added
4677 to the IV during each iteration of the loop. NULL says to move
4678 by one copy of AGGR_TYPE up or down, depending on the step of the
4679 data reference.
4681 Output:
4682 1. Declare a new ptr to vector_type, and have it point to the base of the
4683       data reference (initial address accessed by the data reference).
4684       For example, for a vector of type V8HI, the following code is generated:
4686 v8hi *ap;
4687 ap = (v8hi *)initial_address;
4689 if OFFSET is not supplied:
4690 initial_address = &a[init];
4691 if OFFSET is supplied:
4692 initial_address = &a[init + OFFSET];
4693 if BYTE_OFFSET is supplied:
4694 initial_address = &a[init] + BYTE_OFFSET;
4696 Return the initial_address in INITIAL_ADDRESS.
4698 2. If ONLY_INIT is true, just return the initial pointer. Otherwise, also
4699 update the pointer in each iteration of the loop.
4701 Return the increment stmt that updates the pointer in PTR_INCR.
4703 3. Return the pointer. */
4705 tree
4706 vect_create_data_ref_ptr (stmt_vec_info stmt_info, tree aggr_type,
4707 class loop *at_loop, tree offset,
4708 tree *initial_address, gimple_stmt_iterator *gsi,
4709 gimple **ptr_incr, bool only_init,
4710 tree byte_offset, tree iv_step)
4712 const char *base_name;
4713 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4714 class loop *loop = NULL;
4715 bool nested_in_vect_loop = false;
4716 class loop *containing_loop = NULL;
4717 tree aggr_ptr_type;
4718 tree aggr_ptr;
4719 tree new_temp;
4720 gimple_seq new_stmt_list = NULL;
4721 edge pe = NULL;
4722 basic_block new_bb;
4723 tree aggr_ptr_init;
4724 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
4725 struct data_reference *dr = dr_info->dr;
4726 tree aptr;
4727 gimple_stmt_iterator incr_gsi;
4728 bool insert_after;
4729 tree indx_before_incr, indx_after_incr;
4730 gimple *incr;
4731 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4733 gcc_assert (iv_step != NULL_TREE
4734 || TREE_CODE (aggr_type) == ARRAY_TYPE
4735 || TREE_CODE (aggr_type) == VECTOR_TYPE);
4737 if (loop_vinfo)
4739 loop = LOOP_VINFO_LOOP (loop_vinfo);
4740 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt_info);
4741 containing_loop = (gimple_bb (stmt_info->stmt))->loop_father;
4742 pe = loop_preheader_edge (loop);
4744 else
4746 gcc_assert (bb_vinfo);
4747 only_init = true;
4748 *ptr_incr = NULL;
4751 /* Create an expression for the first address accessed by this load
4752 in LOOP. */
4753 base_name = get_name (DR_BASE_ADDRESS (dr));
4755 if (dump_enabled_p ())
4757 tree dr_base_type = TREE_TYPE (DR_BASE_OBJECT (dr));
4758 dump_printf_loc (MSG_NOTE, vect_location,
4759 "create %s-pointer variable to type: %T",
4760 get_tree_code_name (TREE_CODE (aggr_type)),
4761 aggr_type);
4762 if (TREE_CODE (dr_base_type) == ARRAY_TYPE)
4763 dump_printf (MSG_NOTE, " vectorizing an array ref: ");
4764 else if (TREE_CODE (dr_base_type) == VECTOR_TYPE)
4765 dump_printf (MSG_NOTE, " vectorizing a vector ref: ");
4766 else if (TREE_CODE (dr_base_type) == RECORD_TYPE)
4767 dump_printf (MSG_NOTE, " vectorizing a record based array ref: ");
4768 else
4769 dump_printf (MSG_NOTE, " vectorizing a pointer ref: ");
4770 dump_printf (MSG_NOTE, "%T\n", DR_BASE_OBJECT (dr));
4773 /* (1) Create the new aggregate-pointer variable.
4774 Vector and array types inherit the alias set of their component
4775 type by default so we need to use a ref-all pointer if the data
4776 reference does not conflict with the created aggregated data
4777 reference because it is not addressable. */
4778 bool need_ref_all = false;
4779 if (!alias_sets_conflict_p (get_alias_set (aggr_type),
4780 get_alias_set (DR_REF (dr))))
4781 need_ref_all = true;
4782 /* Likewise for any of the data references in the stmt group. */
4783 else if (DR_GROUP_SIZE (stmt_info) > 1)
4785 stmt_vec_info sinfo = DR_GROUP_FIRST_ELEMENT (stmt_info);
4788 struct data_reference *sdr = STMT_VINFO_DATA_REF (sinfo);
4789 if (!alias_sets_conflict_p (get_alias_set (aggr_type),
4790 get_alias_set (DR_REF (sdr))))
4792 need_ref_all = true;
4793 break;
4795 sinfo = DR_GROUP_NEXT_ELEMENT (sinfo);
4797 while (sinfo);
4799 aggr_ptr_type = build_pointer_type_for_mode (aggr_type, ptr_mode,
4800 need_ref_all);
4801 aggr_ptr = vect_get_new_vect_var (aggr_ptr_type, vect_pointer_var, base_name);
4804 /* Note: If the dataref is in an inner-loop nested in LOOP, and we are
4805 vectorizing LOOP (i.e., outer-loop vectorization), we need to create two
4806 def-use update cycles for the pointer: one relative to the outer-loop
4807 (LOOP), which is what steps (3) and (4) below do. The other is relative
4808 to the inner-loop (which is the inner-most loop containing the dataref),
4809      and this is done by step (5) below.
4811 When vectorizing inner-most loops, the vectorized loop (LOOP) is also the
4812 inner-most loop, and so steps (3),(4) work the same, and step (5) is
4813 redundant. Steps (3),(4) create the following:
4815 vp0 = &base_addr;
4816 LOOP: vp1 = phi(vp0,vp2)
4819 vp2 = vp1 + step
4820 goto LOOP
4822 If there is an inner-loop nested in loop, then step (5) will also be
4823 applied, and an additional update in the inner-loop will be created:
4825 vp0 = &base_addr;
4826 LOOP: vp1 = phi(vp0,vp2)
4828 inner: vp3 = phi(vp1,vp4)
4829 vp4 = vp3 + inner_step
4830 if () goto inner
4832 vp2 = vp1 + step
4833 if () goto LOOP */
4835 /* (2) Calculate the initial address of the aggregate-pointer, and set
4836 the aggregate-pointer to point to it before the loop. */
4838 /* Create: (&(base[init_val+offset]+byte_offset) in the loop preheader. */
4840 new_temp = vect_create_addr_base_for_vector_ref (stmt_info, &new_stmt_list,
4841 offset, byte_offset);
4842 if (new_stmt_list)
4844 if (pe)
4846 new_bb = gsi_insert_seq_on_edge_immediate (pe, new_stmt_list);
4847 gcc_assert (!new_bb);
4849 else
4850 gsi_insert_seq_before (gsi, new_stmt_list, GSI_SAME_STMT);
4853 *initial_address = new_temp;
4854 aggr_ptr_init = new_temp;
4856 /* (3) Handle the updating of the aggregate-pointer inside the loop.
4857 This is needed when ONLY_INIT is false, and also when AT_LOOP is the
4858 inner-loop nested in LOOP (during outer-loop vectorization). */
4860 /* No update in loop is required. */
4861 if (only_init && (!loop_vinfo || at_loop == loop))
4862 aptr = aggr_ptr_init;
4863 else
4865 /* Accesses to invariant addresses should be handled specially
4866 by the caller. */
4867 tree step = vect_dr_behavior (dr_info)->step;
4868 gcc_assert (!integer_zerop (step));
4870 if (iv_step == NULL_TREE)
4872 /* The step of the aggregate pointer is the type size,
4873 negated for downward accesses. */
4874 iv_step = TYPE_SIZE_UNIT (aggr_type);
4875 if (tree_int_cst_sgn (step) == -1)
4876 iv_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv_step), iv_step);
4879 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
4881 create_iv (aggr_ptr_init,
4882 fold_convert (aggr_ptr_type, iv_step),
4883 aggr_ptr, loop, &incr_gsi, insert_after,
4884 &indx_before_incr, &indx_after_incr);
4885 incr = gsi_stmt (incr_gsi);
4886 loop_vinfo->add_stmt (incr);
4888 /* Copy the points-to information if it exists. */
4889 if (DR_PTR_INFO (dr))
4891 vect_duplicate_ssa_name_ptr_info (indx_before_incr, dr_info);
4892 vect_duplicate_ssa_name_ptr_info (indx_after_incr, dr_info);
4894 if (ptr_incr)
4895 *ptr_incr = incr;
4897 aptr = indx_before_incr;
4900 if (!nested_in_vect_loop || only_init)
4901 return aptr;
4904 /* (4) Handle the updating of the aggregate-pointer inside the inner-loop
4905      nested in LOOP, if it exists. */
4907 gcc_assert (nested_in_vect_loop);
4908 if (!only_init)
4910 standard_iv_increment_position (containing_loop, &incr_gsi,
4911 &insert_after);
4912 create_iv (aptr, fold_convert (aggr_ptr_type, DR_STEP (dr)), aggr_ptr,
4913 containing_loop, &incr_gsi, insert_after, &indx_before_incr,
4914 &indx_after_incr);
4915 incr = gsi_stmt (incr_gsi);
4916 loop_vinfo->add_stmt (incr);
4918 /* Copy the points-to information if it exists. */
4919 if (DR_PTR_INFO (dr))
4921 vect_duplicate_ssa_name_ptr_info (indx_before_incr, dr_info);
4922 vect_duplicate_ssa_name_ptr_info (indx_after_incr, dr_info);
4924 if (ptr_incr)
4925 *ptr_incr = incr;
4927 return indx_before_incr;
4929 else
4930 gcc_unreachable ();
4934 /* Function bump_vector_ptr
4936 Increment a pointer (to a vector type) by vector-size. If requested,
4937 i.e. if PTR-INCR is given, then also connect the new increment stmt
4938 to the existing def-use update-chain of the pointer, by modifying
4939 the PTR_INCR as illustrated below:
4941 The pointer def-use update-chain before this function:
4942 DATAREF_PTR = phi (p_0, p_2)
4943 ....
4944 PTR_INCR: p_2 = DATAREF_PTR + step
4946 The pointer def-use update-chain after this function:
4947 DATAREF_PTR = phi (p_0, p_2)
4948 ....
4949 NEW_DATAREF_PTR = DATAREF_PTR + BUMP
4950 ....
4951 PTR_INCR: p_2 = NEW_DATAREF_PTR + step
4953 Input:
4954 DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated
4955 in the loop.
4956 PTR_INCR - optional. The stmt that updates the pointer in each iteration of
4957 the loop. The increment amount across iterations is expected
4958 to be vector_size.
4959 BSI - location where the new update stmt is to be placed.
4960 STMT_INFO - the original scalar memory-access stmt that is being vectorized.
4961 BUMP - optional. The offset by which to bump the pointer. If not given,
4962 the offset is assumed to be vector_size.
4964 Output: Return NEW_DATAREF_PTR as illustrated above.
4968 tree
4969 bump_vector_ptr (tree dataref_ptr, gimple *ptr_incr, gimple_stmt_iterator *gsi,
4970 stmt_vec_info stmt_info, tree bump)
4972 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
4973 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4974 tree update = TYPE_SIZE_UNIT (vectype);
4975 gassign *incr_stmt;
4976 ssa_op_iter iter;
4977 use_operand_p use_p;
4978 tree new_dataref_ptr;
4980 if (bump)
4981 update = bump;
4983 if (TREE_CODE (dataref_ptr) == SSA_NAME)
4984 new_dataref_ptr = copy_ssa_name (dataref_ptr);
4985 else
4986 new_dataref_ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
4987 incr_stmt = gimple_build_assign (new_dataref_ptr, POINTER_PLUS_EXPR,
4988 dataref_ptr, update);
4989 vect_finish_stmt_generation (stmt_info, incr_stmt, gsi);
4991 /* Copy the points-to information if it exists. */
4992 if (DR_PTR_INFO (dr))
4994 duplicate_ssa_name_ptr_info (new_dataref_ptr, DR_PTR_INFO (dr));
4995 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (new_dataref_ptr));
4998 if (!ptr_incr)
4999 return new_dataref_ptr;
5001 /* Update the vector-pointer's cross-iteration increment. */
5002 FOR_EACH_SSA_USE_OPERAND (use_p, ptr_incr, iter, SSA_OP_USE)
5004 tree use = USE_FROM_PTR (use_p);
5006 if (use == dataref_ptr)
5007 SET_USE (use_p, new_dataref_ptr);
5008 else
5009 gcc_assert (operand_equal_p (use, update, 0));
5012 return new_dataref_ptr;
5016 /* Copy memory reference info such as base/clique from the SRC reference
5017 to the DEST MEM_REF. */
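/* The clique/base pair copied below encodes restrict-style disambiguation
   information, so preserving it keeps that information valid for the
   vectorized reference.  */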
5019 void
5020 vect_copy_ref_info (tree dest, tree src)
5022 if (TREE_CODE (dest) != MEM_REF)
5023 return;
5025 tree src_base = src;
5026 while (handled_component_p (src_base))
5027 src_base = TREE_OPERAND (src_base, 0);
5028 if (TREE_CODE (src_base) != MEM_REF
5029 && TREE_CODE (src_base) != TARGET_MEM_REF)
5030 return;
5032 MR_DEPENDENCE_CLIQUE (dest) = MR_DEPENDENCE_CLIQUE (src_base);
5033 MR_DEPENDENCE_BASE (dest) = MR_DEPENDENCE_BASE (src_base);
5037 /* Function vect_create_destination_var.
5039 Create a new temporary of type VECTYPE. */
5041 tree
5042 vect_create_destination_var (tree scalar_dest, tree vectype)
5044 tree vec_dest;
5045 const char *name;
5046 char *new_name;
5047 tree type;
5048 enum vect_var_kind kind;
5050 kind = vectype
5051 ? VECTOR_BOOLEAN_TYPE_P (vectype)
5052 ? vect_mask_var
5053 : vect_simple_var
5054 : vect_scalar_var;
5055 type = vectype ? vectype : TREE_TYPE (scalar_dest);
5057 gcc_assert (TREE_CODE (scalar_dest) == SSA_NAME);
5059 name = get_name (scalar_dest);
5060 if (name)
5061 new_name = xasprintf ("%s_%u", name, SSA_NAME_VERSION (scalar_dest));
5062 else
5063 new_name = xasprintf ("_%u", SSA_NAME_VERSION (scalar_dest));
5064 vec_dest = vect_get_new_vect_var (type, kind, new_name);
5065 free (new_name);
5067 return vec_dest;
5070 /* Function vect_grouped_store_supported.
5072 Returns TRUE if interleave high and interleave low permutations
5073 are supported, and FALSE otherwise. */
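/* A group size of 3 is checked with two specially constructed three-way
   VEC_PERM masks (and requires a constant number of vector elements);
   any other size must be a power of two and is checked via the
   interleave high/low patterns used by vect_permute_store_chain.  */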
5075 bool
5076 vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
5078 machine_mode mode = TYPE_MODE (vectype);
5080 /* vect_permute_store_chain requires the group size to be equal to 3 or
5081 be a power of two. */
5082 if (count != 3 && exact_log2 (count) == -1)
5084 if (dump_enabled_p ())
5085 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5086 "the size of the group of accesses"
5087 				 " is not a power of 2 or not equal to 3\n");
5088 return false;
5091 /* Check that the permutation is supported. */
5092 if (VECTOR_MODE_P (mode))
5094 unsigned int i;
5095 if (count == 3)
5097 unsigned int j0 = 0, j1 = 0, j2 = 0;
5098 unsigned int i, j;
5100 unsigned int nelt;
5101 if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
5103 if (dump_enabled_p ())
5104 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5105 "cannot handle groups of 3 stores for"
5106 " variable-length vectors\n");
5107 return false;
5110 vec_perm_builder sel (nelt, nelt, 1);
5111 sel.quick_grow (nelt);
5112 vec_perm_indices indices;
5113 for (j = 0; j < 3; j++)
5115 int nelt0 = ((3 - j) * nelt) % 3;
5116 int nelt1 = ((3 - j) * nelt + 1) % 3;
5117 int nelt2 = ((3 - j) * nelt + 2) % 3;
5118 for (i = 0; i < nelt; i++)
5120 if (3 * i + nelt0 < nelt)
5121 sel[3 * i + nelt0] = j0++;
5122 if (3 * i + nelt1 < nelt)
5123 sel[3 * i + nelt1] = nelt + j1++;
5124 if (3 * i + nelt2 < nelt)
5125 sel[3 * i + nelt2] = 0;
5127 indices.new_vector (sel, 2, nelt);
5128 if (!can_vec_perm_const_p (mode, indices))
5130 if (dump_enabled_p ())
5131 dump_printf (MSG_MISSED_OPTIMIZATION,
5132 "permutation op not supported by target.\n");
5133 return false;
5136 for (i = 0; i < nelt; i++)
5138 if (3 * i + nelt0 < nelt)
5139 sel[3 * i + nelt0] = 3 * i + nelt0;
5140 if (3 * i + nelt1 < nelt)
5141 sel[3 * i + nelt1] = 3 * i + nelt1;
5142 if (3 * i + nelt2 < nelt)
5143 sel[3 * i + nelt2] = nelt + j2++;
5145 indices.new_vector (sel, 2, nelt);
5146 if (!can_vec_perm_const_p (mode, indices))
5148 if (dump_enabled_p ())
5149 dump_printf (MSG_MISSED_OPTIMIZATION,
5150 "permutation op not supported by target.\n");
5151 return false;
5154 return true;
5156 else
5158 /* If length is not equal to 3 then only power of 2 is supported. */
5159 gcc_assert (pow2p_hwi (count));
5160 poly_uint64 nelt = GET_MODE_NUNITS (mode);
5162 /* The encoding has 2 interleaved stepped patterns. */
5163 vec_perm_builder sel (nelt, 2, 3);
5164 sel.quick_grow (6);
5165 for (i = 0; i < 3; i++)
5167 sel[i * 2] = i;
5168 sel[i * 2 + 1] = i + nelt;
5170 vec_perm_indices indices (sel, 2, nelt);
5171 if (can_vec_perm_const_p (mode, indices))
5173 for (i = 0; i < 6; i++)
5174 sel[i] += exact_div (nelt, 2);
5175 indices.new_vector (sel, 2, nelt);
5176 if (can_vec_perm_const_p (mode, indices))
5177 return true;
5182 if (dump_enabled_p ())
5183 dump_printf (MSG_MISSED_OPTIMIZATION,
5184 "permutation op not supported by target.\n");
5185 return false;
5189 /* Return TRUE if vec_{mask_}store_lanes is available for COUNT vectors of
5190 type VECTYPE. MASKED_P says whether the masked form is needed. */
5192 bool
5193 vect_store_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count,
5194 bool masked_p)
5196 if (masked_p)
5197 return vect_lanes_optab_supported_p ("vec_mask_store_lanes",
5198 vec_mask_store_lanes_optab,
5199 vectype, count);
5200 else
5201 return vect_lanes_optab_supported_p ("vec_store_lanes",
5202 vec_store_lanes_optab,
5203 vectype, count);
5207 /* Function vect_permute_store_chain.
5209 Given a chain of interleaved stores in DR_CHAIN of LENGTH that must be
5210 a power of 2 or equal to 3, generate interleave_high/low stmts to reorder
5211 the data correctly for the stores. Return the final references for stores
5212 in RESULT_CHAIN.
5214 E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
5215 The input is 4 vectors each containing 8 elements. We assign a number to
5216 each element, the input sequence is:
5218 1st vec: 0 1 2 3 4 5 6 7
5219 2nd vec: 8 9 10 11 12 13 14 15
5220 3rd vec: 16 17 18 19 20 21 22 23
5221 4th vec: 24 25 26 27 28 29 30 31
5223 The output sequence should be:
5225 1st vec: 0 8 16 24 1 9 17 25
5226 2nd vec: 2 10 18 26 3 11 19 27
5227    3rd vec: 4 12 20 28 5 13 21 29
5228 4th vec: 6 14 22 30 7 15 23 31
5230 i.e., we interleave the contents of the four vectors in their order.
5232 We use interleave_high/low instructions to create such output. The input of
5233 each interleave_high/low operation is two vectors:
5234 1st vec 2nd vec
5235 0 1 2 3 4 5 6 7
5236 the even elements of the result vector are obtained left-to-right from the
5237 high/low elements of the first vector. The odd elements of the result are
5238 obtained left-to-right from the high/low elements of the second vector.
5239 The output of interleave_high will be: 0 4 1 5
5240 and of interleave_low: 2 6 3 7
5243 The permutation is done in log LENGTH stages. In each stage interleave_high
5244 and interleave_low stmts are created for each pair of vectors in DR_CHAIN,
5245 where the first argument is taken from the first half of DR_CHAIN and the
5246    second argument from its second half.
5247 In our example,
5249 I1: interleave_high (1st vec, 3rd vec)
5250 I2: interleave_low (1st vec, 3rd vec)
5251 I3: interleave_high (2nd vec, 4th vec)
5252 I4: interleave_low (2nd vec, 4th vec)
5254 The output for the first stage is:
5256 I1: 0 16 1 17 2 18 3 19
5257 I2: 4 20 5 21 6 22 7 23
5258 I3: 8 24 9 25 10 26 11 27
5259 I4: 12 28 13 29 14 30 15 31
5261 The output of the second stage, i.e. the final result is:
5263 I1: 0 8 16 24 1 9 17 25
5264 I2: 2 10 18 26 3 11 19 27
5265 I3: 4 12 20 28 5 13 21 29
5266 I4: 6 14 22 30 7 15 23 31. */
5268 void
5269 vect_permute_store_chain (vec<tree> dr_chain,
5270 unsigned int length,
5271 stmt_vec_info stmt_info,
5272 gimple_stmt_iterator *gsi,
5273 vec<tree> *result_chain)
5275 tree vect1, vect2, high, low;
5276 gimple *perm_stmt;
5277 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5278 tree perm_mask_low, perm_mask_high;
5279 tree data_ref;
5280 tree perm3_mask_low, perm3_mask_high;
5281 unsigned int i, j, n, log_length = exact_log2 (length);
5283 result_chain->quick_grow (length);
5284 memcpy (result_chain->address (), dr_chain.address (),
5285 length * sizeof (tree));
5287 if (length == 3)
5289 /* vect_grouped_store_supported ensures that this is constant. */
5290 unsigned int nelt = TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
5291 unsigned int j0 = 0, j1 = 0, j2 = 0;
5293 vec_perm_builder sel (nelt, nelt, 1);
5294 sel.quick_grow (nelt);
5295 vec_perm_indices indices;
5296 for (j = 0; j < 3; j++)
5298 int nelt0 = ((3 - j) * nelt) % 3;
5299 int nelt1 = ((3 - j) * nelt + 1) % 3;
5300 int nelt2 = ((3 - j) * nelt + 2) % 3;
5302 for (i = 0; i < nelt; i++)
5304 if (3 * i + nelt0 < nelt)
5305 sel[3 * i + nelt0] = j0++;
5306 if (3 * i + nelt1 < nelt)
5307 sel[3 * i + nelt1] = nelt + j1++;
5308 if (3 * i + nelt2 < nelt)
5309 sel[3 * i + nelt2] = 0;
5311 indices.new_vector (sel, 2, nelt);
5312 perm3_mask_low = vect_gen_perm_mask_checked (vectype, indices);
5314 for (i = 0; i < nelt; i++)
5316 if (3 * i + nelt0 < nelt)
5317 sel[3 * i + nelt0] = 3 * i + nelt0;
5318 if (3 * i + nelt1 < nelt)
5319 sel[3 * i + nelt1] = 3 * i + nelt1;
5320 if (3 * i + nelt2 < nelt)
5321 sel[3 * i + nelt2] = nelt + j2++;
5323 indices.new_vector (sel, 2, nelt);
5324 perm3_mask_high = vect_gen_perm_mask_checked (vectype, indices);
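/* Worked example (illustrative, for nelt == 4 on the first iteration j == 0,
   with A = dr_chain[0], B = dr_chain[1], C = dr_chain[2]):
   perm3_mask_low  is { 0, 4, 0, 1 }  (lane 2 is a don't-care placeholder)
   perm3_mask_high is { 0, 1, 4, 3 }
   so the two VEC_PERM_EXPRs built below produce
   tmp             = { A0, B0, A0, A1 }
   result_chain[0] = { A0, B0, C0, A1 }
   which is the first quarter of the interleaved stream
   A0 B0 C0 A1 B1 C1 A2 B2 C2 A3 B3 C3.  */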
5326 vect1 = dr_chain[0];
5327 vect2 = dr_chain[1];
5329 /* Create interleaving stmt:
5330 low = VEC_PERM_EXPR <vect1, vect2,
5331 {j, nelt, *, j + 1, nelt + j + 1, *,
5332 j + 2, nelt + j + 2, *, ...}> */
5333 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_low");
5334 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect1,
5335 vect2, perm3_mask_low);
5336 vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
5338 vect1 = data_ref;
5339 vect2 = dr_chain[2];
5340 /* Create interleaving stmt:
5341 low = VEC_PERM_EXPR <vect1, vect2,
5342 {0, 1, nelt + j, 3, 4, nelt + j + 1,
5343 6, 7, nelt + j + 2, ...}> */
5344 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_high");
5345 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect1,
5346 vect2, perm3_mask_high);
5347 vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
5348 (*result_chain)[j] = data_ref;
5351 else
5353 /* If length is not equal to 3 then only a power of 2 is supported. */
5354 gcc_assert (pow2p_hwi (length));
5356 /* The encoding has 2 interleaved stepped patterns. */
5357 poly_uint64 nelt = TYPE_VECTOR_SUBPARTS (vectype);
5358 vec_perm_builder sel (nelt, 2, 3);
5359 sel.quick_grow (6);
5360 for (i = 0; i < 3; i++)
5362 sel[i * 2] = i;
5363 sel[i * 2 + 1] = i + nelt;
5365 vec_perm_indices indices (sel, 2, nelt);
5366 perm_mask_high = vect_gen_perm_mask_checked (vectype, indices);
5368 for (i = 0; i < 6; i++)
5369 sel[i] += exact_div (nelt, 2);
5370 indices.new_vector (sel, 2, nelt);
5371 perm_mask_low = vect_gen_perm_mask_checked (vectype, indices);
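/* E.g., for nelt == 8 (the example in the function comment above):
   perm_mask_high = { 0, 8, 1, 9, 2, 10, 3, 11 }
   perm_mask_low  = { 4, 12, 5, 13, 6, 14, 7, 15 }
   so in the first stage, for j == 0, vect1 = dr_chain[0] = {0 ... 7} and
   vect2 = dr_chain[2] = {16 ... 23} give
   high = { 0, 16, 1, 17, 2, 18, 3, 19 }   (I1 above)
   low  = { 4, 20, 5, 21, 6, 22, 7, 23 }   (I2 above).  */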
5373 for (i = 0, n = log_length; i < n; i++)
5375 for (j = 0; j < length/2; j++)
5377 vect1 = dr_chain[j];
5378 vect2 = dr_chain[j+length/2];
5380 /* Create interleaving stmt:
5381 high = VEC_PERM_EXPR <vect1, vect2, {0, nelt, 1, nelt+1,
5382 ...}> */
5383 high = make_temp_ssa_name (vectype, NULL, "vect_inter_high");
5384 perm_stmt = gimple_build_assign (high, VEC_PERM_EXPR, vect1,
5385 vect2, perm_mask_high);
5386 vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
5387 (*result_chain)[2*j] = high;
5389 /* Create interleaving stmt:
5390 low = VEC_PERM_EXPR <vect1, vect2,
5391 {nelt/2, nelt*3/2, nelt/2+1, nelt*3/2+1,
5392 ...}> */
5393 low = make_temp_ssa_name (vectype, NULL, "vect_inter_low");
5394 perm_stmt = gimple_build_assign (low, VEC_PERM_EXPR, vect1,
5395 vect2, perm_mask_low);
5396 vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
5397 (*result_chain)[2*j+1] = low;
5399 memcpy (dr_chain.address (), result_chain->address (),
5400 length * sizeof (tree));
5405 /* Function vect_setup_realignment
5407 This function is called when vectorizing an unaligned load using
5408 the dr_explicit_realign[_optimized] scheme.
5409 This function generates the following code at the loop prolog:
5411 p = initial_addr;
5412 x msq_init = *(floor(p)); # prolog load
5413 realignment_token = call target_builtin;
5414 loop:
5415 x msq = phi (msq_init, ---)
5417 The stmts marked with x are generated only for the case of
5418 dr_explicit_realign_optimized.
5420 The code above sets up a new (vector) pointer, pointing to the first
5421 location accessed by STMT_INFO, and a "floor-aligned" load using that
5422 pointer. It also generates code to compute the "realignment-token"
5423 (if the relevant target hook was defined), and creates a phi-node at the
5424 loop-header bb whose arguments are the result of the prolog-load (created
5425 by this function) and the result of a load that takes place in the loop
5426 (to be created by the caller to this function).
5428 For the case of dr_explicit_realign_optimized:
5429 The caller to this function uses the phi-result (msq) to create the
5430 realignment code inside the loop, and sets up the missing phi argument,
5431 as follows:
5432 loop:
5433 msq = phi (msq_init, lsq)
5434 lsq = *(floor(p')); # load in loop
5435 result = realign_load (msq, lsq, realignment_token);
5437 For the case of dr_explicit_realign:
5438 loop:
5439 msq = *(floor(p)); # load in loop
5440 p' = p + (VS-1);
5441 lsq = *(floor(p')); # load in loop
5442 result = realign_load (msq, lsq, realignment_token);
5444 Input:
5445 STMT_INFO - (scalar) load stmt to be vectorized. This load accesses
5446 a memory location that may be unaligned.
5447 GSI - place where the new code is to be inserted.
5448 ALIGNMENT_SUPPORT_SCHEME - which of the two misalignment handling schemes
5449 is used.
5451 Output:
5452 REALIGNMENT_TOKEN - the result of a call to the builtin_mask_for_load
5453 target hook, if defined.
5454 Return value - the result of the loop-header phi node. */
5456 tree
5457 vect_setup_realignment (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
5458 tree *realignment_token,
5459 enum dr_alignment_support alignment_support_scheme,
5460 tree init_addr,
5461 class loop **at_loop)
5463 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5464 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5465 dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
5466 struct data_reference *dr = dr_info->dr;
5467 class loop *loop = NULL;
5468 edge pe = NULL;
5469 tree scalar_dest = gimple_assign_lhs (stmt_info->stmt);
5470 tree vec_dest;
5471 gimple *inc;
5472 tree ptr;
5473 tree data_ref;
5474 basic_block new_bb;
5475 tree msq_init = NULL_TREE;
5476 tree new_temp;
5477 gphi *phi_stmt;
5478 tree msq = NULL_TREE;
5479 gimple_seq stmts = NULL;
5480 bool compute_in_loop = false;
5481 bool nested_in_vect_loop = false;
5482 class loop *containing_loop = (gimple_bb (stmt_info->stmt))->loop_father;
5483 class loop *loop_for_initial_load = NULL;
5485 if (loop_vinfo)
5487 loop = LOOP_VINFO_LOOP (loop_vinfo);
5488 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt_info);
5491 gcc_assert (alignment_support_scheme == dr_explicit_realign
5492 || alignment_support_scheme == dr_explicit_realign_optimized);
5494 /* We need to generate three things:
5495 1. the misalignment computation
5496 2. the extra vector load (for the optimized realignment scheme).
5497 3. the phi node for the two vectors from which the realignment is
5498 done (for the optimized realignment scheme). */
5500 /* 1. Determine where to generate the misalignment computation.
5502 If INIT_ADDR is NULL_TREE, this indicates that the misalignment
5503 calculation will be generated by this function, outside the loop (in the
5504 preheader). Otherwise, INIT_ADDR has already been computed for us by the
5505 caller, inside the loop.
5507 Background: If the misalignment remains fixed throughout the iterations of
5508 the loop, then both realignment schemes are applicable, and also the
5509 misalignment computation can be done outside LOOP. This is because we are
5510 vectorizing LOOP, and so the memory accesses in LOOP advance in steps that
5511 are a multiple of VS (the Vector Size), and therefore the misalignment in
5512 different vectorized LOOP iterations is always the same.
5513 The problem arises only if the memory access is in an inner-loop nested
5514 inside LOOP, which is now being vectorized using outer-loop vectorization.
5515 This is the only case when the misalignment of the memory access may not
5516 remain fixed throughout the iterations of the inner-loop (as explained in
5517 detail in vect_supportable_dr_alignment). In this case, not only is the
5518 optimized realignment scheme not applicable, but also the misalignment
5519 computation (and generation of the realignment token that is passed to
5520 REALIGN_LOAD) have to be done inside the loop.
5522 In short, INIT_ADDR indicates whether we are in a COMPUTE_IN_LOOP mode
5523 or not, which in turn determines if the misalignment is computed inside
5524 the inner-loop, or outside LOOP. */
5526 if (init_addr != NULL_TREE || !loop_vinfo)
5528 compute_in_loop = true;
5529 gcc_assert (alignment_support_scheme == dr_explicit_realign);
5533 /* 2. Determine where to generate the extra vector load.
5535 For the optimized realignment scheme, instead of generating two vector
5536 loads in each iteration, we generate a single extra vector load in the
5537 preheader of the loop, and in each iteration reuse the result of the
5538 vector load from the previous iteration. In case the memory access is in
5539 an inner-loop nested inside LOOP, which is now being vectorized using
5540 outer-loop vectorization, we need to determine whether this initial vector
5541 load should be generated at the preheader of the inner-loop, or can be
5542 generated at the preheader of LOOP. If the memory access has no evolution
5543 in LOOP, it can be generated in the preheader of LOOP. Otherwise, it has
5544 to be generated inside LOOP (in the preheader of the inner-loop). */
5546 if (nested_in_vect_loop)
5548 tree outerloop_step = STMT_VINFO_DR_STEP (stmt_info);
5549 bool invariant_in_outerloop =
5550 (tree_int_cst_compare (outerloop_step, size_zero_node) == 0);
5551 loop_for_initial_load = (invariant_in_outerloop ? loop : loop->inner);
5553 else
5554 loop_for_initial_load = loop;
5555 if (at_loop)
5556 *at_loop = loop_for_initial_load;
5558 if (loop_for_initial_load)
5559 pe = loop_preheader_edge (loop_for_initial_load);
5561 /* 3. For the case of the optimized realignment, create the first vector
5562 load at the loop preheader. */
5564 if (alignment_support_scheme == dr_explicit_realign_optimized)
5566 /* Create msq_init = *(floor(p1)) in the loop preheader */
5567 gassign *new_stmt;
5569 gcc_assert (!compute_in_loop);
5570 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5571 ptr = vect_create_data_ref_ptr (stmt_info, vectype,
5572 loop_for_initial_load, NULL_TREE,
5573 &init_addr, NULL, &inc, true);
5574 if (TREE_CODE (ptr) == SSA_NAME)
5575 new_temp = copy_ssa_name (ptr);
5576 else
5577 new_temp = make_ssa_name (TREE_TYPE (ptr));
5578 poly_uint64 align = DR_TARGET_ALIGNMENT (dr_info);
5579 tree type = TREE_TYPE (ptr);
5580 new_stmt = gimple_build_assign
5581 (new_temp, BIT_AND_EXPR, ptr,
5582 fold_build2 (MINUS_EXPR, type,
5583 build_int_cst (type, 0),
5584 build_int_cst (type, align)));
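/* The statement above computes new_temp = ptr & -align, i.e. it rounds PTR
   down to the target alignment (the "floor (p)" of the function comment).
   E.g., for a target alignment of 16 bytes the mask is ...fffffff0,
   clearing the low four address bits.  */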
5585 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
5586 gcc_assert (!new_bb);
5587 data_ref
5588 = build2 (MEM_REF, TREE_TYPE (vec_dest), new_temp,
5589 build_int_cst (reference_alias_ptr_type (DR_REF (dr)), 0));
5590 vect_copy_ref_info (data_ref, DR_REF (dr));
5591 new_stmt = gimple_build_assign (vec_dest, data_ref);
5592 new_temp = make_ssa_name (vec_dest, new_stmt);
5593 gimple_assign_set_lhs (new_stmt, new_temp);
5594 if (pe)
5596 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
5597 gcc_assert (!new_bb);
5599 else
5600 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
5602 msq_init = gimple_assign_lhs (new_stmt);
5605 /* 4. Create realignment token using a target builtin, if available.
5606 It is done either inside the containing loop, or before LOOP (as
5607 determined above). */
5609 if (targetm.vectorize.builtin_mask_for_load)
5611 gcall *new_stmt;
5612 tree builtin_decl;
5614 /* Compute INIT_ADDR - the initial address accessed by this memref. */
5615 if (!init_addr)
5617 /* Generate the INIT_ADDR computation outside LOOP. */
5618 init_addr = vect_create_addr_base_for_vector_ref (stmt_info, &stmts,
5619 NULL_TREE);
5620 if (loop)
5622 pe = loop_preheader_edge (loop);
5623 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
5624 gcc_assert (!new_bb);
5626 else
5627 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
5630 builtin_decl = targetm.vectorize.builtin_mask_for_load ();
5631 new_stmt = gimple_build_call (builtin_decl, 1, init_addr);
5632 vec_dest =
5633 vect_create_destination_var (scalar_dest,
5634 gimple_call_return_type (new_stmt));
5635 new_temp = make_ssa_name (vec_dest, new_stmt);
5636 gimple_call_set_lhs (new_stmt, new_temp);
5638 if (compute_in_loop)
5639 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
5640 else
5642 /* Generate the misalignment computation outside LOOP. */
5643 pe = loop_preheader_edge (loop);
5644 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
5645 gcc_assert (!new_bb);
5648 *realignment_token = gimple_call_lhs (new_stmt);
5650 /* The result of the CALL_EXPR to this builtin is determined from
5651 the value of the parameter and no global variables are touched
5652 which makes the builtin a "const" function. Requiring the
5653 builtin to have the "const" attribute makes it unnecessary
5654 to call mark_call_clobbered. */
5655 gcc_assert (TREE_READONLY (builtin_decl));
5658 if (alignment_support_scheme == dr_explicit_realign)
5659 return msq;
5661 gcc_assert (!compute_in_loop);
5662 gcc_assert (alignment_support_scheme == dr_explicit_realign_optimized);
5665 /* 5. Create msq = phi <msq_init, lsq> in loop */
5667 pe = loop_preheader_edge (containing_loop);
5668 vec_dest = vect_create_destination_var (scalar_dest, vectype);
5669 msq = make_ssa_name (vec_dest);
5670 phi_stmt = create_phi_node (msq, containing_loop->header);
5671 add_phi_arg (phi_stmt, msq_init, pe, UNKNOWN_LOCATION);
5673 return msq;
5677 /* Function vect_grouped_load_supported.
5679 COUNT is the size of the load group (the number of statements plus the
5680 number of gaps). SINGLE_ELEMENT_P is true if there is actually
5681 only one statement, with a gap of COUNT - 1.
5683 Returns true if a suitable permute exists. */
5685 bool
5686 vect_grouped_load_supported (tree vectype, bool single_element_p,
5687 unsigned HOST_WIDE_INT count)
5689 machine_mode mode = TYPE_MODE (vectype);
5691 /* If this is single-element interleaving with an element distance
5692 that leaves unused vector loads around punt - we at least create
5693 very sub-optimal code in that case (and blow up memory,
5694 see PR65518). */
5695 if (single_element_p && maybe_gt (count, TYPE_VECTOR_SUBPARTS (vectype)))
5697 if (dump_enabled_p ())
5698 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5699 "single-element interleaving not supported "
5700 "for not adjacent vector loads\n");
5701 return false;
5704 /* vect_permute_load_chain requires the group size to be equal to 3 or
5705 be a power of two. */
5706 if (count != 3 && exact_log2 (count) == -1)
5708 if (dump_enabled_p ())
5709 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5710 "the size of the group of accesses"
5711 " is not a power of 2 or not equal to 3\n");
5712 return false;
5715 /* Check that the permutation is supported. */
5716 if (VECTOR_MODE_P (mode))
5718 unsigned int i, j;
5719 if (count == 3)
5721 unsigned int nelt;
5722 if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
5724 if (dump_enabled_p ())
5725 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5726 "cannot handle groups of 3 loads for"
5727 " variable-length vectors\n");
5728 return false;
5731 vec_perm_builder sel (nelt, nelt, 1);
5732 sel.quick_grow (nelt);
5733 vec_perm_indices indices;
5734 unsigned int k;
5735 for (k = 0; k < 3; k++)
5737 for (i = 0; i < nelt; i++)
5738 if (3 * i + k < 2 * nelt)
5739 sel[i] = 3 * i + k;
5740 else
5741 sel[i] = 0;
5742 indices.new_vector (sel, 2, nelt);
5743 if (!can_vec_perm_const_p (mode, indices))
5745 if (dump_enabled_p ())
5746 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5747 "shuffle of 3 loads is not supported by"
5748 " target\n");
5749 return false;
5751 for (i = 0, j = 0; i < nelt; i++)
5752 if (3 * i + k < 2 * nelt)
5753 sel[i] = i;
5754 else
5755 sel[i] = nelt + ((nelt + k) % 3) + 3 * (j++);
5756 indices.new_vector (sel, 2, nelt);
5757 if (!can_vec_perm_const_p (mode, indices))
5759 if (dump_enabled_p ())
5760 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5761 "shuffle of 3 loads is not supported by"
5762 " target\n");
5763 return false;
5766 return true;
5768 else
5770 /* If length is not equal to 3 then only a power of 2 is supported. */
5771 gcc_assert (pow2p_hwi (count));
5772 poly_uint64 nelt = GET_MODE_NUNITS (mode);
5774 /* The encoding has a single stepped pattern. */
5775 vec_perm_builder sel (nelt, 1, 3);
5776 sel.quick_grow (3);
5777 for (i = 0; i < 3; i++)
5778 sel[i] = i * 2;
5779 vec_perm_indices indices (sel, 2, nelt);
5780 if (can_vec_perm_const_p (mode, indices))
5782 for (i = 0; i < 3; i++)
5783 sel[i] = i * 2 + 1;
5784 indices.new_vector (sel, 2, nelt);
5785 if (can_vec_perm_const_p (mode, indices))
5786 return true;
5791 if (dump_enabled_p ())
5792 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5793 "extract even/odd not supported by target\n");
5794 return false;
5797 /* Return TRUE if vec_{mask_}load_lanes is available for COUNT vectors of
5798 type VECTYPE. MASKED_P says whether the masked form is needed. */
5800 bool
5801 vect_load_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count,
5802 bool masked_p)
5804 if (masked_p)
5805 return vect_lanes_optab_supported_p ("vec_mask_load_lanes",
5806 vec_mask_load_lanes_optab,
5807 vectype, count);
5808 else
5809 return vect_lanes_optab_supported_p ("vec_load_lanes",
5810 vec_load_lanes_optab,
5811 vectype, count);
5814 /* Function vect_permute_load_chain.
5816 Given a chain of interleaved loads in DR_CHAIN of LENGTH that must be
5817 a power of 2 or equal to 3, generate extract_even/odd stmts to reorder
5818 the input data correctly. Return the final references for loads in
5819 RESULT_CHAIN.
5821 E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
5822 The input is 4 vectors each containing 8 elements. We assign a number to each
5823 element, the input sequence is:
5825 1st vec: 0 1 2 3 4 5 6 7
5826 2nd vec: 8 9 10 11 12 13 14 15
5827 3rd vec: 16 17 18 19 20 21 22 23
5828 4th vec: 24 25 26 27 28 29 30 31
5830 The output sequence should be:
5832 1st vec: 0 4 8 12 16 20 24 28
5833 2nd vec: 1 5 9 13 17 21 25 29
5834 3rd vec: 2 6 10 14 18 22 26 30
5835 4th vec: 3 7 11 15 19 23 27 31
5837 i.e., the first output vector should contain the first elements of each
5838 interleaving group, etc.
5840 We use extract_even/odd instructions to create such output. The input of
5841 each extract_even/odd operation is two vectors
5842 1st vec 2nd vec
5843 0 1 2 3 4 5 6 7
5845 and the output is the vector of extracted even/odd elements. The output of
5846 extract_even will be: 0 2 4 6
5847 and of extract_odd: 1 3 5 7
5850 The permutation is done in log LENGTH stages. In each stage extract_even
5851 and extract_odd stmts are created for each pair of vectors in DR_CHAIN in
5852 their order. In our example,
5854 E1: extract_even (1st vec, 2nd vec)
5855 E2: extract_odd (1st vec, 2nd vec)
5856 E3: extract_even (3rd vec, 4th vec)
5857 E4: extract_odd (3rd vec, 4th vec)
5859 The output for the first stage will be:
5861 E1: 0 2 4 6 8 10 12 14
5862 E2: 1 3 5 7 9 11 13 15
5863 E3: 16 18 20 22 24 26 28 30
5864 E4: 17 19 21 23 25 27 29 31
5866 In order to proceed and create the correct sequence for the next stage (or
5867 for the correct output, if the second stage is the last one, as in our
5868 example), we first put the output of extract_even operation and then the
5869 output of extract_odd in RESULT_CHAIN (which is then copied to DR_CHAIN).
5870 The input for the second stage is:
5872 1st vec (E1): 0 2 4 6 8 10 12 14
5873 2nd vec (E3): 16 18 20 22 24 26 28 30
5874 3rd vec (E2): 1 3 5 7 9 11 13 15
5875 4th vec (E4): 17 19 21 23 25 27 29 31
5877 The output of the second stage:
5879 E1: 0 4 8 12 16 20 24 28
5880 E2: 2 6 10 14 18 22 26 30
5881 E3: 1 5 9 13 17 21 25 29
5882 E4: 3 7 11 15 19 23 27 31
5884 And RESULT_CHAIN after reordering:
5886 1st vec (E1): 0 4 8 12 16 20 24 28
5887 2nd vec (E3): 1 5 9 13 17 21 25 29
5888 3rd vec (E2): 2 6 10 14 18 22 26 30
5889 4th vec (E4): 3 7 11 15 19 23 27 31. */
5891 static void
5892 vect_permute_load_chain (vec<tree> dr_chain,
5893 unsigned int length,
5894 stmt_vec_info stmt_info,
5895 gimple_stmt_iterator *gsi,
5896 vec<tree> *result_chain)
5898 tree data_ref, first_vect, second_vect;
5899 tree perm_mask_even, perm_mask_odd;
5900 tree perm3_mask_low, perm3_mask_high;
5901 gimple *perm_stmt;
5902 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5903 unsigned int i, j, log_length = exact_log2 (length);
5905 result_chain->quick_grow (length);
5906 memcpy (result_chain->address (), dr_chain.address (),
5907 length * sizeof (tree));
5909 if (length == 3)
5911 /* vect_grouped_load_supported ensures that this is constant. */
5912 unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
5913 unsigned int k;
5915 vec_perm_builder sel (nelt, nelt, 1);
5916 sel.quick_grow (nelt);
5917 vec_perm_indices indices;
5918 for (k = 0; k < 3; k++)
5920 for (i = 0; i < nelt; i++)
5921 if (3 * i + k < 2 * nelt)
5922 sel[i] = 3 * i + k;
5923 else
5924 sel[i] = 0;
5925 indices.new_vector (sel, 2, nelt);
5926 perm3_mask_low = vect_gen_perm_mask_checked (vectype, indices);
5928 for (i = 0, j = 0; i < nelt; i++)
5929 if (3 * i + k < 2 * nelt)
5930 sel[i] = i;
5931 else
5932 sel[i] = nelt + ((nelt + k) % 3) + 3 * (j++);
5933 indices.new_vector (sel, 2, nelt);
5934 perm3_mask_high = vect_gen_perm_mask_checked (vectype, indices);
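/* Worked example (illustrative, for nelt == 4 on the first iteration k == 0,
   with A = dr_chain[0], B = dr_chain[1], C = dr_chain[2]):
   perm3_mask_low  is { 0, 3, 6, 0 }  (lane 3 is a don't-care placeholder)
   perm3_mask_high is { 0, 1, 2, 5 }
   so the two VEC_PERM_EXPRs built below produce
   tmp             = { A0, A3, B2, A0 }
   result_chain[0] = { A0, A3, B2, C1 }
   i.e. every third element of the stream A0 A1 A2 A3 B0 ... C3,
   starting at element k.  */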
5936 first_vect = dr_chain[0];
5937 second_vect = dr_chain[1];
5939 /* Create interleaving stmt (low part of):
5940 low = VEC_PERM_EXPR <first_vect, second_vect, {k, 3 + k, 6 + k,
5941 ...}> */
5942 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_low");
5943 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, first_vect,
5944 second_vect, perm3_mask_low);
5945 vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
5947 /* Create interleaving stmt (high part of):
5948 high = VEC_PERM_EXPR <first_vect, second_vect, {0, 1, 2, ...,
5949 nelt + ((nelt + k) % 3), ...}> */
5950 first_vect = data_ref;
5951 second_vect = dr_chain[2];
5952 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_high");
5953 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, first_vect,
5954 second_vect, perm3_mask_high);
5955 vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
5956 (*result_chain)[k] = data_ref;
5959 else
5961 /* If length is not equal to 3 then only a power of 2 is supported. */
5962 gcc_assert (pow2p_hwi (length));
5964 /* The encoding has a single stepped pattern. */
5965 poly_uint64 nelt = TYPE_VECTOR_SUBPARTS (vectype);
5966 vec_perm_builder sel (nelt, 1, 3);
5967 sel.quick_grow (3);
5968 for (i = 0; i < 3; ++i)
5969 sel[i] = i * 2;
5970 vec_perm_indices indices (sel, 2, nelt);
5971 perm_mask_even = vect_gen_perm_mask_checked (vectype, indices);
5973 for (i = 0; i < 3; ++i)
5974 sel[i] = i * 2 + 1;
5975 indices.new_vector (sel, 2, nelt);
5976 perm_mask_odd = vect_gen_perm_mask_checked (vectype, indices);
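/* E.g., for nelt == 8 (the example in the function comment above):
   perm_mask_even = { 0, 2, 4, 6, 8, 10, 12, 14 }
   perm_mask_odd  = { 1, 3, 5, 7, 9, 11, 13, 15 }
   so in the first stage, first_vect = {0 ... 7} and second_vect = {8 ... 15}
   give E1 = { 0, 2, 4, 6, 8, 10, 12, 14 } and E2 = { 1, 3, 5, 7, 9, 11, 13, 15 },
   matching the example above.  */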
5978 for (i = 0; i < log_length; i++)
5980 for (j = 0; j < length; j += 2)
5982 first_vect = dr_chain[j];
5983 second_vect = dr_chain[j+1];
5985 /* data_ref = permute_even (first_data_ref, second_data_ref); */
5986 data_ref = make_temp_ssa_name (vectype, NULL, "vect_perm_even");
5987 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5988 first_vect, second_vect,
5989 perm_mask_even);
5990 vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
5991 (*result_chain)[j/2] = data_ref;
5993 /* data_ref = permute_odd (first_data_ref, second_data_ref); */
5994 data_ref = make_temp_ssa_name (vectype, NULL, "vect_perm_odd");
5995 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5996 first_vect, second_vect,
5997 perm_mask_odd);
5998 vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
5999 (*result_chain)[j/2+length/2] = data_ref;
6001 memcpy (dr_chain.address (), result_chain->address (),
6002 length * sizeof (tree));
6007 /* Function vect_shift_permute_load_chain.
6009 Given a chain of loads in DR_CHAIN of LENGTH 2 or 3, generate
6010 a sequence of stmts to reorder the input data accordingly.
6011 Return the final references for loads in RESULT_CHAIN.
6012 Return true if successful, false otherwise.
6014 E.g., LENGTH is 3 and the scalar type is short, i.e., VF is 8.
6015 The input is 3 vectors each containing 8 elements. We assign a
6016 number to each element, the input sequence is:
6018 1st vec: 0 1 2 3 4 5 6 7
6019 2nd vec: 8 9 10 11 12 13 14 15
6020 3rd vec: 16 17 18 19 20 21 22 23
6022 The output sequence should be:
6024 1st vec: 0 3 6 9 12 15 18 21
6025 2nd vec: 1 4 7 10 13 16 19 22
6026 3rd vec: 2 5 8 11 14 17 20 23
6028 We use 3 shuffle instructions and 3 * 3 - 1 shifts to create such output.
6030 First we shuffle all 3 vectors to get the correct element order:
6032 1st vec: ( 0 3 6) ( 1 4 7) ( 2 5)
6033 2nd vec: ( 8 11 14) ( 9 12 15) (10 13)
6034 3rd vec: (16 19 22) (17 20 23) (18 21)
6036 Next we concatenate and shift the vectors 3 times:
6038 1st step:
6039 shift right by 6 the concatenation of:
6040 "1st vec" and "2nd vec"
6041 ( 0 3 6) ( 1 4 7) |( 2 5) _ ( 8 11 14) ( 9 12 15)| (10 13)
6042 "2nd vec" and "3rd vec"
6043 ( 8 11 14) ( 9 12 15) |(10 13) _ (16 19 22) (17 20 23)| (18 21)
6044 "3rd vec" and "1st vec"
6045 (16 19 22) (17 20 23) |(18 21) _ ( 0 3 6) ( 1 4 7)| ( 2 5)
6046 | New vectors |
6048 So that now new vectors are:
6050 1st vec: ( 2 5) ( 8 11 14) ( 9 12 15)
6051 2nd vec: (10 13) (16 19 22) (17 20 23)
6052 3rd vec: (18 21) ( 0 3 6) ( 1 4 7)
6054 2nd step:
6055 shift right by 5 the concatenation of:
6056 "1st vec" and "3rd vec"
6057 ( 2 5) ( 8 11 14) |( 9 12 15) _ (18 21) ( 0 3 6)| ( 1 4 7)
6058 "2nd vec" and "1st vec"
6059 (10 13) (16 19 22) |(17 20 23) _ ( 2 5) ( 8 11 14)| ( 9 12 15)
6060 "3rd vec" and "2nd vec"
6061 (18 21) ( 0 3 6) |( 1 4 7) _ (10 13) (16 19 22)| (17 20 23)
6062 | New vectors |
6064 So that now new vectors are:
6066 1st vec: ( 9 12 15) (18 21) ( 0 3 6)
6067 2nd vec: (17 20 23) ( 2 5) ( 8 11 14)
6068 3rd vec: ( 1 4 7) (10 13) (16 19 22) READY
6070 3rd step:
6071 shift right by 5 the concatenation of:
6072 "1st vec" and "1st vec"
6073 ( 9 12 15) (18 21) |( 0 3 6) _ ( 9 12 15) (18 21)| ( 0 3 6)
6074 shift right by 3 the concatenation of:
6075 "2nd vec" and "2nd vec"
6076 (17 20 23) |( 2 5) ( 8 11 14) _ (17 20 23)| ( 2 5) ( 8 11 14)
6077 | New vectors |
6079 So that now all vectors are READY:
6080 1st vec: ( 0 3 6) ( 9 12 15) (18 21)
6081 2nd vec: ( 2 5) ( 8 11 14) (17 20 23)
6082 3rd vec: ( 1 4 7) (10 13) (16 19 22)
6084 This algorithm is faster than the one in vect_permute_load_chain if:
6085 1. "shift of a concatenation" is faster than general permutation.
6086 This is usually so.
6087 2. The TARGET machine can't execute vector instructions in parallel.
6088 This is because each step of the algorithm depends on the previous one.
6089 The algorithm in vect_permute_load_chain is much more parallel.
6091 The algorithm is applicable only for LOAD CHAIN LENGTH less than VF.
6094 static bool
6095 vect_shift_permute_load_chain (vec<tree> dr_chain,
6096 unsigned int length,
6097 stmt_vec_info stmt_info,
6098 gimple_stmt_iterator *gsi,
6099 vec<tree> *result_chain)
6101 tree vect[3], vect_shift[3], data_ref, first_vect, second_vect;
6102 tree perm2_mask1, perm2_mask2, perm3_mask;
6103 tree select_mask, shift1_mask, shift2_mask, shift3_mask, shift4_mask;
6104 gimple *perm_stmt;
6106 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
6107 unsigned int i;
6108 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6110 unsigned HOST_WIDE_INT nelt, vf;
6111 if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nelt)
6112 || !LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
6113 /* Not supported for variable-length vectors. */
6114 return false;
6116 vec_perm_builder sel (nelt, nelt, 1);
6117 sel.quick_grow (nelt);
6119 result_chain->quick_grow (length);
6120 memcpy (result_chain->address (), dr_chain.address (),
6121 length * sizeof (tree));
6123 if (pow2p_hwi (length) && vf > 4)
6125 unsigned int j, log_length = exact_log2 (length);
6126 for (i = 0; i < nelt / 2; ++i)
6127 sel[i] = i * 2;
6128 for (i = 0; i < nelt / 2; ++i)
6129 sel[nelt / 2 + i] = i * 2 + 1;
6130 vec_perm_indices indices (sel, 2, nelt);
6131 if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
6133 if (dump_enabled_p ())
6134 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6135 "shuffle of 2 fields structure is not \
6136 supported by target\n");
6137 return false;
6139 perm2_mask1 = vect_gen_perm_mask_checked (vectype, indices);
6141 for (i = 0; i < nelt / 2; ++i)
6142 sel[i] = i * 2 + 1;
6143 for (i = 0; i < nelt / 2; ++i)
6144 sel[nelt / 2 + i] = i * 2;
6145 indices.new_vector (sel, 2, nelt);
6146 if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
6148 if (dump_enabled_p ())
6149 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6150 "shuffle of 2 fields structure is not \
6151 supported by target\n");
6152 return false;
6154 perm2_mask2 = vect_gen_perm_mask_checked (vectype, indices);
6156 /* Generating permutation constant to shift all elements.
6157 For vector length 8 it is {4 5 6 7 8 9 10 11}. */
6158 for (i = 0; i < nelt; i++)
6159 sel[i] = nelt / 2 + i;
6160 indices.new_vector (sel, 2, nelt);
6161 if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
6163 if (dump_enabled_p ())
6164 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6165 "shift permutation is not supported by target\n");
6166 return false;
6168 shift1_mask = vect_gen_perm_mask_checked (vectype, indices);
6170 /* Generating permutation constant to select vector from 2.
6171 For vector length 8 it is {0 1 2 3 12 13 14 15}. */
6172 for (i = 0; i < nelt / 2; i++)
6173 sel[i] = i;
6174 for (i = nelt / 2; i < nelt; i++)
6175 sel[i] = nelt + i;
6176 indices.new_vector (sel, 2, nelt);
6177 if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
6179 if (dump_enabled_p ())
6180 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6181 "select is not supported by target\n");
6182 return false;
6184 select_mask = vect_gen_perm_mask_checked (vectype, indices);
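/* E.g., for nelt == 8 the masks built above are
   perm2_mask1 = { 0, 2, 4, 6, 1, 3, 5, 7 }
   perm2_mask2 = { 1, 3, 5, 7, 0, 2, 4, 6 }
   shift1_mask = { 4, 5, 6, 7, 8, 9, 10, 11 }
   select_mask = { 0, 1, 2, 3, 12, 13, 14, 15 }
   so one iteration below, with first_vect = {0 ... 7} and
   second_vect = {8 ... 15}, computes
   vect[0] = { 0, 2, 4, 6, 1, 3, 5, 7 }
   vect[1] = { 9, 11, 13, 15, 8, 10, 12, 14 }
   shift   = { 1, 3, 5, 7, 9, 11, 13, 15 }    -> result_chain[j/2 + length/2]
   select  = { 0, 2, 4, 6, 8, 10, 12, 14 }    -> result_chain[j/2]
   i.e. the odd and even element streams of the pair.  */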
6186 for (i = 0; i < log_length; i++)
6188 for (j = 0; j < length; j += 2)
6190 first_vect = dr_chain[j];
6191 second_vect = dr_chain[j + 1];
6193 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle2");
6194 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
6195 first_vect, first_vect,
6196 perm2_mask1);
6197 vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
6198 vect[0] = data_ref;
6200 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle2");
6201 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
6202 second_vect, second_vect,
6203 perm2_mask2);
6204 vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
6205 vect[1] = data_ref;
6207 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift");
6208 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
6209 vect[0], vect[1], shift1_mask);
6210 vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
6211 (*result_chain)[j/2 + length/2] = data_ref;
6213 data_ref = make_temp_ssa_name (vectype, NULL, "vect_select");
6214 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
6215 vect[0], vect[1], select_mask);
6216 vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
6217 (*result_chain)[j/2] = data_ref;
6219 memcpy (dr_chain.address (), result_chain->address (),
6220 length * sizeof (tree));
6222 return true;
6224 if (length == 3 && vf > 2)
6226 unsigned int k = 0, l = 0;
6228 /* Generating permutation constant to get all elements in right order.
6229 For vector length 8 it is {0 3 6 1 4 7 2 5}. */
6230 for (i = 0; i < nelt; i++)
6232 if (3 * k + (l % 3) >= nelt)
6234 k = 0;
6235 l += (3 - (nelt % 3));
6237 sel[i] = 3 * k + (l % 3);
6238 k++;
6240 vec_perm_indices indices (sel, 2, nelt);
6241 if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
6243 if (dump_enabled_p ())
6244 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6245 "shuffle of 3 fields structure is not \
6246 supported by target\n");
6247 return false;
6249 perm3_mask = vect_gen_perm_mask_checked (vectype, indices);
6251 /* Generating permutation constant to shift all elements.
6252 For vector length 8 it is {6 7 8 9 10 11 12 13}. */
6253 for (i = 0; i < nelt; i++)
6254 sel[i] = 2 * (nelt / 3) + (nelt % 3) + i;
6255 indices.new_vector (sel, 2, nelt);
6256 if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
6258 if (dump_enabled_p ())
6259 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6260 "shift permutation is not supported by target\n");
6261 return false;
6263 shift1_mask = vect_gen_perm_mask_checked (vectype, indices);
6265 /* Generating permutation constant to shift all elements.
6266 For vector length 8 it is {5 6 7 8 9 10 11 12}. */
6267 for (i = 0; i < nelt; i++)
6268 sel[i] = 2 * (nelt / 3) + 1 + i;
6269 indices.new_vector (sel, 2, nelt);
6270 if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
6272 if (dump_enabled_p ())
6273 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6274 "shift permutation is not supported by target\n");
6275 return false;
6277 shift2_mask = vect_gen_perm_mask_checked (vectype, indices);
6279 /* Generating permutation constant to shift all elements.
6280 For vector length 8 it is {3 4 5 6 7 8 9 10}. */
6281 for (i = 0; i < nelt; i++)
6282 sel[i] = (nelt / 3) + (nelt % 3) / 2 + i;
6283 indices.new_vector (sel, 2, nelt);
6284 if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
6286 if (dump_enabled_p ())
6287 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6288 "shift permutation is not supported by target\n");
6289 return false;
6291 shift3_mask = vect_gen_perm_mask_checked (vectype, indices);
6293 /* Generating permutation constant to shift all elements.
6294 For vector length 8 it is {5 6 7 8 9 10 11 12}. */
6295 for (i = 0; i < nelt; i++)
6296 sel[i] = 2 * (nelt / 3) + (nelt % 3) / 2 + i;
6297 indices.new_vector (sel, 2, nelt);
6298 if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
6300 if (dump_enabled_p ())
6301 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6302 "shift permutation is not supported by target\n");
6303 return false;
6305 shift4_mask = vect_gen_perm_mask_checked (vectype, indices);
6307 for (k = 0; k < 3; k++)
6309 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3");
6310 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
6311 dr_chain[k], dr_chain[k],
6312 perm3_mask);
6313 vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
6314 vect[k] = data_ref;
6317 for (k = 0; k < 3; k++)
6319 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift1");
6320 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
6321 vect[k % 3], vect[(k + 1) % 3],
6322 shift1_mask);
6323 vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
6324 vect_shift[k] = data_ref;
6327 for (k = 0; k < 3; k++)
6329 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift2");
6330 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
6331 vect_shift[(4 - k) % 3],
6332 vect_shift[(3 - k) % 3],
6333 shift2_mask);
6334 vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
6335 vect[k] = data_ref;
6338 (*result_chain)[3 - (nelt % 3)] = vect[2];
6340 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift3");
6341 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect[0],
6342 vect[0], shift3_mask);
6343 vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
6344 (*result_chain)[nelt % 3] = data_ref;
6346 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift4");
6347 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect[1],
6348 vect[1], shift4_mask);
6349 vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
6350 (*result_chain)[0] = data_ref;
6351 return true;
6353 return false;
6356 /* Function vect_transform_grouped_load.
6358 Given a chain of input interleaved data-refs (in DR_CHAIN), build statements
6359 to perform their permutation and ascribe the resulting vectorized statements to
6360 the scalar statements.
6363 void
6364 vect_transform_grouped_load (stmt_vec_info stmt_info, vec<tree> dr_chain,
6365 int size, gimple_stmt_iterator *gsi)
6367 machine_mode mode;
6368 vec<tree> result_chain = vNULL;
6370 /* DR_CHAIN contains input data-refs that are a part of the interleaving.
6371 RESULT_CHAIN is the output of vect_permute_load_chain, it contains permuted
6372 vectors, that are ready for vector computation. */
6373 result_chain.create (size);
6375 /* If the reassociation width for the vector type is 2 or greater, the target
6376 machine can execute 2 or more vector instructions in parallel. Otherwise try
6377 to get the chain for the load group using vect_shift_permute_load_chain. */
6378 mode = TYPE_MODE (STMT_VINFO_VECTYPE (stmt_info));
6379 if (targetm.sched.reassociation_width (VEC_PERM_EXPR, mode) > 1
6380 || pow2p_hwi (size)
6381 || !vect_shift_permute_load_chain (dr_chain, size, stmt_info,
6382 gsi, &result_chain))
6383 vect_permute_load_chain (dr_chain, size, stmt_info, gsi, &result_chain);
6384 vect_record_grouped_load_vectors (stmt_info, result_chain);
6385 result_chain.release ();
6388 /* RESULT_CHAIN contains the output of a group of grouped loads that were
6389 generated as part of the vectorization of STMT_INFO. Assign the statement
6390 for each vector to the associated scalar statement. */
6392 void
6393 vect_record_grouped_load_vectors (stmt_vec_info stmt_info,
6394 vec<tree> result_chain)
6396 vec_info *vinfo = stmt_info->vinfo;
6397 stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
6398 unsigned int i, gap_count;
6399 tree tmp_data_ref;
6401 /* Put a permuted data-ref in the VECTORIZED_STMT field.
6402 Since we scan the chain starting from its first node, their order
6403 corresponds to the order of data-refs in RESULT_CHAIN.
6404 stmt_vec_info next_stmt_info = first_stmt_info;
6405 gap_count = 1;
6406 FOR_EACH_VEC_ELT (result_chain, i, tmp_data_ref)
6408 if (!next_stmt_info)
6409 break;
6411 /* Skip the gaps. Loads created for the gaps will be removed by the dead
6412 code elimination pass later. No need to check for the first stmt in
6413 the group, since it always exists.
6414 DR_GROUP_GAP is the number of steps in elements from the previous
6415 access (if there is no gap DR_GROUP_GAP is 1). We skip loads that
6416 correspond to the gaps. */
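/* E.g., for a group a[0], a[2] with DR_GROUP_GAP (stmt for a[2]) == 2,
   RESULT_CHAIN also holds a vector for the unused a[1] slot; that vector
   is skipped here (gap_count 1 < 2) and left for DCE, and the following
   vector is assigned to a[2]'s stmt.  */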
6417 if (next_stmt_info != first_stmt_info
6418 && gap_count < DR_GROUP_GAP (next_stmt_info))
6420 gap_count++;
6421 continue;
6424 /* ??? The following needs cleanup after the removal of
6425 DR_GROUP_SAME_DR_STMT. */
6426 if (next_stmt_info)
6428 stmt_vec_info new_stmt_info = vinfo->lookup_def (tmp_data_ref);
6429 /* We assume that if VEC_STMT is not NULL, this is a case of multiple
6430 copies, and we put the new vector statement in the first available
6431 RELATED_STMT. */
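/* E.g., when each scalar load needs two vector copies, the vector from the
   first copy becomes STMT_VINFO_VEC_STMT of the scalar stmt and the vector
   from the second copy is appended at the tail of its RELATED_STMT chain
   by the walk below.  */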
6432 if (!STMT_VINFO_VEC_STMT (next_stmt_info))
6433 STMT_VINFO_VEC_STMT (next_stmt_info) = new_stmt_info;
6434 else
6436 stmt_vec_info prev_stmt_info
6437 = STMT_VINFO_VEC_STMT (next_stmt_info);
6438 stmt_vec_info rel_stmt_info
6439 = STMT_VINFO_RELATED_STMT (prev_stmt_info);
6440 while (rel_stmt_info)
6442 prev_stmt_info = rel_stmt_info;
6443 rel_stmt_info = STMT_VINFO_RELATED_STMT (rel_stmt_info);
6446 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
6449 next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
6450 gap_count = 1;
6455 /* Function vect_can_force_dr_alignment_p.
6457 Returns whether the alignment of a DECL can be forced to be aligned
6458 on an ALIGNMENT-bit boundary. */
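/* E.g., an automatic variable may be realigned up to MAX_STACK_ALIGNMENT,
   a TREE_STATIC variable up to MAX_OFILE_ALIGNMENT, and a variable whose
   symtab node says its alignment cannot be increased is never forced.  */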
6460 bool
6461 vect_can_force_dr_alignment_p (const_tree decl, poly_uint64 alignment)
6463 if (!VAR_P (decl))
6464 return false;
6466 if (decl_in_symtab_p (decl)
6467 && !symtab_node::get (decl)->can_increase_alignment_p ())
6468 return false;
6470 if (TREE_STATIC (decl))
6471 return (known_le (alignment,
6472 (unsigned HOST_WIDE_INT) MAX_OFILE_ALIGNMENT));
6473 else
6474 return (known_le (alignment, (unsigned HOST_WIDE_INT) MAX_STACK_ALIGNMENT));
6478 /* Return whether the data reference DR_INFO is supported with respect to its
6479 alignment.
6480 If CHECK_ALIGNED_ACCESSES is TRUE, check if the access is supported even
6481 if it is aligned, i.e., check if it is possible to vectorize it with different
6482 alignment. */
6484 enum dr_alignment_support
6485 vect_supportable_dr_alignment (dr_vec_info *dr_info,
6486 bool check_aligned_accesses)
6488 data_reference *dr = dr_info->dr;
6489 stmt_vec_info stmt_info = dr_info->stmt;
6490 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
6491 machine_mode mode = TYPE_MODE (vectype);
6492 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6493 class loop *vect_loop = NULL;
6494 bool nested_in_vect_loop = false;
6496 if (aligned_access_p (dr_info) && !check_aligned_accesses)
6497 return dr_aligned;
6499 /* For now assume all conditional loads/stores support unaligned
6500 access without any special code. */
6501 if (gcall *stmt = dyn_cast <gcall *> (stmt_info->stmt))
6502 if (gimple_call_internal_p (stmt)
6503 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
6504 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
6505 return dr_unaligned_supported;
6507 if (loop_vinfo)
6509 vect_loop = LOOP_VINFO_LOOP (loop_vinfo);
6510 nested_in_vect_loop = nested_in_vect_loop_p (vect_loop, stmt_info);
6513 /* Possibly unaligned access. */
6515 /* We can choose between using the implicit realignment scheme (generating
6516 a misaligned_move stmt) and the explicit realignment scheme (generating
6517 aligned loads with a REALIGN_LOAD). There are two variants to the
6518 explicit realignment scheme: optimized, and unoptimized.
6519 We can optimize the realignment only if the step between consecutive
6520 vector loads is equal to the vector size. Since the vector memory
6521 accesses advance in steps of VS (Vector Size) in the vectorized loop, it
6522 is guaranteed that the misalignment amount remains the same throughout the
6523 execution of the vectorized loop. Therefore, we can create the
6524 "realignment token" (the permutation mask that is passed to REALIGN_LOAD)
6525 at the loop preheader.
6527 However, in the case of outer-loop vectorization, when vectorizing a
6528 memory access in the inner-loop nested within the LOOP that is now being
6529 vectorized, while it is guaranteed that the misalignment of the
6530 vectorized memory access will remain the same in different outer-loop
6531 iterations, it is *not* guaranteed that it will remain the same throughout
6532 the execution of the inner-loop. This is because the inner-loop advances
6533 with the original scalar step (and not in steps of VS). If the inner-loop
6534 step happens to be a multiple of VS, then the misalignment remains fixed
6535 and we can use the optimized realignment scheme. For example:
6537 for (i=0; i<N; i++)
6538 for (j=0; j<M; j++)
6539 s += a[i+j];
6541 When vectorizing the i-loop in the above example, the step between
6542 consecutive vector loads is 1, and so the misalignment does not remain
6543 fixed across the execution of the inner-loop, and the realignment cannot
6544 be optimized (as illustrated in the following pseudo vectorized loop):
6546 for (i=0; i<N; i+=4)
6547 for (j=0; j<M; j++){
6548 vs += vp[i+j]; // misalignment of &vp[i+j] is {0,1,2,3,0,1,2,3,...}
6549 // when j is {0,1,2,3,4,5,6,7,...} respectively.
6550 // (assuming that we start from an aligned address).
6553 We therefore have to use the unoptimized realignment scheme:
6555 for (i=0; i<N; i+=4)
6556 for (j=k; j<M; j+=4)
6557 vs += vp[i+j]; // misalignment of &vp[i+j] is always k (assuming
6558 // that the misalignment of the initial address is
6559 // 0).
6561 The loop can then be vectorized as follows:
6563 for (k=0; k<4; k++){
6564 rt = get_realignment_token (&vp[k]);
6565 for (i=0; i<N; i+=4){
6566 v1 = vp[i+k];
6567 for (j=k; j<M; j+=4){
6568 v2 = vp[i+j+VS-1];
6569 va = REALIGN_LOAD <v1,v2,rt>;
6570 vs += va;
6571 v1 = v2;
6574 } */
6576 if (DR_IS_READ (dr))
6578 bool is_packed = false;
6579 tree type = (TREE_TYPE (DR_REF (dr)));
6581 if (optab_handler (vec_realign_load_optab, mode) != CODE_FOR_nothing
6582 && (!targetm.vectorize.builtin_mask_for_load
6583 || targetm.vectorize.builtin_mask_for_load ()))
6585 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
6587 /* If we are doing SLP then the accesses need not have the
6588 same alignment, instead it depends on the SLP group size. */
6589 if (loop_vinfo
6590 && STMT_SLP_TYPE (stmt_info)
6591 && !multiple_p (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6592 * (DR_GROUP_SIZE
6593 (DR_GROUP_FIRST_ELEMENT (stmt_info))),
6594 TYPE_VECTOR_SUBPARTS (vectype)))
6596 else if (!loop_vinfo
6597 || (nested_in_vect_loop
6598 && maybe_ne (TREE_INT_CST_LOW (DR_STEP (dr)),
6599 GET_MODE_SIZE (TYPE_MODE (vectype)))))
6600 return dr_explicit_realign;
6601 else
6602 return dr_explicit_realign_optimized;
6604 if (!known_alignment_for_access_p (dr_info))
6605 is_packed = not_size_aligned (DR_REF (dr));
6607 if (targetm.vectorize.support_vector_misalignment
6608 (mode, type, DR_MISALIGNMENT (dr_info), is_packed))
6609 /* Can't software pipeline the loads, but can at least do them. */
6610 return dr_unaligned_supported;
6612 else
6614 bool is_packed = false;
6615 tree type = (TREE_TYPE (DR_REF (dr)));
6617 if (!known_alignment_for_access_p (dr_info))
6618 is_packed = not_size_aligned (DR_REF (dr));
6620 if (targetm.vectorize.support_vector_misalignment
6621 (mode, type, DR_MISALIGNMENT (dr_info), is_packed))
6622 return dr_unaligned_supported;
6625 /* Unsupported. */
6626 return dr_unaligned_unsupported;